--- /dev/null
+From 2ded3e5b61d61d0bc90bebb8004db6184c7db6eb Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 28 Nov 2013 11:05:28 +0100
+Subject: ALSA: hda - Check leaf nodes to find aamix amps
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 2ded3e5b61d61d0bc90bebb8004db6184c7db6eb upstream.
+
+The current generic parser assumes blindly that the volume and mute
+amps are found in the aamix node itself. But on some codecs,
+typically Analog Devices ones, the aamix amps are separately
+implemented in each leaf node of the aamix node, and the current
+driver can't establish the correct amp controls. This is a regression
+compared with the previous static quirks.
+
+This patch extends the search for the amps to the leaf nodes for
+allowing the aamix controls again on such codecs.
+In this implementation, I didn't code to loop through the whole paths,
+since usually one depth should suffice, and we can't search too
+deeply, as it may result in the conflicting control assignments.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=65641
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_generic.c | 57 ++++++++++++++++++++++++++++++++++----------
+ 1 file changed, 45 insertions(+), 12 deletions(-)
+
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -2808,6 +2808,42 @@ static int add_loopback_list(struct hda_
+ return 0;
+ }
+
++/* return true if either a volume or a mute amp is found for the given
++ * aamix path; the amp has to be either in the mixer node or its direct leaf
++ */
++static bool look_for_mix_leaf_ctls(struct hda_codec *codec, hda_nid_t mix_nid,
++ hda_nid_t pin, unsigned int *mix_val,
++ unsigned int *mute_val)
++{
++ int idx, num_conns;
++ const hda_nid_t *list;
++ hda_nid_t nid;
++
++ idx = snd_hda_get_conn_index(codec, mix_nid, pin, true);
++ if (idx < 0)
++ return false;
++
++ *mix_val = *mute_val = 0;
++ if (nid_has_volume(codec, mix_nid, HDA_INPUT))
++ *mix_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
++ if (nid_has_mute(codec, mix_nid, HDA_INPUT))
++ *mute_val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
++ if (*mix_val && *mute_val)
++ return true;
++
++ /* check leaf node */
++ num_conns = snd_hda_get_conn_list(codec, mix_nid, &list);
++ if (num_conns < idx)
++ return false;
++ nid = list[idx];
++ if (!*mix_val && nid_has_volume(codec, nid, HDA_OUTPUT))
++ *mix_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
++ if (!*mute_val && nid_has_mute(codec, nid, HDA_OUTPUT))
++ *mute_val = HDA_COMPOSE_AMP_VAL(nid, 3, 0, HDA_OUTPUT);
++
++ return *mix_val || *mute_val;
++}
++
+ /* create input playback/capture controls for the given pin */
+ static int new_analog_input(struct hda_codec *codec, int input_idx,
+ hda_nid_t pin, const char *ctlname, int ctlidx,
+@@ -2815,12 +2851,11 @@ static int new_analog_input(struct hda_c
+ {
+ struct hda_gen_spec *spec = codec->spec;
+ struct nid_path *path;
+- unsigned int val;
++ unsigned int mix_val, mute_val;
+ int err, idx;
+
+- if (!nid_has_volume(codec, mix_nid, HDA_INPUT) &&
+- !nid_has_mute(codec, mix_nid, HDA_INPUT))
+- return 0; /* no need for analog loopback */
++ if (!look_for_mix_leaf_ctls(codec, mix_nid, pin, &mix_val, &mute_val))
++ return 0;
+
+ path = snd_hda_add_new_path(codec, pin, mix_nid, 0);
+ if (!path)
+@@ -2829,20 +2864,18 @@ static int new_analog_input(struct hda_c
+ spec->loopback_paths[input_idx] = snd_hda_get_path_idx(codec, path);
+
+ idx = path->idx[path->depth - 1];
+- if (nid_has_volume(codec, mix_nid, HDA_INPUT)) {
+- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+- err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, val);
++ if (mix_val) {
++ err = __add_pb_vol_ctrl(spec, HDA_CTL_WIDGET_VOL, ctlname, ctlidx, mix_val);
+ if (err < 0)
+ return err;
+- path->ctls[NID_PATH_VOL_CTL] = val;
++ path->ctls[NID_PATH_VOL_CTL] = mix_val;
+ }
+
+- if (nid_has_mute(codec, mix_nid, HDA_INPUT)) {
+- val = HDA_COMPOSE_AMP_VAL(mix_nid, 3, idx, HDA_INPUT);
+- err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, val);
++ if (mute_val) {
++ err = __add_pb_sw_ctrl(spec, HDA_CTL_WIDGET_MUTE, ctlname, ctlidx, mute_val);
+ if (err < 0)
+ return err;
+- path->ctls[NID_PATH_MUTE_CTL] = val;
++ path->ctls[NID_PATH_MUTE_CTL] = mute_val;
+ }
+
+ path->active = true;
--- /dev/null
+From ced4cefc75fdb8be95eaee325ad0f6b2fc0a484b Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 26 Nov 2013 08:33:45 +0100
+Subject: ALSA: hda - Create Headhpone Mic Jack Mode when really needed
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit ced4cefc75fdb8be95eaee325ad0f6b2fc0a484b upstream.
+
+When a headphone jack is configurable as input, the generic parser
+tries to make it retaskable as Headphone Mic. The switching can be
+done smoothly if Capture Source control exists (i.e. there is another
+input source). Or when user explicitly enables the creation of jack
+mode controls, "Headphone Mic Jack Mode" will be created accordingly.
+
+However, if the headphone mic is the only input source, we have to
+create "Headphone Mic Jack Mode" control because there is no capture
+source selection. Otherwise, the generic parser assumes that the
+input is constantly enabled, thus the headphone is permanently set
+as input. This situation happens on the old MacBook Airs where no
+input is supported properly, for example.
+
+This patch fixes the problem: now "Headphone Mic Jack Mode" is created
+when such an input selection isn't possible.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=65681
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_generic.c | 20 ++++++++++++--------
+ 1 file changed, 12 insertions(+), 8 deletions(-)
+
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -2506,12 +2506,8 @@ static int create_out_jack_modes(struct
+
+ for (i = 0; i < num_pins; i++) {
+ hda_nid_t pin = pins[i];
+- if (pin == spec->hp_mic_pin) {
+- int ret = create_hp_mic_jack_mode(codec, pin);
+- if (ret < 0)
+- return ret;
++ if (pin == spec->hp_mic_pin)
+ continue;
+- }
+ if (get_out_jack_num_items(codec, pin) > 1) {
+ struct snd_kcontrol_new *knew;
+ char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
+@@ -2784,9 +2780,6 @@ static int create_hp_mic_jack_mode(struc
+ struct hda_gen_spec *spec = codec->spec;
+ struct snd_kcontrol_new *knew;
+
+- if (get_out_jack_num_items(codec, pin) <= 1 &&
+- get_in_jack_num_items(codec, pin) <= 1)
+- return 0; /* no need */
+ knew = snd_hda_gen_add_kctl(spec, "Headphone Mic Jack Mode",
+ &hp_mic_jack_mode_enum);
+ if (!knew)
+@@ -4383,6 +4376,17 @@ int snd_hda_gen_parse_auto_config(struct
+ if (err < 0)
+ return err;
+
++ /* create "Headphone Mic Jack Mode" if no input selection is
++ * available (or user specifies add_jack_modes hint)
++ */
++ if (spec->hp_mic_pin &&
++ (spec->auto_mic || spec->input_mux.num_items == 1 ||
++ spec->add_jack_modes)) {
++ err = create_hp_mic_jack_mode(codec, spec->hp_mic_pin);
++ if (err < 0)
++ return err;
++ }
++
+ if (spec->add_jack_modes) {
+ if (cfg->line_out_type != AUTO_PIN_SPEAKER_OUT) {
+ err = create_out_jack_modes(codec, cfg->line_outs,
--- /dev/null
+From 16c0cefe8951b2c4b824fd06011ac1b359b1ab3b Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Tue, 26 Nov 2013 08:44:26 +0100
+Subject: ALSA: hda - Fix hp-mic mode without VREF bits
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 16c0cefe8951b2c4b824fd06011ac1b359b1ab3b upstream.
+
+When the hp mic pin has no VREF bits, the driver forgot to set PIN_IN
+bit. Spotted during debugging old MacBook Airs.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=65681
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/hda_generic.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/pci/hda/hda_generic.c
++++ b/sound/pci/hda/hda_generic.c
+@@ -2764,7 +2764,7 @@ static int hp_mic_jack_mode_put(struct s
+ val &= ~(AC_PINCTL_VREFEN | PIN_HP);
+ val |= get_vref_idx(vref_caps, idx) | PIN_IN;
+ } else
+- val = snd_hda_get_default_vref(codec, nid);
++ val = snd_hda_get_default_vref(codec, nid) | PIN_IN;
+ }
+ snd_hda_set_pin_ctl_cache(codec, nid, val);
+ call_hp_automute(codec, NULL);
--- /dev/null
+From 1f0bbf03cb829162ec8e6d03c98aaaed88c6f534 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 28 Nov 2013 15:21:21 +0100
+Subject: ALSA: hda - Initialize missing bass speaker pin for ASUS AIO ET2700
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 1f0bbf03cb829162ec8e6d03c98aaaed88c6f534 upstream.
+
+Add a fixup entry for the missing bass speaker pin 0x16 on ASUS ET2700
+AiO desktop. The channel map will be added in the next patch, so that
+this can be backported easily to stable kernels.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=65961
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -1771,6 +1771,7 @@ enum {
+ ALC889_FIXUP_IMAC91_VREF,
+ ALC882_FIXUP_INV_DMIC,
+ ALC882_FIXUP_NO_PRIMARY_HP,
++ ALC887_FIXUP_ASUS_BASS,
+ };
+
+ static void alc889_fixup_coef(struct hda_codec *codec,
+@@ -2094,6 +2095,13 @@ static const struct hda_fixup alc882_fix
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc882_fixup_no_primary_hp,
+ },
++ [ALC887_FIXUP_ASUS_BASS] = {
++ .type = HDA_FIXUP_PINS,
++ .v.pins = (const struct hda_pintbl[]) {
++ {0x16, 0x99130130}, /* bass speaker */
++ {}
++ },
++ },
+ };
+
+ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
+@@ -2127,6 +2135,7 @@ static const struct snd_pci_quirk alc882
+ SND_PCI_QUIRK(0x1043, 0x1873, "ASUS W90V", ALC882_FIXUP_ASUS_W90V),
+ SND_PCI_QUIRK(0x1043, 0x1971, "Asus W2JC", ALC882_FIXUP_ASUS_W2JC),
+ SND_PCI_QUIRK(0x1043, 0x835f, "Asus Eee 1601", ALC888_FIXUP_EEE1601),
++ SND_PCI_QUIRK(0x1043, 0x84bc, "ASUS ET2700", ALC887_FIXUP_ASUS_BASS),
+ SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
+ SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
--- /dev/null
+From ba4c4d0a9021ab034554d532a98133d668b87599 Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Tue, 26 Nov 2013 15:17:50 +0800
+Subject: ALSA: hda/realtek - Add support of ALC231 codec
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit ba4c4d0a9021ab034554d532a98133d668b87599 upstream.
+
+It's compatible with ALC269.
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4922,6 +4922,7 @@ static int patch_alc680(struct hda_codec
+ */
+ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0221, .name = "ALC221", .patch = patch_alc269 },
++ { .id = 0x10ec0231, .name = "ALC231", .patch = patch_alc269 },
+ { .id = 0x10ec0233, .name = "ALC233", .patch = patch_alc269 },
+ { .id = 0x10ec0255, .name = "ALC255", .patch = patch_alc269 },
+ { .id = 0x10ec0260, .name = "ALC260", .patch = patch_alc260 },
--- /dev/null
+From 9ad54547cf6f4410eba83bb95dfd2a0966718d6d Mon Sep 17 00:00:00 2001
+From: Kailang Yang <kailang@realtek.com>
+Date: Tue, 26 Nov 2013 15:41:40 +0800
+Subject: ALSA: hda/realtek - Set pcbeep amp for ALC668
+
+From: Kailang Yang <kailang@realtek.com>
+
+commit 9ad54547cf6f4410eba83bb95dfd2a0966718d6d upstream.
+
+Set the missing pcbeep default amp for ALC668.
+
+Signed-off-by: Kailang Yang <kailang@realtek.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -4864,6 +4864,7 @@ static int patch_alc662(struct hda_codec
+ case 0x10ec0272:
+ case 0x10ec0663:
+ case 0x10ec0665:
++ case 0x10ec0668:
+ set_beep_amp(spec, 0x0b, 0x04, HDA_INPUT);
+ break;
+ case 0x10ec0273:
--- /dev/null
+From 3676f9ef5481d614f8c5c857f5319755be248268 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Wed, 27 Nov 2013 16:59:27 +0000
+Subject: arm64: Move PTE_PROT_NONE higher up
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 3676f9ef5481d614f8c5c857f5319755be248268 upstream.
+
+PTE_PROT_NONE means that a pte is present but does not have any
+read/write attributes. However, setting the memory type like
+pgprot_writecombine() is allowed and such bits overlap with
+PTE_PROT_NONE. This causes mmap/munmap issues in drivers that change the
+vma->vm_page_prot on PROT_NONE mappings.
+
+This patch reverts the PTE_FILE/PTE_PROT_NONE shift in commit
+59911ca4325d (ARM64: mm: Move PTE_PROT_NONE bit) and moves PTE_PROT_NONE
+together with the other software bits.
+
+Signed-off-by: Steve Capper <steve.capper@linaro.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Tested-by: Steve Capper <steve.capper@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable.h | 31 +++++++++++++++++--------------
+ 1 file changed, 17 insertions(+), 14 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -25,10 +25,11 @@
+ * Software defined PTE bits definition.
+ */
+ #define PTE_VALID (_AT(pteval_t, 1) << 0)
+-#define PTE_PROT_NONE (_AT(pteval_t, 1) << 2) /* only when !PTE_VALID */
+-#define PTE_FILE (_AT(pteval_t, 1) << 3) /* only when !pte_present() */
++#define PTE_FILE (_AT(pteval_t, 1) << 2) /* only when !pte_present() */
+ #define PTE_DIRTY (_AT(pteval_t, 1) << 55)
+ #define PTE_SPECIAL (_AT(pteval_t, 1) << 56)
++ /* bit 57 for PMD_SECT_SPLITTING */
++#define PTE_PROT_NONE (_AT(pteval_t, 1) << 58) /* only when !PTE_VALID */
+
+ /*
+ * VMALLOC and SPARSEMEM_VMEMMAP ranges.
+@@ -357,18 +358,20 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+ /*
+ * Encode and decode a swap entry:
+- * bits 0, 2: present (must both be zero)
+- * bit 3: PTE_FILE
+- * bits 4-8: swap type
+- * bits 9-63: swap offset
++ * bits 0-1: present (must be zero)
++ * bit 2: PTE_FILE
++ * bits 3-8: swap type
++ * bits 9-57: swap offset
+ */
+-#define __SWP_TYPE_SHIFT 4
++#define __SWP_TYPE_SHIFT 3
+ #define __SWP_TYPE_BITS 6
++#define __SWP_OFFSET_BITS 49
+ #define __SWP_TYPE_MASK ((1 << __SWP_TYPE_BITS) - 1)
+ #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
++#define __SWP_OFFSET_MASK ((1UL << __SWP_OFFSET_BITS) - 1)
+
+ #define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+-#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
++#define __swp_offset(x) (((x).val >> __SWP_OFFSET_SHIFT) & __SWP_OFFSET_MASK)
+ #define __swp_entry(type,offset) ((swp_entry_t) { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+ #define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+@@ -382,15 +385,15 @@ extern pgd_t idmap_pg_dir[PTRS_PER_PGD];
+
+ /*
+ * Encode and decode a file entry:
+- * bits 0, 2: present (must both be zero)
+- * bit 3: PTE_FILE
+- * bits 4-63: file offset / PAGE_SIZE
++ * bits 0-1: present (must be zero)
++ * bit 2: PTE_FILE
++ * bits 3-57: file offset / PAGE_SIZE
+ */
+ #define pte_file(pte) (pte_val(pte) & PTE_FILE)
+-#define pte_to_pgoff(x) (pte_val(x) >> 4)
+-#define pgoff_to_pte(x) __pte(((x) << 4) | PTE_FILE)
++#define pte_to_pgoff(x) (pte_val(x) >> 3)
++#define pgoff_to_pte(x) __pte(((x) << 3) | PTE_FILE)
+
+-#define PTE_FILE_MAX_BITS 60
++#define PTE_FILE_MAX_BITS 55
+
+ extern int kern_addr_valid(unsigned long addr);
+
--- /dev/null
+From e605b36575e896edd8161534550c9ea021b03bc0 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Wed, 27 Nov 2013 18:16:21 -0500
+Subject: cgroup: fix cgroup_subsys_state leak for seq_files
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e605b36575e896edd8161534550c9ea021b03bc0 upstream.
+
+If a cgroup file implements either read_map() or read_seq_string(),
+such file is served using seq_file by overriding file->f_op to
+cgroup_seqfile_operations, which also overrides the release method to
+single_release() from cgroup_file_release().
+
+Because cgroup_file_open() didn't use to acquire any resources, this
+used to be fine, but since f7d58818ba42 ("cgroup: pin
+cgroup_subsys_state when opening a cgroupfs file"), cgroup_file_open()
+pins the css (cgroup_subsys_state) which is put by
+cgroup_file_release(). The patch forgot to update the release path
+for seq_files and each open/release cycle leaks a css reference.
+
+Fix it by updating cgroup_file_release() to also handle seq_files and
+using it for seq_file release path too.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -231,6 +231,7 @@ static void cgroup_destroy_css_killed(st
+ static int cgroup_destroy_locked(struct cgroup *cgrp);
+ static int cgroup_addrm_files(struct cgroup *cgrp, struct cftype cfts[],
+ bool is_add);
++static int cgroup_file_release(struct inode *inode, struct file *file);
+
+ /**
+ * cgroup_css - obtain a cgroup's css for the specified subsystem
+@@ -2471,7 +2472,7 @@ static const struct file_operations cgro
+ .read = seq_read,
+ .write = cgroup_file_write,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = cgroup_file_release,
+ };
+
+ static int cgroup_file_open(struct inode *inode, struct file *file)
+@@ -2532,6 +2533,8 @@ static int cgroup_file_release(struct in
+ ret = cft->release(inode, file);
+ if (css->ss)
+ css_put(css);
++ if (file->f_op == &cgroup_seqfile_operations)
++ single_release(inode, file);
+ return ret;
+ }
+
--- /dev/null
+From e5fca243abae1445afbfceebda5f08462ef869d3 Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Fri, 22 Nov 2013 17:14:39 -0500
+Subject: cgroup: use a dedicated workqueue for cgroup destruction
+
+From: Tejun Heo <tj@kernel.org>
+
+commit e5fca243abae1445afbfceebda5f08462ef869d3 upstream.
+
+Since be44562613851 ("cgroup: remove synchronize_rcu() from
+cgroup_diput()"), cgroup destruction path makes use of workqueue. css
+freeing is performed from a work item from that point on and a later
+commit, ea15f8ccdb430 ("cgroup: split cgroup destruction into two
+steps"), moves css offlining to workqueue too.
+
+As cgroup destruction isn't depended upon for memory reclaim, the
+destruction work items were put on the system_wq; unfortunately, some
+controller may block in the destruction path for considerable duration
+while holding cgroup_mutex. As large part of destruction path is
+synchronized through cgroup_mutex, when combined with high rate of
+cgroup removals, this has potential to fill up system_wq's max_active
+of 256.
+
+Also, it turns out that memcg's css destruction path ends up queueing
+and waiting for work items on system_wq through work_on_cpu(). If
+such operation happens while system_wq is fully occupied by cgroup
+destruction work items, work_on_cpu() can't make forward progress
+because system_wq is full and other destruction work items on
+system_wq can't make forward progress because the work item waiting
+for work_on_cpu() is holding cgroup_mutex, leading to deadlock.
+
+This can be fixed by queueing destruction work items on a separate
+workqueue. This patch creates a dedicated workqueue -
+cgroup_destroy_wq - for this purpose. As these work items shouldn't
+have inter-dependencies and mostly serialized by cgroup_mutex anyway,
+giving high concurrency level doesn't buy anything and the workqueue's
+@max_active is set to 1 so that destruction work items are executed
+one by one on each CPU.
+
+Hugh Dickins: Because cgroup_init() is run before init_workqueues(),
+cgroup_destroy_wq can't be allocated from cgroup_init(). Do it from a
+separate core_initcall(). In the future, we probably want to reorder
+so that workqueue init happens before cgroup_init().
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Hugh Dickins <hughd@google.com>
+Reported-by: Shawn Bohrer <shawn.bohrer@gmail.com>
+Link: http://lkml.kernel.org/r/20131111220626.GA7509@sbohrermbp13-local.rgmadvisors.com
+Link: http://lkml.kernel.org/g/alpine.LNX.2.00.1310301606080.2333@eggly.anvils
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cgroup.c | 30 +++++++++++++++++++++++++++---
+ 1 file changed, 27 insertions(+), 3 deletions(-)
+
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -90,6 +90,14 @@ static DEFINE_MUTEX(cgroup_mutex);
+ static DEFINE_MUTEX(cgroup_root_mutex);
+
+ /*
++ * cgroup destruction makes heavy use of work items and there can be a lot
++ * of concurrent destructions. Use a separate workqueue so that cgroup
++ * destruction work items don't end up filling up max_active of system_wq
++ * which may lead to deadlock.
++ */
++static struct workqueue_struct *cgroup_destroy_wq;
++
++/*
+ * Generate an array of cgroup subsystem pointers. At boot time, this is
+ * populated with the built in subsystems, and modular subsystems are
+ * registered after that. The mutable section of this array is protected by
+@@ -908,7 +916,7 @@ static void cgroup_free_rcu(struct rcu_h
+ struct cgroup *cgrp = container_of(head, struct cgroup, rcu_head);
+
+ INIT_WORK(&cgrp->destroy_work, cgroup_free_fn);
+- schedule_work(&cgrp->destroy_work);
++ queue_work(cgroup_destroy_wq, &cgrp->destroy_work);
+ }
+
+ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
+@@ -4306,7 +4314,7 @@ static void css_free_rcu_fn(struct rcu_h
+ * css_put(). dput() requires process context which we don't have.
+ */
+ INIT_WORK(&css->destroy_work, css_free_work_fn);
+- schedule_work(&css->destroy_work);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+ static void css_release(struct percpu_ref *ref)
+@@ -4603,7 +4611,7 @@ static void css_killed_ref_fn(struct per
+ container_of(ref, struct cgroup_subsys_state, refcnt);
+
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+- schedule_work(&css->destroy_work);
++ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
+
+ /**
+@@ -5139,6 +5147,22 @@ out:
+ return err;
+ }
+
++static int __init cgroup_wq_init(void)
++{
++ /*
++ * There isn't much point in executing destruction path in
++ * parallel. Good chunk is serialized with cgroup_mutex anyway.
++ * Use 1 for @max_active.
++ *
++ * We would prefer to do this in cgroup_init() above, but that
++ * is called before init_workqueues(): so leave this until after.
++ */
++ cgroup_destroy_wq = alloc_workqueue("cgroup_destroy", 0, 1);
++ BUG_ON(!cgroup_destroy_wq);
++ return 0;
++}
++core_initcall(cgroup_wq_init);
++
+ /*
+ * proc_cgroup_show()
+ * - Print task's cgroup paths into seq_file, one line for each hierarchy
--- /dev/null
+From 0fc0287c9ed1ffd3706f8b4d9b314aa102ef1245 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 26 Nov 2013 15:03:41 +0100
+Subject: cpuset: Fix memory allocator deadlock
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 0fc0287c9ed1ffd3706f8b4d9b314aa102ef1245 upstream.
+
+Juri hit the below lockdep report:
+
+[ 4.303391] ======================================================
+[ 4.303392] [ INFO: SOFTIRQ-safe -> SOFTIRQ-unsafe lock order detected ]
+[ 4.303394] 3.12.0-dl-peterz+ #144 Not tainted
+[ 4.303395] ------------------------------------------------------
+[ 4.303397] kworker/u4:3/689 [HC0[0]:SC0[0]:HE0:SE1] is trying to acquire:
+[ 4.303399] (&p->mems_allowed_seq){+.+...}, at: [<ffffffff8114e63c>] new_slab+0x6c/0x290
+[ 4.303417]
+[ 4.303417] and this task is already holding:
+[ 4.303418] (&(&q->__queue_lock)->rlock){..-...}, at: [<ffffffff812d2dfb>] blk_execute_rq_nowait+0x5b/0x100
+[ 4.303431] which would create a new lock dependency:
+[ 4.303432] (&(&q->__queue_lock)->rlock){..-...} -> (&p->mems_allowed_seq){+.+...}
+[ 4.303436]
+
+[ 4.303898] the dependencies between the lock to be acquired and SOFTIRQ-irq-unsafe lock:
+[ 4.303918] -> (&p->mems_allowed_seq){+.+...} ops: 2762 {
+[ 4.303922] HARDIRQ-ON-W at:
+[ 4.303923] [<ffffffff8108ab9a>] __lock_acquire+0x65a/0x1ff0
+[ 4.303926] [<ffffffff8108cbe3>] lock_acquire+0x93/0x140
+[ 4.303929] [<ffffffff81063dd6>] kthreadd+0x86/0x180
+[ 4.303931] [<ffffffff816ded6c>] ret_from_fork+0x7c/0xb0
+[ 4.303933] SOFTIRQ-ON-W at:
+[ 4.303933] [<ffffffff8108abcc>] __lock_acquire+0x68c/0x1ff0
+[ 4.303935] [<ffffffff8108cbe3>] lock_acquire+0x93/0x140
+[ 4.303940] [<ffffffff81063dd6>] kthreadd+0x86/0x180
+[ 4.303955] [<ffffffff816ded6c>] ret_from_fork+0x7c/0xb0
+[ 4.303959] INITIAL USE at:
+[ 4.303960] [<ffffffff8108a884>] __lock_acquire+0x344/0x1ff0
+[ 4.303963] [<ffffffff8108cbe3>] lock_acquire+0x93/0x140
+[ 4.303966] [<ffffffff81063dd6>] kthreadd+0x86/0x180
+[ 4.303969] [<ffffffff816ded6c>] ret_from_fork+0x7c/0xb0
+[ 4.303972] }
+
+Which reports that we take mems_allowed_seq with interrupts enabled. A
+little digging found that this can only be from
+cpuset_change_task_nodemask().
+
+This is an actual deadlock because an interrupt doing an allocation will
+hit get_mems_allowed()->...->__read_seqcount_begin(), which will spin
+forever waiting for the write side to complete.
+
+Cc: John Stultz <john.stultz@linaro.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Reported-by: Juri Lelli <juri.lelli@gmail.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Tested-by: Juri Lelli <juri.lelli@gmail.com>
+Acked-by: Li Zefan <lizefan@huawei.com>
+Acked-by: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/cpuset.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/kernel/cpuset.c
++++ b/kernel/cpuset.c
+@@ -1033,8 +1033,10 @@ static void cpuset_change_task_nodemask(
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
+
+- if (need_loop)
++ if (need_loop) {
++ local_irq_disable();
+ write_seqcount_begin(&tsk->mems_allowed_seq);
++ }
+
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
+@@ -1042,8 +1044,10 @@ static void cpuset_change_task_nodemask(
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
+ tsk->mems_allowed = *newmems;
+
+- if (need_loop)
++ if (need_loop) {
+ write_seqcount_end(&tsk->mems_allowed_seq);
++ local_irq_enable();
++ }
+
+ task_unlock(tsk);
+ }
--- /dev/null
+From 958b84fb3bef193198538b5c5902fa687cc8363f Mon Sep 17 00:00:00 2001
+From: Alex Deucher <alexander.deucher@amd.com>
+Date: Thu, 14 Nov 2013 10:17:34 -0500
+Subject: drm/radeon: adjust TN dpm parameters for stability (v2)
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+commit 958b84fb3bef193198538b5c5902fa687cc8363f upstream.
+
+Adjust some of the TN dpm settings for stability. Enabling
+these features causes hangs and other stability problems
+on certain boards.
+
+v2: leave uvd dpm enabled
+
+Bug:
+https://bugzilla.kernel.org/show_bug.cgi?id=63101
+
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/trinity_dpm.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/radeon/trinity_dpm.c
++++ b/drivers/gpu/drm/radeon/trinity_dpm.c
+@@ -1873,9 +1873,9 @@ int trinity_dpm_init(struct radeon_devic
+ pi->enable_sclk_ds = true;
+ pi->enable_gfx_power_gating = true;
+ pi->enable_gfx_clock_gating = true;
+- pi->enable_mg_clock_gating = true;
+- pi->enable_gfx_dynamic_mgpg = true; /* ??? */
+- pi->override_dynamic_mgpg = true;
++ pi->enable_mg_clock_gating = false;
++ pi->enable_gfx_dynamic_mgpg = false;
++ pi->override_dynamic_mgpg = false;
+ pi->enable_auto_thermal_throttling = true;
+ pi->voltage_drop_in_dce = false; /* need to restructure dpm/modeset interaction */
+ pi->uvd_dpm = true; /* ??? */
--- /dev/null
+From 7272c9d2286525d4c6bce788243cf2b6f306d15c Mon Sep 17 00:00:00 2001
+From: Samuel Li <samuel.li@amd.com>
+Date: Tue, 19 Nov 2013 15:04:45 -0500
+Subject: drm/radeon: hook up backlight functions for CI and KV family.
+
+From: Samuel Li <samuel.li@amd.com>
+
+commit 7272c9d2286525d4c6bce788243cf2b6f306d15c upstream.
+
+Fixes crashes when handling atif events due to the lack of a
+callback being registered.
+
+Signed-off-by: Samuel Li <samuel.li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_asic.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/radeon_asic.c
++++ b/drivers/gpu/drm/radeon/radeon_asic.c
+@@ -2019,6 +2019,8 @@ static struct radeon_asic ci_asic = {
+ .bandwidth_update = &dce8_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
++ .set_backlight_level = &atombios_set_backlight_level,
++ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
+@@ -2119,6 +2121,8 @@ static struct radeon_asic kv_asic = {
+ .bandwidth_update = &dce8_bandwidth_update,
+ .get_vblank_counter = &evergreen_get_vblank_counter,
+ .wait_for_vblank = &dce4_wait_for_vblank,
++ .set_backlight_level = &atombios_set_backlight_level,
++ .get_backlight_level = &atombios_get_backlight_level,
+ .hdmi_enable = &evergreen_hdmi_enable,
+ .hdmi_setmode = &evergreen_hdmi_setmode,
+ },
--- /dev/null
+From a72b8859fd3941cc1d2940d5c43026d2c6fb959e Mon Sep 17 00:00:00 2001
+From: Robert Richter <robert.richter@linaro.org>
+Date: Thu, 10 Oct 2013 18:23:38 +0200
+Subject: edac, highbank: Fix interrupt setup of mem and l2 controller
+
+From: Robert Richter <robert.richter@linaro.org>
+
+commit a72b8859fd3941cc1d2940d5c43026d2c6fb959e upstream.
+
+Register and enable interrupts after the edac registration. Otherwise
+incoming ecc error interrupts lead to crashes during device setup.
+
+Fixing this in drivers for mc and l2.
+
+Signed-off-by: Robert Richter <robert.richter@linaro.org>
+Acked-by: Rob Herring <rob.herring@calxeda.com>
+Signed-off-by: Robert Richter <rric@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/edac/highbank_l2_edac.c | 18 ++++++++++--------
+ drivers/edac/highbank_mc_edac.c | 18 ++++++++++--------
+ 2 files changed, 20 insertions(+), 16 deletions(-)
+
+--- a/drivers/edac/highbank_l2_edac.c
++++ b/drivers/edac/highbank_l2_edac.c
+@@ -90,28 +90,30 @@ static int highbank_l2_err_probe(struct
+ goto err;
+ }
+
++ dci->mod_name = dev_name(&pdev->dev);
++ dci->dev_name = dev_name(&pdev->dev);
++
++ if (edac_device_add_device(dci))
++ goto err;
++
+ drvdata->db_irq = platform_get_irq(pdev, 0);
+ res = devm_request_irq(&pdev->dev, drvdata->db_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+- goto err;
++ goto err2;
+
+ drvdata->sb_irq = platform_get_irq(pdev, 1);
+ res = devm_request_irq(&pdev->dev, drvdata->sb_irq,
+ highbank_l2_err_handler,
+ 0, dev_name(&pdev->dev), dci);
+ if (res < 0)
+- goto err;
+-
+- dci->mod_name = dev_name(&pdev->dev);
+- dci->dev_name = dev_name(&pdev->dev);
+-
+- if (edac_device_add_device(dci))
+- goto err;
++ goto err2;
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
++err2:
++ edac_device_del_device(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_device_free_ctl_info(dci);
+--- a/drivers/edac/highbank_mc_edac.c
++++ b/drivers/edac/highbank_mc_edac.c
+@@ -189,14 +189,6 @@ static int highbank_mc_probe(struct plat
+ goto err;
+ }
+
+- irq = platform_get_irq(pdev, 0);
+- res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
+- 0, dev_name(&pdev->dev), mci);
+- if (res < 0) {
+- dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
+- goto err;
+- }
+-
+ mci->mtype_cap = MEM_FLAG_DDR3;
+ mci->edac_ctl_cap = EDAC_FLAG_NONE | EDAC_FLAG_SECDED;
+ mci->edac_cap = EDAC_FLAG_SECDED;
+@@ -217,10 +209,20 @@ static int highbank_mc_probe(struct plat
+ if (res < 0)
+ goto err;
+
++ irq = platform_get_irq(pdev, 0);
++ res = devm_request_irq(&pdev->dev, irq, highbank_mc_err_handler,
++ 0, dev_name(&pdev->dev), mci);
++ if (res < 0) {
++ dev_err(&pdev->dev, "Unable to request irq %d\n", irq);
++ goto err2;
++ }
++
+ highbank_mc_create_debugfs_nodes(mci);
+
+ devres_close_group(&pdev->dev, NULL);
+ return 0;
++err2:
++ edac_mc_del_mc(&pdev->dev);
+ err:
+ devres_release_group(&pdev->dev, NULL);
+ edac_mc_free(mci);
--- /dev/null
+From 80897aa787ecd58eabb29deab7cbec9249c9b7e6 Mon Sep 17 00:00:00 2001
+From: David Herrmann <dh.herrmann@gmail.com>
+Date: Tue, 26 Nov 2013 13:58:18 +0100
+Subject: HID: uhid: fix leak for 64/32 UHID_CREATE
+
+From: David Herrmann <dh.herrmann@gmail.com>
+
+commit 80897aa787ecd58eabb29deab7cbec9249c9b7e6 upstream.
+
+UHID allows short writes so user-space can omit unused fields. We
+automatically set them to 0 in the kernel. However, the 64/32 bit
+compat-handler didn't do that in the UHID_CREATE fallback. This will
+reveal random kernel heap data (of random size, even) to user-space.
+
+Fixes: befde0226a59 ('HID: uhid: make creating devices work on 64/32 systems')
+
+Reported-by: Ben Hutchings <ben@decadent.org.uk>
+Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/uhid.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/hid/uhid.c
++++ b/drivers/hid/uhid.c
+@@ -287,7 +287,7 @@ static int uhid_event_from_user(const ch
+ */
+ struct uhid_create_req_compat *compat;
+
+- compat = kmalloc(sizeof(*compat), GFP_KERNEL);
++ compat = kzalloc(sizeof(*compat), GFP_KERNEL);
+ if (!compat)
+ return -ENOMEM;
+
--- /dev/null
+From 0abda6fa81dced031e3df31ac29bfb253549c2d1 Mon Sep 17 00:00:00 2001
+From: David Herrmann <dh.herrmann@gmail.com>
+Date: Mon, 28 Oct 2013 17:47:53 +0100
+Subject: HID: wiimote: fix inverted pro-controller axes
+
+From: David Herrmann <dh.herrmann@gmail.com>
+
+commit 0abda6fa81dced031e3df31ac29bfb253549c2d1 upstream.
+
+The analog-stick vertical axes are inverted. Fix that! Otherwise, games
+and other gamepad applications need to carry their own fixups (which they
+thankfully haven't done, yet).
+
+Reported-by: Rafael Brune <mail@rbrune.de>
+Tested-by: Rafael Brune <mail@rbrune.de>
+Signed-off-by: David Herrmann <dh.herrmann@gmail.com>
+Signed-off-by: Jiri Kosina <jkosina@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/hid/hid-wiimote-modules.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/hid/hid-wiimote-modules.c
++++ b/drivers/hid/hid-wiimote-modules.c
+@@ -1656,9 +1656,9 @@ static void wiimod_pro_in_ext(struct wii
+ ry = (ext[6] & 0xff) | ((ext[7] & 0x0f) << 8);
+
+ input_report_abs(wdata->extension.input, ABS_X, lx - 0x800);
+- input_report_abs(wdata->extension.input, ABS_Y, ly - 0x800);
++ input_report_abs(wdata->extension.input, ABS_Y, 0x800 - ly);
+ input_report_abs(wdata->extension.input, ABS_RX, rx - 0x800);
+- input_report_abs(wdata->extension.input, ABS_RY, ry - 0x800);
++ input_report_abs(wdata->extension.input, ABS_RY, 0x800 - ry);
+
+ input_report_key(wdata->extension.input,
+ wiimod_pro_map[WIIMOD_PRO_KEY_RIGHT],
--- /dev/null
+From 04d9cd1224e5bc9d6146bab2866cdc81deb9b509 Mon Sep 17 00:00:00 2001
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+Date: Tue, 12 Nov 2013 18:05:07 -0800
+Subject: ib_isert: Avoid duplicate iscsit_increment_maxcmdsn call
+
+From: Nicholas Bellinger <nab@linux-iscsi.org>
+
+commit 04d9cd1224e5bc9d6146bab2866cdc81deb9b509 upstream.
+
+This patch avoids a duplicate iscsit_increment_maxcmdsn() call for
+ISER_IB_RDMA_WRITE within isert_map_rdma() + isert_reg_rdma_frwr(),
+which will already be occurring once during isert_put_datain() ->
+iscsit_build_rsp_pdu() operation.
+
+It also removes the local conn->stat_sn assignment + increment,
+and changes the third parameter to iscsit_build_rsp_pdu() to
+signal this should be done by iscsi_target_mode code.
+
+Tested-by: Moussa Ba <moussaba@micron.com>
+Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/infiniband/ulp/isert/ib_isert.c | 6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/drivers/infiniband/ulp/isert/ib_isert.c
++++ b/drivers/infiniband/ulp/isert/ib_isert.c
+@@ -1991,8 +1991,6 @@ isert_map_rdma(struct iscsi_conn *conn,
+
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+- iscsit_increment_maxcmdsn(cmd, conn->sess);
+- cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+@@ -2204,8 +2202,6 @@ isert_reg_rdma_frwr(struct iscsi_conn *c
+
+ if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
+ data_left = se_cmd->data_length;
+- iscsit_increment_maxcmdsn(cmd, conn->sess);
+- cmd->stat_sn = conn->stat_sn++;
+ } else {
+ sg_off = cmd->write_data_done / PAGE_SIZE;
+ data_left = se_cmd->data_length - cmd->write_data_done;
+@@ -2314,7 +2310,7 @@ isert_put_datain(struct iscsi_conn *conn
+ * Build isert_conn->tx_desc for iSCSI response PDU and attach
+ */
+ isert_create_send_desc(isert_conn, isert_cmd, &isert_cmd->tx_desc);
+- iscsit_build_rsp_pdu(cmd, conn, false, (struct iscsi_scsi_rsp *)
++ iscsit_build_rsp_pdu(cmd, conn, true, (struct iscsi_scsi_rsp *)
+ &isert_cmd->tx_desc.iscsi_header);
+ isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
+ isert_init_send_wr(isert_cmd, &isert_cmd->tx_desc.send_wr);
--- /dev/null
+From 0ee005c7dc2803125275e24598f0fb37775a6af3 Mon Sep 17 00:00:00 2001
+From: Frank Zago <frank@zago.net>
+Date: Wed, 13 Nov 2013 22:53:00 +0000
+Subject: iio:accel:kxsd9 fix missing mutex unlock
+
+From: Frank Zago <frank@zago.net>
+
+commit 0ee005c7dc2803125275e24598f0fb37775a6af3 upstream.
+
+This will leave a lock held after reading from the device, preventing
+any further reads.
+
+Signed-off-by: Frank Zago <frank@zago.net>
+Signed-off-by: Jonathan Cameron <jic23@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/iio/accel/kxsd9.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/drivers/iio/accel/kxsd9.c
++++ b/drivers/iio/accel/kxsd9.c
+@@ -112,9 +112,10 @@ static int kxsd9_read(struct iio_dev *in
+ mutex_lock(&st->buf_lock);
+ st->tx[0] = KXSD9_READ(address);
+ ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
+- if (ret)
+- return ret;
+- return (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
++ if (!ret)
++ ret = (((u16)(st->rx[0])) << 8) | (st->rx[1] & 0xF0);
++ mutex_unlock(&st->buf_lock);
++ return ret;
+ }
+
+ static IIO_CONST_ATTR(accel_scale_available,
--- /dev/null
+From 02e5f5c0a0f726e66e3d8506ea1691e344277969 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 14 Nov 2013 15:16:15 +1100
+Subject: md: fix calculation of stacking limits on level change.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 02e5f5c0a0f726e66e3d8506ea1691e344277969 upstream.
+
+The various ->run routines of md personalities assume that the 'queue'
+has been initialised by the blk_set_stacking_limits() call in
+md_alloc().
+
+However when the level is changed (by level_store()) the ->run routine
+for the new level is called for an array which has already had the
+stacking limits modified. This can result in incorrect final
+settings.
+
+So call blk_set_stacking_limits() before ->run in level_store().
+
+A specific consequence of this bug is that it causes
+discard_granularity to be set incorrectly when reshaping a RAID4 to a
+RAID0.
+
+This is suitable for any -stable kernel since 3.3 in which
+blk_set_stacking_limits() was introduced.
+
+Reported-and-tested-by: "Baldysiak, Pawel" <pawel.baldysiak@intel.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -3620,6 +3620,7 @@ level_store(struct mddev *mddev, const c
+ mddev->in_sync = 1;
+ del_timer_sync(&mddev->safemode_timer);
+ }
++ blk_set_stacking_limits(&mddev->queue->limits);
+ pers->run(mddev);
+ set_bit(MD_CHANGE_DEVS, &mddev->flags);
+ mddev_resume(mddev);
--- /dev/null
+From d206dcfa9809ec3409483e93b5e362f801fa0c27 Mon Sep 17 00:00:00 2001
+From: majianpeng <majianpeng@gmail.com>
+Date: Thu, 14 Nov 2013 15:16:19 +1100
+Subject: md/raid5: Before freeing old multi-thread worker, it should flush them.
+
+From: majianpeng <majianpeng@gmail.com>
+
+commit d206dcfa9809ec3409483e93b5e362f801fa0c27 upstream.
+
+When changing group_thread_cnt from sysfs entry, the kernel can oops.
+
+The kernel messages are:
+[ 740.961389] BUG: unable to handle kernel NULL pointer dereference at 0000000000000008
+[ 740.961444] IP: [<ffffffff81062570>] process_one_work+0x30/0x500
+[ 740.961476] PGD b9013067 PUD b651e067 PMD 0
+[ 740.961503] Oops: 0000 [#1] SMP
+[ 740.961525] Modules linked in: netconsole e1000e ptp pps_core
+[ 740.961577] CPU: 0 PID: 3683 Comm: kworker/u8:5 Not tainted 3.12.0+ #23
+[ 740.961602] Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS 080015 11/09/2011
+[ 740.961646] task: ffff88013abe0000 ti: ffff88013a246000 task.ti: ffff88013a246000
+[ 740.961673] RIP: 0010:[<ffffffff81062570>] [<ffffffff81062570>] process_one_work+0x30/0x500
+[ 740.961708] RSP: 0018:ffff88013a247e08 EFLAGS: 00010086
+[ 740.961730] RAX: ffff8800b912b400 RBX: ffff88013a61e680 RCX: ffff8800b912b400
+[ 740.961757] RDX: ffff8800b912b600 RSI: ffff8800b912b600 RDI: ffff88013a61e680
+[ 740.961782] RBP: ffff88013a247e48 R08: ffff88013a246000 R09: 000000000002c09d
+[ 740.961808] R10: 000000000000010f R11: 0000000000000000 R12: ffff88013b00cc00
+[ 740.961833] R13: 0000000000000000 R14: ffff88013b00cf80 R15: ffff88013a61e6b0
+[ 740.961861] FS: 0000000000000000(0000) GS:ffff88013fc00000(0000) knlGS:0000000000000000
+[ 740.961893] CS: 0010 DS: 0000 ES: 0000 CR0: 000000008005003b
+[ 740.962001] CR2: 00000000000000b8 CR3: 00000000b24fe000 CR4: 00000000000407f0
+[ 740.962001] Stack:
+[ 740.962001] 0000000000000008 ffff8800b912b600 ffff88013b00cc00 ffff88013a61e680
+[ 740.962001] ffff88013b00cc00 ffff88013b00cc18 ffff88013b00cf80 ffff88013a61e6b0
+[ 740.962001] ffff88013a247eb8 ffffffff810639c6 0000000000012a80 ffff88013a247fd8
+[ 740.962001] Call Trace:
+[ 740.962001] [<ffffffff810639c6>] worker_thread+0x206/0x3f0
+[ 740.962001] [<ffffffff810637c0>] ? manage_workers+0x2c0/0x2c0
+[ 740.962001] [<ffffffff81069656>] kthread+0xc6/0xd0
+[ 740.962001] [<ffffffff81069590>] ? kthread_freezable_should_stop+0x70/0x70
+[ 740.962001] [<ffffffff81722ffc>] ret_from_fork+0x7c/0xb0
+[ 740.962001] [<ffffffff81069590>] ? kthread_freezable_should_stop+0x70/0x70
+[ 740.962001] Code: 89 e5 41 57 41 56 41 55 45 31 ed 41 54 53 48 89 fb 48 83 ec 18 48 8b 06 4c 8b 67 48 48 89 c1 30 c9 a8 04 4c 0f 45 e9 80 7f 58 00 <49> 8b 45 08 44 8b b0 00 01 00 00 78 0c 41 f6 44 24 10 04 0f 84
+[ 740.962001] RIP [<ffffffff81062570>] process_one_work+0x30/0x500
+[ 740.962001] RSP <ffff88013a247e08>
+[ 740.962001] CR2: 0000000000000008
+[ 740.962001] ---[ end trace 39181460000748de ]---
+[ 740.962001] Kernel panic - not syncing: Fatal exception
+
+This can happen if there are some stripes left, fewer than MAX_STRIPE_BATCH.
+A worker is queued to handle them.
+But before calling raid5_do_work, raid5d handles those
+stripes making conf->active_stripe = 0.
+So mddev_suspend() can return.
+We might then free old worker resources before the queued
+raid5_do_work() handled them. When it runs, it crashes.
+
+ raid5d() raid5_store_group_thread_cnt()
+ queue_work mddev_suspend()
+ handle_strips
+ active_stripe=0
+ free(old worker resources)
+ process_one_work
+ raid5_do_work
+
+To avoid this, we should only flush the worker resources before freeing them.
+
+This fixes a bug introduced in 3.12 so is suitable for the 3.12.x
+stable series.
+
+Fixes: b721420e8719131896b009b11edbbd27
+Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Reviewed-by: Shaohua Li <shli@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -5240,6 +5240,9 @@ raid5_store_group_thread_cnt(struct mdde
+ old_groups = conf->worker_groups;
+ old_group_cnt = conf->worker_cnt_per_group;
+
++ if (old_groups)
++ flush_workqueue(raid5_wq);
++
+ conf->worker_groups = NULL;
+ err = alloc_thread_groups(conf, new);
+ if (err) {
--- /dev/null
+From 142d44c310819e1965ca70b4d55d7679f5797e25 Mon Sep 17 00:00:00 2001
+From: NeilBrown <neilb@suse.de>
+Date: Thu, 28 Nov 2013 10:34:18 +1100
+Subject: md: test mddev->flags more safely in md_check_recovery.
+
+From: NeilBrown <neilb@suse.de>
+
+commit 142d44c310819e1965ca70b4d55d7679f5797e25 upstream.
+
+commit 7a0a5355cbc71efa md: Don't test all of mddev->flags at once.
+made most tests on mddev->flags safer, but missed one.
+
+When
+commit 260fa034ef7a4ff8b7306 md: avoid deadlock when dirty buffers during md_stop.
+added MD_STILL_CLOSED, this caused md_check_recovery to misbehave.
+It can think there is something to do but find nothing. This can
+lead to the md thread spinning during array shutdown.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=65721
+
+Reported-and-tested-by: Richard W.M. Jones <rjones@redhat.com>
+Fixes: 260fa034ef7a4ff8b7306
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/md.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -7792,7 +7792,7 @@ void md_check_recovery(struct mddev *mdd
+ if (mddev->ro && !test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
+ return;
+ if ( ! (
+- (mddev->flags & ~ (1<<MD_CHANGE_PENDING)) ||
++ (mddev->flags & MD_UPDATE_SB_FLAGS & ~ (1<<MD_CHANGE_PENDING)) ||
+ test_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+ test_bit(MD_RECOVERY_DONE, &mddev->recovery) ||
+ (mddev->external == 0 && mddev->safemode == 1) ||
--- /dev/null
+From ec67ad82814bee92251fd963bf01c7a173856555 Mon Sep 17 00:00:00 2001
+From: Michael Neuling <mikey@neuling.org>
+Date: Mon, 25 Nov 2013 11:12:20 +1100
+Subject: powerpc/signals: Improved mark VSX not saved with small contexts fix
+
+From: Michael Neuling <mikey@neuling.org>
+
+commit ec67ad82814bee92251fd963bf01c7a173856555 upstream.
+
+In a recent patch:
+ commit c13f20ac48328b05cd3b8c19e31ed6c132b44b42
+ Author: Michael Neuling <mikey@neuling.org>
+ powerpc/signals: Mark VSX not saved with small contexts
+
+We fixed an issue but an improved solution was later discussed after the patch
+was merged.
+
+Firstly, this patch doesn't handle the 64bit signals case, which could also hit
+this issue (but has never been reported).
+
+Secondly, the original patch isn't clear what MSR VSX should be set to. The
+new approach below always clears the MSR VSX bit (to indicate no VSX is in the
+context) and sets it only in the specific case where VSX is available (ie. when
+VSX has been used and the signal context passed has space to provide the
+state).
+
+This reverts the original patch and replaces it with the improved solution. It
+also adds a 64 bit version.
+
+Signed-off-by: Michael Neuling <mikey@neuling.org>
+Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/powerpc/kernel/signal_32.c | 16 +++++++---------
+ arch/powerpc/kernel/signal_64.c | 6 ++++++
+ 2 files changed, 13 insertions(+), 9 deletions(-)
+
+--- a/arch/powerpc/kernel/signal_32.c
++++ b/arch/powerpc/kernel/signal_32.c
+@@ -445,6 +445,12 @@ static int save_user_regs(struct pt_regs
+ #endif /* CONFIG_ALTIVEC */
+ if (copy_fpr_to_user(&frame->mc_fregs, current))
+ return 1;
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSR 0-31 upper half from thread_struct to local
+@@ -457,15 +463,7 @@ static int save_user_regs(struct pt_regs
+ if (copy_vsx_to_user(&frame->mc_vsregs, current))
+ return 1;
+ msr |= MSR_VSX;
+- } else if (!ctx_has_vsx_region)
+- /*
+- * With a small context structure we can't hold the VSX
+- * registers, hence clear the MSR value to indicate the state
+- * was not saved.
+- */
+- msr &= ~MSR_VSX;
+-
+-
++ }
+ #endif /* CONFIG_VSX */
+ #ifdef CONFIG_SPE
+ /* save spe registers */
+--- a/arch/powerpc/kernel/signal_64.c
++++ b/arch/powerpc/kernel/signal_64.c
+@@ -121,6 +121,12 @@ static long setup_sigcontext(struct sigc
+ flush_fp_to_thread(current);
+ /* copy fpr regs and fpscr */
+ err |= copy_fpr_to_user(&sc->fp_regs, current);
++
++ /*
++ * Clear the MSR VSX bit to indicate there is no valid state attached
++ * to this context, except in the specific case below where we set it.
++ */
++ msr &= ~MSR_VSX;
+ #ifdef CONFIG_VSX
+ /*
+ * Copy VSX low doubleword to local buffer for formatting,
--- /dev/null
+From 97b6ff6be9da7675aab339334fda996d6c5077d9 Mon Sep 17 00:00:00 2001
+From: Jerome Glisse <jglisse@redhat.com>
+Date: Tue, 12 Nov 2013 10:51:16 -0500
+Subject: radeon: workaround pinning failure on low ram gpu
+
+From: Jerome Glisse <jglisse@redhat.com>
+
+commit 97b6ff6be9da7675aab339334fda996d6c5077d9 upstream.
+
+GPU with low amount of ram can fails at pinning new framebuffer before
+unpinning old one. On such failure, retry with unpinning old one before
+pinning new one allowing to work around the issue. This is somewhat
+ugly but only affect those old GPU we care about.
+
+Signed-off-by: Jerome Glisse <jglisse@redhat.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/gpu/drm/radeon/radeon_legacy_crtc.c | 28 ++++++++++++++++++++++++++++
+ 1 file changed, 28 insertions(+)
+
+--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
++++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+@@ -422,6 +422,7 @@ int radeon_crtc_do_set_base(struct drm_c
+ /* Pin framebuffer & get tilling informations */
+ obj = radeon_fb->obj;
+ rbo = gem_to_radeon_bo(obj);
++retry:
+ r = radeon_bo_reserve(rbo, false);
+ if (unlikely(r != 0))
+ return r;
+@@ -430,6 +431,33 @@ int radeon_crtc_do_set_base(struct drm_c
+ &base);
+ if (unlikely(r != 0)) {
+ radeon_bo_unreserve(rbo);
++
++ /* On old GPU like RN50 with little vram pining can fails because
++ * current fb is taking all space needed. So instead of unpining
++ * the old buffer after pining the new one, first unpin old one
++ * and then retry pining new one.
++ *
++ * As only master can set mode only master can pin and it is
++ * unlikely the master client will race with itself especialy
++ * on those old gpu with single crtc.
++ *
++ * We don't shutdown the display controller because new buffer
++ * will end up in same spot.
++ */
++ if (!atomic && fb && fb != crtc->fb) {
++ struct radeon_bo *old_rbo;
++ unsigned long nsize, osize;
++
++ old_rbo = gem_to_radeon_bo(to_radeon_framebuffer(fb)->obj);
++ osize = radeon_bo_size(old_rbo);
++ nsize = radeon_bo_size(rbo);
++ if (nsize <= osize && !radeon_bo_reserve(old_rbo, false)) {
++ radeon_bo_unpin(old_rbo);
++ radeon_bo_unreserve(old_rbo);
++ fb = NULL;
++ goto retry;
++ }
++ }
+ return -EINVAL;
+ }
+ radeon_bo_get_tiling_flags(rbo, &tiling_flags, NULL);
--- /dev/null
+From ad4068de49862b083ac2a15bc50689bb30ce3e44 Mon Sep 17 00:00:00 2001
+From: majianpeng <majianpeng@gmail.com>
+Date: Thu, 14 Nov 2013 15:16:15 +1100
+Subject: raid5: Use slow_path to release stripe when mddev->thread is null
+
+From: majianpeng <majianpeng@gmail.com>
+
+commit ad4068de49862b083ac2a15bc50689bb30ce3e44 upstream.
+
+When release_stripe() is called in grow_one_stripe(), the
+mddev->thread is null. So it will omit one wakeup this thread to
+release stripe.
+For this condition, use slow_path to release stripe.
+
+Bug was introduced in 3.12
+
+Fixes: 773ca82fa1ee58dd1bf88b
+Signed-off-by: Jianpeng Ma <majianpeng@gmail.com>
+Signed-off-by: NeilBrown <neilb@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/md/raid5.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/md/raid5.c
++++ b/drivers/md/raid5.c
+@@ -340,7 +340,8 @@ static void release_stripe(struct stripe
+ unsigned long flags;
+ bool wakeup;
+
+- if (test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
++ if (unlikely(!conf->mddev->thread) ||
++ test_and_set_bit(STRIPE_ON_RELEASE_LIST, &sh->state))
+ goto slow_path;
+ wakeup = llist_add(&sh->release_list, &conf->released_stripes);
+ if (wakeup)
--- /dev/null
+From 71a86ef055f569b93bc6901f007bdf447dbf515f Mon Sep 17 00:00:00 2001
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+Date: Thu, 21 Nov 2013 16:22:17 +0100
+Subject: s390/uaccess: add missing page table walk range check
+
+From: Heiko Carstens <heiko.carstens@de.ibm.com>
+
+commit 71a86ef055f569b93bc6901f007bdf447dbf515f upstream.
+
+When translating a user space address, the address must be checked against
+the ASCE limit of the process. If the address is larger than the maximum
+address that is reachable with the ASCE, an ASCE type exception must be
+generated.
+
+The current code simply ignored the higher order bits. This resulted in an
+address wrap around in user space instead of an exception in user space.
+
+Reviewed-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/lib/uaccess_pt.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/arch/s390/lib/uaccess_pt.c
++++ b/arch/s390/lib/uaccess_pt.c
+@@ -78,11 +78,14 @@ static size_t copy_in_kernel(size_t coun
+ * contains the (negative) exception code.
+ */
+ #ifdef CONFIG_64BIT
++
+ static unsigned long follow_table(struct mm_struct *mm,
+ unsigned long address, int write)
+ {
+ unsigned long *table = (unsigned long *)__pa(mm->pgd);
+
++ if (unlikely(address > mm->context.asce_limit - 1))
++ return -0x38UL;
+ switch (mm->context.asce_bits & _ASCE_TYPE_MASK) {
+ case _ASCE_TYPE_REGION1:
+ table = table + ((address >> 53) & 0x7ff);
drm-radeon-fix-uvd-destroy-ib-size.patch
drm-radeon-don-t-share-pplls-on-dce4.1.patch
radeon-i2c-do-not-count-reg-index-in-number-of-i2c-byte-we-are-writing.patch
+drm-radeon-hook-up-backlight-functions-for-ci-and-kv-family.patch
+drm-radeon-adjust-tn-dpm-parameters-for-stability-v2.patch
+radeon-workaround-pinning-failure-on-low-ram-gpu.patch
+ib_isert-avoid-duplicate-iscsit_increment_maxcmdsn-call.patch
+edac-highbank-fix-interrupt-setup-of-mem-and-l2-controller.patch
+hid-wiimote-fix-inverted-pro-controller-axes.patch
+setfacl-removes-part-of-acl-when-setting-posix-acls-to-samba.patch
+raid5-use-slow_path-to-release-stripe-when-mddev-thread-is-null.patch
+md-fix-calculation-of-stacking-limits-on-level-change.patch
+md-raid5-before-freeing-old-multi-thread-worker-it-should-flush-them.patch
+md-test-mddev-flags-more-safely-in-md_check_recovery.patch
+hid-uhid-fix-leak-for-64-32-uhid_create.patch
+powerpc-signals-improved-mark-vsx-not-saved-with-small-contexts-fix.patch
+iio-accel-kxsd9-fix-missing-mutex-unlock.patch
+arm64-move-pte_prot_none-higher-up.patch
+s390-uaccess-add-missing-page-table-walk-range-check.patch
+workqueue-fix-ordered-workqueues-in-numa-setups.patch
+cgroup-use-a-dedicated-workqueue-for-cgroup-destruction.patch
+cgroup-fix-cgroup_subsys_state-leak-for-seq_files.patch
+cpuset-fix-memory-allocator-deadlock.patch
+alsa-hda-realtek-set-pcbeep-amp-for-alc668.patch
+alsa-hda-realtek-add-support-of-alc231-codec.patch
+alsa-hda-fix-hp-mic-mode-without-vref-bits.patch
+alsa-hda-create-headhpone-mic-jack-mode-when-really-needed.patch
+alsa-hda-initialize-missing-bass-speaker-pin-for-asus-aio-et2700.patch
+alsa-hda-check-leaf-nodes-to-find-aamix-amps.patch
--- /dev/null
+From b1d93356427be6f050dc55c86eb019d173700af6 Mon Sep 17 00:00:00 2001
+From: Steve French <smfrench@gmail.com>
+Date: Fri, 15 Nov 2013 20:41:32 -0600
+Subject: setfacl removes part of ACL when setting POSIX ACLs to Samba
+
+From: Steve French <smfrench@gmail.com>
+
+commit b1d93356427be6f050dc55c86eb019d173700af6 upstream.
+
+setfacl over cifs mounts can remove the default ACL when setting the
+(non-default part of) the ACL and vice versa (we were leaving at 0
+rather than setting to -1 the count field for the unaffected
+half of the ACL. For example notice the setfacl removed
+the default ACL in this sequence:
+
+steven@steven-GA-970A-DS3:~/cifs-2.6$ getfacl /mnt/test-dir ; setfacl
+-m default:user:test:rwx,user:test:rwx /mnt/test-dir
+getfacl: Removing leading '/' from absolute path names
+user::rwx
+group::r-x
+other::r-x
+default:user::rwx
+default:user:test:rwx
+default:group::r-x
+default:mask::rwx
+default:other::r-x
+
+steven@steven-GA-970A-DS3:~/cifs-2.6$ getfacl /mnt/test-dir
+getfacl: Removing leading '/' from absolute path names
+user::rwx
+user:test:rwx
+group::r-x
+mask::rwx
+other::r-x
+
+Signed-off-by: Steve French <smfrench@gmail.com>
+Acked-by: Jeremy Allison <jra@samba.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cifs/cifssmb.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/fs/cifs/cifssmb.c
++++ b/fs/cifs/cifssmb.c
+@@ -3315,11 +3315,13 @@ static __u16 ACL_to_cifs_posix(char *par
+ return 0;
+ }
+ cifs_acl->version = cpu_to_le16(1);
+- if (acl_type == ACL_TYPE_ACCESS)
++ if (acl_type == ACL_TYPE_ACCESS) {
+ cifs_acl->access_entry_count = cpu_to_le16(count);
+- else if (acl_type == ACL_TYPE_DEFAULT)
++ cifs_acl->default_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else if (acl_type == ACL_TYPE_DEFAULT) {
+ cifs_acl->default_entry_count = cpu_to_le16(count);
+- else {
++ cifs_acl->access_entry_count = __constant_cpu_to_le16(0xFFFF);
++ } else {
+ cifs_dbg(FYI, "unknown ACL type %d\n", acl_type);
+ return 0;
+ }
--- /dev/null
+From 8a2b75384444488fc4f2cbb9f0921b6a0794838f Mon Sep 17 00:00:00 2001
+From: Tejun Heo <tj@kernel.org>
+Date: Thu, 5 Sep 2013 12:30:04 -0400
+Subject: workqueue: fix ordered workqueues in NUMA setups
+
+From: Tejun Heo <tj@kernel.org>
+
+commit 8a2b75384444488fc4f2cbb9f0921b6a0794838f upstream.
+
+An ordered workqueue implements execution ordering by using single
+pool_workqueue with max_active == 1. On a given pool_workqueue, work
+items are processed in FIFO order and limiting max_active to 1
+enforces the queued work items to be processed one by one.
+
+Unfortunately, 4c16bd327c ("workqueue: implement NUMA affinity for
+unbound workqueues") accidentally broke this guarantee by applying
+NUMA affinity to ordered workqueues too. On NUMA setups, an ordered
+workqueue would end up with separate pool_workqueues for different
+nodes. Each pool_workqueue still limits max_active to 1 but multiple
+work items may be executed concurrently and out of order depending on
+which node they are queued to.
+
+Fix it by using dedicated ordered_wq_attrs[] when creating ordered
+workqueues. The new attrs match the unbound ones except that no_numa
+is always set thus forcing all NUMA nodes to share the default
+pool_workqueue.
+
+While at it, add sanity check in workqueue creation path which
+verifies that an ordered workqueues has only the default
+pool_workqueue.
+
+Signed-off-by: Tejun Heo <tj@kernel.org>
+Reported-by: Libin <huawei.libin@huawei.com>
+Cc: Lai Jiangshan <laijs@cn.fujitsu.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/workqueue.c | 24 ++++++++++++++++++++++--
+ 1 file changed, 22 insertions(+), 2 deletions(-)
+
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -305,6 +305,9 @@ static DEFINE_HASHTABLE(unbound_pool_has
+ /* I: attributes used when instantiating standard unbound pools on demand */
+ static struct workqueue_attrs *unbound_std_wq_attrs[NR_STD_WORKER_POOLS];
+
++/* I: attributes used when instantiating ordered pools on demand */
++static struct workqueue_attrs *ordered_wq_attrs[NR_STD_WORKER_POOLS];
++
+ struct workqueue_struct *system_wq __read_mostly;
+ EXPORT_SYMBOL(system_wq);
+ struct workqueue_struct *system_highpri_wq __read_mostly;
+@@ -4106,7 +4109,7 @@ out_unlock:
+ static int alloc_and_link_pwqs(struct workqueue_struct *wq)
+ {
+ bool highpri = wq->flags & WQ_HIGHPRI;
+- int cpu;
++ int cpu, ret;
+
+ if (!(wq->flags & WQ_UNBOUND)) {
+ wq->cpu_pwqs = alloc_percpu(struct pool_workqueue);
+@@ -4126,6 +4129,13 @@ static int alloc_and_link_pwqs(struct wo
+ mutex_unlock(&wq->mutex);
+ }
+ return 0;
++ } else if (wq->flags & __WQ_ORDERED) {
++ ret = apply_workqueue_attrs(wq, ordered_wq_attrs[highpri]);
++ /* there should only be single pwq for ordering guarantee */
++ WARN(!ret && (wq->pwqs.next != &wq->dfl_pwq->pwqs_node ||
++ wq->pwqs.prev != &wq->dfl_pwq->pwqs_node),
++ "ordering guarantee broken for workqueue %s\n", wq->name);
++ return ret;
+ } else {
+ return apply_workqueue_attrs(wq, unbound_std_wq_attrs[highpri]);
+ }
+@@ -5051,13 +5061,23 @@ static int __init init_workqueues(void)
+ }
+ }
+
+- /* create default unbound wq attrs */
++ /* create default unbound and ordered wq attrs */
+ for (i = 0; i < NR_STD_WORKER_POOLS; i++) {
+ struct workqueue_attrs *attrs;
+
+ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
+ attrs->nice = std_nice[i];
+ unbound_std_wq_attrs[i] = attrs;
++
++ /*
++ * An ordered wq should have only one pwq as ordering is
++ * guaranteed by max_active which is enforced by pwqs.
++ * Turn off NUMA so that dfl_pwq is used for all nodes.
++ */
++ BUG_ON(!(attrs = alloc_workqueue_attrs(GFP_KERNEL)));
++ attrs->nice = std_nice[i];
++ attrs->no_numa = true;
++ ordered_wq_attrs[i] = attrs;
+ }
+
+ system_wq = alloc_workqueue("events", 0, 0);