--- /dev/null
+From 623cf33cb055b1e81fa47e4fc16789b2c129e31e Mon Sep 17 00:00:00 2001
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+Date: Tue, 6 Aug 2013 02:26:22 +0200
+Subject: ACPI / PM: Walk physical_node_list under physical_node_lock
+
+From: "Rafael J. Wysocki" <rafael.j.wysocki@intel.com>
+
+commit 623cf33cb055b1e81fa47e4fc16789b2c129e31e upstream.
+
+The list of physical devices corresponding to an ACPI device
+object is walked by acpi_system_wakeup_device_seq_show() and
+physical_device_enable_wakeup() without taking that object's
+physical_node_lock mutex. Since each of those functions may be
+run at any time as a result of a user space action, the lack of
+appropriate locking in them may lead to a kernel crash if that
+happens during device hot-add or hot-remove involving the device
+object in question.
+
+Fix the issue by modifying acpi_system_wakeup_device_seq_show() and
+physical_device_enable_wakeup() to use physical_node_lock as
+appropriate.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/acpi/proc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/drivers/acpi/proc.c
++++ b/drivers/acpi/proc.c
+@@ -311,6 +311,8 @@ acpi_system_wakeup_device_seq_show(struc
+ dev->pnp.bus_id,
+ (u32) dev->wakeup.sleep_state);
+
++ mutex_lock(&dev->physical_node_lock);
++
+ if (!dev->physical_node_count) {
+ seq_printf(seq, "%c%-8s\n",
+ dev->wakeup.flags.run_wake ? '*' : ' ',
+@@ -338,6 +340,8 @@ acpi_system_wakeup_device_seq_show(struc
+ put_device(ldev);
+ }
+ }
++
++ mutex_unlock(&dev->physical_node_lock);
+ }
+ mutex_unlock(&acpi_device_lock);
+ return 0;
+@@ -347,12 +351,16 @@ static void physical_device_enable_wakeu
+ {
+ struct acpi_device_physical_node *entry;
+
++ mutex_lock(&adev->physical_node_lock);
++
+ list_for_each_entry(entry,
+ &adev->physical_node_list, node)
+ if (entry->dev && device_can_wakeup(entry->dev)) {
+ bool enable = !device_may_wakeup(entry->dev);
+ device_set_wakeup_enable(entry->dev, enable);
+ }
++
++ mutex_unlock(&adev->physical_node_lock);
+ }
+
+ static ssize_t
--- /dev/null
+From ddb6b5a964371e8e52e696b2b258bda144c8bd3f Mon Sep 17 00:00:00 2001
+From: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+Date: Tue, 6 Aug 2013 14:53:24 +0300
+Subject: ALSA: 6fire: fix DMA issues with URB transfer_buffer usage
+
+From: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+
+commit ddb6b5a964371e8e52e696b2b258bda144c8bd3f upstream.
+
+Patch fixes 6fire not to use stack as URB transfer_buffer. URB buffers need to
+be DMA-able, which stack is not. Furthermore, transfer_buffer should not be
+allocated as part of a larger device structure because of DMA coherency issues, and the
+patch fixes this issue too.
+
+Signed-off-by: Jussi Kivilinna <jussi.kivilinna@iki.fi>
+Tested-by: Torsten Schenk <torsten.schenk@zoho.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/6fire/comm.c | 38 +++++++++++++++++++++++++++++++++-----
+ sound/usb/6fire/comm.h | 2 +-
+ 2 files changed, 34 insertions(+), 6 deletions(-)
+
+--- a/sound/usb/6fire/comm.c
++++ b/sound/usb/6fire/comm.c
+@@ -110,19 +110,37 @@ static int usb6fire_comm_send_buffer(u8
+ static int usb6fire_comm_write8(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 value)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, value, 0x00);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ static int usb6fire_comm_write16(struct comm_runtime *rt, u8 request,
+ u8 reg, u8 vl, u8 vh)
+ {
+- u8 buffer[13]; /* 13: maximum length of message */
++ u8 *buffer;
++ int ret;
++
++ /* 13: maximum length of message */
++ buffer = kmalloc(13, GFP_KERNEL);
++ if (!buffer)
++ return -ENOMEM;
+
+ usb6fire_comm_init_buffer(buffer, 0x00, request, reg, vl, vh);
+- return usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++ ret = usb6fire_comm_send_buffer(buffer, rt->chip->dev);
++
++ kfree(buffer);
++ return ret;
+ }
+
+ int usb6fire_comm_init(struct sfire_chip *chip)
+@@ -135,6 +153,12 @@ int usb6fire_comm_init(struct sfire_chip
+ if (!rt)
+ return -ENOMEM;
+
++ rt->receiver_buffer = kzalloc(COMM_RECEIVER_BUFSIZE, GFP_KERNEL);
++ if (!rt->receiver_buffer) {
++ kfree(rt);
++ return -ENOMEM;
++ }
++
+ urb = &rt->receiver;
+ rt->serial = 1;
+ rt->chip = chip;
+@@ -153,6 +177,7 @@ int usb6fire_comm_init(struct sfire_chip
+ urb->interval = 1;
+ ret = usb_submit_urb(urb, GFP_KERNEL);
+ if (ret < 0) {
++ kfree(rt->receiver_buffer);
+ kfree(rt);
+ snd_printk(KERN_ERR PREFIX "cannot create comm data receiver.");
+ return ret;
+@@ -171,6 +196,9 @@ void usb6fire_comm_abort(struct sfire_ch
+
+ void usb6fire_comm_destroy(struct sfire_chip *chip)
+ {
+- kfree(chip->comm);
++ struct comm_runtime *rt = chip->comm;
++
++ kfree(rt->receiver_buffer);
++ kfree(rt);
+ chip->comm = NULL;
+ }
+--- a/sound/usb/6fire/comm.h
++++ b/sound/usb/6fire/comm.h
+@@ -24,7 +24,7 @@ struct comm_runtime {
+ struct sfire_chip *chip;
+
+ struct urb receiver;
+- u8 receiver_buffer[COMM_RECEIVER_BUFSIZE];
++ u8 *receiver_buffer;
+
+ u8 serial; /* urb serial */
+
--- /dev/null
+From 57e6dae1087bbaa6b33d3dd8a8e90b63888939a3 Mon Sep 17 00:00:00 2001
+From: Clemens Ladisch <clemens@ladisch.de>
+Date: Thu, 8 Aug 2013 11:24:55 +0200
+Subject: ALSA: usb-audio: do not trust too-big wMaxPacketSize values
+
+From: Clemens Ladisch <clemens@ladisch.de>
+
+commit 57e6dae1087bbaa6b33d3dd8a8e90b63888939a3 upstream.
+
+The driver used to assume that the streaming endpoint's wMaxPacketSize
+value would be an indication of how much data the endpoint expects or
+sends, and compute the number of packets per URB using this value.
+
+However, the Focusrite Scarlett 2i4 declares a value of 1024 bytes,
+while only about 88 or 44 bytes are actually used. This discrepancy
+would result in URBs with far too few packets, which would not work
+correctly on the EHCI driver.
+
+To get correct URBs, use wMaxPacketSize only as an upper limit on the
+packet size.
+
+Reported-by: James Stone <jamesmstone@gmail.com>
+Tested-by: James Stone <jamesmstone@gmail.com>
+Signed-off-by: Clemens Ladisch <clemens@ladisch.de>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/usb/endpoint.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/sound/usb/endpoint.c
++++ b/sound/usb/endpoint.c
+@@ -591,17 +591,16 @@ static int data_ep_set_params(struct snd
+ ep->stride = frame_bits >> 3;
+ ep->silence_value = pcm_format == SNDRV_PCM_FORMAT_U8 ? 0x80 : 0;
+
+- /* calculate max. frequency */
+- if (ep->maxpacksize) {
++ /* assume max. frequency is 25% higher than nominal */
++ ep->freqmax = ep->freqn + (ep->freqn >> 2);
++ maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
++ >> (16 - ep->datainterval);
++ /* but wMaxPacketSize might reduce this */
++ if (ep->maxpacksize && ep->maxpacksize < maxsize) {
+ /* whatever fits into a max. size packet */
+ maxsize = ep->maxpacksize;
+ ep->freqmax = (maxsize / (frame_bits >> 3))
+ << (16 - ep->datainterval);
+- } else {
+- /* no max. packet size: just take 25% higher than nominal */
+- ep->freqmax = ep->freqn + (ep->freqn >> 2);
+- maxsize = ((ep->freqmax + 0xffff) * (frame_bits >> 3))
+- >> (16 - ep->datainterval);
+ }
+
+ if (ep->fill_max)
--- /dev/null
+From f3b15ccdbb9a79781578249a63318805e55a6c34 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <jbacik@fusionio.com>
+Date: Mon, 22 Jul 2013 12:54:30 -0400
+Subject: Btrfs: release both paths before logging dir/changed extents
+
+From: Josef Bacik <jbacik@fusionio.com>
+
+commit f3b15ccdbb9a79781578249a63318805e55a6c34 upstream.
+
+The ceph guys tripped over this bug where we were still holding onto the
+original path that we used to copy the inode with when logging. This is based
+on Chris's fix which was reported to fix the problem. We need to drop the paths
+in two cases anyway so just move the drop up so that we don't have duplicate
+code. Thanks,
+
+Signed-off-by: Josef Bacik <jbacik@fusionio.com>
+Signed-off-by: Chris Mason <chris.mason@fusionio.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/btrfs/tree-log.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+--- a/fs/btrfs/tree-log.c
++++ b/fs/btrfs/tree-log.c
+@@ -3728,8 +3728,9 @@ next_slot:
+ }
+
+ log_extents:
++ btrfs_release_path(path);
++ btrfs_release_path(dst_path);
+ if (fast_search) {
+- btrfs_release_path(dst_path);
+ ret = btrfs_log_changed_extents(trans, root, inode, dst_path);
+ if (ret) {
+ err = ret;
+@@ -3746,8 +3747,6 @@ log_extents:
+ }
+
+ if (inode_only == LOG_INODE_ALL && S_ISDIR(inode->i_mode)) {
+- btrfs_release_path(path);
+- btrfs_release_path(dst_path);
+ ret = log_directory_changes(trans, root, inode, path, dst_path);
+ if (ret) {
+ err = ret;
--- /dev/null
+From f54fe64d14dff3df6d45a48115d248a82557811f Mon Sep 17 00:00:00 2001
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+Date: Mon, 5 Aug 2013 21:27:12 +0300
+Subject: cpufreq: loongson2: fix regression related to clock management
+
+From: Aaro Koskinen <aaro.koskinen@iki.fi>
+
+commit f54fe64d14dff3df6d45a48115d248a82557811f upstream.
+
+Commit 42913c799 (MIPS: Loongson2: Use clk API instead of direct
+dereferences) broke the cpufreq functionality on Loongson2 boards:
+clk_set_rate() is called before the CPU frequency table is
+initialized, and therefore will always fail.
+
+Fix by moving the clk_set_rate() after the table initialization.
+Tested on Lemote FuLoong mini-PC.
+
+Signed-off-by: Aaro Koskinen <aaro.koskinen@iki.fi>
+Acked-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/loongson2_cpufreq.c | 11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/drivers/cpufreq/loongson2_cpufreq.c
++++ b/drivers/cpufreq/loongson2_cpufreq.c
+@@ -118,11 +118,6 @@ static int loongson2_cpufreq_cpu_init(st
+ clk_put(cpuclk);
+ return -EINVAL;
+ }
+- ret = clk_set_rate(cpuclk, rate);
+- if (ret) {
+- clk_put(cpuclk);
+- return ret;
+- }
+
+ /* clock table init */
+ for (i = 2;
+@@ -130,6 +125,12 @@ static int loongson2_cpufreq_cpu_init(st
+ i++)
+ loongson2_clockmod_table[i].frequency = (rate * i) / 8;
+
++ ret = clk_set_rate(cpuclk, rate);
++ if (ret) {
++ clk_put(cpuclk);
++ return ret;
++ }
++
+ policy->cur = loongson2_cpufreq_get(policy->cpu);
+
+ cpufreq_frequency_table_get_attr(&loongson2_clockmod_table[0],
--- /dev/null
+From 6c4640c3adfd97ce10efed7c07405f52d002b9a8 Mon Sep 17 00:00:00 2001
+From: Viresh Kumar <viresh.kumar@linaro.org>
+Date: Mon, 5 Aug 2013 12:28:02 +0530
+Subject: cpufreq: rename ignore_nice as ignore_nice_load
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+commit 6c4640c3adfd97ce10efed7c07405f52d002b9a8 upstream.
+
+This sysfs file was called ignore_nice_load earlier and commit
+4d5dcc4 (cpufreq: governor: Implement per policy instances of
+governors) changed its name to ignore_nice by mistake.
+
+Let's get it renamed back to its original name.
+
+Reported-by: Martin von Gagern <Martin.vGagern@gmx.net>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/cpufreq/cpufreq_conservative.c | 20 ++++++++++----------
+ drivers/cpufreq/cpufreq_governor.c | 8 ++++----
+ drivers/cpufreq/cpufreq_governor.h | 4 ++--
+ drivers/cpufreq/cpufreq_ondemand.c | 20 ++++++++++----------
+ 4 files changed, 26 insertions(+), 26 deletions(-)
+
+--- a/drivers/cpufreq/cpufreq_conservative.c
++++ b/drivers/cpufreq/cpufreq_conservative.c
+@@ -221,8 +221,8 @@ static ssize_t store_down_threshold(stru
+ return count;
+ }
+
+-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+- size_t count)
++static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
++ const char *buf, size_t count)
+ {
+ struct cs_dbs_tuners *cs_tuners = dbs_data->tuners;
+ unsigned int input, j;
+@@ -235,10 +235,10 @@ static ssize_t store_ignore_nice(struct
+ if (input > 1)
+ input = 1;
+
+- if (input == cs_tuners->ignore_nice) /* nothing to do */
++ if (input == cs_tuners->ignore_nice_load) /* nothing to do */
+ return count;
+
+- cs_tuners->ignore_nice = input;
++ cs_tuners->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+@@ -246,7 +246,7 @@ static ssize_t store_ignore_nice(struct
+ dbs_info = &per_cpu(cs_cpu_dbs_info, j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, 0);
+- if (cs_tuners->ignore_nice)
++ if (cs_tuners->ignore_nice_load)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+ }
+@@ -279,7 +279,7 @@ show_store_one(cs, sampling_rate);
+ show_store_one(cs, sampling_down_factor);
+ show_store_one(cs, up_threshold);
+ show_store_one(cs, down_threshold);
+-show_store_one(cs, ignore_nice);
++show_store_one(cs, ignore_nice_load);
+ show_store_one(cs, freq_step);
+ declare_show_sampling_rate_min(cs);
+
+@@ -287,7 +287,7 @@ gov_sys_pol_attr_rw(sampling_rate);
+ gov_sys_pol_attr_rw(sampling_down_factor);
+ gov_sys_pol_attr_rw(up_threshold);
+ gov_sys_pol_attr_rw(down_threshold);
+-gov_sys_pol_attr_rw(ignore_nice);
++gov_sys_pol_attr_rw(ignore_nice_load);
+ gov_sys_pol_attr_rw(freq_step);
+ gov_sys_pol_attr_ro(sampling_rate_min);
+
+@@ -297,7 +297,7 @@ static struct attribute *dbs_attributes_
+ &sampling_down_factor_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &down_threshold_gov_sys.attr,
+- &ignore_nice_gov_sys.attr,
++ &ignore_nice_load_gov_sys.attr,
+ &freq_step_gov_sys.attr,
+ NULL
+ };
+@@ -313,7 +313,7 @@ static struct attribute *dbs_attributes_
+ &sampling_down_factor_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &down_threshold_gov_pol.attr,
+- &ignore_nice_gov_pol.attr,
++ &ignore_nice_load_gov_pol.attr,
+ &freq_step_gov_pol.attr,
+ NULL
+ };
+@@ -338,7 +338,7 @@ static int cs_init(struct dbs_data *dbs_
+ tuners->up_threshold = DEF_FREQUENCY_UP_THRESHOLD;
+ tuners->down_threshold = DEF_FREQUENCY_DOWN_THRESHOLD;
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+- tuners->ignore_nice = 0;
++ tuners->ignore_nice_load = 0;
+ tuners->freq_step = DEF_FREQUENCY_STEP;
+
+ dbs_data->tuners = tuners;
+--- a/drivers/cpufreq/cpufreq_governor.c
++++ b/drivers/cpufreq/cpufreq_governor.c
+@@ -91,9 +91,9 @@ void dbs_check_cpu(struct dbs_data *dbs_
+ unsigned int j;
+
+ if (dbs_data->cdata->governor == GOV_ONDEMAND)
+- ignore_nice = od_tuners->ignore_nice;
++ ignore_nice = od_tuners->ignore_nice_load;
+ else
+- ignore_nice = cs_tuners->ignore_nice;
++ ignore_nice = cs_tuners->ignore_nice_load;
+
+ policy = cdbs->cur_policy;
+
+@@ -336,12 +336,12 @@ int cpufreq_governor_dbs(struct cpufreq_
+ cs_tuners = dbs_data->tuners;
+ cs_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = cs_tuners->sampling_rate;
+- ignore_nice = cs_tuners->ignore_nice;
++ ignore_nice = cs_tuners->ignore_nice_load;
+ } else {
+ od_tuners = dbs_data->tuners;
+ od_dbs_info = dbs_data->cdata->get_cpu_dbs_info_s(cpu);
+ sampling_rate = od_tuners->sampling_rate;
+- ignore_nice = od_tuners->ignore_nice;
++ ignore_nice = od_tuners->ignore_nice_load;
+ od_ops = dbs_data->cdata->gov_ops;
+ io_busy = od_tuners->io_is_busy;
+ }
+--- a/drivers/cpufreq/cpufreq_governor.h
++++ b/drivers/cpufreq/cpufreq_governor.h
+@@ -165,7 +165,7 @@ struct cs_cpu_dbs_info_s {
+
+ /* Per policy Governers sysfs tunables */
+ struct od_dbs_tuners {
+- unsigned int ignore_nice;
++ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+@@ -175,7 +175,7 @@ struct od_dbs_tuners {
+ };
+
+ struct cs_dbs_tuners {
+- unsigned int ignore_nice;
++ unsigned int ignore_nice_load;
+ unsigned int sampling_rate;
+ unsigned int sampling_down_factor;
+ unsigned int up_threshold;
+--- a/drivers/cpufreq/cpufreq_ondemand.c
++++ b/drivers/cpufreq/cpufreq_ondemand.c
+@@ -403,8 +403,8 @@ static ssize_t store_sampling_down_facto
+ return count;
+ }
+
+-static ssize_t store_ignore_nice(struct dbs_data *dbs_data, const char *buf,
+- size_t count)
++static ssize_t store_ignore_nice_load(struct dbs_data *dbs_data,
++ const char *buf, size_t count)
+ {
+ struct od_dbs_tuners *od_tuners = dbs_data->tuners;
+ unsigned int input;
+@@ -419,10 +419,10 @@ static ssize_t store_ignore_nice(struct
+ if (input > 1)
+ input = 1;
+
+- if (input == od_tuners->ignore_nice) { /* nothing to do */
++ if (input == od_tuners->ignore_nice_load) { /* nothing to do */
+ return count;
+ }
+- od_tuners->ignore_nice = input;
++ od_tuners->ignore_nice_load = input;
+
+ /* we need to re-evaluate prev_cpu_idle */
+ for_each_online_cpu(j) {
+@@ -430,7 +430,7 @@ static ssize_t store_ignore_nice(struct
+ dbs_info = &per_cpu(od_cpu_dbs_info, j);
+ dbs_info->cdbs.prev_cpu_idle = get_cpu_idle_time(j,
+ &dbs_info->cdbs.prev_cpu_wall, od_tuners->io_is_busy);
+- if (od_tuners->ignore_nice)
++ if (od_tuners->ignore_nice_load)
+ dbs_info->cdbs.prev_cpu_nice =
+ kcpustat_cpu(j).cpustat[CPUTIME_NICE];
+
+@@ -461,7 +461,7 @@ show_store_one(od, sampling_rate);
+ show_store_one(od, io_is_busy);
+ show_store_one(od, up_threshold);
+ show_store_one(od, sampling_down_factor);
+-show_store_one(od, ignore_nice);
++show_store_one(od, ignore_nice_load);
+ show_store_one(od, powersave_bias);
+ declare_show_sampling_rate_min(od);
+
+@@ -469,7 +469,7 @@ gov_sys_pol_attr_rw(sampling_rate);
+ gov_sys_pol_attr_rw(io_is_busy);
+ gov_sys_pol_attr_rw(up_threshold);
+ gov_sys_pol_attr_rw(sampling_down_factor);
+-gov_sys_pol_attr_rw(ignore_nice);
++gov_sys_pol_attr_rw(ignore_nice_load);
+ gov_sys_pol_attr_rw(powersave_bias);
+ gov_sys_pol_attr_ro(sampling_rate_min);
+
+@@ -478,7 +478,7 @@ static struct attribute *dbs_attributes_
+ &sampling_rate_gov_sys.attr,
+ &up_threshold_gov_sys.attr,
+ &sampling_down_factor_gov_sys.attr,
+- &ignore_nice_gov_sys.attr,
++ &ignore_nice_load_gov_sys.attr,
+ &powersave_bias_gov_sys.attr,
+ &io_is_busy_gov_sys.attr,
+ NULL
+@@ -494,7 +494,7 @@ static struct attribute *dbs_attributes_
+ &sampling_rate_gov_pol.attr,
+ &up_threshold_gov_pol.attr,
+ &sampling_down_factor_gov_pol.attr,
+- &ignore_nice_gov_pol.attr,
++ &ignore_nice_load_gov_pol.attr,
+ &powersave_bias_gov_pol.attr,
+ &io_is_busy_gov_pol.attr,
+ NULL
+@@ -544,7 +544,7 @@ static int od_init(struct dbs_data *dbs_
+ }
+
+ tuners->sampling_down_factor = DEF_SAMPLING_DOWN_FACTOR;
+- tuners->ignore_nice = 0;
++ tuners->ignore_nice_load = 0;
+ tuners->powersave_bias = default_powersave_bias;
+ tuners->io_is_busy = should_io_be_busy();
+
--- /dev/null
+From d6e102f498cbcc8dd2e36721a01213f036397112 Mon Sep 17 00:00:00 2001
+From: Fabio Estevam <fabio.estevam@freescale.com>
+Date: Mon, 1 Jul 2013 18:14:21 -0300
+Subject: i2c: i2c-mxs: Use DMA mode even for small transfers
+
+From: Fabio Estevam <fabio.estevam@freescale.com>
+
+commit d6e102f498cbcc8dd2e36721a01213f036397112 upstream.
+
+Recently we have been seing some reports about PIO mode not working properly.
+
+- http://www.spinics.net/lists/linux-i2c/msg11985.html
+- http://marc.info/?l=linux-i2c&m=137235593101385&w=2
+- https://lkml.org/lkml/2013/6/24/430
+
+Let's use DMA mode even for small transfers.
+
+Without this patch, i2c reads the incorrect sgtl5000 version on a mx28evk when
+touchscreen is enabled:
+
+[ 5.856270] sgtl5000 0-000a: Device with ID register 0 is not a sgtl5000
+[ 9.877307] sgtl5000 0-000a: ASoC: failed to probe CODEC -19
+[ 9.883528] mxs-sgtl5000 sound.12: ASoC: failed to instantiate card -19
+[ 9.892955] mxs-sgtl5000 sound.12: snd_soc_register_card failed (-19)
+
+[wsa: we have a proper solution for -next, so this non intrusive
+solution is OK for now]
+
+Signed-off-by: Fabio Estevam <fabio.estevam@freescale.com>
+Acked-by: Shawn Guo <shawn.guo@linaro.org>
+Acked-by: Lucas Stach <l.stach@pengutronix.de>
+Acked-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Wolfram Sang <wsa@the-dreams.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/i2c/busses/i2c-mxs.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/i2c/busses/i2c-mxs.c
++++ b/drivers/i2c/busses/i2c-mxs.c
+@@ -494,7 +494,7 @@ static int mxs_i2c_xfer_msg(struct i2c_a
+ * based on this empirical measurement and a lot of previous frobbing.
+ */
+ i2c->cmd_err = 0;
+- if (msg->len < 8) {
++ if (0) { /* disable PIO mode until a proper fix is made */
+ ret = mxs_i2c_pio_setup_xfer(adap, msg, flags);
+ if (ret)
+ mxs_i2c_reset(i2c);
--- /dev/null
+From 9a1b6bf818e74bb7aabaecb59492b739f2f4d742 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 5 Aug 2013 12:06:12 -0400
+Subject: LOCKD: Don't call utsname()->nodename from nlmclnt_setlockargs
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 9a1b6bf818e74bb7aabaecb59492b739f2f4d742 upstream.
+
+Firstly, nlmclnt_setlockargs can be called from a reclaimer thread, in
+which case we're in entirely the wrong namespace.
+
+Secondly, commit 8aac62706adaaf0fab02c4327761561c8bda9448 (move
+exit_task_namespaces() outside of exit_notify()) now means that
+exit_task_work() is called after exit_task_namespaces(), which
+triggers an Oops when we're freeing up the locks.
+
+Fix this by ensuring that we initialise the nlm_host's rpc_client at mount
+time, so that the cl_nodename field is initialised to the value of
+utsname()->nodename that the net namespace uses. Then replace the
+lockd callers of utsname()->nodename.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: Toralf Förster <toralf.foerster@gmx.de>
+Cc: Oleg Nesterov <oleg@redhat.com>
+Cc: Nix <nix@esperi.org.uk>
+Cc: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/lockd/clntlock.c | 13 +++++++++----
+ fs/lockd/clntproc.c | 5 +++--
+ 2 files changed, 12 insertions(+), 6 deletions(-)
+
+--- a/fs/lockd/clntlock.c
++++ b/fs/lockd/clntlock.c
+@@ -64,12 +64,17 @@ struct nlm_host *nlmclnt_init(const stru
+ nlm_init->protocol, nlm_version,
+ nlm_init->hostname, nlm_init->noresvport,
+ nlm_init->net);
+- if (host == NULL) {
+- lockd_down(nlm_init->net);
+- return ERR_PTR(-ENOLCK);
+- }
++ if (host == NULL)
++ goto out_nohost;
++ if (host->h_rpcclnt == NULL && nlm_bind_host(host) == NULL)
++ goto out_nobind;
+
+ return host;
++out_nobind:
++ nlmclnt_release_host(host);
++out_nohost:
++ lockd_down(nlm_init->net);
++ return ERR_PTR(-ENOLCK);
+ }
+ EXPORT_SYMBOL_GPL(nlmclnt_init);
+
+--- a/fs/lockd/clntproc.c
++++ b/fs/lockd/clntproc.c
+@@ -125,14 +125,15 @@ static void nlmclnt_setlockargs(struct n
+ {
+ struct nlm_args *argp = &req->a_args;
+ struct nlm_lock *lock = &argp->lock;
++ char *nodename = req->a_host->h_rpcclnt->cl_nodename;
+
+ nlmclnt_next_cookie(&argp->cookie);
+ memcpy(&lock->fh, NFS_FH(file_inode(fl->fl_file)), sizeof(struct nfs_fh));
+- lock->caller = utsname()->nodename;
++ lock->caller = nodename;
+ lock->oh.data = req->a_owner;
+ lock->oh.len = snprintf(req->a_owner, sizeof(req->a_owner), "%u@%s",
+ (unsigned int)fl->fl_u.nfs_fl.owner->pid,
+- utsname()->nodename);
++ nodename);
+ lock->svid = fl->fl_u.nfs_fl.owner->pid;
+ lock->fl.fl_start = fl->fl_start;
+ lock->fl.fl_end = fl->fl_end;
hwmon-adt7470-fix-incorrect-return-code-check.patch
staging-zcache-fix-zcache-kernel-parameter.patch
media-em28xx-fix-assignment-of-the-eeprom-data.patch
+i2c-i2c-mxs-use-dma-mode-even-for-small-transfers.patch
+cpufreq-loongson2-fix-regression-related-to-clock-management.patch
+cpufreq-rename-ignore_nice-as-ignore_nice_load.patch
+acpi-pm-walk-physical_node_list-under-physical_node_lock.patch
+tracing-fix-fields-of-struct-trace_iterator-that-are-zeroed-by-mistake.patch
+tracing-make-trace_iter_stop_on_free-stop-the-correct-buffer.patch
+tracing-use-flag-buffer_disabled-for-irqsoff-tracer.patch
+tracing-fix-reset-of-time-stamps-during-trace_clock-changes.patch
+alsa-usb-audio-do-not-trust-too-big-wmaxpacketsize-values.patch
+alsa-6fire-fix-dma-issues-with-urb-transfer_buffer-usage.patch
+btrfs-release-both-paths-before-logging-dir-changed-extents.patch
+lockd-don-t-call-utsname-nodename-from-nlmclnt_setlockargs.patch
+sunrpc-don-t-auto-disconnect-from-the-local-rpcbind-socket.patch
+sunrpc-if-the-rpcbind-channel-is-disconnected-fail-the-call-to-unregister.patch
--- /dev/null
+From 00326ed6442c66021cd4b5e19e80f3e2027d5d42 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 5 Aug 2013 14:10:43 -0400
+Subject: SUNRPC: Don't auto-disconnect from the local rpcbind socket
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 00326ed6442c66021cd4b5e19e80f3e2027d5d42 upstream.
+
+There is no need for the kernel to time out the AF_LOCAL connection to
+the rpcbind socket, and doing so is problematic because when it is
+time to reconnect, our process may no longer be using the same mount
+namespace.
+
+Reported-by: Nix <nix@esperi.org.uk>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/rpcb_clnt.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -238,6 +238,14 @@ static int rpcb_create_local_unix(struct
+ .program = &rpcb_program,
+ .version = RPCBVERS_2,
+ .authflavor = RPC_AUTH_NULL,
++ /*
++ * We turn off the idle timeout to prevent the kernel
++ * from automatically disconnecting the socket.
++ * Otherwise, we'd have to cache the mount namespace
++ * of the caller and somehow pass that to the socket
++ * reconnect code.
++ */
++ .flags = RPC_CLNT_CREATE_NO_IDLE_TIMEOUT,
+ };
+ struct rpc_clnt *clnt, *clnt4;
+ int result = 0;
--- /dev/null
+From 786615bc1ce84150ded80daea6bd9f6297f48e73 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Mon, 5 Aug 2013 16:04:47 -0400
+Subject: SUNRPC: If the rpcbind channel is disconnected, fail the call to unregister
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit 786615bc1ce84150ded80daea6bd9f6297f48e73 upstream.
+
+If rpcbind causes our connection to the AF_LOCAL socket to close after
+we've registered a service, then we want to be careful about reconnecting
+since the mount namespace may have changed.
+
+By simply refusing to reconnect the AF_LOCAL socket in the case of
+unregister, we avoid the need to somehow save the mount namespace. While
+this may lead to some services not unregistering properly, it should
+be safe.
+
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Cc: Nix <nix@esperi.org.uk>
+Cc: Jeff Layton <jlayton@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/sunrpc/sched.h | 1 +
+ net/sunrpc/clnt.c | 4 ++++
+ net/sunrpc/netns.h | 1 +
+ net/sunrpc/rpcb_clnt.c | 40 +++++++++++++++++++++++++++-------------
+ 4 files changed, 33 insertions(+), 13 deletions(-)
+
+--- a/include/linux/sunrpc/sched.h
++++ b/include/linux/sunrpc/sched.h
+@@ -130,6 +130,7 @@ struct rpc_task_setup {
+ #define RPC_TASK_SOFTCONN 0x0400 /* Fail if can't connect */
+ #define RPC_TASK_SENT 0x0800 /* message was sent */
+ #define RPC_TASK_TIMEOUT 0x1000 /* fail with ETIMEDOUT on timeout */
++#define RPC_TASK_NOCONNECT 0x2000 /* return ENOTCONN if not connected */
+
+ #define RPC_IS_ASYNC(t) ((t)->tk_flags & RPC_TASK_ASYNC)
+ #define RPC_IS_SWAPPER(t) ((t)->tk_flags & RPC_TASK_SWAPPER)
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1644,6 +1644,10 @@ call_connect(struct rpc_task *task)
+ task->tk_action = call_connect_status;
+ if (task->tk_status < 0)
+ return;
++ if (task->tk_flags & RPC_TASK_NOCONNECT) {
++ rpc_exit(task, -ENOTCONN);
++ return;
++ }
+ xprt_connect(task);
+ }
+ }
+--- a/net/sunrpc/netns.h
++++ b/net/sunrpc/netns.h
+@@ -23,6 +23,7 @@ struct sunrpc_net {
+ struct rpc_clnt *rpcb_local_clnt4;
+ spinlock_t rpcb_clnt_lock;
+ unsigned int rpcb_users;
++ unsigned int rpcb_is_af_local : 1;
+
+ struct mutex gssp_lock;
+ wait_queue_head_t gssp_wq;
+--- a/net/sunrpc/rpcb_clnt.c
++++ b/net/sunrpc/rpcb_clnt.c
+@@ -204,13 +204,15 @@ void rpcb_put_local(struct net *net)
+ }
+
+ static void rpcb_set_local(struct net *net, struct rpc_clnt *clnt,
+- struct rpc_clnt *clnt4)
++ struct rpc_clnt *clnt4,
++ bool is_af_local)
+ {
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
+
+ /* Protected by rpcb_create_local_mutex */
+ sn->rpcb_local_clnt = clnt;
+ sn->rpcb_local_clnt4 = clnt4;
++ sn->rpcb_is_af_local = is_af_local ? 1 : 0;
+ smp_wmb();
+ sn->rpcb_users = 1;
+ dprintk("RPC: created new rpcb local clients (rpcb_local_clnt: "
+@@ -271,7 +273,7 @@ static int rpcb_create_local_unix(struct
+ clnt4 = NULL;
+ }
+
+- rpcb_set_local(net, clnt, clnt4);
++ rpcb_set_local(net, clnt, clnt4, true);
+
+ out:
+ return result;
+@@ -323,7 +325,7 @@ static int rpcb_create_local_net(struct
+ clnt4 = NULL;
+ }
+
+- rpcb_set_local(net, clnt, clnt4);
++ rpcb_set_local(net, clnt, clnt4, false);
+
+ out:
+ return result;
+@@ -384,13 +386,16 @@ static struct rpc_clnt *rpcb_create(stru
+ return rpc_create(&args);
+ }
+
+-static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
++static int rpcb_register_call(struct sunrpc_net *sn, struct rpc_clnt *clnt, struct rpc_message *msg, bool is_set)
+ {
+- int result, error = 0;
++ int flags = RPC_TASK_NOCONNECT;
++ int error, result = 0;
+
++ if (is_set || !sn->rpcb_is_af_local)
++ flags = RPC_TASK_SOFTCONN;
+ msg->rpc_resp = &result;
+
+- error = rpc_call_sync(clnt, msg, RPC_TASK_SOFTCONN);
++ error = rpc_call_sync(clnt, msg, flags);
+ if (error < 0) {
+ dprintk("RPC: failed to contact local rpcbind "
+ "server (errno %d).\n", -error);
+@@ -447,16 +452,19 @@ int rpcb_register(struct net *net, u32 p
+ .rpc_argp = &map,
+ };
+ struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
++ bool is_set = false;
+
+ dprintk("RPC: %sregistering (%u, %u, %d, %u) with local "
+ "rpcbind\n", (port ? "" : "un"),
+ prog, vers, prot, port);
+
+ msg.rpc_proc = &rpcb_procedures2[RPCBPROC_UNSET];
+- if (port)
++ if (port != 0) {
+ msg.rpc_proc = &rpcb_procedures2[RPCBPROC_SET];
++ is_set = true;
++ }
+
+- return rpcb_register_call(sn->rpcb_local_clnt, &msg);
++ return rpcb_register_call(sn, sn->rpcb_local_clnt, &msg, is_set);
+ }
+
+ /*
+@@ -469,6 +477,7 @@ static int rpcb_register_inet4(struct su
+ const struct sockaddr_in *sin = (const struct sockaddr_in *)sap;
+ struct rpcbind_args *map = msg->rpc_argp;
+ unsigned short port = ntohs(sin->sin_port);
++ bool is_set = false;
+ int result;
+
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
+@@ -479,10 +488,12 @@ static int rpcb_register_inet4(struct su
+ map->r_addr, map->r_netid);
+
+ msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
+- if (port)
++ if (port != 0) {
+ msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
++ is_set = true;
++ }
+
+- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
++ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
+ kfree(map->r_addr);
+ return result;
+ }
+@@ -497,6 +508,7 @@ static int rpcb_register_inet6(struct su
+ const struct sockaddr_in6 *sin6 = (const struct sockaddr_in6 *)sap;
+ struct rpcbind_args *map = msg->rpc_argp;
+ unsigned short port = ntohs(sin6->sin6_port);
++ bool is_set = false;
+ int result;
+
+ map->r_addr = rpc_sockaddr2uaddr(sap, GFP_KERNEL);
+@@ -507,10 +519,12 @@ static int rpcb_register_inet6(struct su
+ map->r_addr, map->r_netid);
+
+ msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
+- if (port)
++ if (port != 0) {
+ msg->rpc_proc = &rpcb_procedures4[RPCBPROC_SET];
++ is_set = true;
++ }
+
+- result = rpcb_register_call(sn->rpcb_local_clnt4, msg);
++ result = rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, is_set);
+ kfree(map->r_addr);
+ return result;
+ }
+@@ -527,7 +541,7 @@ static int rpcb_unregister_all_protofami
+ map->r_addr = "";
+ msg->rpc_proc = &rpcb_procedures4[RPCBPROC_UNSET];
+
+- return rpcb_register_call(sn->rpcb_local_clnt4, msg);
++ return rpcb_register_call(sn, sn->rpcb_local_clnt4, msg, false);
+ }
+
+ /**
--- /dev/null
+From ed5467da0e369e65b247b99eb6403cb79172bcda Mon Sep 17 00:00:00 2001
+From: Andrew Vagin <avagin@openvz.org>
+Date: Fri, 2 Aug 2013 21:16:43 +0400
+Subject: tracing: Fix fields of struct trace_iterator that are zeroed by mistake
+
+From: Andrew Vagin <avagin@openvz.org>
+
+commit ed5467da0e369e65b247b99eb6403cb79172bcda upstream.
+
+tracing_read_pipe zeros all fields bellow "seq". The declaration contains
+a comment about that, but it doesn't help.
+
+The first field is "snapshot", it's true when current open file is
+snapshot. Looks obvious, that it should not be zeroed.
+
+The second field is "started". It was converted from cpumask_t to
+cpumask_var_t (v2.6.28-4983-g4462344), in other words it was
+converted from cpumask to pointer on cpumask.
+
+Currently the reference on "started" memory is lost after the first read
+from tracing_read_pipe and a proper object will never be freed.
+
+The "started" is never dereferenced for trace_pipe, because trace_pipe
+can't have the TRACE_FILE_ANNOTATE options.
+
+Link: http://lkml.kernel.org/r/1375463803-3085183-1-git-send-email-avagin@openvz.org
+
+Signed-off-by: Andrew Vagin <avagin@openvz.org>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/ftrace_event.h | 10 ++++++----
+ kernel/trace/trace.c | 1 +
+ 2 files changed, 7 insertions(+), 4 deletions(-)
+
+--- a/include/linux/ftrace_event.h
++++ b/include/linux/ftrace_event.h
+@@ -78,6 +78,11 @@ struct trace_iterator {
+ /* trace_seq for __print_flags() and __print_symbolic() etc. */
+ struct trace_seq tmp_seq;
+
++ cpumask_var_t started;
++
++ /* it's true when current open file is snapshot */
++ bool snapshot;
++
+ /* The below is zeroed out in pipe_read */
+ struct trace_seq seq;
+ struct trace_entry *ent;
+@@ -90,10 +95,7 @@ struct trace_iterator {
+ loff_t pos;
+ long idx;
+
+- cpumask_var_t started;
+-
+- /* it's true when current open file is snapshot */
+- bool snapshot;
++ /* All new field here will be zeroed out in pipe_read */
+ };
+
+ enum trace_iter_flags {
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4121,6 +4121,7 @@ waitagain:
+ memset(&iter->seq, 0,
+ sizeof(struct trace_iterator) -
+ offsetof(struct trace_iterator, seq));
++ cpumask_clear(iter->started);
+ iter->pos = -1;
+
+ trace_event_read_lock();
--- /dev/null
+From 9457158bbc0ee04ecef76862d73eecd8076e9c7b Mon Sep 17 00:00:00 2001
+From: Alexander Z Lam <azl@google.com>
+Date: Fri, 2 Aug 2013 18:36:16 -0700
+Subject: tracing: Fix reset of time stamps during trace_clock changes
+
+From: Alexander Z Lam <azl@google.com>
+
+commit 9457158bbc0ee04ecef76862d73eecd8076e9c7b upstream.
+
+Fixed two issues with changing the timestamp clock with trace_clock:
+
+ - The global buffer was reset on instance clock changes. Change this to pass
+ the correct per-instance buffer
+ - ftrace_now() is used to set buf->time_start in tracing_reset_online_cpus().
+ This was incorrect because ftrace_now() used the global buffer's clock to
+ return the current time. Change this to use buffer_ftrace_now() which
+ returns the current time for the correct per-instance buffer.
+
+Also removed tracing_reset_current() because it is not used anywhere
+
+Link: http://lkml.kernel.org/r/1375493777-17261-2-git-send-email-azl@google.com
+
+Signed-off-by: Alexander Z Lam <azl@google.com>
+Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
+Cc: David Sharp <dhsharp@google.com>
+Cc: Alexander Z Lam <lambchop468@gmail.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 24 ++++++++++++------------
+ 1 file changed, 12 insertions(+), 12 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -232,20 +232,25 @@ int filter_current_check_discard(struct
+ }
+ EXPORT_SYMBOL_GPL(filter_current_check_discard);
+
+-cycle_t ftrace_now(int cpu)
++cycle_t buffer_ftrace_now(struct trace_buffer *buf, int cpu)
+ {
+ u64 ts;
+
+ /* Early boot up does not have a buffer yet */
+- if (!global_trace.trace_buffer.buffer)
++ if (!buf->buffer)
+ return trace_clock_local();
+
+- ts = ring_buffer_time_stamp(global_trace.trace_buffer.buffer, cpu);
+- ring_buffer_normalize_time_stamp(global_trace.trace_buffer.buffer, cpu, &ts);
++ ts = ring_buffer_time_stamp(buf->buffer, cpu);
++ ring_buffer_normalize_time_stamp(buf->buffer, cpu, &ts);
+
+ return ts;
+ }
+
++cycle_t ftrace_now(int cpu)
++{
++ return buffer_ftrace_now(&global_trace.trace_buffer, cpu);
++}
++
+ /**
+ * tracing_is_enabled - Show if global_trace has been disabled
+ *
+@@ -1194,7 +1199,7 @@ void tracing_reset_online_cpus(struct tr
+ /* Make sure all commits have finished */
+ synchronize_sched();
+
+- buf->time_start = ftrace_now(buf->cpu);
++ buf->time_start = buffer_ftrace_now(buf, buf->cpu);
+
+ for_each_online_cpu(cpu)
+ ring_buffer_reset_cpu(buffer, cpu);
+@@ -1202,11 +1207,6 @@ void tracing_reset_online_cpus(struct tr
+ ring_buffer_record_enable(buffer);
+ }
+
+-void tracing_reset_current(int cpu)
+-{
+- tracing_reset(&global_trace.trace_buffer, cpu);
+-}
+-
+ /* Must have trace_types_lock held */
+ void tracing_reset_all_online_cpus(void)
+ {
+@@ -4647,12 +4647,12 @@ static ssize_t tracing_clock_write(struc
+ * New clock may not be consistent with the previous clock.
+ * Reset the buffer so that it doesn't have incomparable timestamps.
+ */
+- tracing_reset_online_cpus(&global_trace.trace_buffer);
++ tracing_reset_online_cpus(&tr->trace_buffer);
+
+ #ifdef CONFIG_TRACER_MAX_TRACE
+ if (tr->flags & TRACE_ARRAY_FL_GLOBAL && tr->max_buffer.buffer)
+ ring_buffer_set_clock(tr->max_buffer.buffer, trace_clocks[i].func);
+- tracing_reset_online_cpus(&global_trace.max_buffer);
++ tracing_reset_online_cpus(&tr->max_buffer);
+ #endif
+
+ mutex_unlock(&trace_types_lock);
--- /dev/null
+From 711e124379e0f889e40e2f01d7f5d61936d3cd23 Mon Sep 17 00:00:00 2001
+From: Alexander Z Lam <azl@google.com>
+Date: Fri, 2 Aug 2013 18:36:15 -0700
+Subject: tracing: Make TRACE_ITER_STOP_ON_FREE stop the correct buffer
+
+From: Alexander Z Lam <azl@google.com>
+
+commit 711e124379e0f889e40e2f01d7f5d61936d3cd23 upstream.
+
+Releasing the free_buffer file in an instance causes the global buffer
+to be stopped when TRACE_ITER_STOP_ON_FREE is enabled. Operate on the
+correct buffer.
+
+Link: http://lkml.kernel.org/r/1375493777-17261-1-git-send-email-azl@google.com
+
+Signed-off-by: Alexander Z Lam <azl@google.com>
+Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
+Cc: David Sharp <dhsharp@google.com>
+Cc: Alexander Z Lam <lambchop468@gmail.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4438,7 +4438,7 @@ tracing_free_buffer_release(struct inode
+
+ /* disable tracing ? */
+ if (trace_flags & TRACE_ITER_STOP_ON_FREE)
+- tracing_off();
++ tracer_tracing_off(tr);
+ /* resize the ring buffer to 0 */
+ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
+
--- /dev/null
+From 10246fa35d4ffdfe472185d4cbf9c2dfd9a9f023 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 1 Jul 2013 15:58:24 -0400
+Subject: tracing: Use flag buffer_disabled for irqsoff tracer
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 10246fa35d4ffdfe472185d4cbf9c2dfd9a9f023 upstream.
+
+If the ring buffer is disabled and the irqsoff tracer records a trace it
+will clear out its buffer and lose the data it had previously recorded.
+
+Currently there's a callback when writing to the tracing_of file, but if
+tracing is disabled via the function tracer trigger, it will not inform
+the irqsoff tracer to stop recording.
+
+By using the "mirror" flag (buffer_disabled) in the trace_array, that keeps
+track of the status of the trace_array's buffer, it gives the irqsoff
+tracer a fast way to know if it should record a new trace or not.
+The flag may be a little behind the real state of the buffer, but it
+should not affect the trace too much. It's more important for the irqsoff
+tracer to be fast.
+
+Reported-by: Dave Jones <davej@redhat.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 101 +++++++++++++++++++++++++++++--------------
+ kernel/trace/trace_irqsoff.c | 4 -
+ 2 files changed, 72 insertions(+), 33 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -246,9 +246,24 @@ cycle_t ftrace_now(int cpu)
+ return ts;
+ }
+
++/**
++ * tracing_is_enabled - Show if global_trace has been disabled
++ *
++ * Shows if the global trace has been enabled or not. It uses the
++ * mirror flag "buffer_disabled" to be used in fast paths such as for
++ * the irqsoff tracer. But it may be inaccurate due to races. If you
++ * need to know the accurate state, use tracing_is_on() which is a little
++ * slower, but accurate.
++ */
+ int tracing_is_enabled(void)
+ {
+- return tracing_is_on();
++ /*
++ * For quick access (irqsoff uses this in fast path), just
++ * return the mirror variable of the state of the ring buffer.
++ * It's a little racy, but we don't really care.
++ */
++ smp_rmb();
++ return !global_trace.buffer_disabled;
+ }
+
+ /*
+@@ -361,6 +376,23 @@ unsigned long trace_flags = TRACE_ITER_P
+ TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
+ TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS | TRACE_ITER_FUNCTION;
+
++void tracer_tracing_on(struct trace_array *tr)
++{
++ if (tr->trace_buffer.buffer)
++ ring_buffer_record_on(tr->trace_buffer.buffer);
++ /*
++ * This flag is looked at when buffers haven't been allocated
++ * yet, or by some tracers (like irqsoff), that just want to
++ * know if the ring buffer has been disabled, but it can handle
++ * races of where it gets disabled but we still do a record.
++ * As the check is in the fast path of the tracers, it is more
++ * important to be fast than accurate.
++ */
++ tr->buffer_disabled = 0;
++ /* Make the flag seen by readers */
++ smp_wmb();
++}
++
+ /**
+ * tracing_on - enable tracing buffers
+ *
+@@ -369,15 +401,7 @@ unsigned long trace_flags = TRACE_ITER_P
+ */
+ void tracing_on(void)
+ {
+- if (global_trace.trace_buffer.buffer)
+- ring_buffer_record_on(global_trace.trace_buffer.buffer);
+- /*
+- * This flag is only looked at when buffers haven't been
+- * allocated yet. We don't really care about the race
+- * between setting this flag and actually turning
+- * on the buffer.
+- */
+- global_trace.buffer_disabled = 0;
++ tracer_tracing_on(&global_trace);
+ }
+ EXPORT_SYMBOL_GPL(tracing_on);
+
+@@ -571,6 +595,23 @@ void tracing_snapshot_alloc(void)
+ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc);
+ #endif /* CONFIG_TRACER_SNAPSHOT */
+
++void tracer_tracing_off(struct trace_array *tr)
++{
++ if (tr->trace_buffer.buffer)
++ ring_buffer_record_off(tr->trace_buffer.buffer);
++ /*
++ * This flag is looked at when buffers haven't been allocated
++ * yet, or by some tracers (like irqsoff), that just want to
++ * know if the ring buffer has been disabled, but it can handle
++ * races of where it gets disabled but we still do a record.
++ * As the check is in the fast path of the tracers, it is more
++ * important to be fast than accurate.
++ */
++ tr->buffer_disabled = 1;
++ /* Make the flag seen by readers */
++ smp_wmb();
++}
++
+ /**
+ * tracing_off - turn off tracing buffers
+ *
+@@ -581,26 +622,29 @@ EXPORT_SYMBOL_GPL(tracing_snapshot_alloc
+ */
+ void tracing_off(void)
+ {
+- if (global_trace.trace_buffer.buffer)
+- ring_buffer_record_off(global_trace.trace_buffer.buffer);
+- /*
+- * This flag is only looked at when buffers haven't been
+- * allocated yet. We don't really care about the race
+- * between setting this flag and actually turning
+- * on the buffer.
+- */
+- global_trace.buffer_disabled = 1;
++ tracer_tracing_off(&global_trace);
+ }
+ EXPORT_SYMBOL_GPL(tracing_off);
+
+ /**
++ * tracer_tracing_is_on - show real state of ring buffer enabled
++ * @tr : the trace array to know if ring buffer is enabled
++ *
++ * Shows real state of the ring buffer if it is enabled or not.
++ */
++int tracer_tracing_is_on(struct trace_array *tr)
++{
++ if (tr->trace_buffer.buffer)
++ return ring_buffer_record_is_on(tr->trace_buffer.buffer);
++ return !tr->buffer_disabled;
++}
++
++/**
+ * tracing_is_on - show state of ring buffers enabled
+ */
+ int tracing_is_on(void)
+ {
+- if (global_trace.trace_buffer.buffer)
+- return ring_buffer_record_is_on(global_trace.trace_buffer.buffer);
+- return !global_trace.buffer_disabled;
++ return tracer_tracing_is_on(&global_trace);
+ }
+ EXPORT_SYMBOL_GPL(tracing_is_on);
+
+@@ -4060,7 +4104,7 @@ static int tracing_wait_pipe(struct file
+ *
+ * iter->pos will be 0 if we haven't read anything.
+ */
+- if (!tracing_is_enabled() && iter->pos)
++ if (!tracing_is_on() && iter->pos)
+ break;
+ }
+
+@@ -5772,15 +5816,10 @@ rb_simple_read(struct file *filp, char _
+ size_t cnt, loff_t *ppos)
+ {
+ struct trace_array *tr = filp->private_data;
+- struct ring_buffer *buffer = tr->trace_buffer.buffer;
+ char buf[64];
+ int r;
+
+- if (buffer)
+- r = ring_buffer_record_is_on(buffer);
+- else
+- r = 0;
+-
++ r = tracer_tracing_is_on(tr);
+ r = sprintf(buf, "%d\n", r);
+
+ return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
+@@ -5802,11 +5841,11 @@ rb_simple_write(struct file *filp, const
+ if (buffer) {
+ mutex_lock(&trace_types_lock);
+ if (val) {
+- ring_buffer_record_on(buffer);
++ tracer_tracing_on(tr);
+ if (tr->current_trace->start)
+ tr->current_trace->start(tr);
+ } else {
+- ring_buffer_record_off(buffer);
++ tracer_tracing_off(tr);
+ if (tr->current_trace->stop)
+ tr->current_trace->stop(tr);
+ }
+--- a/kernel/trace/trace_irqsoff.c
++++ b/kernel/trace/trace_irqsoff.c
+@@ -373,7 +373,7 @@ start_critical_timing(unsigned long ip,
+ struct trace_array_cpu *data;
+ unsigned long flags;
+
+- if (likely(!tracer_enabled))
++ if (!tracer_enabled || !tracing_is_enabled())
+ return;
+
+ cpu = raw_smp_processor_id();
+@@ -416,7 +416,7 @@ stop_critical_timing(unsigned long ip, u
+ else
+ return;
+
+- if (!tracer_enabled)
++ if (!tracer_enabled || !tracing_is_enabled())
+ return;
+
+ data = per_cpu_ptr(tr->trace_buffer.data, cpu);