From: Greg Kroah-Hartman <gregkh@suse.de>
Subject: Linux 2.6.27.13

Upstream 2.6.27.13 release from kernel.org

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

diff --git a/Documentation/sound/alsa/ALSA-Configuration.txt b/Documentation/sound/alsa/ALSA-Configuration.txt
index b117e42..90f718c 100644
--- a/Documentation/sound/alsa/ALSA-Configuration.txt
+++ b/Documentation/sound/alsa/ALSA-Configuration.txt
@@ -960,9 +960,10 @@ Prior to version 0.9.0rc4 options had a 'snd_' prefix. This was removed.
 6stack 6-jack, separate surrounds (default)
 3stack 3-stack, shared surrounds
 laptop 2-channel only (FSC V2060, Samsung M50)
- laptop-eapd 2-channel with EAPD (Samsung R65, ASUS A6J)
+ laptop-eapd 2-channel with EAPD (ASUS A6J)
 laptop-automute 2-channel with EAPD and HP-automute (Lenovo N100)
 ultra 2-channel with EAPD (Samsung Ultra tablet PC)
+ samsung 2-channel with EAPD (Samsung R65)

 AD1988/AD1988B/AD1989A/AD1989B
 6stack 6-jack
diff --git a/Makefile b/Makefile
index f0f8cdf..d879e7d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = .12
+EXTRAVERSION = .13
 NAME = Trembling Tortoise

 # *DOCUMENTATION*
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 48e496f..fb7e69c 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -15,6 +15,7 @@ config IA64
 select ACPI if (!IA64_HP_SIM)
 select PM if (!IA64_HP_SIM)
 select ARCH_SUPPORTS_MSI
+ select HAVE_UNSTABLE_SCHED_CLOCK
 select HAVE_IDE
 select HAVE_OPROFILE
 select HAVE_KPROBES
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index db44e02..ba51948 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -710,9 +710,18 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
 unsigned long len)
 {
 struct slice_mask mask, available;
+ unsigned int psize = mm->context.user_psize;

 mask = slice_range_to_mask(addr, len);
- available = slice_mask_for_size(mm, mm->context.user_psize);
+ available = slice_mask_for_size(mm, psize);
+#ifdef CONFIG_PPC_64K_PAGES
+ /* We need to account for 4k slices too */
+ if (psize == MMU_PAGE_64K) {
+ struct slice_mask compat_mask;
+ compat_mask = slice_mask_for_size(mm, MMU_PAGE_4K);
+ or_mask(available, compat_mask);
+ }
+#endif

 #if 0 /* too verbose */
 slice_dbg("is_hugepage_only_range(mm=%p, addr=%lx, len=%lx)\n",
diff --git a/drivers/firmware/dell_rbu.c b/drivers/firmware/dell_rbu.c
index 13946eb..b4704e1 100644
--- a/drivers/firmware/dell_rbu.c
+++ b/drivers/firmware/dell_rbu.c
@@ -576,7 +576,7 @@ static ssize_t read_rbu_image_type(struct kobject *kobj,
 {
 int size = 0;
 if (!pos)
- size = sprintf(buffer, "%s\n", image_type);
+ size = scnprintf(buffer, count, "%s\n", image_type);
 return size;
 }

@@ -648,7 +648,7 @@ static ssize_t read_rbu_packet_size(struct kobject *kobj,
 int size = 0;
 if (!pos) {
 spin_lock(&rbu_data.lock);
- size = sprintf(buffer, "%lu\n", rbu_data.packetsize);
+ size = scnprintf(buffer, count, "%lu\n", rbu_data.packetsize);
 spin_unlock(&rbu_data.lock);
 }
 return size;
diff --git a/drivers/hwmon/abituguru3.c b/drivers/hwmon/abituguru3.c
index d9e7a49..58a5efb 100644
--- a/drivers/hwmon/abituguru3.c
+++ b/drivers/hwmon/abituguru3.c
@@ -1153,7 +1153,7 @@ static int __init abituguru3_dmi_detect(void)

 static inline int abituguru3_dmi_detect(void)
 {
- return -ENODEV;
+ return 1;
 }

 #endif /* CONFIG_DMI */
diff --git a/drivers/hwmon/hwmon-vid.c b/drivers/hwmon/hwmon-vid.c
index c54eff9..bfc2961 100644
--- a/drivers/hwmon/hwmon-vid.c
+++ b/drivers/hwmon/hwmon-vid.c
@@ -180,6 +180,7 @@ static struct vrm_model vrm_models[] = {
 {X86_VENDOR_AMD, 0x6, ANY, ANY, 90}, /* Athlon Duron etc */
 {X86_VENDOR_AMD, 0xF, 0x3F, ANY, 24}, /* Athlon 64, Opteron */
 {X86_VENDOR_AMD, 0xF, ANY, ANY, 25}, /* NPT family 0Fh */
+ {X86_VENDOR_AMD, 0x10, ANY, ANY, 25}, /* NPT family 10h */
 {X86_VENDOR_INTEL, 0x6, 0x9, ANY, 13}, /* Pentium M (130 nm) */
 {X86_VENDOR_INTEL, 0x6, 0xB, ANY, 85}, /* Tualatin */
 {X86_VENDOR_INTEL, 0x6, 0xD, ANY, 13}, /* Pentium M (90 nm) */
diff --git a/drivers/misc/sgi-xp/xpc_sn2.c b/drivers/misc/sgi-xp/xpc_sn2.c
index b4882cc..d32c1ee 100644
--- a/drivers/misc/sgi-xp/xpc_sn2.c
+++ b/drivers/misc/sgi-xp/xpc_sn2.c
@@ -904,7 +904,7 @@ xpc_update_partition_info_sn2(struct xpc_partition *part, u8 remote_rp_version,
 dev_dbg(xpc_part, " remote_vars_pa = 0x%016lx\n",
 part_sn2->remote_vars_pa);

- part->last_heartbeat = remote_vars->heartbeat;
+ part->last_heartbeat = remote_vars->heartbeat - 1;
 dev_dbg(xpc_part, " last_heartbeat = 0x%016lx\n",
 part->last_heartbeat);

diff --git a/drivers/net/irda/irda-usb.c b/drivers/net/irda/irda-usb.c
index b5d6b9a..b427978 100644
--- a/drivers/net/irda/irda-usb.c
+++ b/drivers/net/irda/irda-usb.c
@@ -1075,7 +1075,7 @@ static int stir421x_patch_device(struct irda_usb_cb *self)
 {
 unsigned int i;
 int ret;
- char stir421x_fw_name[11];
+ char stir421x_fw_name[12];
 const struct firmware *fw;
 const unsigned char *fw_version_ptr; /* pointer to version string */
 unsigned long fw_version = 0;
diff --git a/drivers/net/r6040.c b/drivers/net/r6040.c
index 5d86281..c71982d 100644
--- a/drivers/net/r6040.c
+++ b/drivers/net/r6040.c
@@ -49,8 +49,8 @@
 #include <asm/processor.h>

 #define DRV_NAME "r6040"
-#define DRV_VERSION "0.18"
-#define DRV_RELDATE "13Jul2008"
+#define DRV_VERSION "0.19"
+#define DRV_RELDATE "18Dec2008"

 /* PHY CHIP Address */
 #define PHY1_ADDR 1 /* For MAC1 */
@@ -214,7 +214,7 @@ static int r6040_phy_read(void __iomem *ioaddr, int phy_addr, int reg)
 /* Wait for the read bit to be cleared */
 while (limit--) {
 cmd = ioread16(ioaddr + MMDIO);
- if (cmd & MDIO_READ)
+ if (!(cmd & MDIO_READ))
 break;
 }

@@ -233,7 +233,7 @@ static void r6040_phy_write(void __iomem *ioaddr, int phy_addr, int reg, u16 val
 /* Wait for the write bit to be cleared */
 while (limit--) {
 cmd = ioread16(ioaddr + MMDIO);
- if (cmd & MDIO_WRITE)
+ if (!(cmd & MDIO_WRITE))
 break;
 }
 }
@@ -681,8 +681,10 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 struct net_device *dev = dev_id;
 struct r6040_private *lp = netdev_priv(dev);
 void __iomem *ioaddr = lp->base;
- u16 status;
+ u16 misr, status;

+ /* Save MIER */
+ misr = ioread16(ioaddr + MIER);
 /* Mask off RDC MAC interrupt */
 iowrite16(MSK_INT, ioaddr + MIER);
 /* Read MISR status and clear */
@@ -702,7 +704,7 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 dev->stats.rx_fifo_errors++;

 /* Mask off RX interrupt */
- iowrite16(ioread16(ioaddr + MIER) & ~RX_INTS, ioaddr + MIER);
+ misr &= ~RX_INTS;
 netif_rx_schedule(dev, &lp->napi);
 }

@@ -710,6 +712,9 @@ static irqreturn_t r6040_interrupt(int irq, void *dev_id)
 if (status & TX_INTS)
 r6040_tx(dev);

+ /* Restore RDC MAC interrupt */
+ iowrite16(misr, ioaddr + MIER);
+
 return IRQ_HANDLED;
 }

diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 6dbfed0..69120b5 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -729,7 +729,7 @@ ath9k_hw_eeprom_set_board_values(struct ath_hal *ah,
 AR_AN_TOP2_LOCALBIAS,
 AR_AN_TOP2_LOCALBIAS_S,
 pModal->local_bias);
- DPRINTF(ah->ah_sc, ATH_DBG_ANY, "ForceXPAon: %d\n",
+ DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "ForceXPAon: %d\n",
 pModal->force_xpaon);
 REG_RMW_FIELD(ah, AR_PHY_XPA_CFG, AR_PHY_FORCE_XPA_CFG,
 pModal->force_xpaon);
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index cbaca23..bfff6b5 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -53,6 +53,7 @@ static struct usb_device_id p54u_table[] __devinitdata = {
 {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */
 {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */
 {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */
+ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */
 {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */
 {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */
 {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */
diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c
index 9761eaa..45bf8f7 100644
--- a/drivers/net/wireless/rt2x00/rt73usb.c
+++ b/drivers/net/wireless/rt2x00/rt73usb.c
@@ -2113,6 +2113,7 @@ static struct usb_device_id rt73usb_device_table[] = {
 /* Linksys */
 { USB_DEVICE(0x13b1, 0x0020), USB_DEVICE_DATA(&rt73usb_ops) },
 { USB_DEVICE(0x13b1, 0x0023), USB_DEVICE_DATA(&rt73usb_ops) },
+ { USB_DEVICE(0x13b1, 0x0028), USB_DEVICE_DATA(&rt73usb_ops) },
 /* MSI */
 { USB_DEVICE(0x0db0, 0x6877), USB_DEVICE_DATA(&rt73usb_ops) },
 { USB_DEVICE(0x0db0, 0x6874), USB_DEVICE_DATA(&rt73usb_ops) },
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index aa6fda1..8a82a62 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -33,6 +33,11 @@ struct endpoint_state {
 struct pcie_link_state {
 struct list_head sibiling;
 struct pci_dev *pdev;
+ bool downstream_has_switch;
+
+ struct pcie_link_state *parent;
+ struct list_head children;
+ struct list_head link;

 /* ASPM state */
 unsigned int support_state;
@@ -125,7 +130,7 @@ static void pcie_set_clock_pm(struct pci_dev *pdev, int enable)
 link_state->clk_pm_enabled = !!enable;
 }

-static void pcie_check_clock_pm(struct pci_dev *pdev)
+static void pcie_check_clock_pm(struct pci_dev *pdev, int blacklist)
 {
 int pos;
 u32 reg32;
@@ -149,10 +154,26 @@ static void pcie_check_clock_pm(struct pci_dev *pdev)
 if (!(reg16 & PCI_EXP_LNKCTL_CLKREQ_EN))
 enabled = 0;
 }
- link_state->clk_pm_capable = capable;
 link_state->clk_pm_enabled = enabled;
 link_state->bios_clk_state = enabled;
- pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+ if (!blacklist) {
+ link_state->clk_pm_capable = capable;
+ pcie_set_clock_pm(pdev, policy_to_clkpm_state(pdev));
+ } else {
+ link_state->clk_pm_capable = 0;
+ pcie_set_clock_pm(pdev, 0);
+ }
+}
+
+static bool pcie_aspm_downstream_has_switch(struct pci_dev *pdev)
+{
+ struct pci_dev *child_dev;
+
+ list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
+ if (child_dev->pcie_type == PCI_EXP_TYPE_UPSTREAM)
+ return true;
+ }
+ return false;
 }

 /*
@@ -419,9 +440,9 @@ static unsigned int pcie_aspm_check_state(struct pci_dev *pdev,
 {
 struct pci_dev *child_dev;

- /* If no child, disable the link */
+ /* If no child, ignore the link */
 if (list_empty(&pdev->subordinate->devices))
- return 0;
+ return state;
 list_for_each_entry(child_dev, &pdev->subordinate->devices, bus_list) {
 if (child_dev->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
 /*
@@ -462,6 +483,9 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
 int valid = 1;
 struct pcie_link_state *link_state = pdev->link_state;

+ /* If no child, disable the link */
+ if (list_empty(&pdev->subordinate->devices))
+ state = 0;
 /*
 * if the downstream component has pci bridge function, don't do ASPM
 * now
@@ -493,20 +517,52 @@ static void __pcie_aspm_config_link(struct pci_dev *pdev, unsigned int state)
 link_state->enabled_state = state;
 }

+static struct pcie_link_state *get_root_port_link(struct pcie_link_state *link)
+{
+ struct pcie_link_state *root_port_link = link;
+ while (root_port_link->parent)
+ root_port_link = root_port_link->parent;
+ return root_port_link;
+}
+
+/* check the whole hierarchy, and configure each link in the hierarchy */
 static void __pcie_aspm_configure_link_state(struct pci_dev *pdev,
 unsigned int state)
 {
 struct pcie_link_state *link_state = pdev->link_state;
+ struct pcie_link_state *root_port_link = get_root_port_link(link_state);
+ struct pcie_link_state *leaf;

- if (link_state->support_state == 0)
- return;
 state &= PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;

- /* state 0 means disabling aspm */
- state = pcie_aspm_check_state(pdev, state);
+ /* check all links who have specific root port link */
+ list_for_each_entry(leaf, &link_list, sibiling) {
+ if (!list_empty(&leaf->children) ||
+ get_root_port_link(leaf) != root_port_link)
+ continue;
+ state = pcie_aspm_check_state(leaf->pdev, state);
+ }
+ /* check root port link too in case it hasn't children */
+ state = pcie_aspm_check_state(root_port_link->pdev, state);
+
 if (link_state->enabled_state == state)
 return;
- __pcie_aspm_config_link(pdev, state);
+
+ /*
+ * we must change the hierarchy. See comments in
+ * __pcie_aspm_config_link for the order
+ **/
+ if (state & PCIE_LINK_STATE_L1) {
+ list_for_each_entry(leaf, &link_list, sibiling) {
+ if (get_root_port_link(leaf) == root_port_link)
+ __pcie_aspm_config_link(leaf->pdev, state);
+ }
+ } else {
+ list_for_each_entry_reverse(leaf, &link_list, sibiling) {
+ if (get_root_port_link(leaf) == root_port_link)
+ __pcie_aspm_config_link(leaf->pdev, state);
+ }
+ }
 }

 /*
@@ -570,6 +626,7 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
 unsigned int state;
 struct pcie_link_state *link_state;
 int error = 0;
+ int blacklist;

 if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
 return;
@@ -580,29 +637,58 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
 if (list_empty(&pdev->subordinate->devices))
 goto out;

- if (pcie_aspm_sanity_check(pdev))
- goto out;
+ blacklist = !!pcie_aspm_sanity_check(pdev);

 mutex_lock(&aspm_lock);

 link_state = kzalloc(sizeof(*link_state), GFP_KERNEL);
 if (!link_state)
 goto unlock_out;
- pdev->link_state = link_state;

- pcie_aspm_configure_common_clock(pdev);
+ link_state->downstream_has_switch = pcie_aspm_downstream_has_switch(pdev);
+ INIT_LIST_HEAD(&link_state->children);
+ INIT_LIST_HEAD(&link_state->link);
+ if (pdev->bus->self) {/* this is a switch */
+ struct pcie_link_state *parent_link_state;

- pcie_aspm_cap_init(pdev);
+ parent_link_state = pdev->bus->parent->self->link_state;
+ if (!parent_link_state) {
+ kfree(link_state);
+ goto unlock_out;
+ }
+ list_add(&link_state->link, &parent_link_state->children);
+ link_state->parent = parent_link_state;
+ }

- /* config link state to avoid BIOS error */
- state = pcie_aspm_check_state(pdev, policy_to_aspm_state(pdev));
- __pcie_aspm_config_link(pdev, state);
+ pdev->link_state = link_state;

- pcie_check_clock_pm(pdev);
+ if (!blacklist) {
+ pcie_aspm_configure_common_clock(pdev);
+ pcie_aspm_cap_init(pdev);
+ } else {
+ link_state->enabled_state = PCIE_LINK_STATE_L0S|PCIE_LINK_STATE_L1;
+ link_state->bios_aspm_state = 0;
+ /* Set support state to 0, so we will disable ASPM later */
+ link_state->support_state = 0;
+ }

 link_state->pdev = pdev;
 list_add(&link_state->sibiling, &link_list);

+ if (link_state->downstream_has_switch) {
+ /*
+ * If link has switch, delay the link config. The leaf link
+ * initialization will config the whole hierarchy. but we must
+ * make sure BIOS doesn't set unsupported link state
+ **/
+ state = pcie_aspm_check_state(pdev, link_state->bios_aspm_state);
+ __pcie_aspm_config_link(pdev, state);
+ } else
+ __pcie_aspm_configure_link_state(pdev,
+ policy_to_aspm_state(pdev));
+
+ pcie_check_clock_pm(pdev, blacklist);
+
 unlock_out:
 if (error)
 free_link_state(pdev);
@@ -635,6 +721,7 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
 /* All functions are removed, so just disable ASPM for the link */
 __pcie_aspm_config_one_dev(parent, 0);
 list_del(&link_state->sibiling);
+ list_del(&link_state->link);
 /* Clock PM is for endpoint device */

 free_link_state(parent);
diff --git a/drivers/usb/storage/libusual.c b/drivers/usb/storage/libusual.c
index d617e8a..f970b27 100644
--- a/drivers/usb/storage/libusual.c
+++ b/drivers/usb/storage/libusual.c
@@ -46,6 +46,12 @@ static int usu_probe_thread(void *arg);
 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
 .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

+#define COMPLIANT_DEV(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax, \
+ vendorName, productName, useProtocol, useTransport, \
+ initFunction, flags) \
+{ USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin, bcdDeviceMax), \
+ .driver_info = (flags) }
+
 #define USUAL_DEV(useProto, useTrans, useType) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
 .driver_info = ((useType)<<24) }
@@ -57,6 +63,7 @@ struct usb_device_id storage_usb_ids [] = {

 #undef USUAL_DEV
 #undef UNUSUAL_DEV
+#undef COMPLIANT_DEV

 MODULE_DEVICE_TABLE(usb, storage_usb_ids);
 EXPORT_SYMBOL_GPL(storage_usb_ids);
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 09779f6..620c2b5 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -59,6 +59,13 @@
 #include "transport.h"
 #include "protocol.h"

+/* Vendor IDs for companies that seem to include the READ CAPACITY bug
+ * in all their devices
+ */
+#define VENDOR_ID_NOKIA 0x0421
+#define VENDOR_ID_NIKON 0x04b0
+#define VENDOR_ID_MOTOROLA 0x22b8
+
 /***********************************************************************
 * Host functions
 ***********************************************************************/
@@ -134,6 +141,22 @@ static int slave_configure(struct scsi_device *sdev)
 * settings can't be overridden via the scsi devinfo mechanism. */
 if (sdev->type == TYPE_DISK) {

+ /* Some vendors seem to put the READ CAPACITY bug into
+ * all their devices -- primarily makers of cell phones
+ * and digital cameras. Since these devices always use
+ * flash media and can be expected to have an even number
+ * of sectors, we will always enable the CAPACITY_HEURISTICS
+ * flag unless told otherwise. */
+ switch (le16_to_cpu(us->pusb_dev->descriptor.idVendor)) {
+ case VENDOR_ID_NOKIA:
+ case VENDOR_ID_NIKON:
+ case VENDOR_ID_MOTOROLA:
+ if (!(us->fflags & (US_FL_FIX_CAPACITY |
+ US_FL_CAPACITY_OK)))
+ us->fflags |= US_FL_CAPACITY_HEURISTICS;
+ break;
+ }
+
 /* Disk-type devices use MODE SENSE(6) if the protocol
 * (SubClass) is Transparent SCSI, otherwise they use
 * MODE SENSE(10). */
@@ -196,6 +219,14 @@ static int slave_configure(struct scsi_device *sdev)
 * sector in a larger then 1 sector read, since the performance
 * impact is negible we set this flag for all USB disks */
 sdev->last_sector_bug = 1;
+
+ /* Enable last-sector hacks for single-target devices using
+ * the Bulk-only transport, unless we already know the
+ * capacity will be decremented or is correct. */
+ if (!(us->fflags & (US_FL_FIX_CAPACITY | US_FL_CAPACITY_OK |
+ US_FL_SCM_MULT_TARG)) &&
+ us->protocol == US_PR_BULK)
+ us->use_last_sector_hacks = 1;
 } else {

 /* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 3523a0b..861e308 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -57,6 +57,9 @@
 #include "scsiglue.h"
 #include "debug.h"

+#include <linux/blkdev.h>
+#include "../../scsi/sd.h"
+

 /***********************************************************************
 * Data transfer routines
@@ -511,6 +514,80 @@ int usb_stor_bulk_transfer_sg(struct us_data* us, unsigned int pipe,
 * Transport routines
 ***********************************************************************/

+/* There are so many devices that report the capacity incorrectly,
+ * this routine was written to counteract some of the resulting
+ * problems.
+ */
+static void last_sector_hacks(struct us_data *us, struct scsi_cmnd *srb)
+{
+ struct gendisk *disk;
+ struct scsi_disk *sdkp;
+ u32 sector;
+
+ /* To Report "Medium Error: Record Not Found */
+ static unsigned char record_not_found[18] = {
+ [0] = 0x70, /* current error */
+ [2] = MEDIUM_ERROR, /* = 0x03 */
+ [7] = 0x0a, /* additional length */
+ [12] = 0x14 /* Record Not Found */
+ };
+
+ /* If last-sector problems can't occur, whether because the
+ * capacity was already decremented or because the device is
+ * known to report the correct capacity, then we don't need
+ * to do anything.
+ */
+ if (!us->use_last_sector_hacks)
+ return;
+
+ /* Was this command a READ(10) or a WRITE(10)? */
+ if (srb->cmnd[0] != READ_10 && srb->cmnd[0] != WRITE_10)
+ goto done;
+
+ /* Did this command access the last sector? */
+ sector = (srb->cmnd[2] << 24) | (srb->cmnd[3] << 16) |
+ (srb->cmnd[4] << 8) | (srb->cmnd[5]);
+ disk = srb->request->rq_disk;
+ if (!disk)
+ goto done;
+ sdkp = scsi_disk(disk);
+ if (!sdkp)
+ goto done;
+ if (sector + 1 != sdkp->capacity)
+ goto done;
+
+ if (srb->result == SAM_STAT_GOOD && scsi_get_resid(srb) == 0) {
+
+ /* The command succeeded. We know this device doesn't
+ * have the last-sector bug, so stop checking it.
+ */
+ us->use_last_sector_hacks = 0;
+
+ } else {
+ /* The command failed. Allow up to 3 retries in case this
+ * is some normal sort of failure. After that, assume the
+ * capacity is wrong and we're trying to access the sector
+ * beyond the end. Replace the result code and sense data
+ * with values that will cause the SCSI core to fail the
+ * command immediately, instead of going into an infinite
+ * (or even just a very long) retry loop.
+ */
+ if (++us->last_sector_retries < 3)
+ return;
+ srb->result = SAM_STAT_CHECK_CONDITION;
+ memcpy(srb->sense_buffer, record_not_found,
+ sizeof(record_not_found));
+ }
+
+ done:
+ /* Don't reset the retry counter for TEST UNIT READY commands,
+ * because they get issued after device resets which might be
+ * caused by a failed last-sector access.
+ */
+ if (srb->cmnd[0] != TEST_UNIT_READY)
+ us->last_sector_retries = 0;
+}
+
 /* Invoke the transport and basic error-handling/recovery methods
 *
 * This is used by the protocol layers to actually send the message to
@@ -544,6 +621,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 /* if the transport provided its own sense data, don't auto-sense */
 if (result == USB_STOR_TRANSPORT_NO_SENSE) {
 srb->result = SAM_STAT_CHECK_CONDITION;
+ last_sector_hacks(us, srb);
 return;
 }

@@ -667,6 +745,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 scsi_bufflen(srb) - scsi_get_resid(srb) < srb->underflow)
 srb->result = (DID_ERROR << 16) | (SUGGEST_RETRY << 24);

+ last_sector_hacks(us, srb);
 return;

 /* Error and abort processing: try to resynchronize with the device
@@ -694,6 +773,7 @@ void usb_stor_invoke_transport(struct scsi_cmnd *srb, struct us_data *us)
 us->transport_reset(us);
 }
 clear_bit(US_FLIDX_RESETTING, &us->dflags);
+ last_sector_hacks(us, srb);
 }

 /* Stop the current URB transfer */
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 476da5d..6fcb6d1 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -27,7 +27,8 @@

 /* IMPORTANT NOTE: This file must be included in another file which does
 * the following thing for it to work:
- * The macro UNUSUAL_DEV() must be defined before this file is included
+ * The UNUSUAL_DEV, COMPLIANT_DEV, and USUAL_DEV macros must be defined
+ * before this file is included.
 */

 /* If you edit this file, please try to keep it sorted first by VendorID,
@@ -46,6 +47,12 @@
 * <usb-storage@lists.one-eyed-alien.net>
 */

+/* Note: If you add an entry only in order to set the CAPACITY_OK flag,
+ * use the COMPLIANT_DEV macro instead of UNUSUAL_DEV. This is
+ * because such entries mark devices which actually work correctly,
+ * as opposed to devices that do something strangely or wrongly.
+ */
+
 /* patch submitted by Vivian Bregier <Vivian.Bregier@imag.fr>
 */
 UNUSUAL_DEV( 0x03eb, 0x2002, 0x0100, 0x0100,
@@ -160,20 +167,6 @@ UNUSUAL_DEV( 0x0421, 0x0019, 0x0592, 0x0592,
 US_SC_DEVICE, US_PR_DEVICE, NULL,
 US_FL_MAX_SECTORS_64 ),

-/* Reported by Filip Joelsson <filip@blueturtle.nu> */
-UNUSUAL_DEV( 0x0421, 0x005d, 0x0001, 0x0600,
- "Nokia",
- "Nokia 3110c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0701,
- "Nokia",
- "5310",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
 /* Reported by Mario Rettig <mariorettig@web.de> */
 UNUSUAL_DEV( 0x0421, 0x042e, 0x0100, 0x0100,
 "Nokia",
@@ -239,56 +232,6 @@ UNUSUAL_DEV( 0x0421, 0x0495, 0x0370, 0x0370,
 US_SC_DEVICE, US_PR_DEVICE, NULL,
 US_FL_MAX_SECTORS_64 ),

-/* Reported by Cedric Godin <cedric@belbone.be> */
-UNUSUAL_DEV( 0x0421, 0x04b9, 0x0500, 0x0551,
- "Nokia",
- "5300",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Paulo Fessel <pfessel@gmail.com> */
-UNUSUAL_DEV( 0x0421, 0x04bd, 0x0000, 0x9999,
- "Nokia",
- "5200",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Richard Nauber <RichardNauber@web.de> */
-UNUSUAL_DEV( 0x0421, 0x04fa, 0x0550, 0x0660,
- "Nokia",
- "6300",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by Ozan Sener <themgzzy@gmail.com> */
-UNUSUAL_DEV( 0x0421, 0x0060, 0x0551, 0x0551,
- "Nokia",
- "3500c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Reported by CSECSY Laszlo <boobaa@frugalware.org> */
-UNUSUAL_DEV( 0x0421, 0x0063, 0x0001, 0x0601,
- "Nokia",
- "Nokia 3109c",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Patch for Nokia 5310 capacity */
-UNUSUAL_DEV( 0x0421, 0x006a, 0x0000, 0x0591,
- "Nokia",
- "5310",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
-/* Submitted by Ricky Wong Yung Fei <evilbladewarrior@gmail.com> */
-/* Nokia 7610 Supernova - Too many sectors reported in usb storage mode */
-UNUSUAL_DEV( 0x0421, 0x00f5, 0x0000, 0x0470,
- "Nokia",
- "7610 Supernova",
- US_SC_DEVICE, US_PR_DEVICE, NULL,
- US_FL_FIX_CAPACITY ),
-
 /* Reported by Olaf Hering <olh@suse.de> from novell bug #105878 */
 UNUSUAL_DEV( 0x0424, 0x0fdc, 0x0210, 0x0210,
 "SMSC",
@@ -692,6 +635,13 @@ UNUSUAL_DEV( 0x0525, 0xa140, 0x0100, 0x0100,
 US_SC_8070, US_PR_DEVICE, NULL,
 US_FL_FIX_INQUIRY ),

+/* Added by Alan Stern <stern@rowland.harvard.edu> */
+COMPLIANT_DEV(0x0525, 0xa4a5, 0x0000, 0x9999,
+ "Linux",
+ "File-backed Storage Gadget",
+ US_SC_DEVICE, US_PR_DEVICE, NULL,
+ US_FL_CAPACITY_OK ),
+
 /* Yakumo Mega Image 37
 * Submitted by Stephan Fuhrmann <atomenergie@t-online.de> */
 UNUSUAL_DEV( 0x052b, 0x1801, 0x0100, 0x0100,
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index 27016fd..ceb8ac3 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -126,6 +126,8 @@ MODULE_PARM_DESC(delay_use, "seconds to delay before using a new device");
 { USB_DEVICE_VER(id_vendor, id_product, bcdDeviceMin,bcdDeviceMax), \
 .driver_info = (flags)|(USB_US_TYPE_STOR<<24) }

+#define COMPLIANT_DEV UNUSUAL_DEV
+
 #define USUAL_DEV(useProto, useTrans, useType) \
 { USB_INTERFACE_INFO(USB_CLASS_MASS_STORAGE, useProto, useTrans), \
 .driver_info = (USB_US_TYPE_STOR<<24) }
@@ -134,6 +136,7 @@ static struct usb_device_id storage_usb_ids [] = {

 # include "unusual_devs.h"
 #undef UNUSUAL_DEV
+#undef COMPLIANT_DEV
 #undef USUAL_DEV
 /* Terminating entry */
 { }
@@ -164,6 +167,8 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
 .initFunction = init_function, \
 }

+#define COMPLIANT_DEV UNUSUAL_DEV
+
 #define USUAL_DEV(use_protocol, use_transport, use_type) \
 { \
 .useProtocol = use_protocol, \
@@ -173,6 +178,7 @@ MODULE_DEVICE_TABLE (usb, storage_usb_ids);
 static struct us_unusual_dev us_unusual_dev_list[] = {
 # include "unusual_devs.h"
 # undef UNUSUAL_DEV
+# undef COMPLIANT_DEV
 # undef USUAL_DEV

 /* Terminating entry */
diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
index a4ad73b..2e995c9 100644
--- a/drivers/usb/storage/usb.h
+++ b/drivers/usb/storage/usb.h
@@ -155,6 +155,10 @@ struct us_data {
 #ifdef CONFIG_PM
 pm_hook suspend_resume_hook;
 #endif
+
+ /* hacks for READ CAPACITY bug handling */
+ int use_last_sector_hacks;
+ int last_sector_retries;
 };

 /* Convert between us_data and the corresponding Scsi_Host */
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 25adfc3..c8616a0 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -421,9 +421,6 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
 * If we're a pdlfush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
- * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
- * that it can be located for waiting on in __writeback_single_inode().
- *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched. For other superblocks,
@@ -443,6 +440,7 @@ void generic_sync_sb_inodes(struct super_block *sb,
 struct writeback_control *wbc)
 {
 const unsigned long start = jiffies; /* livelock avoidance */
+ int sync = wbc->sync_mode == WB_SYNC_ALL;

 spin_lock(&inode_lock);
 if (!wbc->for_kupdate || list_empty(&sb->s_io))
@@ -499,10 +497,6 @@ void generic_sync_sb_inodes(struct super_block *sb,
 __iget(inode);
 pages_skipped = wbc->pages_skipped;
 __writeback_single_inode(inode, wbc);
- if (wbc->sync_mode == WB_SYNC_HOLD) {
- inode->dirtied_when = jiffies;
- list_move(&inode->i_list, &sb->s_dirty);
- }
 if (current_is_pdflush())
 writeback_release(bdi);
 if (wbc->pages_skipped != pages_skipped) {
@@ -523,7 +517,49 @@ void generic_sync_sb_inodes(struct super_block *sb,
 if (!list_empty(&sb->s_more_io))
 wbc->more_io = 1;
 }
- spin_unlock(&inode_lock);
+
+ if (sync) {
+ struct inode *inode, *old_inode = NULL;
+
+ /*
+ * Data integrity sync. Must wait for all pages under writeback,
+ * because there may have been pages dirtied before our sync
+ * call, but which had writeout started before we write it out.
+ * In which case, the inode may not be on the dirty list, but
+ * we still have to wait for that writeout.
+ */
+ list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
+ struct address_space *mapping;
+
+ if (inode->i_state & (I_FREEING|I_WILL_FREE))
+ continue;
+ mapping = inode->i_mapping;
+ if (mapping->nrpages == 0)
+ continue;
+ __iget(inode);
+ spin_unlock(&inode_lock);
+ /*
+ * We hold a reference to 'inode' so it couldn't have
+ * been removed from s_inodes list while we dropped the
+ * inode_lock. We cannot iput the inode now as we can
+ * be holding the last reference and we cannot iput it
+ * under inode_lock. So we keep the reference and iput
+ * it later.
+ */
+ iput(old_inode);
+ old_inode = inode;
+
+ filemap_fdatawait(mapping);
+
+ cond_resched();
+
+ spin_lock(&inode_lock);
+ }
+ spin_unlock(&inode_lock);
+ iput(old_inode);
+ } else
+ spin_unlock(&inode_lock);
+
 return; /* Leave any unwritten inodes on s_io */
 }
 EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);
@@ -588,8 +624,7 @@ restart:

 /*
 * writeback and wait upon the filesystem's dirty inodes. The caller will
- * do this in two passes - one to write, and one to wait. WB_SYNC_HOLD is
- * used to park the written inodes on sb->s_dirty for the wait pass.
+ * do this in two passes - one to write, and one to wait.
 *
 * A finite limit is set on the number of pages which will be written.
 * To prevent infinite livelock of sys_sync().
@@ -600,30 +635,21 @@ restart:
 void sync_inodes_sb(struct super_block *sb, int wait)
 {
 struct writeback_control wbc = {
- .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
+ .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
 .range_start = 0,
 .range_end = LLONG_MAX,
 };
- unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
- unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

- wbc.nr_to_write = nr_dirty + nr_unstable +
- (inodes_stat.nr_inodes - inodes_stat.nr_unused) +
- nr_dirty + nr_unstable;
- wbc.nr_to_write += wbc.nr_to_write / 2; /* Bit more for luck */
- sync_sb_inodes(sb, &wbc);
-}
+ if (!wait) {
+ unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
+ unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

-/*
- * Rather lame livelock avoidance.
- */
-static void set_sb_syncing(int val)
-{
- struct super_block *sb;
- spin_lock(&sb_lock);
- list_for_each_entry_reverse(sb, &super_blocks, s_list)
- sb->s_syncing = val;
- spin_unlock(&sb_lock);
+ wbc.nr_to_write = nr_dirty + nr_unstable +
+ (inodes_stat.nr_inodes - inodes_stat.nr_unused);
+ } else
+ wbc.nr_to_write = LONG_MAX; /* doesn't actually matter */
+
+ sync_sb_inodes(sb, &wbc);
 }

 /**
@@ -652,9 +678,6 @@ static void __sync_inodes(int wait)
 spin_lock(&sb_lock);
 restart:
 list_for_each_entry(sb, &super_blocks, s_list) {
- if (sb->s_syncing)
- continue;
- sb->s_syncing = 1;
 sb->s_count++;
 spin_unlock(&sb_lock);
 down_read(&sb->s_umount);
@@ -672,13 +695,10 @@ restart:

 void sync_inodes(int wait)
 {
- set_sb_syncing(0);
 __sync_inodes(0);

- if (wait) {
- set_sb_syncing(0);
+ if (wait)
 __sync_inodes(1);
- }
 }

 /**
diff --git a/fs/sync.c b/fs/sync.c
index 6cc8cb4..9e5f60d 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -287,7 +287,7 @@ int do_sync_mapping_range(struct address_space *mapping, loff_t offset,

 if (flags & SYNC_FILE_RANGE_WRITE) {
 ret = __filemap_fdatawrite_range(mapping, offset, endbyte,
- WB_SYNC_NONE);
+ WB_SYNC_ALL);
 if (ret < 0)
 goto out;
 }
diff --git a/include/linux/fs.h b/include/linux/fs.h
index d621217..d1b3e22 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1080,7 +1080,6 @@ struct super_block {
 struct rw_semaphore s_umount;
 struct mutex s_lock;
 int s_count;
- int s_syncing;
 int s_need_sync_fs;
 atomic_t s_active;
 #ifdef CONFIG_SECURITY
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index d9a3bbe..bd414ec 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -52,8 +52,9 @@
 US_FLAG(MAX_SECTORS_MIN,0x00002000) \
 /* Sets max_sectors to arch min */ \
 US_FLAG(BULK_IGNORE_TAG,0x00004000) \
- /* Ignore tag mismatch in bulk operations */
-
+ /* Ignore tag mismatch in bulk operations */ \
+ US_FLAG(CAPACITY_OK, 0x00010000) \
+ /* READ CAPACITY response is correct */

 #define US_FLAG(name, value) US_FL_##name = value ,
 enum { US_DO_ALL_FLAGS };
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index 12b15c5..c2835bb 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -30,7 +30,6 @@ static inline int task_is_pdflush(struct task_struct *task)
 enum writeback_sync_modes {
 WB_SYNC_NONE, /* Don't wait on anything */
 WB_SYNC_ALL, /* Wait on every mapping */
- WB_SYNC_HOLD, /* Hold the inode on sb_dirty for sys_sync() */
 };

 /*
diff --git a/kernel/signal.c b/kernel/signal.c
index 6f06f43..3d161f0 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1141,7 +1141,8 @@ static int kill_something_info(int sig, struct siginfo *info, pid_t pid)
 struct task_struct * p;

 for_each_process(p) {
- if (p->pid > 1 && !same_thread_group(p, current)) {
+ if (task_pid_vnr(p) > 1 &&
+ !same_thread_group(p, current)) {
 int err = group_send_sig_info(sig, info, p);
 ++count;
 if (err != -EPERM)
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 4220a2e..521960b 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -61,27 +61,23 @@ struct clocksource *clock;

 #ifdef CONFIG_GENERIC_TIME
 /**
- * __get_nsec_offset - Returns nanoseconds since last call to periodic_hook
+ * clocksource_forward_now - update clock to the current time
 *
- * private function, must hold xtime_lock lock when being
- * called. Returns the number of nanoseconds since the
- * last call to update_wall_time() (adjusted by NTP scaling)
+ * Forward the current clock to update its state since the last call to
+ * update_wall_time(). This is useful before significant clock changes,
+ * as it avoids having to deal with this time offset explicitly.
 */
-static inline s64 __get_nsec_offset(void)
+static void clocksource_forward_now(void)
 {
 cycle_t cycle_now, cycle_delta;
- s64 ns_offset;
+ s64 nsec;

- /* read clocksource: */
 cycle_now = clocksource_read(clock);
-
- /* calculate the delta since the last update_wall_time: */
 cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+ clock->cycle_last = cycle_now;

- /* convert to nanoseconds: */
- ns_offset = cyc2ns(clock, cycle_delta);
-
- return ns_offset;
+ nsec = cyc2ns(clock, cycle_delta);
+ timespec_add_ns(&xtime, nsec);
 }

 /**
@@ -92,6 +88,7 @@ static inline s64 __get_nsec_offset(void)
 */
 void getnstimeofday(struct timespec *ts)
 {
+ cycle_t cycle_now, cycle_delta;
 unsigned long seq;
 s64 nsecs;

@@ -101,7 +98,15 @@ void getnstimeofday(struct timespec *ts)
 seq = read_seqbegin(&xtime_lock);

 *ts = xtime;
- nsecs = __get_nsec_offset();
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - clock->cycle_last) & clock->mask;
+
+ /* convert to nanoseconds: */
+ nsecs = cyc2ns(clock, cycle_delta);

 } while (read_seqretry(&xtime_lock, seq));

@@ -134,22 +139,22 @@ EXPORT_SYMBOL(do_gettimeofday);
 */
 int do_settimeofday(struct timespec *tv)
 {
+ struct timespec ts_delta;
 unsigned long flags;
- time_t wtm_sec, sec = tv->tv_sec;
- long wtm_nsec, nsec = tv->tv_nsec;

 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
 return -EINVAL;

 write_seqlock_irqsave(&xtime_lock, flags);

- nsec -= __get_nsec_offset();
+ clocksource_forward_now();
+
+ ts_delta.tv_sec = tv->tv_sec - xtime.tv_sec;
+ ts_delta.tv_nsec = tv->tv_nsec - xtime.tv_nsec;
+ wall_to_monotonic = timespec_sub(wall_to_monotonic, ts_delta);

- wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - sec);
- wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - nsec);
+ xtime = *tv;

- set_normalized_timespec(&xtime, sec, nsec);
- set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
 update_xtime_cache(0);

 clock->error = 0;
@@ -175,22 +180,17 @@ EXPORT_SYMBOL(do_settimeofday);
 static void change_clocksource(void)
 {
 struct clocksource *new;
- cycle_t now;
- u64 nsec;

 new = clocksource_get_next();

 if (clock == new)
 return;

- new->cycle_last = 0;
- now = clocksource_read(new);
- nsec = __get_nsec_offset();
- timespec_add_ns(&xtime, nsec);
+ clocksource_forward_now();

 clock = new;
- clock->cycle_last = now;
-
+ clock->cycle_last = 0;
+ clock->cycle_last = clocksource_read(new);
 clock->error = 0;
 clock->xtime_nsec = 0;
 clocksource_calculate_interval(clock, NTP_INTERVAL_LENGTH);
@@ -205,8 +205,8 @@ static void change_clocksource(void)
 */
 }
 #else
+static inline void clocksource_forward_now(void) { }
 static inline void change_clocksource(void) { }
-static inline s64 __get_nsec_offset(void) { return 0; }
 #endif

 /**
@@ -268,8 +268,6 @@ void __init timekeeping_init(void)

 /* time in seconds when suspend began */
 static unsigned long timekeeping_suspend_time;
-/* xtime offset when we went into suspend */
-static s64 timekeeping_suspend_nsecs;

 /**
 * timekeeping_resume - Resumes the generic timekeeping subsystem.
@@ -295,8 +293,6 @@ static int timekeeping_resume(struct sys_device *dev)
 wall_to_monotonic.tv_sec -= sleep_length;
 total_sleep_time += sleep_length;
 }
- /* Make sure that we have the correct xtime reference */
- timespec_add_ns(&xtime, timekeeping_suspend_nsecs);
 update_xtime_cache(0);
 /* re-base the last cycle value */
 clock->cycle_last = 0;
@@ -322,8 +318,7 @@ static int timekeeping_suspend(struct sys_device *dev, pm_message_t state)
 timekeeping_suspend_time = read_persistent_clock();

 write_seqlock_irqsave(&xtime_lock, flags);
- /* Get the current xtime offset */
- timekeeping_suspend_nsecs = __get_nsec_offset();
+ clocksource_forward_now();
 timekeeping_suspended = 1;
 write_sequnlock_irqrestore(&xtime_lock, flags);

@@ -464,10 +459,10 @@ void update_wall_time(void)
 */
 while (offset >= clock->cycle_interval) {
 /* accumulate one interval */
- clock->xtime_nsec += clock->xtime_interval;
- clock->cycle_last += clock->cycle_interval;
 offset -= clock->cycle_interval;
+ clock->cycle_last += clock->cycle_interval;

+ clock->xtime_nsec += clock->xtime_interval;
 if (clock->xtime_nsec >= (u64)NSEC_PER_SEC << clock->shift) {
 clock->xtime_nsec -= (u64)NSEC_PER_SEC << clock->shift;
 xtime.tv_sec++;
diff --git a/lib/idr.c b/lib/idr.c
index 1c4f928..21154ae 100644
--- a/lib/idr.c
+++ b/lib/idr.c
@@ -121,7 +121,7 @@ int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
 {
 while (idp->id_free_cnt < IDR_FREE_MAX) {
 struct idr_layer *new;
- new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
+ new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
 if (new == NULL)
 return (0);
 move_to_free_list(idp, new);
@@ -623,16 +623,10 @@ void *idr_replace(struct idr *idp, void *ptr, int id)
 }
 EXPORT_SYMBOL(idr_replace);

-static void idr_cache_ctor(void *idr_layer)
-{
- memset(idr_layer, 0, sizeof(struct idr_layer));
-}
-
 void __init idr_init_cache(void)
 {
 idr_layer_cache = kmem_cache_create("idr_layer_cache",
- sizeof(struct idr_layer), 0, SLAB_PANIC,
- idr_cache_ctor);
+ sizeof(struct idr_layer), 0, SLAB_PANIC, NULL);
 }

 /**
diff --git a/mm/filemap.c b/mm/filemap.c
index f3033d0..8a477d3 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -209,7 +209,7 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
 int ret;
 struct writeback_control wbc = {
 .sync_mode = sync_mode,
- .nr_to_write = mapping->nrpages * 2,
+ .nr_to_write = LONG_MAX,
 .range_start = start,
 .range_end = end,
 };
@@ -1304,7 +1304,8 @@ generic_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
 goto out; /* skip atime */
 size = i_size_read(inode);
 if (pos < size) {
- retval = filemap_write_and_wait(mapping);
+ retval = filemap_write_and_wait_range(mapping, pos,
+ pos + iov_length(iov, nr_segs) - 1);
 if (!retval) {
 retval = mapping->a_ops->direct_IO(READ, iocb,
 iov, pos, nr_segs);
@@ -2117,18 +2118,10 @@ generic_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
 if (count != ocount)
 *nr_segs = iov_shorten((struct iovec *)iov, *nr_segs, count);

- /*
- * Unmap all mmappings of the file up-front.
- *
- * This will cause any pte dirty bits to be propagated into the
- * pageframes for the subsequent filemap_write_and_wait().
- */
 write_len = iov_length(iov, *nr_segs);
 end = (pos + write_len - 1) >> PAGE_CACHE_SHIFT;
- if (mapping_mapped(mapping))
- unmap_mapping_range(mapping, pos, write_len, 0);

- written = filemap_write_and_wait(mapping);
+ written = filemap_write_and_wait_range(mapping, pos, pos + write_len - 1);
 if (written)
 goto out;

@@ -2519,7 +2512,8 @@ generic_file_buffered_write(struct kiocb *iocb, const struct iovec *iov,
 * the file data here, to try to honour O_DIRECT expectations.
 */
 if (unlikely(file->f_flags & O_DIRECT) && written)
- status = filemap_write_and_wait(mapping);
+ status = filemap_write_and_wait_range(mapping,
+ pos, pos + written - 1);

 return written ? written : status;
 }
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 24de8b6..8875822 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -872,9 +872,11 @@ int write_cache_pages(struct address_space *mapping,
 int done = 0;
 struct pagevec pvec;
 int nr_pages;
+ pgoff_t uninitialized_var(writeback_index);
 pgoff_t index;
 pgoff_t end; /* Inclusive */
- int scanned = 0;
+ pgoff_t done_index;
+ int cycled;
 int range_whole = 0;

 if (wbc->nonblocking && bdi_write_congested(bdi)) {
@@ -884,82 +886,134 @@ int write_cache_pages(struct address_space *mapping,

 pagevec_init(&pvec, 0);
 if (wbc->range_cyclic) {
- index = mapping->writeback_index; /* Start from prev offset */
+ writeback_index = mapping->writeback_index; /* prev offset */
+ index = writeback_index;
+ if (index == 0)
+ cycled = 1;
+ else
+ cycled = 0;
 end = -1;
 } else {
 index = wbc->range_start >> PAGE_CACHE_SHIFT;
 end = wbc->range_end >> PAGE_CACHE_SHIFT;
 if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
 range_whole = 1;
- scanned = 1;
+ cycled = 1; /* ignore range_cyclic tests */
 }
 retry:
- while (!done && (index <= end) &&
- (nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
- PAGECACHE_TAG_DIRTY,
- min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1))) {
- unsigned i;
+ done_index = index;
+ while (!done && (index <= end)) {
+ int i;
+
+ nr_pages = pagevec_lookup_tag(&pvec, mapping, &index,
+ PAGECACHE_TAG_DIRTY,
+ min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
+ if (nr_pages == 0)
+ break;

- scanned = 1;
 for (i = 0; i < nr_pages; i++) {
 struct page *page = pvec.pages[i];

 /*
- * At this point we hold neither mapping->tree_lock nor
- * lock on the page itself: the page may be truncated or
- * invalidated (changing page->mapping to NULL), or even
- * swizzled back from swapper_space to tmpfs file
- * mapping
+ * At this point, the page may be truncated or
+ * invalidated (changing page->mapping to NULL), or
+ * even swizzled back from swapper_space to tmpfs file
+ * mapping. However, page->index will not change
+ * because we have a reference on the page.
 */
+ if (page->index > end) {
+ /*
+ * can't be range_cyclic (1st pass) because
+ * end == -1 in that case.
+ */
+ done = 1;
+ break;
+ }
+
+ done_index = page->index + 1;
+
 lock_page(page);

+ /*
+ * Page truncated or invalidated. We can freely skip it
+ * then, even for data integrity operations: the page
+ * has disappeared concurrently, so there could be no
+ * real expectation of this data interity operation
+ * even if there is now a new, dirty page at the same
+ * pagecache address.
+ */
 if (unlikely(page->mapping != mapping)) {
+continue_unlock:
 unlock_page(page);
 continue;
 }

- if (!wbc->range_cyclic && page->index > end) {
- done = 1;
- unlock_page(page);
- continue;
+ if (!PageDirty(page)) {
+ /* someone wrote it for us */
+ goto continue_unlock;
 }

- if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
-
- if (PageWriteback(page) ||
- !clear_page_dirty_for_io(page)) {
- unlock_page(page);
- continue;
+ if (PageWriteback(page)) {
+ if (wbc->sync_mode != WB_SYNC_NONE)
+ wait_on_page_writeback(page);
+ else
+ goto continue_unlock;
 }

+ BUG_ON(PageWriteback(page));
+ if (!clear_page_dirty_for_io(page))
+ goto continue_unlock;
+
 ret = (*writepage)(page, wbc, data);

- if (unlikely(ret == AOP_WRITEPAGE_ACTIVATE)) {
- unlock_page(page);
- ret = 0;
+ if (unlikely(ret)) {
+ if (ret == AOP_WRITEPAGE_ACTIVATE) {
+ unlock_page(page);
+ ret = 0;
+ } else {
+ /*
+ * done_index is set past this page,
+ * so media errors will not choke
+ * background writeout for the entire
+ * file. This has consequences for
+ * range_cyclic semantics (ie. it may
+ * not be suitable for data integrity
+ * writeout).
+ */
+ done = 1;
+ break;
+ }
+ }
+
+ if (wbc->sync_mode == WB_SYNC_NONE) {
+ wbc->nr_to_write--;
+ if (wbc->nr_to_write <= 0) {
+ done = 1;
+ break;
+ }
 }
- if (ret || (--(wbc->nr_to_write) <= 0))
- done = 1;
 if (wbc->nonblocking && bdi_write_congested(bdi)) {
 wbc->encountered_congestion = 1;
 done = 1;
+ break;
 }
 }
 pagevec_release(&pvec);
 cond_resched();
 }
- if (!scanned && !done) {
+ if (!cycled) {
 /*
+ * range_cyclic:
 * We hit the last page and there is more work to be done: wrap
 * back to the start of the file
 */
- scanned = 1;
+ cycled = 1;
 index = 0;
+ end = writeback_index - 1;
 goto retry;
 }
 if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
- mapping->writeback_index = index;
+ mapping->writeback_index = done_index;

 if (wbc->range_cont)
 wbc->range_start = index << PAGE_CACHE_SHIFT;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1ab341e..f57d576 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -576,10 +576,6 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 else if (!ret) {
 if (spliced)
 break;
- if (flags & SPLICE_F_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
 if (sock_flag(sk, SOCK_DONE))
 break;
 if (sk->sk_err) {
@@ -597,6 +593,10 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
 ret = -ENOTCONN;
 break;
 }
+ if (flags & SPLICE_F_NONBLOCK) {
+ ret = -EAGAIN;
+ break;
+ }
 if (!timeo) {
 ret = -EAGAIN;
 break;
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 29c7c99..52ee1dc 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -298,6 +298,10 @@ static void fib6_dump_end(struct netlink_callback *cb)
 struct fib6_walker_t *w = (void*)cb->args[2];

 if (w) {
+ if (cb->args[4]) {
+ cb->args[4] = 0;
+ fib6_walker_unlink(w);
+ }
 cb->args[2] = 0;
 kfree(w);
 }
@@ -330,15 +334,12 @@ static int fib6_dump_table(struct fib6_table *table, struct sk_buff *skb,
 read_lock_bh(&table->tb6_lock);
 res = fib6_walk_continue(w);
 read_unlock_bh(&table->tb6_lock);
- if (res != 0) {
- if (res < 0)
- fib6_walker_unlink(w);
- goto end;
+ if (res <= 0) {
+ fib6_walker_unlink(w);
+ cb->args[4] = 0;
 }
- fib6_walker_unlink(w);
- cb->args[4] = 0;
 }
-end:
+
 return res;
 }

diff --git a/net/sched/cls_u32.c b/net/sched/cls_u32.c
index 246f906..ea51fcd 100644
--- a/net/sched/cls_u32.c
+++ b/net/sched/cls_u32.c
@@ -637,8 +637,9 @@ static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
 break;

 n->next = *ins;
- wmb();
+ tcf_tree_lock(tp);
 *ins = n;
+ tcf_tree_unlock(tp);

 *arg = (unsigned long)n;
 return 0;
diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
index d14f020..d2943a4 100644
--- a/net/sched/sch_htb.c
+++ b/net/sched/sch_htb.c
@@ -924,6 +924,7 @@ static struct sk_buff *htb_dequeue(struct Qdisc *sch)
 }
 }
 sch->qstats.overlimits++;
+ qdisc_watchdog_cancel(&q->watchdog);
 qdisc_watchdog_schedule(&q->watchdog, next_event);
 fin:
 return skb;
diff --git a/net/sctp/sm_statefuns.c b/net/sctp/sm_statefuns.c
index 7c622af..649d174 100644
--- a/net/sctp/sm_statefuns.c
+++ b/net/sctp/sm_statefuns.c
@@ -3635,6 +3635,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
 {
 struct sctp_chunk *chunk = arg;
 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_fwdtsn_skip *skip;
 __u16 len;
 __u32 tsn;

@@ -3664,6 +3665,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn(const struct sctp_endpoint *ep,
 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 goto discard_noforce;

+ /* Silently discard the chunk if stream-id is not valid */
+ sctp_walk_fwdtsn(skip, chunk) {
+ if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ goto discard_noforce;
+ }
+
 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
 if (len > sizeof(struct sctp_fwdtsn_hdr))
 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
@@ -3695,6 +3702,7 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
 {
 struct sctp_chunk *chunk = arg;
 struct sctp_fwdtsn_hdr *fwdtsn_hdr;
+ struct sctp_fwdtsn_skip *skip;
 __u16 len;
 __u32 tsn;

@@ -3724,6 +3732,12 @@ sctp_disposition_t sctp_sf_eat_fwd_tsn_fast(
 if (sctp_tsnmap_check(&asoc->peer.tsn_map, tsn) < 0)
 goto gen_shutdown;

+ /* Silently discard the chunk if stream-id is not valid */
+ sctp_walk_fwdtsn(skip, chunk) {
+ if (ntohs(skip->stream) >= asoc->c.sinit_max_instreams)
+ goto gen_shutdown;
+ }
+
 sctp_add_cmd_sf(commands, SCTP_CMD_REPORT_FWDTSN, SCTP_U32(tsn));
 if (len > sizeof(struct sctp_fwdtsn_hdr))
 sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_FWDTSN,
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index 9b4e0e9..3c0f421 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -270,6 +270,7 @@ long keyctl_join_session_keyring(const char __user *_name)

 /* join the session */
 ret = join_session_keyring(name);
+ kfree(name);

 error:
 return ret;
diff --git a/sound/pci/hda/patch_analog.c b/sound/pci/hda/patch_analog.c
index 591f62f..8c857d5 100644
--- a/sound/pci/hda/patch_analog.c
+++ b/sound/pci/hda/patch_analog.c
@@ -629,6 +629,36 @@ static struct snd_kcontrol_new ad1986a_laptop_eapd_mixers[] = {
 HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
 HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
 HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Internal Mic Playback Volume", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Internal Mic Playback Switch", 0x17, 0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
+ HDA_CODEC_VOLUME("Capture Volume", 0x12, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("Capture Switch", 0x12, 0x0, HDA_OUTPUT),
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Capture Source",
+ .info = ad198x_mux_enum_info,
+ .get = ad198x_mux_enum_get,
+ .put = ad198x_mux_enum_put,
+ },
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "External Amplifier",
+ .info = ad198x_eapd_info,
+ .get = ad198x_eapd_get,
+ .put = ad198x_eapd_put,
+ .private_value = 0x1b | (1 << 8), /* port-D, inversed */
+ },
+ { } /* end */
+};
+
+static struct snd_kcontrol_new ad1986a_samsung_mixers[] = {
+ HDA_BIND_VOL("Master Playback Volume", &ad1986a_laptop_master_vol),
+ HDA_BIND_SW("Master Playback Switch", &ad1986a_laptop_master_sw),
+ HDA_CODEC_VOLUME("PCM Playback Volume", 0x03, 0x0, HDA_OUTPUT),
+ HDA_CODEC_MUTE("PCM Playback Switch", 0x03, 0x0, HDA_OUTPUT),
 HDA_CODEC_VOLUME("Mic Playback Volume", 0x13, 0x0, HDA_OUTPUT),
 HDA_CODEC_MUTE("Mic Playback Switch", 0x13, 0x0, HDA_OUTPUT),
 HDA_CODEC_VOLUME("Mic Boost", 0x0f, 0x0, HDA_OUTPUT),
@@ -917,6 +947,7 @@ enum {
 AD1986A_LAPTOP_EAPD,
 AD1986A_LAPTOP_AUTOMUTE,
 AD1986A_ULTRA,
+ AD1986A_SAMSUNG,
 AD1986A_MODELS
 };

@@ -927,6 +958,7 @@ static const char *ad1986a_models[AD1986A_MODELS] = {
 [AD1986A_LAPTOP_EAPD] = "laptop-eapd",
 [AD1986A_LAPTOP_AUTOMUTE] = "laptop-automute",
 [AD1986A_ULTRA] = "ultra",
+ [AD1986A_SAMSUNG] = "samsung",
 };

 static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
@@ -949,9 +981,9 @@ static struct snd_pci_quirk ad1986a_cfg_tbl[] = {
 SND_PCI_QUIRK(0x1179, 0xff40, "Toshiba", AD1986A_LAPTOP_EAPD),
 SND_PCI_QUIRK(0x144d, 0xb03c, "Samsung R55", AD1986A_3STACK),
 SND_PCI_QUIRK(0x144d, 0xc01e, "FSC V2060", AD1986A_LAPTOP),
- SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_LAPTOP_EAPD),
- SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_LAPTOP_EAPD),
+ SND_PCI_QUIRK(0x144d, 0xc023, "Samsung X60", AD1986A_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc024, "Samsung R65", AD1986A_SAMSUNG),
+ SND_PCI_QUIRK(0x144d, 0xc026, "Samsung X11", AD1986A_SAMSUNG),
 SND_PCI_QUIRK(0x144d, 0xc027, "Samsung Q1", AD1986A_ULTRA),
 SND_PCI_QUIRK(0x144d, 0xc504, "Samsung Q35", AD1986A_3STACK),
 SND_PCI_QUIRK(0x17aa, 0x1011, "Lenovo M55", AD1986A_LAPTOP),
@@ -1033,6 +1065,17 @@ static int patch_ad1986a(struct hda_codec *codec)
 break;
 case AD1986A_LAPTOP_EAPD:
 spec->mixers[0] = ad1986a_laptop_eapd_mixers;
+ spec->num_init_verbs = 2;
+ spec->init_verbs[1] = ad1986a_eapd_init_verbs;
+ spec->multiout.max_channels = 2;
+ spec->multiout.num_dacs = 1;
+ spec->multiout.dac_nids = ad1986a_laptop_dac_nids;
+ if (!is_jack_available(codec, 0x25))
+ spec->multiout.dig_out_nid = 0;
+ spec->input_mux = &ad1986a_laptop_eapd_capture_source;
+ break;
+ case AD1986A_SAMSUNG:
+ spec->mixers[0] = ad1986a_samsung_mixers;
 spec->num_init_verbs = 3;
 spec->init_verbs[1] = ad1986a_eapd_init_verbs;
 spec->init_verbs[2] = ad1986a_automic_verbs;
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index a1a3a34..7225f0f 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -9882,6 +9882,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
 SND_PCI_QUIRK(0x10cf, 0x142d, "Fujitsu Lifebook E8410", ALC262_FUJITSU),
 SND_PCI_QUIRK(0x144d, 0xc032, "Samsung Q1 Ultra", ALC262_ULTRA),
 SND_PCI_QUIRK(0x144d, 0xc039, "Samsung Q1U EL", ALC262_ULTRA),
+ SND_PCI_QUIRK(0x144d, 0xc510, "Samsung Q45", ALC262_HIPPO),
 SND_PCI_QUIRK(0x17aa, 0x384e, "Lenovo 3000 y410", ALC262_LENOVO_3000),
 SND_PCI_QUIRK(0x17ff, 0x0560, "Benq ED8", ALC262_BENQ_ED8),
 SND_PCI_QUIRK(0x17ff, 0x058d, "Benq T31-16", ALC262_BENQ_T31),