From: Greg Kroah-Hartman <gregkh@suse.de>
Subject: Linux 2.6.27.22

Upstream 2.6.27.22 release from kernel.org

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

diff --git a/Makefile b/Makefile
index 0664c76..d3b7fc8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = .21
+EXTRAVERSION = .22
 NAME = Trembling Tortoise

 # *DOCUMENTATION*
diff --git a/arch/powerpc/include/asm/futex.h b/arch/powerpc/include/asm/futex.h
index 6d406c5..9696cc3 100644
--- a/arch/powerpc/include/asm/futex.h
+++ b/arch/powerpc/include/asm/futex.h
@@ -27,7 +27,7 @@
 PPC_LONG "1b,4b,2b,4b\n" \
 ".previous" \
 : "=&r" (oldval), "=&r" (ret) \
- : "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
+ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \
 : "cr0", "memory")

 static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
@@ -47,19 +47,19 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)

 switch (op) {
 case FUTEX_OP_SET:
- __futex_atomic_op("", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("mr %1,%4\n", ret, oldval, uaddr, oparg);
 break;
 case FUTEX_OP_ADD:
- __futex_atomic_op("add %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("add %1,%0,%4\n", ret, oldval, uaddr, oparg);
 break;
 case FUTEX_OP_OR:
- __futex_atomic_op("or %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("or %1,%0,%4\n", ret, oldval, uaddr, oparg);
 break;
 case FUTEX_OP_ANDN:
- __futex_atomic_op("andc %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("andc %1,%0,%4\n", ret, oldval, uaddr, oparg);
 break;
 case FUTEX_OP_XOR:
- __futex_atomic_op("xor %1,%0,%1\n", ret, oldval, uaddr, oparg);
+ __futex_atomic_op("xor %1,%0,%4\n", ret, oldval, uaddr, oparg);
 break;
 default:
 ret = -ENOSYS;
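
[note] The futex.h hunk above fixes a data-corrupting bug: the old matching
constraint "1" (oparg) made oparg share a register with output operand %1,
which the insn strings overwrite, so when the stwcx. failed and the loop
branched back to 1: the retry ran with a clobbered oparg. Giving oparg its
own operand (%4) keeps it intact across retries. A minimal sketch of the
same lwarx/stwcx. rule (illustrative only, not part of the patch):

	static inline int add_return_sketch(int a, int *p)
	{
		int t;

		asm volatile(
	"1:	lwarx	%0,0,%2\n"	/* t = *p, with reservation */
	"	add	%0,%1,%0\n"	/* t += a; %1 is never written */
	"	stwcx.	%0,0,%2\n"	/* conditional store, may fail */
	"	bne-	1b"		/* on retry, %1 still holds a */
		: "=&r" (t)
		: "r" (a), "r" (p)
		: "cc", "memory");

		return t;
	}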
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 101ed87..ae1c5b5 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -309,6 +309,25 @@ static inline void prefetchw(const void *x)
 #define HAVE_ARCH_PICK_MMAP_LAYOUT
 #endif

+#ifdef CONFIG_PPC64
+static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+{
+ unsigned long sp;
+
+ if (is_32)
+ sp = regs->gpr[1] & 0x0ffffffffUL;
+ else
+ sp = regs->gpr[1];
+
+ return sp;
+}
+#else
+static inline unsigned long get_clean_sp(struct pt_regs *regs, int is_32)
+{
+ return regs->gpr[1];
+}
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* __ASSEMBLY__ */
 #endif /* _ASM_POWERPC_PROCESSOR_H */
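
[note] get_clean_sp() exists because a 32-bit task on a 64-bit kernel may
carry stale bits in the upper half of r1; signal-frame placement must trust
only the low 32 bits. Worked with illustrative values:

	unsigned long sp = 0xdeadbeef7ffff8d0UL;	/* upper half: junk */
	sp &= 0x0ffffffffUL;				/* 0x000000007ffff8d0 */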
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index a54405e..00b5078 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -26,12 +26,12 @@ int show_unhandled_signals = 0;
 * Allocate space for the signal frame
 */
 void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
- size_t frame_size)
+ size_t frame_size, int is_32)
 {
 unsigned long oldsp, newsp;

 /* Default to using normal stack */
- oldsp = regs->gpr[1];
+ oldsp = get_clean_sp(regs, is_32);

 /* Check for alt stack */
 if ((ka->sa.sa_flags & SA_ONSTACK) &&
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 28f4b9f..f77d502 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -13,7 +13,7 @@
 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))

 extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
- size_t frame_size);
+ size_t frame_size, int is_32);
 extern void restore_sigmask(sigset_t *set);

 extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index a6a4310..9084a27 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -836,7 +836,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,

 /* Set up Signal Frame */
 /* Put a Real Time Context onto stack */
- rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf));
+ rt_sf = get_sigframe(ka, regs, sizeof(*rt_sf), 1);
 addr = rt_sf;
 if (unlikely(rt_sf == NULL))
 goto badframe;
@@ -1170,7 +1170,7 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
 unsigned long newsp = 0;

 /* Set up Signal Frame */
- frame = get_sigframe(ka, regs, sizeof(*frame));
+ frame = get_sigframe(ka, regs, sizeof(*frame), 1);
 if (unlikely(frame == NULL))
 goto badframe;
 sc = (struct sigcontext __user *) &frame->sctx;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index e4acdbd..3de15b0 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -404,7 +404,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
 unsigned long newsp = 0;
 long err = 0;

- frame = get_sigframe(ka, regs, sizeof(*frame));
+ frame = get_sigframe(ka, regs, sizeof(*frame), 0);
 if (unlikely(frame == NULL))
 goto badframe;

diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
index 8c3c25f..a99dbbe 100644
--- a/arch/x86/boot/memory.c
+++ b/arch/x86/boot/memory.c
@@ -27,13 +27,14 @@ static int detect_memory_e820(void)
 do {
 size = sizeof(struct e820entry);

- /* Important: %edx is clobbered by some BIOSes,
- so it must be either used for the error output
+ /* Important: %edx and %esi are clobbered by some BIOSes,
+ so they must be either used for the error output
 or explicitly marked clobbered. */
 asm("int $0x15; setc %0"
 : "=d" (err), "+b" (next), "=a" (id), "+c" (size),
 "=m" (*desc)
- : "D" (desc), "d" (SMAP), "a" (0xe820));
+ : "D" (desc), "d" (SMAP), "a" (0xe820)
+ : "esi");

 /* BIOSes which terminate the chain with CF = 1 as opposed
 to %ebx = 0 don't always report the SMAP signature on
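
[note] The rewritten comment in memory.c states the whole rule: any register
a BIOS service might scribble on must be visible to the compiler, either
recycled as an output (as %edx is for err) or named in the clobber list (as
%esi now is); otherwise GCC may keep a live value there across the int $0x15
and read back garbage. Reduced to its skeleton (hedged sketch, not
boot-code-accurate):

	unsigned int status;

	asm volatile("int $0x15"
		     : "=a" (status)		/* BIOS return code */
		     : "a" (0xe820)		/* function number */
		     : "esi", "edx", "memory");	/* whatever it may trash */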
diff --git a/arch/x86/kernel/cpu/mtrr/generic.c b/arch/x86/kernel/cpu/mtrr/generic.c
index cb7d3b6..26baabd 100644
--- a/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/arch/x86/kernel/cpu/mtrr/generic.c
@@ -45,6 +45,32 @@ u64 mtrr_tom2;
 static int mtrr_show;
 module_param_named(show, mtrr_show, bool, 0);

+/**
+ * BIOS is expected to clear MtrrFixDramModEn bit, see for example
+ * "BIOS and Kernel Developer's Guide for the AMD Athlon 64 and AMD
+ * Opteron Processors" (26094 Rev. 3.30 February 2006), section
+ * "13.2.1.2 SYSCFG Register": "The MtrrFixDramModEn bit should be set
+ * to 1 during BIOS initalization of the fixed MTRRs, then cleared to
+ * 0 for operation."
+ */
+static inline void k8_check_syscfg_dram_mod_en(void)
+{
+ u32 lo, hi;
+
+ if (!((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) &&
+ (boot_cpu_data.x86 >= 0x0f)))
+ return;
+
+ rdmsr(MSR_K8_SYSCFG, lo, hi);
+ if (lo & K8_MTRRFIXRANGE_DRAM_MODIFY) {
+ printk(KERN_ERR "MTRR: CPU %u: SYSCFG[MtrrFixDramModEn]"
+ " not cleared by BIOS, clearing this bit\n",
+ smp_processor_id());
+ lo &= ~K8_MTRRFIXRANGE_DRAM_MODIFY;
+ mtrr_wrmsr(MSR_K8_SYSCFG, lo, hi);
+ }
+}
+
 /*
 * Returns the effective MTRR type for the region
 * Error returns:
@@ -178,6 +204,8 @@ get_fixed_ranges(mtrr_type * frs)
 unsigned int *p = (unsigned int *) frs;
 int i;

+ k8_check_syscfg_dram_mod_en();
+
 rdmsr(MTRRfix64K_00000_MSR, p[0], p[1]);

 for (i = 0; i < 2; i++)
@@ -312,27 +340,10 @@ void mtrr_wrmsr(unsigned msr, unsigned a, unsigned b)
 }

 /**
- * Enable and allow read/write of extended fixed-range MTRR bits on K8 CPUs
- * see AMD publication no. 24593, chapter 3.2.1 for more information
- */
-static inline void k8_enable_fixed_iorrs(void)
-{
- unsigned lo, hi;
-
- rdmsr(MSR_K8_SYSCFG, lo, hi);
- mtrr_wrmsr(MSR_K8_SYSCFG, lo
- | K8_MTRRFIXRANGE_DRAM_ENABLE
- | K8_MTRRFIXRANGE_DRAM_MODIFY, hi);
-}
-
-/**
 * set_fixed_range - checks & updates a fixed-range MTRR if it differs from the value it should have
 * @msr: MSR address of the MTTR which should be checked and updated
 * @changed: pointer which indicates whether the MTRR needed to be changed
 * @msrwords: pointer to the MSR values which the MSR should have
- *
- * If K8 extentions are wanted, update the K8 SYSCFG MSR also.
- * See AMD publication no. 24593, chapter 7.8.1, page 233 for more information.
 */
 static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
 {
@@ -341,10 +352,6 @@ static void set_fixed_range(int msr, bool *changed, unsigned int *msrwords)
 rdmsr(msr, lo, hi);

 if (lo != msrwords[0] || hi != msrwords[1]) {
- if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
- (boot_cpu_data.x86 >= 0x0f && boot_cpu_data.x86 <= 0x11) &&
- ((msrwords[0] | msrwords[1]) & K8_MTRR_RDMEM_WRMEM_MASK))
- k8_enable_fixed_iorrs();
 mtrr_wrmsr(msr, msrwords[0], msrwords[1]);
 *changed = true;
 }
@@ -428,6 +435,8 @@ static int set_fixed_ranges(mtrr_type * frs)
 bool changed = false;
 int block=-1, range;

+ k8_check_syscfg_dram_mod_en();
+
 while (fixed_range_blocks[++block].ranges)
 for (range=0; range < fixed_range_blocks[block].ranges; range++)
 set_fixed_range(fixed_range_blocks[block].base_msr + range,
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index 8791fc5..b4b899a 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -326,6 +326,9 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
 return -EINVAL;
 }
 flags = new_flags;
+ vma->vm_page_prot = __pgprot(
+ (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK) |
+ flags);
 }

 if (((vma->vm_pgoff < max_low_pfn_mapped) ||
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index 2694998..e618cbe 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -968,9 +968,9 @@ static const struct acpi_device_id ec_device_ids[] = {

 int __init acpi_ec_ecdt_probe(void)
 {
- int ret;
 acpi_status status;
 struct acpi_table_ecdt *ecdt_ptr;
+ acpi_handle dummy;

 boot_ec = make_acpi_ec();
 if (!boot_ec)
@@ -996,30 +996,31 @@ int __init acpi_ec_ecdt_probe(void)
 boot_ec->gpe = ecdt_ptr->gpe;
 boot_ec->handle = ACPI_ROOT_OBJECT;
 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
- } else {
- /* This workaround is needed only on some broken machines,
- * which require early EC, but fail to provide ECDT */
- acpi_handle x;
- printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
- status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
- boot_ec, NULL);
- /* Check that acpi_get_devices actually find something */
- if (ACPI_FAILURE(status) || !boot_ec->handle)
- goto error;
- /* We really need to limit this workaround, the only ASUS,
- * which needs it, has fake EC._INI method, so use it as flag.
- * Keep boot_ec struct as it will be needed soon.
- */
- if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &x)))
- return -ENODEV;
+ /* Add some basic check against completely broken table */
+ if (boot_ec->data_addr != boot_ec->command_addr)
+ goto install;
+ /* fall through */
 }
-
- ret = ec_install_handlers(boot_ec);
- if (!ret) {
+ /* This workaround is needed only on some broken machines,
+ * which require early EC, but fail to provide ECDT */
+ printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
+ status = acpi_get_devices(ec_device_ids[0].id, ec_parse_device,
+ boot_ec, NULL);
+ /* Check that acpi_get_devices actually find something */
+ if (ACPI_FAILURE(status) || !boot_ec->handle)
+ goto error;
+ /* We really need to limit this workaround, the only ASUS,
+ * which needs it, has fake EC._INI method, so use it as flag.
+ * Keep boot_ec struct as it will be needed soon.
+ */
+ if (ACPI_FAILURE(acpi_get_handle(boot_ec->handle, "_INI", &dummy)))
+ return -ENODEV;
+install:
+ if (!ec_install_handlers(boot_ec)) {
 first_ec = boot_ec;
 return 0;
 }
- error:
+error:
 kfree(boot_ec);
 boot_ec = NULL;
 return -ENODEV;
diff --git a/drivers/ata/pata_hpt37x.c b/drivers/ata/pata_hpt37x.c
index 4216399..233a5fd 100644
--- a/drivers/ata/pata_hpt37x.c
+++ b/drivers/ata/pata_hpt37x.c
@@ -8,7 +8,7 @@
 * Copyright (C) 1999-2003 Andre Hedrick <andre@linux-ide.org>
 * Portions Copyright (C) 2001 Sun Microsystems, Inc.
 * Portions Copyright (C) 2003 Red Hat Inc
- * Portions Copyright (C) 2005-2007 MontaVista Software, Inc.
+ * Portions Copyright (C) 2005-2009 MontaVista Software, Inc.
 *
 * TODO
 * Look into engine reset on timeout errors. Should not be required.
@@ -24,7 +24,7 @@
 #include <linux/libata.h>

 #define DRV_NAME "pata_hpt37x"
-#define DRV_VERSION "0.6.11"
+#define DRV_VERSION "0.6.12"

 struct hpt_clock {
 u8 xfer_speed;
@@ -445,23 +445,6 @@ static void hpt370_set_dmamode(struct ata_port *ap, struct ata_device *adev)
 }

 /**
- * hpt370_bmdma_start - DMA engine begin
- * @qc: ATA command
- *
- * The 370 and 370A want us to reset the DMA engine each time we
- * use it. The 372 and later are fine.
- */
-
-static void hpt370_bmdma_start(struct ata_queued_cmd *qc)
-{
- struct ata_port *ap = qc->ap;
- struct pci_dev *pdev = to_pci_dev(ap->host->dev);
- pci_write_config_byte(pdev, 0x50 + 4 * ap->port_no, 0x37);
- udelay(10);
- ata_bmdma_start(qc);
-}
-
-/**
 * hpt370_bmdma_end - DMA engine stop
 * @qc: ATA command
 *
@@ -598,7 +581,6 @@ static struct scsi_host_template hpt37x_sht = {
 static struct ata_port_operations hpt370_port_ops = {
 .inherits = &ata_bmdma_port_ops,

- .bmdma_start = hpt370_bmdma_start,
 .bmdma_stop = hpt370_bmdma_stop,

 .mode_filter = hpt370_filter,
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index 118dbde..8cf3dca 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -1207,7 +1207,7 @@ void *agp_generic_alloc_page(struct agp_bridge_data *bridge)
 {
 struct page * page;

- page = alloc_page(GFP_KERNEL | GFP_DMA32);
+ page = alloc_page(GFP_KERNEL | GFP_DMA32 | __GFP_ZERO);
 if (page == NULL)
 return NULL;

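[note] The agp change adds __GFP_ZERO because these pages can end up mapped
into user space; handing out an unscrubbed page would leak whatever the
previous owner left behind. Zeroing at allocation replaces the easy-to-miss
two-step version, sketched here for contrast:

	/* equivalent, but every allocation path must remember the wipe */
	page = alloc_page(GFP_KERNEL | GFP_DMA32);
	if (page == NULL)
		return NULL;
	clear_page(page_address(page));
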
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 47b8cf2..92022aa 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -90,6 +90,7 @@ out1:
 blkdev_put(bdev);
 out:
 mutex_unlock(&raw_mutex);
+ unlock_kernel();
 return err;
 }

diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 2d637e0..fdcd0ab 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -101,6 +101,7 @@ struct buffer_desc {
 u32 phys_addr;
 u32 __reserved[4];
 struct buffer_desc *next;
+ enum dma_data_direction dir;
 };

 struct crypt_ctl {
@@ -132,14 +133,10 @@ struct crypt_ctl {
 struct ablk_ctx {
 struct buffer_desc *src;
 struct buffer_desc *dst;
- unsigned src_nents;
- unsigned dst_nents;
 };

 struct aead_ctx {
 struct buffer_desc *buffer;
- unsigned short assoc_nents;
- unsigned short src_nents;
 struct scatterlist ivlist;
 /* used when the hmac is not on one sg entry */
 u8 *hmac_virt;
@@ -312,7 +309,7 @@ static struct crypt_ctl *get_crypt_desc_emerg(void)
 }
 }

-static void free_buf_chain(struct buffer_desc *buf, u32 phys)
+static void free_buf_chain(struct device *dev, struct buffer_desc *buf,u32 phys)
 {
 while (buf) {
 struct buffer_desc *buf1;
@@ -320,6 +317,7 @@ static void free_buf_chain(struct buffer_desc *buf, u32 phys)

 buf1 = buf->next;
 phys1 = buf->phys_next;
+ dma_unmap_single(dev, buf->phys_next, buf->buf_len, buf->dir);
 dma_pool_free(buffer_pool, buf, phys);
 buf = buf1;
 phys = phys1;
@@ -348,7 +346,6 @@ static void one_packet(dma_addr_t phys)
 struct crypt_ctl *crypt;
 struct ixp_ctx *ctx;
 int failed;
- enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;

 failed = phys & 0x1 ? -EBADMSG : 0;
 phys &= ~0x3;
@@ -358,13 +355,8 @@ static void one_packet(dma_addr_t phys)
 case CTL_FLAG_PERFORM_AEAD: {
 struct aead_request *req = crypt->data.aead_req;
 struct aead_ctx *req_ctx = aead_request_ctx(req);
- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents,
- DMA_TO_DEVICE);
- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
- dma_unmap_sg(dev, req->src, req_ctx->src_nents,
- DMA_BIDIRECTIONAL);

- free_buf_chain(req_ctx->buffer, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 if (req_ctx->hmac_virt) {
 finish_scattered_hmac(crypt);
 }
@@ -374,16 +366,11 @@ static void one_packet(dma_addr_t phys)
 case CTL_FLAG_PERFORM_ABLK: {
 struct ablkcipher_request *req = crypt->data.ablk_req;
 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
- int nents;
+
 if (req_ctx->dst) {
- nents = req_ctx->dst_nents;
- dma_unmap_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
- free_buf_chain(req_ctx->dst, crypt->dst_buf);
- src_direction = DMA_TO_DEVICE;
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 }
- nents = req_ctx->src_nents;
- dma_unmap_sg(dev, req->src, nents, src_direction);
- free_buf_chain(req_ctx->src, crypt->src_buf);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 req->base.complete(&req->base, failed);
 break;
 }
@@ -748,56 +735,35 @@ static int setup_cipher(struct crypto_tfm *tfm, int encrypt,
 return 0;
 }

-static int count_sg(struct scatterlist *sg, int nbytes)
+static struct buffer_desc *chainup_buffers(struct device *dev,
+ struct scatterlist *sg, unsigned nbytes,
+ struct buffer_desc *buf, gfp_t flags,
+ enum dma_data_direction dir)
 {
- int i;
- for (i = 0; nbytes > 0; i++, sg = sg_next(sg))
- nbytes -= sg->length;
- return i;
-}
-
-static struct buffer_desc *chainup_buffers(struct scatterlist *sg,
- unsigned nbytes, struct buffer_desc *buf, gfp_t flags)
-{
- int nents = 0;
-
- while (nbytes > 0) {
+ for (;nbytes > 0; sg = scatterwalk_sg_next(sg)) {
+ unsigned len = min(nbytes, sg->length);
 struct buffer_desc *next_buf;
 u32 next_buf_phys;
- unsigned len = min(nbytes, sg_dma_len(sg));
+ void *ptr;

- nents++;
 nbytes -= len;
- if (!buf->phys_addr) {
- buf->phys_addr = sg_dma_address(sg);
- buf->buf_len = len;
- buf->next = NULL;
- buf->phys_next = 0;
- goto next;
- }
- /* Two consecutive chunks on one page may be handled by the old
- * buffer descriptor, increased by the length of the new one
- */
- if (sg_dma_address(sg) == buf->phys_addr + buf->buf_len) {
- buf->buf_len += len;
- goto next;
- }
+ ptr = page_address(sg_page(sg)) + sg->offset;
 next_buf = dma_pool_alloc(buffer_pool, flags, &next_buf_phys);
- if (!next_buf)
- return NULL;
+ if (!next_buf) {
+ buf = NULL;
+ break;
+ }
+ sg_dma_address(sg) = dma_map_single(dev, ptr, len, dir);
 buf->next = next_buf;
 buf->phys_next = next_buf_phys;
-
 buf = next_buf;
- buf->next = NULL;
- buf->phys_next = 0;
+
 buf->phys_addr = sg_dma_address(sg);
 buf->buf_len = len;
-next:
- if (nbytes > 0) {
- sg = sg_next(sg);
- }
+ buf->dir = dir;
 }
+ buf->next = NULL;
+ buf->phys_next = 0;
 return buf;
 }

@@ -858,12 +824,12 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 struct ixp_ctx *ctx = crypto_ablkcipher_ctx(tfm);
 unsigned ivsize = crypto_ablkcipher_ivsize(tfm);
- int ret = -ENOMEM;
 struct ix_sa_dir *dir;
 struct crypt_ctl *crypt;
- unsigned int nbytes = req->nbytes, nents;
+ unsigned int nbytes = req->nbytes;
 enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 struct ablk_ctx *req_ctx = ablkcipher_request_ctx(req);
+ struct buffer_desc src_hook;
 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 GFP_KERNEL : GFP_ATOMIC;

@@ -876,7 +842,7 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)

 crypt = get_crypt_desc();
 if (!crypt)
- return ret;
+ return -ENOMEM;

 crypt->data.ablk_req = req;
 crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -889,53 +855,41 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
 BUG_ON(ivsize && !req->info);
 memcpy(crypt->iv, req->info, ivsize);
 if (req->src != req->dst) {
+ struct buffer_desc dst_hook;
 crypt->mode |= NPE_OP_NOT_IN_PLACE;
- nents = count_sg(req->dst, nbytes);
 /* This was never tested by Intel
 * for more than one dst buffer, I think. */
- BUG_ON(nents != 1);
- req_ctx->dst_nents = nents;
- dma_map_sg(dev, req->dst, nents, DMA_FROM_DEVICE);
- req_ctx->dst = dma_pool_alloc(buffer_pool, flags,&crypt->dst_buf);
- if (!req_ctx->dst)
- goto unmap_sg_dest;
- req_ctx->dst->phys_addr = 0;
- if (!chainup_buffers(req->dst, nbytes, req_ctx->dst, flags))
+ BUG_ON(req->dst->length < nbytes);
+ req_ctx->dst = NULL;
+ if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
+ flags, DMA_FROM_DEVICE))
 goto free_buf_dest;
 src_direction = DMA_TO_DEVICE;
+ req_ctx->dst = dst_hook.next;
+ crypt->dst_buf = dst_hook.phys_next;
 } else {
 req_ctx->dst = NULL;
- req_ctx->dst_nents = 0;
 }
- nents = count_sg(req->src, nbytes);
- req_ctx->src_nents = nents;
- dma_map_sg(dev, req->src, nents, src_direction);
-
- req_ctx->src = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
- if (!req_ctx->src)
- goto unmap_sg_src;
- req_ctx->src->phys_addr = 0;
- if (!chainup_buffers(req->src, nbytes, req_ctx->src, flags))
+ req_ctx->src = NULL;
+ if (!chainup_buffers(dev, req->src, nbytes, &src_hook,
+ flags, src_direction))
 goto free_buf_src;

+ req_ctx->src = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
 crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 BUG_ON(qmgr_stat_overflow(SEND_QID));
 return -EINPROGRESS;

 free_buf_src:
- free_buf_chain(req_ctx->src, crypt->src_buf);
-unmap_sg_src:
- dma_unmap_sg(dev, req->src, req_ctx->src_nents, src_direction);
+ free_buf_chain(dev, req_ctx->src, crypt->src_buf);
 free_buf_dest:
 if (req->src != req->dst) {
- free_buf_chain(req_ctx->dst, crypt->dst_buf);
-unmap_sg_dest:
- dma_unmap_sg(dev, req->src, req_ctx->dst_nents,
- DMA_FROM_DEVICE);
+ free_buf_chain(dev, req_ctx->dst, crypt->dst_buf);
 }
 crypt->ctl_flags = CTL_FLAG_UNUSED;
- return ret;
+ return -ENOMEM;
 }

 static int ablk_encrypt(struct ablkcipher_request *req)
@@ -983,7 +937,7 @@ static int hmac_inconsistent(struct scatterlist *sg, unsigned start,
 break;

 offset += sg->length;
- sg = sg_next(sg);
+ sg = scatterwalk_sg_next(sg);
 }
 return (start + nbytes > offset + sg->length);
 }
@@ -995,11 +949,10 @@ static int aead_perform(struct aead_request *req, int encrypt,
 struct ixp_ctx *ctx = crypto_aead_ctx(tfm);
 unsigned ivsize = crypto_aead_ivsize(tfm);
 unsigned authsize = crypto_aead_authsize(tfm);
- int ret = -ENOMEM;
 struct ix_sa_dir *dir;
 struct crypt_ctl *crypt;
- unsigned int cryptlen, nents;
- struct buffer_desc *buf;
+ unsigned int cryptlen;
+ struct buffer_desc *buf, src_hook;
 struct aead_ctx *req_ctx = aead_request_ctx(req);
 gfp_t flags = req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP ?
 GFP_KERNEL : GFP_ATOMIC;
@@ -1020,7 +973,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 }
 crypt = get_crypt_desc();
 if (!crypt)
- return ret;
+ return -ENOMEM;

 crypt->data.aead_req = req;
 crypt->crypto_ctx = dir->npe_ctx_phys;
@@ -1039,31 +992,27 @@ static int aead_perform(struct aead_request *req, int encrypt,
 BUG(); /* -ENOTSUP because of my lazyness */
 }

- req_ctx->buffer = dma_pool_alloc(buffer_pool, flags, &crypt->src_buf);
- if (!req_ctx->buffer)
- goto out;
- req_ctx->buffer->phys_addr = 0;
 /* ASSOC data */
- nents = count_sg(req->assoc, req->assoclen);
- req_ctx->assoc_nents = nents;
- dma_map_sg(dev, req->assoc, nents, DMA_TO_DEVICE);
- buf = chainup_buffers(req->assoc, req->assoclen, req_ctx->buffer,flags);
+ buf = chainup_buffers(dev, req->assoc, req->assoclen, &src_hook,
+ flags, DMA_TO_DEVICE);
+ req_ctx->buffer = src_hook.next;
+ crypt->src_buf = src_hook.phys_next;
 if (!buf)
- goto unmap_sg_assoc;
+ goto out;
 /* IV */
 sg_init_table(&req_ctx->ivlist, 1);
 sg_set_buf(&req_ctx->ivlist, iv, ivsize);
- dma_map_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
- buf = chainup_buffers(&req_ctx->ivlist, ivsize, buf, flags);
+ buf = chainup_buffers(dev, &req_ctx->ivlist, ivsize, buf, flags,
+ DMA_BIDIRECTIONAL);
 if (!buf)
- goto unmap_sg_iv;
+ goto free_chain;
 if (unlikely(hmac_inconsistent(req->src, cryptlen, authsize))) {
 /* The 12 hmac bytes are scattered,
 * we need to copy them into a safe buffer */
 req_ctx->hmac_virt = dma_pool_alloc(buffer_pool, flags,
 &crypt->icv_rev_aes);
 if (unlikely(!req_ctx->hmac_virt))
- goto unmap_sg_iv;
+ goto free_chain;
 if (!encrypt) {
 scatterwalk_map_and_copy(req_ctx->hmac_virt,
 req->src, cryptlen, authsize, 0);
@@ -1073,33 +1022,28 @@ static int aead_perform(struct aead_request *req, int encrypt,
 req_ctx->hmac_virt = NULL;
 }
 /* Crypt */
- nents = count_sg(req->src, cryptlen + authsize);
- req_ctx->src_nents = nents;
- dma_map_sg(dev, req->src, nents, DMA_BIDIRECTIONAL);
- buf = chainup_buffers(req->src, cryptlen + authsize, buf, flags);
+ buf = chainup_buffers(dev, req->src, cryptlen + authsize, buf, flags,
+ DMA_BIDIRECTIONAL);
 if (!buf)
- goto unmap_sg_src;
+ goto free_hmac_virt;
 if (!req_ctx->hmac_virt) {
 crypt->icv_rev_aes = buf->phys_addr + buf->buf_len - authsize;
 }
+
 crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
 qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
 BUG_ON(qmgr_stat_overflow(SEND_QID));
 return -EINPROGRESS;
-unmap_sg_src:
- dma_unmap_sg(dev, req->src, req_ctx->src_nents, DMA_BIDIRECTIONAL);
+free_hmac_virt:
 if (req_ctx->hmac_virt) {
 dma_pool_free(buffer_pool, req_ctx->hmac_virt,
 crypt->icv_rev_aes);
 }
-unmap_sg_iv:
- dma_unmap_sg(dev, &req_ctx->ivlist, 1, DMA_BIDIRECTIONAL);
-unmap_sg_assoc:
- dma_unmap_sg(dev, req->assoc, req_ctx->assoc_nents, DMA_TO_DEVICE);
- free_buf_chain(req_ctx->buffer, crypt->src_buf);
+free_chain:
+ free_buf_chain(dev, req_ctx->buffer, crypt->src_buf);
 out:
 crypt->ctl_flags = CTL_FLAG_UNUSED;
- return ret;
+ return -ENOMEM;
 }

 static int aead_setup(struct crypto_aead *tfm, unsigned int authsize)
diff --git a/drivers/ide/pci/hpt366.c b/drivers/ide/pci/hpt366.c
index c37ab17..7e443a3 100644
--- a/drivers/ide/pci/hpt366.c
+++ b/drivers/ide/pci/hpt366.c
@@ -114,6 +114,8 @@
 * the register setting lists into the table indexed by the clock selected
 * - set the correct hwif->ultra_mask for each individual chip
 * - add Ultra and MW DMA mode filtering for the HPT37[24] based SATA cards
+ * - stop resetting HPT370's state machine before each DMA transfer as that has
+ * caused more harm than good
 * Sergei Shtylyov, <sshtylyov@ru.mvista.com> or <source@mvista.com>
 */

@@ -134,7 +136,7 @@
 #define DRV_NAME "hpt366"

 /* various tuning parameters */
-#define HPT_RESET_STATE_ENGINE
+#undef HPT_RESET_STATE_ENGINE
 #undef HPT_DELAY_INTERRUPT
 #define HPT_SERIALIZE_IO 0

diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 3f11910..fcec2df 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -46,6 +46,9 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
 /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */
 #define IF_WRITEBUF 264

+/* interrupt pipe message size according to ibid. ch. 2.2 */
+#define IP_MSGSIZE 3
+
 /* Values for the Gigaset 307x */
 #define USB_GIGA_VENDOR_ID 0x0681
 #define USB_3070_PRODUCT_ID 0x0001
@@ -110,7 +113,7 @@ struct bas_cardstate {
 unsigned char *rcvbuf; /* AT reply receive buffer */

 struct urb *urb_int_in; /* URB for interrupt pipe */
- unsigned char int_in_buf[3];
+ unsigned char *int_in_buf;

 spinlock_t lock; /* locks all following */
 int basstate; /* bitmap (BS_*) */
@@ -657,7 +660,7 @@ static void read_int_callback(struct urb *urb)
 }

 /* drop incomplete packets even if the missing bytes wouldn't matter */
- if (unlikely(urb->actual_length < 3)) {
+ if (unlikely(urb->actual_length < IP_MSGSIZE)) {
 dev_warn(cs->dev, "incomplete interrupt packet (%d bytes)\n",
 urb->actual_length);
 goto resubmit;
@@ -2127,6 +2130,7 @@ static void gigaset_reinitbcshw(struct bc_state *bcs)
 static void gigaset_freecshw(struct cardstate *cs)
 {
 /* timers, URBs and rcvbuf are disposed of in disconnect */
+ kfree(cs->hw.bas->int_in_buf);
 kfree(cs->hw.bas);
 cs->hw.bas = NULL;
 }
@@ -2232,6 +2236,12 @@ static int gigaset_probe(struct usb_interface *interface,
 }
 hostif = interface->cur_altsetting;
 }
+ ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);
+ if (!ucs->int_in_buf) {
+ kfree(ucs);
+ pr_err("out of memory\n");
+ return 0;
+ }

 /* Reject application specific interfaces
 */
@@ -2290,7 +2300,7 @@ static int gigaset_probe(struct usb_interface *interface,
 usb_fill_int_urb(ucs->urb_int_in, udev,
 usb_rcvintpipe(udev,
 (endpoint->bEndpointAddress) & 0x0f),
- ucs->int_in_buf, 3, read_int_callback, cs,
+ ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
 endpoint->bInterval);
 if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) {
 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
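
[note] Moving int_in_buf from an embedded 3-byte array to a kmalloc()ed
buffer follows the USB DMA rule: buffers handed to usb_fill_int_urb() are
DMA-mapped by usbcore, so they must be allocated on their own rather than
embedded in a larger structure (or placed on the stack), where the mapping
could cover and corrupt neighbouring fields. The resulting pattern, in
isolation (pipe/interval arguments abbreviated here):

	ucs->int_in_buf = kmalloc(IP_MSGSIZE, GFP_KERNEL);	/* DMA-safe */
	if (!ucs->int_in_buf)
		return -ENOMEM;

	usb_fill_int_urb(ucs->urb_int_in, udev, pipe,
			 ucs->int_in_buf, IP_MSGSIZE,
			 read_int_callback, cs, interval);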
diff --git a/drivers/misc/thinkpad_acpi.c b/drivers/misc/thinkpad_acpi.c
index 34bb0e4..1a551a8 100644
--- a/drivers/misc/thinkpad_acpi.c
+++ b/drivers/misc/thinkpad_acpi.c
@@ -282,11 +282,17 @@ static u32 dbg_level;

 static struct workqueue_struct *tpacpi_wq;

+enum led_status_t {
+ TPACPI_LED_OFF = 0,
+ TPACPI_LED_ON,
+ TPACPI_LED_BLINK,
+};
+
 /* Special LED class that can defer work */
 struct tpacpi_led_classdev {
 struct led_classdev led_classdev;
 struct work_struct work;
- enum led_brightness new_brightness;
+ enum led_status_t new_state;
 unsigned int led;
 };

@@ -3478,7 +3484,7 @@ static void light_set_status_worker(struct work_struct *work)
 container_of(work, struct tpacpi_led_classdev, work);

 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
- light_set_status((data->new_brightness != LED_OFF));
+ light_set_status((data->new_state != TPACPI_LED_OFF));
 }

 static void light_sysfs_set(struct led_classdev *led_cdev,
@@ -3488,7 +3494,8 @@ static void light_sysfs_set(struct led_classdev *led_cdev,
 container_of(led_cdev,
 struct tpacpi_led_classdev,
 led_classdev);
- data->new_brightness = brightness;
+ data->new_state = (brightness != LED_OFF) ?
+ TPACPI_LED_ON : TPACPI_LED_OFF;
 queue_work(tpacpi_wq, &data->work);
 }

@@ -3995,12 +4002,6 @@ enum { /* For TPACPI_LED_OLD */
 TPACPI_LED_EC_HLMS = 0x0e, /* EC reg to select led to command */
 };

-enum led_status_t {
- TPACPI_LED_OFF = 0,
- TPACPI_LED_ON,
- TPACPI_LED_BLINK,
-};
-
 static enum led_access_mode led_supported;

 TPACPI_HANDLE(led, ec, "SLED", /* 570 */
@@ -4094,23 +4095,13 @@ static int led_set_status(const unsigned int led,
 return rc;
 }

-static void led_sysfs_set_status(unsigned int led,
- enum led_brightness brightness)
-{
- led_set_status(led,
- (brightness == LED_OFF) ?
- TPACPI_LED_OFF :
- (tpacpi_led_state_cache[led] == TPACPI_LED_BLINK) ?
- TPACPI_LED_BLINK : TPACPI_LED_ON);
-}
-
 static void led_set_status_worker(struct work_struct *work)
 {
 struct tpacpi_led_classdev *data =
 container_of(work, struct tpacpi_led_classdev, work);

 if (likely(tpacpi_lifecycle == TPACPI_LIFE_RUNNING))
- led_sysfs_set_status(data->led, data->new_brightness);
+ led_set_status(data->led, data->new_state);
 }

 static void led_sysfs_set(struct led_classdev *led_cdev,
@@ -4119,7 +4110,13 @@ static void led_sysfs_set(struct led_classdev *led_cdev,
 struct tpacpi_led_classdev *data = container_of(led_cdev,
 struct tpacpi_led_classdev, led_classdev);

- data->new_brightness = brightness;
+ if (brightness == LED_OFF)
+ data->new_state = TPACPI_LED_OFF;
+ else if (tpacpi_led_state_cache[data->led] != TPACPI_LED_BLINK)
+ data->new_state = TPACPI_LED_ON;
+ else
+ data->new_state = TPACPI_LED_BLINK;
+
 queue_work(tpacpi_wq, &data->work);
 }

@@ -4137,7 +4134,7 @@ static int led_sysfs_blink_set(struct led_classdev *led_cdev,
 } else if ((*delay_on != 500) || (*delay_off != 500))
 return -EINVAL;

- data->new_brightness = TPACPI_LED_BLINK;
+ data->new_state = TPACPI_LED_BLINK;
 queue_work(tpacpi_wq, &data->work);

 return 0;
diff --git a/drivers/net/b44.c b/drivers/net/b44.c
index c3bda5c..f1521c6 100644
--- a/drivers/net/b44.c
+++ b/drivers/net/b44.c
@@ -750,7 +750,7 @@ static void b44_recycle_rx(struct b44 *bp, int src_idx, u32 dest_idx_unmasked)
 dest_idx * sizeof(dest_desc),
 DMA_BIDIRECTIONAL);

- ssb_dma_sync_single_for_device(bp->sdev, le32_to_cpu(src_desc->addr),
+ ssb_dma_sync_single_for_device(bp->sdev, dest_map->mapping,
 RX_PKT_BUF_SZ,
 DMA_FROM_DEVICE);
 }
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 1b9c4dc..f9f29c9 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -3516,11 +3516,26 @@ static int bond_slave_netdev_event(unsigned long event, struct net_device *slave
 }
 break;
 case NETDEV_CHANGE:
- /*
- * TODO: is this what we get if somebody
- * sets up a hierarchical bond, then rmmod's
- * one of the slave bonding devices?
- */
+ if (bond->params.mode == BOND_MODE_8023AD || bond_is_lb(bond)) {
+ struct slave *slave;
+
+ slave = bond_get_slave_by_dev(bond, slave_dev);
+ if (slave) {
+ u16 old_speed = slave->speed;
+ u16 old_duplex = slave->duplex;
+
+ bond_update_speed_duplex(slave);
+
+ if (bond_is_lb(bond))
+ break;
+
+ if (old_speed != slave->speed)
+ bond_3ad_adapter_speed_changed(slave);
+ if (old_duplex != slave->duplex)
+ bond_3ad_adapter_duplex_changed(slave);
+ }
+ }
+
 break;
 case NETDEV_DOWN:
 /*
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index fb730ec..0f6de5f 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -248,6 +248,12 @@ static inline struct bonding *bond_get_bond_by_slave(struct slave *slave)
 return (struct bonding *)slave->dev->master->priv;
 }

+static inline bool bond_is_lb(const struct bonding *bond)
+{
+ return bond->params.mode == BOND_MODE_TLB
+ || bond->params.mode == BOND_MODE_ALB;
+}
+
 #define BOND_FOM_NONE 0
 #define BOND_FOM_ACTIVE 1
 #define BOND_FOM_FOLLOW 2
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 39c17bb..a98d31a 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -375,6 +375,22 @@ enum features {
 RTL_FEATURE_GMII = (1 << 2),
 };

+struct rtl8169_counters {
+ __le64 tx_packets;
+ __le64 rx_packets;
+ __le64 tx_errors;
+ __le32 rx_errors;
+ __le16 rx_missed;
+ __le16 align_errors;
+ __le32 tx_one_collision;
+ __le32 tx_multi_collision;
+ __le64 rx_unicast;
+ __le64 rx_broadcast;
+ __le32 rx_multicast;
+ __le16 tx_aborted;
+ __le16 tx_underun;
+};
+
 struct rtl8169_private {
 void __iomem *mmio_addr; /* memory map physical address */
 struct pci_dev *pci_dev; /* Index of PCI device */
@@ -416,6 +432,7 @@ struct rtl8169_private {
 unsigned features;

 struct mii_if_info mii;
+ struct rtl8169_counters counters;
 };

 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
@@ -960,22 +977,6 @@ static const char rtl8169_gstrings[][ETH_GSTRING_LEN] = {
 "tx_underrun",
 };

-struct rtl8169_counters {
- __le64 tx_packets;
- __le64 rx_packets;
- __le64 tx_errors;
- __le32 rx_errors;
- __le16 rx_missed;
- __le16 align_errors;
- __le32 tx_one_collision;
- __le32 tx_multi_collision;
- __le64 rx_unicast;
- __le64 rx_broadcast;
- __le32 rx_multicast;
- __le16 tx_aborted;
- __le16 tx_underun;
-};
-
 static int rtl8169_get_sset_count(struct net_device *dev, int sset)
 {
 switch (sset) {
@@ -986,16 +987,21 @@ static int rtl8169_get_sset_count(struct net_device *dev, int sset)
 }
 }

-static void rtl8169_get_ethtool_stats(struct net_device *dev,
- struct ethtool_stats *stats, u64 *data)
+static void rtl8169_update_counters(struct net_device *dev)
 {
 struct rtl8169_private *tp = netdev_priv(dev);
 void __iomem *ioaddr = tp->mmio_addr;
 struct rtl8169_counters *counters;
 dma_addr_t paddr;
 u32 cmd;
+ int wait = 1000;

- ASSERT_RTNL();
+ /*
+ * Some chips are unable to dump tally counters when the receiver
+ * is disabled.
+ */
+ if ((RTL_R8(ChipCmd) & CmdRxEnb) == 0)
+ return;

 counters = pci_alloc_consistent(tp->pci_dev, sizeof(*counters), &paddr);
 if (!counters)
@@ -1006,31 +1012,45 @@ static void rtl8169_get_ethtool_stats(struct net_device *dev,
 RTL_W32(CounterAddrLow, cmd);
 RTL_W32(CounterAddrLow, cmd | CounterDump);

- while (RTL_R32(CounterAddrLow) & CounterDump) {
- if (msleep_interruptible(1))
+ while (wait--) {
+ if ((RTL_R32(CounterAddrLow) & CounterDump) == 0) {
+ /* copy updated counters */
+ memcpy(&tp->counters, counters, sizeof(*counters));
 break;
+ }
+ udelay(10);
 }

 RTL_W32(CounterAddrLow, 0);
 RTL_W32(CounterAddrHigh, 0);

- data[0] = le64_to_cpu(counters->tx_packets);
- data[1] = le64_to_cpu(counters->rx_packets);
- data[2] = le64_to_cpu(counters->tx_errors);
- data[3] = le32_to_cpu(counters->rx_errors);
- data[4] = le16_to_cpu(counters->rx_missed);
- data[5] = le16_to_cpu(counters->align_errors);
- data[6] = le32_to_cpu(counters->tx_one_collision);
- data[7] = le32_to_cpu(counters->tx_multi_collision);
- data[8] = le64_to_cpu(counters->rx_unicast);
- data[9] = le64_to_cpu(counters->rx_broadcast);
- data[10] = le32_to_cpu(counters->rx_multicast);
- data[11] = le16_to_cpu(counters->tx_aborted);
- data[12] = le16_to_cpu(counters->tx_underun);
-
 pci_free_consistent(tp->pci_dev, sizeof(*counters), counters, paddr);
 }

+static void rtl8169_get_ethtool_stats(struct net_device *dev,
+ struct ethtool_stats *stats, u64 *data)
+{
+ struct rtl8169_private *tp = netdev_priv(dev);
+
+ ASSERT_RTNL();
+
+ rtl8169_update_counters(dev);
+
+ data[0] = le64_to_cpu(tp->counters.tx_packets);
+ data[1] = le64_to_cpu(tp->counters.rx_packets);
+ data[2] = le64_to_cpu(tp->counters.tx_errors);
+ data[3] = le32_to_cpu(tp->counters.rx_errors);
+ data[4] = le16_to_cpu(tp->counters.rx_missed);
+ data[5] = le16_to_cpu(tp->counters.align_errors);
+ data[6] = le32_to_cpu(tp->counters.tx_one_collision);
+ data[7] = le32_to_cpu(tp->counters.tx_multi_collision);
+ data[8] = le64_to_cpu(tp->counters.rx_unicast);
+ data[9] = le64_to_cpu(tp->counters.rx_broadcast);
+ data[10] = le32_to_cpu(tp->counters.rx_multicast);
+ data[11] = le16_to_cpu(tp->counters.tx_aborted);
+ data[12] = le16_to_cpu(tp->counters.tx_underun);
+}
+
 static void rtl8169_get_strings(struct net_device *dev, u32 stringset, u8 *data)
 {
 switch(stringset) {
@@ -1667,8 +1687,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 goto err_out_free_res_4;
 }

- /* Unneeded ? Don't mess with Mrs. Murphy. */
- rtl8169_irq_mask_and_ack(ioaddr);
+ RTL_W16(IntrMask, 0x0000);

 /* Soft reset the chip. */
 RTL_W8(ChipCmd, CmdReset);
@@ -1680,6 +1699,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 msleep_interruptible(1);
 }

+ RTL_W16(IntrStatus, 0xffff);
+
 /* Identify chip attached to board */
 rtl8169_get_mac_version(tp, ioaddr);

@@ -2529,13 +2550,6 @@ static int rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
 opts1 |= FirstFrag;
 } else {
 len = skb->len;
-
- if (unlikely(len < ETH_ZLEN)) {
- if (skb_padto(skb, ETH_ZLEN))
- goto err_update_stats;
- len = ETH_ZLEN;
- }
-
 opts1 |= FirstFrag | LastFrag;
 tp->tx_skb[entry].skb = skb;
 }
@@ -2573,7 +2587,6 @@ out:
 err_stop:
 netif_stop_queue(dev);
 ret = NETDEV_TX_BUSY;
-err_update_stats:
 dev->stats.tx_dropped++;
 goto out;
 }
@@ -2979,6 +2992,9 @@ static int rtl8169_close(struct net_device *dev)
 struct rtl8169_private *tp = netdev_priv(dev);
 struct pci_dev *pdev = tp->pci_dev;

+ /* update counters before going down */
+ rtl8169_update_counters(dev);
+
 rtl8169_down(dev);

 free_irq(dev->irq, dev);
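
[note] In rtl8169_update_counters() the old msleep_interruptible() loop
could be cut short by a pending signal, abandoning the dump; the replacement
is a bounded busy-wait, up to 1000 probes 10 us apart (~10 ms worst case).
The idiom on its own, with hypothetical register names:

	static bool wait_dump_done(void __iomem *ioaddr)
	{
		int wait = 1000;

		while (wait--) {
			if ((readl(ioaddr + CTR_LOW) & CTR_DUMP) == 0)
				return true;	/* hardware cleared the bit */
			udelay(10);
		}
		return false;			/* timed out */
	}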
diff --git a/drivers/net/wireless/ath9k/ath9k.h b/drivers/net/wireless/ath9k/ath9k.h
index d1b0fba..8ccf374 100644
--- a/drivers/net/wireless/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath9k/ath9k.h
@@ -591,8 +591,8 @@ struct ath9k_country_entry {
 u8 iso[3];
 };

-#define REG_WRITE(_ah, _reg, _val) iowrite32(_val, _ah->ah_sh + _reg)
-#define REG_READ(_ah, _reg) ioread32(_ah->ah_sh + _reg)
+#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val))
+#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg))

 #define SM(_v, _f) (((_v) << _f##_S) & _f)
 #define MS(_v, _f) (((_v) & _f) >> _f##_S)
diff --git a/drivers/net/wireless/ath9k/core.c b/drivers/net/wireless/ath9k/core.c
index 87e37bc..e50ba6d 100644
--- a/drivers/net/wireless/ath9k/core.c
+++ b/drivers/net/wireless/ath9k/core.c
@@ -1120,6 +1120,7 @@ int ath_init(u16 devid, struct ath_softc *sc)
 sc->sc_cachelsz = csz << 2; /* convert to bytes */

 spin_lock_init(&sc->sc_resetlock);
+ spin_lock_init(&sc->sc_serial_rw);

 ah = ath9k_hw_attach(devid, sc, sc->mem, &status);
 if (ah == NULL) {
diff --git a/drivers/net/wireless/ath9k/core.h b/drivers/net/wireless/ath9k/core.h
index 88f4cc3..51ef315 100644
--- a/drivers/net/wireless/ath9k/core.h
+++ b/drivers/net/wireless/ath9k/core.h
@@ -1022,6 +1022,7 @@ struct ath_softc {
 spinlock_t sc_rxbuflock;
 spinlock_t sc_txbuflock;
 spinlock_t sc_resetlock;
+ spinlock_t sc_serial_rw;
 spinlock_t node_lock;
 };

@@ -1069,4 +1070,36 @@ void ath_skb_unmap_single(struct ath_softc *sc,
 void ath_mcast_merge(struct ath_softc *sc, u32 mfilt[2]);
 enum ath9k_ht_macmode ath_cwm_macmode(struct ath_softc *sc);

+/*
+ * Read and write, they both share the same lock. We do this to serialize
+ * reads and writes on Atheros 802.11n PCI devices only. This is required
+ * as the FIFO on these devices can only accept sanely 2 requests. After
+ * that the device goes bananas. Serializing the reads/writes prevents this
+ * from happening.
+ */
+
+static inline void ath9k_iowrite32(struct ath_hal *ah, u32 reg_offset, u32 val)
+{
+ if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
+ iowrite32(val, ah->ah_sc->mem + reg_offset);
+ spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
+ } else
+ iowrite32(val, ah->ah_sc->mem + reg_offset);
+}
+
+static inline unsigned int ath9k_ioread32(struct ath_hal *ah, u32 reg_offset)
+{
+ u32 val;
+ if (ah->ah_config.serialize_regmode == SER_REG_MODE_ON) {
+ unsigned long flags;
+ spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
+ val = ioread32(ah->ah_sc->mem + reg_offset);
+ spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
+ } else
+ val = ioread32(ah->ah_sc->mem + reg_offset);
+ return val;
+}
+
 #endif /* CORE_H */
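
[note] With serialize_regmode forced on, every REG_READ/REG_WRITE in the
driver now funnels through sc_serial_rw, so two CPUs can no longer have two
MMIO transactions in flight at once on the affected PCI parts. Call sites
are untouched; for illustration (register names shown only as an example):

	REG_WRITE(ah, AR_CR, AR_CR_RXD);	/* takes sc_serial_rw inside */
	(void) REG_READ(ah, AR_CR);		/* likewise serialized */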
diff --git a/drivers/net/wireless/ath9k/hw.c b/drivers/net/wireless/ath9k/hw.c
index 69120b5..c40b677 100644
--- a/drivers/net/wireless/ath9k/hw.c
+++ b/drivers/net/wireless/ath9k/hw.c
@@ -369,6 +369,25 @@ static void ath9k_hw_set_defaults(struct ath_hal *ah)
 }

 ah->ah_config.intr_mitigation = 0;
+
+ /*
+ * We need this for PCI devices only (Cardbus, PCI, miniPCI)
+ * _and_ if on non-uniprocessor systems (Multiprocessor/HT).
+ * This means we use it for all AR5416 devices, and the few
+ * minor PCI AR9280 devices out there.
+ *
+ * Serialization is required because these devices do not handle
+ * well the case of two concurrent reads/writes due to the latency
+ * involved. During one read/write another read/write can be issued
+ * on another CPU while the previous read/write may still be working
+ * on our hardware, if we hit this case the hardware poops in a loop.
+ * We prevent this by serializing reads and writes.
+ *
+ * This issue is not present on PCI-Express devices or pre-AR5416
+ * devices (legacy, 802.11abg).
+ */
+ if (num_possible_cpus() > 1)
+ ah->ah_config.serialize_regmode = SER_REG_MODE_AUTO;
 }

 static inline void ath9k_hw_override_ini(struct ath_hal *ah,
@@ -3294,7 +3313,8 @@ static struct ath_hal *ath9k_hw_do_attach(u16 devid,
 }

 if (ah->ah_config.serialize_regmode == SER_REG_MODE_AUTO) {
- if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI) {
+ if (ah->ah_macVersion == AR_SREV_VERSION_5416_PCI ||
+ (AR_SREV_9280(ah) && !ah->ah_isPciExpress)) {
 ah->ah_config.serialize_regmode =
 SER_REG_MODE_ON;
 } else {
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 9dda816..4102aaa 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -51,7 +51,7 @@ static int b43_plcp_get_bitrate_idx_cck(struct b43_plcp_hdr6 *plcp)
 }

 /* Extract the bitrate index out of an OFDM PLCP header. */
-static u8 b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
+static int b43_plcp_get_bitrate_idx_ofdm(struct b43_plcp_hdr6 *plcp, bool aphy)
 {
 int base = aphy ? 0 : 4;

diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 55ac5c3..cb009f3 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -1807,12 +1807,14 @@ iscsi_pool_init(struct iscsi_pool *q, int max, void ***items, int item_size)
 num_arrays++;
 q->pool = kzalloc(num_arrays * max * sizeof(void*), GFP_KERNEL);
 if (q->pool == NULL)
- goto enomem;
+ return -ENOMEM;

 q->queue = kfifo_init((void*)q->pool, max * sizeof(void*),
 GFP_KERNEL, NULL);
- if (q->queue == ERR_PTR(-ENOMEM))
+ if (IS_ERR(q->queue)) {
+ q->queue = NULL;
 goto enomem;
+ }

 for (i = 0; i < max; i++) {
 q->pool[i] = kzalloc(item_size, GFP_KERNEL);
@@ -1842,8 +1844,7 @@ void iscsi_pool_free(struct iscsi_pool *q)

 for (i = 0; i < q->max; i++)
 kfree(q->pool[i]);
- if (q->pool)
- kfree(q->pool);
+ kfree(q->pool);
 kfree(q->queue);
 }
 EXPORT_SYMBOL_GPL(iscsi_pool_free);
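
[note] The libiscsi fix applies the general ERR_PTR rule: kfifo_init()
encodes whatever errno it hit into the returned pointer, so failure has to
be detected with IS_ERR() instead of comparing against the one value
ERR_PTR(-ENOMEM). NULLing the pointer keeps the cleanup path's
kfree(q->queue) safe. In isolation:

	q->queue = kfifo_init((void *) q->pool, max * sizeof(void *),
			      GFP_KERNEL, NULL);
	if (IS_ERR(q->queue)) {	/* catches any -errno, not just -ENOMEM */
		q->queue = NULL;	/* kfree(NULL) is a no-op on unwind */
		goto enomem;
	}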
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 023a4e9..abbdb31 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -641,7 +641,7 @@ next_desc:

 iface = &intf->altsetting[0];
 ep = &iface->endpoint[0].desc;
- if (!usb_endpoint_is_int_in(ep)) {
+ if (!ep || !usb_endpoint_is_int_in(ep)) {
 rv = -EINVAL;
 goto err;
 }
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index 3a8bb53..fd7b356 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -437,7 +437,7 @@ invalid:
 DBG(cdev, "rndis req%02x.%02x v%04x i%04x l%d\n",
 ctrl->bRequestType, ctrl->bRequest,
 w_value, w_index, w_length);
- req->zero = 0;
+ req->zero = (value < w_length);
 req->length = value;
 value = usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);
 if (value < 0)
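
[note] The f_rndis one-liner leans on the gadget API contract for
usb_request.zero: when set, the controller appends a zero-length packet if
the data length is an exact multiple of the endpoint's maxpacket. For an
ep0 reply shorter than the host asked for, that ZLP is what tells the host
the transfer is complete rather than stalled mid-way:

	req->length = value;		/* bytes actually available */
	req->zero = (value < w_length);	/* short reply: ZLP if packet-aligned */
	usb_ep_queue(cdev->gadget->ep0, req, GFP_ATOMIC);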
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 38a55af..bb3d7c3 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -175,12 +175,6 @@ static void eth_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *p)
 strlcpy(p->bus_info, dev_name(&dev->gadget->dev), sizeof p->bus_info);
 }

-static u32 eth_get_link(struct net_device *net)
-{
- struct eth_dev *dev = netdev_priv(net);
- return dev->gadget->speed != USB_SPEED_UNKNOWN;
-}
-
 /* REVISIT can also support:
 * - WOL (by tracking suspends and issuing remote wakeup)
 * - msglevel (implies updated messaging)
@@ -189,7 +183,7 @@ static u32 eth_get_link(struct net_device *net)

 static struct ethtool_ops ops = {
 .get_drvinfo = eth_get_drvinfo,
- .get_link = eth_get_link
+ .get_link = ethtool_op_get_link,
 };

 static void defer_kevent(struct eth_dev *dev, int flag)
diff --git a/drivers/usb/host/ehci-q.c b/drivers/usb/host/ehci-q.c
index ecc9b66..01132ac 100644
--- a/drivers/usb/host/ehci-q.c
+++ b/drivers/usb/host/ehci-q.c
@@ -333,12 +333,40 @@ qh_completions (struct ehci_hcd *ehci, struct ehci_qh *qh)
 token = hc32_to_cpu(ehci, qtd->hw_token);

 /* always clean up qtds the hc de-activated */
+ retry_xacterr:
 if ((token & QTD_STS_ACTIVE) == 0) {

 /* on STALL, error, and short reads this urb must
 * complete and all its qtds must be recycled.
 */
 if ((token & QTD_STS_HALT) != 0) {
+
+ /* retry transaction errors until we
+ * reach the software xacterr limit
+ */
+ if ((token & QTD_STS_XACT) &&
+ QTD_CERR(token) == 0 &&
+ --qh->xacterrs > 0 &&
+ !urb->unlinked) {
+ ehci_dbg(ehci,
+ "detected XactErr len %d/%d retry %d\n",
+ qtd->length - QTD_LENGTH(token), qtd->length,
+ QH_XACTERR_MAX - qh->xacterrs);
+
+ /* reset the token in the qtd and the
+ * qh overlay (which still contains
+ * the qtd) so that we pick up from
+ * where we left off
+ */
+ token &= ~QTD_STS_HALT;
+ token |= QTD_STS_ACTIVE |
+ (EHCI_TUNE_CERR << 10);
+ qtd->hw_token = cpu_to_hc32(ehci,
+ token);
+ wmb();
+ qh->hw_token = cpu_to_hc32(ehci, token);
+ goto retry_xacterr;
+ }
 stopped = 1;

 /* magic dummy for some short reads; qh won't advance.
@@ -421,6 +449,9 @@ halt:
 /* remove qtd; it's recycled after possible urb completion */
 list_del (&qtd->qtd_list);
 last = qtd;
+
+ /* reinit the xacterr counter for the next qtd */
+ qh->xacterrs = QH_XACTERR_MAX;
 }

 /* last urb's completion might still need calling */
@@ -862,6 +893,7 @@ static void qh_link_async (struct ehci_hcd *ehci, struct ehci_qh *qh)
 head->qh_next.qh = qh;
 head->hw_next = dma;

+ qh->xacterrs = QH_XACTERR_MAX;
 qh->qh_state = QH_STATE_LINKED;
 /* qtd completions reported later by interrupt */
 }
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 33459d7..c165fe4 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -500,6 +500,9 @@ struct ehci_qh {
 #define QH_STATE_UNLINK_WAIT 4 /* LINKED and on reclaim q */
 #define QH_STATE_COMPLETING 5 /* don't touch token.HALT */

+ u8 xacterrs; /* XactErr retry counter */
+#define QH_XACTERR_MAX 32 /* XactErr retry limit */
+
 /* periodic schedule info */
 u8 usecs; /* intr bandwidth */
 u8 gap_uf; /* uframes split/csplit gap */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 2d78712..13b34bc 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -662,6 +662,7 @@ static struct usb_device_id id_table_combined [] = {
 { USB_DEVICE(DE_VID, WHT_PID) },
 { USB_DEVICE(ADI_VID, ADI_GNICE_PID),
 .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
+ { USB_DEVICE(JETI_VID, JETI_SPC1201_PID) },
 { }, /* Optional parameter entry */
 { } /* Terminating entry */
 };
diff --git a/drivers/usb/serial/ftdi_sio.h b/drivers/usb/serial/ftdi_sio.h
index 80fa76e..3425122 100644
--- a/drivers/usb/serial/ftdi_sio.h
+++ b/drivers/usb/serial/ftdi_sio.h
@@ -890,6 +890,13 @@
 #define ADI_GNICE_PID 0xF000

 /*
+ * JETI SPECTROMETER SPECBOS 1201
+ * http://www.jeti.com/products/sys/scb/scb1201.php
+ */
+#define JETI_VID 0x0c6c
+#define JETI_SPC1201_PID 0x04b2
+
+/*
 * BmRequestType: 1100 0000b
 * bRequest: FTDI_E2_READ
 * wValue: 0
diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c
index 898e67d..9466a99 100644
--- a/drivers/usb/storage/cypress_atacb.c
+++ b/drivers/usb/storage/cypress_atacb.c
@@ -133,19 +133,18 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)

 /* build the command for
 * reading the ATA registers */
- scsi_eh_prep_cmnd(srb, &ses, NULL, 0, 0);
- srb->sdb.length = sizeof(regs);
- sg_init_one(&ses.sense_sgl, regs, srb->sdb.length);
- srb->sdb.table.sgl = &ses.sense_sgl;
- srb->sc_data_direction = DMA_FROM_DEVICE;
- srb->sdb.table.nents = 1;
+ scsi_eh_prep_cmnd(srb, &ses, NULL, 0, sizeof(regs));
+
 /* we use the same command as before, but we set
 * the read taskfile bit, for not executing atacb command,
 * but reading register selected in srb->cmnd[4]
 */
+ srb->cmd_len = 16;
+ srb->cmnd = ses.cmnd;
 srb->cmnd[2] = 1;

 usb_stor_transparent_scsi_command(srb, us);
+ memcpy(regs, srb->sense_buffer, sizeof(regs));
 tmp_result = srb->result;
 scsi_eh_restore_cmnd(srb, &ses);
 /* we fail to get registers, report invalid command */
@@ -162,8 +161,8 @@ void cypress_atacb_passthrough(struct scsi_cmnd *srb, struct us_data *us)

 /* XXX we should generate sk, asc, ascq from status and error
 * regs