1 From: Greg Kroah-Hartman <gregkh@suse.de>
2 Subject: Linux 2.6.27.6
4 Upstream 2.6.27.6 release from kernel.org
6 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
8 diff --git a/Documentation/cciss.txt b/Documentation/cciss.txt
9 index 8244c64..48d80d9 100644
10 --- a/Documentation/cciss.txt
11 +++ b/Documentation/cciss.txt
12 @@ -26,6 +26,8 @@ This driver is known to work with the following cards:
19 Detecting drive failures:
20 -------------------------
21 diff --git a/Makefile b/Makefile
22 index 4ea7b3c..1ea4453 100644
31 NAME = Trembling Tortoise
34 diff --git a/arch/arm/mach-pxa/include/mach/reset.h b/arch/arm/mach-pxa/include/mach/reset.h
35 index 9489a48..7b8842c 100644
36 --- a/arch/arm/mach-pxa/include/mach/reset.h
37 +++ b/arch/arm/mach-pxa/include/mach/reset.h
39 extern unsigned int reset_status;
40 extern void clear_reset_status(unsigned int mask);
43 - * register GPIO as reset generator
45 + * init_gpio_reset() - register GPIO as reset generator
48 + * @output - set gpio as out/low instead of input during normal work
50 -extern int init_gpio_reset(int gpio);
51 +extern int init_gpio_reset(int gpio, int output);
53 #endif /* __ASM_ARCH_RESET_H */
54 diff --git a/arch/arm/mach-pxa/reset.c b/arch/arm/mach-pxa/reset.c
55 index 9996c61..1b236a6 100644
56 --- a/arch/arm/mach-pxa/reset.c
57 +++ b/arch/arm/mach-pxa/reset.c
58 @@ -20,7 +20,7 @@ static void do_hw_reset(void);
60 static int reset_gpio = -1;
62 -int init_gpio_reset(int gpio)
63 +int init_gpio_reset(int gpio, int output)
67 @@ -30,9 +30,12 @@ int init_gpio_reset(int gpio)
71 - rc = gpio_direction_input(gpio);
73 + rc = gpio_direction_output(gpio, 0);
75 + rc = gpio_direction_input(gpio);
77 - printk(KERN_ERR "Can't configure reset_gpio for input\n");
78 + printk(KERN_ERR "Can't configure reset_gpio\n");
82 diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
83 index b569f3b..32cee4c 100644
84 --- a/arch/arm/mach-pxa/spitz.c
85 +++ b/arch/arm/mach-pxa/spitz.c
86 @@ -548,7 +548,7 @@ static void spitz_restart(char mode)
88 static void __init common_init(void)
90 - init_gpio_reset(SPITZ_GPIO_ON_RESET);
91 + init_gpio_reset(SPITZ_GPIO_ON_RESET, 1);
92 pm_power_off = spitz_poweroff;
93 arm_pm_restart = spitz_restart;
95 diff --git a/arch/arm/mach-pxa/tosa.c b/arch/arm/mach-pxa/tosa.c
96 index 9f3ef9e..130e37e 100644
97 --- a/arch/arm/mach-pxa/tosa.c
98 +++ b/arch/arm/mach-pxa/tosa.c
99 @@ -781,7 +781,7 @@ static void __init tosa_init(void)
100 gpio_set_wake(MFP_PIN_GPIO1, 1);
101 /* We can't pass to gpio-keys since it will drop the Reset altfunc */
103 - init_gpio_reset(TOSA_GPIO_ON_RESET);
104 + init_gpio_reset(TOSA_GPIO_ON_RESET, 0);
106 pm_power_off = tosa_poweroff;
107 arm_pm_restart = tosa_restart;
108 diff --git a/arch/arm/mm/cache-xsc3l2.c b/arch/arm/mm/cache-xsc3l2.c
109 index 158bd96..99ec030 100644
110 --- a/arch/arm/mm/cache-xsc3l2.c
111 +++ b/arch/arm/mm/cache-xsc3l2.c
112 @@ -97,7 +97,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
114 * Clean and invalidate partial last cache line.
116 - if (end & (CACHE_LINE_SIZE - 1)) {
117 + if (start < end && (end & (CACHE_LINE_SIZE - 1))) {
118 xsc3_l2_clean_pa(end & ~(CACHE_LINE_SIZE - 1));
119 xsc3_l2_inv_pa(end & ~(CACHE_LINE_SIZE - 1));
120 end &= ~(CACHE_LINE_SIZE - 1);
121 @@ -106,7 +106,7 @@ static void xsc3_l2_inv_range(unsigned long start, unsigned long end)
123 * Invalidate all full cache lines between 'start' and 'end'.
125 - while (start != end) {
126 + while (start < end) {
127 xsc3_l2_inv_pa(start);
128 start += CACHE_LINE_SIZE;
130 diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
131 index ed92864..552d2b7 100644
132 --- a/arch/x86/Kconfig
133 +++ b/arch/x86/Kconfig
134 @@ -1059,6 +1059,26 @@ config HIGHPTE
135 low memory. Setting this option will put user-space page table
136 entries in high memory.
138 +config X86_RESERVE_LOW_64K
139 + bool "Reserve low 64K of RAM on AMI/Phoenix BIOSen"
142 + Reserve the first 64K of physical RAM on BIOSes that are known
143 + to potentially corrupt that memory range. A number of BIOSes are
144 + known to utilize this area during suspend/resume, so it must not
145 + be used by the kernel.
147 + Set this to N if you are absolutely sure that you trust the BIOS
148 + to get all its memory reservations and usages right.
150 + If you have doubts about the BIOS (e.g. suspend/resume does not
151 + work or there are kernel crashes after certain hardware hotplug
152 + events) and it's not AMI or Phoenix, then you might want to enable
153 + X86_CHECK_BIOS_CORRUPTION=y to allow the kernel to check typical
154 + corruption patterns.
158 config MATH_EMULATION
160 prompt "Math emulation" if X86_32
161 diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
162 index 9838f25..64b5c42 100644
163 --- a/arch/x86/kernel/setup.c
164 +++ b/arch/x86/kernel/setup.c
165 @@ -578,6 +578,39 @@ static struct x86_quirks default_x86_quirks __initdata;
167 struct x86_quirks *x86_quirks __initdata = &default_x86_quirks;
169 +static int __init dmi_low_memory_corruption(const struct dmi_system_id *d)
172 + "%s detected: BIOS may corrupt low RAM, working it around.\n",
175 + e820_update_range(0, 0x10000, E820_RAM, E820_RESERVED);
176 + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
181 +/* List of systems that have known low memory corruption BIOS problems */
182 +static struct dmi_system_id __initdata bad_bios_dmi_table[] = {
183 +#ifdef CONFIG_X86_RESERVE_LOW_64K
185 + .callback = dmi_low_memory_corruption,
186 + .ident = "AMI BIOS",
188 + DMI_MATCH(DMI_BIOS_VENDOR, "American Megatrends Inc."),
192 + .callback = dmi_low_memory_corruption,
193 + .ident = "Phoenix BIOS",
195 + DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
203 * Determine if we were loaded by an EFI loader. If so, then we have also been
204 * passed the efi memmap, systab, etc., so we should use these data structures
205 @@ -699,6 +732,10 @@ void __init setup_arch(char **cmdline_p)
207 finish_e820_parsing();
209 + dmi_scan_machine();
211 + dmi_check_system(bad_bios_dmi_table);
216 @@ -781,8 +818,6 @@ void __init setup_arch(char **cmdline_p)
220 - dmi_scan_machine();
225 @@ -885,3 +920,5 @@ void __init setup_arch(char **cmdline_p)
231 diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
232 index 8f98e9d..de850e9 100644
233 --- a/arch/x86/kernel/tsc.c
234 +++ b/arch/x86/kernel/tsc.c
235 @@ -639,10 +639,6 @@ void __init tsc_init(void)
236 cpu_khz = calibrate_cpu();
239 - lpj = ((u64)tsc_khz * 1000);
243 printk("Detected %lu.%03lu MHz processor.\n",
244 (unsigned long)cpu_khz / 1000,
245 (unsigned long)cpu_khz % 1000);
246 @@ -662,6 +658,10 @@ void __init tsc_init(void)
247 /* now allow native_sched_clock() to use rdtsc */
250 + lpj = ((u64)tsc_khz * 1000);
255 /* Check and install the TSC clocksource */
256 dmi_check_system(bad_tsc_dmi_table);
257 diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
258 index 7d2edf1..25d2161 100644
259 --- a/drivers/acpi/dock.c
260 +++ b/drivers/acpi/dock.c
261 @@ -604,14 +604,17 @@ static int handle_eject_request(struct dock_station *ds, u32 event)
262 static void dock_notify(acpi_handle handle, u32 event, void *data)
264 struct dock_station *ds = data;
265 + struct acpi_device *tmp;
268 case ACPI_NOTIFY_BUS_CHECK:
269 - if (!dock_in_progress(ds) && dock_present(ds)) {
270 + if (!dock_in_progress(ds) && acpi_bus_get_device(ds->handle,
274 if (!dock_present(ds)) {
275 printk(KERN_ERR PREFIX "Unable to dock!\n");
279 atomic_notifier_call_chain(&dock_notifier_list,
280 diff --git a/drivers/ata/libata-eh.c b/drivers/ata/libata-eh.c
281 index c1db2f2..2c4ccec 100644
282 --- a/drivers/ata/libata-eh.c
283 +++ b/drivers/ata/libata-eh.c
284 @@ -604,9 +604,6 @@ void ata_scsi_error(struct Scsi_Host *host)
285 if (ata_ncq_enabled(dev))
286 ehc->saved_ncq_enabled |= 1 << devno;
289 - /* set last reset timestamp to some time in the past */
290 - ehc->last_reset = jiffies - 60 * HZ;
293 ap->pflags |= ATA_PFLAG_EH_IN_PROGRESS;
294 @@ -2209,17 +2206,21 @@ int ata_eh_reset(struct ata_link *link, int classify,
295 if (link->flags & ATA_LFLAG_NO_SRST)
299 - deadline = ata_deadline(ehc->last_reset, ATA_EH_RESET_COOL_DOWN);
300 - if (time_before(now, deadline))
301 - schedule_timeout_uninterruptible(deadline - now);
302 + /* make sure each reset attempt is at least COOL_DOWN apart */
303 + if (ehc->i.flags & ATA_EHI_DID_RESET) {
305 + WARN_ON(time_after(ehc->last_reset, now));
306 + deadline = ata_deadline(ehc->last_reset,
307 + ATA_EH_RESET_COOL_DOWN);
308 + if (time_before(now, deadline))
309 + schedule_timeout_uninterruptible(deadline - now);
312 spin_lock_irqsave(ap->lock, flags);
313 ap->pflags |= ATA_PFLAG_RESETTING;
314 spin_unlock_irqrestore(ap->lock, flags);
316 ata_eh_about_to_do(link, NULL, ATA_EH_RESET);
317 - ehc->last_reset = jiffies;
319 ata_link_for_each_dev(dev, link) {
320 /* If we issue an SRST then an ATA drive (not ATAPI)
321 @@ -2285,7 +2286,6 @@ int ata_eh_reset(struct ata_link *link, int classify,
325 - ehc->last_reset = jiffies;
326 if (ata_is_host_link(link))
327 ata_eh_freeze_port(ap);
329 @@ -2297,6 +2297,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
330 reset == softreset ? "soft" : "hard");
332 /* mark that this EH session started with reset */
333 + ehc->last_reset = jiffies;
334 if (reset == hardreset)
335 ehc->i.flags |= ATA_EHI_DID_HARDRESET;
337 @@ -2404,7 +2405,7 @@ int ata_eh_reset(struct ata_link *link, int classify,
339 /* reset successful, schedule revalidation */
340 ata_eh_done(link, NULL, ATA_EH_RESET);
341 - ehc->last_reset = jiffies;
342 + ehc->last_reset = jiffies; /* update to completion time */
343 ehc->i.action |= ATA_EH_REVALIDATE;
346 diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
347 index b73116e..2ac91b8 100644
348 --- a/drivers/block/cciss.c
349 +++ b/drivers/block/cciss.c
350 @@ -96,6 +96,8 @@ static const struct pci_device_id cciss_pci_device_id[] = {
351 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3245},
352 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3247},
353 {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x3249},
354 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324A},
355 + {PCI_VENDOR_ID_HP, PCI_DEVICE_ID_HP_CISSE, 0x103C, 0x324B},
356 {PCI_VENDOR_ID_HP, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
357 PCI_CLASS_STORAGE_RAID << 8, 0xffff << 8, 0},
359 @@ -133,6 +135,8 @@ static struct board_type products[] = {
360 {0x3245103C, "Smart Array P410i", &SA5_access},
361 {0x3247103C, "Smart Array P411", &SA5_access},
362 {0x3249103C, "Smart Array P812", &SA5_access},
363 + {0x324A103C, "Smart Array P712m", &SA5_access},
364 + {0x324B103C, "Smart Array P711m", &SA5_access},
365 {0xFFFF103C, "Unknown Smart Array", &SA5_access},
368 @@ -1365,6 +1369,7 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
369 disk->first_minor = drv_index << NWD_SHIFT;
370 disk->fops = &cciss_fops;
371 disk->private_data = &h->drv[drv_index];
372 + disk->driverfs_dev = &h->pdev->dev;
374 /* Set up queue information */
375 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
376 @@ -3403,7 +3408,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
381 + int dac, return_code;
382 + InquiryData_struct *inq_buff = NULL;
384 i = alloc_cciss_hba();
386 @@ -3509,6 +3515,25 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
387 /* Turn the interrupts on so we can service requests */
388 hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
390 + /* Get the firmware version */
391 + inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
392 + if (inq_buff == NULL) {
393 + printk(KERN_ERR "cciss: out of memory\n");
397 + return_code = sendcmd_withirq(CISS_INQUIRY, i, inq_buff,
398 + sizeof(InquiryData_struct), 0, 0 , 0, TYPE_CMD);
399 + if (return_code == IO_OK) {
400 + hba[i]->firm_ver[0] = inq_buff->data_byte[32];
401 + hba[i]->firm_ver[1] = inq_buff->data_byte[33];
402 + hba[i]->firm_ver[2] = inq_buff->data_byte[34];
403 + hba[i]->firm_ver[3] = inq_buff->data_byte[35];
404 + } else { /* send command failed */
405 + printk(KERN_WARNING "cciss: unable to determine firmware"
406 + " version of controller\n");
411 hba[i]->cciss_max_sectors = 2048;
412 @@ -3519,6 +3544,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
417 #ifdef CONFIG_CISS_SCSI_TAPE
418 kfree(hba[i]->scsi_rejects.complete);
420 diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
421 index 09c1434..f5d2e54 100644
422 --- a/drivers/block/cpqarray.c
423 +++ b/drivers/block/cpqarray.c
424 @@ -567,7 +567,12 @@ static int __init cpqarray_init(void)
428 - return(num_cntlrs_reg);
429 + if (num_cntlrs_reg)
432 + pci_unregister_driver(&cpqarray_pci_driver);
437 /* Function to find the first free pointer into our hba[] array */
438 diff --git a/drivers/dca/dca-core.c b/drivers/dca/dca-core.c
439 index ec249d2..d883e1b 100644
440 --- a/drivers/dca/dca-core.c
441 +++ b/drivers/dca/dca-core.c
442 @@ -270,6 +270,6 @@ static void __exit dca_exit(void)
446 -module_init(dca_init);
447 +subsys_initcall(dca_init);
448 module_exit(dca_exit);
450 diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c
451 index bc8c6e3..3f4db54 100644
452 --- a/drivers/dma/ioat_dma.c
453 +++ b/drivers/dma/ioat_dma.c
454 @@ -519,7 +519,7 @@ static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx)
457 hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
458 - if (new->async_tx.callback) {
459 + if (first->async_tx.callback) {
460 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
462 /* move callback into to last desc */
463 @@ -611,7 +611,7 @@ static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx)
466 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_CP_STS;
467 - if (new->async_tx.callback) {
468 + if (first->async_tx.callback) {
469 hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN;
471 /* move callback into to last desc */
472 @@ -801,6 +801,12 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
473 struct ioat_desc_sw *desc, *_desc;
474 int in_use_descs = 0;
476 + /* Before freeing channel resources first check
477 + * if they have been previously allocated for this channel.
479 + if (ioat_chan->desccount == 0)
482 tasklet_disable(&ioat_chan->cleanup_task);
483 ioat_dma_memcpy_cleanup(ioat_chan);
485 @@ -863,6 +869,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan)
486 ioat_chan->last_completion = ioat_chan->completion_addr = 0;
487 ioat_chan->pending = 0;
488 ioat_chan->dmacount = 0;
489 + ioat_chan->desccount = 0;
490 ioat_chan->watchdog_completion = 0;
491 ioat_chan->last_compl_desc_addr_hw = 0;
492 ioat_chan->watchdog_tcp_cookie =
493 diff --git a/drivers/dma/iovlock.c b/drivers/dma/iovlock.c
494 index e763d72..9f6fe46 100644
495 --- a/drivers/dma/iovlock.c
496 +++ b/drivers/dma/iovlock.c
497 @@ -55,7 +55,6 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
499 int iovec_len_used = 0;
500 int iovec_pages_used = 0;
503 /* don't pin down non-user-based iovecs */
504 if (segment_eq(get_fs(), KERNEL_DS))
505 @@ -72,23 +71,21 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
506 local_list = kmalloc(sizeof(*local_list)
507 + (nr_iovecs * sizeof (struct dma_page_list))
508 + (iovec_pages_used * sizeof (struct page*)), GFP_KERNEL);
515 /* list of pages starts right after the page list array */
516 pages = (struct page **) &local_list->page_list[nr_iovecs];
518 + local_list->nr_iovecs = 0;
520 for (i = 0; i < nr_iovecs; i++) {
521 struct dma_page_list *page_list = &local_list->page_list[i];
523 len -= iov[i].iov_len;
525 - if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len)) {
527 + if (!access_ok(VERIFY_WRITE, iov[i].iov_base, iov[i].iov_len))
531 page_list->nr_pages = num_pages_spanned(&iov[i]);
532 page_list->base_address = iov[i].iov_base;
533 @@ -109,10 +106,8 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
535 up_read(&current->mm->mmap_sem);
537 - if (ret != page_list->nr_pages) {
539 + if (ret != page_list->nr_pages)
543 local_list->nr_iovecs = i + 1;
545 @@ -122,7 +117,7 @@ struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len)
547 dma_unpin_iovec_pages(local_list);
549 - return ERR_PTR(err);
553 void dma_unpin_iovec_pages(struct dma_pinned_list *pinned_list)
554 diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
555 index c40f040..8c030d9 100644
556 --- a/drivers/hid/hidraw.c
557 +++ b/drivers/hid/hidraw.c
558 @@ -113,7 +113,7 @@ static ssize_t hidraw_write(struct file *file, const char __user *buffer, size_t
559 if (!dev->hid_output_raw_report)
562 - if (count > HID_MIN_BUFFER_SIZE) {
563 + if (count > HID_MAX_BUFFER_SIZE) {
564 printk(KERN_WARNING "hidraw: pid %d passed too large report\n",
565 task_pid_nr(current));
567 diff --git a/drivers/md/linear.c b/drivers/md/linear.c
568 index b1eebf8..a58a19e 100644
569 --- a/drivers/md/linear.c
570 +++ b/drivers/md/linear.c
571 @@ -157,6 +157,8 @@ static linear_conf_t *linear_conf(mddev_t *mddev, int raid_disks)
573 min_spacing = conf->array_sectors / 2;
574 sector_div(min_spacing, PAGE_SIZE/sizeof(struct dev_info *));
575 + if (min_spacing == 0)
578 /* min_spacing is the minimum spacing that will fit the hash
579 * table in one PAGE. This may be much smaller than needed.
580 diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
581 index e34cd0e..941576d 100644
582 --- a/drivers/md/raid10.c
583 +++ b/drivers/md/raid10.c
584 @@ -1132,7 +1132,7 @@ static int raid10_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
588 - if (rdev->raid_disk)
589 + if (rdev->raid_disk >= 0)
590 first = last = rdev->raid_disk;
592 if (rdev->saved_raid_disk >= 0 &&
593 diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
594 index 044d84e..f7284b9 100644
595 --- a/drivers/mmc/core/core.c
596 +++ b/drivers/mmc/core/core.c
597 @@ -280,7 +280,11 @@ void mmc_set_data_timeout(struct mmc_data *data, const struct mmc_card *card)
598 (card->host->ios.clock / 1000);
600 if (data->flags & MMC_DATA_WRITE)
603 + * The limit is really 250 ms, but that is
604 + * insufficient for some crappy cards.
610 diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
611 index a972cc6..9e7a236 100644
612 --- a/drivers/mtd/chips/cfi_cmdset_0002.c
613 +++ b/drivers/mtd/chips/cfi_cmdset_0002.c
614 @@ -362,19 +362,6 @@ struct mtd_info *cfi_cmdset_0002(struct map_info *map, int primary)
615 /* Set the default CFI lock/unlock addresses */
616 cfi->addr_unlock1 = 0x555;
617 cfi->addr_unlock2 = 0x2aa;
618 - /* Modify the unlock address if we are in compatibility mode */
619 - if ( /* x16 in x8 mode */
620 - ((cfi->device_type == CFI_DEVICETYPE_X8) &&
621 - (cfi->cfiq->InterfaceDesc ==
622 - CFI_INTERFACE_X8_BY_X16_ASYNC)) ||
623 - /* x32 in x16 mode */
624 - ((cfi->device_type == CFI_DEVICETYPE_X16) &&
625 - (cfi->cfiq->InterfaceDesc ==
626 - CFI_INTERFACE_X16_BY_X32_ASYNC)))
628 - cfi->addr_unlock1 = 0xaaa;
629 - cfi->addr_unlock2 = 0x555;
633 else if (cfi->cfi_mode == CFI_MODE_JEDEC) {
634 diff --git a/drivers/mtd/chips/jedec_probe.c b/drivers/mtd/chips/jedec_probe.c
635 index f84ab61..2f3f2f7 100644
636 --- a/drivers/mtd/chips/jedec_probe.c
637 +++ b/drivers/mtd/chips/jedec_probe.c
638 @@ -1808,9 +1808,7 @@ static inline u32 jedec_read_mfr(struct map_info *map, uint32_t base,
639 * several first banks can contain 0x7f instead of actual ID
642 - uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8),
643 - cfi_interleave(cfi),
645 + uint32_t ofs = cfi_build_cmd_addr(0 + (bank << 8), map, cfi);
646 mask = (1 << (cfi->device_type * 8)) - 1;
647 result = map_read(map, base + ofs);
649 @@ -1824,7 +1822,7 @@ static inline u32 jedec_read_id(struct map_info *map, uint32_t base,
653 - u32 ofs = cfi_build_cmd_addr(1, cfi_interleave(cfi), cfi->device_type);
654 + u32 ofs = cfi_build_cmd_addr(1, map, cfi);
655 mask = (1 << (cfi->device_type * 8)) -1;
656 result = map_read(map, base + ofs);
657 return result.x[0] & mask;
658 @@ -2067,8 +2065,8 @@ static int jedec_probe_chip(struct map_info *map, __u32 base,
661 /* Ensure the unlock addresses we try stay inside the map */
662 - probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, cfi_interleave(cfi), cfi->device_type);
663 - probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, cfi_interleave(cfi), cfi->device_type);
664 + probe_offset1 = cfi_build_cmd_addr(cfi->addr_unlock1, map, cfi);
665 + probe_offset2 = cfi_build_cmd_addr(cfi->addr_unlock2, map, cfi);
666 if ( ((base + probe_offset1 + map_bankwidth(map)) >= map->size) ||
667 ((base + probe_offset2 + map_bankwidth(map)) >= map->size))
669 diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
670 index 0f6f974..39c17bb 100644
671 --- a/drivers/net/r8169.c
672 +++ b/drivers/net/r8169.c
673 @@ -370,8 +370,9 @@ struct ring_info {
677 - RTL_FEATURE_WOL = (1 << 0),
678 - RTL_FEATURE_MSI = (1 << 1),
679 + RTL_FEATURE_WOL = (1 << 0),
680 + RTL_FEATURE_MSI = (1 << 1),
681 + RTL_FEATURE_GMII = (1 << 2),
684 struct rtl8169_private {
685 @@ -406,13 +407,15 @@ struct rtl8169_private {
686 struct vlan_group *vlgrp;
688 int (*set_speed)(struct net_device *, u8 autoneg, u16 speed, u8 duplex);
689 - void (*get_settings)(struct net_device *, struct ethtool_cmd *);
690 + int (*get_settings)(struct net_device *, struct ethtool_cmd *);
691 void (*phy_reset_enable)(void __iomem *);
692 void (*hw_start)(struct net_device *);
693 unsigned int (*phy_reset_pending)(void __iomem *);
694 unsigned int (*link_ok)(void __iomem *);
695 struct delayed_work task;
698 + struct mii_if_info mii;
701 MODULE_AUTHOR("Realtek and the Linux r8169 crew <netdev@vger.kernel.org>");
702 @@ -482,6 +485,23 @@ static int mdio_read(void __iomem *ioaddr, int reg_addr)
706 +static void rtl_mdio_write(struct net_device *dev, int phy_id, int location,
709 + struct rtl8169_private *tp = netdev_priv(dev);
710 + void __iomem *ioaddr = tp->mmio_addr;
712 + mdio_write(ioaddr, location, val);
715 +static int rtl_mdio_read(struct net_device *dev, int phy_id, int location)
717 + struct rtl8169_private *tp = netdev_priv(dev);
718 + void __iomem *ioaddr = tp->mmio_addr;
720 + return mdio_read(ioaddr, location);
723 static void rtl8169_irq_mask_and_ack(void __iomem *ioaddr)
725 RTL_W16(IntrMask, 0x0000);
726 @@ -720,9 +740,13 @@ static int rtl8169_set_speed_xmii(struct net_device *dev,
728 auto_nego |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
730 - if ((tp->mac_version == RTL_GIGA_MAC_VER_12) ||
731 - (tp->mac_version == RTL_GIGA_MAC_VER_17)) {
732 - /* Vendor specific (0x1f) and reserved (0x0e) MII registers. */
733 + if ((tp->mac_version == RTL_GIGA_MAC_VER_11) ||
734 + (tp->mac_version == RTL_GIGA_MAC_VER_12) ||
735 + (tp->mac_version >= RTL_GIGA_MAC_VER_17)) {
738 + * Vendor specific (0x1f) and reserved (0x0e) MII registers.
740 mdio_write(ioaddr, 0x1f, 0x0000);
741 mdio_write(ioaddr, 0x0e, 0x0000);
743 @@ -850,7 +874,7 @@ static int rtl8169_rx_vlan_skb(struct rtl8169_private *tp, struct RxDesc *desc,
747 -static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
748 +static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
750 struct rtl8169_private *tp = netdev_priv(dev);
751 void __iomem *ioaddr = tp->mmio_addr;
752 @@ -867,65 +891,29 @@ static void rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd)
754 cmd->speed = SPEED_1000;
755 cmd->duplex = DUPLEX_FULL; /* Always set */
760 -static void rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
761 +static int rtl8169_gset_xmii(struct net_device *dev, struct ethtool_cmd *cmd)
763 struct rtl8169_private *tp = netdev_priv(dev);
764 - void __iomem *ioaddr = tp->mmio_addr;
767 - cmd->supported = SUPPORTED_10baseT_Half |
768 - SUPPORTED_10baseT_Full |
769 - SUPPORTED_100baseT_Half |
770 - SUPPORTED_100baseT_Full |
771 - SUPPORTED_1000baseT_Full |
772 - SUPPORTED_Autoneg |
776 - cmd->advertising = ADVERTISED_TP | ADVERTISED_Autoneg;
778 - if (tp->phy_auto_nego_reg & ADVERTISE_10HALF)
779 - cmd->advertising |= ADVERTISED_10baseT_Half;
780 - if (tp->phy_auto_nego_reg & ADVERTISE_10FULL)
781 - cmd->advertising |= ADVERTISED_10baseT_Full;
782 - if (tp->phy_auto_nego_reg & ADVERTISE_100HALF)
783 - cmd->advertising |= ADVERTISED_100baseT_Half;
784 - if (tp->phy_auto_nego_reg & ADVERTISE_100FULL)
785 - cmd->advertising |= ADVERTISED_100baseT_Full;
786 - if (tp->phy_1000_ctrl_reg & ADVERTISE_1000FULL)
787 - cmd->advertising |= ADVERTISED_1000baseT_Full;
789 - status = RTL_R8(PHYstatus);
791 - if (status & _1000bpsF)
792 - cmd->speed = SPEED_1000;
793 - else if (status & _100bps)
794 - cmd->speed = SPEED_100;
795 - else if (status & _10bps)
796 - cmd->speed = SPEED_10;
798 - if (status & TxFlowCtrl)
799 - cmd->advertising |= ADVERTISED_Asym_Pause;
800 - if (status & RxFlowCtrl)
801 - cmd->advertising |= ADVERTISED_Pause;
803 - cmd->duplex = ((status & _1000bpsF) || (status & FullDup)) ?
804 - DUPLEX_FULL : DUPLEX_HALF;
806 + return mii_ethtool_gset(&tp->mii, cmd);
809 static int rtl8169_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
811 struct rtl8169_private *tp = netdev_priv(dev);
815 spin_lock_irqsave(&tp->lock, flags);
817 - tp->get_settings(dev, cmd);
818 + rc = tp->get_settings(dev, cmd);
820 spin_unlock_irqrestore(&tp->lock, flags);
825 static void rtl8169_get_regs(struct net_device *dev, struct ethtool_regs *regs,
826 @@ -1513,7 +1501,7 @@ static const struct rtl_cfg_info {
832 } rtl_cfg_infos [] = {
834 .hw_start = rtl_hw_start_8169,
835 @@ -1522,7 +1510,7 @@ static const struct rtl_cfg_info {
836 .intr_event = SYSErr | LinkChg | RxOverflow |
837 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
838 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
840 + .features = RTL_FEATURE_GMII
843 .hw_start = rtl_hw_start_8168,
844 @@ -1531,7 +1519,7 @@ static const struct rtl_cfg_info {
845 .intr_event = SYSErr | LinkChg | RxOverflow |
846 TxErr | TxOK | RxOK | RxErr,
847 .napi_event = TxErr | TxOK | RxOK | RxOverflow,
848 - .msi = RTL_FEATURE_MSI
849 + .features = RTL_FEATURE_GMII | RTL_FEATURE_MSI
852 .hw_start = rtl_hw_start_8101,
853 @@ -1540,7 +1528,7 @@ static const struct rtl_cfg_info {
854 .intr_event = SYSErr | LinkChg | RxOverflow | PCSTimeout |
855 RxFIFOOver | TxErr | TxOK | RxOK | RxErr,
856 .napi_event = RxFIFOOver | TxErr | TxOK | RxOK | RxOverflow,
857 - .msi = RTL_FEATURE_MSI
858 + .features = RTL_FEATURE_MSI
862 @@ -1552,7 +1540,7 @@ static unsigned rtl_try_msi(struct pci_dev *pdev, void __iomem *ioaddr,
865 cfg2 = RTL_R8(Config2) & ~MSIEnable;
867 + if (cfg->features & RTL_FEATURE_MSI) {
868 if (pci_enable_msi(pdev)) {
869 dev_info(&pdev->dev, "no MSI. Back to INTx.\n");
871 @@ -1578,6 +1566,7 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
872 const struct rtl_cfg_info *cfg = rtl_cfg_infos + ent->driver_data;
873 const unsigned int region = cfg->region;
874 struct rtl8169_private *tp;
875 + struct mii_if_info *mii;
876 struct net_device *dev;
877 void __iomem *ioaddr;
879 @@ -1602,6 +1591,14 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
881 tp->msg_enable = netif_msg_init(debug.msg_enable, R8169_MSG_DEFAULT);
885 + mii->mdio_read = rtl_mdio_read;
886 + mii->mdio_write = rtl_mdio_write;
887 + mii->phy_id_mask = 0x1f;
888 + mii->reg_num_mask = 0x1f;
889 + mii->supports_gmii = !!(cfg->features & RTL_FEATURE_GMII);
891 /* enable device (incl. PCI PM wakeup and hotplug setup) */
892 rc = pci_enable_device(pdev);
894 @@ -2099,8 +2096,6 @@ static void rtl_hw_start_8168(struct net_device *dev)
898 - RTL_W32(RxMissed, 0);
900 rtl_set_rx_mode(dev);
902 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
903 @@ -2143,8 +2138,6 @@ static void rtl_hw_start_8101(struct net_device *dev)
907 - RTL_W32(RxMissed, 0);
909 rtl_set_rx_mode(dev);
911 RTL_W8(ChipCmd, CmdTxEnb | CmdRxEnb);
912 @@ -2922,6 +2915,17 @@ static int rtl8169_poll(struct napi_struct *napi, int budget)
916 +static void rtl8169_rx_missed(struct net_device *dev, void __iomem *ioaddr)
918 + struct rtl8169_private *tp = netdev_priv(dev);
920 + if (tp->mac_version > RTL_GIGA_MAC_VER_06)
923 + dev->stats.rx_missed_errors += (RTL_R32(RxMissed) & 0xffffff);
924 + RTL_W32(RxMissed, 0);
927 static void rtl8169_down(struct net_device *dev)
929 struct rtl8169_private *tp = netdev_priv(dev);
930 @@ -2939,9 +2943,7 @@ core_down:
932 rtl8169_asic_down(ioaddr);
934 - /* Update the error counts. */
935 - dev->stats.rx_missed_errors += RTL_R32(RxMissed);
936 - RTL_W32(RxMissed, 0);
937 + rtl8169_rx_missed(dev, ioaddr);
939 spin_unlock_irq(&tp->lock);
941 @@ -3063,8 +3065,7 @@ static struct net_device_stats *rtl8169_get_stats(struct net_device *dev)
943 if (netif_running(dev)) {
944 spin_lock_irqsave(&tp->lock, flags);
945 - dev->stats.rx_missed_errors += RTL_R32(RxMissed);
946 - RTL_W32(RxMissed, 0);
947 + rtl8169_rx_missed(dev, ioaddr);
948 spin_unlock_irqrestore(&tp->lock, flags);
951 @@ -3089,8 +3090,7 @@ static int rtl8169_suspend(struct pci_dev *pdev, pm_message_t state)
953 rtl8169_asic_down(ioaddr);
955 - dev->stats.rx_missed_errors += RTL_R32(RxMissed);
956 - RTL_W32(RxMissed, 0);
957 + rtl8169_rx_missed(dev, ioaddr);
959 spin_unlock_irq(&tp->lock);
961 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
962 index 17d4f31..c479ee2 100644
963 --- a/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
964 +++ b/drivers/net/wireless/iwlwifi/iwl-5000-hw.h
965 @@ -129,6 +129,13 @@ struct iwl5000_shared {
967 } __attribute__ ((packed));
969 +/* calibrations defined for 5000 */
970 +/* defines the order in which results should be sent to the runtime uCode */
971 +enum iwl5000_calib {
973 + IWL5000_CALIB_TX_IQ,
974 + IWL5000_CALIB_TX_IQ_PERD,
977 #endif /* __iwl_5000_hw_h__ */
979 diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
980 index b08036a..79ff288 100644
981 --- a/drivers/net/wireless/iwlwifi/iwl-5000.c
982 +++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
983 @@ -445,48 +445,6 @@ static int iwl5000_send_Xtal_calib(struct iwl_priv *priv)
984 sizeof(cal_cmd), &cal_cmd);
987 -static int iwl5000_send_calib_results(struct iwl_priv *priv)
991 - struct iwl_host_cmd hcmd = {
992 - .id = REPLY_PHY_CALIBRATION_CMD,
993 - .meta.flags = CMD_SIZE_HUGE,
996 - if (priv->calib_results.lo_res) {
997 - hcmd.len = priv->calib_results.lo_res_len;
998 - hcmd.data = priv->calib_results.lo_res;
999 - ret = iwl_send_cmd_sync(priv, &hcmd);
1005 - if (priv->calib_results.tx_iq_res) {
1006 - hcmd.len = priv->calib_results.tx_iq_res_len;
1007 - hcmd.data = priv->calib_results.tx_iq_res;
1008 - ret = iwl_send_cmd_sync(priv, &hcmd);
1014 - if (priv->calib_results.tx_iq_perd_res) {
1015 - hcmd.len = priv->calib_results.tx_iq_perd_res_len;
1016 - hcmd.data = priv->calib_results.tx_iq_perd_res;
1017 - ret = iwl_send_cmd_sync(priv, &hcmd);
1025 - IWL_ERROR("Error %d\n", ret);
1029 static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
1031 struct iwl5000_calib_cfg_cmd calib_cfg_cmd;
1032 @@ -511,33 +469,30 @@ static void iwl5000_rx_calib_result(struct iwl_priv *priv,
1033 struct iwl_rx_packet *pkt = (void *)rxb->skb->data;
1034 struct iwl5000_calib_hdr *hdr = (struct iwl5000_calib_hdr *)pkt->u.raw;
1035 int len = le32_to_cpu(pkt->len) & FH_RSCSR_FRAME_SIZE_MSK;
1037 - iwl_free_calib_results(priv);
1040 /* reduce the size of the length field itself */
1043 + /* Define the order in which the results will be sent to the runtime
1044 + * uCode. iwl_send_calib_results sends them in a row according to their
1045 + * index. We sort them here */
1046 switch (hdr->op_code) {
1047 case IWL5000_PHY_CALIBRATE_LO_CMD:
1048 - priv->calib_results.lo_res = kzalloc(len, GFP_ATOMIC);
1049 - priv->calib_results.lo_res_len = len;
1050 - memcpy(priv->calib_results.lo_res, pkt->u.raw, len);
1051 + index = IWL5000_CALIB_LO;
1053 case IWL5000_PHY_CALIBRATE_TX_IQ_CMD:
1054 - priv->calib_results.tx_iq_res = kzalloc(len, GFP_ATOMIC);
1055 - priv->calib_results.tx_iq_res_len = len;
1056 - memcpy(priv->calib_results.tx_iq_res, pkt->u.raw, len);
1057 + index = IWL5000_CALIB_TX_IQ;
1059 case IWL5000_PHY_CALIBRATE_TX_IQ_PERD_CMD:
1060 - priv->calib_results.tx_iq_perd_res = kzalloc(len, GFP_ATOMIC);
1061 - priv->calib_results.tx_iq_perd_res_len = len;
1062 - memcpy(priv->calib_results.tx_iq_perd_res, pkt->u.raw, len);
1063 + index = IWL5000_CALIB_TX_IQ_PERD;
1066 IWL_ERROR("Unknown calibration notification %d\n",
1070 + iwl_calib_set(&priv->calib_results[index], pkt->u.raw, len);
1073 static void iwl5000_rx_calib_complete(struct iwl_priv *priv,
1074 @@ -832,7 +787,7 @@ static int iwl5000_alive_notify(struct iwl_priv *priv)
1075 iwl5000_send_Xtal_calib(priv);
1077 if (priv->ucode_type == UCODE_RT)
1078 - iwl5000_send_calib_results(priv);
1079 + iwl_send_calib_results(priv);
1083 diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
1084 index e01f048..72a6743 100644
1085 --- a/drivers/net/wireless/iwlwifi/iwl-agn.c
1086 +++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
1087 @@ -2090,7 +2090,6 @@ static void iwl_alive_start(struct iwl_priv *priv)
1088 iwl4965_error_recovery(priv);
1090 iwl_power_update_mode(priv, 1);
1091 - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
1093 if (test_and_clear_bit(STATUS_MODE_PENDING, &priv->status))
1094 iwl4965_set_mode(priv, priv->iw_mode);
1095 @@ -2342,6 +2341,7 @@ static void iwl_bg_alive_start(struct work_struct *data)
1096 mutex_lock(&priv->mutex);
1097 iwl_alive_start(priv);
1098 mutex_unlock(&priv->mutex);
1099 + ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
1102 static void iwl4965_bg_rf_kill(struct work_struct *work)
1103 @@ -2486,6 +2486,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
1104 if (!priv->vif || !priv->is_open)
1107 + iwl_power_cancel_timeout(priv);
1108 iwl_scan_cancel_timeout(priv, 200);
1110 conf = ieee80211_get_hw_conf(priv->hw);
1111 @@ -2503,8 +2504,7 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
1113 priv->staging_rxon.filter_flags |= RXON_FILTER_ASSOC_MSK;
1115 - if (priv->current_ht_config.is_ht)
1116 - iwl_set_rxon_ht(priv, &priv->current_ht_config);
1117 + iwl_set_rxon_ht(priv, &priv->current_ht_config);
1119 iwl_set_rxon_chain(priv);
1120 priv->staging_rxon.assoc_id = cpu_to_le16(priv->assoc_id);
1121 @@ -2550,10 +2550,6 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
1125 - /* Enable Rx differential gain and sensitivity calibrations */
1126 - iwl_chain_noise_reset(priv);
1127 - priv->start_calib = 1;
1129 if (priv->iw_mode == IEEE80211_IF_TYPE_IBSS)
1130 priv->assoc_station_added = 1;
1132 @@ -2561,7 +2557,12 @@ static void iwl4965_post_associate(struct iwl_priv *priv)
1133 iwl_activate_qos(priv, 0);
1134 spin_unlock_irqrestore(&priv->lock, flags);
1136 - iwl_power_update_mode(priv, 0);
1137 + iwl_power_enable_management(priv);
1139 + /* Enable Rx differential gain and sensitivity calibrations */
1140 + iwl_chain_noise_reset(priv);
1141 + priv->start_calib = 1;
1143 /* we have just associated, don't start scan too early */
1144 priv->next_scan_jiffies = jiffies + IWL_DELAY_NEXT_SCAN;
1146 @@ -3212,18 +3213,26 @@ static int iwl4965_mac_hw_scan(struct ieee80211_hw *hw, u8 *ssid, size_t len)
1150 - /* we don't schedule scan within next_scan_jiffies period */
1151 + /* We don't schedule scan within next_scan_jiffies period.
1152 + * Avoid scanning during possible EAPOL exchange, return
1153 + * success immediately.
1155 if (priv->next_scan_jiffies &&
1156 - time_after(priv->next_scan_jiffies, jiffies)) {
1158 + time_after(priv->next_scan_jiffies, jiffies)) {
1159 + IWL_DEBUG_SCAN("scan rejected: within next scan period\n");
1160 + queue_work(priv->workqueue, &priv->scan_completed);
1164 /* if we just finished scan ask for delay */
1165 - if (priv->last_scan_jiffies && time_after(priv->last_scan_jiffies +
1166 - IWL_DELAY_NEXT_SCAN, jiffies)) {
1168 + if (iwl_is_associated(priv) && priv->last_scan_jiffies &&
1169 + time_after(priv->last_scan_jiffies + IWL_DELAY_NEXT_SCAN, jiffies)) {
1170 + IWL_DEBUG_SCAN("scan rejected: within previous scan period\n");
1171 + queue_work(priv->workqueue, &priv->scan_completed);
1177 IWL_DEBUG_SCAN("direct scan for %s [%d]\n ",
1178 iwl_escape_essid(ssid, len), (int)len);
1179 @@ -3546,6 +3555,16 @@ static void iwl4965_mac_reset_tsf(struct ieee80211_hw *hw)
1180 /* Per mac80211.h: This is only used in IBSS mode... */
1181 if (priv->iw_mode != IEEE80211_IF_TYPE_IBSS) {
1183 + /* switch to CAM during association period.
1184 + * the ucode will block any association/authentication
1185 + * frome during assiciation period if it can not hear
1186 + * the AP because of PM. the timer enable PM back is
1187 + * association do not complete
1189 + if (priv->hw->conf.channel->flags & (IEEE80211_CHAN_PASSIVE_SCAN |
1190 + IEEE80211_CHAN_RADAR))
1191 + iwl_power_disable_management(priv, 3000);
1193 IWL_DEBUG_MAC80211("leave - not in IBSS\n");
1194 mutex_unlock(&priv->mutex);
1196 @@ -4083,6 +4102,7 @@ static void iwl_setup_deferred_work(struct iwl_priv *priv)
1197 /* FIXME : remove when resolved PENDING */
1198 INIT_WORK(&priv->scan_completed, iwl_bg_scan_completed);
1199 iwl_setup_scan_deferred_work(priv);
1200 + iwl_setup_power_deferred_work(priv);
1202 if (priv->cfg->ops->lib->setup_deferred_work)
1203 priv->cfg->ops->lib->setup_deferred_work(priv);
1204 @@ -4102,6 +4122,7 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
1206 cancel_delayed_work_sync(&priv->init_alive_start);
1207 cancel_delayed_work(&priv->scan_check);
1208 + cancel_delayed_work_sync(&priv->set_power_save);
1209 cancel_delayed_work(&priv->alive_start);
1210 cancel_work_sync(&priv->beacon_update);
1211 del_timer_sync(&priv->statistics_periodic);
1212 @@ -4204,13 +4225,13 @@ static int iwl4965_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
1214 pci_set_master(pdev);
1216 - err = pci_set_dma_mask(pdev, DMA_64BIT_MASK);
1217 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
1219 - err = pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK);
1220 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
1222 - err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
1223 + err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
1225 - err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
1226 + err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1227 /* both attempts failed: */
1229 printk(KERN_WARNING "%s: No suitable DMA available.\n",
1230 diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
1231 index ef49440..35fb4a4 100644
1232 --- a/drivers/net/wireless/iwlwifi/iwl-calib.c
1233 +++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
1235 #include "iwl-core.h"
1236 #include "iwl-calib.h"
1238 +/*****************************************************************************
1239 + * INIT calibrations framework
1240 + *****************************************************************************/
1242 + int iwl_send_calib_results(struct iwl_priv *priv)
1247 + struct iwl_host_cmd hcmd = {
1248 + .id = REPLY_PHY_CALIBRATION_CMD,
1249 + .meta.flags = CMD_SIZE_HUGE,
1252 + for (i = 0; i < IWL_CALIB_MAX; i++)
1253 + if (priv->calib_results[i].buf) {
1254 + hcmd.len = priv->calib_results[i].buf_len;
1255 + hcmd.data = priv->calib_results[i].buf;
1256 + ret = iwl_send_cmd_sync(priv, &hcmd);
1263 + IWL_ERROR("Error %d iteration %d\n", ret, i);
1266 +EXPORT_SYMBOL(iwl_send_calib_results);
1268 +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len)
1270 + if (res->buf_len != len) {
1272 + res->buf = kzalloc(len, GFP_ATOMIC);
1274 + if (unlikely(res->buf == NULL))
1277 + res->buf_len = len;
1278 + memcpy(res->buf, buf, len);
1281 +EXPORT_SYMBOL(iwl_calib_set);
1283 +void iwl_calib_free_results(struct iwl_priv *priv)
1287 + for (i = 0; i < IWL_CALIB_MAX; i++) {
1288 + kfree(priv->calib_results[i].buf);
1289 + priv->calib_results[i].buf = NULL;
1290 + priv->calib_results[i].buf_len = 0;
1294 +/*****************************************************************************
1295 + * RUNTIME calibrations framework
1296 + *****************************************************************************/
1298 /* "false alarms" are signals that our DSP tries to lock onto,
1299 * but then determines that they are either noise, or transmissions
1300 * from a distant wireless network (also "noise", really) that get
1301 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
1302 index 80f2f84..1383fd1 100644
1303 --- a/drivers/net/wireless/iwlwifi/iwl-core.c
1304 +++ b/drivers/net/wireless/iwlwifi/iwl-core.c
1305 @@ -646,8 +646,14 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
1306 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
1309 - if (!ht_info->is_ht)
1310 + if (!ht_info->is_ht) {
1311 + rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MIXED_MSK |
1312 + RXON_FLG_CHANNEL_MODE_PURE_40_MSK |
1313 + RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
1314 + RXON_FLG_FAT_PROT_MSK |
1315 + RXON_FLG_HT_PROT_MSK);
1319 /* Set up channel bandwidth: 20 MHz only, or 20/40 mixed if fat ok */
1320 if (iwl_is_fat_tx_allowed(priv, NULL))
1321 @@ -950,22 +956,6 @@ err:
1323 EXPORT_SYMBOL(iwl_init_drv);
1325 -void iwl_free_calib_results(struct iwl_priv *priv)
1327 - kfree(priv->calib_results.lo_res);
1328 - priv->calib_results.lo_res = NULL;
1329 - priv->calib_results.lo_res_len = 0;
1331 - kfree(priv->calib_results.tx_iq_res);
1332 - priv->calib_results.tx_iq_res = NULL;
1333 - priv->calib_results.tx_iq_res_len = 0;
1335 - kfree(priv->calib_results.tx_iq_perd_res);
1336 - priv->calib_results.tx_iq_perd_res = NULL;
1337 - priv->calib_results.tx_iq_perd_res_len = 0;
1339 -EXPORT_SYMBOL(iwl_free_calib_results);
1341 int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1344 @@ -993,10 +983,9 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1346 EXPORT_SYMBOL(iwl_set_tx_power);
1349 void iwl_uninit_drv(struct iwl_priv *priv)
1351 - iwl_free_calib_results(priv);
1352 + iwl_calib_free_results(priv);
1353 iwlcore_free_geos(priv);
1354 iwl_free_channel_map(priv);
1356 diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
1357 index 64f139e..51b36b1 100644
1358 --- a/drivers/net/wireless/iwlwifi/iwl-core.h
1359 +++ b/drivers/net/wireless/iwlwifi/iwl-core.h
1360 @@ -186,7 +186,6 @@ struct ieee80211_hw *iwl_alloc_all(struct iwl_cfg *cfg,
1361 void iwl_hw_detect(struct iwl_priv *priv);
1363 void iwl_clear_stations_table(struct iwl_priv *priv);
1364 -void iwl_free_calib_results(struct iwl_priv *priv);
1365 void iwl_reset_qos(struct iwl_priv *priv);
1366 void iwl_set_rxon_chain(struct iwl_priv *priv);
1367 int iwl_set_rxon_channel(struct iwl_priv *priv,
1368 @@ -291,6 +290,13 @@ int iwl_scan_initiate(struct iwl_priv *priv);
1369 void iwl_setup_rx_scan_handlers(struct iwl_priv *priv);
1370 void iwl_setup_scan_deferred_work(struct iwl_priv *priv);
1372 +/*******************************************************************************
1373 + * Calibrations - implemented in iwl-calib.c
1374 + ******************************************************************************/
1375 +int iwl_send_calib_results(struct iwl_priv *priv);
1376 +int iwl_calib_set(struct iwl_calib_result *res, const u8 *buf, int len);
1377 +void iwl_calib_free_results(struct iwl_priv *priv);
1379 /*****************************************************
1380 * S e n d i n g H o s t C o m m a n d s *
1381 *****************************************************/
1382 diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
1383 index cdfb343..09bdf8e 100644
1384 --- a/drivers/net/wireless/iwlwifi/iwl-dev.h
1385 +++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
1386 @@ -745,13 +745,10 @@ struct statistics_general_data {
1387 u32 beacon_energy_c;
1390 -struct iwl_calib_results {
1392 - void *tx_iq_perd_res;
1394 - u32 tx_iq_res_len;
1395 - u32 tx_iq_perd_res_len;
1397 +/* Opaque calibration results */
1398 +struct iwl_calib_result {
1404 @@ -813,6 +810,7 @@ enum {
1407 #define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
1408 +#define IWL_CALIB_MAX 3
1412 @@ -857,7 +855,7 @@ struct iwl_priv {
1413 s32 last_temperature;
1415 /* init calibration results */
1416 - struct iwl_calib_results calib_results;
1417 + struct iwl_calib_result calib_results[IWL_CALIB_MAX];
1419 /* Scan related variables */
1420 unsigned long last_scan_jiffies;
1421 @@ -1047,6 +1045,7 @@ struct iwl_priv {
1423 struct tasklet_struct irq_tasklet;
1425 + struct delayed_work set_power_save;
1426 struct delayed_work init_alive_start;
1427 struct delayed_work alive_start;
1428 struct delayed_work scan_check;
1429 diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
1430 index a099c9e..ae60bfd 100644
1431 --- a/drivers/net/wireless/iwlwifi/iwl-power.c
1432 +++ b/drivers/net/wireless/iwlwifi/iwl-power.c
1433 @@ -324,7 +324,7 @@ EXPORT_SYMBOL(iwl_power_update_mode);
1434 * this will be usefull for rate scale to disable PM during heavy
1437 -int iwl_power_disable_management(struct iwl_priv *priv)
1438 +int iwl_power_disable_management(struct iwl_priv *priv, u32 ms)
1442 @@ -337,6 +337,11 @@ int iwl_power_disable_management(struct iwl_priv *priv)
1443 ret = iwl_power_update_mode(priv, 0);
1444 priv->power_data.power_disabled = 1;
1445 priv->power_data.user_power_setting = prev_mode;
1446 + cancel_delayed_work(&priv->set_power_save);
1448 + queue_delayed_work(priv->workqueue, &priv->set_power_save,
1449 + msecs_to_jiffies(ms));
1454 @@ -431,3 +436,35 @@ int iwl_power_temperature_change(struct iwl_priv *priv)
1457 EXPORT_SYMBOL(iwl_power_temperature_change);
1459 +static void iwl_bg_set_power_save(struct work_struct *work)
1461 + struct iwl_priv *priv = container_of(work,
1462 + struct iwl_priv, set_power_save.work);
1463 + IWL_DEBUG(IWL_DL_STATE, "update power\n");
1465 + if (test_bit(STATUS_EXIT_PENDING, &priv->status))
1468 + mutex_lock(&priv->mutex);
1470 + /* on starting association we disable power managment
1471 + * until association, if association failed then this
1472 + * timer will expire and enable PM again.
1474 + if (!iwl_is_associated(priv))
1475 + iwl_power_enable_management(priv);
1477 + mutex_unlock(&priv->mutex);
1479 +void iwl_setup_power_deferred_work(struct iwl_priv *priv)
1481 + INIT_DELAYED_WORK(&priv->set_power_save, iwl_bg_set_power_save);
1483 +EXPORT_SYMBOL(iwl_setup_power_deferred_work);
1485 +void iwl_power_cancel_timeout(struct iwl_priv *priv)
1487 + cancel_delayed_work(&priv->set_power_save);
1489 +EXPORT_SYMBOL(iwl_power_cancel_timeout);
1490 diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
1491 index abcbbf9..aa99f36 100644
1492 --- a/drivers/net/wireless/iwlwifi/iwl-power.h
1493 +++ b/drivers/net/wireless/iwlwifi/iwl-power.h
1494 @@ -78,8 +78,10 @@ struct iwl_power_mgr {
1495 u8 power_disabled; /* flag to disable using power saving level */
1498 +void iwl_setup_power_deferred_work(struct iwl_priv *priv);
1499 +void iwl_power_cancel_timeout(struct iwl_priv *priv);
1500 int iwl_power_update_mode(struct iwl_priv *priv, u8 refresh);
1501 -int iwl_power_disable_management(struct iwl_priv *priv);
1502 +int iwl_power_disable_management(struct iwl_priv *priv, u32 ms);
1503 int iwl_power_enable_management(struct iwl_priv *priv);
1504 int iwl_power_set_user_mode(struct iwl_priv *priv, u16 mode);
1505 int iwl_power_set_system_mode(struct iwl_priv *priv, u16 mode);
1506 diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
1507 index 6c8ac3a..3a90a67 100644
1508 --- a/drivers/net/wireless/iwlwifi/iwl-scan.c
1509 +++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
1510 @@ -464,11 +464,6 @@ void iwl_init_scan_params(struct iwl_priv *priv)
1512 int iwl_scan_initiate(struct iwl_priv *priv)
1514 - if (priv->iw_mode == IEEE80211_IF_TYPE_AP) {
1515 - IWL_ERROR("APs don't scan.\n");
1519 if (!iwl_is_ready_rf(priv)) {
1520 IWL_DEBUG_SCAN("Aborting scan due to not ready.\n");
1522 @@ -480,8 +475,7 @@ int iwl_scan_initiate(struct iwl_priv *priv)
1525 if (test_bit(STATUS_SCAN_ABORTING, &priv->status)) {
1526 - IWL_DEBUG_SCAN("Scan request while abort pending. "
1528 + IWL_DEBUG_SCAN("Scan request while abort pending\n");
1532 diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
1533 index b775d5b..752e7f8 100644
1534 --- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
1535 +++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
1536 @@ -5761,7 +5761,6 @@ static void iwl3945_alive_start(struct iwl3945_priv *priv)
1537 if (priv->error_recovering)
1538 iwl3945_error_recovery(priv);
1540 - ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
1544 @@ -6006,6 +6005,7 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
1545 mutex_lock(&priv->mutex);
1546 iwl3945_alive_start(priv);
1547 mutex_unlock(&priv->mutex);
1548 + ieee80211_notify_mac(priv->hw, IEEE80211_NOTIFY_RE_ASSOC);
1551 static void iwl3945_bg_rf_kill(struct work_struct *work)
1552 @@ -6259,6 +6259,11 @@ static void iwl3945_bg_request_scan(struct work_struct *data)
1554 (void *)&scan->data[le16_to_cpu(scan->tx_cmd.len)]);
1556 + if (scan->channel_count == 0) {
1557 + IWL_DEBUG_SCAN("channel count %d\n", scan->channel_count);
1561 cmd.len += le16_to_cpu(scan->tx_cmd.len) +
1562 scan->channel_count * sizeof(struct iwl3945_scan_channel);
1564 diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
1565 index a60ae86..a3ccd8c 100644
1566 --- a/drivers/net/wireless/zd1211rw/zd_usb.c
1567 +++ b/drivers/net/wireless/zd1211rw/zd_usb.c
1568 @@ -61,6 +61,7 @@ static struct usb_device_id usb_ids[] = {
1569 { USB_DEVICE(0x0105, 0x145f), .driver_info = DEVICE_ZD1211 },
1571 { USB_DEVICE(0x0ace, 0x1215), .driver_info = DEVICE_ZD1211B },
1572 + { USB_DEVICE(0x0ace, 0xb215), .driver_info = DEVICE_ZD1211B },
1573 { USB_DEVICE(0x157e, 0x300d), .driver_info = DEVICE_ZD1211B },
1574 { USB_DEVICE(0x079b, 0x0062), .driver_info = DEVICE_ZD1211B },
1575 { USB_DEVICE(0x1582, 0x6003), .driver_info = DEVICE_ZD1211B },
1576 @@ -82,6 +83,7 @@ static struct usb_device_id usb_ids[] = {
1577 { USB_DEVICE(0x0cde, 0x001a), .driver_info = DEVICE_ZD1211B },
1578 { USB_DEVICE(0x0586, 0x340a), .driver_info = DEVICE_ZD1211B },
1579 { USB_DEVICE(0x0471, 0x1237), .driver_info = DEVICE_ZD1211B },
1580 + { USB_DEVICE(0x07fa, 0x1196), .driver_info = DEVICE_ZD1211B },
1581 /* "Driverless" devices that need ejecting */
1582 { USB_DEVICE(0x0ace, 0x2011), .driver_info = DEVICE_INSTALLER },
1583 { USB_DEVICE(0x0ace, 0x20ff), .driver_info = DEVICE_INSTALLER },
1584 diff --git a/fs/ext3/super.c b/fs/ext3/super.c
1585 index f38a5af..810bf7c 100644
1586 --- a/fs/ext3/super.c
1587 +++ b/fs/ext3/super.c
1588 @@ -2365,13 +2365,12 @@ static void ext3_write_super (struct super_block * sb)
1590 static int ext3_sync_fs(struct super_block *sb, int wait)
1595 - if (journal_start_commit(EXT3_SB(sb)->s_journal, &target)) {
1597 - log_wait_commit(EXT3_SB(sb)->s_journal, target);
1600 + ext3_force_commit(sb);
1602 + journal_start_commit(EXT3_SB(sb)->s_journal, NULL);
1607 diff --git a/fs/hfs/catalog.c b/fs/hfs/catalog.c
1608 index ba85157..6d98f11 100644
1609 --- a/fs/hfs/catalog.c
1610 +++ b/fs/hfs/catalog.c
1611 @@ -190,6 +190,10 @@ int hfs_cat_find_brec(struct super_block *sb, u32 cnid,
1613 fd->search_key->cat.ParID = rec.thread.ParID;
1614 len = fd->search_key->cat.CName.len = rec.thread.CName.len;
1615 + if (len > HFS_NAMELEN) {
1616 + printk(KERN_ERR "hfs: bad catalog namelength\n");
1619 memcpy(fd->search_key->cat.CName.name, rec.thread.CName.name, len);
1620 return hfs_brec_find(fd);
1622 diff --git a/fs/jffs2/background.c b/fs/jffs2/background.c
1623 index 8adebd3..0fd792b 100644
1624 --- a/fs/jffs2/background.c
1625 +++ b/fs/jffs2/background.c
1626 @@ -85,15 +85,15 @@ static int jffs2_garbage_collect_thread(void *_c)
1628 allow_signal(SIGHUP);
1630 + spin_lock(&c->erase_completion_lock);
1631 if (!jffs2_thread_should_wake(c)) {
1632 set_current_state (TASK_INTERRUPTIBLE);
1633 + spin_unlock(&c->erase_completion_lock);
1634 D1(printk(KERN_DEBUG "jffs2_garbage_collect_thread sleeping...\n"));
1635 - /* Yes, there's a race here; we checked jffs2_thread_should_wake()
1636 - before setting current->state to TASK_INTERRUPTIBLE. But it doesn't
1637 - matter - We don't care if we miss a wakeup, because the GC thread
1638 - is only an optimisation anyway. */
1642 + spin_unlock(&c->erase_completion_lock);
1645 /* This thread is purely an optimisation. But if it runs when
1646 other things could be running, it actually makes things a
1647 diff --git a/fs/jffs2/compr_lzo.c b/fs/jffs2/compr_lzo.c
1648 index 47b0457..90cb60d 100644
1649 --- a/fs/jffs2/compr_lzo.c
1650 +++ b/fs/jffs2/compr_lzo.c
1653 static void *lzo_mem;
1654 static void *lzo_compress_buf;
1655 -static DEFINE_MUTEX(deflate_mutex);
1656 +static DEFINE_MUTEX(deflate_mutex); /* for lzo_mem and lzo_compress_buf */
1658 static void free_workspace(void)
1660 @@ -49,18 +49,21 @@ static int jffs2_lzo_compress(unsigned char *data_in, unsigned char *cpage_out,
1662 mutex_lock(&deflate_mutex);
1663 ret = lzo1x_1_compress(data_in, *sourcelen, lzo_compress_buf, &compress_size, lzo_mem);
1664 - mutex_unlock(&deflate_mutex);
1666 if (ret != LZO_E_OK)
1670 if (compress_size > *dstlen)
1674 memcpy(cpage_out, lzo_compress_buf, compress_size);
1675 - *dstlen = compress_size;
1676 + mutex_unlock(&deflate_mutex);
1678 + *dstlen = compress_size;
1682 + mutex_unlock(&deflate_mutex);
1686 static int jffs2_lzo_decompress(unsigned char *data_in, unsigned char *cpage_out,
1687 diff --git a/include/asm-generic/memory_model.h b/include/asm-generic/memory_model.h
1688 index ae060c6..18546d8 100644
1689 --- a/include/asm-generic/memory_model.h
1690 +++ b/include/asm-generic/memory_model.h
1693 #define __pfn_to_page(pfn) \
1694 ({ unsigned long __pfn = (pfn); \
1695 - unsigned long __nid = arch_pfn_to_nid(pfn); \
1696 + unsigned long __nid = arch_pfn_to_nid(__pfn); \
1697 NODE_DATA(__nid)->node_mem_map + arch_local_page_offset(__pfn, __nid);\
1700 diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
1701 index d6fb115..3a16bea 100644
1702 --- a/include/linux/mtd/cfi.h
1703 +++ b/include/linux/mtd/cfi.h
1704 @@ -281,9 +281,25 @@ struct cfi_private {
1706 * Returns the command address according to the given geometry.
1708 -static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs, int interleave, int type)
1709 +static inline uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
1710 + struct map_info *map, struct cfi_private *cfi)
1712 - return (cmd_ofs * type) * interleave;
1713 + unsigned bankwidth = map_bankwidth(map);
1714 + unsigned interleave = cfi_interleave(cfi);
1715 + unsigned type = cfi->device_type;
1718 + addr = (cmd_ofs * type) * interleave;
1720 + /* Modify the unlock address if we are in compatiblity mode.
1721 + * For 16bit devices on 8 bit busses
1722 + * and 32bit devices on 16 bit busses
1723 + * set the low bit of the alternating bit sequence of the address.
1725 + if (((type * interleave) > bankwidth) && ((uint8_t)cmd_ofs == 0xaa))
1726 + addr |= (type >> 1)*interleave;
1732 @@ -429,7 +445,7 @@ static inline uint32_t cfi_send_gen_cmd(u_char cmd, uint32_t cmd_addr, uint32_t
1733 int type, map_word *prev_val)
1736 - uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, cfi_interleave(cfi), type);
1737 + uint32_t addr = base + cfi_build_cmd_addr(cmd_addr, map, cfi);
1739 val = cfi_build_cmd(cmd, map, cfi);
1741 diff --git a/include/net/af_unix.h b/include/net/af_unix.h
1742 index 7dd29b7..c29ff1d 100644
1743 --- a/include/net/af_unix.h
1744 +++ b/include/net/af_unix.h
1745 @@ -54,6 +54,7 @@ struct unix_sock {
1746 atomic_long_t inflight;
1748 unsigned int gc_candidate : 1;
1749 + unsigned int gc_maybe_cycle : 1;
1750 wait_queue_head_t peer_wait;
1752 #define unix_sk(__sk) ((struct unix_sock *)__sk)
1753 diff --git a/kernel/cgroup.c b/kernel/cgroup.c
1754 index a0123d7..d68bf2b 100644
1755 --- a/kernel/cgroup.c
1756 +++ b/kernel/cgroup.c
1757 @@ -2443,7 +2443,6 @@ static int cgroup_rmdir(struct inode *unused_dir, struct dentry *dentry)
1758 list_del(&cgrp->sibling);
1759 spin_lock(&cgrp->dentry->d_lock);
1760 d = dget(cgrp->dentry);
1761 - cgrp->dentry = NULL;
1762 spin_unlock(&d->d_lock);
1764 cgroup_d_remove_dir(d);
1765 diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1766 index 67a7119..77427c8 100644
1769 @@ -353,11 +353,26 @@ static int vma_has_reserves(struct vm_area_struct *vma)
1773 +static void clear_gigantic_page(struct page *page,
1774 + unsigned long addr, unsigned long sz)
1777 + struct page *p = page;
1780 + for (i = 0; i < sz/PAGE_SIZE; i++, p = mem_map_next(p, page, i)) {
1782 + clear_user_highpage(p, addr + i * PAGE_SIZE);
1785 static void clear_huge_page(struct page *page,
1786 unsigned long addr, unsigned long sz)
1790 + if (unlikely(sz > MAX_ORDER_NR_PAGES))
1791 + return clear_gigantic_page(page, addr, sz);
1794 for (i = 0; i < sz/PAGE_SIZE; i++) {
1796 @@ -365,12 +380,32 @@ static void clear_huge_page(struct page *page,
1800 +static void copy_gigantic_page(struct page *dst, struct page *src,
1801 + unsigned long addr, struct vm_area_struct *vma)
1804 + struct hstate *h = hstate_vma(vma);
1805 + struct page *dst_base = dst;
1806 + struct page *src_base = src;
1808 + for (i = 0; i < pages_per_huge_page(h); ) {
1810 + copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
1813 + dst = mem_map_next(dst, dst_base, i);
1814 + src = mem_map_next(src, src_base, i);
1817 static void copy_huge_page(struct page *dst, struct page *src,
1818 unsigned long addr, struct vm_area_struct *vma)
1821 struct hstate *h = hstate_vma(vma);
1823 + if (unlikely(pages_per_huge_page(h) > MAX_ORDER_NR_PAGES))
1824 + return copy_gigantic_page(dst, src, addr, vma);
1827 for (i = 0; i < pages_per_huge_page(h); i++) {
1829 @@ -455,6 +490,8 @@ static void update_and_free_page(struct hstate *h, struct page *page)
1833 + VM_BUG_ON(h->order >= MAX_ORDER);
1836 h->nr_huge_pages_node[page_to_nid(page)]--;
1837 for (i = 0; i < pages_per_huge_page(h); i++) {
1838 @@ -969,6 +1006,14 @@ found:
1842 +static void prep_compound_huge_page(struct page *page, int order)
1844 + if (unlikely(order > (MAX_ORDER - 1)))
1845 + prep_compound_gigantic_page(page, order);
1847 + prep_compound_page(page, order);
1850 /* Put bootmem huge pages into the standard lists after mem_map is up */
1851 static void __init gather_bootmem_prealloc(void)
1853 @@ -979,7 +1024,7 @@ static void __init gather_bootmem_prealloc(void)
1854 struct hstate *h = m->hstate;
1855 __ClearPageReserved(page);
1856 WARN_ON(page_count(page) != 1);
1857 - prep_compound_page(page, h->order);
1858 + prep_compound_huge_page(page, h->order);
1859 prep_new_huge_page(h, page, page_to_nid(page));
1862 @@ -2103,7 +2148,7 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma,
1866 - pages[i] = page + pfn_offset;
1867 + pages[i] = mem_map_offset(page, pfn_offset);
1871 diff --git a/mm/internal.h b/mm/internal.h
1872 index 1f43f74..92729ea 100644
1875 @@ -17,6 +17,7 @@ void free_pgtables(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
1876 unsigned long floor, unsigned long ceiling);
1878 extern void prep_compound_page(struct page *page, unsigned long order);
1879 +extern void prep_compound_gigantic_page(struct page *page, unsigned long order);
1881 static inline void set_page_count(struct page *page, int v)
1883 @@ -53,6 +54,34 @@ static inline unsigned long page_order(struct page *page)
1887 + * Return the mem_map entry representing the 'offset' subpage within
1888 + * the maximally aligned gigantic page 'base'. Handle any discontiguity
1889 + * in the mem_map at MAX_ORDER_NR_PAGES boundaries.
1891 +static inline struct page *mem_map_offset(struct page *base, int offset)
1893 + if (unlikely(offset >= MAX_ORDER_NR_PAGES))
1894 + return pfn_to_page(page_to_pfn(base) + offset);
1895 + return base + offset;
1899 + * Iterator over all subpages withing the maximally aligned gigantic
1900 + * page 'base'. Handle any discontiguity in the mem_map.
1902 +static inline struct page *mem_map_next(struct page *iter,
1903 + struct page *base, int offset)
1905 + if (unlikely((offset & (MAX_ORDER_NR_PAGES - 1)) == 0)) {
1906 + unsigned long pfn = page_to_pfn(base) + offset;
1907 + if (!pfn_valid(pfn))
1909 + return pfn_to_page(pfn);
1915 * FLATMEM and DISCONTIGMEM configurations use alloc_bootmem_node,
1916 * so all functions starting at paging_init should be marked __init
1917 * in those cases. SPARSEMEM, however, allows for memory hotplug,
1918 diff --git a/mm/page_alloc.c b/mm/page_alloc.c
1919 index 27b8681..ed5cdae 100644
1920 --- a/mm/page_alloc.c
1921 +++ b/mm/page_alloc.c
1922 @@ -268,24 +268,39 @@ void prep_compound_page(struct page *page, unsigned long order)
1925 int nr_pages = 1 << order;
1927 + set_compound_page_dtor(page, free_compound_page);
1928 + set_compound_order(page, order);
1929 + __SetPageHead(page);
1930 + for (i = 1; i < nr_pages; i++) {
1931 + struct page *p = page + i;
1934 + p->first_page = page;
1938 +#ifdef CONFIG_HUGETLBFS
1939 +void prep_compound_gigantic_page(struct page *page, unsigned long order)
1942 + int nr_pages = 1 << order;
1943 struct page *p = page + 1;
1945 set_compound_page_dtor(page, free_compound_page);
1946 set_compound_order(page, order);
1947 __SetPageHead(page);
1948 - for (i = 1; i < nr_pages; i++, p++) {
1949 - if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
1950 - p = pfn_to_page(page_to_pfn(page) + i);
1951 + for (i = 1; i < nr_pages; i++, p = mem_map_next(p, page, i)) {
1953 p->first_page = page;
1958 static void destroy_compound_page(struct page *page, unsigned long order)
1961 int nr_pages = 1 << order;
1962 - struct page *p = page + 1;
1964 if (unlikely(compound_order(page) != order))
1966 @@ -293,9 +308,8 @@ static void destroy_compound_page(struct page *page, unsigned long order)
1967 if (unlikely(!PageHead(page)))
1969 __ClearPageHead(page);
1970 - for (i = 1; i < nr_pages; i++, p++) {
1971 - if (unlikely((i & (MAX_ORDER_NR_PAGES - 1)) == 0))
1972 - p = pfn_to_page(page_to_pfn(page) + i);
1973 + for (i = 1; i < nr_pages; i++) {
1974 + struct page *p = page + i;
1976 if (unlikely(!PageTail(p) |
1977 (p->first_page != page)))
1978 diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
1979 index 015606b..8bde9bf 100644
1980 --- a/net/unix/af_unix.c
1981 +++ b/net/unix/af_unix.c
1982 @@ -1300,14 +1300,23 @@ static void unix_destruct_fds(struct sk_buff *skb)
1986 -static void unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1987 +static int unix_attach_fds(struct scm_cookie *scm, struct sk_buff *skb)
1992 + * Need to duplicate file references for the sake of garbage
1993 + * collection. Otherwise a socket in the fps might become a
1994 + * candidate for GC while the skb is not yet queued.
1996 + UNIXCB(skb).fp = scm_fp_dup(scm->fp);
1997 + if (!UNIXCB(skb).fp)
2000 for (i=scm->fp->count-1; i>=0; i--)
2001 unix_inflight(scm->fp->fp[i]);
2002 - UNIXCB(skb).fp = scm->fp;
2003 skb->destructor = unix_destruct_fds;
2009 @@ -1366,8 +1375,11 @@ static int unix_dgram_sendmsg(struct kiocb *kiocb, struct socket *sock,
2012 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
2013 - if (siocb->scm->fp)
2014 - unix_attach_fds(siocb->scm, skb);
2015 + if (siocb->scm->fp) {
2016 + err = unix_attach_fds(siocb->scm, skb);
2020 unix_get_secdata(siocb->scm, skb);
2022 skb_reset_transport_header(skb);
2023 @@ -1536,8 +1548,13 @@ static int unix_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
2024 size = min_t(int, size, skb_tailroom(skb));
2026 memcpy(UNIXCREDS(skb), &siocb->scm->creds, sizeof(struct ucred));
2027 - if (siocb->scm->fp)
2028 - unix_attach_fds(siocb->scm, skb);
2029 + if (siocb->scm->fp) {
2030 + err = unix_attach_fds(siocb->scm, skb);
2037 if ((err = memcpy_fromiovec(skb_put(skb,size), msg->msg_iov, size)) != 0) {
2039 diff --git a/net/unix/garbage.c b/net/unix/garbage.c
2040 index 2a27b84..6d4a9a8 100644
2041 --- a/net/unix/garbage.c
2042 +++ b/net/unix/garbage.c
2043 @@ -186,8 +186,17 @@ static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
2045 struct sock *sk = unix_get_socket(*fp++);
2048 - func(unix_sk(sk));
2049 + struct unix_sock *u = unix_sk(sk);
2052 + * Ignore non-candidates, they could
2053 + * have been added to the queues after
2054 + * starting the garbage collection
2056 + if (u->gc_candidate) {
2062 if (hit && hitlist != NULL) {
2063 @@ -249,11 +258,11 @@ static void inc_inflight_move_tail(struct unix_sock *u)
2065 atomic_long_inc(&u->inflight);
2067 - * If this is still a candidate, move it to the end of the
2068 - * list, so that it's checked even if it was already passed
2070 + * If this still might be part of a cycle, move it to the end
2071 + * of the list, so that it's checked even if it was already
2074 - if (u->gc_candidate)
2075 + if (u->gc_maybe_cycle)
2076 list_move_tail(&u->link, &gc_candidates);
2079 @@ -267,6 +276,7 @@ void unix_gc(void)
2080 struct unix_sock *next;
2081 struct sk_buff_head hitlist;
2082 struct list_head cursor;
2083 + LIST_HEAD(not_cycle_list);
2085 spin_lock(&unix_gc_lock);
2087 @@ -282,10 +292,14 @@ void unix_gc(void)
2089 * Holding unix_gc_lock will protect these candidates from
2090 * being detached, and hence from gaining an external
2091 - * reference. This also means, that since there are no
2092 - * possible receivers, the receive queues of these sockets are
2093 - * static during the GC, even though the dequeue is done
2094 - * before the detach without atomicity guarantees.
2095 + * reference. Since there are no possible receivers, all
2096 + * buffers currently on the candidates' queues stay there
2097 + * during the garbage collection.
2099 + * We also know that no new candidate can be added onto the
2100 + * receive queues. Other, non candidate sockets _can_ be
2101 + * added to queue, so we must make sure only to touch
2104 list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
2106 @@ -299,6 +313,7 @@ void unix_gc(void)
2107 if (total_refs == inflight_refs) {
2108 list_move_tail(&u->link, &gc_candidates);
2109 u->gc_candidate = 1;
2110 + u->gc_maybe_cycle = 1;
2114 @@ -325,14 +340,24 @@ void unix_gc(void)
2115 list_move(&cursor, &u->link);
2117 if (atomic_long_read(&u->inflight) > 0) {
2118 - list_move_tail(&u->link, &gc_inflight_list);
2119 - u->gc_candidate = 0;
2120 + list_move_tail(&u->link, ¬_cycle_list);
2121 + u->gc_maybe_cycle = 0;
2122 scan_children(&u->sk, inc_inflight_move_tail, NULL);
2128 + * not_cycle_list contains those sockets which do not make up a
2129 + * cycle. Restore these to the inflight list.
2131 + while (!list_empty(¬_cycle_list)) {
2132 + u = list_entry(not_cycle_list.next, struct unix_sock, link);
2133 + u->gc_candidate = 0;
2134 + list_move_tail(&u->link, &gc_inflight_list);
2138 * Now gc_candidates contains only garbage. Restore original
2139 * inflight counters for these as well, and remove the skbuffs
2140 * which are creating the cycle(s).
2141 diff --git a/security/keys/internal.h b/security/keys/internal.h
2142 index b39f5c2..239098f 100644
2143 --- a/security/keys/internal.h
2144 +++ b/security/keys/internal.h
2145 @@ -107,6 +107,7 @@ extern key_ref_t search_process_keyrings(struct key_type *type,
2147 extern struct key *find_keyring_by_name(const char *name, bool skip_perm_check);
2149 +extern int install_user_keyrings(struct task_struct *tsk);
2150 extern int install_thread_keyring(struct task_struct *tsk);
2151 extern int install_process_keyring(struct task_struct *tsk);
2153 diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
2154 index 5be6d01..45b240a 100644
2155 --- a/security/keys/process_keys.c
2156 +++ b/security/keys/process_keys.c
2157 @@ -40,7 +40,7 @@ struct key_user root_key_user = {
2159 * install user and user session keyrings for a particular UID
2161 -static int install_user_keyrings(struct task_struct *tsk)
2162 +int install_user_keyrings(struct task_struct *tsk)
2164 struct user_struct *user = tsk->user;
2165 struct key *uid_keyring, *session_keyring;
2166 diff --git a/security/keys/request_key.c b/security/keys/request_key.c
2167 index ba32ca6..abea08f 100644
2168 --- a/security/keys/request_key.c
2169 +++ b/security/keys/request_key.c
2170 @@ -74,6 +74,10 @@ static int call_sbin_request_key(struct key_construction *cons,
2172 kenter("{%d},{%d},%s", key->serial, authkey->serial, op);
2174 + ret = install_user_keyrings(tsk);
2178 /* allocate a new session keyring */
2179 sprintf(desc, "_req.%u", key->serial);
2181 diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
2182 index f3da621..732ce13 100644
2183 --- a/sound/pci/hda/patch_sigmatel.c
2184 +++ b/sound/pci/hda/patch_sigmatel.c
2185 @@ -67,6 +67,7 @@ enum {
2190 STAC_92HD73XX_MODELS
2193 @@ -560,9 +561,7 @@ static struct hda_verb dell_eq_core_init[] = {
2196 static struct hda_verb dell_m6_core_init[] = {
2197 - /* set master volume to max value without distortion
2198 - * and direct control */
2199 - { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xec},
2200 + { 0x1f, AC_VERB_SET_VOLUME_KNOB_CONTROL, 0xff},
2201 /* setup audio connections */
2202 { 0x0d, AC_VERB_SET_CONNECT_SEL, 0x00},
2203 { 0x0a, AC_VERB_SET_CONNECT_SEL, 0x01},
2204 @@ -1297,11 +1296,13 @@ static unsigned int dell_m6_pin_configs[13] = {
2205 static unsigned int *stac92hd73xx_brd_tbl[STAC_92HD73XX_MODELS] = {
2206 [STAC_92HD73XX_REF] = ref92hd73xx_pin_configs,
2207 [STAC_DELL_M6] = dell_m6_pin_configs,
2208 + [STAC_DELL_EQ] = dell_m6_pin_configs,
2211 static const char *stac92hd73xx_models[STAC_92HD73XX_MODELS] = {
2212 [STAC_92HD73XX_REF] = "ref",
2213 [STAC_DELL_M6] = "dell-m6",
2214 + [STAC_DELL_EQ] = "dell-eq",
2217 static struct snd_pci_quirk stac92hd73xx_cfg_tbl[] = {
2218 @@ -3560,8 +3561,12 @@ again:
2219 spec->gpio_data = 0x01;
2221 switch (spec->board_config) {
2222 - case STAC_DELL_M6:
2223 + case STAC_DELL_EQ:
2224 spec->init = dell_eq_core_init;
2226 + case STAC_DELL_M6:
2228 + spec->init = dell_m6_core_init;
2229 switch (codec->subsystem_id) {
2230 case 0x1028025e: /* Analog Mics */
2232 @@ -3570,8 +3575,6 @@ again:
2234 case 0x10280271: /* Digital Mics */
2236 - spec->init = dell_m6_core_init;
2237 - /* fall-through */
2240 stac92xx_set_config_reg(codec, 0x13, 0x90A60160);