1From: Greg Kroah-Hartman <gregkh@suse.de>
2Subject: Upstream 2.6.27.25 release from kernel.org
3
4Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
5
6diff --git a/Documentation/filesystems/ext4.txt b/Documentation/filesystems/ext4.txt
7index 0d53949..befe8d4 100644
8--- a/Documentation/filesystems/ext4.txt
9+++ b/Documentation/filesystems/ext4.txt
10@@ -73,7 +73,7 @@ Mailing list: linux-ext4@vger.kernel.org
11 * extent format more robust in face of on-disk corruption due to magics,
12 * internal redundancy in tree
13 * improved file allocation (multi-block alloc)
14-* fix 32000 subdirectory limit
15+* lift 32000 subdirectory limit imposed by i_links_count[1]
16 * nsec timestamps for mtime, atime, ctime, create time
17 * inode version field on disk (NFSv4, Lustre)
18 * reduced e2fsck time via uninit_bg feature
19@@ -88,6 +88,9 @@ Mailing list: linux-ext4@vger.kernel.org
20 * efficient new ordered mode in JBD2 and ext4 (avoid using buffer head to force
21 the ordering)
22
23+[1] Filesystems with a block size of 1k may see a limit imposed by the
24+directory hash tree having a maximum depth of two.
25+
26 2.2 Candidate features for future inclusion
27
28 * Online defrag (patches available but not well tested)
29diff --git a/Makefile b/Makefile
30index 2b8138a..1314692 100644
31--- a/Makefile
32+++ b/Makefile
33@@ -1,7 +1,7 @@
34 VERSION = 2
35 PATCHLEVEL = 6
36 SUBLEVEL = 27
37-EXTRAVERSION = .24
38+EXTRAVERSION = .25
39 NAME = Trembling Tortoise
40
41 # *DOCUMENTATION*
42diff --git a/arch/sparc/include/asm/pil.h b/arch/sparc/include/asm/pil.h
43index 71819bb..6ea90d7 100644
44--- a/arch/sparc/include/asm/pil.h
45+++ b/arch/sparc/include/asm/pil.h
46@@ -18,5 +18,6 @@
47 #define PIL_SMP_CTX_NEW_VERSION 4
48 #define PIL_DEVICE_IRQ 5
49 #define PIL_SMP_CALL_FUNC_SNGL 6
50+#define PIL_KGDB_CAPTURE 8
51
52 #endif /* !(_SPARC64_PIL_H) */
53diff --git a/arch/sparc/include/asm/tlb_64.h b/arch/sparc/include/asm/tlb_64.h
54index ec81cde..0aaa086 100644
55--- a/arch/sparc/include/asm/tlb_64.h
56+++ b/arch/sparc/include/asm/tlb_64.h
57@@ -58,6 +58,8 @@ static inline struct mmu_gather *tlb_gather_mmu(struct mm_struct *mm, unsigned i
58 static inline void tlb_flush_mmu(struct mmu_gather *mp)
59 {
60 if (mp->need_flush) {
61+ if (!mp->fullmm)
62+ flush_tlb_pending();
63 free_pages_and_swap_cache(mp->pages, mp->pages_nr);
64 mp->pages_nr = 0;
65 mp->need_flush = 0;
66@@ -78,8 +80,6 @@ static inline void tlb_finish_mmu(struct mmu_gather *mp, unsigned long start, un
67
68 if (mp->fullmm)
69 mp->fullmm = 0;
70- else
71- flush_tlb_pending();
72
73 /* keep the page table cache within bounds */
74 check_pgt_cache();
75diff --git a/arch/sparc/kernel/of_device.c b/arch/sparc/kernel/of_device.c
76index f58c537..e0bfc51 100644
77--- a/arch/sparc/kernel/of_device.c
78+++ b/arch/sparc/kernel/of_device.c
79@@ -223,8 +223,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
80
81 static int of_bus_sbus_match(struct device_node *np)
82 {
83- return !strcmp(np->name, "sbus") ||
84- !strcmp(np->name, "sbi");
85+ struct device_node *dp = np;
86+
87+ while (dp) {
88+ if (!strcmp(dp->name, "sbus") ||
89+ !strcmp(dp->name, "sbi"))
90+ return 1;
91+
92+ /* Have a look at use_1to1_mapping(). We're trying
93+ * to match SBUS if that's the top-level bus and we
94+ * don't have some intervening real bus that provides
95+ * ranges based translations.
96+ */
97+ if (of_find_property(dp, "ranges", NULL) != NULL)
98+ break;
99+
100+ dp = dp->parent;
101+ }
102+
103+ return 0;
104 }
105
106 static void of_bus_sbus_count_cells(struct device_node *child,
107diff --git a/arch/sparc64/kernel/irq.c b/arch/sparc64/kernel/irq.c
108index 7495bc7..0708a5b 100644
109--- a/arch/sparc64/kernel/irq.c
110+++ b/arch/sparc64/kernel/irq.c
111@@ -318,17 +318,25 @@ static void sun4u_set_affinity(unsigned int virt_irq, cpumask_t mask)
112 sun4u_irq_enable(virt_irq);
113 }
114
115+/* Don't do anything. The desc->status check for IRQ_DISABLED in
116+ * handler_irq() will skip the handler call and that will leave the
117+ * interrupt in the sent state. The next ->enable() call will hit the
118+ * ICLR register to reset the state machine.
119+ *
120+ * This scheme is necessary, instead of clearing the Valid bit in the
121+ * IMAP register, to handle the case of IMAP registers being shared by
122+ * multiple INOs (and thus ICLR registers). Since we use a different
123+ * virtual IRQ for each shared IMAP instance, the generic code thinks
124+ * there is only one user so it prematurely calls ->disable() on
125+ * free_irq().
126+ *
127+ * We have to provide an explicit ->disable() method instead of using
128+ * NULL to get the default. The reason is that if the generic code
129+ * sees that, it also hooks up a default ->shutdown method which
130+ * invokes ->mask() which we do not want. See irq_chip_set_defaults().
131+ */
132 static void sun4u_irq_disable(unsigned int virt_irq)
133 {
134- struct irq_handler_data *data = get_irq_chip_data(virt_irq);
135-
136- if (likely(data)) {
137- unsigned long imap = data->imap;
138- unsigned long tmp = upa_readq(imap);
139-
140- tmp &= ~IMAP_VALID;
141- upa_writeq(tmp, imap);
142- }
143 }
144
145 static void sun4u_irq_eoi(unsigned int virt_irq)
146@@ -739,7 +747,8 @@ void handler_irq(int irq, struct pt_regs *regs)
147
148 desc = irq_desc + virt_irq;
149
150- desc->handle_irq(virt_irq, desc);
151+ if (!(desc->status & IRQ_DISABLED))
152+ desc->handle_irq(virt_irq, desc);
153
154 bucket_pa = next_pa;
155 }
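
The comment block added above (patch lines 115-131) describes a lazy-disable scheme: ->disable() leaves the hardware alone, handler_irq() skips handlers for disabled IRQs, and the next ->enable() resets the INO state machine via the ICLR register. A minimal user-space model of that cooperation follows; names and the flag value are illustrative, a sketch of the idea rather than kernel code:

#include <stdio.h>

#define IRQ_DISABLED 0x1

struct irq_desc {
    unsigned int status;
    void (*handle)(int virt_irq);
};

static struct irq_desc descs[16];

static void chip_disable(int irq)
{
    /* Intentionally empty, like the new sun4u_irq_disable(). */
}

static void chip_enable(int irq)
{
    /* Models the ICLR write that resets the interrupt state
     * machine, plus the generic layer clearing IRQ_DISABLED. */
    descs[irq].status &= ~IRQ_DISABLED;
}

static void generic_disable_irq(int irq)
{
    descs[irq].status |= IRQ_DISABLED;  /* generic bookkeeping only */
    chip_disable(irq);                  /* hardware left untouched */
}

static void handler_irq(int irq)
{
    struct irq_desc *desc = &descs[irq];

    if (!(desc->status & IRQ_DISABLED)) /* the check this hunk adds */
        desc->handle(irq);
    /* else: the interrupt stays "sent"; chip_enable() resets it */
}

static void say(int irq) { printf("handled %d\n", irq); }

int main(void)
{
    descs[3].handle = say;
    handler_irq(3);          /* runs */
    generic_disable_irq(3);
    handler_irq(3);          /* skipped */
    chip_enable(3);
    handler_irq(3);          /* runs again */
    return 0;
}
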
156diff --git a/arch/sparc64/kernel/kgdb.c b/arch/sparc64/kernel/kgdb.c
157index fefbe6d..f5a0fd4 100644
158--- a/arch/sparc64/kernel/kgdb.c
159+++ b/arch/sparc64/kernel/kgdb.c
160@@ -108,7 +108,7 @@ void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
161 }
162
163 #ifdef CONFIG_SMP
164-void smp_kgdb_capture_client(struct pt_regs *regs)
165+void smp_kgdb_capture_client(int irq, struct pt_regs *regs)
166 {
167 unsigned long flags;
168
169diff --git a/arch/sparc64/kernel/of_device.c b/arch/sparc64/kernel/of_device.c
170index 100ebd5..d342723 100644
171--- a/arch/sparc64/kernel/of_device.c
172+++ b/arch/sparc64/kernel/of_device.c
173@@ -278,8 +278,25 @@ static unsigned long of_bus_pci_get_flags(const u32 *addr, unsigned long flags)
174
175 static int of_bus_sbus_match(struct device_node *np)
176 {
177- return !strcmp(np->name, "sbus") ||
178- !strcmp(np->name, "sbi");
179+ struct device_node *dp = np;
180+
181+ while (dp) {
182+ if (!strcmp(dp->name, "sbus") ||
183+ !strcmp(dp->name, "sbi"))
184+ return 1;
185+
186+ /* Have a look at use_1to1_mapping(). We're trying
187+ * to match SBUS if that's the top-level bus and we
188+ * don't have some intervening real bus that provides
189+ * ranges based translations.
190+ */
191+ if (of_find_property(dp, "ranges", NULL) != NULL)
192+ break;
193+
194+ dp = dp->parent;
195+ }
196+
197+ return 0;
198 }
199
200 static void of_bus_sbus_count_cells(struct device_node *child,
201diff --git a/arch/sparc64/kernel/pci_common.c b/arch/sparc64/kernel/pci_common.c
202index 09a5ec2..d498c60 100644
203--- a/arch/sparc64/kernel/pci_common.c
204+++ b/arch/sparc64/kernel/pci_common.c
205@@ -368,7 +368,7 @@ static void pci_register_iommu_region(struct pci_pbm_info *pbm)
206 const u32 *vdma = of_get_property(pbm->prom_node, "virtual-dma", NULL);
207
208 if (vdma) {
209- struct resource *rp = kmalloc(sizeof(*rp), GFP_KERNEL);
210+ struct resource *rp = kzalloc(sizeof(*rp), GFP_KERNEL);
211
212 if (!rp) {
213 prom_printf("Cannot allocate IOMMU resource.\n");
214diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
215index 2be166c..a0ad401 100644
216--- a/arch/sparc64/kernel/smp.c
217+++ b/arch/sparc64/kernel/smp.c
218@@ -118,9 +118,9 @@ void __cpuinit smp_callin(void)
219 while (!cpu_isset(cpuid, smp_commenced_mask))
220 rmb();
221
222- ipi_call_lock();
223+ ipi_call_lock_irq();
224 cpu_set(cpuid, cpu_online_map);
225- ipi_call_unlock();
226+ ipi_call_unlock_irq();
227
228 /* idle thread is expected to have preempt disabled */
229 preempt_disable();
230@@ -1031,7 +1031,7 @@ void smp_fetch_global_regs(void)
231 * If the address space is non-shared (ie. mm->count == 1) we avoid
232 * cross calls when we want to flush the currently running process's
233 * tlb state. This is done by clearing all cpu bits except the current
234- * processor's in current->active_mm->cpu_vm_mask and performing the
235+ * processor's in current->mm->cpu_vm_mask and performing the
236 * flush locally only. This will force any subsequent cpus which run
237 * this task to flush the context from the local tlb if the process
238 * migrates to another cpu (again).
239@@ -1074,7 +1074,7 @@ void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long
240 u32 ctx = CTX_HWBITS(mm->context);
241 int cpu = get_cpu();
242
243- if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1)
244+ if (mm == current->mm && atomic_read(&mm->mm_users) == 1)
245 mm->cpu_vm_mask = cpumask_of_cpu(cpu);
246 else
247 smp_cross_call_masked(&xcall_flush_tlb_pending,
248diff --git a/arch/sparc64/kernel/ttable.S b/arch/sparc64/kernel/ttable.S
249index 1ade3d6..89bf646 100644
250--- a/arch/sparc64/kernel/ttable.S
251+++ b/arch/sparc64/kernel/ttable.S
252@@ -63,7 +63,13 @@ tl0_irq6: TRAP_IRQ(smp_call_function_single_client, 6)
253 #else
254 tl0_irq6: BTRAP(0x46)
255 #endif
256-tl0_irq7: BTRAP(0x47) BTRAP(0x48) BTRAP(0x49)
257+tl0_irq7: BTRAP(0x47)
258+#ifdef CONFIG_KGDB
259+tl0_irq8: TRAP_IRQ(smp_kgdb_capture_client, 8)
260+#else
261+tl0_irq8: BTRAP(0x48)
262+#endif
263+tl0_irq9: BTRAP(0x49)
264 tl0_irq10: BTRAP(0x4a) BTRAP(0x4b) BTRAP(0x4c) BTRAP(0x4d)
265 tl0_irq14: TRAP_IRQ(timer_interrupt, 14)
266 tl0_irq15: TRAP_IRQ(handler_irq, 15)
267diff --git a/arch/sparc64/mm/ultra.S b/arch/sparc64/mm/ultra.S
268index 86773e8..f1d76cb 100644
269--- a/arch/sparc64/mm/ultra.S
270+++ b/arch/sparc64/mm/ultra.S
271@@ -681,28 +681,8 @@ xcall_new_mmu_context_version:
272 #ifdef CONFIG_KGDB
273 .globl xcall_kgdb_capture
274 xcall_kgdb_capture:
275-661: rdpr %pstate, %g2
276- wrpr %g2, PSTATE_IG | PSTATE_AG, %pstate
277- .section .sun4v_2insn_patch, "ax"
278- .word 661b
279- nop
280- nop
281- .previous
282-
283- rdpr %pil, %g2
284- wrpr %g0, 15, %pil
285- sethi %hi(109f), %g7
286- ba,pt %xcc, etrap_irq
287-109: or %g7, %lo(109b), %g7
288-#ifdef CONFIG_TRACE_IRQFLAGS
289- call trace_hardirqs_off
290- nop
291-#endif
292- call smp_kgdb_capture_client
293- add %sp, PTREGS_OFF, %o0
294- /* Has to be a non-v9 branch due to the large distance. */
295- ba rtrap_xcall
296- ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
297+ wr %g0, (1 << PIL_KGDB_CAPTURE), %set_softint
298+ retry
299 #endif
300
301 #endif /* CONFIG_SMP */
302diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
303index 6d5a3c4..2781331 100644
304--- a/arch/x86/kernel/setup.c
305+++ b/arch/x86/kernel/setup.c
306@@ -730,6 +730,9 @@ void __init setup_arch(char **cmdline_p)
307
308 finish_e820_parsing();
309
310+ if (efi_enabled)
311+ efi_init();
312+
313 dmi_scan_machine();
314
315 dmi_check_system(bad_bios_dmi_table);
316@@ -743,8 +746,6 @@ void __init setup_arch(char **cmdline_p)
317 insert_resource(&iomem_resource, &data_resource);
318 insert_resource(&iomem_resource, &bss_resource);
319
320- if (efi_enabled)
321- efi_init();
322
323 #ifdef CONFIG_X86_32
324 if (ppro_with_ram_bug()) {
325diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
326index 8f307d9..f46c340 100644
327--- a/arch/x86/mm/hugetlbpage.c
328+++ b/arch/x86/mm/hugetlbpage.c
329@@ -26,12 +26,16 @@ static unsigned long page_table_shareable(struct vm_area_struct *svma,
330 unsigned long sbase = saddr & PUD_MASK;
331 unsigned long s_end = sbase + PUD_SIZE;
332
333+ /* Allow segments to share if only one is marked locked */
334+ unsigned long vm_flags = vma->vm_flags & ~VM_LOCKED;
335+ unsigned long svm_flags = svma->vm_flags & ~VM_LOCKED;
336+
337 /*
338 * match the virtual addresses, permission and the alignment of the
339 * page table page.
340 */
341 if (pmd_index(addr) != pmd_index(saddr) ||
342- vma->vm_flags != svma->vm_flags ||
343+ vm_flags != svm_flags ||
344 sbase < svma->vm_start || svma->vm_end < s_end)
345 return 0;
346
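
The hugetlbpage.c hunk above lets two mappings share a page-table page even when only one of them is mlock()ed, by comparing the two flag words with VM_LOCKED masked out on both sides. The comparison in isolation (bit values illustrative, not the kernel's definitions):

#include <stdio.h>

#define VM_LOCKED 0x00002000UL  /* illustrative value */

static int flags_match(unsigned long vm_flags, unsigned long svm_flags)
{
    return (vm_flags & ~VM_LOCKED) == (svm_flags & ~VM_LOCKED);
}

int main(void)
{
    unsigned long base = 0x73UL;  /* some read/write/shared combination */

    printf("%d\n", flags_match(base, base | VM_LOCKED)); /* 1: shareable */
    printf("%d\n", flags_match(base, base | 0x4UL));     /* 0: real difference */
    return 0;
}
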
347diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
348index 7c3b8dc..5468c19 100644
349--- a/arch/x86/mm/pageattr.c
350+++ b/arch/x86/mm/pageattr.c
351@@ -565,6 +565,17 @@ static int split_large_page(pte_t *kpte, unsigned long address)
352 ref_prot = pte_pgprot(pte_mkexec(pte_clrhuge(*kpte)));
353 pgprot_val(ref_prot) |= _PAGE_PRESENT;
354 __set_pmd_pte(kpte, address, mk_pte(base, ref_prot));
355+
356+ /*
357+ * Intel Atom errata AAH41 workaround.
358+ *
359+ * The real fix should be in hw or in a microcode update, but
360+ * we also probabilistically try to reduce the window of having
361+ * a large TLB mixed with 4K TLBs while instruction fetches are
362+ * going on.
363+ */
364+ __flush_tlb_all();
365+
366 base = NULL;
367
368 out_unlock:
369diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
370index 76d49eb..8a06160 100644
371--- a/arch/x86/pci/mmconfig-shared.c
372+++ b/arch/x86/pci/mmconfig-shared.c
373@@ -255,7 +255,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
374 if (!fixmem32)
375 return AE_OK;
376 if ((mcfg_res->start >= fixmem32->address) &&
377- (mcfg_res->end <= (fixmem32->address +
378+ (mcfg_res->end < (fixmem32->address +
379 fixmem32->address_length))) {
380 mcfg_res->flags = 1;
381 return AE_CTRL_TERMINATE;
382@@ -272,7 +272,7 @@ static acpi_status __init check_mcfg_resource(struct acpi_resource *res,
383 return AE_OK;
384
385 if ((mcfg_res->start >= address.minimum) &&
386- (mcfg_res->end <= (address.minimum + address.address_length))) {
387+ (mcfg_res->end < (address.minimum + address.address_length))) {
388 mcfg_res->flags = 1;
389 return AE_CTRL_TERMINATE;
390 }
391@@ -298,7 +298,7 @@ static int __init is_acpi_reserved(u64 start, u64 end, unsigned not_used)
392 struct resource mcfg_res;
393
394 mcfg_res.start = start;
395- mcfg_res.end = end;
396+ mcfg_res.end = end - 1;
397 mcfg_res.flags = 0;
398
399 acpi_get_devices("PNP0C01", find_mboard_resource, &mcfg_res, NULL);
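
All three mmconfig-shared.c hunks above fix the same off-by-one: struct resource carries an inclusive ->end, while ACPI describes a region as base plus length, i.e. an exclusive end. A worked check, assuming a hypothetical 256 MB MMCONFIG window at 0xE0000000:

#include <stdint.h>
#include <stdio.h>

/* Inclusive-end resource contained in an ACPI (base, length) region. */
static int contained(uint64_t start, uint64_t end_incl,
                     uint64_t base, uint64_t len)
{
    return start >= base && end_incl < base + len; /* was <=: off by one */
}

int main(void)
{
    uint64_t base = 0xE0000000ULL, len = 0x10000000ULL;
    /* is_acpi_reserved() now builds the inclusive end as end - 1: */
    uint64_t start = base, end_incl = base + len - 1;

    printf("%d\n", contained(start, end_incl, base, len));     /* 1 */
    printf("%d\n", contained(start, end_incl + 1, base, len)); /* 0: one past */
    return 0;
}
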
400diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
401index 81b40ed..5639e27 100644
402--- a/drivers/acpi/processor_idle.c
403+++ b/drivers/acpi/processor_idle.c
404@@ -303,6 +303,9 @@ static void acpi_timer_check_state(int state, struct acpi_processor *pr,
405 struct acpi_processor_power *pwr = &pr->power;
406 u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
407
408+ if (boot_cpu_has(X86_FEATURE_AMDC1E))
409+ type = ACPI_STATE_C1;
410+
411 /*
412 * Check, if one of the previous states already marked the lapic
413 * unstable
414@@ -1154,6 +1157,7 @@ static int acpi_processor_power_verify(struct acpi_processor *pr)
415 switch (cx->type) {
416 case ACPI_STATE_C1:
417 cx->valid = 1;
418+ acpi_timer_check_state(i, pr, cx);
419 break;
420
421 case ACPI_STATE_C2:
422@@ -1468,20 +1472,22 @@ static int acpi_idle_enter_c1(struct cpuidle_device *dev,
423
424 /* Do not access any ACPI IO ports in suspend path */
425 if (acpi_idle_suspend) {
426- acpi_safe_halt();
427 local_irq_enable();
428+ cpu_relax();
429 return 0;
430 }
431
432 if (pr->flags.bm_check)
433 acpi_idle_update_bm_rld(pr, cx);
434
435+ acpi_state_timer_broadcast(pr, cx, 1);
436 t1 = inl(acpi_gbl_FADT.xpm_timer_block.address);
437 acpi_idle_do_entry(cx);
438 t2 = inl(acpi_gbl_FADT.xpm_timer_block.address);
439
440 local_irq_enable();
441 cx->usage++;
442+ acpi_state_timer_broadcast(pr, cx, 0);
443
444 return ticks_elapsed_in_us(t1, t2);
445 }
446diff --git a/drivers/char/random.c b/drivers/char/random.c
447index 7ce1ac4..201b2c1 100644
448--- a/drivers/char/random.c
449+++ b/drivers/char/random.c
450@@ -1626,15 +1626,20 @@ EXPORT_SYMBOL(secure_dccp_sequence_number);
451 * value is not cryptographically secure but for several uses the cost of
452 * depleting entropy is too high
453 */
454+DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
455 unsigned int get_random_int(void)
456 {
457- /*
458- * Use IP's RNG. It suits our purpose perfectly: it re-keys itself
459- * every second, from the entropy pool (and thus creates a limited
460- * drain on it), and uses halfMD4Transform within the second. We
461- * also mix it with jiffies and the PID:
462- */
463- return secure_ip_id((__force __be32)(current->pid + jiffies));
464+ struct keydata *keyptr;
465+ __u32 *hash = get_cpu_var(get_random_int_hash);
466+ int ret;
467+
468+ keyptr = get_keyptr();
469+ hash[0] += current->pid + jiffies + get_cycles();
470+
471+ ret = half_md4_transform(hash, keyptr->secret);
472+ put_cpu_var(get_random_int_hash);
473+
474+ return ret;
475 }
476
477 /*
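
The new get_random_int() above keeps a small per-CPU hash state, folds cheap per-call entropy (PID, jiffies, cycle counter) into it, and runs half_md4_transform() keyed from the entropy pool, so repeated calls no longer lean on the IP-ID generator. A single-threaded user-space model of that shape; the mixer below is a deliberately simple stand-in for half_md4_transform(), the per-CPU state becomes one global, and all names are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static uint32_t hash_state[4];   /* models the per-CPU hash array */

/* Stand-in mixer; the kernel uses half_md4_transform(hash, secret). */
static uint32_t mix(uint32_t h[4], const uint32_t secret[4])
{
    uint32_t v = h[0];
    for (int i = 0; i < 4; i++) {
        v ^= secret[i];
        v = (v << 7) | (v >> 25);
        v += h[i];
        h[i] ^= v;               /* state evolves with every call */
    }
    return v;
}

static unsigned int get_random_int_model(void)
{
    static const uint32_t secret[4] = { 0xdeadbeef, 0x8badf00d,
                                        0xa5a5a5a5, 0x12345678 };
    /* Perturb the state with cheap per-call entropy, as the patch
     * does with current->pid + jiffies + get_cycles(). */
    hash_state[0] += (uint32_t)getpid() + (uint32_t)clock();
    return mix(hash_state, secret);
}

int main(void)
{
    for (int i = 0; i < 3; i++)
        printf("%08x\n", (unsigned)get_random_int_model());
    return 0;
}
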
478diff --git a/drivers/char/tpm/tpm_bios.c b/drivers/char/tpm/tpm_bios.c
479index 68f052b..2db432d 100644
480--- a/drivers/char/tpm/tpm_bios.c
481+++ b/drivers/char/tpm/tpm_bios.c
482@@ -214,7 +214,8 @@ static int get_event_name(char *dest, struct tcpa_event *event,
483 unsigned char * event_entry)
484 {
485 const char *name = "";
486- char data[40] = "";
487+ /* 41 so there is room for 40 bytes of data and 1 nul */
488+ char data[41] = "";
489 int i, n_len = 0, d_len = 0;
490 struct tcpa_pc_event *pc_event;
491
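
The tpm_bios.c fix above is the classic space-for-terminator bug: 40 bytes of event data need a 41-byte buffer so the trailing NUL still fits. The same invariant in miniature:

#include <stdio.h>
#include <string.h>

int main(void)
{
    char data[41] = "";          /* 40 payload bytes + 1 NUL */
    const char payload[] = "0123456789012345678901234567890123456789";

    /* 40 characters are copied; data[40] keeps the terminator. */
    snprintf(data, sizeof(data), "%s", payload);
    printf("%zu\n", strlen(data));  /* prints 40 */
    return 0;
}
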
492diff --git a/drivers/hwmon/lm78.c b/drivers/hwmon/lm78.c
493index ed7859f..affee01 100644
494--- a/drivers/hwmon/lm78.c
495+++ b/drivers/hwmon/lm78.c
496@@ -178,7 +178,7 @@ static struct platform_driver lm78_isa_driver = {
497 .name = "lm78",
498 },
499 .probe = lm78_isa_probe,
500- .remove = lm78_isa_remove,
501+ .remove = __devexit_p(lm78_isa_remove),
502 };
503
504
505diff --git a/drivers/media/video/cx88/cx88-input.c b/drivers/media/video/cx88/cx88-input.c
506index 53526d9..dedf96b 100644
507--- a/drivers/media/video/cx88/cx88-input.c
508+++ b/drivers/media/video/cx88/cx88-input.c
509@@ -48,8 +48,7 @@ struct cx88_IR {
510
511 /* poll external decoder */
512 int polling;
513- struct work_struct work;
514- struct timer_list timer;
515+ struct delayed_work work;
516 u32 gpio_addr;
517 u32 last_gpio;
518 u32 mask_keycode;
519@@ -143,27 +142,19 @@ static void cx88_ir_handle_key(struct cx88_IR *ir)
520 }
521 }
522
523-static void ir_timer(unsigned long data)
524-{
525- struct cx88_IR *ir = (struct cx88_IR *)data;
526-
527- schedule_work(&ir->work);
528-}
529-
530 static void cx88_ir_work(struct work_struct *work)
531 {
532- struct cx88_IR *ir = container_of(work, struct cx88_IR, work);
533+ struct cx88_IR *ir = container_of(work, struct cx88_IR, work.work);
534
535 cx88_ir_handle_key(ir);
536- mod_timer(&ir->timer, jiffies + msecs_to_jiffies(ir->polling));
537+ schedule_delayed_work(&ir->work, msecs_to_jiffies(ir->polling));
538 }
539
540 void cx88_ir_start(struct cx88_core *core, struct cx88_IR *ir)
541 {
542 if (ir->polling) {
543- setup_timer(&ir->timer, ir_timer, (unsigned long)ir);
544- INIT_WORK(&ir->work, cx88_ir_work);
545- schedule_work(&ir->work);
546+ INIT_DELAYED_WORK(&ir->work, cx88_ir_work);
547+ schedule_delayed_work(&ir->work, 0);
548 }
549 if (ir->sampling) {
550 core->pci_irqmask |= PCI_INT_IR_SMPINT;
551@@ -179,10 +170,8 @@ void cx88_ir_stop(struct cx88_core *core, struct cx88_IR *ir)
552 core->pci_irqmask &= ~PCI_INT_IR_SMPINT;
553 }
554
555- if (ir->polling) {
556- del_timer_sync(&ir->timer);
557- flush_scheduled_work();
558- }
559+ if (ir->polling)
560+ cancel_delayed_work_sync(&ir->work);
561 }
562
563 /* ---------------------------------------------------------------------- */
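
The cx88-input.c conversion above collapses a timer that schedules a work item into a single self-rearming delayed_work, so teardown shrinks from del_timer_sync() plus flush_scheduled_work() to one cancel_delayed_work_sync() call. A condensed kernel-style sketch of the pattern (a fragment for illustration, not a buildable module; the poller names are invented):

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct poller {
        struct delayed_work work;
        int polling_ms;
};

static void poller_fn(struct work_struct *work)
{
        struct poller *p = container_of(work, struct poller, work.work);

        /* ... sample the hardware here ... */

        /* Re-arm: replaces the old mod_timer()-driven rescheduling. */
        schedule_delayed_work(&p->work, msecs_to_jiffies(p->polling_ms));
}

static void poller_start(struct poller *p)
{
        INIT_DELAYED_WORK(&p->work, poller_fn);
        schedule_delayed_work(&p->work, 0);
}

static void poller_stop(struct poller *p)
{
        /* One call now replaces del_timer_sync() + flush_scheduled_work(). */
        cancel_delayed_work_sync(&p->work);
}
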
564diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
565index 2486a65..ba91aee 100644
566--- a/drivers/net/bnx2.c
567+++ b/drivers/net/bnx2.c
568@@ -2574,6 +2574,7 @@ bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
569 /* Tell compiler that status block fields can change. */
570 barrier();
571 cons = *bnapi->hw_tx_cons_ptr;
572+ barrier();
573 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
574 cons++;
575 return cons;
576@@ -2849,6 +2850,7 @@ bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
577 /* Tell compiler that status block fields can change. */
578 barrier();
579 cons = *bnapi->hw_rx_cons_ptr;
580+ barrier();
581 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
582 cons++;
583 return cons;
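
The two bnx2.c hunks add a second barrier() after loading the consumer index. The status block is updated by the device via DMA, and without that barrier the compiler is free to rematerialize `cons` by re-reading memory at a later use, seeing a newer index than the one just range-checked. The idiom, modeled in user space (GCC-style inline asm, globals standing in for coherent DMA memory):

#include <stdio.h>

/* Compiler-only memory barrier, as the kernel's barrier(). */
#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned short hw_tx_cons;   /* written by the device via DMA */

static unsigned short get_hw_tx_cons(void)
{
    unsigned short cons;

    barrier();          /* force a fresh load of the status block */
    cons = hw_tx_cons;
    barrier();          /* pin the loaded value: without this the
                         * compiler may re-read hw_tx_cons at later
                         * uses of cons and see a different index */
    return cons;
}

int main(void)
{
    printf("%u\n", get_hw_tx_cons());
    return 0;
}
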
584diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
585index 4489e58..e929e61 100644
586--- a/drivers/net/bonding/bond_alb.c
587+++ b/drivers/net/bonding/bond_alb.c
588@@ -1716,9 +1716,6 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
589 }
590 }
591
592- write_unlock_bh(&bond->curr_slave_lock);
593- read_unlock(&bond->lock);
594-
595 if (swap_slave) {
596 alb_swap_mac_addr(bond, swap_slave, bond->curr_active_slave);
597 alb_fasten_mac_swap(bond, swap_slave, bond->curr_active_slave);
598@@ -1726,16 +1723,15 @@ int bond_alb_set_mac_address(struct net_device *bond_dev, void *addr)
599 alb_set_slave_mac_addr(bond->curr_active_slave, bond_dev->dev_addr,
600 bond->alb_info.rlb_enabled);
601
602+ read_lock(&bond->lock);
603 alb_send_learning_packets(bond->curr_active_slave, bond_dev->dev_addr);
604 if (bond->alb_info.rlb_enabled) {
605 /* inform clients mac address has changed */
606 rlb_req_update_slave_clients(bond, bond->curr_active_slave);
607 }
608+ read_unlock(&bond->lock);
609 }
610
611- read_lock(&bond->lock);
612- write_lock_bh(&bond->curr_slave_lock);
613-
614 return 0;
615 }
616
617diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
618index 6b96357..1f60117 100644
619--- a/drivers/net/e1000/e1000_main.c
620+++ b/drivers/net/e1000/e1000_main.c
621@@ -4133,8 +4133,9 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
622 PCI_DMA_FROMDEVICE);
623
624 length = le16_to_cpu(rx_desc->length);
625-
626- if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
627+ /* !EOP means multiple descriptors were used to store a single
628+ * packet; also make sure the frame isn't just the CRC */
629+ if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
630 /* All receives must fit into a single buffer */
631 E1000_DBG("%s: Receive packet consumed multiple"
632 " buffers\n", netdev->name);
633diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
634index 89964fa..23110d8 100644
635--- a/drivers/net/igb/igb_ethtool.c
636+++ b/drivers/net/igb/igb_ethtool.c
637@@ -2029,6 +2029,10 @@ static struct ethtool_ops igb_ethtool_ops = {
638 .get_ethtool_stats = igb_get_ethtool_stats,
639 .get_coalesce = igb_get_coalesce,
640 .set_coalesce = igb_set_coalesce,
641+ .get_flags = ethtool_op_get_flags,
642+#ifdef CONFIG_IGB_LRO
643+ .set_flags = ethtool_op_set_flags,
644+#endif
645 };
646
647 void igb_set_ethtool_ops(struct net_device *netdev)
648diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
649index 4239450..1aa0388 100644
650--- a/drivers/net/macvlan.c
651+++ b/drivers/net/macvlan.c
652@@ -328,7 +328,8 @@ static u32 macvlan_ethtool_get_rx_csum(struct net_device *dev)
653 const struct macvlan_dev *vlan = netdev_priv(dev);
654 struct net_device *lowerdev = vlan->lowerdev;
655
656- if (lowerdev->ethtool_ops->get_rx_csum == NULL)
657+ if (lowerdev->ethtool_ops == NULL ||
658+ lowerdev->ethtool_ops->get_rx_csum == NULL)
659 return 0;
660 return lowerdev->ethtool_ops->get_rx_csum(lowerdev);
661 }
662diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
663index d6524db..7d41ec8 100644
664--- a/drivers/net/myri10ge/myri10ge.c
665+++ b/drivers/net/myri10ge/myri10ge.c
666@@ -2379,6 +2379,7 @@ static int myri10ge_open(struct net_device *dev)
667 lro_mgr->lro_arr = ss->rx_done.lro_desc;
668 lro_mgr->get_frag_header = myri10ge_get_frag_header;
669 lro_mgr->max_aggr = myri10ge_lro_max_pkts;
670+ lro_mgr->frag_align_pad = 2;
671 if (lro_mgr->max_aggr > MAX_SKB_FRAGS)
672 lro_mgr->max_aggr = MAX_SKB_FRAGS;
673
674diff --git a/drivers/scsi/3w-xxxx.c b/drivers/scsi/3w-xxxx.c
675index a0537f0..6221cdc 100644
676--- a/drivers/scsi/3w-xxxx.c
677+++ b/drivers/scsi/3w-xxxx.c
678@@ -6,7 +6,7 @@
679 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
680 Brad Strand <linux@3ware.com>
681
682- Copyright (C) 1999-2007 3ware Inc.
683+ Copyright (C) 1999-2009 3ware Inc.
684
685 Kernel compatibility By: Andre Hedrick <andre@suse.com>
686 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
687@@ -1294,7 +1294,8 @@ static void tw_unmap_scsi_data(struct pci_dev *pdev, struct scsi_cmnd *cmd)
688 {
689 dprintk(KERN_WARNING "3w-xxxx: tw_unmap_scsi_data()\n");
690
691- scsi_dma_unmap(cmd);
692+ if (cmd->SCp.phase == TW_PHASE_SGLIST)
693+ scsi_dma_unmap(cmd);
694 } /* End tw_unmap_scsi_data() */
695
696 /* This function will reset a device extension */
697diff --git a/drivers/scsi/3w-xxxx.h b/drivers/scsi/3w-xxxx.h
698index 0742e68..e938615 100644
699--- a/drivers/scsi/3w-xxxx.h
700+++ b/drivers/scsi/3w-xxxx.h
701@@ -6,7 +6,7 @@
702 Arnaldo Carvalho de Melo <acme@conectiva.com.br>
703 Brad Strand <linux@3ware.com>
704
705- Copyright (C) 1999-2007 3ware Inc.
706+ Copyright (C) 1999-2009 3ware Inc.
707
708 Kernel compatibility By: Andre Hedrick <andre@suse.com>
709 Non-Copyright (C) 2000 Andre Hedrick <andre@suse.com>
710diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
711index 2b7531d..08eefec 100644
712--- a/drivers/serial/icom.c
713+++ b/drivers/serial/icom.c
714@@ -1482,8 +1482,8 @@ static void icom_remove_adapter(struct icom_adapter *icom_adapter)
715
716 free_irq(icom_adapter->pci_dev->irq, (void *) icom_adapter);
717 iounmap(icom_adapter->base_addr);
718- icom_free_adapter(icom_adapter);
719 pci_release_regions(icom_adapter->pci_dev);
720+ icom_free_adapter(icom_adapter);
721 }
722
723 static void icom_kref_release(struct kref *kref)
724diff --git a/drivers/serial/mpc52xx_uart.c b/drivers/serial/mpc52xx_uart.c
725index 32e7acb..3485510 100644
726--- a/drivers/serial/mpc52xx_uart.c
727+++ b/drivers/serial/mpc52xx_uart.c
728@@ -1000,7 +1000,7 @@ mpc52xx_console_setup(struct console *co, char *options)
729 pr_debug("mpc52xx_console_setup co=%p, co->index=%i, options=%s\n",
730 co, co->index, options);
731
732- if ((co->index < 0) || (co->index > MPC52xx_PSC_MAXNUM)) {
733+ if ((co->index < 0) || (co->index >= MPC52xx_PSC_MAXNUM)) {
734 pr_debug("PSC%x out of range\n", co->index);
735 return -EINVAL;
736 }
737diff --git a/drivers/usb/host/isp1760-hcd.c b/drivers/usb/host/isp1760-hcd.c
738index 8017f1c..eb3b103 100644
739--- a/drivers/usb/host/isp1760-hcd.c
740+++ b/drivers/usb/host/isp1760-hcd.c
741@@ -1645,6 +1645,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
742 u32 reg_base, or_reg, skip_reg;
743 unsigned long flags;
744 struct ptd ptd;
745+ packet_enqueue *pe;
746
747 switch (usb_pipetype(urb->pipe)) {
748 case PIPE_ISOCHRONOUS:
749@@ -1656,6 +1657,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
750 reg_base = INT_REGS_OFFSET;
751 or_reg = HC_INT_IRQ_MASK_OR_REG;
752 skip_reg = HC_INT_PTD_SKIPMAP_REG;
753+ pe = enqueue_an_INT_packet;
754 break;
755
756 default:
757@@ -1663,6 +1665,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
758 reg_base = ATL_REGS_OFFSET;
759 or_reg = HC_ATL_IRQ_MASK_OR_REG;
760 skip_reg = HC_ATL_PTD_SKIPMAP_REG;
761+ pe = enqueue_an_ATL_packet;
762 break;
763 }
764
765@@ -1674,6 +1677,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
766 u32 skip_map;
767 u32 or_map;
768 struct isp1760_qtd *qtd;
769+ struct isp1760_qh *qh = ints->qh;
770
771 skip_map = isp1760_readl(hcd->regs + skip_reg);
772 skip_map |= 1 << i;
773@@ -1686,8 +1690,7 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
774 priv_write_copy(priv, (u32 *)&ptd, hcd->regs + reg_base
775 + i * sizeof(ptd), sizeof(ptd));
776 qtd = ints->qtd;
777-
778- clean_up_qtdlist(qtd);
779+ qtd = clean_up_qtdlist(qtd);
780
781 free_mem(priv, ints->payload);
782
783@@ -1698,7 +1701,24 @@ static int isp1760_urb_dequeue(struct usb_hcd *hcd, struct urb *urb,
784 ints->payload = 0;
785
786 isp1760_urb_done(priv, urb, status);
787+ if (qtd)
788+ pe(hcd, qh, qtd);
789 break;
790+
791+ } else if (ints->qtd) {
792+ struct isp1760_qtd *qtd, *prev_qtd = ints->qtd;
793+
794+ for (qtd = ints->qtd->hw_next; qtd; qtd = qtd->hw_next) {
795+ if (qtd->urb == urb) {
796+ prev_qtd->hw_next = clean_up_qtdlist(qtd);
797+ isp1760_urb_done(priv, urb, status);
798+ break;
799+ }
800+ prev_qtd = qtd;
801+ }
802+ /* we found the urb before the end of the list */
803+ if (qtd)
804+ break;
805 }
806 ints++;
807 }
808diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
809index eadbee3..1985721 100644
810--- a/fs/ext4/ext4.h
811+++ b/fs/ext4/ext4.h
812@@ -248,6 +248,30 @@ struct flex_groups {
813 #define EXT4_FL_USER_VISIBLE 0x000BDFFF /* User visible flags */
814 #define EXT4_FL_USER_MODIFIABLE 0x000380FF /* User modifiable flags */
815
816+/* Flags that should be inherited by new inodes from their parent. */
817+#define EXT4_FL_INHERITED (EXT4_SECRM_FL | EXT4_UNRM_FL | EXT4_COMPR_FL |\
818+ EXT4_SYNC_FL | EXT4_IMMUTABLE_FL | EXT4_APPEND_FL |\
819+ EXT4_NODUMP_FL | EXT4_NOATIME_FL |\
820+ EXT4_NOCOMPR_FL | EXT4_JOURNAL_DATA_FL |\
821+ EXT4_NOTAIL_FL | EXT4_DIRSYNC_FL)
822+
823+/* Flags that are appropriate for regular files (all but dir-specific ones). */
824+#define EXT4_REG_FLMASK (~(EXT4_DIRSYNC_FL | EXT4_TOPDIR_FL))
825+
826+/* Flags that are appropriate for non-directories/regular files. */
827+#define EXT4_OTHER_FLMASK (EXT4_NODUMP_FL | EXT4_NOATIME_FL)
828+
829+/* Mask out flags that are inappropriate for the given type of inode. */
830+static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
831+{
832+ if (S_ISDIR(mode))
833+ return flags;
834+ else if (S_ISREG(mode))
835+ return flags & EXT4_REG_FLMASK;
836+ else
837+ return flags & EXT4_OTHER_FLMASK;
838+}
839+
840 /*
841 * Inode dynamic state flags
842 */
843@@ -255,6 +279,7 @@ struct flex_groups {
844 #define EXT4_STATE_NEW 0x00000002 /* inode is newly created */
845 #define EXT4_STATE_XATTR 0x00000004 /* has in-inode xattrs */
846 #define EXT4_STATE_NO_EXPAND 0x00000008 /* No space for expansion */
847+#define EXT4_STATE_DA_ALLOC_CLOSE 0x00000010 /* Alloc DA blks on close */
848
849 /* Used to pass group descriptor data when online resize is done */
850 struct ext4_new_group_input {
851@@ -302,7 +327,9 @@ struct ext4_new_group_data {
852 #define EXT4_IOC_GROUP_EXTEND _IOW('f', 7, unsigned long)
853 #define EXT4_IOC_GROUP_ADD _IOW('f', 8, struct ext4_new_group_input)
854 #define EXT4_IOC_MIGRATE _IO('f', 9)
855+ /* note ioctl 10 reserved for an early version of the FIEMAP ioctl */
856 /* note ioctl 11 reserved for filesystem-independent FIEMAP ioctl */
857+#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
858
859 /*
860 * ioctl commands in 32 bit emulation
861@@ -1078,6 +1105,7 @@ extern int ext4_can_truncate(struct inode *inode);
862 extern void ext4_truncate (struct inode *);
863 extern void ext4_set_inode_flags(struct inode *);
864 extern void ext4_get_inode_flags(struct ext4_inode_info *);
865+extern int ext4_alloc_da_blocks(struct inode *inode);
866 extern void ext4_set_aops(struct inode *inode);
867 extern int ext4_writepage_trans_blocks(struct inode *);
868 extern int ext4_meta_trans_blocks(struct inode *, int nrblocks, int idxblocks);
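
ext4_mask_flags() above keeps only the inode flags meaningful for a given file type; later hunks in this patch switch both ext4_new_inode() and the SETFLAGS ioctl over to it. A user-space model with stand-in flag values (illustrative, not the authoritative ext4 constants):

#include <stdio.h>
#include <sys/stat.h>

#define FL_DIRSYNC  0x00010000UL   /* stand-in flag bits */
#define FL_TOPDIR   0x00020000UL
#define FL_NODUMP   0x00000040UL
#define FL_NOATIME  0x00000080UL

#define REG_FLMASK   (~(FL_DIRSYNC | FL_TOPDIR))
#define OTHER_FLMASK (FL_NODUMP | FL_NOATIME)

static unsigned long mask_flags(mode_t mode, unsigned long flags)
{
    if (S_ISDIR(mode))
        return flags;                /* directories keep everything */
    else if (S_ISREG(mode))
        return flags & REG_FLMASK;   /* drop directory-only flags */
    else
        return flags & OTHER_FLMASK; /* devices/FIFOs: almost nothing */
}

int main(void)
{
    unsigned long inherited = FL_DIRSYNC | FL_NODUMP;

    printf("%#lx\n", mask_flags(S_IFREG, inherited)); /* FL_NODUMP only */
    printf("%#lx\n", mask_flags(S_IFDIR, inherited)); /* both kept */
    return 0;
}
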
869diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
870index b24d3c5..f99635a 100644
871--- a/fs/ext4/extents.c
872+++ b/fs/ext4/extents.c
873@@ -1118,7 +1118,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
874 struct ext4_extent_idx *ix;
875 struct ext4_extent *ex;
876 ext4_fsblk_t block;
877- int depth, ee_len;
878+ int depth; /* Note, NOT eh_depth; depth from top of tree */
879+ int ee_len;
880
881 BUG_ON(path == NULL);
882 depth = path->p_depth;
883@@ -1177,7 +1178,8 @@ ext4_ext_search_right(struct inode *inode, struct ext4_ext_path *path,
884 if (bh == NULL)
885 return -EIO;
886 eh = ext_block_hdr(bh);
887- if (ext4_ext_check_header(inode, eh, depth)) {
888+ /* subtract from p_depth to get proper eh_depth */
889+ if (ext4_ext_check_header(inode, eh, path->p_depth - depth)) {
890 put_bh(bh);
891 return -EIO;
892 }
893@@ -1631,11 +1633,13 @@ ext4_ext_put_in_cache(struct inode *inode, ext4_lblk_t block,
894 {
895 struct ext4_ext_cache *cex;
896 BUG_ON(len == 0);
897+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
898 cex = &EXT4_I(inode)->i_cached_extent;
899 cex->ec_type = type;
900 cex->ec_block = block;
901 cex->ec_len = len;
902 cex->ec_start = start;
903+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
904 }
905
906 /*
907@@ -1692,12 +1696,17 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
908 struct ext4_extent *ex)
909 {
910 struct ext4_ext_cache *cex;
911+ int ret = EXT4_EXT_CACHE_NO;
912
913+ /*
914+ * We borrow i_block_reservation_lock to protect i_cached_extent
915+ */
916+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
917 cex = &EXT4_I(inode)->i_cached_extent;
918
919 /* has cache valid data? */
920 if (cex->ec_type == EXT4_EXT_CACHE_NO)
921- return EXT4_EXT_CACHE_NO;
922+ goto errout;
923
924 BUG_ON(cex->ec_type != EXT4_EXT_CACHE_GAP &&
925 cex->ec_type != EXT4_EXT_CACHE_EXTENT);
926@@ -1708,11 +1717,11 @@ ext4_ext_in_cache(struct inode *inode, ext4_lblk_t block,
927 ext_debug("%u cached by %u:%u:%llu\n",
928 block,
929 cex->ec_block, cex->ec_len, cex->ec_start);
930- return cex->ec_type;
931+ ret = cex->ec_type;
932 }
933-
934- /* not in cache */
935- return EXT4_EXT_CACHE_NO;
936+errout:
937+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
938+ return ret;
939 }
940
941 /*
942@@ -2668,6 +2677,8 @@ int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
943 if (allocated > max_blocks)
944 allocated = max_blocks;
945 set_buffer_unwritten(bh_result);
946+ bh_result->b_bdev = inode->i_sb->s_bdev;
947+ bh_result->b_blocknr = newblock;
948 goto out2;
949 }
950
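
The extents.c hunks above wrap every access to the one-entry extent cache in i_block_reservation_lock, so a reader can never observe a half-written (type, block, len, start) tuple. The same single-slot-cache discipline as a small pthread model (names and types are illustrative):

#include <pthread.h>

/* The four fields must be read and written as a unit, so both paths
 * take the same lock -- the patch borrows i_block_reservation_lock. */
struct ext_ent {
    int type;                    /* 0 = CACHE_NO, else GAP/EXTENT */
    unsigned int block, len;
    unsigned long long start;
};

struct ext_cache {
    pthread_mutex_t lock;
    struct ext_ent e;
};

static void cache_put(struct ext_cache *c, struct ext_ent ent)
{
    pthread_mutex_lock(&c->lock);
    c->e = ent;                  /* all four fields under one lock */
    pthread_mutex_unlock(&c->lock);
}

static int cache_lookup(struct ext_cache *c, unsigned int block,
                        struct ext_ent *out)
{
    int ret = 0;                 /* CACHE_NO */

    pthread_mutex_lock(&c->lock);
    if (c->e.type && block >= c->e.block &&
        block < c->e.block + c->e.len) {
        *out = c->e;             /* consistent snapshot */
        ret = c->e.type;
    }
    pthread_mutex_unlock(&c->lock);
    return ret;
}
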
951diff --git a/fs/ext4/file.c b/fs/ext4/file.c
952index 430eb79..c0d02f8 100644
953--- a/fs/ext4/file.c
954+++ b/fs/ext4/file.c
955@@ -33,9 +33,14 @@
956 */
957 static int ext4_release_file (struct inode * inode, struct file * filp)
958 {
959+ if (EXT4_I(inode)->i_state & EXT4_STATE_DA_ALLOC_CLOSE) {
960+ ext4_alloc_da_blocks(inode);
961+ EXT4_I(inode)->i_state &= ~EXT4_STATE_DA_ALLOC_CLOSE;
962+ }
963 /* if we are the last writer on the inode, drop the block reservation */
964 if ((filp->f_mode & FMODE_WRITE) &&
965- (atomic_read(&inode->i_writecount) == 1))
966+ (atomic_read(&inode->i_writecount) == 1) &&
967+ !EXT4_I(inode)->i_reserved_data_blocks)
968 {
969 down_write(&EXT4_I(inode)->i_data_sem);
970 ext4_discard_reservation(inode);
971diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
972index cce841f..e8754fd 100644
973--- a/fs/ext4/ialloc.c
974+++ b/fs/ext4/ialloc.c
975@@ -188,7 +188,7 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
976 struct ext4_group_desc * gdp;
977 struct ext4_super_block * es;
978 struct ext4_sb_info *sbi;
979- int fatal = 0, err;
980+ int fatal = 0, err, cleared;
981 ext4_group_t flex_group;
982
983 if (atomic_read(&inode->i_count) > 1) {
984@@ -242,10 +242,12 @@ void ext4_free_inode (handle_t *handle, struct inode * inode)
985 goto error_return;
986
987 /* Ok, now we can actually update the inode bitmaps.. */
988- if (!ext4_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
989- bit, bitmap_bh->b_data))
990- ext4_error (sb, "ext4_free_inode",
991- "bit already cleared for inode %lu", ino);
992+ spin_lock(sb_bgl_lock(sbi, block_group));
993+ cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
994+ spin_unlock(sb_bgl_lock(sbi, block_group));
995+ if (!cleared)
996+ ext4_error(sb, "ext4_free_inode",
997+ "bit already cleared for inode %lu", ino);
998 else {
999 gdp = ext4_get_group_desc (sb, block_group, &bh2);
1000
1001@@ -685,6 +687,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
1002 struct inode *ret;
1003 ext4_group_t i;
1004 int free = 0;
1005+ static int once = 1;
1006 ext4_group_t flex_group;
1007
1008 /* Cannot create files in a deleted directory */
1009@@ -704,10 +707,12 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode * dir, int mode)
1010 ret2 = find_group_flex(sb, dir, &group);
1011 if (ret2 == -1) {
1012 ret2 = find_group_other(sb, dir, &group);
1013- if (ret2 == 0 && printk_ratelimit())
1014+ if (ret2 == 0 && once) {
1015+ once = 0;
1016 printk(KERN_NOTICE "ext4: find_group_flex "
1017 "failed, fallback succeeded dir %lu\n",
1018 dir->i_ino);
1019+ }
1020 }
1021 goto got_group;
1022 }
1023@@ -861,16 +866,12 @@ got:
1024 ei->i_disksize = 0;
1025
1026 /*
1027- * Don't inherit extent flag from directory. We set extent flag on
1028- * newly created directory and file only if -o extent mount option is
1029- * specified
1030+ * Don't inherit extent flag from directory, amongst others. We set
1031+ * extent flag on newly created directory and file only if -o extent
1032+ * mount option is specified
1033 */
1034- ei->i_flags = EXT4_I(dir)->i_flags & ~(EXT4_INDEX_FL|EXT4_EXTENTS_FL);
1035- if (S_ISLNK(mode))
1036- ei->i_flags &= ~(EXT4_IMMUTABLE_FL|EXT4_APPEND_FL);
1037- /* dirsync only applies to directories */
1038- if (!S_ISDIR(mode))
1039- ei->i_flags &= ~EXT4_DIRSYNC_FL;
1040+ ei->i_flags =
1041+ ext4_mask_flags(mode, EXT4_I(dir)->i_flags & EXT4_FL_INHERITED);
1042 ei->i_file_acl = 0;
1043 ei->i_dtime = 0;
1044 ei->i_block_alloc_info = NULL;
1045diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
1046index 63b911b..aeebfc2 100644
1047--- a/fs/ext4/inode.c
1048+++ b/fs/ext4/inode.c
1049@@ -1046,6 +1046,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
1050 EXT4_I(inode)->i_reserved_meta_blocks = mdb;
1051 EXT4_I(inode)->i_allocated_meta_blocks = 0;
1052 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1053+
1054+ /*
1055+ * If we have done all the pending block allocations and if
1056+ * there aren't any writers on the inode, we can discard the
1057+ * inode's preallocations.
1058+ */
1059+ if (!total && (atomic_read(&inode->i_writecount) == 0))
1060+ ext4_discard_reservation(inode);
1061 }
1062
1063 /*
1064@@ -1077,6 +1085,7 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1065 int retval;
1066
1067 clear_buffer_mapped(bh);
1068+ clear_buffer_unwritten(bh);
1069
1070 /*
1071 * Try to see if we can get the block without requesting
1072@@ -1107,6 +1116,18 @@ int ext4_get_blocks_wrap(handle_t *handle, struct inode *inode, sector_t block,
1073 return retval;
1074
1075 /*
1076+ * When we call get_blocks without the create flag, the
1077+ * BH_Unwritten flag could have gotten set if the blocks
1078+ * requested were part of an uninitialized extent. We need to
1079+ * clear this flag now that we are committed to convert all or
1080+ * part of the uninitialized extent to be an initialized
1081+ * extent. This is because we need to avoid the combination
1082+ * of BH_Unwritten and BH_Mapped flags being simultaneously
1083+ * set on the buffer_head.
1084+ */
1085+ clear_buffer_unwritten(bh);
1086+
1087+ /*
1088 * New blocks allocate and/or writing to uninitialized extent
1089 * will possibly result in updating i_data, so we take
1090 * the write lock of i_data_sem, and call get_blocks()
1091@@ -2097,6 +2118,10 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1092 struct buffer_head *bh_result, int create)
1093 {
1094 int ret = 0;
1095+ sector_t invalid_block = ~((sector_t) 0xffff);
1096+
1097+ if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1098+ invalid_block = ~0;
1099
1100 BUG_ON(create == 0);
1101 BUG_ON(bh_result->b_size != inode->i_sb->s_blocksize);
1102@@ -2118,11 +2143,18 @@ static int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1103 /* not enough space to reserve */
1104 return ret;
1105
1106- map_bh(bh_result, inode->i_sb, 0);
1107+ map_bh(bh_result, inode->i_sb, invalid_block);
1108 set_buffer_new(bh_result);
1109 set_buffer_delay(bh_result);
1110 } else if (ret > 0) {
1111 bh_result->b_size = (ret << inode->i_blkbits);
1112+ /*
1113+ * With sub-block writes into unwritten extents
1114+ * we also need to mark the buffer as new so that
1115+ * the unwritten parts of the buffer gets correctly zeroed.
1116+ */
1117+ if (buffer_unwritten(bh_result))
1118+ set_buffer_new(bh_result);
1119 ret = 0;
1120 }
1121
1122@@ -2585,6 +2617,48 @@ out:
1123 return;
1124 }
1125
1126+/*
1127+ * Force all delayed allocation blocks to be allocated for a given inode.
1128+ */
1129+int ext4_alloc_da_blocks(struct inode *inode)
1130+{
1131+ if (!EXT4_I(inode)->i_reserved_data_blocks &&
1132+ !EXT4_I(inode)->i_reserved_meta_blocks)
1133+ return 0;
1134+
1135+ /*
1136+ * We do something simple for now. The filemap_flush() will
1137+ * also start triggering a write of the data blocks, which is
1138+ * not strictly speaking necessary (and for users of
1139+ * laptop_mode, not even desirable). However, to do otherwise
1140+ * would require replicating code paths in:
1141+ *
1142+ * ext4_da_writepages() ->
1143+ * write_cache_pages() ---> (via passed in callback function)
1144+ * __mpage_da_writepage() -->
1145+ * mpage_add_bh_to_extent()
1146+ * mpage_da_map_blocks()
1147+ *
1148+ * The problem is that write_cache_pages(), located in
1149+ * mm/page-writeback.c, marks pages clean in preparation for
1150+ * doing I/O, which is not desirable if we're not planning on
1151+ * doing I/O at all.
1152+ *
1153+ * We could call write_cache_pages(), and then redirty all of
1154+ * the pages by calling redirty_page_for_writeback() but that
1155+ * would be ugly in the extreme. So instead we would need to
1156+ * replicate parts of the code in the above functions,
1157+ * simplifying them because we wouldn't actually intend to
1158+ * write out the pages, but rather only collect contiguous
1159+ * logical block extents, call the multi-block allocator, and
1160+ * then update the buffer heads with the block allocations.
1161+ *
1162+ * For now, though, we'll cheat by calling filemap_flush(),
1163+ * which will map the blocks, and start the I/O, but not
1164+ * actually wait for the I/O to complete.
1165+ */
1166+ return filemap_flush(inode->i_mapping);
1167+}
1168
1169 /*
1170 * bmap() is special. It gets used by applications such as lilo and by
1171@@ -3594,6 +3668,9 @@ void ext4_truncate(struct inode *inode)
1172 if (!ext4_can_truncate(inode))
1173 return;
1174
1175+ if (inode->i_size == 0)
1176+ ei->i_state |= EXT4_STATE_DA_ALLOC_CLOSE;
1177+
1178 if (EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL) {
1179 ext4_ext_truncate(inode);
1180 return;
1181@@ -4011,11 +4088,9 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1182 ei->i_flags = le32_to_cpu(raw_inode->i_flags);
1183 inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
1184 ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
1185- if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
1186- cpu_to_le32(EXT4_OS_HURD)) {
1187+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
1188 ei->i_file_acl |=
1189 ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
1190- }
1191 inode->i_size = ext4_isize(raw_inode);
1192 ei->i_disksize = inode->i_size;
1193 inode->i_generation = le32_to_cpu(raw_inode->i_generation);
1194@@ -4062,6 +4137,18 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1195 (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
1196 }
1197
1198+ if (ei->i_file_acl &&
1199+ ((ei->i_file_acl <
1200+ (le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block) +
1201+ EXT4_SB(sb)->s_gdb_count)) ||
1202+ (ei->i_file_acl >= ext4_blocks_count(EXT4_SB(sb)->s_es)))) {
1203+ ext4_error(sb, __func__,
1204+ "bad extended attribute block %llu in inode #%lu",
1205+ ei->i_file_acl, inode->i_ino);
1206+ ret = -EIO;
1207+ goto bad_inode;
1208+ }
1209+
1210 if (S_ISREG(inode->i_mode)) {
1211 inode->i_op = &ext4_file_inode_operations;
1212 inode->i_fop = &ext4_file_operations;
1213@@ -4076,7 +4163,8 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1214 inode->i_op = &ext4_symlink_inode_operations;
1215 ext4_set_aops(inode);
1216 }
1217- } else {
1218+ } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
1219+ S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
1220 inode->i_op = &ext4_special_inode_operations;
1221 if (raw_inode->i_block[0])
1222 init_special_inode(inode, inode->i_mode,
1223@@ -4084,6 +4172,13 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
1224 else
1225 init_special_inode(inode, inode->i_mode,
1226 new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
1227+ } else {
1228+ brelse(bh);
1229+ ret = -EIO;
1230+ ext4_error(inode->i_sb, __func__,
1231+ "bogus i_mode (%o) for inode=%lu",
1232+ inode->i_mode, inode->i_ino);
1233+ goto bad_inode;
1234 }
1235 brelse (iloc.bh);
1236 ext4_set_inode_flags(inode);
1237diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
1238index 306bfd4..58dedf0 100644
1239--- a/fs/ext4/ioctl.c
1240+++ b/fs/ext4/ioctl.c
1241@@ -49,8 +49,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
1242 if (err)
1243 return err;
1244
1245- if (!S_ISDIR(inode->i_mode))
1246- flags &= ~EXT4_DIRSYNC_FL;
1247+ flags = ext4_mask_flags(inode->i_mode, flags);
1248
1249 err = -EPERM;
1250 mutex_lock(&inode->i_mutex);
1251@@ -288,6 +287,20 @@ setversion_out:
1252 return err;
1253 }
1254
1255+ case EXT4_IOC_ALLOC_DA_BLKS:
1256+ {
1257+ int err;
1258+ if (!is_owner_or_cap(inode))
1259+ return -EACCES;
1260+
1261+ err = mnt_want_write(filp->f_path.mnt);
1262+ if (err)
1263+ return err;
1264+ err = ext4_alloc_da_blocks(inode);
1265+ mnt_drop_write(filp->f_path.mnt);
1266+ return err;
1267+ }
1268+
1269 default:
1270 return -ENOTTY;
1271 }
1272diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
1273index 39d7cc1..c7dc115 100644
1274--- a/fs/ext4/mballoc.c
1275+++ b/fs/ext4/mballoc.c
1276@@ -1450,7 +1450,7 @@ static void ext4_mb_measure_extent(struct ext4_allocation_context *ac,
1277 struct ext4_free_extent *gex = &ac->ac_g_ex;
1278
1279 BUG_ON(ex->fe_len <= 0);
1280- BUG_ON(ex->fe_len >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1281+ BUG_ON(ex->fe_len > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1282 BUG_ON(ex->fe_start >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1283 BUG_ON(ac->ac_status != AC_STATUS_CONTINUE);
1284
1285@@ -2698,7 +2698,7 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
1286 sbi->s_mb_maxs = kmalloc(i, GFP_KERNEL);
1287 if (sbi->s_mb_maxs == NULL) {
1288 clear_opt(sbi->s_mount_opt, MBALLOC);
1289- kfree(sbi->s_mb_maxs);
1290+ kfree(sbi->s_mb_offsets);
1291 return -ENOMEM;
1292 }
1293
1294@@ -3400,7 +3400,7 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
1295 }
1296 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
1297 start > ac->ac_o_ex.fe_logical);
1298- BUG_ON(size <= 0 || size >= EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1299+ BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
1300
1301 /* now prepare goal request */
1302
1303@@ -3698,6 +3698,7 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
1304 struct super_block *sb, struct ext4_prealloc_space *pa)
1305 {
1306 unsigned long grp;
1307+ ext4_fsblk_t grp_blk;
1308
1309 if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0)
1310 return;
1311@@ -3712,8 +3713,12 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac,
1312 pa->pa_deleted = 1;
1313 spin_unlock(&pa->pa_lock);
1314
1315- /* -1 is to protect from crossing allocation group */
1316- ext4_get_group_no_and_offset(sb, pa->pa_pstart - 1, &grp, NULL);
1317+ grp_blk = pa->pa_pstart;
1318+ /* If linear, pa_pstart may be in the next group when pa is used up */
1319+ if (pa->pa_linear)
1320+ grp_blk--;
1321+
1322+ ext4_get_group_no_and_offset(sb, grp_blk, &grp, NULL);
1323
1324 /*
1325 * possible race:
1326@@ -4527,7 +4532,7 @@ static void ext4_mb_add_n_trim(struct ext4_allocation_context *ac)
1327 pa_inode_list) {
1328 spin_lock(&tmp_pa->pa_lock);
1329 if (tmp_pa->pa_deleted) {
1330- spin_unlock(&pa->pa_lock);
1331+ spin_unlock(&tmp_pa->pa_lock);
1332 continue;
1333 }
1334 if (!added && pa->pa_free < tmp_pa->pa_free) {
1335diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
1336index 4f3628f..6a71680 100644
1337--- a/fs/ext4/namei.c
1338+++ b/fs/ext4/namei.c
1339@@ -1055,8 +1055,16 @@ static struct dentry *ext4_lookup(struct inode * dir, struct dentry *dentry, str
1340 return ERR_PTR(-EIO);
1341 }
1342 inode = ext4_iget(dir->i_sb, ino);
1343- if (IS_ERR(inode))
1344- return ERR_CAST(inode);
1345+ if (unlikely(IS_ERR(inode))) {
1346+ if (PTR_ERR(inode) == -ESTALE) {
1347+ ext4_error(dir->i_sb, __func__,
1348+ "deleted inode referenced: %u",
1349+ ino);
1350+ return ERR_PTR(-EIO);
1351+ } else {
1352+ return ERR_CAST(inode);
1353+ }
1354+ }
1355 }
1356 return d_splice_alias(inode, dentry);
1357 }
1358@@ -2306,7 +2314,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
1359 struct inode * old_inode, * new_inode;
1360 struct buffer_head * old_bh, * new_bh, * dir_bh;
1361 struct ext4_dir_entry_2 * old_de, * new_de;
1362- int retval;
1363+ int retval, force_da_alloc = 0;
1364
1365 old_bh = new_bh = dir_bh = NULL;
1366
1367@@ -2444,6 +2452,7 @@ static int ext4_rename (struct inode * old_dir, struct dentry *old_dentry,
1368 ext4_mark_inode_dirty(handle, new_inode);
1369 if (!new_inode->i_nlink)
1370 ext4_orphan_add(handle, new_inode);
1371+ force_da_alloc = 1;
1372 }
1373 retval = 0;
1374
1375@@ -2452,6 +2461,8 @@ end_rename:
1376 brelse (old_bh);
1377 brelse (new_bh);
1378 ext4_journal_stop(handle);
1379+ if (retval == 0 && force_da_alloc)
1380+ ext4_alloc_da_blocks(old_inode);
1381 return retval;
1382 }
1383
1384diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
1385index 257ff26..bbe6d59 100644
1386--- a/fs/jbd2/revoke.c
1387+++ b/fs/jbd2/revoke.c
1388@@ -55,6 +55,25 @@
1389 * need do nothing.
1390 * RevokeValid set, Revoked set:
1391 * buffer has been revoked.
1392+ *
1393+ * Locking rules:
1394+ * We keep two hash tables of revoke records. One hashtable belongs to the
1395+ * running transaction (is pointed to by journal->j_revoke), the other one
1396+ * belongs to the committing transaction. Accesses to the second hash table
1397+ * happen only from kjournald and no other thread touches this table. Also
1398+ * journal_switch_revoke_table() which switches which hashtable belongs to the
1399+ * running and which to the committing transaction is called only from
1400+ * kjournald. Therefore we need no locks when accessing the hashtable belonging
1401+ * to the committing transaction.
1402+ *
1403+ * All users operating on the hash table belonging to the running transaction
1404+ * have a handle to the transaction. Therefore they are safe from kjournald
1405+ * switching hash tables under them. For operations on the lists of entries in
1406+ * the hash table j_revoke_lock is used.
1407+ *
1408+ * Finally, the replay code also uses the hash tables but at this moment no one
1409+ * can touch them (filesystem isn't mounted yet) and hence no locking is
1410+ * needed.
1411 */
1412
1413 #ifndef __KERNEL__
1414@@ -401,8 +420,6 @@ int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
1415 * the second time we would still have a pending revoke to cancel. So,
1416 * do not trust the Revoked bit on buffers unless RevokeValid is also
1417 * set.
1418- *
1419- * The caller must have the journal locked.
1420 */
1421 int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
1422 {
1423@@ -480,10 +497,7 @@ void jbd2_journal_switch_revoke_table(journal_t *journal)
1424 /*
1425 * Write revoke records to the journal for all entries in the current
1426 * revoke hash, deleting the entries as we go.
1427- *
1428- * Called with the journal lock held.
1429 */
1430-
1431 void jbd2_journal_write_revoke_records(journal_t *journal,
1432 transaction_t *transaction)
1433 {
1434diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
1435index bff8733..c3fe156 100644
1436--- a/fs/nfs/dir.c
1437+++ b/fs/nfs/dir.c
1438@@ -1925,7 +1925,8 @@ int nfs_permission(struct inode *inode, int mask)
1439 case S_IFREG:
1440 /* NFSv4 has atomic_open... */
1441 if (nfs_server_capable(inode, NFS_CAP_ATOMIC_OPEN)
1442- && (mask & MAY_OPEN))
1443+ && (mask & MAY_OPEN)
1444+ && !(mask & MAY_EXEC))
1445 goto out;
1446 break;
1447 case S_IFDIR:
1448diff --git a/mm/hugetlb.c b/mm/hugetlb.c
1449index 81e9a82..1ecbcf6 100644
1450--- a/mm/hugetlb.c
1451+++ b/mm/hugetlb.c
1452@@ -286,7 +286,7 @@ void resv_map_release(struct kref *ref)
1453 static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1454 {
1455 VM_BUG_ON(!is_vm_hugetlb_page(vma));
1456- if (!(vma->vm_flags & VM_SHARED))
1457+ if (!(vma->vm_flags & VM_MAYSHARE))
1458 return (struct resv_map *)(get_vma_private_data(vma) &
1459 ~HPAGE_RESV_MASK);
1460 return 0;
1461@@ -295,7 +295,7 @@ static struct resv_map *vma_resv_map(struct vm_area_struct *vma)
1462 static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
1463 {
1464 VM_BUG_ON(!is_vm_hugetlb_page(vma));
1465- VM_BUG_ON(vma->vm_flags & VM_SHARED);
1466+ VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);
1467
1468 set_vma_private_data(vma, (get_vma_private_data(vma) &
1469 HPAGE_RESV_MASK) | (unsigned long)map);
@@ -304,7 +304,7 @@ static void set_vma_resv_map(struct vm_area_struct *vma, struct resv_map *map)
 static void set_vma_resv_flags(struct vm_area_struct *vma, unsigned long flags)
 {
 VM_BUG_ON(!is_vm_hugetlb_page(vma));
- VM_BUG_ON(vma->vm_flags & VM_SHARED);
+ VM_BUG_ON(vma->vm_flags & VM_MAYSHARE);

 set_vma_private_data(vma, get_vma_private_data(vma) | flags);
 }
@@ -323,7 +323,7 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 if (vma->vm_flags & VM_NORESERVE)
 return;

- if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_flags & VM_MAYSHARE) {
 /* Shared mappings always use reserves */
 h->resv_huge_pages--;
 } else if (is_vma_resv_set(vma, HPAGE_RESV_OWNER)) {
@@ -339,14 +339,14 @@ static void decrement_hugepage_resv_vma(struct hstate *h,
 void reset_vma_resv_huge_pages(struct vm_area_struct *vma)
 {
 VM_BUG_ON(!is_vm_hugetlb_page(vma));
- if (!(vma->vm_flags & VM_SHARED))
+ if (!(vma->vm_flags & VM_MAYSHARE))
 vma->vm_private_data = (void *)0;
 }

 /* Returns true if the VMA has associated reserve pages */
 static int vma_has_reserves(struct vm_area_struct *vma)
 {
- if (vma->vm_flags & VM_SHARED)
+ if (vma->vm_flags & VM_MAYSHARE)
 return 1;
 if (is_vma_resv_set(vma, HPAGE_RESV_OWNER))
 return 1;
@@ -890,7 +890,7 @@ static int vma_needs_reservation(struct hstate *h,
 struct address_space *mapping = vma->vm_file->f_mapping;
 struct inode *inode = mapping->host;

- if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_flags & VM_MAYSHARE) {
 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 return region_chg(&inode->i_mapping->private_list,
 idx, idx + 1);
@@ -915,7 +915,7 @@ static void vma_commit_reservation(struct hstate *h,
 struct address_space *mapping = vma->vm_file->f_mapping;
 struct inode *inode = mapping->host;

- if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_flags & VM_MAYSHARE) {
 pgoff_t idx = vma_hugecache_offset(h, vma, addr);
 region_add(&inode->i_mapping->private_list, idx, idx + 1);

@@ -1862,7 +1862,7 @@ retry_avoidcopy:
 * at the time of fork() could consume its reserves on COW instead
 * of the full address range.
 */
- if (!(vma->vm_flags & VM_SHARED) &&
+ if (!(vma->vm_flags & VM_MAYSHARE) &&
 is_vma_resv_set(vma, HPAGE_RESV_OWNER) &&
 old_page != pagecache_page)
 outside_reserve = 1;
@@ -1969,7 +1969,7 @@ retry:
 clear_huge_page(page, address, huge_page_size(h));
 __SetPageUptodate(page);

- if (vma->vm_flags & VM_SHARED) {
+ if (vma->vm_flags & VM_MAYSHARE) {
 int err;
 struct inode *inode = mapping->host;

@@ -2073,7 +2073,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 goto out_unlock;
 }

- if (!(vma->vm_flags & VM_SHARED))
+ if (!(vma->vm_flags & VM_MAYSHARE))
 pagecache_page = hugetlbfs_pagecache_page(h,
 vma, address);
 }
@@ -2223,7 +2223,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 * to reserve the full area even if read-only as mprotect() may be
 * called to make the mapping read-write. Assume !vma is a shm mapping
 */
- if (!vma || vma->vm_flags & VM_SHARED)
+ if (!vma || vma->vm_flags & VM_MAYSHARE)
 chg = region_chg(&inode->i_mapping->private_list, from, to);
 else {
 struct resv_map *resv_map = resv_map_alloc();
@@ -2246,7 +2246,7 @@ int hugetlb_reserve_pages(struct inode *inode,
 hugetlb_put_quota(inode->i_mapping, chg);
 return ret;
 }
- if (!vma || vma->vm_flags & VM_SHARED)
+ if (!vma || vma->vm_flags & VM_MAYSHARE)
 region_add(&inode->i_mapping->private_list, from, to);
 return 0;
 }
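The hugetlb hunks above all make the same substitution: reservation accounting now keys off VM_MAYSHARE rather than VM_SHARED. The distinction matters for read-only shared mappings, where the kernel clears VM_SHARED but keeps VM_MAYSHARE, so the old test misclassified them as private. A minimal userspace sketch of the case in question; the hugetlbfs path is made up for illustration:

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* A PROT_READ + MAP_SHARED mapping of a hugetlbfs file: the resulting
 * VMA has VM_MAYSHARE set but VM_SHARED clear, which is exactly the
 * mapping the VM_SHARED test treated as private for reservations. */
static void *map_huge_readonly(size_t len)
{
	void *p;
	int fd = open("/mnt/huge/data", O_RDONLY);	/* hypothetical path */

	if (fd < 0)
		return MAP_FAILED;
	p = mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
	close(fd);			/* the mapping persists after close */
	return p;
}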
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index a756847..86714d1 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2449,7 +2449,7 @@ static inline void free_SAs(struct pktgen_dev *pkt_dev)
 if (pkt_dev->cflows) {
 /* let go of the SAs if we have them */
 int i = 0;
- for (; i < pkt_dev->nflows; i++){
+ for (; i < pkt_dev->cflows; i++) {
 struct xfrm_state *x = pkt_dev->flows[i].x;
 if (x) {
 xfrm_state_put(x);
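The pktgen fix above is a plain loop-bound correction: the flows array is sized by cflows, while nflows only tracks how many flows have been used so far, so freeing by nflows could leave xfrm_state references behind. A generic sketch of the corrected pattern; the names are stand-ins, not pktgen's:

struct flow_slot {
	void *state;			/* stand-in for struct xfrm_state * */
};

extern void release_state(void *state);	/* hypothetical put helper */

static void free_all_slots(struct flow_slot *slots, int allocated)
{
	int i;

	/* walk the full allocation, not the high-water mark of use */
	for (i = 0; i < allocated; i++) {
		if (slots[i].state) {
			release_state(slots[i].state);
			slots[i].state = NULL;
		}
	}
}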
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 0675991..7832287 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1992,7 +1992,7 @@ unsigned int skb_seq_read(unsigned int consumed, const u8 **data,
 next_skb:
 block_limit = skb_headlen(st->cur_skb) + st->stepped_offset;

- if (abs_offset < block_limit) {
+ if (abs_offset < block_limit && !st->frag_data) {
 *data = st->cur_skb->data + (abs_offset - st->stepped_offset);
 return block_limit - abs_offset;
 }
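The skb_seq_read() change adds a single condition: linear-head data is only handed back when no fragment is still mapped through st->frag_data, so the function instead falls through to the fragment-walking code, which releases the stale mapping first. A reduced sketch of the guard, with simplified stand-in types:

struct seq_state {
	const unsigned char *frag_data;	/* non-NULL while a frag is mapped */
};

static int head_read_ok(unsigned int abs_offset, unsigned int block_limit,
			const struct seq_state *st)
{
	/* serve head bytes only once any fragment mapping is released */
	return abs_offset < block_limit && !st->frag_data;
}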
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 7abc6b8..4eca4d3 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -931,6 +931,8 @@ static void tcp_init_metrics(struct sock *sk)
 tcp_bound_rto(sk);
 if (inet_csk(sk)->icsk_rto < TCP_TIMEOUT_INIT && !tp->rx_opt.saw_tstamp)
 goto reset;
+
+cwnd:
 tp->snd_cwnd = tcp_init_cwnd(tp, dst);
 tp->snd_cwnd_stamp = tcp_time_stamp;
 return;
@@ -945,6 +947,7 @@ reset:
 tp->mdev = tp->mdev_max = tp->rttvar = TCP_TIMEOUT_INIT;
 inet_csk(sk)->icsk_rto = TCP_TIMEOUT_INIT;
 }
+ goto cwnd;
 }

 static void tcp_update_reordering(struct sock *sk, const int metric,
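The tcp_init_metrics() change above is pure control flow: previously the reset path fell off the end of the function without ever initializing snd_cwnd; the new cwnd label and the trailing goto make both paths converge on the congestion-window setup. The shape of the fix, reduced to a standalone sketch with stand-in names:

struct tcp_sketch {
	unsigned int snd_cwnd;
	int metrics_ok;			/* stand-in for the RTT sanity checks */
};

static void init_metrics_shape(struct tcp_sketch *tp)
{
	if (!tp->metrics_ok)
		goto reset;
cwnd:
	tp->snd_cwnd = 10;		/* stand-in for tcp_init_cwnd() */
	return;
reset:
	tp->metrics_ok = 0;		/* stand-in for resetting srtt/mdev/rto */
	goto cwnd;		/* the fix: the reset path now sets cwnd too */
}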
diff --git a/net/mac80211/rc80211_pid_algo.c b/net/mac80211/rc80211_pid_algo.c
index a914ba7..4077676 100644
--- a/net/mac80211/rc80211_pid_algo.c
+++ b/net/mac80211/rc80211_pid_algo.c
@@ -367,8 +367,40 @@ static void rate_control_pid_rate_init(void *priv, void *priv_sta,
 * Until that method is implemented, we will use the lowest supported
 * rate as a workaround. */
 struct ieee80211_supported_band *sband;
+ struct rc_pid_info *pinfo = priv;
+ struct rc_pid_rateinfo *rinfo = pinfo->rinfo;
+ int i, j, tmp;
+ bool s;

 sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
+
+ /* Sort the rates. This is optimized for the most common case (i.e.
+ * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
+ * mapping too. */
+ for (i = 0; i < sband->n_bitrates; i++) {
+ rinfo[i].index = i;
+ rinfo[i].rev_index = i;
+ if (RC_PID_FAST_START)
+ rinfo[i].diff = 0;
+ else
+ rinfo[i].diff = i * pinfo->norm_offset;
+ }
+ for (i = 1; i < sband->n_bitrates; i++) {
+ s = 0;
+ for (j = 0; j < sband->n_bitrates - i; j++)
+ if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
+ sband->bitrates[rinfo[j + 1].index].bitrate)) {
+ tmp = rinfo[j].index;
+ rinfo[j].index = rinfo[j + 1].index;
+ rinfo[j + 1].index = tmp;
+ rinfo[rinfo[j].index].rev_index = j;
+ rinfo[rinfo[j + 1].index].rev_index = j + 1;
+ s = 1;
+ }
+ if (!s)
+ break;
+ }
+
 sta->txrate_idx = rate_lowest_index(local, sband, sta);
 sta->fail_avg = 0;
 }
@@ -378,21 +410,23 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
 struct rc_pid_info *pinfo;
 struct rc_pid_rateinfo *rinfo;
 struct ieee80211_supported_band *sband;
- int i, j, tmp;
- bool s;
+ int i, max_rates = 0;
 #ifdef CONFIG_MAC80211_DEBUGFS
 struct rc_pid_debugfs_entries *de;
 #endif

- sband = local->hw.wiphy->bands[local->hw.conf.channel->band];
-
 pinfo = kmalloc(sizeof(*pinfo), GFP_ATOMIC);
 if (!pinfo)
 return NULL;

+ for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
+ sband = local->hw.wiphy->bands[i];
+ if (sband && sband->n_bitrates > max_rates)
+ max_rates = sband->n_bitrates;
+ }
 /* We can safely assume that sband won't change unless we get
 * reinitialized. */
- rinfo = kmalloc(sizeof(*rinfo) * sband->n_bitrates, GFP_ATOMIC);
+ rinfo = kmalloc(sizeof(*rinfo) * max_rates, GFP_ATOMIC);
 if (!rinfo) {
 kfree(pinfo);
 return NULL;
@@ -410,33 +444,6 @@ static void *rate_control_pid_alloc(struct ieee80211_local *local)
 pinfo->rinfo = rinfo;
 pinfo->oldrate = 0;

- /* Sort the rates. This is optimized for the most common case (i.e.
- * almost-sorted CCK+OFDM rates). Kind of bubble-sort with reversed
- * mapping too. */
- for (i = 0; i < sband->n_bitrates; i++) {
- rinfo[i].index = i;
- rinfo[i].rev_index = i;
- if (RC_PID_FAST_START)
- rinfo[i].diff = 0;
- else
- rinfo[i].diff = i * pinfo->norm_offset;
- }
- for (i = 1; i < sband->n_bitrates; i++) {
- s = 0;
- for (j = 0; j < sband->n_bitrates - i; j++)
- if (unlikely(sband->bitrates[rinfo[j].index].bitrate >
- sband->bitrates[rinfo[j + 1].index].bitrate)) {
- tmp = rinfo[j].index;
- rinfo[j].index = rinfo[j + 1].index;
- rinfo[j + 1].index = tmp;
- rinfo[rinfo[j].index].rev_index = j;
- rinfo[rinfo[j + 1].index].rev_index = j + 1;
- s = 1;
- }
- if (!s)
- break;
- }
-
 #ifdef CONFIG_MAC80211_DEBUGFS
 de = &pinfo->dentries;
 de->dir = debugfs_create_dir("rc80211_pid",
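Two things move in the rc80211_pid hunks above: the bubble sort of the rate table now runs in rate_control_pid_rate_init(), where the current band is known, and rate_control_pid_alloc() sizes the table for the largest of all supported bands rather than whichever band happened to be active at allocation time. A sketch of that sizing pass, with simplified stand-in types:

#define NUM_BANDS 2			/* stand-in for IEEE80211_NUM_BANDS */

struct band_sketch {
	int n_bitrates;
};

/* size one scratch table so any band can be (re)sorted into it later */
static int rate_table_slots(struct band_sketch *bands[NUM_BANDS])
{
	int i, max_rates = 0;

	for (i = 0; i < NUM_BANDS; i++)
		if (bands[i] && bands[i]->n_bitrates > max_rates)
			max_rates = bands[i]->n_bitrates;
	return max_rates;
}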
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f028f70..e2d25da 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4477,7 +4477,7 @@ static int selinux_ip_postroute_iptables_compat(struct sock *sk,
 if (err)
 return err;

- if (send_perm != 0)
+ if (!send_perm)
 return 0;

 err = sel_netport_sid(sk->sk_protocol,
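The SELinux one-liner inverts a test that was backwards: the old code returned early exactly when a send permission had been computed, so the netport check that follows never ran. The corrected shape of the guard, reduced to a sketch with hypothetical names:

extern int netport_check(unsigned int perm);	/* hypothetical */

static int postroute_send_check(unsigned int send_perm)
{
	if (!send_perm)		/* nothing to validate for this protocol */
		return 0;
	return netport_check(send_perm);
}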
diff --git a/sound/usb/usbaudio.c b/sound/usb/usbaudio.c
index 1710623..c10e476 100644
--- a/sound/usb/usbaudio.c
+++ b/sound/usb/usbaudio.c
@@ -3367,7 +3367,7 @@ static int snd_usb_create_quirk(struct snd_usb_audio *chip,
 [QUIRK_MIDI_YAMAHA] = snd_usb_create_midi_interface,
 [QUIRK_MIDI_MIDIMAN] = snd_usb_create_midi_interface,
 [QUIRK_MIDI_NOVATION] = snd_usb_create_midi_interface,
- [QUIRK_MIDI_RAW] = snd_usb_create_midi_interface,
+ [QUIRK_MIDI_FASTLANE] = snd_usb_create_midi_interface,
 [QUIRK_MIDI_EMAGIC] = snd_usb_create_midi_interface,
 [QUIRK_MIDI_CME] = snd_usb_create_midi_interface,
 [QUIRK_AUDIO_STANDARD_INTERFACE] = create_standard_audio_quirk,
diff --git a/sound/usb/usbaudio.h b/sound/usb/usbaudio.h
index 7cf18c3..7191d82 100644
--- a/sound/usb/usbaudio.h
+++ b/sound/usb/usbaudio.h
@@ -153,7 +153,7 @@ enum quirk_type {
 QUIRK_MIDI_YAMAHA,
 QUIRK_MIDI_MIDIMAN,
 QUIRK_MIDI_NOVATION,
- QUIRK_MIDI_RAW,
+ QUIRK_MIDI_FASTLANE,
 QUIRK_MIDI_EMAGIC,
 QUIRK_MIDI_CME,
 QUIRK_AUDIO_STANDARD_INTERFACE,
diff --git a/sound/usb/usbmidi.c b/sound/usb/usbmidi.c
index 940ae5a..cd2b622 100644
--- a/sound/usb/usbmidi.c
+++ b/sound/usb/usbmidi.c
@@ -1733,8 +1733,18 @@ int snd_usb_create_midi_interface(struct snd_usb_audio* chip,
 umidi->usb_protocol_ops = &snd_usbmidi_novation_ops;
 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
 break;
- case QUIRK_MIDI_RAW:
+ case QUIRK_MIDI_FASTLANE:
 umidi->usb_protocol_ops = &snd_usbmidi_raw_ops;
+ /*
+ * Interface 1 contains isochronous endpoints, but with the same
+ * numbers as in interface 0. Since it is interface 1 that the
+ * USB core has most recently seen, these descriptors are now
+ * associated with the endpoint numbers. This will foul up our
+ * attempts to submit bulk/interrupt URBs to the endpoints in
+ * interface 0, so we have to make sure that the USB core looks
+ * again at interface 0 by calling usb_set_interface() on it.
+ */
+ usb_set_interface(umidi->chip->dev, 0, 0);
 err = snd_usbmidi_detect_per_port_endpoints(umidi, endpoints);
 break;
 case QUIRK_MIDI_EMAGIC:
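The comment in the Fastlane hunk spells out the quirk: interfaces 0 and 1 reuse endpoint numbers, and the USB core associates the descriptors with whichever interface it parsed last. Re-selecting altsetting 0 of the interface you are about to use makes the core rebind its endpoint descriptors before bulk/interrupt URBs are submitted. A hedged sketch of that general pattern, using the real usb_set_interface() call:

#include <linux/usb.h>

/* When two interfaces reuse endpoint numbers, re-select the one about
 * to be used so the core re-reads its endpoint descriptors. */
static int reselect_interface(struct usb_device *dev, int ifnum)
{
	return usb_set_interface(dev, ifnum, 0);	/* altsetting 0 */
}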
diff --git a/sound/usb/usbquirks.h b/sound/usb/usbquirks.h
index 9ea726c..076ca4c 100644
--- a/sound/usb/usbquirks.h
+++ b/sound/usb/usbquirks.h
@@ -1756,7 +1756,7 @@ YAMAHA_DEVICE(0x7010, "UB99"),
 .data = & (const struct snd_usb_audio_quirk[]) {
 {
 .ifnum = 0,
- .type = QUIRK_MIDI_RAW
+ .type = QUIRK_MIDI_FASTLANE
 },
 {
 .ifnum = 1,