From: Greg Kroah-Hartman <gregkh@suse.de>
Subject: Linux 2.6.27.32

Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>

diff --git a/Makefile b/Makefile
index fa0d21e..00dc0ee 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 27
-EXTRAVERSION = .31
+EXTRAVERSION = .32
 NAME = Trembling Tortoise

 # *DOCUMENTATION*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 3da2508..95c65e3 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
 #define ACC_USER_MASK PT_USER_MASK
 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

-struct kvm_pv_mmu_op_buffer {
- void *ptr;
- unsigned len;
- unsigned processed;
- char buf[512] __aligned(sizeof(long));
-};
-
 struct kvm_rmap_desc {
 u64 *shadow_ptes[RMAP_EXT];
 struct kvm_rmap_desc *more;
@@ -305,7 +298,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 if (r)
 goto out;
 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
- rmap_desc_cache, 1);
+ rmap_desc_cache, 4);
 if (r)
 goto out;
 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
@@ -1162,7 +1155,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
 */
 spte = shadow_base_present_pte | shadow_dirty_mask;
 if (!speculative)
- pte_access |= PT_ACCESSED_MASK;
+ spte |= shadow_accessed_mask;
 if (!dirty)
 pte_access &= ~ACC_WRITE_MASK;
 if (pte_access & ACC_EXEC_MASK)
@@ -1357,7 +1350,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
 }

-static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
+static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
+{
+ int ret = 0;
+
+ if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
+ set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
+ ret = 1;
+ }
+
+ return ret;
+}
+
+static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
 {
 int i;
 gfn_t root_gfn;
@@ -1372,13 +1377,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 ASSERT(!VALID_PAGE(root));
 if (tdp_enabled)
 metaphysical = 1;
+ if (mmu_check_root(vcpu, root_gfn))
+ return 1;
 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
 PT64_ROOT_LEVEL, metaphysical,
 ACC_ALL, NULL);
 root = __pa(sp->spt);
 ++sp->root_count;
 vcpu->arch.mmu.root_hpa = root;
- return;
+ return 0;
 }
 metaphysical = !is_paging(vcpu);
 if (tdp_enabled)
@@ -1395,6 +1402,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
 } else if (vcpu->arch.mmu.root_level == 0)
 root_gfn = 0;
+ if (mmu_check_root(vcpu, root_gfn))
+ return 1;
 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
 PT32_ROOT_LEVEL, metaphysical,
 ACC_ALL, NULL);
@@ -1403,6 +1412,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
 }
 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
+ return 0;
 }

 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
@@ -1646,8 +1656,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
 goto out;
 spin_lock(&vcpu->kvm->mmu_lock);
 kvm_mmu_free_some_pages(vcpu);
- mmu_alloc_roots(vcpu);
+ r = mmu_alloc_roots(vcpu);
 spin_unlock(&vcpu->kvm->mmu_lock);
+ if (r)
+ goto out;
 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
 kvm_mmu_flush_tlb(vcpu);
 out:
@@ -1983,14 +1995,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);

 static void free_mmu_pages(struct kvm_vcpu *vcpu)
 {
- struct kvm_mmu_page *sp;
-
- while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
- sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
- struct kvm_mmu_page, link);
- kvm_mmu_zap_page(vcpu->kvm, sp);
- cond_resched();
- }
 free_page((unsigned long)vcpu->arch.mmu.pae_root);
 }

@@ -2068,6 +2072,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 if (pt[i] & PT_WRITABLE_MASK)
 pt[i] &= ~PT_WRITABLE_MASK;
 }
+ kvm_flush_remote_tlbs(kvm);
 }

 void kvm_mmu_zap_all(struct kvm *kvm)
@@ -2237,7 +2242,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,

 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
 {
- kvm_x86_ops->tlb_flush(vcpu);
+ kvm_set_cr3(vcpu, vcpu->arch.cr3);
 return 1;
 }

@@ -2291,18 +2296,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
 gpa_t addr, unsigned long *ret)
 {
 int r;
- struct kvm_pv_mmu_op_buffer buffer;
+ struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;

- buffer.ptr = buffer.buf;
- buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
- buffer.processed = 0;
+ buffer->ptr = buffer->buf;
+ buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
+ buffer->processed = 0;

- r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
+ r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
 if (r)
 goto out;

- while (buffer.len) {
- r = kvm_pv_mmu_op_one(vcpu, &buffer);
+ while (buffer->len) {
+ r = kvm_pv_mmu_op_one(vcpu, buffer);
 if (r < 0)
 goto out;
 if (r == 0)
@@ -2311,7 +2316,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,

 r = 1;
 out:
- *ret = buffer.processed;
+ *ret = buffer->processed;
 return r;
 }

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8233b86..77cae01 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -429,7 +429,6 @@ static __init int svm_hardware_setup(void)

 iopm_va = page_address(iopm_pages);
 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
- clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

 if (boot_cpu_has(X86_FEATURE_NX))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7041cc5..4cee61a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -898,11 +898,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 int ret = 0;

 switch (msr_index) {
-#ifdef CONFIG_X86_64
 case MSR_EFER:
 vmx_load_host_state(vmx);
 ret = kvm_set_msr_common(vcpu, msr_index, data);
 break;
+#ifdef CONFIG_X86_64
 case MSR_FS_BASE:
 vmcs_writel(GUEST_FS_BASE, data);
 break;
@@ -1789,7 +1789,7 @@ static void seg_setup(int seg)
 vmcs_write16(sf->selector, 0);
 vmcs_writel(sf->base, 0);
 vmcs_write32(sf->limit, 0xffff);
- vmcs_write32(sf->ar_bytes, 0x93);
+ vmcs_write32(sf->ar_bytes, 0xf3);
 }

 static int alloc_apic_access_page(struct kvm *kvm)
@@ -2036,6 +2036,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)

 fx_init(&vmx->vcpu);

+ seg_setup(VCPU_SREG_CS);
 /*
 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
@@ -2047,8 +2048,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
 }
- vmcs_write32(GUEST_CS_LIMIT, 0xffff);
- vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);

 seg_setup(VCPU_SREG_DS);
 seg_setup(VCPU_SREG_ES);
@@ -2583,6 +2582,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 return 1;
 }

+static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
+{
+ kvm_queue_exception(vcpu, UD_VECTOR);
+ return 1;
+}
+
 static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 skip_emulated_instruction(vcpu);
@@ -2715,6 +2720,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
 [EXIT_REASON_HLT] = handle_halt,
 [EXIT_REASON_VMCALL] = handle_vmcall,
+ [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
+ [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
+ [EXIT_REASON_VMPTRST] = handle_vmx_insn,
+ [EXIT_REASON_VMREAD] = handle_vmx_insn,
+ [EXIT_REASON_VMRESUME] = handle_vmx_insn,
+ [EXIT_REASON_VMWRITE] = handle_vmx_insn,
+ [EXIT_REASON_VMOFF] = handle_vmx_insn,
+ [EXIT_REASON_VMON] = handle_vmx_insn,
 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
 [EXIT_REASON_WBINVD] = handle_wbinvd,
@@ -3300,7 +3314,8 @@ static int __init vmx_init(void)
 bypass_guest_pf = 0;
 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
 VMX_EPT_WRITABLE_MASK |
- VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
+ VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
+ VMX_EPT_IGMT_BIT);
 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
 VMX_EPT_EXECUTABLE_MASK);
 kvm_enable_tdp();
diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
index 23e8373..198cdf3 100644
--- a/arch/x86/kvm/vmx.h
+++ b/arch/x86/kvm/vmx.h
@@ -370,6 +370,7 @@ enum vmcs_field {
 #define VMX_EPT_READABLE_MASK 0x1ull
 #define VMX_EPT_WRITABLE_MASK 0x2ull
 #define VMX_EPT_EXECUTABLE_MASK 0x4ull
+#define VMX_EPT_IGMT_BIT (1ull << 6)

 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul

diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0d682fc..f7c7142 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -318,6 +318,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);

 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
+ unsigned long old_cr4 = vcpu->arch.cr4;
+ unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
+
 if (cr4 & CR4_RESERVED_BITS) {
 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
 kvm_inject_gp(vcpu, 0);
@@ -331,7 +334,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 kvm_inject_gp(vcpu, 0);
 return;
 }
- } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
+ } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
+ && ((cr4 ^ old_cr4) & pdptr_bits)
 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
 kvm_inject_gp(vcpu, 0);
@@ -752,6 +756,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 case MSR_IA32_MC0_MISC+8:
 case MSR_IA32_MC0_MISC+12:
 case MSR_IA32_MC0_MISC+16:
+ case MSR_IA32_MC0_MISC+20:
 case MSR_IA32_UCODE_REV:
 case MSR_IA32_EBL_CR_POWERON:
 data = 0;
@@ -982,9 +987,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)

 static int is_efer_nx(void)
 {
- u64 efer;
+ unsigned long long efer = 0;

- rdmsrl(MSR_EFER, efer);
+ rdmsrl_safe(MSR_EFER, &efer);
 return efer & EFER_NX;
 }

@@ -1303,28 +1308,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 struct kvm_vcpu *vcpu = filp->private_data;
 void __user *argp = (void __user *)arg;
 int r;
+ struct kvm_lapic_state *lapic = NULL;

 switch (ioctl) {
 case KVM_GET_LAPIC: {
- struct kvm_lapic_state lapic;
+ lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);

- memset(&lapic, 0, sizeof lapic);
- r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
+ r = -ENOMEM;
+ if (!lapic)
+ goto out;
+ r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
 if (r)
 goto out;
 r = -EFAULT;
- if (copy_to_user(argp, &lapic, sizeof lapic))
+ if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
 goto out;
 r = 0;
 break;
 }
 case KVM_SET_LAPIC: {
- struct kvm_lapic_state lapic;
-
+ lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
+ r = -ENOMEM;
+ if (!lapic)
+ goto out;
 r = -EFAULT;
- if (copy_from_user(&lapic, argp, sizeof lapic))
+ if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
 goto out;
- r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
+ r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
 if (r)
 goto out;
 r = 0;
@@ -1422,6 +1432,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
 r = -EINVAL;
 }
 out:
+ if (lapic)
+ kfree(lapic);
 return r;
 }

@@ -1442,10 +1454,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
 return -EINVAL;

 down_write(&kvm->slots_lock);
+ spin_lock(&kvm->mmu_lock);

 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;

+ spin_unlock(&kvm->mmu_lock);
 up_write(&kvm->slots_lock);
 return 0;
 }
@@ -1612,7 +1626,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,

 /* If nothing is dirty, don't bother messing with page tables. */
 if (is_dirty) {
+ spin_lock(&kvm->mmu_lock);
 kvm_mmu_slot_remove_write_access(kvm, log->slot);
+ spin_unlock(&kvm->mmu_lock);
 kvm_flush_remote_tlbs(kvm);
 memslot = &kvm->memslots[log->slot];
 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
@@ -1630,6 +1646,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
 struct kvm *kvm = filp->private_data;
 void __user *argp = (void __user *)arg;
 int r = -EINVAL;
+ /*
+ * This union makes it completely explicit to gcc-3.x
+ * that these two variables' stack usage should be
+ * combined, not added together.
+ */
+ union {
+ struct kvm_pit_state ps;
+ struct kvm_memory_alias alias;
+ } u;

 switch (ioctl) {
 case KVM_SET_TSS_ADDR:
@@ -1661,17 +1686,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
 case KVM_GET_NR_MMU_PAGES:
 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
 break;
- case KVM_SET_MEMORY_ALIAS: {
- struct kvm_memory_alias alias;
-
+ case KVM_SET_MEMORY_ALIAS:
 r = -EFAULT;
- if (copy_from_user(&alias, argp, sizeof alias))
+ if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
 goto out;
- r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
+ r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
 if (r)
 goto out;
 break;
- }
 case KVM_CREATE_IRQCHIP:
 r = -ENOMEM;
 kvm->arch.vpic = kvm_create_pic(kvm);
@@ -1713,65 +1735,77 @@ long kvm_arch_vm_ioctl(struct file *filp,
 }
 case KVM_GET_IRQCHIP: {
 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
+ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
+ r = -ENOMEM;
+ if (!chip)
 goto out;
+ r = -EFAULT;
+ if (copy_from_user(chip, argp, sizeof *chip))
+ goto get_irqchip_out;
 r = -ENXIO;
 if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
+ goto get_irqchip_out;
+ r = kvm_vm_ioctl_get_irqchip(kvm, chip);
 if (r)
- goto out;
+ goto get_irqchip_out;
 r = -EFAULT;
- if (copy_to_user(argp, &chip, sizeof chip))
- goto out;
+ if (copy_to_user(argp, chip, sizeof *chip))
+ goto get_irqchip_out;
 r = 0;
+ get_irqchip_out:
+ kfree(chip);
+ if (r)
+ goto out;
 break;
 }
 case KVM_SET_IRQCHIP: {
 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
- struct kvm_irqchip chip;
+ struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);

- r = -EFAULT;
- if (copy_from_user(&chip, argp, sizeof chip))
+ r = -ENOMEM;
+ if (!chip)
 goto out;
+ r = -EFAULT;
+ if (copy_from_user(chip, argp, sizeof *chip))
+ goto set_irqchip_out;
 r = -ENXIO;
 if (!irqchip_in_kernel(kvm))
- goto out;
- r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
+ goto set_irqchip_out;
+ r = kvm_vm_ioctl_set_irqchip(kvm, chip);
 if (r)
- goto out;
+ goto set_irqchip_out;
 r = 0;
+ set_irqchip_out:
+ kfree(chip);
+ if (r)
+ goto out;
 break;
 }
 case KVM_GET_PIT: {
- struct kvm_pit_state ps;
 r = -EFAULT;
- if (copy_from_user(&ps, argp, sizeof ps))
+ if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
 goto out;
 r = -ENXIO;
 if (!kvm->arch.vpit)
 goto out;
- r = kvm_vm_ioctl_get_pit(kvm, &ps);
+ r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
 if (r)
 goto out;
 r = -EFAULT;
- if (copy_to_user(argp, &ps, sizeof ps))
+ if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
 goto out;
 r = 0;
 break;
 }
 case KVM_SET_PIT: {
- struct kvm_pit_state ps;
 r = -EFAULT;
- if (copy_from_user(&ps, argp, sizeof ps))
+ if (copy_from_user(&u.ps, argp, sizeof u.ps))
 goto out;
 r = -ENXIO;
 if (!kvm->arch.vpit)
 goto out;
- r = kvm_vm_ioctl_set_pit(kvm, &ps);
+ r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
 if (r)
 goto out;
 r = 0;
@@ -2813,10 +2847,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 down_read(&vcpu->kvm->slots_lock);
 vapic_enter(vcpu);

-preempted:
- if (vcpu->guest_debug.enabled)
- kvm_x86_ops->guest_debug_pre(vcpu);
-
 again:
 if (vcpu->requests)
 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
@@ -2870,6 +2900,9 @@ again:
 goto out;
 }

+ if (vcpu->guest_debug.enabled)
+ kvm_x86_ops->guest_debug_pre(vcpu);
+
 vcpu->guest_mode = 1;
 /*
 * Make sure that guest_mode assignment won't happen after
@@ -2944,7 +2977,7 @@ out:
 if (r > 0) {
 kvm_resched(vcpu);
 down_read(&vcpu->kvm->slots_lock);
- goto preempted;
+ goto again;
 }

 post_kvm_run_save(vcpu, kvm_run);
@@ -3294,11 +3327,33 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
 return 0;
 }

+int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
+{
+ struct kvm_segment segvar = {
+ .base = selector << 4,
+ .limit = 0xffff,
+ .selector = selector,
+ .type = 3,
+ .present = 1,
+ .dpl = 3,
+ .db = 0,
+ .s = 1,
+ .l = 0,
+ .g = 0,
+ .avl = 0,
+ .unusable = 0,
+ };
+ kvm_x86_ops->set_segment(vcpu, &segvar, seg);
+ return 0;
+}
+
 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
 int type_bits, int seg)
 {
 struct kvm_segment kvm_seg;

+ if (!(vcpu->arch.cr0 & X86_CR0_PE))
+ return kvm_load_realmode_segment(vcpu, selector, seg);
 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
 return 1;
 kvm_seg.type |= type_bits;
@@ -3981,7 +4036,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 userspace_addr = do_mmap(NULL, 0,
 npages * PAGE_SIZE,
 PROT_READ | PROT_WRITE,
- MAP_SHARED | MAP_ANONYMOUS,
+ MAP_PRIVATE | MAP_ANONYMOUS,
 0);
 up_write(&current->mm->mmap_sem);

@@ -4008,12 +4063,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 }
 }

+ spin_lock(&kvm->mmu_lock);
 if (!kvm->arch.n_requested_mmu_pages) {
 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
 }

 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
+ spin_unlock(&kvm->mmu_lock);
 kvm_flush_remote_tlbs(kvm);

 return 0;
@@ -4022,6 +4079,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
 void kvm_arch_flush_shadow(struct kvm *kvm)
 {
 kvm_mmu_zap_all(kvm);
+ kvm_reload_remote_mmus(kvm);
 }

 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
index ebda9a8..3340c62 100644
--- a/drivers/char/mxser.c
+++ b/drivers/char/mxser.c
@@ -1099,8 +1099,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
 if (retval)
 return retval;

- /* unmark here for very high baud rate (ex. 921600 bps) used */
- tty->low_latency = 1;
 return 0;
 }

diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
index 66a0f93..4dfb5a1 100644
--- a/drivers/char/nozomi.c
+++ b/drivers/char/nozomi.c
@@ -1584,7 +1584,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)

 /* Enable interrupt downlink for channel */
 if (port->tty_open_count == 1) {
- tty->low_latency = 1;
 tty->driver_data = port;
 port->tty = tty;
 DBG1("open: %d", port->token_dl);
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index a6e730f..682f411 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -1530,6 +1530,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
 {
 int ret, i;

+ if (pr->qp)
+ netif_napi_del(&pr->napi);
+
 ret = ehea_destroy_qp(pr->qp);

 if (!ret) {
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index a8a62bb..a592f29 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
 * pardevice fields. -arca
 */
 port->ops->init_state(tmp, tmp->state);
- parport_device_proc_register(tmp);
+ if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
+ port->proc_device = tmp;
+ parport_device_proc_register(tmp);
+ }
 return tmp;

 out_free_all:
@@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
 }
 #endif

- parport_device_proc_unregister(dev);
-
 port = dev->port->physport;

+ if (port->proc_device == dev) {
+ port->proc_device = NULL;
+ clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
+ parport_device_proc_unregister(dev);
+ }
+
 if (port->cad == dev) {
 printk(KERN_DEBUG "%s: %s forgot to release port\n",
 port->name, dev->name);
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index ae87d08..25f2008 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -309,6 +309,11 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
 if (0 == sr_test_unit_ready(cd->device, &sshdr))
 return CDS_DISC_OK;

+ /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
+ if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+ && sshdr.asc == 0x04 && sshdr.ascq == 0x01)
+ return CDS_DRIVE_NOT_READY;
+
 if (!cdrom_get_media_event(cdi, &med)) {
 if (med.media_present)
 return CDS_DISC_OK;
diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
index b4d7235..7b3df8e 100644
--- a/drivers/usb/serial/cyberjack.c
+++ b/drivers/usb/serial/cyberjack.c
@@ -174,13 +174,6 @@ static int cyberjack_open(struct tty_struct *tty,
 dbg("%s - usb_clear_halt", __func__);
 usb_clear_halt(port->serial->dev, port->write_urb->pipe);

- /* force low_latency on so that our tty_push actually forces
- * the data through, otherwise it is scheduled, and with high
- * data rates (like with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
 priv = usb_get_serial_port_data(port);
 spin_lock_irqsave(&priv->lock, flags);
 priv->rdtodo = 0;
diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
index 22837a3..7eb473b 100644
--- a/drivers/usb/serial/cypress_m8.c
+++ b/drivers/usb/serial/cypress_m8.c
@@ -655,10 +655,6 @@ static int cypress_open(struct tty_struct *tty,
 priv->rx_flags = 0;
 spin_unlock_irqrestore(&priv->lock, flags);

- /* setting to zero could cause data loss */
- if (tty)
- tty->low_latency = 1;
-
 /* raise both lines and set termios */
 spin_lock_irqsave(&priv->lock, flags);
 priv->line_control = CONTROL_DTR | CONTROL_RTS;
diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
index a6ab5b5..28ee28c 100644
--- a/drivers/usb/serial/empeg.c
+++ b/drivers/usb/serial/empeg.c
@@ -478,12 +478,6 @@ static void empeg_set_termios(struct tty_struct *tty,
 termios->c_cflag
 |= CS8; /* character size 8 bits */

- /*
- * Force low_latency on; otherwise the pushes are scheduled;
- * this is bad as it opens up the possibility of dropping bytes
- * on the floor. We don't want to drop bytes on the floor. :)
- */
- tty->low_latency = 1;
 tty_encode_baud_rate(tty, 115200, 115200);
 }

diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
index d953820..d860071 100644
--- a/drivers/usb/serial/garmin_gps.c
+++ b/drivers/usb/serial/garmin_gps.c
@@ -972,14 +972,6 @@ static int garmin_open(struct tty_struct *tty,

 dbg("%s - port %d", __func__, port->number);

- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
 spin_lock_irqsave(&garmin_data_p->lock, flags);
 garmin_data_p->mode = initial_mode;
 garmin_data_p->count = 0;
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index fe84c88..aa7f08b 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -122,12 +122,6 @@ int usb_serial_generic_open(struct tty_struct *tty,

 dbg("%s - port %d", __func__, port->number);

- /* force low_latency on so that our tty_push actually forces the data
- through, otherwise it is scheduled, and with high data rates (like
- with OHCI) data can get lost. */
- if (tty)
- tty->low_latency = 1;
-
 /* clear the throttle flags */
 spin_lock_irqsave(&port->lock, flags);
 port->throttled = 0;
diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
index bfa508d..183045a 100644
--- a/drivers/usb/serial/io_edgeport.c
+++ b/drivers/usb/serial/io_edgeport.c
@@ -193,8 +193,6 @@ static const struct divisor_table_entry divisor_table[] = {
 /* local variables */
 static int debug;

-static int low_latency = 1; /* tty low latency flag, on by default */
-
 static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */


@@ -861,9 +859,6 @@ static int edge_open(struct tty_struct *tty,
 if (edge_port == NULL)
 return -ENODEV;

- if (tty)
- tty->low_latency = low_latency;
-
 /* see if we've set up our endpoint info yet (can't set it up
 in edge_startup as the structures were not set up at that time.) */
 serial = port->serial;
@@ -3281,6 +3276,3 @@ MODULE_FIRMWARE("edgeport/down2.fw");

 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Debug enabled or not");
-
-module_param(low_latency, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
index cb4c543..0d744f0 100644
--- a/drivers/usb/serial/io_ti.c
+++ b/drivers/usb/serial/io_ti.c
@@ -76,7 +76,6 @@ struct edgeport_uart_buf_desc {
 #define EDGE_READ_URB_STOPPING 1
 #define EDGE_READ_URB_STOPPED 2

-#define EDGE_LOW_LATENCY 1
 #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */

 #define EDGE_OUT_BUF_SIZE 1024
@@ -232,7 +231,6 @@ static unsigned short OperationalBuildNumber;

 static int debug;

-static int low_latency = EDGE_LOW_LATENCY;
 static int closing_wait = EDGE_CLOSING_WAIT;
 static int ignore_cpu_rev;
 static int default_uart_mode; /* RS232 */
@@ -1838,9 +1836,6 @@ static int edge_open(struct tty_struct *tty,
 if (edge_port == NULL)
 return -ENODEV;

- if (tty)
- tty->low_latency = low_latency;
-
 port_number = port->number - port->serial->minor;
 switch (port_number) {
 case 0:
@@ -2995,9 +2990,6 @@ MODULE_FIRMWARE("edgeport/down3.bin");
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Debug enabled or not");

-module_param(low_latency, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
-
 module_param(closing_wait, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");

diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
index cd9a2e1..ae0b0ff 100644
--- a/drivers/usb/serial/ipaq.c
+++ b/drivers/usb/serial/ipaq.c
@@ -635,13 +635,7 @@ static int ipaq_open(struct tty_struct *tty,
 priv->free_len += PACKET_SIZE;
 }

- /*
- * Force low latency on. This will immediately push data to the line
- * discipline instead of queueing.
- */
-
 if (tty) {
- tty->low_latency = 1;
 /* FIXME: These two are bogus */
 tty->raw = 1;
 tty->real_raw = 1;
diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
index a842025..b1c0c9a 100644
--- a/drivers/usb/serial/ipw.c
+++ b/drivers/usb/serial/ipw.c
@@ -206,9 +206,6 @@ static int ipw_open(struct tty_struct *tty,
 if (!buf_flow_init)
 return -ENOMEM;

- if (tty)
- tty->low_latency = 1;
-
 /* --1: Tell the modem to initialize (we think) From sniffs this is
 * always the first thing that gets sent to the modem during
 * opening of the device */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index ddff37f..d6da4c9 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -1046,7 +1046,6 @@ static int iuu_open(struct tty_struct *tty,
 tty->termios->c_oflag = 0;
 tty->termios->c_iflag = 0;
 priv->termios_initialized = 1;
- tty->low_latency = 1;
 priv->poll = 0;
 }
 spin_unlock_irqrestore(&priv->lock, flags);
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index deba28e..5326c59 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -231,13 +231,7 @@ static int kobil_open(struct tty_struct *tty,
 /* someone sets the dev to 0 if the close method has been called */
 port->interrupt_in_urb->dev = port->serial->dev;

-
- /* force low_latency on so that our tty_push actually forces
- * the data through, otherwise it is scheduled, and with high
- * data rates (like with OHCI) data can get lost.
- */
 if (tty) {
- tty->low_latency = 1;

 /* Default to echo off and other sane device settings */
 tty->termios->c_lflag = 0;
diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
index 7c4917d..1c2402d 100644
--- a/drivers/usb/serial/mos7720.c
+++ b/drivers/usb/serial/mos7720.c
@@ -442,13 +442,6 @@ static int mos7720_open(struct tty_struct *tty,
 data = 0x0c;
 send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);

- /* force low_latency on so that our tty_push actually forces *
- * the data through,otherwise it is scheduled, and with *
- * high data rates (like with OHCI) data can get lost. */
-
- if (tty)
- tty->low_latency = 1;
-
 /* see if we've set up our endpoint info yet *
 * (can't set it up in mos7720_startup as the *
 * structures were not set up at that time.) */
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 09d8206..8befcbb 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -990,12 +990,6 @@ static int mos7840_open(struct tty_struct *tty,
 status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
 Data);

- /* force low_latency on so that our tty_push actually forces *
- * the data through,otherwise it is scheduled, and with *
- * high data rates (like with OHCI) data can get lost. */
- if (tty)
- tty->low_latency = 1;
-
 /* Check to see if we've set up our endpoint info yet *
 * (can't set it up in mos7840_startup as the structures *
 * were not set up at that time.) */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 211cd61..faa30ad 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -914,9 +914,6 @@ static int option_open(struct tty_struct *tty,
 usb_pipeout(urb->pipe), 0); */
 }

- if (tty)
- tty->low_latency = 1;
-
 option_send_setup(tty, port);

 return 0;
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ea1a103..639328b 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -576,9 +576,6 @@ static int sierra_open(struct tty_struct *tty,
 }
 }

- if (tty)
- tty->low_latency = 1;
-
 sierra_send_setup(tty, port);

 /* start up the interrupt endpoint if we have one */
diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
index bc5e905..55b9d67 100644
--- a/drivers/usb/serial/ti_usb_3410_5052.c
+++ b/drivers/usb/serial/ti_usb_3410_5052.c
@@ -101,11 +101,10 @@

 #define TI_TRANSFER_TIMEOUT 2

-#define TI_DEFAULT_LOW_LATENCY 0
 #define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */

 /* supported setserial flags */
-#define TI_SET_SERIAL_FLAGS (ASYNC_LOW_LATENCY)
+#define TI_SET_SERIAL_FLAGS 0

 /* read urb states */
 #define TI_READ_URB_RUNNING 0
@@ -212,7 +211,6 @@ static int ti_buf_get(struct circ_buf *cb, char *buf, int count);

 /* module parameters */
 static int debug;
-static int low_latency = TI_DEFAULT_LOW_LATENCY;
 static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
 static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
 static unsigned int vendor_3410_count;
@@ -333,10 +331,6 @@ MODULE_FIRMWARE("ti_5052.fw");
 module_param(debug, bool, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");

-module_param(low_latency, bool, S_IRUGO | S_IWUSR);
-MODULE_PARM_DESC(low_latency,
- "TTY low_latency flag, 0=off, 1=on, default is off");
-
 module_param(closing_wait, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(closing_wait,
 "Maximum wait for data to drain in close, in .01 secs, default is 4000");
@@ -480,7 +474,6 @@ static int ti_startup(struct usb_serial *serial)
 spin_lock_init(&tport->tp_lock);
 tport->tp_uart_base_addr = (i == 0 ?
 TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
- tport->tp_flags = low_latency ? ASYNC_LOW_LATENCY : 0;
 tport->tp_closing_wait = closing_wait;
 init_waitqueue_head(&tport->tp_msr_wait);
 init_waitqueue_head(&tport->tp_write_wait);
@@ -560,10 +553,6 @@ static int ti_open(struct tty_struct *tty,
 if (mutex_lock_interruptible(&tdev->td_open_close_lock))
 return -ERESTARTSYS;

- if (tty)
- tty->low_latency =
- (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
-
 port_number = port->number - port->serial->minor;

 memset(&(tport->tp_icount), 0x00, sizeof(tport->tp_icount));
@@ -1480,10 +1469,6 @@ static int ti_set_serial_info(struct ti_port *tport,
 return -EFAULT;

 tport->tp_flags = new_serial.flags & TI_SET_SERIAL_FLAGS;
- /* FIXME */
- if (port->port.tty)
- port->port.tty->low_latency =
- (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
 tport->tp_closing_wait = new_serial.closing_wait;

 return 0;
diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
index cf8924f..ec33fa5 100644
--- a/drivers/usb/serial/visor.c
+++ b/drivers/usb/serial/visor.c
@@ -296,14 +296,6 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
 priv->throttled = 0;
 spin_unlock_irqrestore(&priv->lock, flags);

- /*
- * Force low_latency on so that our tty_push actually forces the data
- * through, otherwise it is scheduled, and with high data rates (like
- * with OHCI) data can get lost.
- */
- if (tty)
- tty->low_latency = 1;
-
 /* Start reading from the device */
 usb_fill_bulk_urb(port->read_urb, serial->dev,
 usb_rcvbulkpipe(serial->dev,
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index a53da14..0f04b70 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -908,18 +908,17 @@ struct ocfs2_write_cluster_desc {
 */
 unsigned c_new;
 unsigned c_unwritten;
+ unsigned c_needs_zero;
 };

-static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
-{
- return d->c_new || d->c_unwritten;
-}
-
 struct ocfs2_write_ctxt {
 /* Logical cluster position / len of write */
 u32 w_cpos;
 u32 w_clen;

+ /* First cluster allocated in a nonsparse extend */
+ u32 w_first_new_cpos;
+
 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];

 /*
@@ -997,6 +996,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
 return -ENOMEM;

 wc->w_cpos = pos >> osb->s_clustersize_bits;
+ wc->w_first_new_cpos = UINT_MAX;
 cend = (pos + len - 1) >> osb->s_clustersize_bits;
 wc->w_clen = cend - wc->w_cpos + 1;
 get_bh(di_bh);
@@ -1239,13 +1239,11 @@ static int ocfs2_write_cluster(struct address_space *mapping,
 struct ocfs2_write_ctxt *wc, u32 cpos,
 loff_t user_pos, unsigned user_len)
 {
- int ret, i, new, should_zero = 0;
+ int ret, i, new;
 u64 v_blkno, p_blkno;
 struct inode *inode = mapping->host;

 new = phys == 0 ? 1 : 0;
- if (new || unwritten)
- should_zero = 1;

 if (new) {
 u32 tmp_pos;
@@ -1356,7 +1354,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
 local_len = osb->s_clustersize - cluster_off;

 ret = ocfs2_write_cluster(mapping, desc->c_phys,
- desc->c_unwritten, data_ac, meta_ac,
+ desc->c_unwritten,
+ desc->c_needs_zero,
+ data_ac, meta_ac,
 wc, desc->c_cpos, pos, local_len);
 if (ret) {
 mlog_errno(ret);
@@ -1406,14 +1406,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
 * newly allocated cluster.
 */
 desc = &wc->w_desc[0];
- if (ocfs2_should_zero_cluster(desc))
+ if (desc->c_needs_zero)
 ocfs2_figure_cluster_boundaries(osb,
 desc->c_cpos,
 &wc->w_target_from,
 NULL);

 desc = &wc->w_desc[wc->w_clen - 1];
- if (ocfs2_should_zero_cluster(desc))
+ if (desc->c_needs_zero)
 ocfs2_figure_cluster_boundaries(osb,
 desc->c_cpos,
 NULL,
@@ -1481,13 +1481,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
 phys++;
 }

+ /*
+ * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
+ * file that got extended. w_first_new_cpos tells us
+ * where the newly allocated clusters are so we can
+ * zero them.
+ */
+ if (desc->c_cpos >= wc->w_first_new_cpos) {
+ BUG_ON(phys == 0);
+ desc->c_needs_zero = 1;
+ }
+
 desc->c_phys = phys;
 if (phys == 0) {
 desc->c_new = 1;
+ desc->c_needs_zero = 1;
 *clusters_to_alloc = *clusters_to_alloc + 1;
 }
- if (ext_flags & OCFS2_EXT_UNWRITTEN)
+
+ if (ext_flags & OCFS2_EXT_UNWRITTEN) {
 desc->c_unwritten = 1;
+ desc->c_needs_zero = 1;
+ }

 num_clusters--;
 }
@@ -1644,10 +1659,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
 if (newsize <= i_size_read(inode))
 return 0;

- ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
+ ret = ocfs2_extend_no_holes(inode, newsize, pos);
 if (ret)
 mlog_errno(ret);

+ wc->w_first_new_cpos =
+ ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
+
 return ret;
 }

@@ -1656,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 struct page **pagep, void **fsdata,
 struct buffer_head *di_bh, struct page *mmap_page)
 {
- int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
+ int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
 unsigned int clusters_to_alloc, extents_to_split;
 struct ocfs2_write_ctxt *wc;
 struct inode *inode = mapping->host;
@@ -1724,8 +1742,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,

 }

- ocfs2_set_target_boundaries(osb, wc, pos, len,
- clusters_to_alloc + extents_to_split);
+ /*
+ * We have to zero sparse allocated clusters, unwritten extent clusters,
+ * and non-sparse clusters we just extended. For non-sparse writes,
+ * we know zeros will only be needed in the first and/or last cluster.
+ */
+ if (clusters_to_alloc || extents_to_split ||
+ (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
+ wc->w_desc[wc->w_clen - 1].c_needs_zero)))
+ cluster_of_pages = 1;
+ else
+ cluster_of_pages = 0;
+
+ ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);

 handle = ocfs2_start_trans(osb, credits);
 if (IS_ERR(handle)) {
@@ -1753,8 +1782,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
 * extent.
 */
 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
- clusters_to_alloc + extents_to_split,
- mmap_page);
+ cluster_of_pages, mmap_page);
 if (ret) {
 mlog_errno(ret);
 goto out_commit;
diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
index c2e34c2..cf7c887 100644
--- a/include/asm-x86/kvm_host.h
+++ b/include/asm-x86/kvm_host.h
@@ -195,6 +195,13 @@ struct kvm_mmu_page {
 };
 };

+struct kvm_pv_mmu_op_buffer {
+ void *ptr;
+ unsigned len;
+ unsigned processed;
+ char buf[512] __aligned(sizeof(long));
+};
+
 /*
 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
@@ -237,6 +244,9 @@ struct kvm_vcpu_arch {
 bool tpr_access_reporting;

 struct kvm_mmu mmu;
+ /* only needed in kvm_pv_mmu_op() path, but it's hot so
+ * put it here to avoid allocation */
+ struct kvm_pv_mmu_op_buffer mmu_op_buffer;

 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 6a0d7cd..986252e 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -326,6 +326,10 @@ struct parport {
 int spintime;
 atomic_t ref_count;

+ unsigned long devflags;
+#define PARPORT_DEVPROC_REGISTERED 0
+ struct pardevice *proc_device; /* Currently register proc device */
+
 struct list_head full_list;
 struct parport *slaves[3];
 };
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4d80a11..75a87fe 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -260,6 +260,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
 #define XPRT_BOUND (4)
 #define XPRT_BINDING (5)
 #define XPRT_CLOSING (6)
+#define XPRT_CONNECTION_CLOSE (8)

 static inline void xprt_set_connected(struct rpc_xprt *xprt)
 {
diff --git a/kernel/fork.c b/kernel/fork.c
index fcbd28c..3fdf3d5 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -767,11 +767,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
 struct signal_struct *sig;
 int ret;

- if (clone_flags & CLONE_THREAD) {
- atomic_inc(&current->signal->count);
- atomic_inc(&current->signal->live);
+ if (clone_flags & CLONE_THREAD)
 return 0;
- }
+
 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
 tsk->signal = sig;
 if (!sig)
@@ -844,16 +842,6 @@ void __cleanup_signal(struct signal_struct *sig)
 kmem_cache_free(signal_cachep, sig);
 }

-static void cleanup_signal(struct task_struct *tsk)
-{
- struct signal_struct *sig = tsk->signal;
-
- atomic_dec(&sig->live);
-
- if (atomic_dec_and_test(&sig->count))
- __cleanup_signal(sig);
-}
-
 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
 {
 unsigned long new_flags = p->flags;
@@ -1201,6 +1189,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 }

 if (clone_flags & CLONE_THREAD) {
+ atomic_inc(&current->signal->count);
+ atomic_inc(&current->signal->live);
 p->group_leader = current->group_leader;
 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);

@@ -1261,7 +1251,8 @@ bad_fork_cleanup_mm:
 if (p->mm)
 mmput(p->mm);
 bad_fork_cleanup_signal:
- cleanup_signal(p);
+ if (!(clone_flags & CLONE_THREAD))
+ __cleanup_signal(p->signal);
 bad_fork_cleanup_sighand:
 __cleanup_sighand(p->sighand);
 bad_fork_cleanup_fs:
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 96cff2f..9548d52 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -213,12 +213,12 @@ int kthread_stop(struct task_struct *k)
 /* Now set kthread_should_stop() to true, and wake it up. */
 kthread_stop_info.k = k;
 wake_up_process(k);
- put_task_struct(k);

 /* Once it dies, reset stop ptr, gather result and we're done. */
 wait_for_completion(&kthread_stop_info.done);
 kthread_stop_info.k = NULL;
 ret = kthread_stop_info.err;
+ put_task_struct(k);
 mutex_unlock(&kthread_stop_lock);

 return ret;
diff --git a/kernel/signal.c b/kernel/signal.c
index 7d0a222..de2b649 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2353,11 +2353,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 stack_t oss;
 int error;

- if (uoss) {
- oss.ss_sp = (void __user *) current->sas_ss_sp;
- oss.ss_size = current->sas_ss_size;
- oss.ss_flags = sas_ss_flags(sp);
- }
+ oss.ss_sp = (void __user *) current->sas_ss_sp;
+ oss.ss_size = current->sas_ss_size;
+ oss.ss_flags = sas_ss_flags(sp);

 if (uss) {
 void __user *ss_sp;
@@ -2400,13 +2398,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
 current->sas_ss_size = ss_size;
 }

+ error = 0;
 if (uoss) {
 error = -EFAULT;
- if (copy_to_user(uoss, &oss, sizeof(oss)))
+ if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
 goto out;
+ error = __put_user(oss.ss_sp, &uoss->ss_sp) |
+ __put_user(oss.ss_size, &uoss->ss_size) |
+ __put_user(oss.ss_flags, &uoss->ss_flags);
 }

- error = 0;
 out:
 return error;
 }
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0c85042..8067dc7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1245,6 +1245,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
 return -ENOBUFS;

 *uaddr_len = sizeof(struct sockaddr_at);
+ memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));

 if (peer) {
 if (sk->sk_state != TCP_ESTABLISHED)
diff --git a/net/can/raw.c b/net/can/raw.c
index 6e0663f..08f31d4 100644
--- a/net/can/raw.c
+++ b/net/can/raw.c
@@ -396,6 +396,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
 if (peer)
 return -EOPNOTSUPP;

+ memset(addr, 0, sizeof(*addr));
 addr->can_family = AF_CAN;
 addr->can_ifindex = ro->ifindex;

diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
index 8789d2b..9aae86f 100644
--- a/net/econet/af_econet.c
+++ b/net/econet/af_econet.c
@@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
 if (peer)
 return -EOPNOTSUPP;

+ memset(sec, 0, sizeof(*sec));
 mutex_lock(&econet_mutex);

 sk = sock->sk;
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 3eb5bcc..b28409c 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
 struct sock *sk = sock->sk;
 struct irda_sock *self = irda_sk(sk);

+ memset(&saddr, 0, sizeof(saddr));
 if (peer) {
 if (sk->sk_state != TCP_ESTABLISHED)
 return -ENOTCONN;
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 5bcc452..90a55b1 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -915,6 +915,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
 struct llc_sock *llc = llc_sk(sk);
 int rc = 0;

+ memset(&sllc, 0, sizeof(sllc));
 lock_sock(sk);
 if (sock_flag(sk, SOCK_ZAPPED))
 goto out;
diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
index db9e263..ad72bde 100644
--- a/net/netrom/af_netrom.c
+++ b/net/netrom/af_netrom.c
@@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
 sax->fsa_ax25.sax25_family = AF_NETROM;
 sax->fsa_ax25.sax25_ndigis = 1;
 sax->fsa_ax25.sax25_call = nr->user_addr;
+ memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
 sax->fsa_digipeater[0] = nr->dest_addr;
 *uaddr_len = sizeof(struct full_sockaddr_ax25);
 } else {
diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
index c062361..f132243 100644
--- a/net/rose/af_rose.c
+++ b/net/rose/af_rose.c
@@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
 struct rose_sock *rose = rose_sk(sk);
 int n;

+ memset(srose, 0, sizeof(*srose));
 if (peer != 0) {
 if (sk->sk_state != TCP_ESTABLISHED)
 return -ENOTCONN;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 66cfe88..860b1d4 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -860,6 +860,7 @@ static inline void
 rpc_task_force_reencode(struct rpc_task *task)
 {
 task->tk_rqstp->rq_snd_buf.len = 0;
+ task->tk_rqstp->rq_bytes_sent = 0;
 }

 static inline void
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 99a52aa..b66be67 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -645,10 +645,8 @@ xprt_init_autodisconnect(unsigned long data)
 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
 goto out_abort;
 spin_unlock(&xprt->transport_lock);
- if (xprt_connecting(xprt))
- xprt_release_write(xprt, NULL);
- else
- queue_work(rpciod_workqueue, &xprt->task_cleanup);
+ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ queue_work(rpciod_workqueue, &xprt->task_cleanup);
 return;
 out_abort:
 spin_unlock(&xprt->transport_lock);
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index 8f9295d..a283304 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -748,6 +748,9 @@ out_release:
 *
 * This is used when all requests are complete; ie, no DRC state remains
 * on the server we want to save.
+ *
+ * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
+ * xs_reset_transport() zeroing the socket from underneath a writer.
 */
 static void xs_close(struct rpc_xprt *xprt)
 {
@@ -781,6 +784,14 @@ clear_close_wait:
 xprt_disconnect_done(xprt);
 }

+static void xs_tcp_close(struct rpc_xprt *xprt)
+{
+ if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
+ xs_close(xprt);
+ else
+ xs_tcp_shutdown(xprt);
+}
+
 /**
 * xs_destroy - prepare to shutdown a transport
 * @xprt: doomed transport
@@ -1676,11 +1687,21 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
 goto out_clear;
 case -ECONNREFUSED:
 case -ECONNRESET:
+ case -ENETUNREACH:
 /* retry with existing socket, after a delay */
- break;
+ goto out_clear;
 default:
 /* get rid of existing socket, and retry */
 xs_tcp_shutdown(xprt);
+ printk("%s: connect returned unhandled error %d\n",
+ __func__, status);
+ case -EADDRNOTAVAIL:
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ xprt_force_disconnect(xprt);
+ status = -EAGAIN;
 }
 }
 out:
@@ -1735,11 +1756,21 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
 goto out_clear;
 case -ECONNREFUSED:
 case -ECONNRESET:
+ case -ENETUNREACH:
 /* retry with existing socket, after a delay */
- break;
+ goto out_clear;
 default:
 /* get rid of existing socket, and retry */
 xs_tcp_shutdown(xprt);
+ printk("%s: connect returned unhandled error %d\n",
+ __func__, status);
+ case -EADDRNOTAVAIL:
+ /* We're probably in TIME_WAIT. Get rid of existing socket,
+ * and retry
+ */
+ set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
+ xprt_force_disconnect(xprt);
+ status = -EAGAIN;
 }
 }
 out:
@@ -1871,7 +1902,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
 .buf_free = rpc_free,
 .send_request = xs_tcp_send_request,
 .set_retrans_timeout = xprt_set_retrans_timeout_def,
- .close = xs_tcp_shutdown,
+ .close = xs_tcp_close,
 .destroy = xs_destroy,
 .print_stats = xs_tcp_print_stats,
 };
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index 1533f03..4cfbd79 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -779,47 +779,24 @@ static int snd_interval_ratden(struct snd_interval *i,
 int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
 {
 unsigned int k;
- int changed = 0;
+ struct snd_interval list_range;

 if (!count) {
 i->empty = 1;
 return -EINVAL;
 }
+ snd_interval_any(&list_range);
+ list_range.min = UINT_MAX;
+ list_range.max = 0;
 for (k = 0; k < count; k++) {
 if (mask && !(mask & (1 << k)))
 continue;
- if (i->min == list[k] && !i->openmin)
- goto _l1;
- if (i->min < list[k]) {
- i->min = list[k];
- i->openmin = 0;
- changed = 1;
- goto _l1;
- }
- }
- i->empty = 1;
- return -EINVAL;
- _l1:
- for (k = count; k-- > 0;) {
- if (mask && !(mask & (1 << k)))
+ if (!snd_interval_test(i, list[k]))
 continue;
- if (i->max == list[k] && !i->openmax)
- goto _l2;
- if (i->max > list[k]) {
- i->max = list[k];
- i->openmax = 0;
- changed = 1;
- goto _l2;
- }
+ list_range.min = min(list_range.min, list[k]);
+ list_range.max = max(list_range.max, list[k]);
 }
- i->empty = 1;
- return -EINVAL;
- _l2:
- if (snd_interval_checkempty(i)) {
- i->empty = 1;
- return -EINVAL;
- }
- return changed;
+ return snd_interval_refine(i, &list_range);
 }

 EXPORT_SYMBOL(snd_interval_list);
1685diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1686index 07f8dcc..2581b8c 100644
1687--- a/sound/pci/hda/patch_realtek.c
1688+++ b/sound/pci/hda/patch_realtek.c
1689@@ -5580,9 +5580,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
1690 };
1691
1692 /*
1693- * 6ch mode
1694+ * 4ch mode
1695 */
1696-static struct hda_verb alc885_mbp_ch6_init[] = {
1697+static struct hda_verb alc885_mbp_ch4_init[] = {
1698 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
1699 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1700 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
1701@@ -5591,9 +5591,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
1702 { } /* end */
1703 };
1704
1705-static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
1706+static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
1707 { 2, alc885_mbp_ch2_init },
1708- { 6, alc885_mbp_ch6_init },
1709+ { 4, alc885_mbp_ch4_init },
1710 };
1711
1712
1713@@ -5628,10 +5628,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
1714 };
1715
1716 static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
1717- HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1718- HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
1719- HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
1720- HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1721+ HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1722+ HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
1723+ HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
1724+ HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
1725+ HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1726 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
1727 HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
1728 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
1729@@ -5879,14 +5880,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
1730 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1731 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1732 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1733+ /* HP mixer */
1734+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1735+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1736+ {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1737 /* Front Pin: output 0 (0x0c) */
1738 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1739 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1740 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
1741- /* HP Pin: output 0 (0x0d) */
1742+ /* HP Pin: output 0 (0x0e) */
1743 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
1744- {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
1745- {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
1746+ {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1747+ {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
1748 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
1749 /* Mic (rear) pin: input vref at 80% */
1750 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1751@@ -6326,10 +6331,11 @@ static struct alc_config_preset alc882_presets[] = {
1752 .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
1753 .init_verbs = { alc885_mbp3_init_verbs,
1754 alc880_gpio1_init_verbs },
1755- .num_dacs = ARRAY_SIZE(alc882_dac_nids),
1756+ .num_dacs = 2,
1757 .dac_nids = alc882_dac_nids,
1758- .channel_mode = alc885_mbp_6ch_modes,
1759- .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
1760+ .hp_nid = 0x04,
1761+ .channel_mode = alc885_mbp_4ch_modes,
1762+ .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
1763 .input_mux = &alc882_capture_source,
1764 .dig_out_nid = ALC882_DIGOUT_NID,
1765 .dig_in_nid = ALC882_DIGIN_NID,
1766@@ -11634,6 +11640,8 @@ static int patch_alc269(struct hda_codec *codec)
1767 spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
1768 spec->capsrc_nids = alc269_capsrc_nids;
1769
1770+ spec->vmaster_nid = 0x02;
1771+
1772 codec->patch_ops = alc_patch_ops;
1773 if (board_config == ALC269_AUTO)
1774 spec->init_hook = alc269_auto_init;
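
On the MacBook Pro model, the patch_realtek.c hunks above re-route the
headphone pin (0x15) from mixer 0x0d to mixer 0x0e, unmute it, and rename the
mixer controls to match the physical outputs; the preset drops the bogus 6ch
modes for 2/4ch ones, and patch_alc269() gains a vmaster_nid so a virtual
master control can be built. The re-route itself is just two verbs, shown here
in isolation as a sketch (the connection index 0x02 reaching 0x0e is taken
from the hunk above, not verified independently):

	static const struct hda_verb mbp3_hp_reroute[] = {
		/* pin 0x15: select mux input 0x02 (mixer 0x0e) and unmute */
		{ 0x15, AC_VERB_SET_CONNECT_SEL, 0x02 },
		{ 0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE },
		{ } /* end */
	};
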
1775diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1776index 7dd9b0b..db062b5 100644
1777--- a/virt/kvm/kvm_main.c
1778+++ b/virt/kvm/kvm_main.c
1779@@ -406,6 +406,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
1780 #endif
1781 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1782 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1783+#else
1784+ kvm_arch_flush_shadow(kvm);
1785 #endif
1786 kvm_arch_destroy_vm(kvm);
1787 mmdrop(mm);
1788@@ -548,6 +550,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
1789 if (!new.dirty_bitmap)
1790 goto out_free;
1791 memset(new.dirty_bitmap, 0, dirty_bytes);
1792+ if (old.npages)
1793+ kvm_arch_flush_shadow(kvm);
1794 }
1795 #endif /* not defined CONFIG_S390 */
1796
1797@@ -726,7 +730,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1798 return page_to_pfn(bad_page);
1799 }
1800
1801- npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
1802+ npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1803 NULL);
1804
1805 if (unlikely(npages != 1)) {
1806@@ -1074,12 +1078,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1807
1808 r = kvm_arch_vcpu_setup(vcpu);
1809 if (r)
1810- goto vcpu_destroy;
1811+ return r;
1812
1813 mutex_lock(&kvm->lock);
1814 if (kvm->vcpus[n]) {
1815 r = -EEXIST;
1816- mutex_unlock(&kvm->lock);
1817 goto vcpu_destroy;
1818 }
1819 kvm->vcpus[n] = vcpu;
1820@@ -1095,8 +1098,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1821 unlink:
1822 mutex_lock(&kvm->lock);
1823 kvm->vcpus[n] = NULL;
1824- mutex_unlock(&kvm->lock);
1825 vcpu_destroy:
1826+ mutex_unlock(&kvm->lock);
1827 kvm_arch_vcpu_destroy(vcpu);
1828 return r;
1829 }
1830@@ -1118,6 +1121,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
1831 struct kvm_vcpu *vcpu = filp->private_data;
1832 void __user *argp = (void __user *)arg;
1833 int r;
1834+ struct kvm_fpu *fpu = NULL;
1835+ struct kvm_sregs *kvm_sregs = NULL;
1836
1837 if (vcpu->kvm->mm != current->mm)
1838 return -EIO;
1839@@ -1165,25 +1170,28 @@ out_free2:
1840 break;
1841 }
1842 case KVM_GET_SREGS: {
1843- struct kvm_sregs kvm_sregs;
1844-
1845- memset(&kvm_sregs, 0, sizeof kvm_sregs);
1846- r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
1847+ kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1848+ r = -ENOMEM;
1849+ if (!kvm_sregs)
1850+ goto out;
1851+ r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1852 if (r)
1853 goto out;
1854 r = -EFAULT;
1855- if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
1856+ if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1857 goto out;
1858 r = 0;
1859 break;
1860 }
1861 case KVM_SET_SREGS: {
1862- struct kvm_sregs kvm_sregs;
1863-
1864+ kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1865+ r = -ENOMEM;
1866+ if (!kvm_sregs)
1867+ goto out;
1868 r = -EFAULT;
1869- if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1870+ if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1871 goto out;
1872- r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
1873+ r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1874 if (r)
1875 goto out;
1876 r = 0;
1877@@ -1264,25 +1272,28 @@ out_free2:
1878 break;
1879 }
1880 case KVM_GET_FPU: {
1881- struct kvm_fpu fpu;
1882-
1883- memset(&fpu, 0, sizeof fpu);
1884- r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
1885+ fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1886+ r = -ENOMEM;
1887+ if (!fpu)
1888+ goto out;
1889+ r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1890 if (r)
1891 goto out;
1892 r = -EFAULT;
1893- if (copy_to_user(argp, &fpu, sizeof fpu))
1894+ if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1895 goto out;
1896 r = 0;
1897 break;
1898 }
1899 case KVM_SET_FPU: {
1900- struct kvm_fpu fpu;
1901-
1902+ fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1903+ r = -ENOMEM;
1904+ if (!fpu)
1905+ goto out;
1906 r = -EFAULT;
1907- if (copy_from_user(&fpu, argp, sizeof fpu))
1908+ if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1909 goto out;
1910- r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
1911+ r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1912 if (r)
1913 goto out;
1914 r = 0;
1915@@ -1292,6 +1303,8 @@ out_free2:
1916 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
1917 }
1918 out:
1919+ kfree(fpu);
1920+ kfree(kvm_sregs);
1921 return r;
1922 }
1923
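
The kvm_vcpu_ioctl() hunks above move struct kvm_sregs and struct kvm_fpu off
the kernel stack: kzalloc() on the GET paths so no uninitialized kernel bytes
can reach userspace through copy_to_user(), plain kmalloc() on the SET paths,
and a single kfree() of both pointers at the out: label (kfree(NULL) is a
no-op, so every exit path is covered). The kvm_vm_ioctl_create_vcpu() hunks
likewise rearrange the error paths so kvm->lock is dropped exactly once, at
the vcpu_destroy label. The allocation pattern in isolation, as a kernel-side
sketch where fill_sregs() is a hypothetical stand-in for
kvm_arch_vcpu_ioctl_get_sregs():

	static long get_sregs_example(void __user *argp)
	{
		struct kvm_sregs *s;
		long r;

		s = kzalloc(sizeof(*s), GFP_KERNEL);	/* zeroed: safe to copy out */
		if (!s)
			return -ENOMEM;
		r = fill_sregs(s);			/* hypothetical helper */
		if (r)
			goto out;
		r = -EFAULT;
		if (copy_to_user(argp, s, sizeof(*s)))
			goto out;
		r = 0;
	out:
		kfree(s);				/* covers every exit path */
		return r;
	}
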