1 From: Greg Kroah-Hartman <gregkh@suse.de>
2 Subject: Linux 2.6.27.32
4 Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
6 diff --git a/Makefile b/Makefile
7 index fa0d21e..00dc0ee 100644
16 NAME = Trembling Tortoise
19 diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
20 index 3da2508..95c65e3 100644
21 --- a/arch/x86/kvm/mmu.c
22 +++ b/arch/x86/kvm/mmu.c
23 @@ -135,13 +135,6 @@ module_param(dbg, bool, 0644);
24 #define ACC_USER_MASK PT_USER_MASK
25 #define ACC_ALL (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)
27 -struct kvm_pv_mmu_op_buffer {
31 - char buf[512] __aligned(sizeof(long));
34 struct kvm_rmap_desc {
35 u64 *shadow_ptes[RMAP_EXT];
36 struct kvm_rmap_desc *more;
37 @@ -305,7 +298,7 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
40 r = mmu_topup_memory_cache(&vcpu->arch.mmu_rmap_desc_cache,
41 - rmap_desc_cache, 1);
42 + rmap_desc_cache, 4);
45 r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
46 @@ -1162,7 +1155,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *shadow_pte,
48 spte = shadow_base_present_pte | shadow_dirty_mask;
50 - pte_access |= PT_ACCESSED_MASK;
51 + spte |= shadow_accessed_mask;
53 pte_access &= ~ACC_WRITE_MASK;
54 if (pte_access & ACC_EXEC_MASK)
55 @@ -1357,7 +1350,19 @@ static void mmu_free_roots(struct kvm_vcpu *vcpu)
56 vcpu->arch.mmu.root_hpa = INVALID_PAGE;
59 -static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
60 +static int mmu_check_root(struct kvm_vcpu *vcpu, gfn_t root_gfn)
64 + if (!kvm_is_visible_gfn(vcpu->kvm, root_gfn)) {
65 + set_bit(KVM_REQ_TRIPLE_FAULT, &vcpu->requests);
72 +static int mmu_alloc_roots(struct kvm_vcpu *vcpu)
76 @@ -1372,13 +1377,15 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
77 ASSERT(!VALID_PAGE(root));
80 + if (mmu_check_root(vcpu, root_gfn))
82 sp = kvm_mmu_get_page(vcpu, root_gfn, 0,
83 PT64_ROOT_LEVEL, metaphysical,
87 vcpu->arch.mmu.root_hpa = root;
91 metaphysical = !is_paging(vcpu);
93 @@ -1395,6 +1402,8 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
94 root_gfn = vcpu->arch.pdptrs[i] >> PAGE_SHIFT;
95 } else if (vcpu->arch.mmu.root_level == 0)
97 + if (mmu_check_root(vcpu, root_gfn))
99 sp = kvm_mmu_get_page(vcpu, root_gfn, i << 30,
100 PT32_ROOT_LEVEL, metaphysical,
102 @@ -1403,6 +1412,7 @@ static void mmu_alloc_roots(struct kvm_vcpu *vcpu)
103 vcpu->arch.mmu.pae_root[i] = root | PT_PRESENT_MASK;
105 vcpu->arch.mmu.root_hpa = __pa(vcpu->arch.mmu.pae_root);
109 static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr)
110 @@ -1646,8 +1656,10 @@ int kvm_mmu_load(struct kvm_vcpu *vcpu)
112 spin_lock(&vcpu->kvm->mmu_lock);
113 kvm_mmu_free_some_pages(vcpu);
114 - mmu_alloc_roots(vcpu);
115 + r = mmu_alloc_roots(vcpu);
116 spin_unlock(&vcpu->kvm->mmu_lock);
119 kvm_x86_ops->set_cr3(vcpu, vcpu->arch.mmu.root_hpa);
120 kvm_mmu_flush_tlb(vcpu);
122 @@ -1983,14 +1995,6 @@ EXPORT_SYMBOL_GPL(kvm_disable_tdp);
124 static void free_mmu_pages(struct kvm_vcpu *vcpu)
126 - struct kvm_mmu_page *sp;
128 - while (!list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
129 - sp = container_of(vcpu->kvm->arch.active_mmu_pages.next,
130 - struct kvm_mmu_page, link);
131 - kvm_mmu_zap_page(vcpu->kvm, sp);
134 free_page((unsigned long)vcpu->arch.mmu.pae_root);
137 @@ -2068,6 +2072,7 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
138 if (pt[i] & PT_WRITABLE_MASK)
139 pt[i] &= ~PT_WRITABLE_MASK;
141 + kvm_flush_remote_tlbs(kvm);
144 void kvm_mmu_zap_all(struct kvm *kvm)
145 @@ -2237,7 +2242,7 @@ static int kvm_pv_mmu_write(struct kvm_vcpu *vcpu,
147 static int kvm_pv_mmu_flush_tlb(struct kvm_vcpu *vcpu)
149 - kvm_x86_ops->tlb_flush(vcpu);
150 + kvm_set_cr3(vcpu, vcpu->arch.cr3);
154 @@ -2291,18 +2296,18 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
155 gpa_t addr, unsigned long *ret)
158 - struct kvm_pv_mmu_op_buffer buffer;
159 + struct kvm_pv_mmu_op_buffer *buffer = &vcpu->arch.mmu_op_buffer;
161 - buffer.ptr = buffer.buf;
162 - buffer.len = min_t(unsigned long, bytes, sizeof buffer.buf);
163 - buffer.processed = 0;
164 + buffer->ptr = buffer->buf;
165 + buffer->len = min_t(unsigned long, bytes, sizeof buffer->buf);
166 + buffer->processed = 0;
168 - r = kvm_read_guest(vcpu->kvm, addr, buffer.buf, buffer.len);
169 + r = kvm_read_guest(vcpu->kvm, addr, buffer->buf, buffer->len);
173 - while (buffer.len) {
174 - r = kvm_pv_mmu_op_one(vcpu, &buffer);
175 + while (buffer->len) {
176 + r = kvm_pv_mmu_op_one(vcpu, buffer);
180 @@ -2311,7 +2316,7 @@ int kvm_pv_mmu_op(struct kvm_vcpu *vcpu, unsigned long bytes,
184 - *ret = buffer.processed;
185 + *ret = buffer->processed;
189 diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
190 index 8233b86..77cae01 100644
191 --- a/arch/x86/kvm/svm.c
192 +++ b/arch/x86/kvm/svm.c
193 @@ -429,7 +429,6 @@ static __init int svm_hardware_setup(void)
195 iopm_va = page_address(iopm_pages);
196 memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
197 - clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
198 iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;
200 if (boot_cpu_has(X86_FEATURE_NX))
201 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
202 index 7041cc5..4cee61a 100644
203 --- a/arch/x86/kvm/vmx.c
204 +++ b/arch/x86/kvm/vmx.c
205 @@ -898,11 +898,11 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
209 -#ifdef CONFIG_X86_64
211 vmx_load_host_state(vmx);
212 ret = kvm_set_msr_common(vcpu, msr_index, data);
214 +#ifdef CONFIG_X86_64
216 vmcs_writel(GUEST_FS_BASE, data);
218 @@ -1789,7 +1789,7 @@ static void seg_setup(int seg)
219 vmcs_write16(sf->selector, 0);
220 vmcs_writel(sf->base, 0);
221 vmcs_write32(sf->limit, 0xffff);
222 - vmcs_write32(sf->ar_bytes, 0x93);
223 + vmcs_write32(sf->ar_bytes, 0xf3);
226 static int alloc_apic_access_page(struct kvm *kvm)
227 @@ -2036,6 +2036,7 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
231 + seg_setup(VCPU_SREG_CS);
233 * GUEST_CS_BASE should really be 0xffff0000, but VT vm86 mode
234 * insists on having GUEST_CS_BASE == GUEST_CS_SELECTOR << 4. Sigh.
235 @@ -2047,8 +2048,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
236 vmcs_write16(GUEST_CS_SELECTOR, vmx->vcpu.arch.sipi_vector << 8);
237 vmcs_writel(GUEST_CS_BASE, vmx->vcpu.arch.sipi_vector << 12);
239 - vmcs_write32(GUEST_CS_LIMIT, 0xffff);
240 - vmcs_write32(GUEST_CS_AR_BYTES, 0x9b);
242 seg_setup(VCPU_SREG_DS);
243 seg_setup(VCPU_SREG_ES);
244 @@ -2583,6 +2582,12 @@ static int handle_vmcall(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
248 +static int handle_vmx_insn(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
250 + kvm_queue_exception(vcpu, UD_VECTOR);
254 static int handle_wbinvd(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
256 skip_emulated_instruction(vcpu);
257 @@ -2715,6 +2720,15 @@ static int (*kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu,
258 [EXIT_REASON_PENDING_INTERRUPT] = handle_interrupt_window,
259 [EXIT_REASON_HLT] = handle_halt,
260 [EXIT_REASON_VMCALL] = handle_vmcall,
261 + [EXIT_REASON_VMCLEAR] = handle_vmx_insn,
262 + [EXIT_REASON_VMLAUNCH] = handle_vmx_insn,
263 + [EXIT_REASON_VMPTRLD] = handle_vmx_insn,
264 + [EXIT_REASON_VMPTRST] = handle_vmx_insn,
265 + [EXIT_REASON_VMREAD] = handle_vmx_insn,
266 + [EXIT_REASON_VMRESUME] = handle_vmx_insn,
267 + [EXIT_REASON_VMWRITE] = handle_vmx_insn,
268 + [EXIT_REASON_VMOFF] = handle_vmx_insn,
269 + [EXIT_REASON_VMON] = handle_vmx_insn,
270 [EXIT_REASON_TPR_BELOW_THRESHOLD] = handle_tpr_below_threshold,
271 [EXIT_REASON_APIC_ACCESS] = handle_apic_access,
272 [EXIT_REASON_WBINVD] = handle_wbinvd,
273 @@ -3300,7 +3314,8 @@ static int __init vmx_init(void)
275 kvm_mmu_set_base_ptes(VMX_EPT_READABLE_MASK |
276 VMX_EPT_WRITABLE_MASK |
277 - VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT);
278 + VMX_EPT_DEFAULT_MT << VMX_EPT_MT_EPTE_SHIFT |
280 kvm_mmu_set_mask_ptes(0ull, 0ull, 0ull, 0ull,
281 VMX_EPT_EXECUTABLE_MASK);
283 diff --git a/arch/x86/kvm/vmx.h b/arch/x86/kvm/vmx.h
284 index 23e8373..198cdf3 100644
285 --- a/arch/x86/kvm/vmx.h
286 +++ b/arch/x86/kvm/vmx.h
287 @@ -370,6 +370,7 @@ enum vmcs_field {
288 #define VMX_EPT_READABLE_MASK 0x1ull
289 #define VMX_EPT_WRITABLE_MASK 0x2ull
290 #define VMX_EPT_EXECUTABLE_MASK 0x4ull
291 +#define VMX_EPT_IGMT_BIT (1ull << 6)
293 #define VMX_EPT_IDENTITY_PAGETABLE_ADDR 0xfffbc000ul
295 diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
296 index 0d682fc..f7c7142 100644
297 --- a/arch/x86/kvm/x86.c
298 +++ b/arch/x86/kvm/x86.c
299 @@ -318,6 +318,9 @@ EXPORT_SYMBOL_GPL(kvm_lmsw);
301 void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
303 + unsigned long old_cr4 = vcpu->arch.cr4;
304 + unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE;
306 if (cr4 & CR4_RESERVED_BITS) {
307 printk(KERN_DEBUG "set_cr4: #GP, reserved bits\n");
308 kvm_inject_gp(vcpu, 0);
309 @@ -331,7 +334,8 @@ void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
310 kvm_inject_gp(vcpu, 0);
313 - } else if (is_paging(vcpu) && !is_pae(vcpu) && (cr4 & X86_CR4_PAE)
314 + } else if (is_paging(vcpu) && (cr4 & X86_CR4_PAE)
315 + && ((cr4 ^ old_cr4) & pdptr_bits)
316 && !load_pdptrs(vcpu, vcpu->arch.cr3)) {
317 printk(KERN_DEBUG "set_cr4: #GP, pdptrs reserved bits\n");
318 kvm_inject_gp(vcpu, 0);
319 @@ -752,6 +756,7 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
320 case MSR_IA32_MC0_MISC+8:
321 case MSR_IA32_MC0_MISC+12:
322 case MSR_IA32_MC0_MISC+16:
323 + case MSR_IA32_MC0_MISC+20:
324 case MSR_IA32_UCODE_REV:
325 case MSR_IA32_EBL_CR_POWERON:
327 @@ -982,9 +987,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
329 static int is_efer_nx(void)
332 + unsigned long long efer = 0;
334 - rdmsrl(MSR_EFER, efer);
335 + rdmsrl_safe(MSR_EFER, &efer);
336 return efer & EFER_NX;
339 @@ -1303,28 +1308,33 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
340 struct kvm_vcpu *vcpu = filp->private_data;
341 void __user *argp = (void __user *)arg;
343 + struct kvm_lapic_state *lapic = NULL;
346 case KVM_GET_LAPIC: {
347 - struct kvm_lapic_state lapic;
348 + lapic = kzalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
350 - memset(&lapic, 0, sizeof lapic);
351 - r = kvm_vcpu_ioctl_get_lapic(vcpu, &lapic);
355 + r = kvm_vcpu_ioctl_get_lapic(vcpu, lapic);
359 - if (copy_to_user(argp, &lapic, sizeof lapic))
360 + if (copy_to_user(argp, lapic, sizeof(struct kvm_lapic_state)))
365 case KVM_SET_LAPIC: {
366 - struct kvm_lapic_state lapic;
368 + lapic = kmalloc(sizeof(struct kvm_lapic_state), GFP_KERNEL);
373 - if (copy_from_user(&lapic, argp, sizeof lapic))
374 + if (copy_from_user(lapic, argp, sizeof(struct kvm_lapic_state)))
376 - r = kvm_vcpu_ioctl_set_lapic(vcpu, &lapic);;
377 + r = kvm_vcpu_ioctl_set_lapic(vcpu, lapic);
381 @@ -1422,6 +1432,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
390 @@ -1442,10 +1454,12 @@ static int kvm_vm_ioctl_set_nr_mmu_pages(struct kvm *kvm,
393 down_write(&kvm->slots_lock);
394 + spin_lock(&kvm->mmu_lock);
396 kvm_mmu_change_mmu_pages(kvm, kvm_nr_mmu_pages);
397 kvm->arch.n_requested_mmu_pages = kvm_nr_mmu_pages;
399 + spin_unlock(&kvm->mmu_lock);
400 up_write(&kvm->slots_lock);
403 @@ -1612,7 +1626,9 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
405 /* If nothing is dirty, don't bother messing with page tables. */
407 + spin_lock(&kvm->mmu_lock);
408 kvm_mmu_slot_remove_write_access(kvm, log->slot);
409 + spin_unlock(&kvm->mmu_lock);
410 kvm_flush_remote_tlbs(kvm);
411 memslot = &kvm->memslots[log->slot];
412 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
413 @@ -1630,6 +1646,15 @@ long kvm_arch_vm_ioctl(struct file *filp,
414 struct kvm *kvm = filp->private_data;
415 void __user *argp = (void __user *)arg;
418 + * This union makes it completely explicit to gcc-3.x
419 + * that these two variables' stack usage should be
420 + * combined, not added together.
423 + struct kvm_pit_state ps;
424 + struct kvm_memory_alias alias;
428 case KVM_SET_TSS_ADDR:
429 @@ -1661,17 +1686,14 @@ long kvm_arch_vm_ioctl(struct file *filp,
430 case KVM_GET_NR_MMU_PAGES:
431 r = kvm_vm_ioctl_get_nr_mmu_pages(kvm);
433 - case KVM_SET_MEMORY_ALIAS: {
434 - struct kvm_memory_alias alias;
436 + case KVM_SET_MEMORY_ALIAS:
438 - if (copy_from_user(&alias, argp, sizeof alias))
439 + if (copy_from_user(&u.alias, argp, sizeof(struct kvm_memory_alias)))
441 - r = kvm_vm_ioctl_set_memory_alias(kvm, &alias);
442 + r = kvm_vm_ioctl_set_memory_alias(kvm, &u.alias);
447 case KVM_CREATE_IRQCHIP:
449 kvm->arch.vpic = kvm_create_pic(kvm);
450 @@ -1713,65 +1735,77 @@ long kvm_arch_vm_ioctl(struct file *filp,
452 case KVM_GET_IRQCHIP: {
453 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
454 - struct kvm_irqchip chip;
455 + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
458 - if (copy_from_user(&chip, argp, sizeof chip))
463 + if (copy_from_user(chip, argp, sizeof *chip))
464 + goto get_irqchip_out;
466 if (!irqchip_in_kernel(kvm))
468 - r = kvm_vm_ioctl_get_irqchip(kvm, &chip);
469 + goto get_irqchip_out;
470 + r = kvm_vm_ioctl_get_irqchip(kvm, chip);
473 + goto get_irqchip_out;
475 - if (copy_to_user(argp, &chip, sizeof chip))
477 + if (copy_to_user(argp, chip, sizeof *chip))
478 + goto get_irqchip_out;
486 case KVM_SET_IRQCHIP: {
487 /* 0: PIC master, 1: PIC slave, 2: IOAPIC */
488 - struct kvm_irqchip chip;
489 + struct kvm_irqchip *chip = kmalloc(sizeof(*chip), GFP_KERNEL);
492 - if (copy_from_user(&chip, argp, sizeof chip))
497 + if (copy_from_user(chip, argp, sizeof *chip))
498 + goto set_irqchip_out;
500 if (!irqchip_in_kernel(kvm))
502 - r = kvm_vm_ioctl_set_irqchip(kvm, &chip);
503 + goto set_irqchip_out;
504 + r = kvm_vm_ioctl_set_irqchip(kvm, chip);
507 + goto set_irqchip_out;
516 - struct kvm_pit_state ps;
518 - if (copy_from_user(&ps, argp, sizeof ps))
519 + if (copy_from_user(&u.ps, argp, sizeof(struct kvm_pit_state)))
524 - r = kvm_vm_ioctl_get_pit(kvm, &ps);
525 + r = kvm_vm_ioctl_get_pit(kvm, &u.ps);
529 - if (copy_to_user(argp, &ps, sizeof ps))
530 + if (copy_to_user(argp, &u.ps, sizeof(struct kvm_pit_state)))
536 - struct kvm_pit_state ps;
538 - if (copy_from_user(&ps, argp, sizeof ps))
539 + if (copy_from_user(&u.ps, argp, sizeof u.ps))
544 - r = kvm_vm_ioctl_set_pit(kvm, &ps);
545 + r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
549 @@ -2813,10 +2847,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
550 down_read(&vcpu->kvm->slots_lock);
554 - if (vcpu->guest_debug.enabled)
555 - kvm_x86_ops->guest_debug_pre(vcpu);
559 if (test_and_clear_bit(KVM_REQ_MMU_RELOAD, &vcpu->requests))
560 @@ -2870,6 +2900,9 @@ again:
564 + if (vcpu->guest_debug.enabled)
565 + kvm_x86_ops->guest_debug_pre(vcpu);
567 vcpu->guest_mode = 1;
569 * Make sure that guest_mode assignment won't happen after
570 @@ -2944,7 +2977,7 @@ out:
573 down_read(&vcpu->kvm->slots_lock);
578 post_kvm_run_save(vcpu, kvm_run);
579 @@ -3294,11 +3327,33 @@ static int load_segment_descriptor_to_kvm_desct(struct kvm_vcpu *vcpu,
583 +int kvm_load_realmode_segment(struct kvm_vcpu *vcpu, u16 selector, int seg)
585 + struct kvm_segment segvar = {
586 + .base = selector << 4,
588 + .selector = selector,
599 + kvm_x86_ops->set_segment(vcpu, &segvar, seg);
603 int kvm_load_segment_descriptor(struct kvm_vcpu *vcpu, u16 selector,
604 int type_bits, int seg)
606 struct kvm_segment kvm_seg;
608 + if (!(vcpu->arch.cr0 & X86_CR0_PE))
609 + return kvm_load_realmode_segment(vcpu, selector, seg);
610 if (load_segment_descriptor_to_kvm_desct(vcpu, selector, &kvm_seg))
612 kvm_seg.type |= type_bits;
613 @@ -3981,7 +4036,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
614 userspace_addr = do_mmap(NULL, 0,
616 PROT_READ | PROT_WRITE,
617 - MAP_SHARED | MAP_ANONYMOUS,
618 + MAP_PRIVATE | MAP_ANONYMOUS,
620 up_write(¤t->mm->mmap_sem);
622 @@ -4008,12 +4063,14 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
626 + spin_lock(&kvm->mmu_lock);
627 if (!kvm->arch.n_requested_mmu_pages) {
628 unsigned int nr_mmu_pages = kvm_mmu_calculate_mmu_pages(kvm);
629 kvm_mmu_change_mmu_pages(kvm, nr_mmu_pages);
632 kvm_mmu_slot_remove_write_access(kvm, mem->slot);
633 + spin_unlock(&kvm->mmu_lock);
634 kvm_flush_remote_tlbs(kvm);
637 @@ -4022,6 +4079,7 @@ int kvm_arch_set_memory_region(struct kvm *kvm,
638 void kvm_arch_flush_shadow(struct kvm *kvm)
640 kvm_mmu_zap_all(kvm);
641 + kvm_reload_remote_mmus(kvm);
644 int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
645 diff --git a/drivers/char/mxser.c b/drivers/char/mxser.c
646 index ebda9a8..3340c62 100644
647 --- a/drivers/char/mxser.c
648 +++ b/drivers/char/mxser.c
649 @@ -1099,8 +1099,6 @@ static int mxser_open(struct tty_struct *tty, struct file *filp)
653 - /* unmark here for very high baud rate (ex. 921600 bps) used */
654 - tty->low_latency = 1;
658 diff --git a/drivers/char/nozomi.c b/drivers/char/nozomi.c
659 index 66a0f93..4dfb5a1 100644
660 --- a/drivers/char/nozomi.c
661 +++ b/drivers/char/nozomi.c
662 @@ -1584,7 +1584,6 @@ static int ntty_open(struct tty_struct *tty, struct file *file)
664 /* Enable interrupt downlink for channel */
665 if (port->tty_open_count == 1) {
666 - tty->low_latency = 1;
667 tty->driver_data = port;
669 DBG1("open: %d", port->token_dl);
670 diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
671 index a6e730f..682f411 100644
672 --- a/drivers/net/ehea/ehea_main.c
673 +++ b/drivers/net/ehea/ehea_main.c
674 @@ -1530,6 +1530,9 @@ static int ehea_clean_portres(struct ehea_port *port, struct ehea_port_res *pr)
679 + netif_napi_del(&pr->napi);
681 ret = ehea_destroy_qp(pr->qp);
684 diff --git a/drivers/parport/share.c b/drivers/parport/share.c
685 index a8a62bb..a592f29 100644
686 --- a/drivers/parport/share.c
687 +++ b/drivers/parport/share.c
688 @@ -614,7 +614,10 @@ parport_register_device(struct parport *port, const char *name,
689 * pardevice fields. -arca
691 port->ops->init_state(tmp, tmp->state);
692 - parport_device_proc_register(tmp);
693 + if (!test_and_set_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags)) {
694 + port->proc_device = tmp;
695 + parport_device_proc_register(tmp);
700 @@ -646,10 +649,14 @@ void parport_unregister_device(struct pardevice *dev)
704 - parport_device_proc_unregister(dev);
706 port = dev->port->physport;
708 + if (port->proc_device == dev) {
709 + port->proc_device = NULL;
710 + clear_bit(PARPORT_DEVPROC_REGISTERED, &port->devflags);
711 + parport_device_proc_unregister(dev);
714 if (port->cad == dev) {
715 printk(KERN_DEBUG "%s: %s forgot to release port\n",
716 port->name, dev->name);
717 diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
718 index ae87d08..25f2008 100644
719 --- a/drivers/scsi/sr_ioctl.c
720 +++ b/drivers/scsi/sr_ioctl.c
721 @@ -309,6 +309,11 @@ int sr_drive_status(struct cdrom_device_info *cdi, int slot)
722 if (0 == sr_test_unit_ready(cd->device, &sshdr))
725 + /* SK/ASC/ASCQ of 2/4/1 means "unit is becoming ready" */
726 + if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
727 + && sshdr.asc == 0x04 && sshdr.ascq == 0x01)
728 + return CDS_DRIVE_NOT_READY;
730 if (!cdrom_get_media_event(cdi, &med)) {
731 if (med.media_present)
733 diff --git a/drivers/usb/serial/cyberjack.c b/drivers/usb/serial/cyberjack.c
734 index b4d7235..7b3df8e 100644
735 --- a/drivers/usb/serial/cyberjack.c
736 +++ b/drivers/usb/serial/cyberjack.c
737 @@ -174,13 +174,6 @@ static int cyberjack_open(struct tty_struct *tty,
738 dbg("%s - usb_clear_halt", __func__);
739 usb_clear_halt(port->serial->dev, port->write_urb->pipe);
741 - /* force low_latency on so that our tty_push actually forces
742 - * the data through, otherwise it is scheduled, and with high
743 - * data rates (like with OHCI) data can get lost.
746 - tty->low_latency = 1;
748 priv = usb_get_serial_port_data(port);
749 spin_lock_irqsave(&priv->lock, flags);
751 diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c
752 index 22837a3..7eb473b 100644
753 --- a/drivers/usb/serial/cypress_m8.c
754 +++ b/drivers/usb/serial/cypress_m8.c
755 @@ -655,10 +655,6 @@ static int cypress_open(struct tty_struct *tty,
757 spin_unlock_irqrestore(&priv->lock, flags);
759 - /* setting to zero could cause data loss */
761 - tty->low_latency = 1;
763 /* raise both lines and set termios */
764 spin_lock_irqsave(&priv->lock, flags);
765 priv->line_control = CONTROL_DTR | CONTROL_RTS;
766 diff --git a/drivers/usb/serial/empeg.c b/drivers/usb/serial/empeg.c
767 index a6ab5b5..28ee28c 100644
768 --- a/drivers/usb/serial/empeg.c
769 +++ b/drivers/usb/serial/empeg.c
770 @@ -478,12 +478,6 @@ static void empeg_set_termios(struct tty_struct *tty,
772 |= CS8; /* character size 8 bits */
775 - * Force low_latency on; otherwise the pushes are scheduled;
776 - * this is bad as it opens up the possibility of dropping bytes
777 - * on the floor. We don't want to drop bytes on the floor. :)
779 - tty->low_latency = 1;
780 tty_encode_baud_rate(tty, 115200, 115200);
783 diff --git a/drivers/usb/serial/garmin_gps.c b/drivers/usb/serial/garmin_gps.c
784 index d953820..d860071 100644
785 --- a/drivers/usb/serial/garmin_gps.c
786 +++ b/drivers/usb/serial/garmin_gps.c
787 @@ -972,14 +972,6 @@ static int garmin_open(struct tty_struct *tty,
789 dbg("%s - port %d", __func__, port->number);
792 - * Force low_latency on so that our tty_push actually forces the data
793 - * through, otherwise it is scheduled, and with high data rates (like
794 - * with OHCI) data can get lost.
797 - tty->low_latency = 1;
799 spin_lock_irqsave(&garmin_data_p->lock, flags);
800 garmin_data_p->mode = initial_mode;
801 garmin_data_p->count = 0;
802 diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
803 index fe84c88..aa7f08b 100644
804 --- a/drivers/usb/serial/generic.c
805 +++ b/drivers/usb/serial/generic.c
806 @@ -122,12 +122,6 @@ int usb_serial_generic_open(struct tty_struct *tty,
808 dbg("%s - port %d", __func__, port->number);
810 - /* force low_latency on so that our tty_push actually forces the data
811 - through, otherwise it is scheduled, and with high data rates (like
812 - with OHCI) data can get lost. */
814 - tty->low_latency = 1;
816 /* clear the throttle flags */
817 spin_lock_irqsave(&port->lock, flags);
819 diff --git a/drivers/usb/serial/io_edgeport.c b/drivers/usb/serial/io_edgeport.c
820 index bfa508d..183045a 100644
821 --- a/drivers/usb/serial/io_edgeport.c
822 +++ b/drivers/usb/serial/io_edgeport.c
823 @@ -193,8 +193,6 @@ static const struct divisor_table_entry divisor_table[] = {
824 /* local variables */
827 -static int low_latency = 1; /* tty low latency flag, on by default */
829 static atomic_t CmdUrbs; /* Number of outstanding Command Write Urbs */
832 @@ -861,9 +859,6 @@ static int edge_open(struct tty_struct *tty,
833 if (edge_port == NULL)
837 - tty->low_latency = low_latency;
839 /* see if we've set up our endpoint info yet (can't set it up
840 in edge_startup as the structures were not set up at that time.) */
841 serial = port->serial;
842 @@ -3281,6 +3276,3 @@ MODULE_FIRMWARE("edgeport/down2.fw");
844 module_param(debug, bool, S_IRUGO | S_IWUSR);
845 MODULE_PARM_DESC(debug, "Debug enabled or not");
847 -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
848 -MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
849 diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c
850 index cb4c543..0d744f0 100644
851 --- a/drivers/usb/serial/io_ti.c
852 +++ b/drivers/usb/serial/io_ti.c
853 @@ -76,7 +76,6 @@ struct edgeport_uart_buf_desc {
854 #define EDGE_READ_URB_STOPPING 1
855 #define EDGE_READ_URB_STOPPED 2
857 -#define EDGE_LOW_LATENCY 1
858 #define EDGE_CLOSING_WAIT 4000 /* in .01 sec */
860 #define EDGE_OUT_BUF_SIZE 1024
861 @@ -232,7 +231,6 @@ static unsigned short OperationalBuildNumber;
865 -static int low_latency = EDGE_LOW_LATENCY;
866 static int closing_wait = EDGE_CLOSING_WAIT;
867 static int ignore_cpu_rev;
868 static int default_uart_mode; /* RS232 */
869 @@ -1838,9 +1836,6 @@ static int edge_open(struct tty_struct *tty,
870 if (edge_port == NULL)
874 - tty->low_latency = low_latency;
876 port_number = port->number - port->serial->minor;
877 switch (port_number) {
879 @@ -2995,9 +2990,6 @@ MODULE_FIRMWARE("edgeport/down3.bin");
880 module_param(debug, bool, S_IRUGO | S_IWUSR);
881 MODULE_PARM_DESC(debug, "Debug enabled or not");
883 -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
884 -MODULE_PARM_DESC(low_latency, "Low latency enabled or not");
886 module_param(closing_wait, int, S_IRUGO | S_IWUSR);
887 MODULE_PARM_DESC(closing_wait, "Maximum wait for data to drain, in .01 secs");
889 diff --git a/drivers/usb/serial/ipaq.c b/drivers/usb/serial/ipaq.c
890 index cd9a2e1..ae0b0ff 100644
891 --- a/drivers/usb/serial/ipaq.c
892 +++ b/drivers/usb/serial/ipaq.c
893 @@ -635,13 +635,7 @@ static int ipaq_open(struct tty_struct *tty,
894 priv->free_len += PACKET_SIZE;
898 - * Force low latency on. This will immediately push data to the line
899 - * discipline instead of queueing.
903 - tty->low_latency = 1;
904 /* FIXME: These two are bogus */
907 diff --git a/drivers/usb/serial/ipw.c b/drivers/usb/serial/ipw.c
908 index a842025..b1c0c9a 100644
909 --- a/drivers/usb/serial/ipw.c
910 +++ b/drivers/usb/serial/ipw.c
911 @@ -206,9 +206,6 @@ static int ipw_open(struct tty_struct *tty,
916 - tty->low_latency = 1;
918 /* --1: Tell the modem to initialize (we think) From sniffs this is
919 * always the first thing that gets sent to the modem during
920 * opening of the device */
921 diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
922 index ddff37f..d6da4c9 100644
923 --- a/drivers/usb/serial/iuu_phoenix.c
924 +++ b/drivers/usb/serial/iuu_phoenix.c
925 @@ -1046,7 +1046,6 @@ static int iuu_open(struct tty_struct *tty,
926 tty->termios->c_oflag = 0;
927 tty->termios->c_iflag = 0;
928 priv->termios_initialized = 1;
929 - tty->low_latency = 1;
932 spin_unlock_irqrestore(&priv->lock, flags);
933 diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
934 index deba28e..5326c59 100644
935 --- a/drivers/usb/serial/kobil_sct.c
936 +++ b/drivers/usb/serial/kobil_sct.c
937 @@ -231,13 +231,7 @@ static int kobil_open(struct tty_struct *tty,
938 /* someone sets the dev to 0 if the close method has been called */
939 port->interrupt_in_urb->dev = port->serial->dev;
942 - /* force low_latency on so that our tty_push actually forces
943 - * the data through, otherwise it is scheduled, and with high
944 - * data rates (like with OHCI) data can get lost.
947 - tty->low_latency = 1;
949 /* Default to echo off and other sane device settings */
950 tty->termios->c_lflag = 0;
951 diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c
952 index 7c4917d..1c2402d 100644
953 --- a/drivers/usb/serial/mos7720.c
954 +++ b/drivers/usb/serial/mos7720.c
955 @@ -442,13 +442,6 @@ static int mos7720_open(struct tty_struct *tty,
957 send_mos_cmd(serial, MOS_WRITE, port_number, 0x01, &data);
959 - /* force low_latency on so that our tty_push actually forces *
960 - * the data through,otherwise it is scheduled, and with *
961 - * high data rates (like with OHCI) data can get lost. */
964 - tty->low_latency = 1;
966 /* see if we've set up our endpoint info yet *
967 * (can't set it up in mos7720_startup as the *
968 * structures were not set up at that time.) */
969 diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
970 index 09d8206..8befcbb 100644
971 --- a/drivers/usb/serial/mos7840.c
972 +++ b/drivers/usb/serial/mos7840.c
973 @@ -990,12 +990,6 @@ static int mos7840_open(struct tty_struct *tty,
974 status = mos7840_set_reg_sync(port, mos7840_port->ControlRegOffset,
977 - /* force low_latency on so that our tty_push actually forces *
978 - * the data through,otherwise it is scheduled, and with *
979 - * high data rates (like with OHCI) data can get lost. */
981 - tty->low_latency = 1;
983 /* Check to see if we've set up our endpoint info yet *
984 * (can't set it up in mos7840_startup as the structures *
985 * were not set up at that time.) */
986 diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
987 index 211cd61..faa30ad 100644
988 --- a/drivers/usb/serial/option.c
989 +++ b/drivers/usb/serial/option.c
990 @@ -914,9 +914,6 @@ static int option_open(struct tty_struct *tty,
991 usb_pipeout(urb->pipe), 0); */
995 - tty->low_latency = 1;
997 option_send_setup(tty, port);
1000 diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
1001 index ea1a103..639328b 100644
1002 --- a/drivers/usb/serial/sierra.c
1003 +++ b/drivers/usb/serial/sierra.c
1004 @@ -576,9 +576,6 @@ static int sierra_open(struct tty_struct *tty,
1009 - tty->low_latency = 1;
1011 sierra_send_setup(tty, port);
1013 /* start up the interrupt endpoint if we have one */
1014 diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c
1015 index bc5e905..55b9d67 100644
1016 --- a/drivers/usb/serial/ti_usb_3410_5052.c
1017 +++ b/drivers/usb/serial/ti_usb_3410_5052.c
1018 @@ -101,11 +101,10 @@
1020 #define TI_TRANSFER_TIMEOUT 2
1022 -#define TI_DEFAULT_LOW_LATENCY 0
1023 #define TI_DEFAULT_CLOSING_WAIT 4000 /* in .01 secs */
1025 /* supported setserial flags */
1026 -#define TI_SET_SERIAL_FLAGS (ASYNC_LOW_LATENCY)
1027 +#define TI_SET_SERIAL_FLAGS 0
1029 /* read urb states */
1030 #define TI_READ_URB_RUNNING 0
1031 @@ -212,7 +211,6 @@ static int ti_buf_get(struct circ_buf *cb, char *buf, int count);
1033 /* module parameters */
1035 -static int low_latency = TI_DEFAULT_LOW_LATENCY;
1036 static int closing_wait = TI_DEFAULT_CLOSING_WAIT;
1037 static ushort vendor_3410[TI_EXTRA_VID_PID_COUNT];
1038 static unsigned int vendor_3410_count;
1039 @@ -333,10 +331,6 @@ MODULE_FIRMWARE("ti_5052.fw");
1040 module_param(debug, bool, S_IRUGO | S_IWUSR);
1041 MODULE_PARM_DESC(debug, "Enable debugging, 0=no, 1=yes");
1043 -module_param(low_latency, bool, S_IRUGO | S_IWUSR);
1044 -MODULE_PARM_DESC(low_latency,
1045 - "TTY low_latency flag, 0=off, 1=on, default is off");
1047 module_param(closing_wait, int, S_IRUGO | S_IWUSR);
1048 MODULE_PARM_DESC(closing_wait,
1049 "Maximum wait for data to drain in close, in .01 secs, default is 4000");
1050 @@ -480,7 +474,6 @@ static int ti_startup(struct usb_serial *serial)
1051 spin_lock_init(&tport->tp_lock);
1052 tport->tp_uart_base_addr = (i == 0 ?
1053 TI_UART1_BASE_ADDR : TI_UART2_BASE_ADDR);
1054 - tport->tp_flags = low_latency ? ASYNC_LOW_LATENCY : 0;
1055 tport->tp_closing_wait = closing_wait;
1056 init_waitqueue_head(&tport->tp_msr_wait);
1057 init_waitqueue_head(&tport->tp_write_wait);
1058 @@ -560,10 +553,6 @@ static int ti_open(struct tty_struct *tty,
1059 if (mutex_lock_interruptible(&tdev->td_open_close_lock))
1060 return -ERESTARTSYS;
1063 - tty->low_latency =
1064 - (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1066 port_number = port->number - port->serial->minor;
1068 memset(&(tport->tp_icount), 0x00, sizeof(tport->tp_icount));
1069 @@ -1480,10 +1469,6 @@ static int ti_set_serial_info(struct ti_port *tport,
1072 tport->tp_flags = new_serial.flags & TI_SET_SERIAL_FLAGS;
1074 - if (port->port.tty)
1075 - port->port.tty->low_latency =
1076 - (tport->tp_flags & ASYNC_LOW_LATENCY) ? 1 : 0;
1077 tport->tp_closing_wait = new_serial.closing_wait;
1080 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c
1081 index cf8924f..ec33fa5 100644
1082 --- a/drivers/usb/serial/visor.c
1083 +++ b/drivers/usb/serial/visor.c
1084 @@ -296,14 +296,6 @@ static int visor_open(struct tty_struct *tty, struct usb_serial_port *port,
1085 priv->throttled = 0;
1086 spin_unlock_irqrestore(&priv->lock, flags);
1089 - * Force low_latency on so that our tty_push actually forces the data
1090 - * through, otherwise it is scheduled, and with high data rates (like
1091 - * with OHCI) data can get lost.
1094 - tty->low_latency = 1;
1096 /* Start reading from the device */
1097 usb_fill_bulk_urb(port->read_urb, serial->dev,
1098 usb_rcvbulkpipe(serial->dev,
1099 diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
1100 index a53da14..0f04b70 100644
1101 --- a/fs/ocfs2/aops.c
1102 +++ b/fs/ocfs2/aops.c
1103 @@ -908,18 +908,17 @@ struct ocfs2_write_cluster_desc {
1106 unsigned c_unwritten;
1107 + unsigned c_needs_zero;
1110 -static inline int ocfs2_should_zero_cluster(struct ocfs2_write_cluster_desc *d)
1112 - return d->c_new || d->c_unwritten;
1115 struct ocfs2_write_ctxt {
1116 /* Logical cluster position / len of write */
1120 + /* First cluster allocated in a nonsparse extend */
1121 + u32 w_first_new_cpos;
1123 struct ocfs2_write_cluster_desc w_desc[OCFS2_MAX_CLUSTERS_PER_PAGE];
1126 @@ -997,6 +996,7 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
1129 wc->w_cpos = pos >> osb->s_clustersize_bits;
1130 + wc->w_first_new_cpos = UINT_MAX;
1131 cend = (pos + len - 1) >> osb->s_clustersize_bits;
1132 wc->w_clen = cend - wc->w_cpos + 1;
1134 @@ -1239,13 +1239,11 @@ static int ocfs2_write_cluster(struct address_space *mapping,
1135 struct ocfs2_write_ctxt *wc, u32 cpos,
1136 loff_t user_pos, unsigned user_len)
1138 - int ret, i, new, should_zero = 0;
1140 u64 v_blkno, p_blkno;
1141 struct inode *inode = mapping->host;
1143 new = phys == 0 ? 1 : 0;
1144 - if (new || unwritten)
1149 @@ -1356,7 +1354,9 @@ static int ocfs2_write_cluster_by_desc(struct address_space *mapping,
1150 local_len = osb->s_clustersize - cluster_off;
1152 ret = ocfs2_write_cluster(mapping, desc->c_phys,
1153 - desc->c_unwritten, data_ac, meta_ac,
1154 + desc->c_unwritten,
1155 + desc->c_needs_zero,
1157 wc, desc->c_cpos, pos, local_len);
1160 @@ -1406,14 +1406,14 @@ static void ocfs2_set_target_boundaries(struct ocfs2_super *osb,
1161 * newly allocated cluster.
1163 desc = &wc->w_desc[0];
1164 - if (ocfs2_should_zero_cluster(desc))
1165 + if (desc->c_needs_zero)
1166 ocfs2_figure_cluster_boundaries(osb,
1171 desc = &wc->w_desc[wc->w_clen - 1];
1172 - if (ocfs2_should_zero_cluster(desc))
1173 + if (desc->c_needs_zero)
1174 ocfs2_figure_cluster_boundaries(osb,
1177 @@ -1481,13 +1481,28 @@ static int ocfs2_populate_write_desc(struct inode *inode,
1182 + * If w_first_new_cpos is < UINT_MAX, we have a non-sparse
1183 + * file that got extended. w_first_new_cpos tells us
1184 + * where the newly allocated clusters are so we can
1187 + if (desc->c_cpos >= wc->w_first_new_cpos) {
1188 + BUG_ON(phys == 0);
1189 + desc->c_needs_zero = 1;
1192 desc->c_phys = phys;
1195 + desc->c_needs_zero = 1;
1196 *clusters_to_alloc = *clusters_to_alloc + 1;
1198 - if (ext_flags & OCFS2_EXT_UNWRITTEN)
1200 + if (ext_flags & OCFS2_EXT_UNWRITTEN) {
1201 desc->c_unwritten = 1;
1202 + desc->c_needs_zero = 1;
1207 @@ -1644,10 +1659,13 @@ static int ocfs2_expand_nonsparse_inode(struct inode *inode, loff_t pos,
1208 if (newsize <= i_size_read(inode))
1211 - ret = ocfs2_extend_no_holes(inode, newsize, newsize - len);
1212 + ret = ocfs2_extend_no_holes(inode, newsize, pos);
1216 + wc->w_first_new_cpos =
1217 + ocfs2_clusters_for_bytes(inode->i_sb, i_size_read(inode));
1222 @@ -1656,7 +1674,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1223 struct page **pagep, void **fsdata,
1224 struct buffer_head *di_bh, struct page *mmap_page)
1226 - int ret, credits = OCFS2_INODE_UPDATE_CREDITS;
1227 + int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
1228 unsigned int clusters_to_alloc, extents_to_split;
1229 struct ocfs2_write_ctxt *wc;
1230 struct inode *inode = mapping->host;
1231 @@ -1724,8 +1742,19 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1235 - ocfs2_set_target_boundaries(osb, wc, pos, len,
1236 - clusters_to_alloc + extents_to_split);
1238 + * We have to zero sparse allocated clusters, unwritten extent clusters,
1239 + * and non-sparse clusters we just extended. For non-sparse writes,
1240 + * we know zeros will only be needed in the first and/or last cluster.
1242 + if (clusters_to_alloc || extents_to_split ||
1243 + (wc->w_clen && (wc->w_desc[0].c_needs_zero ||
1244 + wc->w_desc[wc->w_clen - 1].c_needs_zero)))
1245 + cluster_of_pages = 1;
1247 + cluster_of_pages = 0;
1249 + ocfs2_set_target_boundaries(osb, wc, pos, len, cluster_of_pages);
1251 handle = ocfs2_start_trans(osb, credits);
1252 if (IS_ERR(handle)) {
1253 @@ -1753,8 +1782,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
1256 ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos,
1257 - clusters_to_alloc + extents_to_split,
1259 + cluster_of_pages, mmap_page);
1263 diff --git a/include/asm-x86/kvm_host.h b/include/asm-x86/kvm_host.h
1264 index c2e34c2..cf7c887 100644
1265 --- a/include/asm-x86/kvm_host.h
1266 +++ b/include/asm-x86/kvm_host.h
1267 @@ -195,6 +195,13 @@ struct kvm_mmu_page {
1271 +struct kvm_pv_mmu_op_buffer {
1274 + unsigned processed;
1275 + char buf[512] __aligned(sizeof(long));
1279 * x86 supports 3 paging modes (4-level 64-bit, 3-level 64-bit, and 2-level
1280 * 32-bit). The kvm_mmu structure abstracts the details of the current mmu
1281 @@ -237,6 +244,9 @@ struct kvm_vcpu_arch {
1282 bool tpr_access_reporting;
1285 + /* only needed in kvm_pv_mmu_op() path, but it's hot so
1286 + * put it here to avoid allocation */
1287 + struct kvm_pv_mmu_op_buffer mmu_op_buffer;
1289 struct kvm_mmu_memory_cache mmu_pte_chain_cache;
1290 struct kvm_mmu_memory_cache mmu_rmap_desc_cache;
1291 diff --git a/include/linux/parport.h b/include/linux/parport.h
1292 index 6a0d7cd..986252e 100644
1293 --- a/include/linux/parport.h
1294 +++ b/include/linux/parport.h
1295 @@ -326,6 +326,10 @@ struct parport {
1299 + unsigned long devflags;
1300 +#define PARPORT_DEVPROC_REGISTERED 0
1301 + struct pardevice *proc_device; /* Currently register proc device */
1303 struct list_head full_list;
1304 struct parport *slaves[3];
1306 diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
1307 index 4d80a11..75a87fe 100644
1308 --- a/include/linux/sunrpc/xprt.h
1309 +++ b/include/linux/sunrpc/xprt.h
1310 @@ -260,6 +260,7 @@ void xprt_conditional_disconnect(struct rpc_xprt *xprt, unsigned int cookie);
1311 #define XPRT_BOUND (4)
1312 #define XPRT_BINDING (5)
1313 #define XPRT_CLOSING (6)
1314 +#define XPRT_CONNECTION_CLOSE (8)
1316 static inline void xprt_set_connected(struct rpc_xprt *xprt)
1318 diff --git a/kernel/fork.c b/kernel/fork.c
1319 index fcbd28c..3fdf3d5 100644
1322 @@ -767,11 +767,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
1323 struct signal_struct *sig;
1326 - if (clone_flags & CLONE_THREAD) {
1327 - atomic_inc(&current->signal->count);
1328 - atomic_inc(&current->signal->live);
1329 + if (clone_flags & CLONE_THREAD)
1333 sig = kmem_cache_alloc(signal_cachep, GFP_KERNEL);
1336 @@ -844,16 +842,6 @@ void __cleanup_signal(struct signal_struct *sig)
1337 kmem_cache_free(signal_cachep, sig);
1340 -static void cleanup_signal(struct task_struct *tsk)
1342 - struct signal_struct *sig = tsk->signal;
1344 - atomic_dec(&sig->live);
1346 - if (atomic_dec_and_test(&sig->count))
1347 - __cleanup_signal(sig);
1350 static void copy_flags(unsigned long clone_flags, struct task_struct *p)
1352 unsigned long new_flags = p->flags;
1353 @@ -1201,6 +1189,8 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1356 if (clone_flags & CLONE_THREAD) {
1357 + atomic_inc(&current->signal->count);
1358 + atomic_inc(&current->signal->live);
1359 p->group_leader = current->group_leader;
1360 list_add_tail_rcu(&p->thread_group, &p->group_leader->thread_group);
1362 @@ -1261,7 +1251,8 @@ bad_fork_cleanup_mm:
1365 bad_fork_cleanup_signal:
1366 - cleanup_signal(p);
1367 + if (!(clone_flags & CLONE_THREAD))
1368 + __cleanup_signal(p->signal);
1369 bad_fork_cleanup_sighand:
1370 __cleanup_sighand(p->sighand);
1371 bad_fork_cleanup_fs:
1372 diff --git a/kernel/kthread.c b/kernel/kthread.c
1373 index 96cff2f..9548d52 100644
1374 --- a/kernel/kthread.c
1375 +++ b/kernel/kthread.c
1376 @@ -213,12 +213,12 @@ int kthread_stop(struct task_struct *k)
1377 /* Now set kthread_should_stop() to true, and wake it up. */
1378 kthread_stop_info.k = k;
1380 - put_task_struct(k);
1382 /* Once it dies, reset stop ptr, gather result and we're done. */
1383 wait_for_completion(&kthread_stop_info.done);
1384 kthread_stop_info.k = NULL;
1385 ret = kthread_stop_info.err;
1386 + put_task_struct(k);
1387 mutex_unlock(&kthread_stop_lock);
1390 diff --git a/kernel/signal.c b/kernel/signal.c
1391 index 7d0a222..de2b649 100644
1392 --- a/kernel/signal.c
1393 +++ b/kernel/signal.c
1394 @@ -2353,11 +2353,9 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
1399 - oss.ss_sp = (void __user *) current->sas_ss_sp;
1400 - oss.ss_size = current->sas_ss_size;
1401 - oss.ss_flags = sas_ss_flags(sp);
1403 + oss.ss_sp = (void __user *) current->sas_ss_sp;
1404 + oss.ss_size = current->sas_ss_size;
1405 + oss.ss_flags = sas_ss_flags(sp);
1409 @@ -2400,13 +2398,16 @@ do_sigaltstack (const stack_t __user *uss, stack_t __user *uoss, unsigned long s
1410 current->sas_ss_size = ss_size;
1416 - if (copy_to_user(uoss, &oss, sizeof(oss)))
1417 + if (!access_ok(VERIFY_WRITE, uoss, sizeof(*uoss)))
1419 + error = __put_user(oss.ss_sp, &uoss->ss_sp) |
1420 + __put_user(oss.ss_size, &uoss->ss_size) |
1421 + __put_user(oss.ss_flags, &uoss->ss_flags);
1428 diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
1429 index 0c85042..8067dc7 100644
1430 --- a/net/appletalk/ddp.c
1431 +++ b/net/appletalk/ddp.c
1432 @@ -1245,6 +1245,7 @@ static int atalk_getname(struct socket *sock, struct sockaddr *uaddr,
1435 *uaddr_len = sizeof(struct sockaddr_at);
1436 + memset(&sat.sat_zero, 0, sizeof(sat.sat_zero));
1439 if (sk->sk_state != TCP_ESTABLISHED)
1440 diff --git a/net/can/raw.c b/net/can/raw.c
1441 index 6e0663f..08f31d4 100644
1444 @@ -396,6 +396,7 @@ static int raw_getname(struct socket *sock, struct sockaddr *uaddr,
1448 + memset(addr, 0, sizeof(*addr));
1449 addr->can_family = AF_CAN;
1450 addr->can_ifindex = ro->ifindex;
1452 diff --git a/net/econet/af_econet.c b/net/econet/af_econet.c
1453 index 8789d2b..9aae86f 100644
1454 --- a/net/econet/af_econet.c
1455 +++ b/net/econet/af_econet.c
1456 @@ -520,6 +520,7 @@ static int econet_getname(struct socket *sock, struct sockaddr *uaddr,
1460 + memset(sec, 0, sizeof(*sec));
1461 mutex_lock(&econet_mutex);
1464 diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
1465 index 3eb5bcc..b28409c 100644
1466 --- a/net/irda/af_irda.c
1467 +++ b/net/irda/af_irda.c
1468 @@ -714,6 +714,7 @@ static int irda_getname(struct socket *sock, struct sockaddr *uaddr,
1469 struct sock *sk = sock->sk;
1470 struct irda_sock *self = irda_sk(sk);
1472 + memset(&saddr, 0, sizeof(saddr));
1474 if (sk->sk_state != TCP_ESTABLISHED)
1476 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
1477 index 5bcc452..90a55b1 100644
1478 --- a/net/llc/af_llc.c
1479 +++ b/net/llc/af_llc.c
1480 @@ -915,6 +915,7 @@ static int llc_ui_getname(struct socket *sock, struct sockaddr *uaddr,
1481 struct llc_sock *llc = llc_sk(sk);
1484 + memset(&sllc, 0, sizeof(sllc));
1486 if (sock_flag(sk, SOCK_ZAPPED))
1488 diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c
1489 index db9e263..ad72bde 100644
1490 --- a/net/netrom/af_netrom.c
1491 +++ b/net/netrom/af_netrom.c
1492 @@ -848,6 +848,7 @@ static int nr_getname(struct socket *sock, struct sockaddr *uaddr,
1493 sax->fsa_ax25.sax25_family = AF_NETROM;
1494 sax->fsa_ax25.sax25_ndigis = 1;
1495 sax->fsa_ax25.sax25_call = nr->user_addr;
1496 + memset(sax->fsa_digipeater, 0, sizeof(sax->fsa_digipeater));
1497 sax->fsa_digipeater[0] = nr->dest_addr;
1498 *uaddr_len = sizeof(struct full_sockaddr_ax25);
1500 diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c
1501 index c062361..f132243 100644
1502 --- a/net/rose/af_rose.c
1503 +++ b/net/rose/af_rose.c
1504 @@ -957,6 +957,7 @@ static int rose_getname(struct socket *sock, struct sockaddr *uaddr,
1505 struct rose_sock *rose = rose_sk(sk);
1508 + memset(srose, 0, sizeof(*srose));
1510 if (sk->sk_state != TCP_ESTABLISHED)
1512 diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
1513 index 66cfe88..860b1d4 100644
1514 --- a/net/sunrpc/clnt.c
1515 +++ b/net/sunrpc/clnt.c
1516 @@ -860,6 +860,7 @@ static inline void
1517 rpc_task_force_reencode(struct rpc_task *task)
1519 task->tk_rqstp->rq_snd_buf.len = 0;
1520 + task->tk_rqstp->rq_bytes_sent = 0;
1524 diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
1525 index 99a52aa..b66be67 100644
1526 --- a/net/sunrpc/xprt.c
1527 +++ b/net/sunrpc/xprt.c
1528 @@ -645,10 +645,8 @@ xprt_init_autodisconnect(unsigned long data)
1529 if (test_and_set_bit(XPRT_LOCKED, &xprt->state))
1531 spin_unlock(&xprt->transport_lock);
1532 - if (xprt_connecting(xprt))
1533 - xprt_release_write(xprt, NULL);
1535 - queue_work(rpciod_workqueue, &xprt->task_cleanup);
1536 + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1537 + queue_work(rpciod_workqueue, &xprt->task_cleanup);
1540 spin_unlock(&xprt->transport_lock);
1541 diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
1542 index 8f9295d..a283304 100644
1543 --- a/net/sunrpc/xprtsock.c
1544 +++ b/net/sunrpc/xprtsock.c
1545 @@ -748,6 +748,9 @@ out_release:
1547 * This is used when all requests are complete; ie, no DRC state remains
1548 * on the server we want to save.
1550 + * The caller _must_ be holding XPRT_LOCKED in order to avoid issues with
1551 + * xs_reset_transport() zeroing the socket from underneath a writer.
1553 static void xs_close(struct rpc_xprt *xprt)
1555 @@ -781,6 +784,14 @@ clear_close_wait:
1556 xprt_disconnect_done(xprt);
1559 +static void xs_tcp_close(struct rpc_xprt *xprt)
1561 + if (test_and_clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state))
1564 + xs_tcp_shutdown(xprt);
1568 * xs_destroy - prepare to shutdown a transport
1569 * @xprt: doomed transport
1570 @@ -1676,11 +1687,21 @@ static void xs_tcp_connect_worker4(struct work_struct *work)
1574 + case -ENETUNREACH:
1575 /* retry with existing socket, after a delay */
1579 /* get rid of existing socket, and retry */
1580 xs_tcp_shutdown(xprt);
1581 + printk("%s: connect returned unhandled error %d\n",
1582 + __func__, status);
1583 + case -EADDRNOTAVAIL:
1584 + /* We're probably in TIME_WAIT. Get rid of existing socket,
1587 + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1588 + xprt_force_disconnect(xprt);
1593 @@ -1735,11 +1756,21 @@ static void xs_tcp_connect_worker6(struct work_struct *work)
1597 + case -ENETUNREACH:
1598 /* retry with existing socket, after a delay */
1602 /* get rid of existing socket, and retry */
1603 xs_tcp_shutdown(xprt);
1604 + printk("%s: connect returned unhandled error %d\n",
1605 + __func__, status);
1606 + case -EADDRNOTAVAIL:
1607 + /* We're probably in TIME_WAIT. Get rid of existing socket,
1610 + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state);
1611 + xprt_force_disconnect(xprt);
1616 @@ -1871,7 +1902,7 @@ static struct rpc_xprt_ops xs_tcp_ops = {
1617 .buf_free = rpc_free,
1618 .send_request = xs_tcp_send_request,
1619 .set_retrans_timeout = xprt_set_retrans_timeout_def,
1620 - .close = xs_tcp_shutdown,
1621 + .close = xs_tcp_close,
1622 .destroy = xs_destroy,
1623 .print_stats = xs_tcp_print_stats,
1625 diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
1626 index 1533f03..4cfbd79 100644
1627 --- a/sound/core/pcm_lib.c
1628 +++ b/sound/core/pcm_lib.c
1629 @@ -779,47 +779,24 @@ static int snd_interval_ratden(struct snd_interval *i,
1630 int snd_interval_list(struct snd_interval *i, unsigned int count, unsigned int *list, unsigned int mask)
1634 + struct snd_interval list_range;
1640 + snd_interval_any(&list_range);
1641 + list_range.min = UINT_MAX;
1642 + list_range.max = 0;
1643 for (k = 0; k < count; k++) {
1644 if (mask && !(mask & (1 << k)))
1646 - if (i->min == list[k] && !i->openmin)
1648 - if (i->min < list[k]) {
1658 - for (k = count; k-- > 0;) {
1659 - if (mask && !(mask & (1 << k)))
1660 + if (!snd_interval_test(i, list[k]))
1662 - if (i->max == list[k] && !i->openmax)
1664 - if (i->max > list[k]) {
1670 + list_range.min = min(list_range.min, list[k]);
1671 + list_range.max = max(list_range.max, list[k]);
1676 - if (snd_interval_checkempty(i)) {
1681 + return snd_interval_refine(i, &list_range);
1684 EXPORT_SYMBOL(snd_interval_list);
1685 diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
1686 index 07f8dcc..2581b8c 100644
1687 --- a/sound/pci/hda/patch_realtek.c
1688 +++ b/sound/pci/hda/patch_realtek.c
1689 @@ -5580,9 +5580,9 @@ static struct hda_verb alc885_mbp_ch2_init[] = {
1696 -static struct hda_verb alc885_mbp_ch6_init[] = {
1697 +static struct hda_verb alc885_mbp_ch4_init[] = {
1698 { 0x1a, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT },
1699 { 0x1a, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1700 { 0x1a, AC_VERB_SET_CONNECT_SEL, 0x01 },
1701 @@ -5591,9 +5591,9 @@ static struct hda_verb alc885_mbp_ch6_init[] = {
1705 -static struct hda_channel_mode alc885_mbp_6ch_modes[2] = {
1706 +static struct hda_channel_mode alc885_mbp_4ch_modes[2] = {
1707 { 2, alc885_mbp_ch2_init },
1708 - { 6, alc885_mbp_ch6_init },
1709 + { 4, alc885_mbp_ch4_init },
1713 @@ -5628,10 +5628,11 @@ static struct snd_kcontrol_new alc882_base_mixer[] = {
1716 static struct snd_kcontrol_new alc885_mbp3_mixer[] = {
1717 - HDA_CODEC_VOLUME("Front Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1718 - HDA_BIND_MUTE ("Front Playback Switch", 0x0c, 0x02, HDA_INPUT),
1719 - HDA_CODEC_MUTE ("Speaker Playback Switch", 0x14, 0x00, HDA_OUTPUT),
1720 - HDA_CODEC_VOLUME("Line-Out Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1721 + HDA_CODEC_VOLUME("Speaker Playback Volume", 0x0c, 0x00, HDA_OUTPUT),
1722 + HDA_BIND_MUTE ("Speaker Playback Switch", 0x0c, 0x02, HDA_INPUT),
1723 + HDA_CODEC_VOLUME("Headphone Playback Volume", 0x0e, 0x00, HDA_OUTPUT),
1724 + HDA_BIND_MUTE ("Headphone Playback Switch", 0x0e, 0x02, HDA_INPUT),
1725 + HDA_CODEC_VOLUME("Surround Playback Volume", 0x0d, 0x00, HDA_OUTPUT),
1726 HDA_CODEC_VOLUME("Line Playback Volume", 0x0b, 0x02, HDA_INPUT),
1727 HDA_CODEC_MUTE ("Line Playback Switch", 0x0b, 0x02, HDA_INPUT),
1728 HDA_CODEC_VOLUME("Mic Playback Volume", 0x0b, 0x00, HDA_INPUT),
1729 @@ -5879,14 +5880,18 @@ static struct hda_verb alc885_mbp3_init_verbs[] = {
1730 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1731 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1732 {0x0d, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1734 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO},
1735 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(0)},
1736 + {0x0e, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_MUTE(1)},
1737 /* Front Pin: output 0 (0x0c) */
1738 {0x14, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_OUT},
1739 {0x14, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1740 {0x14, AC_VERB_SET_CONNECT_SEL, 0x00},
1741 - /* HP Pin: output 0 (0x0d) */
1742 + /* HP Pin: output 0 (0x0e) */
1743 {0x15, AC_VERB_SET_PIN_WIDGET_CONTROL, 0xc4},
1744 - {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE},
1745 - {0x15, AC_VERB_SET_CONNECT_SEL, 0x00},
1746 + {0x15, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE},
1747 + {0x15, AC_VERB_SET_CONNECT_SEL, 0x02},
1748 {0x15, AC_VERB_SET_UNSOLICITED_ENABLE, ALC880_HP_EVENT | AC_USRSP_EN},
1749 /* Mic (rear) pin: input vref at 80% */
1750 {0x18, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80},
1751 @@ -6326,10 +6331,11 @@ static struct alc_config_preset alc882_presets[] = {
1752 .mixers = { alc885_mbp3_mixer, alc882_chmode_mixer },
1753 .init_verbs = { alc885_mbp3_init_verbs,
1754 alc880_gpio1_init_verbs },
1755 - .num_dacs = ARRAY_SIZE(alc882_dac_nids),
1757 .dac_nids = alc882_dac_nids,
1758 - .channel_mode = alc885_mbp_6ch_modes,
1759 - .num_channel_mode = ARRAY_SIZE(alc885_mbp_6ch_modes),
1761 + .channel_mode = alc885_mbp_4ch_modes,
1762 + .num_channel_mode = ARRAY_SIZE(alc885_mbp_4ch_modes),
1763 .input_mux = &alc882_capture_source,
1764 .dig_out_nid = ALC882_DIGOUT_NID,
1765 .dig_in_nid = ALC882_DIGIN_NID,
1766 @@ -11634,6 +11640,8 @@ static int patch_alc269(struct hda_codec *codec)
1767 spec->num_adc_nids = ARRAY_SIZE(alc269_adc_nids);
1768 spec->capsrc_nids = alc269_capsrc_nids;
1770 + spec->vmaster_nid = 0x02;
1772 codec->patch_ops = alc_patch_ops;
1773 if (board_config == ALC269_AUTO)
1774 spec->init_hook = alc269_auto_init;
1775 diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
1776 index 7dd9b0b..db062b5 100644
1777 --- a/virt/kvm/kvm_main.c
1778 +++ b/virt/kvm/kvm_main.c
1779 @@ -406,6 +406,8 @@ static void kvm_destroy_vm(struct kvm *kvm)
1781 #if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
1782 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
1784 + kvm_arch_flush_shadow(kvm);
1786 kvm_arch_destroy_vm(kvm);
1788 @@ -548,6 +550,8 @@ int __kvm_set_memory_region(struct kvm *kvm,
1789 if (!new.dirty_bitmap)
1791 memset(new.dirty_bitmap, 0, dirty_bytes);
1793 + kvm_arch_flush_shadow(kvm);
1795 #endif /* not defined CONFIG_S390 */
1797 @@ -726,7 +730,7 @@ pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
1798 return page_to_pfn(bad_page);
1801 - npages = get_user_pages(current, current->mm, addr, 1, 1, 1, page,
1802 + npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
1805 if (unlikely(npages != 1)) {
1806 @@ -1074,12 +1078,11 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1808 r = kvm_arch_vcpu_setup(vcpu);
1810 - goto vcpu_destroy;
1813 mutex_lock(&kvm->lock);
1814 if (kvm->vcpus[n]) {
1816 - mutex_unlock(&kvm->lock);
1819 kvm->vcpus[n] = vcpu;
1820 @@ -1095,8 +1098,8 @@ static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1822 mutex_lock(&kvm->lock);
1823 kvm->vcpus[n] = NULL;
1824 - mutex_unlock(&kvm->lock);
1826 + mutex_unlock(&kvm->lock);
1827 kvm_arch_vcpu_destroy(vcpu);
1830 @@ -1118,6 +1121,8 @@ static long kvm_vcpu_ioctl(struct file *filp,
1831 struct kvm_vcpu *vcpu = filp->private_data;
1832 void __user *argp = (void __user *)arg;
1834 + struct kvm_fpu *fpu = NULL;
1835 + struct kvm_sregs *kvm_sregs = NULL;
1837 if (vcpu->kvm->mm != current->mm)
1839 @@ -1165,25 +1170,28 @@ out_free2:
1842 case KVM_GET_SREGS: {
1843 - struct kvm_sregs kvm_sregs;
1845 - memset(&kvm_sregs, 0, sizeof kvm_sregs);
1846 - r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, &kvm_sregs);
1847 + kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1851 + r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
1855 - if (copy_to_user(argp, &kvm_sregs, sizeof kvm_sregs))
1856 + if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
1861 case KVM_SET_SREGS: {
1862 - struct kvm_sregs kvm_sregs;
1864 + kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1869 - if (copy_from_user(&kvm_sregs, argp, sizeof kvm_sregs))
1870 + if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1872 - r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, &kvm_sregs);
1873 + r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
1877 @@ -1264,25 +1272,28 @@ out_free2:
1881 - struct kvm_fpu fpu;
1883 - memset(&fpu, 0, sizeof fpu);
1884 - r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, &fpu);
1885 + fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1889 + r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
1893 - if (copy_to_user(argp, &fpu, sizeof fpu))
1894 + if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
1900 - struct kvm_fpu fpu;
1902 + fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1907 - if (copy_from_user(&fpu, argp, sizeof fpu))
1908 + if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1910 - r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, &fpu);
1911 + r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
1915 @@ -1292,6 +1303,8 @@ out_free2:
1916 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);