1 From: jbeulich@novell.com
3 Patch-mainline: obsolete (superseded by c/s 867)
5 get_user_pages() expects vma->vm_private_data of VM_FOREIGN vma-s to
6 point to an array of struct page *, hence blktap must not alter the
9 Found-by: Isaku Yamahata <yamahata@valinux.co.jp>
11 --- sle11-2009-04-20.orig/drivers/xen/blktap/blktap.c 2009-04-20 11:41:53.000000000 +0200
12 +++ sle11-2009-04-20/drivers/xen/blktap/blktap.c 2009-04-29 09:36:57.000000000 +0200
13 @@ -320,7 +320,7 @@ static pte_t blktap_clear_pte(struct vm_
14 int offset, seg, usr_idx, pending_idx, mmap_idx;
15 unsigned long uvstart = vma->vm_start + (RING_PAGES << PAGE_SHIFT);
17 - struct tap_vma_priv *priv;
20 struct grant_handle_pair *khandle;
21 struct gnttab_unmap_grant_ref unmap[2];
22 @@ -335,12 +335,12 @@ static pte_t blktap_clear_pte(struct vm_
25 info = vma->vm_file->private_data;
26 - priv = vma->vm_private_data;
27 + map = vma->vm_private_data;
29 /* TODO Should these be changed to if statements? */
31 BUG_ON(!info->idx_map);
35 offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT);
36 usr_idx = OFFSET_TO_USR_IDX(offset);
37 @@ -352,7 +352,7 @@ static pte_t blktap_clear_pte(struct vm_
38 kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
39 pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
40 ClearPageReserved(pg);
41 - priv->map[offset + RING_PAGES] = NULL;
42 + map[offset + RING_PAGES] = NULL;
44 khandle = &pending_handle(mmap_idx, pending_idx, seg);
46 @@ -395,9 +395,11 @@ static pte_t blktap_clear_pte(struct vm_
48 static void blktap_vma_close(struct vm_area_struct *vma)
50 - struct tap_vma_priv *priv = vma->vm_private_data;
51 + if (vma->vm_private_data) {
52 + struct tap_vma_priv *priv = container_of(vma->vm_private_data,
53 + struct tap_vma_priv,
57 priv->info->vma = NULL;
60 @@ -708,7 +710,7 @@ static int blktap_mmap(struct file *filp
64 - vma->vm_private_data = priv;
65 + vma->vm_private_data = priv->map;
66 vma->vm_flags |= VM_FOREIGN;
67 vma->vm_flags |= VM_DONTCOPY;
69 @@ -1189,7 +1191,7 @@ static int blktap_read_ufe_ring(tap_blki
70 for (j = 0; j < pending_req->nr_pages; j++) {
72 unsigned long kvaddr, uvaddr;
73 - struct tap_vma_priv *priv = info->vma->vm_private_data;
74 + struct page **map = info->vma->vm_private_data;
78 @@ -1200,7 +1202,7 @@ static int blktap_read_ufe_ring(tap_blki
79 ClearPageReserved(pg);
80 offset = (uvaddr - info->vma->vm_start)
82 - priv->map[offset] = NULL;
85 fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
86 info->idx_map[usr_idx] = INVALID_REQ;
87 @@ -1366,7 +1368,7 @@ static void dispatch_rw_block_io(blkif_t
89 int ret, i, nr_sects = 0;
91 - struct tap_vma_priv *priv;
92 + struct page **pgmap;
93 blkif_request_t *target;
94 int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
96 @@ -1432,7 +1434,6 @@ static void dispatch_rw_block_io(blkif_t
97 pending_req->status = BLKIF_RSP_OKAY;
98 pending_req->nr_pages = nseg;
100 - priv = info->vma->vm_private_data;
101 mm = info->vma->vm_mm;
102 if (!xen_feature(XENFEAT_auto_translated_physmap))
103 down_write(&mm->mmap_sem);
104 @@ -1477,6 +1478,8 @@ static void dispatch_rw_block_io(blkif_t
105 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
108 + pgmap = info->vma->vm_private_data;
110 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
111 up_write(&mm->mmap_sem);
113 @@ -1516,7 +1519,7 @@ static void dispatch_rw_block_io(blkif_t
115 offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
116 pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
117 - priv->map[offset] = pg;
118 + pgmap[offset] = pg;
121 for (i = 0; i < nseg; i++) {
122 @@ -1543,7 +1546,7 @@ static void dispatch_rw_block_io(blkif_t
124 offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
125 pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
126 - priv->map[offset] = pg;
127 + pgmap[offset] = pg;