/*
 * Copyright (c) 2014 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef IB_UMEM_ODP_H
#define IB_UMEM_ODP_H

#include <rdma/ib_umem.h>
#include <rdma/ib_verbs.h>
#include <linux/interval_tree.h>

struct umem_odp_node {
	u64 __subtree_last;
	struct rb_node rb;
};

struct ib_umem_odp {
	struct ib_umem umem;
	struct ib_ucontext_per_mm *per_mm;

	/*
	 * An array of the pages included in the on-demand paging umem.
	 * Indices of pages that are currently not mapped into the device will
	 * contain NULL.
	 */
	struct page **page_list;
	/*
	 * An array of the same size as page_list, with DMA addresses mapped
	 * for the pages in page_list. The lower two bits designate
	 * access permissions. See ODP_READ_ALLOWED_BIT and
	 * ODP_WRITE_ALLOWED_BIT.
	 */
	dma_addr_t *dma_list;
	/*
	 * The umem_mutex protects the page_list and dma_list fields of an ODP
	 * umem, allowing only a single thread to map/unmap pages. The mutex
	 * also protects access to the mmu notifier counters.
	 */
	struct mutex umem_mutex;
	void *private; /* for the HW driver to use. */

	int notifiers_seq;
	int notifiers_count;

	/* Tree tracking */
	struct umem_odp_node interval_tree;

	struct completion notifier_completion;
	int dying;
	struct work_struct work;
};

static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}
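
/*
 * Usage sketch (illustrative, not part of this file): a driver that
 * registered its umem with IB_ACCESS_ON_DEMAND can recover the ODP state
 * from the embedded umem. "mr" below is a hypothetical driver MR that
 * stores a struct ib_umem pointer:
 *
 *	struct ib_umem_odp *odp = to_ib_umem_odp(mr->umem);
 */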
85
86 #ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
87
88 struct ib_ucontext_per_mm {
89 struct ib_ucontext *context;
90 struct mm_struct *mm;
91 struct pid *tgid;
92 bool active;
93
94 struct rb_root_cached umem_tree;
95 /* Protects umem_tree */
96 struct rw_semaphore umem_rwsem;
97
98 struct mmu_notifier mn;
99 unsigned int odp_mrs_count;
100
101 struct list_head ucontext_list;
102 struct rcu_head rcu;
103 };
104
105 int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access);
106 struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm,
107 unsigned long addr, size_t size);
108 void ib_umem_odp_release(struct ib_umem_odp *umem_odp);
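
/*
 * Lifetime sketch (illustrative): an implicit-ODP implementation might
 * carve a child umem out of an address range on demand and drop it when
 * the range goes away. "addr" and "size" are hypothetical; error handling
 * is the caller's job.
 *
 *	struct ib_umem_odp *odp;
 *
 *	odp = ib_alloc_odp_umem(per_mm, addr, size);
 *	if (IS_ERR(odp))
 *		return PTR_ERR(odp);
 *	...
 *	ib_umem_odp_release(odp);
 */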

/*
 * The lower 2 bits of the DMA address signal the R/W permissions for
 * the entry. To upgrade the permissions, provide the appropriate
 * bitmask to the map_dma_pages function.
 *
 * Be aware that upgrading a mapped address might result in change of
 * the DMA address for the page.
 */
#define ODP_READ_ALLOWED_BIT  (1ULL << 0)
#define ODP_WRITE_ALLOWED_BIT (1ULL << 1)

#define ODP_DMA_ADDR_MASK (~(ODP_READ_ALLOWED_BIT | ODP_WRITE_ALLOWED_BIT))
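
/*
 * Decoding sketch (illustrative): with umem_mutex held, a driver can
 * split a dma_list entry into its device address and permission bits.
 * "idx" is a hypothetical page index into the umem, not an address.
 *
 *	dma_addr_t entry = umem_odp->dma_list[idx];
 *	dma_addr_t dma   = entry & ODP_DMA_ADDR_MASK;
 *
 *	if (entry & ODP_WRITE_ALLOWED_BIT)
 *		;	// page is mapped with write permission
 *	if (entry & ODP_READ_ALLOWED_BIT)
 *		;	// page is mapped with read permission
 *	// a cleared entry is an unmapped page (page_list[idx] is NULL)
 */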

int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
			      u64 bcnt, u64 access_mask,
			      unsigned long current_seq);

void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 start_offset,
				 u64 bound);

typedef int (*umem_call_back)(struct ib_umem_odp *item, u64 start, u64 end,
			      void *cookie);
/*
 * Call the callback on each ib_umem in the range. Returns the logical or of
 * the return values of the functions called.
 */
int rbt_ib_umem_for_each_in_range(struct rb_root_cached *root,
				  u64 start, u64 end,
				  umem_call_back cb,
				  bool blockable, void *cookie);
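
/*
 * Traversal sketch (illustrative, mirroring what an mmu_notifier
 * invalidation path might do): invoke a callback on every ODP umem
 * overlapping [start, end) while holding umem_rwsem for read.
 *
 *	static int invalidate_cb(struct ib_umem_odp *item, u64 start,
 *				 u64 end, void *cookie)
 *	{
 *		ib_umem_odp_unmap_dma_pages(item, start, end);
 *		return 0;
 *	}
 *
 *	down_read(&per_mm->umem_rwsem);
 *	rbt_ib_umem_for_each_in_range(&per_mm->umem_tree, start, end,
 *				      invalidate_cb, true, NULL);
 *	up_read(&per_mm->umem_rwsem);
 */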

/*
 * Find first region intersecting with address range.
 * Return NULL if not found.
 */
struct ib_umem_odp *rbt_ib_umem_lookup(struct rb_root_cached *root,
				       u64 addr, u64 length);
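
/*
 * Lookup sketch (illustrative): find the ODP umem, if any, covering a
 * faulting address. "fault_addr" and "fault_len" are hypothetical; the
 * tree must be kept stable, so umem_rwsem is held across the lookup.
 *
 *	down_read(&per_mm->umem_rwsem);
 *	odp = rbt_ib_umem_lookup(&per_mm->umem_tree, fault_addr, fault_len);
 *	up_read(&per_mm->umem_rwsem);
 */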

static inline int ib_umem_mmu_notifier_retry(struct ib_umem_odp *umem_odp,
					     unsigned long mmu_seq)
{
	/*
	 * This code is strongly based on the KVM code from
	 * mmu_notifier_retry. Should be called with
	 * the relevant locks taken (umem_odp->umem_mutex
	 * and the per_mm->umem_rwsem semaphore locked for read).
	 */

	if (unlikely(umem_odp->notifiers_count))
		return 1;
	if (umem_odp->notifiers_seq != mmu_seq)
		return 1;
	return 0;
}
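
/*
 * Retry-pattern sketch (illustrative): a page-fault handler typically
 * samples notifiers_seq before mapping, then rechecks under umem_mutex,
 * much like KVM's mmu_notifier_seq handling. "start", "bcnt",
 * "access_mask" and the retry label are hypothetical.
 *
 *	unsigned long seq = umem_odp->notifiers_seq;
 *	int npages;
 *
 *	npages = ib_umem_odp_map_dma_pages(umem_odp, start, bcnt,
 *					   access_mask, seq);
 *	if (npages < 0)
 *		return npages;
 *
 *	mutex_lock(&umem_odp->umem_mutex);
 *	if (ib_umem_mmu_notifier_retry(umem_odp, seq)) {
 *		mutex_unlock(&umem_odp->umem_mutex);
 *		goto retry;	// an invalidation raced with us; start over
 *	}
 *	// safe to install the mapping in device page tables here
 *	mutex_unlock(&umem_odp->umem_mutex);
 */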

#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

static inline int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
{
	return -EINVAL;
}

static inline struct ib_umem_odp *
ib_alloc_odp_umem(struct ib_ucontext_per_mm *per_mm, unsigned long addr,
		  size_t size)
{
	return ERR_PTR(-EINVAL);
}

static inline void ib_umem_odp_release(struct ib_umem_odp *umem_odp) {}

#endif /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */

#endif /* IB_UMEM_ODP_H */