/*
 * Copyright © 2014-2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <drm/i915_drm.h>
#include "i915_drv.h"

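/*
 * Two allocation flavours, as used below in get_pages: high-order attempts
 * should fail fast and silently so we can fall back to a smaller order
 * (QUIET), while the final order-0 attempt may retry harder but must still
 * fail rather than invoke the OOM killer (MAYFAIL).
 */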
#define QUIET (__GFP_NORETRY | __GFP_NOWARN)
#define MAYFAIL (__GFP_RETRY_MAYFAIL | __GFP_NOWARN)

/* convert swiotlb segment size into sensible units (pages)! */
#define IO_TLB_SEGPAGES (IO_TLB_SEGSIZE << IO_TLB_SHIFT >> PAGE_SHIFT)
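/*
 * Worked example (assuming the usual swiotlb defaults of IO_TLB_SEGSIZE ==
 * 128 slots and IO_TLB_SHIFT == 11, i.e. 2KiB slots): on a 4KiB-page system
 * this evaluates to 128 << 11 >> 12 == 64 pages per swiotlb segment.
 */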

static void internal_free_pages(struct sg_table *st)
{
	struct scatterlist *sg;

	for (sg = st->sgl; sg; sg = __sg_next(sg)) {
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	}

	sg_free_table(st);
	kfree(st);
}

static int i915_gem_object_get_pages_internal(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	unsigned int sg_page_sizes;
	unsigned int npages;
	int max_order;
	gfp_t gfp;

	max_order = MAX_ORDER;
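	/*
	 * When swiotlb bounce-buffering is in use, no single DMA mapping may
	 * exceed a bounce-buffer segment, so clamp the largest allocation
	 * order we will attempt to what swiotlb can map in one segment.
	 */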
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		unsigned int max_segment;

		max_segment = swiotlb_max_segment();
		if (max_segment) {
			max_segment = max_t(unsigned int, max_segment,
					    PAGE_SIZE) >> PAGE_SHIFT;
			max_order = min(max_order, ilog2(max_segment));
		}
	}
#endif

	gfp = GFP_KERNEL | __GFP_HIGHMEM | __GFP_RECLAIMABLE;
	if (IS_I965GM(i915) || IS_I965G(i915)) {
		/* 965gm cannot relocate objects above 4GiB. */
		gfp &= ~__GFP_HIGHMEM;
		gfp |= __GFP_DMA32;
	}

create_st:
	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	npages = obj->base.size / PAGE_SIZE;
	if (sg_alloc_table(st, npages, GFP_KERNEL)) {
		kfree(st);
		return -ENOMEM;
	}

	sg = st->sgl;
	st->nents = 0;
	sg_page_sizes = 0;

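	/*
	 * Greedily allocate the largest chunks first: e.g. for a 9-page
	 * object with an unclamped max_order, the first pass computes
	 * order = fls(9) - 1 = 3 (an 8-page chunk), leaving a single page
	 * to be picked up by a final order-0 allocation.
	 */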
	do {
		int order = min(fls(npages) - 1, max_order);
		struct page *page;

		do {
			page = alloc_pages(gfp | (order ? QUIET : MAYFAIL),
					   order);
			if (page)
				break;
			if (!order--)
				goto err;

			/* Limit subsequent allocations as well */
			max_order = order;
		} while (1);

		sg_set_page(sg, page, PAGE_SIZE << order, 0);
		sg_page_sizes |= PAGE_SIZE << order;
		st->nents++;

		npages -= 1 << order;
		if (!npages) {
			sg_mark_end(sg);
			break;
		}

		sg = __sg_next(sg);
	} while (1);

	if (i915_gem_gtt_prepare_pages(obj, st)) {
		/* Failed to dma-map, try again with single page sg segments */
		if (get_order(st->sgl->length)) {
			internal_free_pages(st);
			max_order = 0;
			goto create_st;
		}
		goto err;
	}

	/* Mark the pages as dontneed whilst they are still pinned. As soon
	 * as they are unpinned they are allowed to be reaped by the shrinker,
	 * and the caller is expected to repopulate - the contents of this
	 * object are only valid whilst active and pinned.
	 */
	obj->mm.madv = I915_MADV_DONTNEED;

	__i915_gem_object_set_pages(obj, st, sg_page_sizes);

	return 0;

err:
	sg_set_page(sg, NULL, 0, 0);
	sg_mark_end(sg);
	internal_free_pages(st);

	return -ENOMEM;
}

static void i915_gem_object_put_pages_internal(struct drm_i915_gem_object *obj,
					       struct sg_table *pages)
{
	i915_gem_gtt_finish_pages(obj, pages);
	internal_free_pages(pages);

	obj->mm.dirty = false;
	obj->mm.madv = I915_MADV_WILLNEED;
}

static const struct drm_i915_gem_object_ops i915_gem_object_internal_ops = {
	.flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE |
		 I915_GEM_OBJECT_IS_SHRINKABLE,
	.get_pages = i915_gem_object_get_pages_internal,
	.put_pages = i915_gem_object_put_pages_internal,
};

/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

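	/*
	 * The object size is stored in obj->base.size, a size_t, so reject
	 * anything a phys_addr_t can describe but a size_t cannot hold
	 * (e.g. >4GiB on a 32-bit kernel with 64-bit physical addressing).
	 */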
	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}
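
/*
 * Example usage (a minimal sketch; the calling context and error handling
 * here are illustrative, not taken from this file):
 *
 *	obj = i915_gem_object_create_internal(i915, size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 *
 *	err = i915_gem_object_pin_pages(obj);	// backing store allocated here
 *	if (err) {
 *		i915_gem_object_put(obj);
 *		return err;
 *	}
 *
 *	// ... use the pages; contents are valid only while pinned ...
 *
 *	i915_gem_object_unpin_pages(obj);	// now reapable by the shrinker
 *	i915_gem_object_put(obj);
 */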