Diffstat (limited to 'meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch')
-rw-r--r-- | meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch | 5483
1 file changed, 5483 insertions, 0 deletions
diff --git a/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch b/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
new file mode 100644
index 0000000000..95cca5d0c6
--- /dev/null
+++ b/meta-moblin/packages/linux/linux-moblin-2.6.27/0014-drm-Add-GEM-graphics-execution-manager-to-i915.patch
@@ -0,0 +1,5483 @@
commit c97398223c6a505fac2c783a624dc80e0aa5d5d0
Author: Eric Anholt <eric@anholt.net>
Date:   Wed Jul 30 12:06:12 2008 -0700

    drm: Add GEM ("graphics execution manager") to i915 driver.

    GEM allows the creation of persistent buffer objects accessible by the
    graphics device through new ioctls for managing execution of commands on the
    device. The userland API is almost entirely driver-specific to ensure that
    any driver building on this model can easily map the interface to individual
    driver requirements.

    GEM is used by the 2d driver for managing its internal state allocations and
    will be used for pixmap storage to reduce memory consumption and enable
    zero-copy GLX_EXT_texture_from_pixmap, and in the 3d driver is used to enable
    GL_EXT_framebuffer_object and GL_ARB_pixel_buffer_object.

    Signed-off-by: Eric Anholt <eric@anholt.net>

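    To make the sharing model above concrete, the following is a minimal
    userland sketch (not part of the patch itself) of the name-based sharing
    path driven by the DRM_IOCTL_GEM_FLINK and DRM_IOCTL_GEM_OPEN ioctls
    this patch wires up: one client publishes a buffer handle under a global
    name, another turns that name into its own handle plus the object size.
    The struct layouts and request macros are assumed to match the
    drm_gem_flink/drm_gem_open definitions this series adds to
    include/drm/drm.h; error handling is minimal.

        /* Hypothetical userland sketch -- assumed API, not part of this patch. */
        #include <stdint.h>
        #include <sys/ioctl.h>
        #include <drm/drm.h>

        /* Exporter: publish an existing GEM handle under a global name.
         * Names start at 1, so 0 signals failure.  Note the name does not
         * hold a reference on the object. */
        static uint64_t gem_export(int drm_fd, uint32_t handle)
        {
                struct drm_gem_flink flink = { .handle = handle };

                if (ioctl(drm_fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
                        return 0;
                return flink.name;
        }

        /* Importer: open the named object, getting back a local handle
         * (which does hold a reference) and the object size. */
        static int gem_import(int drm_fd, uint64_t name,
                              uint32_t *handle, uint64_t *size)
        {
                struct drm_gem_open op = { .name = name };

                if (ioctl(drm_fd, DRM_IOCTL_GEM_OPEN, &op) != 0)
                        return -1;
                *handle = op.handle;
                *size = op.size;
                return 0;
        }
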
20 | diff --git a/drivers/gpu/drm/Makefile b/drivers/gpu/drm/Makefile | ||
21 | index e9f9a97..74da994 100644 | ||
22 | --- a/drivers/gpu/drm/Makefile | ||
23 | +++ b/drivers/gpu/drm/Makefile | ||
24 | @@ -4,8 +4,9 @@ | ||
25 | |||
26 | ccflags-y := -Iinclude/drm | ||
27 | |||
28 | -drm-y := drm_auth.o drm_bufs.o drm_context.o drm_dma.o drm_drawable.o \ | ||
29 | - drm_drv.o drm_fops.o drm_ioctl.o drm_irq.o \ | ||
30 | +drm-y := drm_auth.o drm_bufs.o drm_cache.o \ | ||
31 | + drm_context.o drm_dma.o drm_drawable.o \ | ||
32 | + drm_drv.o drm_fops.o drm_gem.o drm_ioctl.o drm_irq.o \ | ||
33 | drm_lock.o drm_memory.o drm_proc.o drm_stub.o drm_vm.o \ | ||
34 | drm_agpsupport.o drm_scatter.o ati_pcigart.o drm_pci.o \ | ||
35 | drm_sysfs.o drm_hashtab.o drm_sman.o drm_mm.o | ||
36 | diff --git a/drivers/gpu/drm/drm_agpsupport.c b/drivers/gpu/drm/drm_agpsupport.c | ||
37 | index aefa5ac..2639be2 100644 | ||
38 | --- a/drivers/gpu/drm/drm_agpsupport.c | ||
39 | +++ b/drivers/gpu/drm/drm_agpsupport.c | ||
40 | @@ -33,6 +33,7 @@ | ||
41 | |||
42 | #include "drmP.h" | ||
43 | #include <linux/module.h> | ||
44 | +#include <asm/agp.h> | ||
45 | |||
46 | #if __OS_HAS_AGP | ||
47 | |||
48 | @@ -452,4 +453,52 @@ int drm_agp_unbind_memory(DRM_AGP_MEM * handle) | ||
49 | return agp_unbind_memory(handle); | ||
50 | } | ||
51 | |||
52 | -#endif /* __OS_HAS_AGP */ | ||
53 | +/** | ||
54 | + * Binds a collection of pages into AGP memory at the given offset, returning | ||
55 | + * the AGP memory structure containing them. | ||
56 | + * | ||
57 | + * No reference is held on the pages during this time -- it is up to the | ||
58 | + * caller to handle that. | ||
59 | + */ | ||
60 | +DRM_AGP_MEM * | ||
61 | +drm_agp_bind_pages(struct drm_device *dev, | ||
62 | + struct page **pages, | ||
63 | + unsigned long num_pages, | ||
64 | + uint32_t gtt_offset) | ||
65 | +{ | ||
66 | + DRM_AGP_MEM *mem; | ||
67 | + int ret, i; | ||
68 | + | ||
69 | + DRM_DEBUG("\n"); | ||
70 | + | ||
71 | + mem = drm_agp_allocate_memory(dev->agp->bridge, num_pages, | ||
72 | + AGP_USER_MEMORY); | ||
73 | + if (mem == NULL) { | ||
74 | + DRM_ERROR("Failed to allocate memory for %ld pages\n", | ||
75 | + num_pages); | ||
76 | + return NULL; | ||
77 | + } | ||
78 | + | ||
79 | + for (i = 0; i < num_pages; i++) | ||
80 | + mem->memory[i] = phys_to_gart(page_to_phys(pages[i])); | ||
81 | + mem->page_count = num_pages; | ||
82 | + | ||
83 | + mem->is_flushed = true; | ||
84 | + ret = drm_agp_bind_memory(mem, gtt_offset / PAGE_SIZE); | ||
85 | + if (ret != 0) { | ||
86 | + DRM_ERROR("Failed to bind AGP memory: %d\n", ret); | ||
87 | + agp_free_memory(mem); | ||
88 | + return NULL; | ||
89 | + } | ||
90 | + | ||
91 | + return mem; | ||
92 | +} | ||
93 | +EXPORT_SYMBOL(drm_agp_bind_pages); | ||
94 | + | ||
95 | +void drm_agp_chipset_flush(struct drm_device *dev) | ||
96 | +{ | ||
97 | + agp_flush_chipset(dev->agp->bridge); | ||
98 | +} | ||
99 | +EXPORT_SYMBOL(drm_agp_chipset_flush); | ||
100 | + | ||
101 | +#endif /* __OS_HAS_AGP */ | ||
102 | diff --git a/drivers/gpu/drm/drm_cache.c b/drivers/gpu/drm/drm_cache.c | ||
103 | new file mode 100644 | ||
104 | index 0000000..9475f7d | ||
105 | --- /dev/null | ||
106 | +++ b/drivers/gpu/drm/drm_cache.c | ||
107 | @@ -0,0 +1,76 @@ | ||
108 | +/************************************************************************** | ||
109 | + * | ||
110 | + * Copyright (c) 2006-2007 Tungsten Graphics, Inc., Cedar Park, TX., USA | ||
111 | + * All Rights Reserved. | ||
112 | + * | ||
113 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
114 | + * copy of this software and associated documentation files (the | ||
115 | + * "Software"), to deal in the Software without restriction, including | ||
116 | + * without limitation the rights to use, copy, modify, merge, publish, | ||
117 | + * distribute, sub license, and/or sell copies of the Software, and to | ||
118 | + * permit persons to whom the Software is furnished to do so, subject to | ||
119 | + * the following conditions: | ||
120 | + * | ||
121 | + * The above copyright notice and this permission notice (including the | ||
122 | + * next paragraph) shall be included in all copies or substantial portions | ||
123 | + * of the Software. | ||
124 | + * | ||
125 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
126 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
127 | + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL | ||
128 | + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, | ||
129 | + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR | ||
130 | + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE | ||
131 | + * USE OR OTHER DEALINGS IN THE SOFTWARE. | ||
132 | + * | ||
133 | + **************************************************************************/ | ||
134 | +/* | ||
135 | + * Authors: Thomas Hellström <thomas-at-tungstengraphics-dot-com> | ||
136 | + */ | ||
137 | + | ||
138 | +#include "drmP.h" | ||
139 | + | ||
140 | +#if defined(CONFIG_X86) | ||
141 | +static void | ||
142 | +drm_clflush_page(struct page *page) | ||
143 | +{ | ||
144 | + uint8_t *page_virtual; | ||
145 | + unsigned int i; | ||
146 | + | ||
147 | + if (unlikely(page == NULL)) | ||
148 | + return; | ||
149 | + | ||
150 | + page_virtual = kmap_atomic(page, KM_USER0); | ||
151 | + for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size) | ||
152 | + clflush(page_virtual + i); | ||
153 | + kunmap_atomic(page_virtual, KM_USER0); | ||
154 | +} | ||
155 | +#endif | ||
156 | + | ||
157 | +static void | ||
158 | +drm_clflush_ipi_handler(void *null) | ||
159 | +{ | ||
160 | + wbinvd(); | ||
161 | +} | ||
162 | + | ||
163 | +void | ||
164 | +drm_clflush_pages(struct page *pages[], unsigned long num_pages) | ||
165 | +{ | ||
166 | + | ||
167 | +#if defined(CONFIG_X86) | ||
168 | + if (cpu_has_clflush) { | ||
169 | + unsigned long i; | ||
170 | + | ||
171 | + mb(); | ||
172 | + for (i = 0; i < num_pages; ++i) | ||
173 | + drm_clflush_page(*pages++); | ||
174 | + mb(); | ||
175 | + | ||
176 | + return; | ||
177 | + } | ||
178 | +#endif | ||
179 | + | ||
180 | + if (on_each_cpu(drm_clflush_ipi_handler, NULL, 1) != 0) | ||
181 | + DRM_ERROR("Timed out waiting for cache flush.\n"); | ||
182 | +} | ||
183 | +EXPORT_SYMBOL(drm_clflush_pages); | ||
184 | diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c | ||
185 | index fb45fe7..96f416a 100644 | ||
186 | --- a/drivers/gpu/drm/drm_drv.c | ||
187 | +++ b/drivers/gpu/drm/drm_drv.c | ||
188 | @@ -119,6 +119,10 @@ static struct drm_ioctl_desc drm_ioctls[] = { | ||
189 | DRM_IOCTL_DEF(DRM_IOCTL_MODESET_CTL, drm_modeset_ctl, 0), | ||
190 | |||
191 | DRM_IOCTL_DEF(DRM_IOCTL_UPDATE_DRAW, drm_update_drawable_info, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||
192 | + | ||
193 | + DRM_IOCTL_DEF(DRM_IOCTL_GEM_CLOSE, drm_gem_close_ioctl, 0), | ||
194 | + DRM_IOCTL_DEF(DRM_IOCTL_GEM_FLINK, drm_gem_flink_ioctl, DRM_AUTH), | ||
195 | + DRM_IOCTL_DEF(DRM_IOCTL_GEM_OPEN, drm_gem_open_ioctl, DRM_AUTH), | ||
196 | }; | ||
197 | |||
198 | #define DRM_CORE_IOCTL_COUNT ARRAY_SIZE( drm_ioctls ) | ||
199 | diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c | ||
200 | index dcf8b4d..0d46627 100644 | ||
201 | --- a/drivers/gpu/drm/drm_fops.c | ||
202 | +++ b/drivers/gpu/drm/drm_fops.c | ||
203 | @@ -256,6 +256,9 @@ static int drm_open_helper(struct inode *inode, struct file *filp, | ||
204 | |||
205 | INIT_LIST_HEAD(&priv->lhead); | ||
206 | |||
207 | + if (dev->driver->driver_features & DRIVER_GEM) | ||
208 | + drm_gem_open(dev, priv); | ||
209 | + | ||
210 | if (dev->driver->open) { | ||
211 | ret = dev->driver->open(dev, priv); | ||
212 | if (ret < 0) | ||
213 | @@ -400,6 +403,9 @@ int drm_release(struct inode *inode, struct file *filp) | ||
214 | dev->driver->reclaim_buffers(dev, file_priv); | ||
215 | } | ||
216 | |||
217 | + if (dev->driver->driver_features & DRIVER_GEM) | ||
218 | + drm_gem_release(dev, file_priv); | ||
219 | + | ||
220 | drm_fasync(-1, filp, 0); | ||
221 | |||
222 | mutex_lock(&dev->ctxlist_mutex); | ||
223 | diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c | ||
224 | new file mode 100644 | ||
225 | index 0000000..434155b | ||
226 | --- /dev/null | ||
227 | +++ b/drivers/gpu/drm/drm_gem.c | ||
228 | @@ -0,0 +1,420 @@ | ||
229 | +/* | ||
230 | + * Copyright © 2008 Intel Corporation | ||
231 | + * | ||
232 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
233 | + * copy of this software and associated documentation files (the "Software"), | ||
234 | + * to deal in the Software without restriction, including without limitation | ||
235 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
236 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
237 | + * Software is furnished to do so, subject to the following conditions: | ||
238 | + * | ||
239 | + * The above copyright notice and this permission notice (including the next | ||
240 | + * paragraph) shall be included in all copies or substantial portions of the | ||
241 | + * Software. | ||
242 | + * | ||
243 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
244 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
245 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
246 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
247 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
248 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
249 | + * IN THE SOFTWARE. | ||
250 | + * | ||
251 | + * Authors: | ||
252 | + * Eric Anholt <eric@anholt.net> | ||
253 | + * | ||
254 | + */ | ||
255 | + | ||
256 | +#include <linux/types.h> | ||
257 | +#include <linux/slab.h> | ||
258 | +#include <linux/mm.h> | ||
259 | +#include <linux/uaccess.h> | ||
260 | +#include <linux/fs.h> | ||
261 | +#include <linux/file.h> | ||
262 | +#include <linux/module.h> | ||
263 | +#include <linux/mman.h> | ||
264 | +#include <linux/pagemap.h> | ||
265 | +#include "drmP.h" | ||
266 | + | ||
267 | +/** @file drm_gem.c | ||
268 | + * | ||
269 | + * This file provides some of the base ioctls and library routines for | ||
270 | + * the graphics memory manager implemented by each device driver. | ||
271 | + * | ||
272 | + * Because various devices have different requirements in terms of | ||
273 | + * synchronization and migration strategies, implementing that is left up to | ||
274 | + * the driver, and all that the general API provides should be generic -- | ||
275 | + * allocating objects, reading/writing data with the cpu, freeing objects. | ||
276 | + * Even there, platform-dependent optimizations for reading/writing data with | ||
277 | + * the CPU mean we'll likely hook those out to driver-specific calls. However, | ||
278 | + * the DRI2 implementation wants to have at least allocate/mmap be generic. | ||
279 | + * | ||
280 | + * The goal was to have swap-backed object allocation managed through | ||
281 | + * struct file. However, file descriptors as handles to a struct file have | ||
282 | + * two major failings: | ||
283 | + * - Process limits prevent more than 1024 or so being used at a time by | ||
284 | + * default. | ||
285 | + * - Inability to allocate high fds will aggravate the X Server's select() | ||
286 | + * handling, and likely that of many GL client applications as well. | ||
287 | + * | ||
288 | + * This led to a plan of using our own integer IDs (called handles, following | ||
289 | + * DRM terminology) to mimic fds, and implement the fd syscalls we need as | ||
290 | + * ioctls. The objects themselves will still include the struct file so | ||
291 | + * that we can transition to fds if the required kernel infrastructure shows | ||
292 | + * up at a later date, and as our interface with shmfs for memory allocation. | ||
293 | + */ | ||
294 | + | ||
295 | +/** | ||
296 | + * Initialize the GEM device fields | ||
297 | + */ | ||
298 | + | ||
299 | +int | ||
300 | +drm_gem_init(struct drm_device *dev) | ||
301 | +{ | ||
302 | + spin_lock_init(&dev->object_name_lock); | ||
303 | + idr_init(&dev->object_name_idr); | ||
304 | + atomic_set(&dev->object_count, 0); | ||
305 | + atomic_set(&dev->object_memory, 0); | ||
306 | + atomic_set(&dev->pin_count, 0); | ||
307 | + atomic_set(&dev->pin_memory, 0); | ||
308 | + atomic_set(&dev->gtt_count, 0); | ||
309 | + atomic_set(&dev->gtt_memory, 0); | ||
310 | + return 0; | ||
311 | +} | ||
312 | + | ||
313 | +/** | ||
314 | + * Allocate a GEM object of the specified size with shmfs backing store | ||
315 | + */ | ||
316 | +struct drm_gem_object * | ||
317 | +drm_gem_object_alloc(struct drm_device *dev, size_t size) | ||
318 | +{ | ||
319 | + struct drm_gem_object *obj; | ||
320 | + | ||
321 | + BUG_ON((size & (PAGE_SIZE - 1)) != 0); | ||
322 | + | ||
323 | + obj = kcalloc(1, sizeof(*obj), GFP_KERNEL); | ||
324 | + | ||
325 | + obj->dev = dev; | ||
326 | + obj->filp = shmem_file_setup("drm mm object", size, 0); | ||
327 | + if (IS_ERR(obj->filp)) { | ||
328 | + kfree(obj); | ||
329 | + return NULL; | ||
330 | + } | ||
331 | + | ||
332 | + kref_init(&obj->refcount); | ||
333 | + kref_init(&obj->handlecount); | ||
334 | + obj->size = size; | ||
335 | + if (dev->driver->gem_init_object != NULL && | ||
336 | + dev->driver->gem_init_object(obj) != 0) { | ||
337 | + fput(obj->filp); | ||
338 | + kfree(obj); | ||
339 | + return NULL; | ||
340 | + } | ||
341 | + atomic_inc(&dev->object_count); | ||
342 | + atomic_add(obj->size, &dev->object_memory); | ||
343 | + return obj; | ||
344 | +} | ||
345 | +EXPORT_SYMBOL(drm_gem_object_alloc); | ||
346 | + | ||
347 | +/** | ||
348 | + * Removes the mapping from handle to filp for this object. | ||
349 | + */ | ||
350 | +static int | ||
351 | +drm_gem_handle_delete(struct drm_file *filp, int handle) | ||
352 | +{ | ||
353 | + struct drm_device *dev; | ||
354 | + struct drm_gem_object *obj; | ||
355 | + | ||
356 | + /* This is gross. The idr system doesn't let us try a delete and | ||
357 | + * return an error code. It just spews if you fail at deleting. | ||
358 | + * So, we have to grab a lock around finding the object and then | ||
359 | + * doing the delete on it and dropping the refcount, or the user | ||
360 | + * could race us to double-decrement the refcount and cause a | ||
361 | + * use-after-free later. Given the frequency of our handle lookups, | ||
362 | + * we may want to use ida for number allocation and a hash table | ||
363 | + * for the pointers, anyway. | ||
364 | + */ | ||
365 | + spin_lock(&filp->table_lock); | ||
366 | + | ||
367 | + /* Check if we currently have a reference on the object */ | ||
368 | + obj = idr_find(&filp->object_idr, handle); | ||
369 | + if (obj == NULL) { | ||
370 | + spin_unlock(&filp->table_lock); | ||
371 | + return -EINVAL; | ||
372 | + } | ||
373 | + dev = obj->dev; | ||
374 | + | ||
375 | + /* Release reference and decrement refcount. */ | ||
376 | + idr_remove(&filp->object_idr, handle); | ||
377 | + spin_unlock(&filp->table_lock); | ||
378 | + | ||
379 | + mutex_lock(&dev->struct_mutex); | ||
380 | + drm_gem_object_handle_unreference(obj); | ||
381 | + mutex_unlock(&dev->struct_mutex); | ||
382 | + | ||
383 | + return 0; | ||
384 | +} | ||
385 | + | ||
386 | +/** | ||
387 | + * Create a handle for this object. This adds a handle reference | ||
388 | + * to the object, which includes a regular reference count. Callers | ||
389 | + * will likely want to dereference the object afterwards. | ||
390 | + */ | ||
391 | +int | ||
392 | +drm_gem_handle_create(struct drm_file *file_priv, | ||
393 | + struct drm_gem_object *obj, | ||
394 | + int *handlep) | ||
395 | +{ | ||
396 | + int ret; | ||
397 | + | ||
398 | + /* | ||
399 | + * Get the user-visible handle using idr. | ||
400 | + */ | ||
401 | +again: | ||
402 | + /* ensure there is space available to allocate a handle */ | ||
403 | + if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0) | ||
404 | + return -ENOMEM; | ||
405 | + | ||
406 | + /* do the allocation under our spinlock */ | ||
407 | + spin_lock(&file_priv->table_lock); | ||
408 | + ret = idr_get_new_above(&file_priv->object_idr, obj, 1, handlep); | ||
409 | + spin_unlock(&file_priv->table_lock); | ||
410 | + if (ret == -EAGAIN) | ||
411 | + goto again; | ||
412 | + | ||
413 | + if (ret != 0) | ||
414 | + return ret; | ||
415 | + | ||
416 | + drm_gem_object_handle_reference(obj); | ||
417 | + return 0; | ||
418 | +} | ||
419 | +EXPORT_SYMBOL(drm_gem_handle_create); | ||
420 | + | ||
421 | +/** Returns a reference to the object named by the handle. */ | ||
422 | +struct drm_gem_object * | ||
423 | +drm_gem_object_lookup(struct drm_device *dev, struct drm_file *filp, | ||
424 | + int handle) | ||
425 | +{ | ||
426 | + struct drm_gem_object *obj; | ||
427 | + | ||
428 | + spin_lock(&filp->table_lock); | ||
429 | + | ||
430 | + /* Check if we currently have a reference on the object */ | ||
431 | + obj = idr_find(&filp->object_idr, handle); | ||
432 | + if (obj == NULL) { | ||
433 | + spin_unlock(&filp->table_lock); | ||
434 | + return NULL; | ||
435 | + } | ||
436 | + | ||
437 | + drm_gem_object_reference(obj); | ||
438 | + | ||
439 | + spin_unlock(&filp->table_lock); | ||
440 | + | ||
441 | + return obj; | ||
442 | +} | ||
443 | +EXPORT_SYMBOL(drm_gem_object_lookup); | ||
444 | + | ||
445 | +/** | ||
446 | + * Releases the handle to an mm object. | ||
447 | + */ | ||
448 | +int | ||
449 | +drm_gem_close_ioctl(struct drm_device *dev, void *data, | ||
450 | + struct drm_file *file_priv) | ||
451 | +{ | ||
452 | + struct drm_gem_close *args = data; | ||
453 | + int ret; | ||
454 | + | ||
455 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
456 | + return -ENODEV; | ||
457 | + | ||
458 | + ret = drm_gem_handle_delete(file_priv, args->handle); | ||
459 | + | ||
460 | + return ret; | ||
461 | +} | ||
462 | + | ||
463 | +/** | ||
464 | + * Create a global name for an object, returning the name. | ||
465 | + * | ||
466 | + * Note that the name does not hold a reference; when the object | ||
467 | + * is freed, the name goes away. | ||
468 | + */ | ||
469 | +int | ||
470 | +drm_gem_flink_ioctl(struct drm_device *dev, void *data, | ||
471 | + struct drm_file *file_priv) | ||
472 | +{ | ||
473 | + struct drm_gem_flink *args = data; | ||
474 | + struct drm_gem_object *obj; | ||
475 | + int ret; | ||
476 | + | ||
477 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
478 | + return -ENODEV; | ||
479 | + | ||
480 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
481 | + if (obj == NULL) | ||
482 | + return -EINVAL; | ||
483 | + | ||
484 | +again: | ||
485 | + if (idr_pre_get(&dev->object_name_idr, GFP_KERNEL) == 0) | ||
486 | + return -ENOMEM; | ||
487 | + | ||
488 | + spin_lock(&dev->object_name_lock); | ||
489 | + if (obj->name) { | ||
490 | + spin_unlock(&dev->object_name_lock); | ||
491 | + return -EEXIST; | ||
492 | + } | ||
493 | + ret = idr_get_new_above(&dev->object_name_idr, obj, 1, | ||
494 | + &obj->name); | ||
495 | + spin_unlock(&dev->object_name_lock); | ||
496 | + if (ret == -EAGAIN) | ||
497 | + goto again; | ||
498 | + | ||
499 | + if (ret != 0) { | ||
500 | + mutex_lock(&dev->struct_mutex); | ||
501 | + drm_gem_object_unreference(obj); | ||
502 | + mutex_unlock(&dev->struct_mutex); | ||
503 | + return ret; | ||
504 | + } | ||
505 | + | ||
506 | + /* | ||
507 | + * Leave the reference from the lookup around as the | ||
508 | + * name table now holds one | ||
509 | + */ | ||
510 | + args->name = (uint64_t) obj->name; | ||
511 | + | ||
512 | + return 0; | ||
513 | +} | ||
514 | + | ||
515 | +/** | ||
516 | + * Open an object using the global name, returning a handle and the size. | ||
517 | + * | ||
518 | + * This handle (of course) holds a reference to the object, so the object | ||
519 | + * will not go away until the handle is deleted. | ||
520 | + */ | ||
521 | +int | ||
522 | +drm_gem_open_ioctl(struct drm_device *dev, void *data, | ||
523 | + struct drm_file *file_priv) | ||
524 | +{ | ||
525 | + struct drm_gem_open *args = data; | ||
526 | + struct drm_gem_object *obj; | ||
527 | + int ret; | ||
528 | + int handle; | ||
529 | + | ||
530 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
531 | + return -ENODEV; | ||
532 | + | ||
533 | + spin_lock(&dev->object_name_lock); | ||
534 | + obj = idr_find(&dev->object_name_idr, (int) args->name); | ||
535 | + if (obj) | ||
536 | + drm_gem_object_reference(obj); | ||
537 | + spin_unlock(&dev->object_name_lock); | ||
538 | + if (!obj) | ||
539 | + return -ENOENT; | ||
540 | + | ||
541 | + ret = drm_gem_handle_create(file_priv, obj, &handle); | ||
542 | + mutex_lock(&dev->struct_mutex); | ||
543 | + drm_gem_object_unreference(obj); | ||
544 | + mutex_unlock(&dev->struct_mutex); | ||
545 | + if (ret) | ||
546 | + return ret; | ||
547 | + | ||
548 | + args->handle = handle; | ||
549 | + args->size = obj->size; | ||
550 | + | ||
551 | + return 0; | ||
552 | +} | ||
553 | + | ||
554 | +/** | ||
555 | + * Called at device open time, sets up the structure for handling refcounting | ||
556 | + * of mm objects. | ||
557 | + */ | ||
558 | +void | ||
559 | +drm_gem_open(struct drm_device *dev, struct drm_file *file_private) | ||
560 | +{ | ||
561 | + idr_init(&file_private->object_idr); | ||
562 | + spin_lock_init(&file_private->table_lock); | ||
563 | +} | ||
564 | + | ||
565 | +/** | ||
566 | + * Called at device close to release the file's | ||
567 | + * handle references on objects. | ||
568 | + */ | ||
569 | +static int | ||
570 | +drm_gem_object_release_handle(int id, void *ptr, void *data) | ||
571 | +{ | ||
572 | + struct drm_gem_object *obj = ptr; | ||
573 | + | ||
574 | + drm_gem_object_handle_unreference(obj); | ||
575 | + | ||
576 | + return 0; | ||
577 | +} | ||
578 | + | ||
579 | +/** | ||
580 | + * Called at close time when the filp is going away. | ||
581 | + * | ||
582 | + * Releases any remaining references on objects by this filp. | ||
583 | + */ | ||
584 | +void | ||
585 | +drm_gem_release(struct drm_device *dev, struct drm_file *file_private) | ||
586 | +{ | ||
587 | + mutex_lock(&dev->struct_mutex); | ||
588 | + idr_for_each(&file_private->object_idr, | ||
589 | + &drm_gem_object_release_handle, NULL); | ||
590 | + | ||
591 | + idr_destroy(&file_private->object_idr); | ||
592 | + mutex_unlock(&dev->struct_mutex); | ||
593 | +} | ||
594 | + | ||
595 | +/** | ||
596 | + * Called after the last reference to the object has been lost. | ||
597 | + * | ||
598 | + * Frees the object | ||
599 | + */ | ||
600 | +void | ||
601 | +drm_gem_object_free(struct kref *kref) | ||
602 | +{ | ||
603 | + struct drm_gem_object *obj = (struct drm_gem_object *) kref; | ||
604 | + struct drm_device *dev = obj->dev; | ||
605 | + | ||
606 | + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
607 | + | ||
608 | + if (dev->driver->gem_free_object != NULL) | ||
609 | + dev->driver->gem_free_object(obj); | ||
610 | + | ||
611 | + fput(obj->filp); | ||
612 | + atomic_dec(&dev->object_count); | ||
613 | + atomic_sub(obj->size, &dev->object_memory); | ||
614 | + kfree(obj); | ||
615 | +} | ||
616 | +EXPORT_SYMBOL(drm_gem_object_free); | ||
617 | + | ||
618 | +/** | ||
619 | + * Called after the last handle to the object has been closed | ||
620 | + * | ||
621 | + * Removes any name for the object. Note that this must be | ||
622 | + * called before drm_gem_object_free or we'll be touching | ||
623 | + * freed memory | ||
624 | + */ | ||
625 | +void | ||
626 | +drm_gem_object_handle_free(struct kref *kref) | ||
627 | +{ | ||
628 | + struct drm_gem_object *obj = container_of(kref, | ||
629 | + struct drm_gem_object, | ||
630 | + handlecount); | ||
631 | + struct drm_device *dev = obj->dev; | ||
632 | + | ||
633 | + /* Remove any name for this object */ | ||
634 | + spin_lock(&dev->object_name_lock); | ||
635 | + if (obj->name) { | ||
636 | + idr_remove(&dev->object_name_idr, obj->name); | ||
637 | + spin_unlock(&dev->object_name_lock); | ||
638 | + /* | ||
639 | + * The object name held a reference to this object, drop | ||
640 | + * that now. | ||
641 | + */ | ||
642 | + drm_gem_object_unreference(obj); | ||
643 | + } else | ||
644 | + spin_unlock(&dev->object_name_lock); | ||
645 | + | ||
646 | +} | ||
647 | +EXPORT_SYMBOL(drm_gem_object_handle_free); | ||
648 | + | ||
649 | diff --git a/drivers/gpu/drm/drm_memory.c b/drivers/gpu/drm/drm_memory.c | ||
650 | index 0177012..803bc9e 100644 | ||
651 | --- a/drivers/gpu/drm/drm_memory.c | ||
652 | +++ b/drivers/gpu/drm/drm_memory.c | ||
653 | @@ -133,6 +133,7 @@ int drm_free_agp(DRM_AGP_MEM * handle, int pages) | ||
654 | { | ||
655 | return drm_agp_free_memory(handle) ? 0 : -EINVAL; | ||
656 | } | ||
657 | +EXPORT_SYMBOL(drm_free_agp); | ||
658 | |||
659 | /** Wrapper around agp_bind_memory() */ | ||
660 | int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start) | ||
661 | @@ -145,6 +146,7 @@ int drm_unbind_agp(DRM_AGP_MEM * handle) | ||
662 | { | ||
663 | return drm_agp_unbind_memory(handle); | ||
664 | } | ||
665 | +EXPORT_SYMBOL(drm_unbind_agp); | ||
666 | |||
667 | #else /* __OS_HAS_AGP */ | ||
668 | static inline void *agp_remap(unsigned long offset, unsigned long size, | ||
669 | diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c | ||
670 | index dcff9e9..217ad7d 100644 | ||
671 | --- a/drivers/gpu/drm/drm_mm.c | ||
672 | +++ b/drivers/gpu/drm/drm_mm.c | ||
673 | @@ -169,6 +169,7 @@ struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent, | ||
674 | |||
675 | return child; | ||
676 | } | ||
677 | +EXPORT_SYMBOL(drm_mm_get_block); | ||
678 | |||
679 | /* | ||
680 | * Put a block. Merge with the previous and / or next block if they are free. | ||
681 | @@ -217,6 +218,7 @@ void drm_mm_put_block(struct drm_mm_node * cur) | ||
682 | drm_free(cur, sizeof(*cur), DRM_MEM_MM); | ||
683 | } | ||
684 | } | ||
685 | +EXPORT_SYMBOL(drm_mm_put_block); | ||
686 | |||
687 | struct drm_mm_node *drm_mm_search_free(const struct drm_mm * mm, | ||
688 | unsigned long size, | ||
689 | @@ -265,6 +267,7 @@ int drm_mm_clean(struct drm_mm * mm) | ||
690 | |||
691 | return (head->next->next == head); | ||
692 | } | ||
693 | +EXPORT_SYMBOL(drm_mm_search_free); | ||
694 | |||
695 | int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | ||
696 | { | ||
697 | @@ -273,7 +276,7 @@ int drm_mm_init(struct drm_mm * mm, unsigned long start, unsigned long size) | ||
698 | |||
699 | return drm_mm_create_tail_node(mm, start, size); | ||
700 | } | ||
701 | - | ||
702 | +EXPORT_SYMBOL(drm_mm_init); | ||
703 | |||
704 | void drm_mm_takedown(struct drm_mm * mm) | ||
705 | { | ||
706 | diff --git a/drivers/gpu/drm/drm_proc.c b/drivers/gpu/drm/drm_proc.c | ||
707 | index 93b1e04..d490db4 100644 | ||
708 | --- a/drivers/gpu/drm/drm_proc.c | ||
709 | +++ b/drivers/gpu/drm/drm_proc.c | ||
710 | @@ -49,6 +49,10 @@ static int drm_queues_info(char *buf, char **start, off_t offset, | ||
711 | int request, int *eof, void *data); | ||
712 | static int drm_bufs_info(char *buf, char **start, off_t offset, | ||
713 | int request, int *eof, void *data); | ||
714 | +static int drm_gem_name_info(char *buf, char **start, off_t offset, | ||
715 | + int request, int *eof, void *data); | ||
716 | +static int drm_gem_object_info(char *buf, char **start, off_t offset, | ||
717 | + int request, int *eof, void *data); | ||
718 | #if DRM_DEBUG_CODE | ||
719 | static int drm_vma_info(char *buf, char **start, off_t offset, | ||
720 | int request, int *eof, void *data); | ||
721 | @@ -60,13 +64,16 @@ static int drm_vma_info(char *buf, char **start, off_t offset, | ||
722 | static struct drm_proc_list { | ||
723 | const char *name; /**< file name */ | ||
724 | int (*f) (char *, char **, off_t, int, int *, void *); /**< proc callback*/ | ||
725 | + u32 driver_features; /**< Required driver features for this entry */ | ||
726 | } drm_proc_list[] = { | ||
727 | - {"name", drm_name_info}, | ||
728 | - {"mem", drm_mem_info}, | ||
729 | - {"vm", drm_vm_info}, | ||
730 | - {"clients", drm_clients_info}, | ||
731 | - {"queues", drm_queues_info}, | ||
732 | - {"bufs", drm_bufs_info}, | ||
733 | + {"name", drm_name_info, 0}, | ||
734 | + {"mem", drm_mem_info, 0}, | ||
735 | + {"vm", drm_vm_info, 0}, | ||
736 | + {"clients", drm_clients_info, 0}, | ||
737 | + {"queues", drm_queues_info, 0}, | ||
738 | + {"bufs", drm_bufs_info, 0}, | ||
739 | + {"gem_names", drm_gem_name_info, DRIVER_GEM}, | ||
740 | + {"gem_objects", drm_gem_object_info, DRIVER_GEM}, | ||
741 | #if DRM_DEBUG_CODE | ||
742 | {"vma", drm_vma_info}, | ||
743 | #endif | ||
744 | @@ -90,8 +97,9 @@ static struct drm_proc_list { | ||
745 | int drm_proc_init(struct drm_minor *minor, int minor_id, | ||
746 | struct proc_dir_entry *root) | ||
747 | { | ||
748 | + struct drm_device *dev = minor->dev; | ||
749 | struct proc_dir_entry *ent; | ||
750 | - int i, j; | ||
751 | + int i, j, ret; | ||
752 | char name[64]; | ||
753 | |||
754 | sprintf(name, "%d", minor_id); | ||
755 | @@ -102,23 +110,42 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, | ||
756 | } | ||
757 | |||
758 | for (i = 0; i < DRM_PROC_ENTRIES; i++) { | ||
759 | + u32 features = drm_proc_list[i].driver_features; | ||
760 | + | ||
761 | + if (features != 0 && | ||
762 | + (dev->driver->driver_features & features) != features) | ||
763 | + continue; | ||
764 | + | ||
765 | ent = create_proc_entry(drm_proc_list[i].name, | ||
766 | S_IFREG | S_IRUGO, minor->dev_root); | ||
767 | if (!ent) { | ||
768 | DRM_ERROR("Cannot create /proc/dri/%s/%s\n", | ||
769 | name, drm_proc_list[i].name); | ||
770 | - for (j = 0; j < i; j++) | ||
771 | - remove_proc_entry(drm_proc_list[i].name, | ||
772 | - minor->dev_root); | ||
773 | - remove_proc_entry(name, root); | ||
774 | - minor->dev_root = NULL; | ||
775 | - return -1; | ||
776 | + ret = -1; | ||
777 | + goto fail; | ||
778 | } | ||
779 | ent->read_proc = drm_proc_list[i].f; | ||
780 | ent->data = minor; | ||
781 | } | ||
782 | |||
783 | + if (dev->driver->proc_init) { | ||
784 | + ret = dev->driver->proc_init(minor); | ||
785 | + if (ret) { | ||
786 | + DRM_ERROR("DRM: Driver failed to initialize " | ||
787 | + "/proc/dri.\n"); | ||
788 | + goto fail; | ||
789 | + } | ||
790 | + } | ||
791 | + | ||
792 | return 0; | ||
793 | + fail: | ||
794 | + | ||
795 | + for (j = 0; j < i; j++) | ||
796 | + remove_proc_entry(drm_proc_list[i].name, | ||
797 | + minor->dev_root); | ||
798 | + remove_proc_entry(name, root); | ||
799 | + minor->dev_root = NULL; | ||
800 | + return ret; | ||
801 | } | ||
802 | |||
803 | /** | ||
804 | @@ -133,12 +160,16 @@ int drm_proc_init(struct drm_minor *minor, int minor_id, | ||
805 | */ | ||
806 | int drm_proc_cleanup(struct drm_minor *minor, struct proc_dir_entry *root) | ||
807 | { | ||
808 | + struct drm_device *dev = minor->dev; | ||
809 | int i; | ||
810 | char name[64]; | ||
811 | |||
812 | if (!root || !minor->dev_root) | ||
813 | return 0; | ||
814 | |||
815 | + if (dev->driver->proc_cleanup) | ||
816 | + dev->driver->proc_cleanup(minor); | ||
817 | + | ||
818 | for (i = 0; i < DRM_PROC_ENTRIES; i++) | ||
819 | remove_proc_entry(drm_proc_list[i].name, minor->dev_root); | ||
820 | sprintf(name, "%d", minor->index); | ||
821 | @@ -480,6 +511,84 @@ static int drm_clients_info(char *buf, char **start, off_t offset, | ||
822 | return ret; | ||
823 | } | ||
824 | |||
825 | +struct drm_gem_name_info_data { | ||
826 | + int len; | ||
827 | + char *buf; | ||
828 | + int eof; | ||
829 | +}; | ||
830 | + | ||
831 | +static int drm_gem_one_name_info(int id, void *ptr, void *data) | ||
832 | +{ | ||
833 | + struct drm_gem_object *obj = ptr; | ||
834 | + struct drm_gem_name_info_data *nid = data; | ||
835 | + | ||
836 | + DRM_INFO("name %d size %d\n", obj->name, obj->size); | ||
837 | + if (nid->eof) | ||
838 | + return 0; | ||
839 | + | ||
840 | + nid->len += sprintf(&nid->buf[nid->len], | ||
841 | + "%6d%9d%8d%9d\n", | ||
842 | + obj->name, obj->size, | ||
843 | + atomic_read(&obj->handlecount.refcount), | ||
844 | + atomic_read(&obj->refcount.refcount)); | ||
845 | + if (nid->len > DRM_PROC_LIMIT) { | ||
846 | + nid->eof = 1; | ||
847 | + return 0; | ||
848 | + } | ||
849 | + return 0; | ||
850 | +} | ||
851 | + | ||
852 | +static int drm_gem_name_info(char *buf, char **start, off_t offset, | ||
853 | + int request, int *eof, void *data) | ||
854 | +{ | ||
855 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
856 | + struct drm_device *dev = minor->dev; | ||
857 | + struct drm_gem_name_info_data nid; | ||
858 | + | ||
859 | + if (offset > DRM_PROC_LIMIT) { | ||
860 | + *eof = 1; | ||
861 | + return 0; | ||
862 | + } | ||
863 | + | ||
864 | + nid.len = sprintf(buf, " name size handles refcount\n"); | ||
865 | + nid.buf = buf; | ||
866 | + nid.eof = 0; | ||
867 | + idr_for_each(&dev->object_name_idr, drm_gem_one_name_info, &nid); | ||
868 | + | ||
869 | + *start = &buf[offset]; | ||
870 | + *eof = 0; | ||
871 | + if (nid.len > request + offset) | ||
872 | + return request; | ||
873 | + *eof = 1; | ||
874 | + return nid.len - offset; | ||
875 | +} | ||
876 | + | ||
877 | +static int drm_gem_object_info(char *buf, char **start, off_t offset, | ||
878 | + int request, int *eof, void *data) | ||
879 | +{ | ||
880 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
881 | + struct drm_device *dev = minor->dev; | ||
882 | + int len = 0; | ||
883 | + | ||
884 | + if (offset > DRM_PROC_LIMIT) { | ||
885 | + *eof = 1; | ||
886 | + return 0; | ||
887 | + } | ||
888 | + | ||
889 | + *start = &buf[offset]; | ||
890 | + *eof = 0; | ||
891 | + DRM_PROC_PRINT("%d objects\n", atomic_read(&dev->object_count)); | ||
892 | + DRM_PROC_PRINT("%d object bytes\n", atomic_read(&dev->object_memory)); | ||
893 | + DRM_PROC_PRINT("%d pinned\n", atomic_read(&dev->pin_count)); | ||
894 | + DRM_PROC_PRINT("%d pin bytes\n", atomic_read(&dev->pin_memory)); | ||
895 | + DRM_PROC_PRINT("%d gtt bytes\n", atomic_read(&dev->gtt_memory)); | ||
896 | + DRM_PROC_PRINT("%d gtt total\n", dev->gtt_total); | ||
897 | + if (len > request + offset) | ||
898 | + return request; | ||
899 | + *eof = 1; | ||
900 | + return len - offset; | ||
901 | +} | ||
902 | + | ||
903 | #if DRM_DEBUG_CODE | ||
904 | |||
905 | static int drm__vma_info(char *buf, char **start, off_t offset, int request, | ||
906 | diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c | ||
907 | index c2f584f..82f4657 100644 | ||
908 | --- a/drivers/gpu/drm/drm_stub.c | ||
909 | +++ b/drivers/gpu/drm/drm_stub.c | ||
910 | @@ -152,6 +152,15 @@ static int drm_fill_in_dev(struct drm_device * dev, struct pci_dev *pdev, | ||
911 | goto error_out_unreg; | ||
912 | } | ||
913 | |||
914 | + if (driver->driver_features & DRIVER_GEM) { | ||
915 | + retcode = drm_gem_init(dev); | ||
916 | + if (retcode) { | ||
917 | + DRM_ERROR("Cannot initialize graphics execution " | ||
918 | + "manager (GEM)\n"); | ||
919 | + goto error_out_unreg; | ||
920 | + } | ||
921 | + } | ||
922 | + | ||
923 | return 0; | ||
924 | |||
925 | error_out_unreg: | ||
926 | @@ -317,6 +326,7 @@ int drm_put_dev(struct drm_device * dev) | ||
927 | int drm_put_minor(struct drm_minor **minor_p) | ||
928 | { | ||
929 | struct drm_minor *minor = *minor_p; | ||
930 | + | ||
931 | DRM_DEBUG("release secondary minor %d\n", minor->index); | ||
932 | |||
933 | if (minor->type == DRM_MINOR_LEGACY) | ||
934 | diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile | ||
935 | index c4bbda6..5ba78e4 100644 | ||
936 | --- a/drivers/gpu/drm/i915/Makefile | ||
937 | +++ b/drivers/gpu/drm/i915/Makefile | ||
938 | @@ -4,7 +4,11 @@ | ||
939 | |||
940 | ccflags-y := -Iinclude/drm | ||
941 | i915-y := i915_drv.o i915_dma.o i915_irq.o i915_mem.o i915_opregion.o \ | ||
942 | - i915_suspend.o | ||
943 | + i915_suspend.o \ | ||
944 | + i915_gem.o \ | ||
945 | + i915_gem_debug.o \ | ||
946 | + i915_gem_proc.o \ | ||
947 | + i915_gem_tiling.o | ||
948 | |||
949 | i915-$(CONFIG_COMPAT) += i915_ioc32.o | ||
950 | |||
951 | diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c | ||
952 | index 8609ec2..3b5aa74 100644 | ||
953 | --- a/drivers/gpu/drm/i915/i915_dma.c | ||
954 | +++ b/drivers/gpu/drm/i915/i915_dma.c | ||
955 | @@ -170,24 +170,31 @@ static int i915_initialize(struct drm_device * dev, drm_i915_init_t * init) | ||
956 | dev_priv->sarea_priv = (drm_i915_sarea_t *) | ||
957 | ((u8 *) dev_priv->sarea->handle + init->sarea_priv_offset); | ||
958 | |||
959 | - dev_priv->ring.Start = init->ring_start; | ||
960 | - dev_priv->ring.End = init->ring_end; | ||
961 | - dev_priv->ring.Size = init->ring_size; | ||
962 | - dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; | ||
963 | + if (init->ring_size != 0) { | ||
964 | + if (dev_priv->ring.ring_obj != NULL) { | ||
965 | + i915_dma_cleanup(dev); | ||
966 | + DRM_ERROR("Client tried to initialize ringbuffer in " | ||
967 | + "GEM mode\n"); | ||
968 | + return -EINVAL; | ||
969 | + } | ||
970 | |||
971 | - dev_priv->ring.map.offset = init->ring_start; | ||
972 | - dev_priv->ring.map.size = init->ring_size; | ||
973 | - dev_priv->ring.map.type = 0; | ||
974 | - dev_priv->ring.map.flags = 0; | ||
975 | - dev_priv->ring.map.mtrr = 0; | ||
976 | + dev_priv->ring.Size = init->ring_size; | ||
977 | + dev_priv->ring.tail_mask = dev_priv->ring.Size - 1; | ||
978 | |||
979 | - drm_core_ioremap(&dev_priv->ring.map, dev); | ||
980 | + dev_priv->ring.map.offset = init->ring_start; | ||
981 | + dev_priv->ring.map.size = init->ring_size; | ||
982 | + dev_priv->ring.map.type = 0; | ||
983 | + dev_priv->ring.map.flags = 0; | ||
984 | + dev_priv->ring.map.mtrr = 0; | ||
985 | |||
986 | - if (dev_priv->ring.map.handle == NULL) { | ||
987 | - i915_dma_cleanup(dev); | ||
988 | - DRM_ERROR("can not ioremap virtual address for" | ||
989 | - " ring buffer\n"); | ||
990 | - return -ENOMEM; | ||
991 | + drm_core_ioremap(&dev_priv->ring.map, dev); | ||
992 | + | ||
993 | + if (dev_priv->ring.map.handle == NULL) { | ||
994 | + i915_dma_cleanup(dev); | ||
995 | + DRM_ERROR("can not ioremap virtual address for" | ||
996 | + " ring buffer\n"); | ||
997 | + return -ENOMEM; | ||
998 | + } | ||
999 | } | ||
1000 | |||
1001 | dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | ||
1002 | @@ -377,9 +384,10 @@ static int i915_emit_cmds(struct drm_device * dev, int __user * buffer, int dwor | ||
1003 | return 0; | ||
1004 | } | ||
1005 | |||
1006 | -static int i915_emit_box(struct drm_device * dev, | ||
1007 | - struct drm_clip_rect __user * boxes, | ||
1008 | - int i, int DR1, int DR4) | ||
1009 | +int | ||
1010 | +i915_emit_box(struct drm_device *dev, | ||
1011 | + struct drm_clip_rect __user *boxes, | ||
1012 | + int i, int DR1, int DR4) | ||
1013 | { | ||
1014 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1015 | struct drm_clip_rect box; | ||
1016 | @@ -681,6 +689,9 @@ static int i915_getparam(struct drm_device *dev, void *data, | ||
1017 | case I915_PARAM_LAST_DISPATCH: | ||
1018 | value = READ_BREADCRUMB(dev_priv); | ||
1019 | break; | ||
1020 | + case I915_PARAM_HAS_GEM: | ||
1021 | + value = 1; | ||
1022 | + break; | ||
1023 | default: | ||
1024 | DRM_ERROR("Unknown parameter %d\n", param->param); | ||
1025 | return -EINVAL; | ||
1026 | @@ -784,6 +795,7 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||
1027 | memset(dev_priv, 0, sizeof(drm_i915_private_t)); | ||
1028 | |||
1029 | dev->dev_private = (void *)dev_priv; | ||
1030 | + dev_priv->dev = dev; | ||
1031 | |||
1032 | /* Add register map (needed for suspend/resume) */ | ||
1033 | base = drm_get_resource_start(dev, mmio_bar); | ||
1034 | @@ -793,6 +805,8 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags) | ||
1035 | _DRM_KERNEL | _DRM_DRIVER, | ||
1036 | &dev_priv->mmio_map); | ||
1037 | |||
1038 | + i915_gem_load(dev); | ||
1039 | + | ||
1040 | /* Init HWS */ | ||
1041 | if (!I915_NEED_GFX_HWS(dev)) { | ||
1042 | ret = i915_init_phys_hws(dev); | ||
1043 | @@ -838,6 +852,25 @@ int i915_driver_unload(struct drm_device *dev) | ||
1044 | return 0; | ||
1045 | } | ||
1046 | |||
1047 | +int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv) | ||
1048 | +{ | ||
1049 | + struct drm_i915_file_private *i915_file_priv; | ||
1050 | + | ||
1051 | + DRM_DEBUG("\n"); | ||
1052 | + i915_file_priv = (struct drm_i915_file_private *) | ||
1053 | + drm_alloc(sizeof(*i915_file_priv), DRM_MEM_FILES); | ||
1054 | + | ||
1055 | + if (!i915_file_priv) | ||
1056 | + return -ENOMEM; | ||
1057 | + | ||
1058 | + file_priv->driver_priv = i915_file_priv; | ||
1059 | + | ||
1060 | + i915_file_priv->mm.last_gem_seqno = 0; | ||
1061 | + i915_file_priv->mm.last_gem_throttle_seqno = 0; | ||
1062 | + | ||
1063 | + return 0; | ||
1064 | +} | ||
1065 | + | ||
1066 | void i915_driver_lastclose(struct drm_device * dev) | ||
1067 | { | ||
1068 | drm_i915_private_t *dev_priv = dev->dev_private; | ||
1069 | @@ -845,6 +878,8 @@ void i915_driver_lastclose(struct drm_device * dev) | ||
1070 | if (!dev_priv) | ||
1071 | return; | ||
1072 | |||
1073 | + i915_gem_lastclose(dev); | ||
1074 | + | ||
1075 | if (dev_priv->agp_heap) | ||
1076 | i915_mem_takedown(&(dev_priv->agp_heap)); | ||
1077 | |||
1078 | @@ -857,6 +892,13 @@ void i915_driver_preclose(struct drm_device * dev, struct drm_file *file_priv) | ||
1079 | i915_mem_release(dev, file_priv, dev_priv->agp_heap); | ||
1080 | } | ||
1081 | |||
1082 | +void i915_driver_postclose(struct drm_device *dev, struct drm_file *file_priv) | ||
1083 | +{ | ||
1084 | + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | ||
1085 | + | ||
1086 | + drm_free(i915_file_priv, sizeof(*i915_file_priv), DRM_MEM_FILES); | ||
1087 | +} | ||
1088 | + | ||
1089 | struct drm_ioctl_desc i915_ioctls[] = { | ||
1090 | DRM_IOCTL_DEF(DRM_I915_INIT, i915_dma_init, DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY), | ||
1091 | DRM_IOCTL_DEF(DRM_I915_FLUSH, i915_flush_ioctl, DRM_AUTH), | ||
1092 | @@ -875,6 +917,22 @@ struct drm_ioctl_desc i915_ioctls[] = { | ||
1093 | DRM_IOCTL_DEF(DRM_I915_GET_VBLANK_PIPE, i915_vblank_pipe_get, DRM_AUTH ), | ||
1094 | DRM_IOCTL_DEF(DRM_I915_VBLANK_SWAP, i915_vblank_swap, DRM_AUTH), | ||
1095 | DRM_IOCTL_DEF(DRM_I915_HWS_ADDR, i915_set_status_page, DRM_AUTH), | ||
1096 | + DRM_IOCTL_DEF(DRM_I915_GEM_INIT, i915_gem_init_ioctl, DRM_AUTH), | ||
1097 | + DRM_IOCTL_DEF(DRM_I915_GEM_EXECBUFFER, i915_gem_execbuffer, DRM_AUTH), | ||
1098 | + DRM_IOCTL_DEF(DRM_I915_GEM_PIN, i915_gem_pin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | ||
1099 | + DRM_IOCTL_DEF(DRM_I915_GEM_UNPIN, i915_gem_unpin_ioctl, DRM_AUTH|DRM_ROOT_ONLY), | ||
1100 | + DRM_IOCTL_DEF(DRM_I915_GEM_BUSY, i915_gem_busy_ioctl, DRM_AUTH), | ||
1101 | + DRM_IOCTL_DEF(DRM_I915_GEM_THROTTLE, i915_gem_throttle_ioctl, DRM_AUTH), | ||
1102 | + DRM_IOCTL_DEF(DRM_I915_GEM_ENTERVT, i915_gem_entervt_ioctl, DRM_AUTH), | ||
1103 | + DRM_IOCTL_DEF(DRM_I915_GEM_LEAVEVT, i915_gem_leavevt_ioctl, DRM_AUTH), | ||
1104 | + DRM_IOCTL_DEF(DRM_I915_GEM_CREATE, i915_gem_create_ioctl, 0), | ||
1105 | + DRM_IOCTL_DEF(DRM_I915_GEM_PREAD, i915_gem_pread_ioctl, 0), | ||
1106 | + DRM_IOCTL_DEF(DRM_I915_GEM_PWRITE, i915_gem_pwrite_ioctl, 0), | ||
1107 | + DRM_IOCTL_DEF(DRM_I915_GEM_MMAP, i915_gem_mmap_ioctl, 0), | ||
1108 | + DRM_IOCTL_DEF(DRM_I915_GEM_SET_DOMAIN, i915_gem_set_domain_ioctl, 0), | ||
1109 | + DRM_IOCTL_DEF(DRM_I915_GEM_SW_FINISH, i915_gem_sw_finish_ioctl, 0), | ||
1110 | + DRM_IOCTL_DEF(DRM_I915_GEM_SET_TILING, i915_gem_set_tiling, 0), | ||
1111 | + DRM_IOCTL_DEF(DRM_I915_GEM_GET_TILING, i915_gem_get_tiling, 0), | ||
1112 | }; | ||
1113 | |||
1114 | int i915_max_ioctl = DRM_ARRAY_SIZE(i915_ioctls); | ||
1115 | diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c | ||
1116 | index 37af03f..a80ead2 100644 | ||
1117 | --- a/drivers/gpu/drm/i915/i915_drv.c | ||
1118 | +++ b/drivers/gpu/drm/i915/i915_drv.c | ||
1119 | @@ -85,12 +85,15 @@ static struct drm_driver driver = { | ||
1120 | /* don't use mtrr's here, the Xserver or user space app should | ||
1121 | * deal with them for intel hardware. | ||
1122 | */ | ||
1123 | - .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | | ||
1124 | - DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED, | ||
1125 | + .driver_features = | ||
1126 | + DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ | ||
1127 | + DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, | ||
1128 | .load = i915_driver_load, | ||
1129 | .unload = i915_driver_unload, | ||
1130 | + .open = i915_driver_open, | ||
1131 | .lastclose = i915_driver_lastclose, | ||
1132 | .preclose = i915_driver_preclose, | ||
1133 | + .postclose = i915_driver_postclose, | ||
1134 | .suspend = i915_suspend, | ||
1135 | .resume = i915_resume, | ||
1136 | .device_is_agp = i915_driver_device_is_agp, | ||
1137 | @@ -104,6 +107,10 @@ static struct drm_driver driver = { | ||
1138 | .reclaim_buffers = drm_core_reclaim_buffers, | ||
1139 | .get_map_ofs = drm_core_get_map_ofs, | ||
1140 | .get_reg_ofs = drm_core_get_reg_ofs, | ||
1141 | + .proc_init = i915_gem_proc_init, | ||
1142 | + .proc_cleanup = i915_gem_proc_cleanup, | ||
1143 | + .gem_init_object = i915_gem_init_object, | ||
1144 | + .gem_free_object = i915_gem_free_object, | ||
1145 | .ioctls = i915_ioctls, | ||
1146 | .fops = { | ||
1147 | .owner = THIS_MODULE, | ||
1148 | diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h | ||
1149 | index d1a02be..87b071a 100644 | ||
1150 | --- a/drivers/gpu/drm/i915/i915_drv.h | ||
1151 | +++ b/drivers/gpu/drm/i915/i915_drv.h | ||
1152 | @@ -39,7 +39,7 @@ | ||
1153 | |||
1154 | #define DRIVER_NAME "i915" | ||
1155 | #define DRIVER_DESC "Intel Graphics" | ||
1156 | -#define DRIVER_DATE "20060119" | ||
1157 | +#define DRIVER_DATE "20080730" | ||
1158 | |||
1159 | enum pipe { | ||
1160 | PIPE_A = 0, | ||
1161 | @@ -60,16 +60,23 @@ enum pipe { | ||
1162 | #define DRIVER_MINOR 6 | ||
1163 | #define DRIVER_PATCHLEVEL 0 | ||
1164 | |||
1165 | +#define WATCH_COHERENCY 0 | ||
1166 | +#define WATCH_BUF 0 | ||
1167 | +#define WATCH_EXEC 0 | ||
1168 | +#define WATCH_LRU 0 | ||
1169 | +#define WATCH_RELOC 0 | ||
1170 | +#define WATCH_INACTIVE 0 | ||
1171 | +#define WATCH_PWRITE 0 | ||
1172 | + | ||
1173 | typedef struct _drm_i915_ring_buffer { | ||
1174 | int tail_mask; | ||
1175 | - unsigned long Start; | ||
1176 | - unsigned long End; | ||
1177 | unsigned long Size; | ||
1178 | u8 *virtual_start; | ||
1179 | int head; | ||
1180 | int tail; | ||
1181 | int space; | ||
1182 | drm_local_map_t map; | ||
1183 | + struct drm_gem_object *ring_obj; | ||
1184 | } drm_i915_ring_buffer_t; | ||
1185 | |||
1186 | struct mem_block { | ||
1187 | @@ -101,6 +108,8 @@ struct intel_opregion { | ||
1188 | }; | ||
1189 | |||
1190 | typedef struct drm_i915_private { | ||
1191 | + struct drm_device *dev; | ||
1192 | + | ||
1193 | drm_local_map_t *sarea; | ||
1194 | drm_local_map_t *mmio_map; | ||
1195 | |||
1196 | @@ -113,6 +122,7 @@ typedef struct drm_i915_private { | ||
1197 | uint32_t counter; | ||
1198 | unsigned int status_gfx_addr; | ||
1199 | drm_local_map_t hws_map; | ||
1200 | + struct drm_gem_object *hws_obj; | ||
1201 | |||
1202 | unsigned int cpp; | ||
1203 | int back_offset; | ||
1204 | @@ -122,7 +132,6 @@ typedef struct drm_i915_private { | ||
1205 | |||
1206 | wait_queue_head_t irq_queue; | ||
1207 | atomic_t irq_received; | ||
1208 | - atomic_t irq_emitted; | ||
1209 | /** Protects user_irq_refcount and irq_mask_reg */ | ||
1210 | spinlock_t user_irq_lock; | ||
1211 | /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */ | ||
1212 | @@ -230,8 +239,174 @@ typedef struct drm_i915_private { | ||
1213 | u8 saveDACMASK; | ||
1214 | u8 saveDACDATA[256*3]; /* 256 3-byte colors */ | ||
1215 | u8 saveCR[37]; | ||
1216 | + | ||
1217 | + struct { | ||
1218 | + struct drm_mm gtt_space; | ||
1219 | + | ||
1220 | + /** | ||
1221 | + * List of objects currently involved in rendering from the | ||
1222 | + * ringbuffer. | ||
1223 | + * | ||
1224 | + * A reference is held on the buffer while on this list. | ||
1225 | + */ | ||
1226 | + struct list_head active_list; | ||
1227 | + | ||
1228 | + /** | ||
1229 | + * List of objects which are not in the ringbuffer but which | ||
1230 | + * still have a write_domain which needs to be flushed before | ||
1231 | + * unbinding. | ||
1232 | + * | ||
1233 | + * A reference is held on the buffer while on this list. | ||
1234 | + */ | ||
1235 | + struct list_head flushing_list; | ||
1236 | + | ||
1237 | + /** | ||
1238 | + * LRU list of objects which are not in the ringbuffer and | ||
1239 | + * are ready to unbind, but are still in the GTT. | ||
1240 | + * | ||
1241 | + * A reference is not held on the buffer while on this list, | ||
1242 | + * as merely being GTT-bound shouldn't prevent its being | ||
1243 | + * freed, and we'll pull it off the list in the free path. | ||
1244 | + */ | ||
1245 | + struct list_head inactive_list; | ||
1246 | + | ||
1247 | + /** | ||
1248 | + * List of breadcrumbs associated with GPU requests currently | ||
1249 | + * outstanding. | ||
1250 | + */ | ||
1251 | + struct list_head request_list; | ||
1252 | + | ||
1253 | + /** | ||
1254 | + * We leave the user IRQ off as much as possible, | ||
1255 | + * but this means that requests will finish and never | ||
1256 | + * be retired once the system goes idle. Set a timer to | ||
1257 | + * fire periodically while the ring is running. When it | ||
1258 | + * fires, go retire requests. | ||
1259 | + */ | ||
1260 | + struct delayed_work retire_work; | ||
1261 | + | ||
1262 | + uint32_t next_gem_seqno; | ||
1263 | + | ||
1264 | + /** | ||
1265 | + * Waiting sequence number, if any | ||
1266 | + */ | ||
1267 | + uint32_t waiting_gem_seqno; | ||
1268 | + | ||
1269 | + /** | ||
1270 | + * Last seq seen at irq time | ||
1271 | + */ | ||
1272 | + uint32_t irq_gem_seqno; | ||
1273 | + | ||
1274 | + /** | ||
1275 | + * Flag if the X Server, and thus DRM, is not currently in | ||
1276 | + * control of the device. | ||
1277 | + * | ||
1278 | + * This is set between LeaveVT and EnterVT. It needs to be | ||
1279 | + * replaced with a semaphore. It also needs to be | ||
1280 | + * transitioned away from for kernel modesetting. | ||
1281 | + */ | ||
1282 | + int suspended; | ||
1283 | + | ||
1284 | + /** | ||
1285 | + * Flag if the hardware appears to be wedged. | ||
1286 | + * | ||
1287 | + * This is set when attempts to idle the device timeout. | ||
1288 | + * It prevents command submission from occuring and makes | ||
1289 | + * every pending request fail | ||
1290 | + */ | ||
1291 | + int wedged; | ||
1292 | + | ||
1293 | + /** Bit 6 swizzling required for X tiling */ | ||
1294 | + uint32_t bit_6_swizzle_x; | ||
1295 | + /** Bit 6 swizzling required for Y tiling */ | ||
1296 | + uint32_t bit_6_swizzle_y; | ||
1297 | + } mm; | ||
1298 | } drm_i915_private_t; | ||
1299 | |||
1300 | +/** driver private structure attached to each drm_gem_object */ | ||
1301 | +struct drm_i915_gem_object { | ||
1302 | + struct drm_gem_object *obj; | ||
1303 | + | ||
1304 | + /** Current space allocated to this object in the GTT, if any. */ | ||
1305 | + struct drm_mm_node *gtt_space; | ||
1306 | + | ||
1307 | + /** This object's place on the active/flushing/inactive lists */ | ||
1308 | + struct list_head list; | ||
1309 | + | ||
1310 | + /** | ||
1311 | + * This is set if the object is on the active or flushing lists | ||
1312 | + * (has pending rendering), and is not set if it's on inactive (ready | ||
1313 | + * to be unbound). | ||
1314 | + */ | ||
1315 | + int active; | ||
1316 | + | ||
1317 | + /** | ||
1318 | + * This is set if the object has been written to since last bound | ||
1319 | + * to the GTT | ||
1320 | + */ | ||
1321 | + int dirty; | ||
1322 | + | ||
1323 | + /** AGP memory structure for our GTT binding. */ | ||
1324 | + DRM_AGP_MEM *agp_mem; | ||
1325 | + | ||
1326 | + struct page **page_list; | ||
1327 | + | ||
1328 | + /** | ||
1329 | + * Current offset of the object in GTT space. | ||
1330 | + * | ||
1331 | + * This is the same as gtt_space->start | ||
1332 | + */ | ||
1333 | + uint32_t gtt_offset; | ||
1334 | + | ||
1335 | + /** Boolean whether this object has a valid gtt offset. */ | ||
1336 | + int gtt_bound; | ||
1337 | + | ||
1338 | + /** How many users have pinned this object in GTT space */ | ||
1339 | + int pin_count; | ||
1340 | + | ||
1341 | + /** Breadcrumb of last rendering to the buffer. */ | ||
1342 | + uint32_t last_rendering_seqno; | ||
1343 | + | ||
1344 | + /** Current tiling mode for the object. */ | ||
1345 | + uint32_t tiling_mode; | ||
1346 | + | ||
1347 | + /** | ||
1348 | + * Flagging of which individual pages are valid in GEM_DOMAIN_CPU when | ||
1349 | + * GEM_DOMAIN_CPU is not in the object's read domain. | ||
1350 | + */ | ||
1351 | + uint8_t *page_cpu_valid; | ||
1352 | +}; | ||
1353 | + | ||
1354 | +/** | ||
1355 | + * Request queue structure. | ||
1356 | + * | ||
1357 | + * The request queue allows us to note sequence numbers that have been emitted | ||
1358 | + * and may be associated with active buffers to be retired. | ||
1359 | + * | ||
1360 | + * By keeping this list, we can avoid having to do questionable | ||
1361 | + * sequence-number comparisons on buffer last_rendering_seqnos, and associate | ||
1362 | + * an emission time with seqnos for tracking how far ahead of the GPU we are. | ||
1363 | + */ | ||
1364 | +struct drm_i915_gem_request { | ||
1365 | + /** GEM sequence number associated with this request. */ | ||
1366 | + uint32_t seqno; | ||
1367 | + | ||
1368 | + /** Time at which this request was emitted, in jiffies. */ | ||
1369 | + unsigned long emitted_jiffies; | ||
1370 | + | ||
1371 | + /** Cache domains that were flushed at the start of the request. */ | ||
1372 | + uint32_t flush_domains; | ||
1373 | + | ||
1374 | + struct list_head list; | ||
1375 | +}; | ||
1376 | + | ||
1377 | +struct drm_i915_file_private { | ||
1378 | + struct { | ||
1379 | + uint32_t last_gem_seqno; | ||
1380 | + uint32_t last_gem_throttle_seqno; | ||
1381 | + } mm; | ||
1382 | +}; | ||
1383 | + | ||
1384 | extern struct drm_ioctl_desc i915_ioctls[]; | ||
1385 | extern int i915_max_ioctl; | ||
1386 | |||
1387 | @@ -239,18 +414,26 @@ extern int i915_max_ioctl; | ||
1388 | extern void i915_kernel_lost_context(struct drm_device * dev); | ||
1389 | extern int i915_driver_load(struct drm_device *, unsigned long flags); | ||
1390 | extern int i915_driver_unload(struct drm_device *); | ||
1391 | +extern int i915_driver_open(struct drm_device *dev, struct drm_file *file_priv); | ||
1392 | extern void i915_driver_lastclose(struct drm_device * dev); | ||
1393 | extern void i915_driver_preclose(struct drm_device *dev, | ||
1394 | struct drm_file *file_priv); | ||
1395 | +extern void i915_driver_postclose(struct drm_device *dev, | ||
1396 | + struct drm_file *file_priv); | ||
1397 | extern int i915_driver_device_is_agp(struct drm_device * dev); | ||
1398 | extern long i915_compat_ioctl(struct file *filp, unsigned int cmd, | ||
1399 | unsigned long arg); | ||
1400 | +extern int i915_emit_box(struct drm_device *dev, | ||
1401 | + struct drm_clip_rect __user *boxes, | ||
1402 | + int i, int DR1, int DR4); | ||
1403 | |||
1404 | /* i915_irq.c */ | ||
1405 | extern int i915_irq_emit(struct drm_device *dev, void *data, | ||
1406 | struct drm_file *file_priv); | ||
1407 | extern int i915_irq_wait(struct drm_device *dev, void *data, | ||
1408 | struct drm_file *file_priv); | ||
1409 | +void i915_user_irq_get(struct drm_device *dev); | ||
1410 | +void i915_user_irq_put(struct drm_device *dev); | ||
1411 | |||
1412 | extern irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS); | ||
1413 | extern void i915_driver_irq_preinstall(struct drm_device * dev); | ||
1414 | @@ -279,6 +462,67 @@ extern int i915_mem_destroy_heap(struct drm_device *dev, void *data, | ||
1415 | extern void i915_mem_takedown(struct mem_block **heap); | ||
1416 | extern void i915_mem_release(struct drm_device * dev, | ||
1417 | struct drm_file *file_priv, struct mem_block *heap); | ||
1418 | +/* i915_gem.c */ | ||
1419 | +int i915_gem_init_ioctl(struct drm_device *dev, void *data, | ||
1420 | + struct drm_file *file_priv); | ||
1421 | +int i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
1422 | + struct drm_file *file_priv); | ||
1423 | +int i915_gem_pread_ioctl(struct drm_device *dev, void *data, | ||
1424 | + struct drm_file *file_priv); | ||
1425 | +int i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | ||
1426 | + struct drm_file *file_priv); | ||
1427 | +int i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
1428 | + struct drm_file *file_priv); | ||
1429 | +int i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | ||
1430 | + struct drm_file *file_priv); | ||
1431 | +int i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | ||
1432 | + struct drm_file *file_priv); | ||
1433 | +int i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
1434 | + struct drm_file *file_priv); | ||
1435 | +int i915_gem_pin_ioctl(struct drm_device *dev, void *data, | ||
1436 | + struct drm_file *file_priv); | ||
1437 | +int i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | ||
1438 | + struct drm_file *file_priv); | ||
1439 | +int i915_gem_busy_ioctl(struct drm_device *dev, void *data, | ||
1440 | + struct drm_file *file_priv); | ||
1441 | +int i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | ||
1442 | + struct drm_file *file_priv); | ||
1443 | +int i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | ||
1444 | + struct drm_file *file_priv); | ||
1445 | +int i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | ||
1446 | + struct drm_file *file_priv); | ||
1447 | +int i915_gem_set_tiling(struct drm_device *dev, void *data, | ||
1448 | + struct drm_file *file_priv); | ||
1449 | +int i915_gem_get_tiling(struct drm_device *dev, void *data, | ||
1450 | + struct drm_file *file_priv); | ||
1451 | +void i915_gem_load(struct drm_device *dev); | ||
1452 | +int i915_gem_proc_init(struct drm_minor *minor); | ||
1453 | +void i915_gem_proc_cleanup(struct drm_minor *minor); | ||
1454 | +int i915_gem_init_object(struct drm_gem_object *obj); | ||
1455 | +void i915_gem_free_object(struct drm_gem_object *obj); | ||
1456 | +int i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment); | ||
1457 | +void i915_gem_object_unpin(struct drm_gem_object *obj); | ||
1458 | +void i915_gem_lastclose(struct drm_device *dev); | ||
1459 | +uint32_t i915_get_gem_seqno(struct drm_device *dev); | ||
1460 | +void i915_gem_retire_requests(struct drm_device *dev); | ||
1461 | +void i915_gem_retire_work_handler(struct work_struct *work); | ||
1462 | +void i915_gem_clflush_object(struct drm_gem_object *obj); | ||
1463 | + | ||
1464 | +/* i915_gem_tiling.c */ | ||
1465 | +void i915_gem_detect_bit_6_swizzle(struct drm_device *dev); | ||
1466 | + | ||
1467 | +/* i915_gem_debug.c */ | ||
1468 | +void i915_gem_dump_object(struct drm_gem_object *obj, int len, | ||
1469 | + const char *where, uint32_t mark); | ||
1470 | +#if WATCH_INACTIVE | ||
1471 | +void i915_verify_inactive(struct drm_device *dev, char *file, int line); | ||
1472 | +#else | ||
1473 | +#define i915_verify_inactive(dev, file, line) | ||
1474 | +#endif | ||
1475 | +void i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle); | ||
1476 | +void i915_gem_dump_object(struct drm_gem_object *obj, int len, | ||
1477 | + const char *where, uint32_t mark); | ||
1478 | +void i915_dump_lru(struct drm_device *dev, const char *where); | ||
1479 | |||
1480 | /* i915_suspend.c */ | ||
1481 | extern int i915_save_state(struct drm_device *dev); | ||
1482 | @@ -347,6 +591,7 @@ extern void opregion_enable_asle(struct drm_device *dev); | ||
1483 | */ | ||
1484 | #define READ_HWSP(dev_priv, reg) (((volatile u32*)(dev_priv->hw_status_page))[reg]) | ||
1485 | #define READ_BREADCRUMB(dev_priv) READ_HWSP(dev_priv, 5) | ||
1486 | +#define I915_GEM_HWS_INDEX 0x10 | ||
1487 | |||
1488 | extern int i915_wait_ring(struct drm_device * dev, int n, const char *caller); | ||
1489 | |||
1490 | diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c | ||
1491 | new file mode 100644 | ||
1492 | index 0000000..90ae8a0 | ||
1493 | --- /dev/null | ||
1494 | +++ b/drivers/gpu/drm/i915/i915_gem.c | ||
1495 | @@ -0,0 +1,2497 @@ | ||
1496 | +/* | ||
1497 | + * Copyright © 2008 Intel Corporation | ||
1498 | + * | ||
1499 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
1500 | + * copy of this software and associated documentation files (the "Software"), | ||
1501 | + * to deal in the Software without restriction, including without limitation | ||
1502 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
1503 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
1504 | + * Software is furnished to do so, subject to the following conditions: | ||
1505 | + * | ||
1506 | + * The above copyright notice and this permission notice (including the next | ||
1507 | + * paragraph) shall be included in all copies or substantial portions of the | ||
1508 | + * Software. | ||
1509 | + * | ||
1510 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
1511 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
1512 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
1513 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
1514 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
1515 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
1516 | + * IN THE SOFTWARE. | ||
1517 | + * | ||
1518 | + * Authors: | ||
1519 | + * Eric Anholt <eric@anholt.net> | ||
1520 | + * | ||
1521 | + */ | ||
1522 | + | ||
1523 | +#include "drmP.h" | ||
1524 | +#include "drm.h" | ||
1525 | +#include "i915_drm.h" | ||
1526 | +#include "i915_drv.h" | ||
1527 | +#include <linux/swap.h> | ||
1528 | + | ||
1529 | +static int | ||
1530 | +i915_gem_object_set_domain(struct drm_gem_object *obj, | ||
1531 | + uint32_t read_domains, | ||
1532 | + uint32_t write_domain); | ||
1533 | +static int | ||
1534 | +i915_gem_object_set_domain_range(struct drm_gem_object *obj, | ||
1535 | + uint64_t offset, | ||
1536 | + uint64_t size, | ||
1537 | + uint32_t read_domains, | ||
1538 | + uint32_t write_domain); | ||
1539 | +static int | ||
1540 | +i915_gem_set_domain(struct drm_gem_object *obj, | ||
1541 | + struct drm_file *file_priv, | ||
1542 | + uint32_t read_domains, | ||
1543 | + uint32_t write_domain); | ||
1544 | +static int i915_gem_object_get_page_list(struct drm_gem_object *obj); | ||
1545 | +static void i915_gem_object_free_page_list(struct drm_gem_object *obj); | ||
1546 | +static int i915_gem_object_wait_rendering(struct drm_gem_object *obj); | ||
1547 | + | ||
1548 | +int | ||
1549 | +i915_gem_init_ioctl(struct drm_device *dev, void *data, | ||
1550 | + struct drm_file *file_priv) | ||
1551 | +{ | ||
1552 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
1553 | + struct drm_i915_gem_init *args = data; | ||
1554 | + | ||
1555 | + mutex_lock(&dev->struct_mutex); | ||
1556 | + | ||
1557 | + if (args->gtt_start >= args->gtt_end || | ||
1558 | + (args->gtt_start & (PAGE_SIZE - 1)) != 0 || | ||
1559 | + (args->gtt_end & (PAGE_SIZE - 1)) != 0) { | ||
1560 | + mutex_unlock(&dev->struct_mutex); | ||
1561 | + return -EINVAL; | ||
1562 | + } | ||
1563 | + | ||
1564 | + drm_mm_init(&dev_priv->mm.gtt_space, args->gtt_start, | ||
1565 | + args->gtt_end - args->gtt_start); | ||
1566 | + | ||
1567 | + dev->gtt_total = (uint32_t) (args->gtt_end - args->gtt_start); | ||
1568 | + | ||
1569 | + mutex_unlock(&dev->struct_mutex); | ||
1570 | + | ||
1571 | + return 0; | ||
1572 | +} | ||
1573 | + | ||
1574 | + | ||
1575 | +/** | ||
1576 | + * Creates a new mm object and returns a handle to it. | ||
1577 | + */ | ||
1578 | +int | ||
1579 | +i915_gem_create_ioctl(struct drm_device *dev, void *data, | ||
1580 | + struct drm_file *file_priv) | ||
1581 | +{ | ||
1582 | + struct drm_i915_gem_create *args = data; | ||
1583 | + struct drm_gem_object *obj; | ||
1584 | + int handle, ret; | ||
1585 | + | ||
1586 | + args->size = roundup(args->size, PAGE_SIZE); | ||
1587 | + | ||
1588 | + /* Allocate the new object */ | ||
1589 | + obj = drm_gem_object_alloc(dev, args->size); | ||
1590 | + if (obj == NULL) | ||
1591 | + return -ENOMEM; | ||
1592 | + | ||
1593 | + ret = drm_gem_handle_create(file_priv, obj, &handle); | ||
1594 | + mutex_lock(&dev->struct_mutex); | ||
1595 | + drm_gem_object_handle_unreference(obj); | ||
1596 | + mutex_unlock(&dev->struct_mutex); | ||
1597 | + | ||
1598 | + if (ret) | ||
1599 | + return ret; | ||
1600 | + | ||
1601 | + args->handle = handle; | ||
1602 | + | ||
1603 | + return 0; | ||
1604 | +} | ||
1605 | + | ||
1606 | +/** | ||
1607 | + * Reads data from the object referenced by handle. | ||
1608 | + * | ||
1609 | + * On error, the contents of *data are undefined. | ||
1610 | + */ | ||
1611 | +int | ||
1612 | +i915_gem_pread_ioctl(struct drm_device *dev, void *data, | ||
1613 | + struct drm_file *file_priv) | ||
1614 | +{ | ||
1615 | + struct drm_i915_gem_pread *args = data; | ||
1616 | + struct drm_gem_object *obj; | ||
1617 | + struct drm_i915_gem_object *obj_priv; | ||
1618 | + ssize_t read; | ||
1619 | + loff_t offset; | ||
1620 | + int ret; | ||
1621 | + | ||
1622 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1623 | + if (obj == NULL) | ||
1624 | + return -EBADF; | ||
1625 | + obj_priv = obj->driver_private; | ||
1626 | + | ||
1627 | + /* Bounds check source. | ||
1628 | + * | ||
1629 | + * XXX: This could use review for overflow issues... | ||
1630 | + */ | ||
1631 | + if (args->offset > obj->size || args->size > obj->size || | ||
1632 | + args->offset + args->size > obj->size) { | ||
1633 | + drm_gem_object_unreference(obj); | ||
1634 | + return -EINVAL; | ||
1635 | + } | ||
1636 | + | ||
1637 | + mutex_lock(&dev->struct_mutex); | ||
1638 | + | ||
1639 | + ret = i915_gem_object_set_domain_range(obj, args->offset, args->size, | ||
1640 | + I915_GEM_DOMAIN_CPU, 0); | ||
1641 | + if (ret != 0) { | ||
1642 | + drm_gem_object_unreference(obj); | ||
1643 | + mutex_unlock(&dev->struct_mutex); | ||
1644 | + return ret; | ||
1645 | + } | ||
1646 | + offset = args->offset; | ||
1647 | + | ||
1648 | + read = vfs_read(obj->filp, (char __user *)(uintptr_t)args->data_ptr, | ||
1649 | + args->size, &offset); | ||
1650 | + if (read != args->size) { | ||
1651 | + drm_gem_object_unreference(obj); | ||
1652 | + mutex_unlock(&dev->struct_mutex); | ||
1653 | + if (read < 0) | ||
1654 | + return read; | ||
1655 | + else | ||
1656 | + return -EINVAL; | ||
1657 | + } | ||
1658 | + | ||
1659 | + drm_gem_object_unreference(obj); | ||
1660 | + mutex_unlock(&dev->struct_mutex); | ||
1661 | + | ||
1662 | + return 0; | ||
1663 | +} | ||
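The bounds checks in the pread and pwrite paths both carry an "XXX ... overflow issues" note; a minimal sketch of a wrap-proof variant, assuming a hypothetical helper name (range_check() is not something this patch defines):

static inline int range_check(uint64_t offset, uint64_t size, uint64_t total)
{
	/* Reject the range without ever computing offset + size, which could
	 * wrap for large user-supplied values.
	 */
	return offset > total || size > total - offset;
}

Used in place of the three-way comparison, the check would read "if (range_check(args->offset, args->size, obj->size)) return -EINVAL;".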
1664 | + | ||
1665 | +static int | ||
1666 | +i915_gem_gtt_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
1667 | + struct drm_i915_gem_pwrite *args, | ||
1668 | + struct drm_file *file_priv) | ||
1669 | +{ | ||
1670 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1671 | + ssize_t remain; | ||
1672 | + loff_t offset; | ||
1673 | + char __user *user_data; | ||
1674 | + char *vaddr; | ||
1675 | + int i, o, l; | ||
1676 | + int ret = 0; | ||
1677 | + unsigned long pfn; | ||
1678 | + unsigned long unwritten; | ||
1679 | + | ||
1680 | + user_data = (char __user *) (uintptr_t) args->data_ptr; | ||
1681 | + remain = args->size; | ||
1682 | + if (!access_ok(VERIFY_READ, user_data, remain)) | ||
1683 | + return -EFAULT; | ||
1684 | + | ||
1685 | + | ||
1686 | + mutex_lock(&dev->struct_mutex); | ||
1687 | + ret = i915_gem_object_pin(obj, 0); | ||
1688 | + if (ret) { | ||
1689 | + mutex_unlock(&dev->struct_mutex); | ||
1690 | + return ret; | ||
1691 | + } | ||
1692 | + ret = i915_gem_set_domain(obj, file_priv, | ||
1693 | + I915_GEM_DOMAIN_GTT, I915_GEM_DOMAIN_GTT); | ||
1694 | + if (ret) | ||
1695 | + goto fail; | ||
1696 | + | ||
1697 | + obj_priv = obj->driver_private; | ||
1698 | + offset = obj_priv->gtt_offset + args->offset; | ||
1699 | + obj_priv->dirty = 1; | ||
1700 | + | ||
1701 | + while (remain > 0) { | ||
1702 | + /* Operation in this page | ||
1703 | + * | ||
1704 | + * i = page number | ||
1705 | + * o = offset within page | ||
1706 | + * l = bytes to copy | ||
1707 | + */ | ||
1708 | + i = offset >> PAGE_SHIFT; | ||
1709 | + o = offset & (PAGE_SIZE-1); | ||
1710 | + l = remain; | ||
1711 | + if ((o + l) > PAGE_SIZE) | ||
1712 | + l = PAGE_SIZE - o; | ||
1713 | + | ||
1714 | + pfn = (dev->agp->base >> PAGE_SHIFT) + i; | ||
1715 | + | ||
1716 | +#ifdef CONFIG_HIGHMEM | ||
1717 | + /* kmap_atomic can't map IO pages on non-HIGHMEM kernels | ||
1718 | + */ | ||
1719 | + vaddr = kmap_atomic_pfn(pfn, KM_USER0); | ||
1720 | +#if WATCH_PWRITE | ||
1721 | + DRM_INFO("pwrite i %d o %d l %d pfn %ld vaddr %p\n", | ||
1722 | + i, o, l, pfn, vaddr); | ||
1723 | +#endif | ||
1724 | + unwritten = __copy_from_user_inatomic_nocache(vaddr + o, | ||
1725 | + user_data, l); | ||
1726 | + kunmap_atomic(vaddr, KM_USER0); | ||
1727 | + | ||
1728 | + if (unwritten) | ||
1729 | +#endif /* CONFIG_HIGHMEM */ | ||
1730 | + { | ||
1731 | + vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE); | ||
1732 | +#if WATCH_PWRITE | ||
1733 | + DRM_INFO("pwrite slow i %d o %d l %d " | ||
1734 | + "pfn %ld vaddr %p\n", | ||
1735 | + i, o, l, pfn, vaddr); | ||
1736 | +#endif | ||
1737 | + if (vaddr == NULL) { | ||
1738 | + ret = -EFAULT; | ||
1739 | + goto fail; | ||
1740 | + } | ||
1741 | + unwritten = __copy_from_user(vaddr + o, user_data, l); | ||
1742 | +#if WATCH_PWRITE | ||
1743 | + DRM_INFO("unwritten %ld\n", unwritten); | ||
1744 | +#endif | ||
1745 | + iounmap(vaddr); | ||
1746 | + if (unwritten) { | ||
1747 | + ret = -EFAULT; | ||
1748 | + goto fail; | ||
1749 | + } | ||
1750 | + } | ||
1751 | + | ||
1752 | + remain -= l; | ||
1753 | + user_data += l; | ||
1754 | + offset += l; | ||
1755 | + } | ||
1756 | +#if WATCH_PWRITE && 1 | ||
1757 | + i915_gem_clflush_object(obj); | ||
1758 | + i915_gem_dump_object(obj, args->offset + args->size, __func__, ~0); | ||
1759 | + i915_gem_clflush_object(obj); | ||
1760 | +#endif | ||
1761 | + | ||
1762 | +fail: | ||
1763 | + i915_gem_object_unpin(obj); | ||
1764 | + mutex_unlock(&dev->struct_mutex); | ||
1765 | + | ||
1766 | + return ret; | ||
1767 | +} | ||
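The i/o/l arithmetic in the copy loop above is the usual split of a linear offset into per-page chunks, so no single copy crosses a page boundary; a self-contained restatement of just that decomposition (split_per_page() is only an illustration, with the copy step elided):

static void split_per_page(unsigned long offset, unsigned long remain)
{
	while (remain > 0) {
		unsigned long i = offset >> PAGE_SHIFT;       /* page number     */
		unsigned long o = offset & (PAGE_SIZE - 1);   /* offset in page  */
		unsigned long l = remain;                     /* bytes this pass */

		if (o + l > PAGE_SIZE)
			l = PAGE_SIZE - o;

		/* copy l bytes into page i at offset o here */

		remain -= l;
		offset += l;
	}
}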
1768 | + | ||
1769 | +int | ||
1770 | +i915_gem_shmem_pwrite(struct drm_device *dev, struct drm_gem_object *obj, | ||
1771 | + struct drm_i915_gem_pwrite *args, | ||
1772 | + struct drm_file *file_priv) | ||
1773 | +{ | ||
1774 | + int ret; | ||
1775 | + loff_t offset; | ||
1776 | + ssize_t written; | ||
1777 | + | ||
1778 | + mutex_lock(&dev->struct_mutex); | ||
1779 | + | ||
1780 | + ret = i915_gem_set_domain(obj, file_priv, | ||
1781 | + I915_GEM_DOMAIN_CPU, I915_GEM_DOMAIN_CPU); | ||
1782 | + if (ret) { | ||
1783 | + mutex_unlock(&dev->struct_mutex); | ||
1784 | + return ret; | ||
1785 | + } | ||
1786 | + | ||
1787 | + offset = args->offset; | ||
1788 | + | ||
1789 | + written = vfs_write(obj->filp, | ||
1790 | + (char __user *)(uintptr_t) args->data_ptr, | ||
1791 | + args->size, &offset); | ||
1792 | + if (written != args->size) { | ||
1793 | + mutex_unlock(&dev->struct_mutex); | ||
1794 | + if (written < 0) | ||
1795 | + return written; | ||
1796 | + else | ||
1797 | + return -EINVAL; | ||
1798 | + } | ||
1799 | + | ||
1800 | + mutex_unlock(&dev->struct_mutex); | ||
1801 | + | ||
1802 | + return 0; | ||
1803 | +} | ||
1804 | + | ||
1805 | +/** | ||
1806 | + * Writes data to the object referenced by handle. | ||
1807 | + * | ||
1808 | + * On error, the contents of the buffer that were to be modified are undefined. | ||
1809 | + */ | ||
1810 | +int | ||
1811 | +i915_gem_pwrite_ioctl(struct drm_device *dev, void *data, | ||
1812 | + struct drm_file *file_priv) | ||
1813 | +{ | ||
1814 | + struct drm_i915_gem_pwrite *args = data; | ||
1815 | + struct drm_gem_object *obj; | ||
1816 | + struct drm_i915_gem_object *obj_priv; | ||
1817 | + int ret = 0; | ||
1818 | + | ||
1819 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1820 | + if (obj == NULL) | ||
1821 | + return -EBADF; | ||
1822 | + obj_priv = obj->driver_private; | ||
1823 | + | ||
1824 | + /* Bounds check destination. | ||
1825 | + * | ||
1826 | + * XXX: This could use review for overflow issues... | ||
1827 | + */ | ||
1828 | + if (args->offset > obj->size || args->size > obj->size || | ||
1829 | + args->offset + args->size > obj->size) { | ||
1830 | + drm_gem_object_unreference(obj); | ||
1831 | + return -EINVAL; | ||
1832 | + } | ||
1833 | + | ||
1834 | + /* We can only do the GTT pwrite on untiled buffers, as otherwise | ||
1835 | + * it would end up going through the fenced access, and we'll get | ||
1836 | + * different detiling behavior between reading and writing. | ||
1837 | + * pread/pwrite currently are reading and writing from the CPU | ||
1838 | + * perspective, requiring manual detiling by the client. | ||
1839 | + */ | ||
1840 | + if (obj_priv->tiling_mode == I915_TILING_NONE && | ||
1841 | + dev->gtt_total != 0) | ||
1842 | + ret = i915_gem_gtt_pwrite(dev, obj, args, file_priv); | ||
1843 | + else | ||
1844 | + ret = i915_gem_shmem_pwrite(dev, obj, args, file_priv); | ||
1845 | + | ||
1846 | +#if WATCH_PWRITE | ||
1847 | + if (ret) | ||
1848 | + DRM_INFO("pwrite failed %d\n", ret); | ||
1849 | +#endif | ||
1850 | + | ||
1851 | + drm_gem_object_unreference(obj); | ||
1852 | + | ||
1853 | + return ret; | ||
1854 | +} | ||
1855 | + | ||
1856 | +/** | ||
1857 | + * Called when user space prepares to use an object | ||
1858 | + */ | ||
1859 | +int | ||
1860 | +i915_gem_set_domain_ioctl(struct drm_device *dev, void *data, | ||
1861 | + struct drm_file *file_priv) | ||
1862 | +{ | ||
1863 | + struct drm_i915_gem_set_domain *args = data; | ||
1864 | + struct drm_gem_object *obj; | ||
1865 | + int ret; | ||
1866 | + | ||
1867 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
1868 | + return -ENODEV; | ||
1869 | + | ||
1870 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1871 | + if (obj == NULL) | ||
1872 | + return -EBADF; | ||
1873 | + | ||
1874 | + mutex_lock(&dev->struct_mutex); | ||
1875 | +#if WATCH_BUF | ||
1876 | + DRM_INFO("set_domain_ioctl %p(%d), %08x %08x\n", | ||
1877 | + obj, obj->size, args->read_domains, args->write_domain); | ||
1878 | +#endif | ||
1879 | + ret = i915_gem_set_domain(obj, file_priv, | ||
1880 | + args->read_domains, args->write_domain); | ||
1881 | + drm_gem_object_unreference(obj); | ||
1882 | + mutex_unlock(&dev->struct_mutex); | ||
1883 | + return ret; | ||
1884 | +} | ||
1885 | + | ||
1886 | +/** | ||
1887 | + * Called when user space has done writes to this buffer | ||
1888 | + */ | ||
1889 | +int | ||
1890 | +i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data, | ||
1891 | + struct drm_file *file_priv) | ||
1892 | +{ | ||
1893 | + struct drm_i915_gem_sw_finish *args = data; | ||
1894 | + struct drm_gem_object *obj; | ||
1895 | + struct drm_i915_gem_object *obj_priv; | ||
1896 | + int ret = 0; | ||
1897 | + | ||
1898 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
1899 | + return -ENODEV; | ||
1900 | + | ||
1901 | + mutex_lock(&dev->struct_mutex); | ||
1902 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1903 | + if (obj == NULL) { | ||
1904 | + mutex_unlock(&dev->struct_mutex); | ||
1905 | + return -EBADF; | ||
1906 | + } | ||
1907 | + | ||
1908 | +#if WATCH_BUF | ||
1909 | + DRM_INFO("%s: sw_finish %d (%p %d)\n", | ||
1910 | + __func__, args->handle, obj, obj->size); | ||
1911 | +#endif | ||
1912 | + obj_priv = obj->driver_private; | ||
1913 | + | ||
1914 | + /* Pinned buffers may be scanout, so flush the cache */ | ||
1915 | + if ((obj->write_domain & I915_GEM_DOMAIN_CPU) && obj_priv->pin_count) { | ||
1916 | + i915_gem_clflush_object(obj); | ||
1917 | + drm_agp_chipset_flush(dev); | ||
1918 | + } | ||
1919 | + drm_gem_object_unreference(obj); | ||
1920 | + mutex_unlock(&dev->struct_mutex); | ||
1921 | + return ret; | ||
1922 | +} | ||
1923 | + | ||
1924 | +/** | ||
1925 | + * Maps the contents of an object, returning the address it is mapped | ||
1926 | + * into. | ||
1927 | + * | ||
1928 | + * While the mapping holds a reference on the contents of the object, it doesn't | ||
1929 | + * imply a ref on the object itself. | ||
1930 | + */ | ||
1931 | +int | ||
1932 | +i915_gem_mmap_ioctl(struct drm_device *dev, void *data, | ||
1933 | + struct drm_file *file_priv) | ||
1934 | +{ | ||
1935 | + struct drm_i915_gem_mmap *args = data; | ||
1936 | + struct drm_gem_object *obj; | ||
1937 | + loff_t offset; | ||
1938 | + unsigned long addr; | ||
1939 | + | ||
1940 | + if (!(dev->driver->driver_features & DRIVER_GEM)) | ||
1941 | + return -ENODEV; | ||
1942 | + | ||
1943 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
1944 | + if (obj == NULL) | ||
1945 | + return -EBADF; | ||
1946 | + | ||
1947 | + offset = args->offset; | ||
1948 | + | ||
1949 | + down_write(¤t->mm->mmap_sem); | ||
1950 | + addr = do_mmap(obj->filp, 0, args->size, | ||
1951 | + PROT_READ | PROT_WRITE, MAP_SHARED, | ||
1952 | + args->offset); | ||
1953 | + up_write(¤t->mm->mmap_sem); | ||
1954 | + mutex_lock(&dev->struct_mutex); | ||
1955 | + drm_gem_object_unreference(obj); | ||
1956 | + mutex_unlock(&dev->struct_mutex); | ||
1957 | + if (IS_ERR((void *)addr)) | ||
1958 | + return addr; | ||
1959 | + | ||
1960 | + args->addr_ptr = (uint64_t) addr; | ||
1961 | + | ||
1962 | + return 0; | ||
1963 | +} | ||
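As a usage sketch from the userspace side (hedged: it assumes the DRM_IOCTL_I915_GEM_CREATE and DRM_IOCTL_I915_GEM_MMAP definitions added by the uapi portion of this patch, an already-open DRM file descriptor, and omits most error handling):

#include <stddef.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include "i915_drm.h"	/* the interface header this patch extends */

static void *map_new_bo(int fd, uint64_t size)
{
	struct drm_i915_gem_create create = { .size = size };
	struct drm_i915_gem_mmap mmap_arg = { 0 };

	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return NULL;

	mmap_arg.handle = create.handle;
	mmap_arg.offset = 0;
	mmap_arg.size = size;
	if (ioctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg))
		return NULL;

	/* CPU pointer to the object's pages; set_domain/sw_finish still govern
	 * coherency around actual reads and writes.
	 */
	return (void *)(uintptr_t)mmap_arg.addr_ptr;
}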
1964 | + | ||
1965 | +static void | ||
1966 | +i915_gem_object_free_page_list(struct drm_gem_object *obj) | ||
1967 | +{ | ||
1968 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1969 | + int page_count = obj->size / PAGE_SIZE; | ||
1970 | + int i; | ||
1971 | + | ||
1972 | + if (obj_priv->page_list == NULL) | ||
1973 | + return; | ||
1974 | + | ||
1975 | + | ||
1976 | + for (i = 0; i < page_count; i++) | ||
1977 | + if (obj_priv->page_list[i] != NULL) { | ||
1978 | + if (obj_priv->dirty) | ||
1979 | + set_page_dirty(obj_priv->page_list[i]); | ||
1980 | + mark_page_accessed(obj_priv->page_list[i]); | ||
1981 | + page_cache_release(obj_priv->page_list[i]); | ||
1982 | + } | ||
1983 | + obj_priv->dirty = 0; | ||
1984 | + | ||
1985 | + drm_free(obj_priv->page_list, | ||
1986 | + page_count * sizeof(struct page *), | ||
1987 | + DRM_MEM_DRIVER); | ||
1988 | + obj_priv->page_list = NULL; | ||
1989 | +} | ||
1990 | + | ||
1991 | +static void | ||
1992 | +i915_gem_object_move_to_active(struct drm_gem_object *obj) | ||
1993 | +{ | ||
1994 | + struct drm_device *dev = obj->dev; | ||
1995 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
1996 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
1997 | + | ||
1998 | + /* Add a reference if we're newly entering the active list. */ | ||
1999 | + if (!obj_priv->active) { | ||
2000 | + drm_gem_object_reference(obj); | ||
2001 | + obj_priv->active = 1; | ||
2002 | + } | ||
2003 | + /* Move from whatever list we were on to the tail of execution. */ | ||
2004 | + list_move_tail(&obj_priv->list, | ||
2005 | + &dev_priv->mm.active_list); | ||
2006 | +} | ||
2007 | + | ||
2008 | + | ||
2009 | +static void | ||
2010 | +i915_gem_object_move_to_inactive(struct drm_gem_object *obj) | ||
2011 | +{ | ||
2012 | + struct drm_device *dev = obj->dev; | ||
2013 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2014 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2015 | + | ||
2016 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
2017 | + if (obj_priv->pin_count != 0) | ||
2018 | + list_del_init(&obj_priv->list); | ||
2019 | + else | ||
2020 | + list_move_tail(&obj_priv->list, &dev_priv->mm.inactive_list); | ||
2021 | + | ||
2022 | + if (obj_priv->active) { | ||
2023 | + obj_priv->active = 0; | ||
2024 | + drm_gem_object_unreference(obj); | ||
2025 | + } | ||
2026 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
2027 | +} | ||
2028 | + | ||
2029 | +/** | ||
2030 | + * Creates a new sequence number, emitting a write of it to the status page | ||
2031 | + * plus an interrupt, which will trigger i915_user_interrupt_handler. | ||
2032 | + * | ||
2033 | + * Must be called with dev->struct_mutex held. | ||
2034 | + * | ||
2035 | + * Returned sequence numbers are nonzero on success. | ||
2036 | + */ | ||
2037 | +static uint32_t | ||
2038 | +i915_add_request(struct drm_device *dev, uint32_t flush_domains) | ||
2039 | +{ | ||
2040 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2041 | + struct drm_i915_gem_request *request; | ||
2042 | + uint32_t seqno; | ||
2043 | + int was_empty; | ||
2044 | + RING_LOCALS; | ||
2045 | + | ||
2046 | + request = drm_calloc(1, sizeof(*request), DRM_MEM_DRIVER); | ||
2047 | + if (request == NULL) | ||
2048 | + return 0; | ||
2049 | + | ||
2050 | + /* Grab the seqno we're going to make this request be, and bump the | ||
2051 | + * next (skipping 0 so it can be the reserved no-seqno value). | ||
2052 | + */ | ||
2053 | + seqno = dev_priv->mm.next_gem_seqno; | ||
2054 | + dev_priv->mm.next_gem_seqno++; | ||
2055 | + if (dev_priv->mm.next_gem_seqno == 0) | ||
2056 | + dev_priv->mm.next_gem_seqno++; | ||
2057 | + | ||
2058 | + BEGIN_LP_RING(4); | ||
2059 | + OUT_RING(MI_STORE_DWORD_INDEX); | ||
2060 | + OUT_RING(I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT); | ||
2061 | + OUT_RING(seqno); | ||
2062 | + | ||
2063 | + OUT_RING(MI_USER_INTERRUPT); | ||
2064 | + ADVANCE_LP_RING(); | ||
2065 | + | ||
2066 | + DRM_DEBUG("%d\n", seqno); | ||
2067 | + | ||
2068 | + request->seqno = seqno; | ||
2069 | + request->emitted_jiffies = jiffies; | ||
2070 | + request->flush_domains = flush_domains; | ||
2071 | + was_empty = list_empty(&dev_priv->mm.request_list); | ||
2072 | + list_add_tail(&request->list, &dev_priv->mm.request_list); | ||
2073 | + | ||
2074 | + if (was_empty) | ||
2075 | + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | ||
2076 | + return seqno; | ||
2077 | +} | ||
2078 | + | ||
2079 | +/** | ||
2080 | + * Command execution barrier | ||
2081 | + * | ||
2082 | + * Ensures that all commands in the ring are finished | ||
2083 | + * before signalling the CPU | ||
2084 | + */ | ||
2085 | +uint32_t | ||
2086 | +i915_retire_commands(struct drm_device *dev) | ||
2087 | +{ | ||
2088 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2089 | + uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
2090 | + uint32_t flush_domains = 0; | ||
2091 | + RING_LOCALS; | ||
2092 | + | ||
2093 | + /* The sampler always gets flushed on i965 (sigh) */ | ||
2094 | + if (IS_I965G(dev)) | ||
2095 | + flush_domains |= I915_GEM_DOMAIN_SAMPLER; | ||
2096 | + BEGIN_LP_RING(2); | ||
2097 | + OUT_RING(cmd); | ||
2098 | + OUT_RING(0); /* noop */ | ||
2099 | + ADVANCE_LP_RING(); | ||
2100 | + return flush_domains; | ||
2101 | +} | ||
2102 | + | ||
2103 | +/** | ||
2104 | + * Moves buffers associated only with the given active seqno from the active | ||
2105 | + * to inactive list, potentially freeing them. | ||
2106 | + */ | ||
2107 | +static void | ||
2108 | +i915_gem_retire_request(struct drm_device *dev, | ||
2109 | + struct drm_i915_gem_request *request) | ||
2110 | +{ | ||
2111 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2112 | + | ||
2113 | + /* Move any buffers on the active list that are no longer referenced | ||
2114 | + * by the ringbuffer to the flushing/inactive lists as appropriate. | ||
2115 | + */ | ||
2116 | + while (!list_empty(&dev_priv->mm.active_list)) { | ||
2117 | + struct drm_gem_object *obj; | ||
2118 | + struct drm_i915_gem_object *obj_priv; | ||
2119 | + | ||
2120 | + obj_priv = list_first_entry(&dev_priv->mm.active_list, | ||
2121 | + struct drm_i915_gem_object, | ||
2122 | + list); | ||
2123 | + obj = obj_priv->obj; | ||
2124 | + | ||
2125 | + /* If the seqno being retired doesn't match the oldest in the | ||
2126 | + * list, then the oldest in the list must still be newer than | ||
2127 | + * this seqno. | ||
2128 | + */ | ||
2129 | + if (obj_priv->last_rendering_seqno != request->seqno) | ||
2130 | + return; | ||
2131 | +#if WATCH_LRU | ||
2132 | + DRM_INFO("%s: retire %d moves to inactive list %p\n", | ||
2133 | + __func__, request->seqno, obj); | ||
2134 | +#endif | ||
2135 | + | ||
2136 | + if (obj->write_domain != 0) { | ||
2137 | + list_move_tail(&obj_priv->list, | ||
2138 | + &dev_priv->mm.flushing_list); | ||
2139 | + } else { | ||
2140 | + i915_gem_object_move_to_inactive(obj); | ||
2141 | + } | ||
2142 | + } | ||
2143 | + | ||
2144 | + if (request->flush_domains != 0) { | ||
2145 | + struct drm_i915_gem_object *obj_priv, *next; | ||
2146 | + | ||
2147 | + /* Clear the write domain and activity from any buffers | ||
2148 | + * that are just waiting for a flush matching the one retired. | ||
2149 | + */ | ||
2150 | + list_for_each_entry_safe(obj_priv, next, | ||
2151 | + &dev_priv->mm.flushing_list, list) { | ||
2152 | + struct drm_gem_object *obj = obj_priv->obj; | ||
2153 | + | ||
2154 | + if (obj->write_domain & request->flush_domains) { | ||
2155 | + obj->write_domain = 0; | ||
2156 | + i915_gem_object_move_to_inactive(obj); | ||
2157 | + } | ||
2158 | + } | ||
2159 | + | ||
2160 | + } | ||
2161 | +} | ||
2162 | + | ||
2163 | +/** | ||
2164 | + * Returns true if seq1 is later than seq2. | ||
2165 | + */ | ||
2166 | +static int | ||
2167 | +i915_seqno_passed(uint32_t seq1, uint32_t seq2) | ||
2168 | +{ | ||
2169 | + return (int32_t)(seq1 - seq2) >= 0; | ||
2170 | +} | ||
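The signed-difference comparison above is what keeps retirement correct across 32-bit wraparound of the seqno counter (valid as long as outstanding seqnos stay within 2^31 of each other); a few concrete values as illustration:

i915_seqno_passed(5, 0xfffffff0);   /* 1: 5 was emitted after the wrap, 5 - 0xfffffff0 == 21 */
i915_seqno_passed(0xfffffff0, 5);   /* 0: the difference is negative as a signed 32-bit value */
i915_seqno_passed(7, 7);            /* 1: a seqno counts as having passed itself */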
2171 | + | ||
2172 | +uint32_t | ||
2173 | +i915_get_gem_seqno(struct drm_device *dev) | ||
2174 | +{ | ||
2175 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2176 | + | ||
2177 | + return READ_HWSP(dev_priv, I915_GEM_HWS_INDEX); | ||
2178 | +} | ||
2179 | + | ||
2180 | +/** | ||
2181 | + * This function clears the request list as sequence numbers are passed. | ||
2182 | + */ | ||
2183 | +void | ||
2184 | +i915_gem_retire_requests(struct drm_device *dev) | ||
2185 | +{ | ||
2186 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2187 | + uint32_t seqno; | ||
2188 | + | ||
2189 | + seqno = i915_get_gem_seqno(dev); | ||
2190 | + | ||
2191 | + while (!list_empty(&dev_priv->mm.request_list)) { | ||
2192 | + struct drm_i915_gem_request *request; | ||
2193 | + uint32_t retiring_seqno; | ||
2194 | + | ||
2195 | + request = list_first_entry(&dev_priv->mm.request_list, | ||
2196 | + struct drm_i915_gem_request, | ||
2197 | + list); | ||
2198 | + retiring_seqno = request->seqno; | ||
2199 | + | ||
2200 | + if (i915_seqno_passed(seqno, retiring_seqno) || | ||
2201 | + dev_priv->mm.wedged) { | ||
2202 | + i915_gem_retire_request(dev, request); | ||
2203 | + | ||
2204 | + list_del(&request->list); | ||
2205 | + drm_free(request, sizeof(*request), DRM_MEM_DRIVER); | ||
2206 | + } else | ||
2207 | + break; | ||
2208 | + } | ||
2209 | +} | ||
2210 | + | ||
2211 | +void | ||
2212 | +i915_gem_retire_work_handler(struct work_struct *work) | ||
2213 | +{ | ||
2214 | + drm_i915_private_t *dev_priv; | ||
2215 | + struct drm_device *dev; | ||
2216 | + | ||
2217 | + dev_priv = container_of(work, drm_i915_private_t, | ||
2218 | + mm.retire_work.work); | ||
2219 | + dev = dev_priv->dev; | ||
2220 | + | ||
2221 | + mutex_lock(&dev->struct_mutex); | ||
2222 | + i915_gem_retire_requests(dev); | ||
2223 | + if (!list_empty(&dev_priv->mm.request_list)) | ||
2224 | + schedule_delayed_work(&dev_priv->mm.retire_work, HZ); | ||
2225 | + mutex_unlock(&dev->struct_mutex); | ||
2226 | +} | ||
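For context on where the work item above comes from: i915_gem_load(), declared earlier in this patch, presumably owns the one-time setup, and i915_add_request() arms it when the request list goes non-empty; roughly:

INIT_DELAYED_WORK(&dev_priv->mm.retire_work, i915_gem_retire_work_handler);

/* later, whenever the request list transitions from empty to non-empty: */
schedule_delayed_work(&dev_priv->mm.retire_work, HZ);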
2227 | + | ||
2228 | +/** | ||
2229 | + * Waits for a sequence number to be signaled, and cleans up the | ||
2230 | + * request and object lists appropriately for that event. | ||
2231 | + */ | ||
2232 | +int | ||
2233 | +i915_wait_request(struct drm_device *dev, uint32_t seqno) | ||
2234 | +{ | ||
2235 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2236 | + int ret = 0; | ||
2237 | + | ||
2238 | + BUG_ON(seqno == 0); | ||
2239 | + | ||
2240 | + if (!i915_seqno_passed(i915_get_gem_seqno(dev), seqno)) { | ||
2241 | + dev_priv->mm.waiting_gem_seqno = seqno; | ||
2242 | + i915_user_irq_get(dev); | ||
2243 | + ret = wait_event_interruptible(dev_priv->irq_queue, | ||
2244 | + i915_seqno_passed(i915_get_gem_seqno(dev), | ||
2245 | + seqno) || | ||
2246 | + dev_priv->mm.wedged); | ||
2247 | + i915_user_irq_put(dev); | ||
2248 | + dev_priv->mm.waiting_gem_seqno = 0; | ||
2249 | + } | ||
2250 | + if (dev_priv->mm.wedged) | ||
2251 | + ret = -EIO; | ||
2252 | + | ||
2253 | + if (ret && ret != -ERESTARTSYS) | ||
2254 | + DRM_ERROR("%s returns %d (awaiting %d at %d)\n", | ||
2255 | + __func__, ret, seqno, i915_get_gem_seqno(dev)); | ||
2256 | + | ||
2257 | + /* Directly dispatch request retiring. While we have the work queue | ||
2258 | + * to handle this, the waiter on a request often wants an associated | ||
2259 | + * buffer to have made it to the inactive list, and we would need | ||
2260 | + * a separate wait queue to handle that. | ||
2261 | + */ | ||
2262 | + if (ret == 0) | ||
2263 | + i915_gem_retire_requests(dev); | ||
2264 | + | ||
2265 | + return ret; | ||
2266 | +} | ||
2267 | + | ||
2268 | +static void | ||
2269 | +i915_gem_flush(struct drm_device *dev, | ||
2270 | + uint32_t invalidate_domains, | ||
2271 | + uint32_t flush_domains) | ||
2272 | +{ | ||
2273 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2274 | + uint32_t cmd; | ||
2275 | + RING_LOCALS; | ||
2276 | + | ||
2277 | +#if WATCH_EXEC | ||
2278 | + DRM_INFO("%s: invalidate %08x flush %08x\n", __func__, | ||
2279 | + invalidate_domains, flush_domains); | ||
2280 | +#endif | ||
2281 | + | ||
2282 | + if (flush_domains & I915_GEM_DOMAIN_CPU) | ||
2283 | + drm_agp_chipset_flush(dev); | ||
2284 | + | ||
2285 | + if ((invalidate_domains | flush_domains) & ~(I915_GEM_DOMAIN_CPU | | ||
2286 | + I915_GEM_DOMAIN_GTT)) { | ||
2287 | + /* | ||
2288 | + * read/write caches: | ||
2289 | + * | ||
2290 | + * I915_GEM_DOMAIN_RENDER is always invalidated, but is | ||
2291 | + * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is | ||
2292 | + * also flushed at 2d versus 3d pipeline switches. | ||
2293 | + * | ||
2294 | + * read-only caches: | ||
2295 | + * | ||
2296 | + * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if | ||
2297 | + * MI_READ_FLUSH is set, and is always flushed on 965. | ||
2298 | + * | ||
2299 | + * I915_GEM_DOMAIN_COMMAND may not exist? | ||
2300 | + * | ||
2301 | + * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is | ||
2302 | + * invalidated when MI_EXE_FLUSH is set. | ||
2303 | + * | ||
2304 | + * I915_GEM_DOMAIN_VERTEX, which exists on 965, is | ||
2305 | + * invalidated with every MI_FLUSH. | ||
2306 | + * | ||
2307 | + * TLBs: | ||
2308 | + * | ||
2309 | + * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND | ||
2310 | + * and I915_GEM_DOMAIN_CPU are invalidated at PTE write, and | ||
2311 | + * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER | ||
2312 | + * are flushed at any MI_FLUSH. | ||
2313 | + */ | ||
2314 | + | ||
2315 | + cmd = MI_FLUSH | MI_NO_WRITE_FLUSH; | ||
2316 | + if ((invalidate_domains|flush_domains) & | ||
2317 | + I915_GEM_DOMAIN_RENDER) | ||
2318 | + cmd &= ~MI_NO_WRITE_FLUSH; | ||
2319 | + if (!IS_I965G(dev)) { | ||
2320 | + /* | ||
2321 | + * On the 965, the sampler cache always gets flushed | ||
2322 | + * and this bit is reserved. | ||
2323 | + */ | ||
2324 | + if (invalidate_domains & I915_GEM_DOMAIN_SAMPLER) | ||
2325 | + cmd |= MI_READ_FLUSH; | ||
2326 | + } | ||
2327 | + if (invalidate_domains & I915_GEM_DOMAIN_INSTRUCTION) | ||
2328 | + cmd |= MI_EXE_FLUSH; | ||
2329 | + | ||
2330 | +#if WATCH_EXEC | ||
2331 | + DRM_INFO("%s: queue flush %08x to ring\n", __func__, cmd); | ||
2332 | +#endif | ||
2333 | + BEGIN_LP_RING(2); | ||
2334 | + OUT_RING(cmd); | ||
2335 | + OUT_RING(0); /* noop */ | ||
2336 | + ADVANCE_LP_RING(); | ||
2337 | + } | ||
2338 | +} | ||
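Reading the command assembly above for one concrete case, flushing a RENDER write while invalidating SAMPLER on pre-965 hardware (the same bit twiddling as the function, just unrolled):

uint32_t cmd = MI_FLUSH | MI_NO_WRITE_FLUSH;

cmd &= ~MI_NO_WRITE_FLUSH;	/* RENDER appears in the domains, so let the write caches flush */
cmd |= MI_READ_FLUSH;		/* pre-965 and SAMPLER is being invalidated */
/* INSTRUCTION is not being invalidated, so MI_EXE_FLUSH stays clear; the word
 * is then emitted with OUT_RING(cmd) plus a noop dword, as above.
 */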
2339 | + | ||
2340 | +/** | ||
2341 | + * Ensures that all rendering to the object has completed and the object is | ||
2342 | + * safe to unbind from the GTT or access from the CPU. | ||
2343 | + */ | ||
2344 | +static int | ||
2345 | +i915_gem_object_wait_rendering(struct drm_gem_object *obj) | ||
2346 | +{ | ||
2347 | + struct drm_device *dev = obj->dev; | ||
2348 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2349 | + int ret; | ||
2350 | + | ||
2351 | + /* If there are writes queued to the buffer, flush and | ||
2352 | + * create a new seqno to wait for. | ||
2353 | + */ | ||
2354 | + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) { | ||
2355 | + uint32_t write_domain = obj->write_domain; | ||
2356 | +#if WATCH_BUF | ||
2357 | + DRM_INFO("%s: flushing object %p from write domain %08x\n", | ||
2358 | + __func__, obj, write_domain); | ||
2359 | +#endif | ||
2360 | + i915_gem_flush(dev, 0, write_domain); | ||
2361 | + | ||
2362 | + i915_gem_object_move_to_active(obj); | ||
2363 | + obj_priv->last_rendering_seqno = i915_add_request(dev, | ||
2364 | + write_domain); | ||
2365 | + BUG_ON(obj_priv->last_rendering_seqno == 0); | ||
2366 | +#if WATCH_LRU | ||
2367 | + DRM_INFO("%s: flush moves to exec list %p\n", __func__, obj); | ||
2368 | +#endif | ||
2369 | + } | ||
2370 | + | ||
2371 | + /* If there is rendering queued on the buffer being evicted, wait for | ||
2372 | + * it. | ||
2373 | + */ | ||
2374 | + if (obj_priv->active) { | ||
2375 | +#if WATCH_BUF | ||
2376 | + DRM_INFO("%s: object %p wait for seqno %08x\n", | ||
2377 | + __func__, obj, obj_priv->last_rendering_seqno); | ||
2378 | +#endif | ||
2379 | + ret = i915_wait_request(dev, obj_priv->last_rendering_seqno); | ||
2380 | + if (ret != 0) | ||
2381 | + return ret; | ||
2382 | + } | ||
2383 | + | ||
2384 | + return 0; | ||
2385 | +} | ||
2386 | + | ||
2387 | +/** | ||
2388 | + * Unbinds an object from the GTT aperture. | ||
2389 | + */ | ||
2390 | +static int | ||
2391 | +i915_gem_object_unbind(struct drm_gem_object *obj) | ||
2392 | +{ | ||
2393 | + struct drm_device *dev = obj->dev; | ||
2394 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2395 | + int ret = 0; | ||
2396 | + | ||
2397 | +#if WATCH_BUF | ||
2398 | + DRM_INFO("%s:%d %p\n", __func__, __LINE__, obj); | ||
2399 | + DRM_INFO("gtt_space %p\n", obj_priv->gtt_space); | ||
2400 | +#endif | ||
2401 | + if (obj_priv->gtt_space == NULL) | ||
2402 | + return 0; | ||
2403 | + | ||
2404 | + if (obj_priv->pin_count != 0) { | ||
2405 | + DRM_ERROR("Attempting to unbind pinned buffer\n"); | ||
2406 | + return -EINVAL; | ||
2407 | + } | ||
2408 | + | ||
2409 | + /* Wait for any rendering to complete | ||
2410 | + */ | ||
2411 | + ret = i915_gem_object_wait_rendering(obj); | ||
2412 | + if (ret) { | ||
2413 | + DRM_ERROR("wait_rendering failed: %d\n", ret); | ||
2414 | + return ret; | ||
2415 | + } | ||
2416 | + | ||
2417 | + /* Move the object to the CPU domain to ensure that | ||
2418 | + * any possible CPU writes while it's not in the GTT | ||
2419 | + * are flushed when we go to remap it. This will | ||
2420 | + * also ensure that all pending GPU writes are finished | ||
2421 | + * before we unbind. | ||
2422 | + */ | ||
2423 | + ret = i915_gem_object_set_domain(obj, I915_GEM_DOMAIN_CPU, | ||
2424 | + I915_GEM_DOMAIN_CPU); | ||
2425 | + if (ret) { | ||
2426 | + DRM_ERROR("set_domain failed: %d\n", ret); | ||
2427 | + return ret; | ||
2428 | + } | ||
2429 | + | ||
2430 | + if (obj_priv->agp_mem != NULL) { | ||
2431 | + drm_unbind_agp(obj_priv->agp_mem); | ||
2432 | + drm_free_agp(obj_priv->agp_mem, obj->size / PAGE_SIZE); | ||
2433 | + obj_priv->agp_mem = NULL; | ||
2434 | + } | ||
2435 | + | ||
2436 | + BUG_ON(obj_priv->active); | ||
2437 | + | ||
2438 | + i915_gem_object_free_page_list(obj); | ||
2439 | + | ||
2440 | + if (obj_priv->gtt_space) { | ||
2441 | + atomic_dec(&dev->gtt_count); | ||
2442 | + atomic_sub(obj->size, &dev->gtt_memory); | ||
2443 | + | ||
2444 | + drm_mm_put_block(obj_priv->gtt_space); | ||
2445 | + obj_priv->gtt_space = NULL; | ||
2446 | + } | ||
2447 | + | ||
2448 | + /* Remove ourselves from the LRU list if present. */ | ||
2449 | + if (!list_empty(&obj_priv->list)) | ||
2450 | + list_del_init(&obj_priv->list); | ||
2451 | + | ||
2452 | + return 0; | ||
2453 | +} | ||
2454 | + | ||
2455 | +static int | ||
2456 | +i915_gem_evict_something(struct drm_device *dev) | ||
2457 | +{ | ||
2458 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2459 | + struct drm_gem_object *obj; | ||
2460 | + struct drm_i915_gem_object *obj_priv; | ||
2461 | + int ret = 0; | ||
2462 | + | ||
2463 | + for (;;) { | ||
2464 | + /* If there's an inactive buffer available now, grab it | ||
2465 | + * and be done. | ||
2466 | + */ | ||
2467 | + if (!list_empty(&dev_priv->mm.inactive_list)) { | ||
2468 | + obj_priv = list_first_entry(&dev_priv->mm.inactive_list, | ||
2469 | + struct drm_i915_gem_object, | ||
2470 | + list); | ||
2471 | + obj = obj_priv->obj; | ||
2472 | + BUG_ON(obj_priv->pin_count != 0); | ||
2473 | +#if WATCH_LRU | ||
2474 | + DRM_INFO("%s: evicting %p\n", __func__, obj); | ||
2475 | +#endif | ||
2476 | + BUG_ON(obj_priv->active); | ||
2477 | + | ||
2478 | + /* Wait on the rendering and unbind the buffer. */ | ||
2479 | + ret = i915_gem_object_unbind(obj); | ||
2480 | + break; | ||
2481 | + } | ||
2482 | + | ||
2483 | + /* If we didn't get anything, but the ring is still processing | ||
2484 | + * things, wait for one of those things to finish and hopefully | ||
2485 | + * leave us a buffer to evict. | ||
2486 | + */ | ||
2487 | + if (!list_empty(&dev_priv->mm.request_list)) { | ||
2488 | + struct drm_i915_gem_request *request; | ||
2489 | + | ||
2490 | + request = list_first_entry(&dev_priv->mm.request_list, | ||
2491 | + struct drm_i915_gem_request, | ||
2492 | + list); | ||
2493 | + | ||
2494 | + ret = i915_wait_request(dev, request->seqno); | ||
2495 | + if (ret) | ||
2496 | + break; | ||
2497 | + | ||
2498 | + /* if waiting caused an object to become inactive, | ||
2499 | + * then loop around and wait for it. Otherwise, we | ||
2500 | + * assume that waiting freed and unbound something, | ||
2501 | + * so there should now be some space in the GTT | ||
2502 | + */ | ||
2503 | + if (!list_empty(&dev_priv->mm.inactive_list)) | ||
2504 | + continue; | ||
2505 | + break; | ||
2506 | + } | ||
2507 | + | ||
2508 | + /* If we didn't have anything on the request list but there | ||
2509 | + * are buffers awaiting a flush, emit one and try again. | ||
2510 | + * When we wait on it, those buffers waiting for that flush | ||
2511 | + * will get moved to inactive. | ||
2512 | + */ | ||
2513 | + if (!list_empty(&dev_priv->mm.flushing_list)) { | ||
2514 | + obj_priv = list_first_entry(&dev_priv->mm.flushing_list, | ||
2515 | + struct drm_i915_gem_object, | ||
2516 | + list); | ||
2517 | + obj = obj_priv->obj; | ||
2518 | + | ||
2519 | + i915_gem_flush(dev, | ||
2520 | + obj->write_domain, | ||
2521 | + obj->write_domain); | ||
2522 | + i915_add_request(dev, obj->write_domain); | ||
2523 | + | ||
2524 | + obj = NULL; | ||
2525 | + continue; | ||
2526 | + } | ||
2527 | + | ||
2528 | + DRM_ERROR("inactive empty %d request empty %d " | ||
2529 | + "flushing empty %d\n", | ||
2530 | + list_empty(&dev_priv->mm.inactive_list), | ||
2531 | + list_empty(&dev_priv->mm.request_list), | ||
2532 | + list_empty(&dev_priv->mm.flushing_list)); | ||
2533 | + /* If we didn't do any of the above, there's nothing to be done | ||
2534 | + * and we just can't fit it in. | ||
2535 | + */ | ||
2536 | + return -ENOMEM; | ||
2537 | + } | ||
2538 | + return ret; | ||
2539 | +} | ||
2540 | + | ||
2541 | +static int | ||
2542 | +i915_gem_object_get_page_list(struct drm_gem_object *obj) | ||
2543 | +{ | ||
2544 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2545 | + int page_count, i; | ||
2546 | + struct address_space *mapping; | ||
2547 | + struct inode *inode; | ||
2548 | + struct page *page; | ||
2549 | + int ret; | ||
2550 | + | ||
2551 | + if (obj_priv->page_list) | ||
2552 | + return 0; | ||
2553 | + | ||
2554 | + /* Get the list of pages out of our struct file. They'll be pinned | ||
2555 | + * at this point until we release them. | ||
2556 | + */ | ||
2557 | + page_count = obj->size / PAGE_SIZE; | ||
2558 | + BUG_ON(obj_priv->page_list != NULL); | ||
2559 | + obj_priv->page_list = drm_calloc(page_count, sizeof(struct page *), | ||
2560 | + DRM_MEM_DRIVER); | ||
2561 | + if (obj_priv->page_list == NULL) { | ||
2562 | + DRM_ERROR("Failed to allocate page list\n"); | ||
2563 | + return -ENOMEM; | ||
2564 | + } | ||
2565 | + | ||
2566 | + inode = obj->filp->f_path.dentry->d_inode; | ||
2567 | + mapping = inode->i_mapping; | ||
2568 | + for (i = 0; i < page_count; i++) { | ||
2569 | + page = read_mapping_page(mapping, i, NULL); | ||
2570 | + if (IS_ERR(page)) { | ||
2571 | + ret = PTR_ERR(page); | ||
2572 | + DRM_ERROR("read_mapping_page failed: %d\n", ret); | ||
2573 | + i915_gem_object_free_page_list(obj); | ||
2574 | + return ret; | ||
2575 | + } | ||
2576 | + obj_priv->page_list[i] = page; | ||
2577 | + } | ||
2578 | + return 0; | ||
2579 | +} | ||
2580 | + | ||
2581 | +/** | ||
2582 | + * Finds free space in the GTT aperture and binds the object there. | ||
2583 | + */ | ||
2584 | +static int | ||
2585 | +i915_gem_object_bind_to_gtt(struct drm_gem_object *obj, unsigned alignment) | ||
2586 | +{ | ||
2587 | + struct drm_device *dev = obj->dev; | ||
2588 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
2589 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2590 | + struct drm_mm_node *free_space; | ||
2591 | + int page_count, ret; | ||
2592 | + | ||
2593 | + if (alignment == 0) | ||
2594 | + alignment = PAGE_SIZE; | ||
2595 | + if (alignment & (PAGE_SIZE - 1)) { | ||
2596 | + DRM_ERROR("Invalid object alignment requested %u\n", alignment); | ||
2597 | + return -EINVAL; | ||
2598 | + } | ||
2599 | + | ||
2600 | + search_free: | ||
2601 | + free_space = drm_mm_search_free(&dev_priv->mm.gtt_space, | ||
2602 | + obj->size, alignment, 0); | ||
2603 | + if (free_space != NULL) { | ||
2604 | + obj_priv->gtt_space = drm_mm_get_block(free_space, obj->size, | ||
2605 | + alignment); | ||
2606 | + if (obj_priv->gtt_space != NULL) { | ||
2607 | + obj_priv->gtt_space->private = obj; | ||
2608 | + obj_priv->gtt_offset = obj_priv->gtt_space->start; | ||
2609 | + } | ||
2610 | + } | ||
2611 | + if (obj_priv->gtt_space == NULL) { | ||
2612 | + /* If the gtt is empty and we're still having trouble | ||
2613 | + * fitting our object in, we're out of memory. | ||
2614 | + */ | ||
2615 | +#if WATCH_LRU | ||
2616 | + DRM_INFO("%s: GTT full, evicting something\n", __func__); | ||
2617 | +#endif | ||
2618 | + if (list_empty(&dev_priv->mm.inactive_list) && | ||
2619 | + list_empty(&dev_priv->mm.flushing_list) && | ||
2620 | + list_empty(&dev_priv->mm.active_list)) { | ||
2621 | + DRM_ERROR("GTT full, but LRU list empty\n"); | ||
2622 | + return -ENOMEM; | ||
2623 | + } | ||
2624 | + | ||
2625 | + ret = i915_gem_evict_something(dev); | ||
2626 | + if (ret != 0) { | ||
2627 | + DRM_ERROR("Failed to evict a buffer %d\n", ret); | ||
2628 | + return ret; | ||
2629 | + } | ||
2630 | + goto search_free; | ||
2631 | + } | ||
2632 | + | ||
2633 | +#if WATCH_BUF | ||
2634 | + DRM_INFO("Binding object of size %d at 0x%08x\n", | ||
2635 | + obj->size, obj_priv->gtt_offset); | ||
2636 | +#endif | ||
2637 | + ret = i915_gem_object_get_page_list(obj); | ||
2638 | + if (ret) { | ||
2639 | + drm_mm_put_block(obj_priv->gtt_space); | ||
2640 | + obj_priv->gtt_space = NULL; | ||
2641 | + return ret; | ||
2642 | + } | ||
2643 | + | ||
2644 | + page_count = obj->size / PAGE_SIZE; | ||
2645 | + /* Create an AGP memory structure pointing at our pages, and bind it | ||
2646 | + * into the GTT. | ||
2647 | + */ | ||
2648 | + obj_priv->agp_mem = drm_agp_bind_pages(dev, | ||
2649 | + obj_priv->page_list, | ||
2650 | + page_count, | ||
2651 | + obj_priv->gtt_offset); | ||
2652 | + if (obj_priv->agp_mem == NULL) { | ||
2653 | + i915_gem_object_free_page_list(obj); | ||
2654 | + drm_mm_put_block(obj_priv->gtt_space); | ||
2655 | + obj_priv->gtt_space = NULL; | ||
2656 | + return -ENOMEM; | ||
2657 | + } | ||
2658 | + atomic_inc(&dev->gtt_count); | ||
2659 | + atomic_add(obj->size, &dev->gtt_memory); | ||
2660 | + | ||
2661 | + /* Assert that the object is not currently in any GPU domain. As it | ||
2662 | + * wasn't in the GTT, there shouldn't be any way it could have been in | ||
2663 | + * a GPU cache | ||
2664 | + */ | ||
2665 | + BUG_ON(obj->read_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); | ||
2666 | + BUG_ON(obj->write_domain & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); | ||
2667 | + | ||
2668 | + return 0; | ||
2669 | +} | ||
2670 | + | ||
2671 | +void | ||
2672 | +i915_gem_clflush_object(struct drm_gem_object *obj) | ||
2673 | +{ | ||
2674 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2675 | + | ||
2676 | + /* If we don't have a page list set up, then we're not pinned | ||
2677 | + * to GPU, and we can ignore the cache flush because it'll happen | ||
2678 | + * again at bind time. | ||
2679 | + */ | ||
2680 | + if (obj_priv->page_list == NULL) | ||
2681 | + return; | ||
2682 | + | ||
2683 | + drm_clflush_pages(obj_priv->page_list, obj->size / PAGE_SIZE); | ||
2684 | +} | ||
2685 | + | ||
2686 | +/* | ||
2687 | + * Set the next domain for the specified object. This | ||
2688 | + * may not actually perform the necessary flushing/invalidating though, | ||
2689 | + * as that may want to be batched with other set_domain operations | ||
2690 | + * | ||
2691 | + * This is (we hope) the only really tricky part of gem. The goal | ||
2692 | + * is fairly simple -- track which caches hold bits of the object | ||
2693 | + * and make sure they remain coherent. A few concrete examples may | ||
2694 | + * help to explain how it works. For shorthand, we use the notation | ||
2695 | + * (read_domains, write_domain), e.g. (CPU, CPU) to indicate the | ||
2696 | + * a pair of read and write domain masks. | ||
2697 | + * | ||
2698 | + * Case 1: the batch buffer | ||
2699 | + * | ||
2700 | + * 1. Allocated | ||
2701 | + * 2. Written by CPU | ||
2702 | + * 3. Mapped to GTT | ||
2703 | + * 4. Read by GPU | ||
2704 | + * 5. Unmapped from GTT | ||
2705 | + * 6. Freed | ||
2706 | + * | ||
2707 | + * Let's take these a step at a time | ||
2708 | + * | ||
2709 | + * 1. Allocated | ||
2710 | + * Pages allocated from the kernel may still have | ||
2711 | + * cache contents, so we set them to (CPU, CPU) always. | ||
2712 | + * 2. Written by CPU (using pwrite) | ||
2713 | + * The pwrite function calls set_domain (CPU, CPU) and | ||
2714 | + * this function does nothing (as nothing changes) | ||
2715 | + * 3. Mapped to GTT | ||
2716 | + * This function asserts that the object is not | ||
2717 | + * currently in any GPU-based read or write domains | ||
2718 | + * 4. Read by GPU | ||
2719 | + * i915_gem_execbuffer calls set_domain (COMMAND, 0). | ||
2720 | + * As write_domain is zero, this function adds in the | ||
2721 | + * current read domains (CPU+COMMAND, 0). | ||
2722 | + * flush_domains is set to CPU. | ||
2723 | + * invalidate_domains is set to COMMAND | ||
2724 | + * clflush is run to get data out of the CPU caches | ||
2725 | + * then i915_dev_set_domain calls i915_gem_flush to | ||
2726 | + * emit an MI_FLUSH and drm_agp_chipset_flush | ||
2727 | + * 5. Unmapped from GTT | ||
2728 | + * i915_gem_object_unbind calls set_domain (CPU, CPU) | ||
2729 | + * flush_domains and invalidate_domains end up both zero | ||
2730 | + * so no flushing/invalidating happens | ||
2731 | + * 6. Freed | ||
2732 | + * yay, done | ||
2733 | + * | ||
2734 | + * Case 2: The shared render buffer | ||
2735 | + * | ||
2736 | + * 1. Allocated | ||
2737 | + * 2. Mapped to GTT | ||
2738 | + * 3. Read/written by GPU | ||
2739 | + * 4. set_domain to (CPU,CPU) | ||
2740 | + * 5. Read/written by CPU | ||
2741 | + * 6. Read/written by GPU | ||
2742 | + * | ||
2743 | + * 1. Allocated | ||
2744 | + * Same as last example, (CPU, CPU) | ||
2745 | + * 2. Mapped to GTT | ||
2746 | + * Nothing changes (assertions find that it is not in the GPU) | ||
2747 | + * 3. Read/written by GPU | ||
2748 | + * execbuffer calls set_domain (RENDER, RENDER) | ||
2749 | + * flush_domains gets CPU | ||
2750 | + * invalidate_domains gets GPU | ||
2751 | + * clflush (obj) | ||
2752 | + * MI_FLUSH and drm_agp_chipset_flush | ||
2753 | + * 4. set_domain (CPU, CPU) | ||
2754 | + * flush_domains gets GPU | ||
2755 | + * invalidate_domains gets CPU | ||
2756 | + * wait_rendering (obj) to make sure all drawing is complete. | ||
2757 | + * This will include an MI_FLUSH to get the data from GPU | ||
2758 | + * to memory | ||
2759 | + * clflush (obj) to invalidate the CPU cache | ||
2760 | + * Another MI_FLUSH in i915_gem_flush (eliminate this somehow?) | ||
2761 | + * 5. Read/written by CPU | ||
2762 | + * cache lines are loaded and dirtied | ||
2763 | + * 6. Read/written by GPU | ||
2764 | + * Same as last GPU access | ||
2765 | + * | ||
2766 | + * Case 3: The constant buffer | ||
2767 | + * | ||
2768 | + * 1. Allocated | ||
2769 | + * 2. Written by CPU | ||
2770 | + * 3. Read by GPU | ||
2771 | + * 4. Updated (written) by CPU again | ||
2772 | + * 5. Read by GPU | ||
2773 | + * | ||
2774 | + * 1. Allocated | ||
2775 | + * (CPU, CPU) | ||
2776 | + * 2. Written by CPU | ||
2777 | + * (CPU, CPU) | ||
2778 | + * 3. Read by GPU | ||
2779 | + * (CPU+RENDER, 0) | ||
2780 | + * flush_domains = CPU | ||
2781 | + * invalidate_domains = RENDER | ||
2782 | + * clflush (obj) | ||
2783 | + * MI_FLUSH | ||
2784 | + * drm_agp_chipset_flush | ||
2785 | + * 4. Updated (written) by CPU again | ||
2786 | + * (CPU, CPU) | ||
2787 | + * flush_domains = 0 (no previous write domain) | ||
2788 | + * invalidate_domains = 0 (no new read domains) | ||
2789 | + * 5. Read by GPU | ||
2790 | + * (CPU+RENDER, 0) | ||
2791 | + * flush_domains = CPU | ||
2792 | + * invalidate_domains = RENDER | ||
2793 | + * clflush (obj) | ||
2794 | + * MI_FLUSH | ||
2795 | + * drm_agp_chipset_flush | ||
2796 | + */ | ||
2797 | +static int | ||
2798 | +i915_gem_object_set_domain(struct drm_gem_object *obj, | ||
2799 | + uint32_t read_domains, | ||
2800 | + uint32_t write_domain) | ||
2801 | +{ | ||
2802 | + struct drm_device *dev = obj->dev; | ||
2803 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2804 | + uint32_t invalidate_domains = 0; | ||
2805 | + uint32_t flush_domains = 0; | ||
2806 | + int ret; | ||
2807 | + | ||
2808 | +#if WATCH_BUF | ||
2809 | + DRM_INFO("%s: object %p read %08x -> %08x write %08x -> %08x\n", | ||
2810 | + __func__, obj, | ||
2811 | + obj->read_domains, read_domains, | ||
2812 | + obj->write_domain, write_domain); | ||
2813 | +#endif | ||
2814 | + /* | ||
2815 | + * If the object isn't moving to a new write domain, | ||
2816 | + * let the object stay in multiple read domains | ||
2817 | + */ | ||
2818 | + if (write_domain == 0) | ||
2819 | + read_domains |= obj->read_domains; | ||
2820 | + else | ||
2821 | + obj_priv->dirty = 1; | ||
2822 | + | ||
2823 | + /* | ||
2824 | + * Flush the current write domain if | ||
2825 | + * the new read domains don't match. Invalidate | ||
2826 | + * any read domains which differ from the old | ||
2827 | + * write domain | ||
2828 | + */ | ||
2829 | + if (obj->write_domain && obj->write_domain != read_domains) { | ||
2830 | + flush_domains |= obj->write_domain; | ||
2831 | + invalidate_domains |= read_domains & ~obj->write_domain; | ||
2832 | + } | ||
2833 | + /* | ||
2834 | + * Invalidate any read caches which may have | ||
2835 | + * stale data. That is, any new read domains. | ||
2836 | + */ | ||
2837 | + invalidate_domains |= read_domains & ~obj->read_domains; | ||
2838 | + if ((flush_domains | invalidate_domains) & I915_GEM_DOMAIN_CPU) { | ||
2839 | +#if WATCH_BUF | ||
2840 | + DRM_INFO("%s: CPU domain flush %08x invalidate %08x\n", | ||
2841 | + __func__, flush_domains, invalidate_domains); | ||
2842 | +#endif | ||
2843 | + /* | ||
2844 | + * If we're invalidating the CPU cache and flushing a GPU cache, | ||
2845 | + * then pause for rendering so that the GPU caches will be | ||
2846 | + * flushed before the CPU cache is invalidated | ||
2847 | + */ | ||
2848 | + if ((invalidate_domains & I915_GEM_DOMAIN_CPU) && | ||
2849 | + (flush_domains & ~(I915_GEM_DOMAIN_CPU | | ||
2850 | + I915_GEM_DOMAIN_GTT))) { | ||
2851 | + ret = i915_gem_object_wait_rendering(obj); | ||
2852 | + if (ret) | ||
2853 | + return ret; | ||
2854 | + } | ||
2855 | + i915_gem_clflush_object(obj); | ||
2856 | + } | ||
2857 | + | ||
2858 | + if ((write_domain | flush_domains) != 0) | ||
2859 | + obj->write_domain = write_domain; | ||
2860 | + | ||
2861 | + /* If we're invalidating the CPU domain, clear the per-page CPU | ||
2862 | + * domain list as well. | ||
2863 | + */ | ||
2864 | + if (obj_priv->page_cpu_valid != NULL && | ||
2865 | + (write_domain != 0 || | ||
2866 | + read_domains & I915_GEM_DOMAIN_CPU)) { | ||
2867 | + drm_free(obj_priv->page_cpu_valid, obj->size / PAGE_SIZE, | ||
2868 | + DRM_MEM_DRIVER); | ||
2869 | + obj_priv->page_cpu_valid = NULL; | ||
2870 | + } | ||
2871 | + obj->read_domains = read_domains; | ||
2872 | + | ||
2873 | + dev->invalidate_domains |= invalidate_domains; | ||
2874 | + dev->flush_domains |= flush_domains; | ||
2875 | +#if WATCH_BUF | ||
2876 | + DRM_INFO("%s: read %08x write %08x invalidate %08x flush %08x\n", | ||
2877 | + __func__, | ||
2878 | + obj->read_domains, obj->write_domain, | ||
2879 | + dev->invalidate_domains, dev->flush_domains); | ||
2880 | +#endif | ||
2881 | + return 0; | ||
2882 | +} | ||
2883 | + | ||
2884 | +/** | ||
2885 | + * Set the read/write domain on a range of the object. | ||
2886 | + * | ||
2887 | + * Currently only implemented for CPU reads, otherwise drops to normal | ||
2888 | + * i915_gem_object_set_domain(). | ||
2889 | + */ | ||
2890 | +static int | ||
2891 | +i915_gem_object_set_domain_range(struct drm_gem_object *obj, | ||
2892 | + uint64_t offset, | ||
2893 | + uint64_t size, | ||
2894 | + uint32_t read_domains, | ||
2895 | + uint32_t write_domain) | ||
2896 | +{ | ||
2897 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2898 | + int ret, i; | ||
2899 | + | ||
2900 | + if (obj->read_domains & I915_GEM_DOMAIN_CPU) | ||
2901 | + return 0; | ||
2902 | + | ||
2903 | + if (read_domains != I915_GEM_DOMAIN_CPU || | ||
2904 | + write_domain != 0) | ||
2905 | + return i915_gem_object_set_domain(obj, | ||
2906 | + read_domains, write_domain); | ||
2907 | + | ||
2908 | + /* Wait on any GPU rendering to the object to be flushed. */ | ||
2909 | + if (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT)) { | ||
2910 | + ret = i915_gem_object_wait_rendering(obj); | ||
2911 | + if (ret) | ||
2912 | + return ret; | ||
2913 | + } | ||
2914 | + | ||
2915 | + if (obj_priv->page_cpu_valid == NULL) { | ||
2916 | + obj_priv->page_cpu_valid = drm_calloc(1, obj->size / PAGE_SIZE, | ||
2917 | + DRM_MEM_DRIVER); | ||
2918 | + } | ||
2919 | + | ||
2920 | + /* Flush the cache on any pages that are still invalid from the CPU's | ||
2921 | + * perspective. | ||
2922 | + */ | ||
2923 | + for (i = offset / PAGE_SIZE; i <= (offset + size - 1) / PAGE_SIZE; i++) { | ||
2924 | + if (obj_priv->page_cpu_valid[i]) | ||
2925 | + continue; | ||
2926 | + | ||
2927 | + drm_clflush_pages(obj_priv->page_list + i, 1); | ||
2928 | + | ||
2929 | + obj_priv->page_cpu_valid[i] = 1; | ||
2930 | + } | ||
2931 | + | ||
2932 | + return 0; | ||
2933 | +} | ||
2934 | + | ||
2935 | +/** | ||
2936 | + * Once all of the objects have been set in the proper domain, | ||
2937 | + * perform the necessary flush and invalidate operations. | ||
2938 | + * | ||
2939 | + * Returns the write domains flushed, for use in flush tracking. | ||
2940 | + */ | ||
2941 | +static uint32_t | ||
2942 | +i915_gem_dev_set_domain(struct drm_device *dev) | ||
2943 | +{ | ||
2944 | + uint32_t flush_domains = dev->flush_domains; | ||
2945 | + | ||
2946 | + /* | ||
2947 | + * Now that all the buffers are synced to the proper domains, | ||
2948 | + * flush and invalidate the collected domains | ||
2949 | + */ | ||
2950 | + if (dev->invalidate_domains | dev->flush_domains) { | ||
2951 | +#if WATCH_EXEC | ||
2952 | + DRM_INFO("%s: invalidate_domains %08x flush_domains %08x\n", | ||
2953 | + __func__, | ||
2954 | + dev->invalidate_domains, | ||
2955 | + dev->flush_domains); | ||
2956 | +#endif | ||
2957 | + i915_gem_flush(dev, | ||
2958 | + dev->invalidate_domains, | ||
2959 | + dev->flush_domains); | ||
2960 | + dev->invalidate_domains = 0; | ||
2961 | + dev->flush_domains = 0; | ||
2962 | + } | ||
2963 | + | ||
2964 | + return flush_domains; | ||
2965 | +} | ||
2966 | + | ||
2967 | +/** | ||
2968 | + * Pin an object to the GTT and evaluate the relocations landing in it. | ||
2969 | + */ | ||
2970 | +static int | ||
2971 | +i915_gem_object_pin_and_relocate(struct drm_gem_object *obj, | ||
2972 | + struct drm_file *file_priv, | ||
2973 | + struct drm_i915_gem_exec_object *entry) | ||
2974 | +{ | ||
2975 | + struct drm_device *dev = obj->dev; | ||
2976 | + struct drm_i915_gem_relocation_entry reloc; | ||
2977 | + struct drm_i915_gem_relocation_entry __user *relocs; | ||
2978 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
2979 | + int i, ret; | ||
2980 | + uint32_t last_reloc_offset = -1; | ||
2981 | + void *reloc_page = NULL; | ||
2982 | + | ||
2983 | + /* Choose the GTT offset for our buffer and put it there. */ | ||
2984 | + ret = i915_gem_object_pin(obj, (uint32_t) entry->alignment); | ||
2985 | + if (ret) | ||
2986 | + return ret; | ||
2987 | + | ||
2988 | + entry->offset = obj_priv->gtt_offset; | ||
2989 | + | ||
2990 | + relocs = (struct drm_i915_gem_relocation_entry __user *) | ||
2991 | + (uintptr_t) entry->relocs_ptr; | ||
2992 | + /* Apply the relocations, using the GTT aperture to avoid cache | ||
2993 | + * flushing requirements. | ||
2994 | + */ | ||
2995 | + for (i = 0; i < entry->relocation_count; i++) { | ||
2996 | + struct drm_gem_object *target_obj; | ||
2997 | + struct drm_i915_gem_object *target_obj_priv; | ||
2998 | + uint32_t reloc_val, reloc_offset, *reloc_entry; | ||
2999 | + int ret; | ||
3000 | + | ||
3001 | + ret = copy_from_user(&reloc, relocs + i, sizeof(reloc)); | ||
3002 | + if (ret != 0) { | ||
3003 | + i915_gem_object_unpin(obj); | ||
3004 | + return ret; | ||
3005 | + } | ||
3006 | + | ||
3007 | + target_obj = drm_gem_object_lookup(obj->dev, file_priv, | ||
3008 | + reloc.target_handle); | ||
3009 | + if (target_obj == NULL) { | ||
3010 | + i915_gem_object_unpin(obj); | ||
3011 | + return -EBADF; | ||
3012 | + } | ||
3013 | + target_obj_priv = target_obj->driver_private; | ||
3014 | + | ||
3015 | + /* The target buffer should have appeared before us in the | ||
3016 | + * exec_object list, so it should have a GTT space bound by now. | ||
3017 | + */ | ||
3018 | + if (target_obj_priv->gtt_space == NULL) { | ||
3019 | + DRM_ERROR("No GTT space found for object %d\n", | ||
3020 | + reloc.target_handle); | ||
3021 | + drm_gem_object_unreference(target_obj); | ||
3022 | + i915_gem_object_unpin(obj); | ||
3023 | + return -EINVAL; | ||
3024 | + } | ||
3025 | + | ||
3026 | + if (reloc.offset > obj->size - 4) { | ||
3027 | + DRM_ERROR("Relocation beyond object bounds: " | ||
3028 | + "obj %p target %d offset %d size %d.\n", | ||
3029 | + obj, reloc.target_handle, | ||
3030 | + (int) reloc.offset, (int) obj->size); | ||
3031 | + drm_gem_object_unreference(target_obj); | ||
3032 | + i915_gem_object_unpin(obj); | ||
3033 | + return -EINVAL; | ||
3034 | + } | ||
3035 | + if (reloc.offset & 3) { | ||
3036 | + DRM_ERROR("Relocation not 4-byte aligned: " | ||
3037 | + "obj %p target %d offset %d.\n", | ||
3038 | + obj, reloc.target_handle, | ||
3039 | + (int) reloc.offset); | ||
3040 | + drm_gem_object_unreference(target_obj); | ||
3041 | + i915_gem_object_unpin(obj); | ||
3042 | + return -EINVAL; | ||
3043 | + } | ||
3044 | + | ||
3045 | + if (reloc.write_domain && target_obj->pending_write_domain && | ||
3046 | + reloc.write_domain != target_obj->pending_write_domain) { | ||
3047 | + DRM_ERROR("Write domain conflict: " | ||
3048 | + "obj %p target %d offset %d " | ||
3049 | + "new %08x old %08x\n", | ||
3050 | + obj, reloc.target_handle, | ||
3051 | + (int) reloc.offset, | ||
3052 | + reloc.write_domain, | ||
3053 | + target_obj->pending_write_domain); | ||
3054 | + drm_gem_object_unreference(target_obj); | ||
3055 | + i915_gem_object_unpin(obj); | ||
3056 | + return -EINVAL; | ||
3057 | + } | ||
3058 | + | ||
3059 | +#if WATCH_RELOC | ||
3060 | + DRM_INFO("%s: obj %p offset %08x target %d " | ||
3061 | + "read %08x write %08x gtt %08x " | ||
3062 | + "presumed %08x delta %08x\n", | ||
3063 | + __func__, | ||
3064 | + obj, | ||
3065 | + (int) reloc.offset, | ||
3066 | + (int) reloc.target_handle, | ||
3067 | + (int) reloc.read_domains, | ||
3068 | + (int) reloc.write_domain, | ||
3069 | + (int) target_obj_priv->gtt_offset, | ||
3070 | + (int) reloc.presumed_offset, | ||
3071 | + reloc.delta); | ||
3072 | +#endif | ||
3073 | + | ||
3074 | + target_obj->pending_read_domains |= reloc.read_domains; | ||
3075 | + target_obj->pending_write_domain |= reloc.write_domain; | ||
3076 | + | ||
3077 | + /* If the relocation already has the right value in it, no | ||
3078 | + * more work needs to be done. | ||
3079 | + */ | ||
3080 | + if (target_obj_priv->gtt_offset == reloc.presumed_offset) { | ||
3081 | + drm_gem_object_unreference(target_obj); | ||
3082 | + continue; | ||
3083 | + } | ||
3084 | + | ||
3085 | + /* Now that we're going to actually write some data in, | ||
3086 | + * make sure that any rendering using this buffer's contents | ||
3087 | + * is completed. | ||
3088 | + */ | ||
3089 | + i915_gem_object_wait_rendering(obj); | ||
3090 | + | ||
3091 | + /* As we're writing through the gtt, flush | ||
3092 | + * any CPU writes before we write the relocations | ||
3093 | + */ | ||
3094 | + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { | ||
3095 | + i915_gem_clflush_object(obj); | ||
3096 | + drm_agp_chipset_flush(dev); | ||
3097 | + obj->write_domain = 0; | ||
3098 | + } | ||
3099 | + | ||
3100 | + /* Map the page containing the relocation we're going to | ||
3101 | + * perform. | ||
3102 | + */ | ||
3103 | + reloc_offset = obj_priv->gtt_offset + reloc.offset; | ||
3104 | + if (reloc_page == NULL || | ||
3105 | + (last_reloc_offset & ~(PAGE_SIZE - 1)) != | ||
3106 | + (reloc_offset & ~(PAGE_SIZE - 1))) { | ||
3107 | + if (reloc_page != NULL) | ||
3108 | + iounmap(reloc_page); | ||
3109 | + | ||
3110 | + reloc_page = ioremap(dev->agp->base + | ||
3111 | + (reloc_offset & ~(PAGE_SIZE - 1)), | ||
3112 | + PAGE_SIZE); | ||
3113 | + last_reloc_offset = reloc_offset; | ||
3114 | + if (reloc_page == NULL) { | ||
3115 | + drm_gem_object_unreference(target_obj); | ||
3116 | + i915_gem_object_unpin(obj); | ||
3117 | + return -ENOMEM; | ||
3118 | + } | ||
3119 | + } | ||
3120 | + | ||
3121 | + reloc_entry = (uint32_t *)((char *)reloc_page + | ||
3122 | + (reloc_offset & (PAGE_SIZE - 1))); | ||
3123 | + reloc_val = target_obj_priv->gtt_offset + reloc.delta; | ||
3124 | + | ||
3125 | +#if WATCH_BUF | ||
3126 | + DRM_INFO("Applied relocation: %p@0x%08x %08x -> %08x\n", | ||
3127 | + obj, (unsigned int) reloc.offset, | ||
3128 | + readl(reloc_entry), reloc_val); | ||
3129 | +#endif | ||
3130 | + writel(reloc_val, reloc_entry); | ||
3131 | + | ||
3132 | + /* Write the updated presumed offset for this entry back out | ||
3133 | + * to the user. | ||
3134 | + */ | ||
3135 | + reloc.presumed_offset = target_obj_priv->gtt_offset; | ||
3136 | + ret = copy_to_user(relocs + i, &reloc, sizeof(reloc)); | ||
3137 | + if (ret != 0) { | ||
3138 | + drm_gem_object_unreference(target_obj); | ||
3139 | + i915_gem_object_unpin(obj); | ||
3140 | + return ret; | ||
3141 | + } | ||
3142 | + | ||
3143 | + drm_gem_object_unreference(target_obj); | ||
3144 | + } | ||
3145 | + | ||
3146 | + if (reloc_page != NULL) | ||
3147 | + iounmap(reloc_page); | ||
3148 | + | ||
3149 | +#if WATCH_BUF | ||
3150 | + if (0) | ||
3151 | + i915_gem_dump_object(obj, 128, __func__, ~0); | ||
3152 | +#endif | ||
3153 | + return 0; | ||
3154 | +} | ||
3155 | + | ||
3156 | +/** Dispatch a batchbuffer to the ring | ||
3157 | + */ | ||
3158 | +static int | ||
3159 | +i915_dispatch_gem_execbuffer(struct drm_device *dev, | ||
3160 | + struct drm_i915_gem_execbuffer *exec, | ||
3161 | + uint64_t exec_offset) | ||
3162 | +{ | ||
3163 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3164 | + struct drm_clip_rect __user *boxes = (struct drm_clip_rect __user *) | ||
3165 | + (uintptr_t) exec->cliprects_ptr; | ||
3166 | + int nbox = exec->num_cliprects; | ||
3167 | + int i = 0, count; | ||
3168 | + uint32_t exec_start, exec_len; | ||
3169 | + RING_LOCALS; | ||
3170 | + | ||
3171 | + exec_start = (uint32_t) exec_offset + exec->batch_start_offset; | ||
3172 | + exec_len = (uint32_t) exec->batch_len; | ||
3173 | + | ||
3174 | + if ((exec_start | exec_len) & 0x7) { | ||
3175 | + DRM_ERROR("alignment\n"); | ||
3176 | + return -EINVAL; | ||
3177 | + } | ||
3178 | + | ||
3179 | + if (!exec_start) | ||
3180 | + return -EINVAL; | ||
3181 | + | ||
3182 | + count = nbox ? nbox : 1; | ||
3183 | + | ||
3184 | + for (i = 0; i < count; i++) { | ||
3185 | + if (i < nbox) { | ||
3186 | + int ret = i915_emit_box(dev, boxes, i, | ||
3187 | + exec->DR1, exec->DR4); | ||
3188 | + if (ret) | ||
3189 | + return ret; | ||
3190 | + } | ||
3191 | + | ||
3192 | + if (IS_I830(dev) || IS_845G(dev)) { | ||
3193 | + BEGIN_LP_RING(4); | ||
3194 | + OUT_RING(MI_BATCH_BUFFER); | ||
3195 | + OUT_RING(exec_start | MI_BATCH_NON_SECURE); | ||
3196 | + OUT_RING(exec_start + exec_len - 4); | ||
3197 | + OUT_RING(0); | ||
3198 | + ADVANCE_LP_RING(); | ||
3199 | + } else { | ||
3200 | + BEGIN_LP_RING(2); | ||
3201 | + if (IS_I965G(dev)) { | ||
3202 | + OUT_RING(MI_BATCH_BUFFER_START | | ||
3203 | + (2 << 6) | | ||
3204 | + MI_BATCH_NON_SECURE_I965); | ||
3205 | + OUT_RING(exec_start); | ||
3206 | + } else { | ||
3207 | + OUT_RING(MI_BATCH_BUFFER_START | | ||
3208 | + (2 << 6)); | ||
3209 | + OUT_RING(exec_start | MI_BATCH_NON_SECURE); | ||
3210 | + } | ||
3211 | + ADVANCE_LP_RING(); | ||
3212 | + } | ||
3213 | + } | ||
3214 | + | ||
3215 | + /* XXX breadcrumb */ | ||
3216 | + return 0; | ||
3217 | +} | ||
3218 | + | ||
3219 | +/* Throttle our rendering by waiting until the ring has completed our requests | ||
3220 | + * emitted over 20 msec ago. | ||
3221 | + * | ||
3222 | + * This should get us reasonable parallelism between CPU and GPU but also | ||
3223 | + * relatively low latency when blocking on a particular request to finish. | ||
3224 | + */ | ||
3225 | +static int | ||
3226 | +i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file_priv) | ||
3227 | +{ | ||
3228 | + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | ||
3229 | + int ret = 0; | ||
3230 | + uint32_t seqno; | ||
3231 | + | ||
3232 | + mutex_lock(&dev->struct_mutex); | ||
3233 | + seqno = i915_file_priv->mm.last_gem_throttle_seqno; | ||
3234 | + i915_file_priv->mm.last_gem_throttle_seqno = | ||
3235 | + i915_file_priv->mm.last_gem_seqno; | ||
3236 | + if (seqno) | ||
3237 | + ret = i915_wait_request(dev, seqno); | ||
3238 | + mutex_unlock(&dev->struct_mutex); | ||
3239 | + return ret; | ||
3240 | +} | ||
3241 | + | ||
3242 | +int | ||
3243 | +i915_gem_execbuffer(struct drm_device *dev, void *data, | ||
3244 | + struct drm_file *file_priv) | ||
3245 | +{ | ||
3246 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3247 | + struct drm_i915_file_private *i915_file_priv = file_priv->driver_priv; | ||
3248 | + struct drm_i915_gem_execbuffer *args = data; | ||
3249 | + struct drm_i915_gem_exec_object *exec_list = NULL; | ||
3250 | + struct drm_gem_object **object_list = NULL; | ||
3251 | + struct drm_gem_object *batch_obj; | ||
3252 | + int ret, i, pinned = 0; | ||
3253 | + uint64_t exec_offset; | ||
3254 | + uint32_t seqno, flush_domains; | ||
3255 | + | ||
3256 | +#if WATCH_EXEC | ||
3257 | + DRM_INFO("buffers_ptr %d buffer_count %d len %08x\n", | ||
3258 | + (int) args->buffers_ptr, args->buffer_count, args->batch_len); | ||
3259 | +#endif | ||
3260 | + | ||
3261 | + /* Copy in the exec list from userland */ | ||
3262 | + exec_list = drm_calloc(sizeof(*exec_list), args->buffer_count, | ||
3263 | + DRM_MEM_DRIVER); | ||
3264 | + object_list = drm_calloc(sizeof(*object_list), args->buffer_count, | ||
3265 | + DRM_MEM_DRIVER); | ||
3266 | + if (exec_list == NULL || object_list == NULL) { | ||
3267 | + DRM_ERROR("Failed to allocate exec or object list " | ||
3268 | + "for %d buffers\n", | ||
3269 | + args->buffer_count); | ||
3270 | + ret = -ENOMEM; | ||
3271 | + goto pre_mutex_err; | ||
3272 | + } | ||
3273 | + ret = copy_from_user(exec_list, | ||
3274 | + (struct drm_i915_relocation_entry __user *) | ||
3275 | + (uintptr_t) args->buffers_ptr, | ||
3276 | + sizeof(*exec_list) * args->buffer_count); | ||
3277 | + if (ret != 0) { | ||
3278 | + DRM_ERROR("copy %d exec entries failed %d\n", | ||
3279 | + args->buffer_count, ret); | ||
3280 | + goto pre_mutex_err; | ||
3281 | + } | ||
3282 | + | ||
3283 | + mutex_lock(&dev->struct_mutex); | ||
3284 | + | ||
3285 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3286 | + | ||
3287 | + if (dev_priv->mm.wedged) { | ||
3288 | + DRM_ERROR("Execbuf while wedged\n"); | ||
3289 | + mutex_unlock(&dev->struct_mutex); | ||
3290 | + return -EIO; | ||
3291 | + } | ||
3292 | + | ||
3293 | + if (dev_priv->mm.suspended) { | ||
3294 | + DRM_ERROR("Execbuf while VT-switched.\n"); | ||
3295 | + mutex_unlock(&dev->struct_mutex); | ||
3296 | + return -EBUSY; | ||
3297 | + } | ||
3298 | + | ||
3299 | + /* Zero the global flush/invalidate flags. These | ||
3300 | + * will be modified as each object is bound to the | ||
3301 | + * gtt | ||
3302 | + */ | ||
3303 | + dev->invalidate_domains = 0; | ||
3304 | + dev->flush_domains = 0; | ||
3305 | + | ||
3306 | + /* Look up object handles and perform the relocations */ | ||
3307 | + for (i = 0; i < args->buffer_count; i++) { | ||
3308 | + object_list[i] = drm_gem_object_lookup(dev, file_priv, | ||
3309 | + exec_list[i].handle); | ||
3310 | + if (object_list[i] == NULL) { | ||
3311 | + DRM_ERROR("Invalid object handle %d at index %d\n", | ||
3312 | + exec_list[i].handle, i); | ||
3313 | + ret = -EBADF; | ||
3314 | + goto err; | ||
3315 | + } | ||
3316 | + | ||
3317 | + object_list[i]->pending_read_domains = 0; | ||
3318 | + object_list[i]->pending_write_domain = 0; | ||
3319 | + ret = i915_gem_object_pin_and_relocate(object_list[i], | ||
3320 | + file_priv, | ||
3321 | + &exec_list[i]); | ||
3322 | + if (ret) { | ||
3323 | + DRM_ERROR("object bind and relocate failed %d\n", ret); | ||
3324 | + goto err; | ||
3325 | + } | ||
3326 | + pinned = i + 1; | ||
3327 | + } | ||
3328 | + | ||
3329 | + /* Set the pending read domains for the batch buffer to COMMAND */ | ||
3330 | + batch_obj = object_list[args->buffer_count-1]; | ||
3331 | + batch_obj->pending_read_domains = I915_GEM_DOMAIN_COMMAND; | ||
3332 | + batch_obj->pending_write_domain = 0; | ||
3333 | + | ||
3334 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3335 | + | ||
3336 | + for (i = 0; i < args->buffer_count; i++) { | ||
3337 | + struct drm_gem_object *obj = object_list[i]; | ||
3338 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3339 | + | ||
3340 | + if (obj_priv->gtt_space == NULL) { | ||
3341 | + /* We evicted the buffer in the process of validating | ||
3342 | + * our set of buffers. We could try to recover by | ||
3343 | + * kicking everything out and trying again from | ||
3344 | + * the start. | ||
3345 | + */ | ||
3346 | + ret = -ENOMEM; | ||
3347 | + goto err; | ||
3348 | + } | ||
3349 | + | ||
3350 | + /* make sure all previous memory operations have passed */ | ||
3351 | + ret = i915_gem_object_set_domain(obj, | ||
3352 | + obj->pending_read_domains, | ||
3353 | + obj->pending_write_domain); | ||
3354 | + if (ret) | ||
3355 | + goto err; | ||
3356 | + } | ||
3357 | + | ||
3358 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3359 | + | ||
3360 | + /* Flush/invalidate caches and chipset buffer */ | ||
3361 | + flush_domains = i915_gem_dev_set_domain(dev); | ||
3362 | + | ||
3363 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3364 | + | ||
3365 | +#if WATCH_COHERENCY | ||
3366 | + for (i = 0; i < args->buffer_count; i++) { | ||
3367 | + i915_gem_object_check_coherency(object_list[i], | ||
3368 | + exec_list[i].handle); | ||
3369 | + } | ||
3370 | +#endif | ||
3371 | + | ||
3372 | + exec_offset = exec_list[args->buffer_count - 1].offset; | ||
3373 | + | ||
3374 | +#if WATCH_EXEC | ||
3375 | + i915_gem_dump_object(object_list[args->buffer_count - 1], | ||
3376 | + args->batch_len, | ||
3377 | + __func__, | ||
3378 | + ~0); | ||
3379 | +#endif | ||
3380 | + | ||
3381 | + (void)i915_add_request(dev, flush_domains); | ||
3382 | + | ||
3383 | + /* Exec the batchbuffer */ | ||
3384 | + ret = i915_dispatch_gem_execbuffer(dev, args, exec_offset); | ||
3385 | + if (ret) { | ||
3386 | + DRM_ERROR("dispatch failed %d\n", ret); | ||
3387 | + goto err; | ||
3388 | + } | ||
3389 | + | ||
3390 | + /* | ||
3391 | + * Ensure that the commands in the batch buffer are | ||
3392 | + * finished before the interrupt fires | ||
3393 | + */ | ||
3394 | + flush_domains = i915_retire_commands(dev); | ||
3395 | + | ||
3396 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3397 | + | ||
3398 | + /* | ||
3399 | + * Get a seqno representing the execution of the current buffer, | ||
3400 | + * which we can wait on. We would like to mitigate these interrupts, | ||
3401 | + * likely by only creating seqnos occasionally (so that we have | ||
3402 | + * *some* interrupts representing completion of buffers that we can | ||
3403 | + * wait on when trying to clear up gtt space). | ||
3404 | + */ | ||
3405 | + seqno = i915_add_request(dev, flush_domains); | ||
3406 | + BUG_ON(seqno == 0); | ||
3407 | + i915_file_priv->mm.last_gem_seqno = seqno; | ||
3408 | + for (i = 0; i < args->buffer_count; i++) { | ||
3409 | + struct drm_gem_object *obj = object_list[i]; | ||
3410 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3411 | + | ||
3412 | + i915_gem_object_move_to_active(obj); | ||
3413 | + obj_priv->last_rendering_seqno = seqno; | ||
3414 | +#if WATCH_LRU | ||
3415 | + DRM_INFO("%s: move to exec list %p\n", __func__, obj); | ||
3416 | +#endif | ||
3417 | + } | ||
3418 | +#if WATCH_LRU | ||
3419 | + i915_dump_lru(dev, __func__); | ||
3420 | +#endif | ||
3421 | + | ||
3422 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3423 | + | ||
3424 | + /* Copy the new buffer offsets back to the user's exec list. */ | ||
3425 | + ret = copy_to_user((struct drm_i915_relocation_entry __user *) | ||
3426 | + (uintptr_t) args->buffers_ptr, | ||
3427 | + exec_list, | ||
3428 | + sizeof(*exec_list) * args->buffer_count); | ||
3429 | + if (ret) | ||
3430 | + DRM_ERROR("failed to copy %d exec entries " | ||
3431 | + "back to user (%d)\n", | ||
3432 | + args->buffer_count, ret); | ||
3433 | +err: | ||
3434 | + if (object_list != NULL) { | ||
3435 | + for (i = 0; i < pinned; i++) | ||
3436 | + i915_gem_object_unpin(object_list[i]); | ||
3437 | + | ||
3438 | + for (i = 0; i < args->buffer_count; i++) | ||
3439 | + drm_gem_object_unreference(object_list[i]); | ||
3440 | + } | ||
3441 | + mutex_unlock(&dev->struct_mutex); | ||
3442 | + | ||
3443 | +pre_mutex_err: | ||
3444 | + drm_free(object_list, sizeof(*object_list) * args->buffer_count, | ||
3445 | + DRM_MEM_DRIVER); | ||
3446 | + drm_free(exec_list, sizeof(*exec_list) * args->buffer_count, | ||
3447 | + DRM_MEM_DRIVER); | ||
3448 | + | ||
3449 | + return ret; | ||
3450 | +} | ||
3451 | + | ||
3452 | +int | ||
3453 | +i915_gem_object_pin(struct drm_gem_object *obj, uint32_t alignment) | ||
3454 | +{ | ||
3455 | + struct drm_device *dev = obj->dev; | ||
3456 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3457 | + int ret; | ||
3458 | + | ||
3459 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3460 | + if (obj_priv->gtt_space == NULL) { | ||
3461 | + ret = i915_gem_object_bind_to_gtt(obj, alignment); | ||
3462 | + if (ret != 0) { | ||
3463 | + DRM_ERROR("Failure to bind: %d", ret); | ||
3464 | + return ret; | ||
3465 | + } | ||
3466 | + } | ||
3467 | + obj_priv->pin_count++; | ||
3468 | + | ||
3469 | + /* If the object is not active and not pending a flush, | ||
3470 | + * remove it from the inactive list | ||
3471 | + */ | ||
3472 | + if (obj_priv->pin_count == 1) { | ||
3473 | + atomic_inc(&dev->pin_count); | ||
3474 | + atomic_add(obj->size, &dev->pin_memory); | ||
3475 | + if (!obj_priv->active && | ||
3476 | + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | | ||
3477 | + I915_GEM_DOMAIN_GTT)) == 0 && | ||
3478 | + !list_empty(&obj_priv->list)) | ||
3479 | + list_del_init(&obj_priv->list); | ||
3480 | + } | ||
3481 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3482 | + | ||
3483 | + return 0; | ||
3484 | +} | ||
3485 | + | ||
3486 | +void | ||
3487 | +i915_gem_object_unpin(struct drm_gem_object *obj) | ||
3488 | +{ | ||
3489 | + struct drm_device *dev = obj->dev; | ||
3490 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3491 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3492 | + | ||
3493 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3494 | + obj_priv->pin_count--; | ||
3495 | + BUG_ON(obj_priv->pin_count < 0); | ||
3496 | + BUG_ON(obj_priv->gtt_space == NULL); | ||
3497 | + | ||
3498 | + /* If the object is no longer pinned, and is | ||
3499 | + * neither active nor being flushed, then stick it on | ||
3500 | + * the inactive list | ||
3501 | + */ | ||
3502 | + if (obj_priv->pin_count == 0) { | ||
3503 | + if (!obj_priv->active && | ||
3504 | + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | | ||
3505 | + I915_GEM_DOMAIN_GTT)) == 0) | ||
3506 | + list_move_tail(&obj_priv->list, | ||
3507 | + &dev_priv->mm.inactive_list); | ||
3508 | + atomic_dec(&dev->pin_count); | ||
3509 | + atomic_sub(obj->size, &dev->pin_memory); | ||
3510 | + } | ||
3511 | + i915_verify_inactive(dev, __FILE__, __LINE__); | ||
3512 | +} | ||
3513 | + | ||
3514 | +int | ||
3515 | +i915_gem_pin_ioctl(struct drm_device *dev, void *data, | ||
3516 | + struct drm_file *file_priv) | ||
3517 | +{ | ||
3518 | + struct drm_i915_gem_pin *args = data; | ||
3519 | + struct drm_gem_object *obj; | ||
3520 | + struct drm_i915_gem_object *obj_priv; | ||
3521 | + int ret; | ||
3522 | + | ||
3523 | + mutex_lock(&dev->struct_mutex); | ||
3524 | + | ||
3525 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
3526 | + if (obj == NULL) { | ||
3527 | + DRM_ERROR("Bad handle in i915_gem_pin_ioctl(): %d\n", | ||
3528 | + args->handle); | ||
3529 | + mutex_unlock(&dev->struct_mutex); | ||
3530 | + return -EBADF; | ||
3531 | + } | ||
3532 | + obj_priv = obj->driver_private; | ||
3533 | + | ||
3534 | + ret = i915_gem_object_pin(obj, args->alignment); | ||
3535 | + if (ret != 0) { | ||
3536 | + drm_gem_object_unreference(obj); | ||
3537 | + mutex_unlock(&dev->struct_mutex); | ||
3538 | + return ret; | ||
3539 | + } | ||
3540 | + | ||
3541 | + /* XXX - flush the CPU caches for pinned objects | ||
3542 | + * as the X server doesn't manage domains yet | ||
3543 | + */ | ||
3544 | + if (obj->write_domain & I915_GEM_DOMAIN_CPU) { | ||
3545 | + i915_gem_clflush_object(obj); | ||
3546 | + drm_agp_chipset_flush(dev); | ||
3547 | + obj->write_domain = 0; | ||
3548 | + } | ||
3549 | + args->offset = obj_priv->gtt_offset; | ||
3550 | + drm_gem_object_unreference(obj); | ||
3551 | + mutex_unlock(&dev->struct_mutex); | ||
3552 | + | ||
3553 | + return 0; | ||
3554 | +} | ||
3555 | + | ||
3556 | +int | ||
3557 | +i915_gem_unpin_ioctl(struct drm_device *dev, void *data, | ||
3558 | + struct drm_file *file_priv) | ||
3559 | +{ | ||
3560 | + struct drm_i915_gem_pin *args = data; | ||
3561 | + struct drm_gem_object *obj; | ||
3562 | + | ||
3563 | + mutex_lock(&dev->struct_mutex); | ||
3564 | + | ||
3565 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
3566 | + if (obj == NULL) { | ||
3567 | + DRM_ERROR("Bad handle in i915_gem_unpin_ioctl(): %d\n", | ||
3568 | + args->handle); | ||
3569 | + mutex_unlock(&dev->struct_mutex); | ||
3570 | + return -EBADF; | ||
3571 | + } | ||
3572 | + | ||
3573 | + i915_gem_object_unpin(obj); | ||
3574 | + | ||
3575 | + drm_gem_object_unreference(obj); | ||
3576 | + mutex_unlock(&dev->struct_mutex); | ||
3577 | + return 0; | ||
3578 | +} | ||
3579 | + | ||
3580 | +int | ||
3581 | +i915_gem_busy_ioctl(struct drm_device *dev, void *data, | ||
3582 | + struct drm_file *file_priv) | ||
3583 | +{ | ||
3584 | + struct drm_i915_gem_busy *args = data; | ||
3585 | + struct drm_gem_object *obj; | ||
3586 | + struct drm_i915_gem_object *obj_priv; | ||
3587 | + | ||
3588 | + mutex_lock(&dev->struct_mutex); | ||
3589 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
3590 | + if (obj == NULL) { | ||
3591 | + DRM_ERROR("Bad handle in i915_gem_busy_ioctl(): %d\n", | ||
3592 | + args->handle); | ||
3593 | + mutex_unlock(&dev->struct_mutex); | ||
3594 | + return -EBADF; | ||
3595 | + } | ||
3596 | + | ||
3597 | + obj_priv = obj->driver_private; | ||
3598 | + args->busy = obj_priv->active; | ||
3599 | + | ||
3600 | + drm_gem_object_unreference(obj); | ||
3601 | + mutex_unlock(&dev->struct_mutex); | ||
3602 | + return 0; | ||
3603 | +} | ||
3604 | + | ||
3605 | +int | ||
3606 | +i915_gem_throttle_ioctl(struct drm_device *dev, void *data, | ||
3607 | + struct drm_file *file_priv) | ||
3608 | +{ | ||
3609 | + return i915_gem_ring_throttle(dev, file_priv); | ||
3610 | +} | ||
3611 | + | ||
3612 | +int i915_gem_init_object(struct drm_gem_object *obj) | ||
3613 | +{ | ||
3614 | + struct drm_i915_gem_object *obj_priv; | ||
3615 | + | ||
3616 | + obj_priv = drm_calloc(1, sizeof(*obj_priv), DRM_MEM_DRIVER); | ||
3617 | + if (obj_priv == NULL) | ||
3618 | + return -ENOMEM; | ||
3619 | + | ||
3620 | + /* | ||
3621 | + * We've just allocated pages from the kernel, | ||
3622 | + * so they've just been written by the CPU with | ||
3623 | + * zeros. They'll need to be clflushed before we | ||
3624 | + * use them with the GPU. | ||
3625 | + */ | ||
3626 | + obj->write_domain = I915_GEM_DOMAIN_CPU; | ||
3627 | + obj->read_domains = I915_GEM_DOMAIN_CPU; | ||
3628 | + | ||
3629 | + obj->driver_private = obj_priv; | ||
3630 | + obj_priv->obj = obj; | ||
3631 | + INIT_LIST_HEAD(&obj_priv->list); | ||
3632 | + return 0; | ||
3633 | +} | ||
3634 | + | ||
3635 | +void i915_gem_free_object(struct drm_gem_object *obj) | ||
3636 | +{ | ||
3637 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
3638 | + | ||
3639 | + while (obj_priv->pin_count > 0) | ||
3640 | + i915_gem_object_unpin(obj); | ||
3641 | + | ||
3642 | + i915_gem_object_unbind(obj); | ||
3643 | + | ||
3644 | + drm_free(obj_priv->page_cpu_valid, 1, DRM_MEM_DRIVER); | ||
3645 | + drm_free(obj->driver_private, 1, DRM_MEM_DRIVER); | ||
3646 | +} | ||
3647 | + | ||
3648 | +static int | ||
3649 | +i915_gem_set_domain(struct drm_gem_object *obj, | ||
3650 | + struct drm_file *file_priv, | ||
3651 | + uint32_t read_domains, | ||
3652 | + uint32_t write_domain) | ||
3653 | +{ | ||
3654 | + struct drm_device *dev = obj->dev; | ||
3655 | + int ret; | ||
3656 | + uint32_t flush_domains; | ||
3657 | + | ||
3658 | + BUG_ON(!mutex_is_locked(&dev->struct_mutex)); | ||
3659 | + | ||
3660 | + ret = i915_gem_object_set_domain(obj, read_domains, write_domain); | ||
3661 | + if (ret) | ||
3662 | + return ret; | ||
3663 | + flush_domains = i915_gem_dev_set_domain(obj->dev); | ||
3664 | + | ||
3665 | + if (flush_domains & ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)) | ||
3666 | + (void) i915_add_request(dev, flush_domains); | ||
3667 | + | ||
3668 | + return 0; | ||
3669 | +} | ||
3670 | + | ||
3671 | +/** Unbinds all objects that are on the given buffer list. */ | ||
3672 | +static int | ||
3673 | +i915_gem_evict_from_list(struct drm_device *dev, struct list_head *head) | ||
3674 | +{ | ||
3675 | + struct drm_gem_object *obj; | ||
3676 | + struct drm_i915_gem_object *obj_priv; | ||
3677 | + int ret; | ||
3678 | + | ||
3679 | + while (!list_empty(head)) { | ||
3680 | + obj_priv = list_first_entry(head, | ||
3681 | + struct drm_i915_gem_object, | ||
3682 | + list); | ||
3683 | + obj = obj_priv->obj; | ||
3684 | + | ||
3685 | + if (obj_priv->pin_count != 0) { | ||
3686 | + DRM_ERROR("Pinned object in unbind list\n"); | ||
3687 | + mutex_unlock(&dev->struct_mutex); | ||
3688 | + return -EINVAL; | ||
3689 | + } | ||
3690 | + | ||
3691 | + ret = i915_gem_object_unbind(obj); | ||
3692 | + if (ret != 0) { | ||
3693 | + DRM_ERROR("Error unbinding object in LeaveVT: %d\n", | ||
3694 | + ret); | ||
3695 | + mutex_unlock(&dev->struct_mutex); | ||
3696 | + return ret; | ||
3697 | + } | ||
3698 | + } | ||
3699 | + | ||
3700 | + | ||
3701 | + return 0; | ||
3702 | +} | ||
3703 | + | ||
3704 | +static int | ||
3705 | +i915_gem_idle(struct drm_device *dev) | ||
3706 | +{ | ||
3707 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3708 | + uint32_t seqno, cur_seqno, last_seqno; | ||
3709 | + int stuck, ret; | ||
3710 | + | ||
3711 | + if (dev_priv->mm.suspended) | ||
3712 | + return 0; | ||
3713 | + | ||
3714 | + /* Hack! Don't let anybody do execbuf while we don't control the chip. | ||
3715 | + * We need to replace this with a semaphore, or something. | ||
3716 | + */ | ||
3717 | + dev_priv->mm.suspended = 1; | ||
3718 | + | ||
3719 | + i915_kernel_lost_context(dev); | ||
3720 | + | ||
3721 | + /* Flush the GPU along with all non-CPU write domains | ||
3722 | + */ | ||
3723 | + i915_gem_flush(dev, ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT), | ||
3724 | + ~(I915_GEM_DOMAIN_CPU|I915_GEM_DOMAIN_GTT)); | ||
3725 | + seqno = i915_add_request(dev, ~(I915_GEM_DOMAIN_CPU | | ||
3726 | + I915_GEM_DOMAIN_GTT)); | ||
3727 | + | ||
3728 | + if (seqno == 0) { | ||
3729 | + mutex_unlock(&dev->struct_mutex); | ||
3730 | + return -ENOMEM; | ||
3731 | + } | ||
3732 | + | ||
3733 | + dev_priv->mm.waiting_gem_seqno = seqno; | ||
3734 | + last_seqno = 0; | ||
3735 | + stuck = 0; | ||
3736 | + for (;;) { | ||
3737 | + cur_seqno = i915_get_gem_seqno(dev); | ||
3738 | + if (i915_seqno_passed(cur_seqno, seqno)) | ||
3739 | + break; | ||
3740 | + if (last_seqno == cur_seqno) { | ||
3741 | + if (stuck++ > 100) { | ||
3742 | + DRM_ERROR("hardware wedged\n"); | ||
3743 | + dev_priv->mm.wedged = 1; | ||
3744 | + DRM_WAKEUP(&dev_priv->irq_queue); | ||
3745 | + break; | ||
3746 | + } | ||
3747 | + } | ||
3748 | + msleep(10); | ||
3749 | + last_seqno = cur_seqno; | ||
3750 | + } | ||
3751 | + dev_priv->mm.waiting_gem_seqno = 0; | ||
3752 | + | ||
3753 | + i915_gem_retire_requests(dev); | ||
3754 | + | ||
3755 | + /* Active and flushing should now be empty as we've | ||
3756 | + * waited for a sequence higher than any pending execbuffer | ||
3757 | + */ | ||
3758 | + BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||
3759 | + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3760 | + | ||
3761 | + /* Request should now be empty as we've also waited | ||
3762 | + * for the last request in the list | ||
3763 | + */ | ||
3764 | + BUG_ON(!list_empty(&dev_priv->mm.request_list)); | ||
3765 | + | ||
3766 | + /* Move all buffers out of the GTT. */ | ||
3767 | + ret = i915_gem_evict_from_list(dev, &dev_priv->mm.inactive_list); | ||
3768 | + if (ret) | ||
3769 | + return ret; | ||
3770 | + | ||
3771 | + BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||
3772 | + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3773 | + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||
3774 | + BUG_ON(!list_empty(&dev_priv->mm.request_list)); | ||
3775 | + return 0; | ||
3776 | +} | ||
3777 | + | ||
3778 | +static int | ||
3779 | +i915_gem_init_hws(struct drm_device *dev) | ||
3780 | +{ | ||
3781 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3782 | + struct drm_gem_object *obj; | ||
3783 | + struct drm_i915_gem_object *obj_priv; | ||
3784 | + int ret; | ||
3785 | + | ||
3786 | + /* If we need a physical address for the status page, it's already | ||
3787 | + * initialized at driver load time. | ||
3788 | + */ | ||
3789 | + if (!I915_NEED_GFX_HWS(dev)) | ||
3790 | + return 0; | ||
3791 | + | ||
3792 | + obj = drm_gem_object_alloc(dev, 4096); | ||
3793 | + if (obj == NULL) { | ||
3794 | + DRM_ERROR("Failed to allocate status page\n"); | ||
3795 | + return -ENOMEM; | ||
3796 | + } | ||
3797 | + obj_priv = obj->driver_private; | ||
3798 | + | ||
3799 | + ret = i915_gem_object_pin(obj, 4096); | ||
3800 | + if (ret != 0) { | ||
3801 | + drm_gem_object_unreference(obj); | ||
3802 | + return ret; | ||
3803 | + } | ||
3804 | + | ||
3805 | + dev_priv->status_gfx_addr = obj_priv->gtt_offset; | ||
3806 | + dev_priv->hws_map.offset = dev->agp->base + obj_priv->gtt_offset; | ||
3807 | + dev_priv->hws_map.size = 4096; | ||
3808 | + dev_priv->hws_map.type = 0; | ||
3809 | + dev_priv->hws_map.flags = 0; | ||
3810 | + dev_priv->hws_map.mtrr = 0; | ||
3811 | + | ||
3812 | + drm_core_ioremap(&dev_priv->hws_map, dev); | ||
3813 | + if (dev_priv->hws_map.handle == NULL) { | ||
3814 | + DRM_ERROR("Failed to map status page.\n"); | ||
3815 | + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
3816 | + drm_gem_object_unreference(obj); | ||
3817 | + return -EINVAL; | ||
3818 | + } | ||
3819 | + dev_priv->hws_obj = obj; | ||
3820 | + dev_priv->hw_status_page = dev_priv->hws_map.handle; | ||
3821 | + memset(dev_priv->hw_status_page, 0, PAGE_SIZE); | ||
3822 | + I915_WRITE(HWS_PGA, dev_priv->status_gfx_addr); | ||
3823 | + DRM_DEBUG("hws offset: 0x%08x\n", dev_priv->status_gfx_addr); | ||
3824 | + | ||
3825 | + return 0; | ||
3826 | +} | ||
3827 | + | ||
3828 | +static int | ||
3829 | +i915_gem_init_ringbuffer(struct drm_device *dev) | ||
3830 | +{ | ||
3831 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3832 | + struct drm_gem_object *obj; | ||
3833 | + struct drm_i915_gem_object *obj_priv; | ||
3834 | + int ret; | ||
3835 | + | ||
3836 | + ret = i915_gem_init_hws(dev); | ||
3837 | + if (ret != 0) | ||
3838 | + return ret; | ||
3839 | + | ||
3840 | + obj = drm_gem_object_alloc(dev, 128 * 1024); | ||
3841 | + if (obj == NULL) { | ||
3842 | + DRM_ERROR("Failed to allocate ringbuffer\n"); | ||
3843 | + return -ENOMEM; | ||
3844 | + } | ||
3845 | + obj_priv = obj->driver_private; | ||
3846 | + | ||
3847 | + ret = i915_gem_object_pin(obj, 4096); | ||
3848 | + if (ret != 0) { | ||
3849 | + drm_gem_object_unreference(obj); | ||
3850 | + return ret; | ||
3851 | + } | ||
3852 | + | ||
3853 | + /* Set up the kernel mapping for the ring. */ | ||
3854 | + dev_priv->ring.Size = obj->size; | ||
3855 | + dev_priv->ring.tail_mask = obj->size - 1; | ||
3856 | + | ||
3857 | + dev_priv->ring.map.offset = dev->agp->base + obj_priv->gtt_offset; | ||
3858 | + dev_priv->ring.map.size = obj->size; | ||
3859 | + dev_priv->ring.map.type = 0; | ||
3860 | + dev_priv->ring.map.flags = 0; | ||
3861 | + dev_priv->ring.map.mtrr = 0; | ||
3862 | + | ||
3863 | + drm_core_ioremap(&dev_priv->ring.map, dev); | ||
3864 | + if (dev_priv->ring.map.handle == NULL) { | ||
3865 | + DRM_ERROR("Failed to map ringbuffer.\n"); | ||
3866 | + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | ||
3867 | + drm_gem_object_unreference(obj); | ||
3868 | + return -EINVAL; | ||
3869 | + } | ||
3870 | + dev_priv->ring.ring_obj = obj; | ||
3871 | + dev_priv->ring.virtual_start = dev_priv->ring.map.handle; | ||
3872 | + | ||
3873 | + /* Stop the ring if it's running. */ | ||
3874 | + I915_WRITE(PRB0_CTL, 0); | ||
3875 | + I915_WRITE(PRB0_HEAD, 0); | ||
3876 | + I915_WRITE(PRB0_TAIL, 0); | ||
3877 | + I915_WRITE(PRB0_START, 0); | ||
3878 | + | ||
3879 | + /* Initialize the ring. */ | ||
3880 | + I915_WRITE(PRB0_START, obj_priv->gtt_offset); | ||
3881 | + I915_WRITE(PRB0_CTL, | ||
3882 | + ((obj->size - 4096) & RING_NR_PAGES) | | ||
3883 | + RING_NO_REPORT | | ||
3884 | + RING_VALID); | ||
3885 | + | ||
3886 | + /* Update our cache of the ring state */ | ||
3887 | + i915_kernel_lost_context(dev); | ||
3888 | + | ||
3889 | + return 0; | ||
3890 | +} | ||
3891 | + | ||
3892 | +static void | ||
3893 | +i915_gem_cleanup_ringbuffer(struct drm_device *dev) | ||
3894 | +{ | ||
3895 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3896 | + | ||
3897 | + if (dev_priv->ring.ring_obj == NULL) | ||
3898 | + return; | ||
3899 | + | ||
3900 | + drm_core_ioremapfree(&dev_priv->ring.map, dev); | ||
3901 | + | ||
3902 | + i915_gem_object_unpin(dev_priv->ring.ring_obj); | ||
3903 | + drm_gem_object_unreference(dev_priv->ring.ring_obj); | ||
3904 | + dev_priv->ring.ring_obj = NULL; | ||
3905 | + memset(&dev_priv->ring, 0, sizeof(dev_priv->ring)); | ||
3906 | + | ||
3907 | + if (dev_priv->hws_obj != NULL) { | ||
3908 | + i915_gem_object_unpin(dev_priv->hws_obj); | ||
3909 | + drm_gem_object_unreference(dev_priv->hws_obj); | ||
3910 | + dev_priv->hws_obj = NULL; | ||
3911 | + memset(&dev_priv->hws_map, 0, sizeof(dev_priv->hws_map)); | ||
3912 | + | ||
3913 | + /* Write high address into HWS_PGA when disabling. */ | ||
3914 | + I915_WRITE(HWS_PGA, 0x1ffff000); | ||
3915 | + } | ||
3916 | +} | ||
3917 | + | ||
3918 | +int | ||
3919 | +i915_gem_entervt_ioctl(struct drm_device *dev, void *data, | ||
3920 | + struct drm_file *file_priv) | ||
3921 | +{ | ||
3922 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3923 | + int ret; | ||
3924 | + | ||
3925 | + if (dev_priv->mm.wedged) { | ||
3926 | + DRM_ERROR("Reenabling wedged hardware, good luck\n"); | ||
3927 | + dev_priv->mm.wedged = 0; | ||
3928 | + } | ||
3929 | + | ||
3930 | + ret = i915_gem_init_ringbuffer(dev); | ||
3931 | + if (ret != 0) | ||
3932 | + return ret; | ||
3933 | + | ||
3934 | + mutex_lock(&dev->struct_mutex); | ||
3935 | + BUG_ON(!list_empty(&dev_priv->mm.active_list)); | ||
3936 | + BUG_ON(!list_empty(&dev_priv->mm.flushing_list)); | ||
3937 | + BUG_ON(!list_empty(&dev_priv->mm.inactive_list)); | ||
3938 | + BUG_ON(!list_empty(&dev_priv->mm.request_list)); | ||
3939 | + dev_priv->mm.suspended = 0; | ||
3940 | + mutex_unlock(&dev->struct_mutex); | ||
3941 | + return 0; | ||
3942 | +} | ||
3943 | + | ||
3944 | +int | ||
3945 | +i915_gem_leavevt_ioctl(struct drm_device *dev, void *data, | ||
3946 | + struct drm_file *file_priv) | ||
3947 | +{ | ||
3948 | + int ret; | ||
3949 | + | ||
3950 | + mutex_lock(&dev->struct_mutex); | ||
3951 | + ret = i915_gem_idle(dev); | ||
3952 | + if (ret == 0) | ||
3953 | + i915_gem_cleanup_ringbuffer(dev); | ||
3954 | + mutex_unlock(&dev->struct_mutex); | ||
3955 | + | ||
3956 | + return 0; | ||
3957 | +} | ||
3958 | + | ||
3959 | +void | ||
3960 | +i915_gem_lastclose(struct drm_device *dev) | ||
3961 | +{ | ||
3962 | + int ret; | ||
3963 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3964 | + | ||
3965 | + mutex_lock(&dev->struct_mutex); | ||
3966 | + | ||
3967 | + if (dev_priv->ring.ring_obj != NULL) { | ||
3968 | + ret = i915_gem_idle(dev); | ||
3969 | + if (ret) | ||
3970 | + DRM_ERROR("failed to idle hardware: %d\n", ret); | ||
3971 | + | ||
3972 | + i915_gem_cleanup_ringbuffer(dev); | ||
3973 | + } | ||
3974 | + | ||
3975 | + mutex_unlock(&dev->struct_mutex); | ||
3976 | +} | ||
3977 | + | ||
3978 | +void | ||
3979 | +i915_gem_load(struct drm_device *dev) | ||
3980 | +{ | ||
3981 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
3982 | + | ||
3983 | + INIT_LIST_HEAD(&dev_priv->mm.active_list); | ||
3984 | + INIT_LIST_HEAD(&dev_priv->mm.flushing_list); | ||
3985 | + INIT_LIST_HEAD(&dev_priv->mm.inactive_list); | ||
3986 | + INIT_LIST_HEAD(&dev_priv->mm.request_list); | ||
3987 | + INIT_DELAYED_WORK(&dev_priv->mm.retire_work, | ||
3988 | + i915_gem_retire_work_handler); | ||
3989 | + dev_priv->mm.next_gem_seqno = 1; | ||
3990 | + | ||
3991 | + i915_gem_detect_bit_6_swizzle(dev); | ||
3992 | +} | ||
3993 | diff --git a/drivers/gpu/drm/i915/i915_gem_debug.c b/drivers/gpu/drm/i915/i915_gem_debug.c | ||
3994 | new file mode 100644 | ||
3995 | index 0000000..131c088 | ||
3996 | --- /dev/null | ||
3997 | +++ b/drivers/gpu/drm/i915/i915_gem_debug.c | ||
3998 | @@ -0,0 +1,201 @@ | ||
3999 | +/* | ||
4000 | + * Copyright © 2008 Intel Corporation | ||
4001 | + * | ||
4002 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
4003 | + * copy of this software and associated documentation files (the "Software"), | ||
4004 | + * to deal in the Software without restriction, including without limitation | ||
4005 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
4006 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
4007 | + * Software is furnished to do so, subject to the following conditions: | ||
4008 | + * | ||
4009 | + * The above copyright notice and this permission notice (including the next | ||
4010 | + * paragraph) shall be included in all copies or substantial portions of the | ||
4011 | + * Software. | ||
4012 | + * | ||
4013 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
4014 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
4015 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
4016 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
4017 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
4018 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
4019 | + * IN THE SOFTWARE. | ||
4020 | + * | ||
4021 | + * Authors: | ||
4022 | + * Keith Packard <keithp@keithp.com> | ||
4023 | + * | ||
4024 | + */ | ||
4025 | + | ||
4026 | +#include "drmP.h" | ||
4027 | +#include "drm.h" | ||
4028 | +#include "i915_drm.h" | ||
4029 | +#include "i915_drv.h" | ||
4030 | + | ||
4031 | +#if WATCH_INACTIVE | ||
4032 | +void | ||
4033 | +i915_verify_inactive(struct drm_device *dev, char *file, int line) | ||
4034 | +{ | ||
4035 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4036 | + struct drm_gem_object *obj; | ||
4037 | + struct drm_i915_gem_object *obj_priv; | ||
4038 | + | ||
4039 | + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
4040 | + obj = obj_priv->obj; | ||
4041 | + if (obj_priv->pin_count || obj_priv->active || | ||
4042 | + (obj->write_domain & ~(I915_GEM_DOMAIN_CPU | | ||
4043 | + I915_GEM_DOMAIN_GTT))) | ||
4044 | + DRM_ERROR("inactive %p (p %d a %d w %x) %s:%d\n", | ||
4045 | + obj, | ||
4046 | + obj_priv->pin_count, obj_priv->active, | ||
4047 | + obj->write_domain, file, line); | ||
4048 | + } | ||
4049 | +} | ||
4050 | +#endif /* WATCH_INACTIVE */ | ||
4051 | + | ||
4052 | + | ||
4053 | +#if WATCH_BUF | WATCH_EXEC | WATCH_PWRITE | ||
4054 | +static void | ||
4055 | +i915_gem_dump_page(struct page *page, uint32_t start, uint32_t end, | ||
4056 | + uint32_t bias, uint32_t mark) | ||
4057 | +{ | ||
4058 | + uint32_t *mem = kmap_atomic(page, KM_USER0); | ||
4059 | + int i; | ||
4060 | + for (i = start; i < end; i += 4) | ||
4061 | + DRM_INFO("%08x: %08x%s\n", | ||
4062 | + (int) (bias + i), mem[i / 4], | ||
4063 | + (bias + i == mark) ? " ********" : ""); | ||
4064 | + kunmap_atomic(mem, KM_USER0); | ||
4065 | + /* give syslog time to catch up */ | ||
4066 | + msleep(1); | ||
4067 | +} | ||
4068 | + | ||
4069 | +void | ||
4070 | +i915_gem_dump_object(struct drm_gem_object *obj, int len, | ||
4071 | + const char *where, uint32_t mark) | ||
4072 | +{ | ||
4073 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
4074 | + int page; | ||
4075 | + | ||
4076 | + DRM_INFO("%s: object at offset %08x\n", where, obj_priv->gtt_offset); | ||
4077 | + for (page = 0; page < (len + PAGE_SIZE-1) / PAGE_SIZE; page++) { | ||
4078 | + int page_len, chunk, chunk_len; | ||
4079 | + | ||
4080 | + page_len = len - page * PAGE_SIZE; | ||
4081 | + if (page_len > PAGE_SIZE) | ||
4082 | + page_len = PAGE_SIZE; | ||
4083 | + | ||
4084 | + for (chunk = 0; chunk < page_len; chunk += 128) { | ||
4085 | + chunk_len = page_len - chunk; | ||
4086 | + if (chunk_len > 128) | ||
4087 | + chunk_len = 128; | ||
4088 | + i915_gem_dump_page(obj_priv->page_list[page], | ||
4089 | + chunk, chunk + chunk_len, | ||
4090 | + obj_priv->gtt_offset + | ||
4091 | + page * PAGE_SIZE, | ||
4092 | + mark); | ||
4093 | + } | ||
4094 | + } | ||
4095 | +} | ||
4096 | +#endif | ||
4097 | + | ||
4098 | +#if WATCH_LRU | ||
4099 | +void | ||
4100 | +i915_dump_lru(struct drm_device *dev, const char *where) | ||
4101 | +{ | ||
4102 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4103 | + struct drm_i915_gem_object *obj_priv; | ||
4104 | + | ||
4105 | + DRM_INFO("active list %s {\n", where); | ||
4106 | + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, | ||
4107 | + list) | ||
4108 | + { | ||
4109 | + DRM_INFO(" %p: %08x\n", obj_priv, | ||
4110 | + obj_priv->last_rendering_seqno); | ||
4111 | + } | ||
4112 | + DRM_INFO("}\n"); | ||
4113 | + DRM_INFO("flushing list %s {\n", where); | ||
4114 | + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, | ||
4115 | + list) | ||
4116 | + { | ||
4117 | + DRM_INFO(" %p: %08x\n", obj_priv, | ||
4118 | + obj_priv->last_rendering_seqno); | ||
4119 | + } | ||
4120 | + DRM_INFO("}\n"); | ||
4121 | + DRM_INFO("inactive %s {\n", where); | ||
4122 | + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, list) { | ||
4123 | + DRM_INFO(" %p: %08x\n", obj_priv, | ||
4124 | + obj_priv->last_rendering_seqno); | ||
4125 | + } | ||
4126 | + DRM_INFO("}\n"); | ||
4127 | +} | ||
4128 | +#endif | ||
4129 | + | ||
4130 | + | ||
4131 | +#if WATCH_COHERENCY | ||
4132 | +void | ||
4133 | +i915_gem_object_check_coherency(struct drm_gem_object *obj, int handle) | ||
4134 | +{ | ||
4135 | + struct drm_device *dev = obj->dev; | ||
4136 | + struct drm_i915_gem_object *obj_priv = obj->driver_private; | ||
4137 | + int page; | ||
4138 | + uint32_t *gtt_mapping; | ||
4139 | + uint32_t *backing_map = NULL; | ||
4140 | + int bad_count = 0; | ||
4141 | + | ||
4142 | + DRM_INFO("%s: checking coherency of object %p@0x%08x (%d, %dkb):\n", | ||
4143 | + __func__, obj, obj_priv->gtt_offset, handle, | ||
4144 | + obj->size / 1024); | ||
4145 | + | ||
4146 | + gtt_mapping = ioremap(dev->agp->base + obj_priv->gtt_offset, | ||
4147 | + obj->size); | ||
4148 | + if (gtt_mapping == NULL) { | ||
4149 | + DRM_ERROR("failed to map GTT space\n"); | ||
4150 | + return; | ||
4151 | + } | ||
4152 | + | ||
4153 | + for (page = 0; page < obj->size / PAGE_SIZE; page++) { | ||
4154 | + int i; | ||
4155 | + | ||
4156 | + backing_map = kmap_atomic(obj_priv->page_list[page], KM_USER0); | ||
4157 | + | ||
4158 | + if (backing_map == NULL) { | ||
4159 | + DRM_ERROR("failed to map backing page\n"); | ||
4160 | + goto out; | ||
4161 | + } | ||
4162 | + | ||
4163 | + for (i = 0; i < PAGE_SIZE / 4; i++) { | ||
4164 | + uint32_t cpuval = backing_map[i]; | ||
4165 | + uint32_t gttval = readl(gtt_mapping + | ||
4166 | + page * 1024 + i); | ||
4167 | + | ||
4168 | + if (cpuval != gttval) { | ||
4169 | + DRM_INFO("incoherent CPU vs GPU at 0x%08x: " | ||
4170 | + "0x%08x vs 0x%08x\n", | ||
4171 | + (int)(obj_priv->gtt_offset + | ||
4172 | + page * PAGE_SIZE + i * 4), | ||
4173 | + cpuval, gttval); | ||
4174 | + if (bad_count++ >= 8) { | ||
4175 | + DRM_INFO("...\n"); | ||
4176 | + goto out; | ||
4177 | + } | ||
4178 | + } | ||
4179 | + } | ||
4180 | + kunmap_atomic(backing_map, KM_USER0); | ||
4181 | + backing_map = NULL; | ||
4182 | + } | ||
4183 | + | ||
4184 | + out: | ||
4185 | + if (backing_map != NULL) | ||
4186 | + kunmap_atomic(backing_map, KM_USER0); | ||
4187 | + iounmap(gtt_mapping); | ||
4188 | + | ||
4189 | + /* give syslog time to catch up */ | ||
4190 | + msleep(1); | ||
4191 | + | ||
4192 | + /* Directly flush the object, since we just loaded values with the CPU | ||
4193 | + * from the backing pages and we don't want to disturb the cache | ||
4194 | + * management that we're trying to observe. | ||
4195 | + */ | ||
4196 | + | ||
4197 | + i915_gem_clflush_object(obj); | ||
4198 | +} | ||
4199 | +#endif | ||
4200 | diff --git a/drivers/gpu/drm/i915/i915_gem_proc.c b/drivers/gpu/drm/i915/i915_gem_proc.c | ||
4201 | new file mode 100644 | ||
4202 | index 0000000..15d4160 | ||
4203 | --- /dev/null | ||
4204 | +++ b/drivers/gpu/drm/i915/i915_gem_proc.c | ||
4205 | @@ -0,0 +1,292 @@ | ||
4206 | +/* | ||
4207 | + * Copyright © 2008 Intel Corporation | ||
4208 | + * | ||
4209 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
4210 | + * copy of this software and associated documentation files (the "Software"), | ||
4211 | + * to deal in the Software without restriction, including without limitation | ||
4212 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
4213 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
4214 | + * Software is furnished to do so, subject to the following conditions: | ||
4215 | + * | ||
4216 | + * The above copyright notice and this permission notice (including the next | ||
4217 | + * paragraph) shall be included in all copies or substantial portions of the | ||
4218 | + * Software. | ||
4219 | + * | ||
4220 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
4221 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
4222 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
4223 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
4224 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
4225 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
4226 | + * IN THE SOFTWARE. | ||
4227 | + * | ||
4228 | + * Authors: | ||
4229 | + * Eric Anholt <eric@anholt.net> | ||
4230 | + * Keith Packard <keithp@keithp.com> | ||
4231 | + * | ||
4232 | + */ | ||
4233 | + | ||
4234 | +#include "drmP.h" | ||
4235 | +#include "drm.h" | ||
4236 | +#include "i915_drm.h" | ||
4237 | +#include "i915_drv.h" | ||
4238 | + | ||
4239 | +static int i915_gem_active_info(char *buf, char **start, off_t offset, | ||
4240 | + int request, int *eof, void *data) | ||
4241 | +{ | ||
4242 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4243 | + struct drm_device *dev = minor->dev; | ||
4244 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4245 | + struct drm_i915_gem_object *obj_priv; | ||
4246 | + int len = 0; | ||
4247 | + | ||
4248 | + if (offset > DRM_PROC_LIMIT) { | ||
4249 | + *eof = 1; | ||
4250 | + return 0; | ||
4251 | + } | ||
4252 | + | ||
4253 | + *start = &buf[offset]; | ||
4254 | + *eof = 0; | ||
4255 | + DRM_PROC_PRINT("Active:\n"); | ||
4256 | + list_for_each_entry(obj_priv, &dev_priv->mm.active_list, | ||
4257 | + list) | ||
4258 | + { | ||
4259 | + struct drm_gem_object *obj = obj_priv->obj; | ||
4260 | + if (obj->name) { | ||
4261 | + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
4262 | + obj, obj->name, | ||
4263 | + obj->read_domains, obj->write_domain, | ||
4264 | + obj_priv->last_rendering_seqno); | ||
4265 | + } else { | ||
4266 | + DRM_PROC_PRINT(" %p: %08x %08x %d\n", | ||
4267 | + obj, | ||
4268 | + obj->read_domains, obj->write_domain, | ||
4269 | + obj_priv->last_rendering_seqno); | ||
4270 | + } | ||
4271 | + } | ||
4272 | + if (len > request + offset) | ||
4273 | + return request; | ||
4274 | + *eof = 1; | ||
4275 | + return len - offset; | ||
4276 | +} | ||
4277 | + | ||
4278 | +static int i915_gem_flushing_info(char *buf, char **start, off_t offset, | ||
4279 | + int request, int *eof, void *data) | ||
4280 | +{ | ||
4281 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4282 | + struct drm_device *dev = minor->dev; | ||
4283 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4284 | + struct drm_i915_gem_object *obj_priv; | ||
4285 | + int len = 0; | ||
4286 | + | ||
4287 | + if (offset > DRM_PROC_LIMIT) { | ||
4288 | + *eof = 1; | ||
4289 | + return 0; | ||
4290 | + } | ||
4291 | + | ||
4292 | + *start = &buf[offset]; | ||
4293 | + *eof = 0; | ||
4294 | + DRM_PROC_PRINT("Flushing:\n"); | ||
4295 | + list_for_each_entry(obj_priv, &dev_priv->mm.flushing_list, | ||
4296 | + list) | ||
4297 | + { | ||
4298 | + struct drm_gem_object *obj = obj_priv->obj; | ||
4299 | + if (obj->name) { | ||
4300 | + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
4301 | + obj, obj->name, | ||
4302 | + obj->read_domains, obj->write_domain, | ||
4303 | + obj_priv->last_rendering_seqno); | ||
4304 | + } else { | ||
4305 | + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, | ||
4306 | + obj->read_domains, obj->write_domain, | ||
4307 | + obj_priv->last_rendering_seqno); | ||
4308 | + } | ||
4309 | + } | ||
4310 | + if (len > request + offset) | ||
4311 | + return request; | ||
4312 | + *eof = 1; | ||
4313 | + return len - offset; | ||
4314 | +} | ||
4315 | + | ||
4316 | +static int i915_gem_inactive_info(char *buf, char **start, off_t offset, | ||
4317 | + int request, int *eof, void *data) | ||
4318 | +{ | ||
4319 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4320 | + struct drm_device *dev = minor->dev; | ||
4321 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4322 | + struct drm_i915_gem_object *obj_priv; | ||
4323 | + int len = 0; | ||
4324 | + | ||
4325 | + if (offset > DRM_PROC_LIMIT) { | ||
4326 | + *eof = 1; | ||
4327 | + return 0; | ||
4328 | + } | ||
4329 | + | ||
4330 | + *start = &buf[offset]; | ||
4331 | + *eof = 0; | ||
4332 | + DRM_PROC_PRINT("Inactive:\n"); | ||
4333 | + list_for_each_entry(obj_priv, &dev_priv->mm.inactive_list, | ||
4334 | + list) | ||
4335 | + { | ||
4336 | + struct drm_gem_object *obj = obj_priv->obj; | ||
4337 | + if (obj->name) { | ||
4338 | + DRM_PROC_PRINT(" %p(%d): %08x %08x %d\n", | ||
4339 | + obj, obj->name, | ||
4340 | + obj->read_domains, obj->write_domain, | ||
4341 | + obj_priv->last_rendering_seqno); | ||
4342 | + } else { | ||
4343 | + DRM_PROC_PRINT(" %p: %08x %08x %d\n", obj, | ||
4344 | + obj->read_domains, obj->write_domain, | ||
4345 | + obj_priv->last_rendering_seqno); | ||
4346 | + } | ||
4347 | + } | ||
4348 | + if (len > request + offset) | ||
4349 | + return request; | ||
4350 | + *eof = 1; | ||
4351 | + return len - offset; | ||
4352 | +} | ||
4353 | + | ||
4354 | +static int i915_gem_request_info(char *buf, char **start, off_t offset, | ||
4355 | + int request, int *eof, void *data) | ||
4356 | +{ | ||
4357 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4358 | + struct drm_device *dev = minor->dev; | ||
4359 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4360 | + struct drm_i915_gem_request *gem_request; | ||
4361 | + int len = 0; | ||
4362 | + | ||
4363 | + if (offset > DRM_PROC_LIMIT) { | ||
4364 | + *eof = 1; | ||
4365 | + return 0; | ||
4366 | + } | ||
4367 | + | ||
4368 | + *start = &buf[offset]; | ||
4369 | + *eof = 0; | ||
4370 | + DRM_PROC_PRINT("Request:\n"); | ||
4371 | + list_for_each_entry(gem_request, &dev_priv->mm.request_list, | ||
4372 | + list) | ||
4373 | + { | ||
4374 | + DRM_PROC_PRINT(" %d @ %d %08x\n", | ||
4375 | + gem_request->seqno, | ||
4376 | + (int) (jiffies - gem_request->emitted_jiffies), | ||
4377 | + gem_request->flush_domains); | ||
4378 | + } | ||
4379 | + if (len > request + offset) | ||
4380 | + return request; | ||
4381 | + *eof = 1; | ||
4382 | + return len - offset; | ||
4383 | +} | ||
4384 | + | ||
4385 | +static int i915_gem_seqno_info(char *buf, char **start, off_t offset, | ||
4386 | + int request, int *eof, void *data) | ||
4387 | +{ | ||
4388 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4389 | + struct drm_device *dev = minor->dev; | ||
4390 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4391 | + int len = 0; | ||
4392 | + | ||
4393 | + if (offset > DRM_PROC_LIMIT) { | ||
4394 | + *eof = 1; | ||
4395 | + return 0; | ||
4396 | + } | ||
4397 | + | ||
4398 | + *start = &buf[offset]; | ||
4399 | + *eof = 0; | ||
4400 | + DRM_PROC_PRINT("Current sequence: %d\n", i915_get_gem_seqno(dev)); | ||
4401 | + DRM_PROC_PRINT("Waiter sequence: %d\n", | ||
4402 | + dev_priv->mm.waiting_gem_seqno); | ||
4403 | + DRM_PROC_PRINT("IRQ sequence: %d\n", dev_priv->mm.irq_gem_seqno); | ||
4404 | + if (len > request + offset) | ||
4405 | + return request; | ||
4406 | + *eof = 1; | ||
4407 | + return len - offset; | ||
4408 | +} | ||
4409 | + | ||
4410 | + | ||
4411 | +static int i915_interrupt_info(char *buf, char **start, off_t offset, | ||
4412 | + int request, int *eof, void *data) | ||
4413 | +{ | ||
4414 | + struct drm_minor *minor = (struct drm_minor *) data; | ||
4415 | + struct drm_device *dev = minor->dev; | ||
4416 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4417 | + int len = 0; | ||
4418 | + | ||
4419 | + if (offset > DRM_PROC_LIMIT) { | ||
4420 | + *eof = 1; | ||
4421 | + return 0; | ||
4422 | + } | ||
4423 | + | ||
4424 | + *start = &buf[offset]; | ||
4425 | + *eof = 0; | ||
4426 | + DRM_PROC_PRINT("Interrupt enable: %08x\n", | ||
4427 | + I915_READ(IER)); | ||
4428 | + DRM_PROC_PRINT("Interrupt identity: %08x\n", | ||
4429 | + I915_READ(IIR)); | ||
4430 | + DRM_PROC_PRINT("Interrupt mask: %08x\n", | ||
4431 | + I915_READ(IMR)); | ||
4432 | + DRM_PROC_PRINT("Pipe A stat: %08x\n", | ||
4433 | + I915_READ(PIPEASTAT)); | ||
4434 | + DRM_PROC_PRINT("Pipe B stat: %08x\n", | ||
4435 | + I915_READ(PIPEBSTAT)); | ||
4436 | + DRM_PROC_PRINT("Interrupts received: %d\n", | ||
4437 | + atomic_read(&dev_priv->irq_received)); | ||
4438 | + DRM_PROC_PRINT("Current sequence: %d\n", | ||
4439 | + i915_get_gem_seqno(dev)); | ||
4440 | + DRM_PROC_PRINT("Waiter sequence: %d\n", | ||
4441 | + dev_priv->mm.waiting_gem_seqno); | ||
4442 | + DRM_PROC_PRINT("IRQ sequence: %d\n", | ||
4443 | + dev_priv->mm.irq_gem_seqno); | ||
4444 | + if (len > request + offset) | ||
4445 | + return request; | ||
4446 | + *eof = 1; | ||
4447 | + return len - offset; | ||
4448 | +} | ||
4449 | + | ||
4450 | +static struct drm_proc_list { | ||
4451 | + /** file name */ | ||
4452 | + const char *name; | ||
4453 | + /** proc callback*/ | ||
4454 | + int (*f) (char *, char **, off_t, int, int *, void *); | ||
4455 | +} i915_gem_proc_list[] = { | ||
4456 | + {"i915_gem_active", i915_gem_active_info}, | ||
4457 | + {"i915_gem_flushing", i915_gem_flushing_info}, | ||
4458 | + {"i915_gem_inactive", i915_gem_inactive_info}, | ||
4459 | + {"i915_gem_request", i915_gem_request_info}, | ||
4460 | + {"i915_gem_seqno", i915_gem_seqno_info}, | ||
4461 | + {"i915_gem_interrupt", i915_interrupt_info}, | ||
4462 | +}; | ||
4463 | + | ||
4464 | +#define I915_GEM_PROC_ENTRIES ARRAY_SIZE(i915_gem_proc_list) | ||
4465 | + | ||
4466 | +int i915_gem_proc_init(struct drm_minor *minor) | ||
4467 | +{ | ||
4468 | + struct proc_dir_entry *ent; | ||
4469 | + int i, j; | ||
4470 | + | ||
4471 | + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) { | ||
4472 | + ent = create_proc_entry(i915_gem_proc_list[i].name, | ||
4473 | + S_IFREG | S_IRUGO, minor->dev_root); | ||
4474 | + if (!ent) { | ||
4475 | + DRM_ERROR("Cannot create /proc/dri/.../%s\n", | ||
4476 | + i915_gem_proc_list[i].name); | ||
4477 | + for (j = 0; j < i; j++) | ||
4478 | + remove_proc_entry(i915_gem_proc_list[i].name, | ||
4479 | + minor->dev_root); | ||
4480 | + return -1; | ||
4481 | + } | ||
4482 | + ent->read_proc = i915_gem_proc_list[i].f; | ||
4483 | + ent->data = minor; | ||
4484 | + } | ||
4485 | + return 0; | ||
4486 | +} | ||
4487 | + | ||
4488 | +void i915_gem_proc_cleanup(struct drm_minor *minor) | ||
4489 | +{ | ||
4490 | + int i; | ||
4491 | + | ||
4492 | + if (!minor->dev_root) | ||
4493 | + return; | ||
4494 | + | ||
4495 | + for (i = 0; i < I915_GEM_PROC_ENTRIES; i++) | ||
4496 | + remove_proc_entry(i915_gem_proc_list[i].name, minor->dev_root); | ||
4497 | +} | ||
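
[Editor's note: the six /proc entries registered above are plain text files under the DRM minor's proc directory. A minimal sketch of how they can be read from userland follows; the /proc/dri/0 path assumes the first DRM minor and is illustrative only, not part of the patch.]

#include <stdio.h>

int main(void)
{
	/* Entry names match i915_gem_proc_list[]; minor 0 is an assumption
	 * about which DRM device is being examined. */
	static const char *entries[] = {
		"/proc/dri/0/i915_gem_active",
		"/proc/dri/0/i915_gem_flushing",
		"/proc/dri/0/i915_gem_inactive",
		"/proc/dri/0/i915_gem_request",
		"/proc/dri/0/i915_gem_seqno",
		"/proc/dri/0/i915_gem_interrupt",
	};
	char line[256];
	unsigned int i;

	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
		FILE *f = fopen(entries[i], "r");
		if (f == NULL)
			continue;	/* entry absent on kernels without GEM */
		while (fgets(line, sizeof(line), f) != NULL)
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}
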
4498 | diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c | ||
4499 | new file mode 100644 | ||
4500 | index 0000000..0c1b3a0 | ||
4501 | --- /dev/null | ||
4502 | +++ b/drivers/gpu/drm/i915/i915_gem_tiling.c | ||
4503 | @@ -0,0 +1,256 @@ | ||
4504 | +/* | ||
4505 | + * Copyright © 2008 Intel Corporation | ||
4506 | + * | ||
4507 | + * Permission is hereby granted, free of charge, to any person obtaining a | ||
4508 | + * copy of this software and associated documentation files (the "Software"), | ||
4509 | + * to deal in the Software without restriction, including without limitation | ||
4510 | + * the rights to use, copy, modify, merge, publish, distribute, sublicense, | ||
4511 | + * and/or sell copies of the Software, and to permit persons to whom the | ||
4512 | + * Software is furnished to do so, subject to the following conditions: | ||
4513 | + * | ||
4514 | + * The above copyright notice and this permission notice (including the next | ||
4515 | + * paragraph) shall be included in all copies or substantial portions of the | ||
4516 | + * Software. | ||
4517 | + * | ||
4518 | + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||
4519 | + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||
4520 | + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | ||
4521 | + * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||
4522 | + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | ||
4523 | + * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | ||
4524 | + * IN THE SOFTWARE. | ||
4525 | + * | ||
4526 | + * Authors: | ||
4527 | + * Eric Anholt <eric@anholt.net> | ||
4528 | + * | ||
4529 | + */ | ||
4530 | + | ||
4531 | +#include "drmP.h" | ||
4532 | +#include "drm.h" | ||
4533 | +#include "i915_drm.h" | ||
4534 | +#include "i915_drv.h" | ||
4535 | + | ||
4536 | +/** @file i915_gem_tiling.c | ||
4537 | + * | ||
4538 | + * Support for managing tiling state of buffer objects. | ||
4539 | + * | ||
4540 | + * The idea behind tiling is to increase cache hit rates by rearranging | ||
4541 | + * pixel data so that a group of pixel accesses are in the same cacheline. | ||
4542 | + * Performance improvement from doing this on the back/depth buffer are on | ||
4543 | + * the order of 30%. | ||
4544 | + * | ||
4545 | + * Intel architectures make this somewhat more complicated, though, by | ||
4546 | + * adjustments made to addressing of data when the memory is in interleaved | ||
4547 | + * mode (matched pairs of DIMMS) to improve memory bandwidth. | ||
4548 | + * For interleaved memory, the CPU sends every sequential 64 bytes | ||
4549 | + * to an alternate memory channel so it can get the bandwidth from both. | ||
4550 | + * | ||
4551 | + * The GPU also rearranges its accesses for increased bandwidth to interleaved | ||
4552 | + * memory, and it matches what the CPU does for non-tiled. However, when tiled | ||
4553 | + * it does it a little differently, since one walks addresses not just in the | ||
4554 | + * X direction but also Y. So, along with alternating channels when bit | ||
4555 | + * 6 of the address flips, it also alternates when other bits flip -- Bits 9 | ||
4556 | + * (every 512 bytes, an X tile scanline) and 10 (every two X tile scanlines) | ||
4557 | + * are common to both the 915 and 965-class hardware. | ||
4558 | + * | ||
4559 | + * The CPU also sometimes XORs in higher bits as well, to improve | ||
4560 | + * bandwidth doing strided access like we do so frequently in graphics. This | ||
4561 | + * is called "Channel XOR Randomization" in the MCH documentation. The result | ||
4562 | + * is that the CPU is XORing in either bit 11 or bit 17 to bit 6 of its address | ||
4563 | + * decode. | ||
4564 | + * | ||
4565 | + * All of this bit 6 XORing has an effect on our memory management, | ||
4566 | + * as we need to make sure that the 3d driver can correctly address object | ||
4567 | + * contents. | ||
4568 | + * | ||
4569 | + * If we don't have interleaved memory, all tiling is safe and no swizzling is | ||
4570 | + * required. | ||
4571 | + * | ||
4572 | + * When bit 17 is XORed in, we simply refuse to tile at all. Bit | ||
4579 4573 | + * 17 is not just a page offset, so as we page an object out and back in, | ||
4574 | + * individual pages in it will have different bit 17 addresses, resulting in | ||
4575 | + * each 64 bytes being swapped with its neighbor! | ||
4576 | + * | ||
4583 4577 | + * Otherwise, if interleaved, we have to tell the 3d driver what address | ||
4584 4578 | + * swizzling it needs to do, since it's writing with the CPU to the pages | ||
4579 | + * (bit 6 and potentially bit 11 XORed in), and the GPU is reading from the | ||
4580 | + * pages (bit 6, 9, and 10 XORed in), resulting in a cumulative bit swizzling | ||
4581 | + * required by the CPU of XORing in bit 6, 9, 10, and potentially 11, in order | ||
4582 | + * to match what the GPU expects. | ||
4583 | + */ | ||
4584 | + | ||
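
/*
 * Editor's illustration, not part of the patch: what the cumulative swizzle
 * described above means in practice. For I915_BIT_6_SWIZZLE_9_10, software
 * touching a tiled object's backing pages with the CPU must flip bit 6 of an
 * object offset whenever bit 9 XOR bit 10 of that offset is set.
 */
static unsigned long example_swizzle_bit_6_9_10(unsigned long offset)
{
	/* new bit 6 = old bit 6 ^ bit 9 ^ bit 10 */
	if (((offset >> 9) ^ (offset >> 10)) & 1)
		offset ^= 1 << 6;	/* swaps this 64-byte half with its neighbor */
	return offset;
}
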
4585 | +/** | ||
4586 | + * Detects bit 6 swizzling of address lookup between IGD access and CPU | ||
4587 | + * access through main memory. | ||
4588 | + */ | ||
4589 | +void | ||
4590 | +i915_gem_detect_bit_6_swizzle(struct drm_device *dev) | ||
4591 | +{ | ||
4592 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4593 | + uint32_t swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4594 | + uint32_t swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4595 | + | ||
4596 | + if (!IS_I9XX(dev)) { | ||
4597 | + /* As far as we know, the 865 doesn't have these bit 6 | ||
4598 | + * swizzling issues. | ||
4599 | + */ | ||
4600 | + swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
4601 | + swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
4602 | + } else if (!IS_I965G(dev) || IS_I965GM(dev)) { | ||
4603 | + uint32_t dcc; | ||
4604 | + | ||
4605 | + /* On 915-945 and GM965, channel interleave by the CPU is | ||
4606 | + * determined by DCC. The CPU will alternate based on bit 6 | ||
4607 | + * in interleaved mode, and the GPU will then also alternate | ||
4608 | + * on bit 6, 9, and 10 for X, but the CPU may also optionally | ||
4609 | + * alternate based on bit 17 (XOR not disabled and XOR | ||
4610 | + * bit == 17). | ||
4611 | + */ | ||
4612 | + dcc = I915_READ(DCC); | ||
4613 | + switch (dcc & DCC_ADDRESSING_MODE_MASK) { | ||
4614 | + case DCC_ADDRESSING_MODE_SINGLE_CHANNEL: | ||
4615 | + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC: | ||
4616 | + swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
4617 | + swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
4618 | + break; | ||
4619 | + case DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED: | ||
4620 | + if (IS_I915G(dev) || IS_I915GM(dev) || | ||
4621 | + dcc & DCC_CHANNEL_XOR_DISABLE) { | ||
4622 | + swizzle_x = I915_BIT_6_SWIZZLE_9_10; | ||
4623 | + swizzle_y = I915_BIT_6_SWIZZLE_9; | ||
4624 | + } else if (IS_I965GM(dev)) { | ||
4625 | + /* GM965 only does bit 11-based channel | ||
4626 | + * randomization | ||
4627 | + */ | ||
4628 | + swizzle_x = I915_BIT_6_SWIZZLE_9_10_11; | ||
4629 | + swizzle_y = I915_BIT_6_SWIZZLE_9_11; | ||
4630 | + } else { | ||
4631 | + /* Bit 17 or perhaps other swizzling */ | ||
4632 | + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4633 | + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4634 | + } | ||
4635 | + break; | ||
4636 | + } | ||
4637 | + if (dcc == 0xffffffff) { | ||
4638 | + DRM_ERROR("Couldn't read from MCHBAR. " | ||
4639 | + "Disabling tiling.\n"); | ||
4640 | + swizzle_x = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4641 | + swizzle_y = I915_BIT_6_SWIZZLE_UNKNOWN; | ||
4642 | + } | ||
4643 | + } else { | ||
4644 | + /* The 965, G33, and newer, have a very flexible memory | ||
4645 | + * configuration. It will enable dual-channel mode | ||
4646 | + * (interleaving) on as much memory as it can, and the GPU | ||
4647 | + * will additionally sometimes enable different bit 6 | ||
4648 | + * swizzling for tiled objects from the CPU. | ||
4649 | + * | ||
4650 | + * Here's what I found on the G965: | ||
4651 | + * slot fill memory size swizzling | ||
4652 | + * 0A 0B 1A 1B 1-ch 2-ch | ||
4653 | + * 512 0 0 0 512 0 O | ||
4654 | + * 512 0 512 0 16 1008 X | ||
4655 | + * 512 0 0 512 16 1008 X | ||
4656 | + * 0 512 0 512 16 1008 X | ||
4657 | + * 1024 1024 1024 0 2048 1024 O | ||
4658 | + * | ||
4659 | + * We could probably detect this based on either the DRB | ||
4660 | + * matching, which was the case for the swizzling required in | ||
4661 | + * the table above, or from the 1-ch value being less than | ||
4662 | + * the minimum size of a rank. | ||
4663 | + */ | ||
4664 | + if (I915_READ16(C0DRB3) != I915_READ16(C1DRB3)) { | ||
4665 | + swizzle_x = I915_BIT_6_SWIZZLE_NONE; | ||
4666 | + swizzle_y = I915_BIT_6_SWIZZLE_NONE; | ||
4667 | + } else { | ||
4668 | + swizzle_x = I915_BIT_6_SWIZZLE_9_10; | ||
4669 | + swizzle_y = I915_BIT_6_SWIZZLE_9; | ||
4670 | + } | ||
4671 | + } | ||
4672 | + | ||
4673 | + dev_priv->mm.bit_6_swizzle_x = swizzle_x; | ||
4674 | + dev_priv->mm.bit_6_swizzle_y = swizzle_y; | ||
4675 | +} | ||
4676 | + | ||
4677 | +/** | ||
4678 | + * Sets the tiling mode of an object, returning the required swizzling of | ||
4679 | + * bit 6 of addresses in the object. | ||
4680 | + */ | ||
4681 | +int | ||
4682 | +i915_gem_set_tiling(struct drm_device *dev, void *data, | ||
4683 | + struct drm_file *file_priv) | ||
4684 | +{ | ||
4685 | + struct drm_i915_gem_set_tiling *args = data; | ||
4686 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4687 | + struct drm_gem_object *obj; | ||
4688 | + struct drm_i915_gem_object *obj_priv; | ||
4689 | + | ||
4690 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4691 | + if (obj == NULL) | ||
4692 | + return -EINVAL; | ||
4693 | + obj_priv = obj->driver_private; | ||
4694 | + | ||
4695 | + mutex_lock(&dev->struct_mutex); | ||
4696 | + | ||
4697 | + if (args->tiling_mode == I915_TILING_NONE) { | ||
4698 | + obj_priv->tiling_mode = I915_TILING_NONE; | ||
4699 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | ||
4700 | + } else { | ||
4701 | + if (args->tiling_mode == I915_TILING_X) | ||
4702 | + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | ||
4703 | + else | ||
4704 | + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; | ||
4705 | + /* If we can't handle the swizzling, make it untiled. */ | ||
4706 | + if (args->swizzle_mode == I915_BIT_6_SWIZZLE_UNKNOWN) { | ||
4707 | + args->tiling_mode = I915_TILING_NONE; | ||
4708 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | ||
4709 | + } | ||
4710 | + } | ||
4711 | + obj_priv->tiling_mode = args->tiling_mode; | ||
4712 | + | ||
4713 | + mutex_unlock(&dev->struct_mutex); | ||
4714 | + | ||
4715 | + drm_gem_object_unreference(obj); | ||
4716 | + | ||
4717 | + return 0; | ||
4718 | +} | ||
4719 | + | ||
4720 | +/** | ||
4721 | + * Returns the current tiling mode and required bit 6 swizzling for the object. | ||
4722 | + */ | ||
4723 | +int | ||
4724 | +i915_gem_get_tiling(struct drm_device *dev, void *data, | ||
4725 | + struct drm_file *file_priv) | ||
4726 | +{ | ||
4727 | + struct drm_i915_gem_get_tiling *args = data; | ||
4728 | + drm_i915_private_t *dev_priv = dev->dev_private; | ||
4729 | + struct drm_gem_object *obj; | ||
4730 | + struct drm_i915_gem_object *obj_priv; | ||
4731 | + | ||
4732 | + obj = drm_gem_object_lookup(dev, file_priv, args->handle); | ||
4733 | + if (obj == NULL) | ||
4734 | + return -EINVAL; | ||
4735 | + obj_priv = obj->driver_private; | ||
4736 | + | ||
4737 | + mutex_lock(&dev->struct_mutex); | ||
4738 | + | ||
4739 | + args->tiling_mode = obj_priv->tiling_mode; | ||
4740 | + switch (obj_priv->tiling_mode) { | ||
4741 | + case I915_TILING_X: | ||
4742 | + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_x; | ||
4743 | + break; | ||
4744 | + case I915_TILING_Y: | ||
4745 | + args->swizzle_mode = dev_priv->mm.bit_6_swizzle_y; | ||
4746 | + break; | ||
4747 | + case I915_TILING_NONE: | ||
4748 | + args->swizzle_mode = I915_BIT_6_SWIZZLE_NONE; | ||
4749 | + break; | ||
4750 | + default: | ||
4751 | + DRM_ERROR("unknown tiling mode\n"); | ||
4752 | + } | ||
4753 | + | ||
4754 | + mutex_unlock(&dev->struct_mutex); | ||
4755 | + | ||
4756 | + drm_gem_object_unreference(obj); | ||
4757 | + | ||
4758 | + return 0; | ||
4759 | +} | ||
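
[Editor's note: a hedged userland sketch of the two ioctls just added — request X tiling on a GEM buffer and see what the kernel actually chose. The open DRM file descriptor, the GEM handle, and the stride are assumed to exist already; error handling is trimmed and this is not part of the patch.]

#include <sys/ioctl.h>
#include <stdint.h>
#include "drm.h"
#include "i915_drm.h"

static uint32_t request_x_tiling(int fd, uint32_t handle, uint32_t stride)
{
	struct drm_i915_gem_set_tiling set;

	set.handle = handle;
	set.tiling_mode = I915_TILING_X;
	set.stride = stride;

	/* The kernel may demote tiling_mode to I915_TILING_NONE when the
	 * detected swizzle is I915_BIT_6_SWIZZLE_UNKNOWN. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_SET_TILING, &set) != 0)
		return I915_TILING_NONE;

	return set.tiling_mode;	/* what the kernel actually chose */
}

The matching DRM_IOCTL_I915_GEM_GET_TILING call reports the same tiling_mode/swizzle_mode pair for an existing object.
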
4760 | diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c | ||
4761 | index f875959..f295bdf 100644 | ||
4762 | --- a/drivers/gpu/drm/i915/i915_irq.c | ||
4763 | +++ b/drivers/gpu/drm/i915/i915_irq.c | ||
4764 | @@ -407,15 +407,20 @@ irqreturn_t i915_driver_irq_handler(DRM_IRQ_ARGS) | ||
4765 | I915_WRITE(PIPEBSTAT, pipeb_stats); | ||
4766 | } | ||
4767 | |||
4768 | - if (iir & I915_ASLE_INTERRUPT) | ||
4769 | - opregion_asle_intr(dev); | ||
4770 | + I915_WRITE(IIR, iir); | ||
4771 | + if (dev->pdev->msi_enabled) | ||
4772 | + I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
4773 | + (void) I915_READ(IIR); /* Flush posted writes */ | ||
4774 | |||
4775 | dev_priv->sarea_priv->last_dispatch = READ_BREADCRUMB(dev_priv); | ||
4776 | |||
4777 | - if (dev->pdev->msi_enabled) | ||
4778 | - I915_WRITE(IMR, dev_priv->irq_mask_reg); | ||
4779 | - I915_WRITE(IIR, iir); | ||
4780 | - (void) I915_READ(IIR); | ||
4781 | + if (iir & I915_USER_INTERRUPT) { | ||
4782 | + dev_priv->mm.irq_gem_seqno = i915_get_gem_seqno(dev); | ||
4783 | + DRM_WAKEUP(&dev_priv->irq_queue); | ||
4784 | + } | ||
4785 | + | ||
4786 | + if (iir & I915_ASLE_INTERRUPT) | ||
4787 | + opregion_asle_intr(dev); | ||
4788 | |||
4789 | if (vblank && dev_priv->swaps_pending > 0) | ||
4790 | drm_locked_tasklet(dev, i915_vblank_tasklet); | ||
4791 | @@ -449,7 +454,7 @@ static int i915_emit_irq(struct drm_device * dev) | ||
4792 | return dev_priv->counter; | ||
4793 | } | ||
4794 | |||
4795 | -static void i915_user_irq_get(struct drm_device *dev) | ||
4796 | +void i915_user_irq_get(struct drm_device *dev) | ||
4797 | { | ||
4798 | drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; | ||
4799 | |||
4800 | diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h | ||
4801 | index 43ad2cb..5c2d9f2 100644 | ||
4802 | --- a/drivers/gpu/drm/i915/i915_reg.h | ||
4803 | +++ b/drivers/gpu/drm/i915/i915_reg.h | ||
4804 | @@ -25,19 +25,6 @@ | ||
4805 | #ifndef _I915_REG_H_ | ||
4806 | #define _I915_REG_H_ | ||
4807 | |||
4808 | -/* MCH MMIO space */ | ||
4809 | -/** 915-945 and GM965 MCH register controlling DRAM channel access */ | ||
4810 | -#define DCC 0x200 | ||
4811 | -#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | ||
4812 | -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) | ||
4813 | -#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) | ||
4814 | -#define DCC_ADDRESSING_MODE_MASK (3 << 0) | ||
4815 | -#define DCC_CHANNEL_XOR_DISABLE (1 << 10) | ||
4816 | - | ||
4817 | -/** 965 MCH register controlling DRAM channel configuration */ | ||
4818 | -#define CHDECMISC 0x111 | ||
4819 | -#define CHDECMISC_FLEXMEMORY (1 << 1) | ||
4820 | - | ||
4821 | /* | ||
4822 | * The Bridge device's PCI config space has information about the | ||
4823 | * fb aperture size and the amount of pre-reserved memory. | ||
4824 | @@ -516,6 +503,30 @@ | ||
4825 | #define PALETTE_A 0x0a000 | ||
4826 | #define PALETTE_B 0x0a800 | ||
4827 | |||
4828 | +/* MCH MMIO space */ | ||
4829 | + | ||
4830 | +/* | ||
4831 | + * MCHBAR mirror. | ||
4832 | + * | ||
4833 | + * This mirrors the MCHBAR MMIO space whose location is determined by | ||
4834 | + * device 0 function 0's pci config register 0x44 or 0x48 and matches it in | ||
4835 | + * every way. It is not accessible from the CP register read instructions. | ||
4836 | + * | ||
4837 | + */ | ||
4838 | +#define MCHBAR_MIRROR_BASE 0x10000 | ||
4839 | + | ||
4840 | +/** 915-945 and GM965 MCH register controlling DRAM channel access */ | ||
4841 | +#define DCC 0x10200 | ||
4842 | +#define DCC_ADDRESSING_MODE_SINGLE_CHANNEL (0 << 0) | ||
4843 | +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_ASYMMETRIC (1 << 0) | ||
4844 | +#define DCC_ADDRESSING_MODE_DUAL_CHANNEL_INTERLEAVED (2 << 0) | ||
4845 | +#define DCC_ADDRESSING_MODE_MASK (3 << 0) | ||
4846 | +#define DCC_CHANNEL_XOR_DISABLE (1 << 10) | ||
4847 | + | ||
4848 | +/** 965 MCH register controlling DRAM channel configuration */ | ||
4849 | +#define C0DRB3 0x10206 | ||
4850 | +#define C1DRB3 0x10606 | ||
4851 | + | ||
4852 | /* | ||
4853 | * Overlay regs | ||
4854 | */ | ||
4855 | diff --git a/include/drm/drm.h b/include/drm/drm.h | ||
4856 | index 15e5503..f46ba4b 100644 | ||
4857 | --- a/include/drm/drm.h | ||
4858 | +++ b/include/drm/drm.h | ||
4859 | @@ -570,6 +570,34 @@ struct drm_set_version { | ||
4860 | int drm_dd_minor; | ||
4861 | }; | ||
4862 | |||
4863 | +/** DRM_IOCTL_GEM_CLOSE ioctl argument type */ | ||
4864 | +struct drm_gem_close { | ||
4865 | + /** Handle of the object to be closed. */ | ||
4866 | + uint32_t handle; | ||
4867 | + uint32_t pad; | ||
4868 | +}; | ||
4869 | + | ||
4870 | +/** DRM_IOCTL_GEM_FLINK ioctl argument type */ | ||
4871 | +struct drm_gem_flink { | ||
4872 | + /** Handle for the object being named */ | ||
4873 | + uint32_t handle; | ||
4874 | + | ||
4875 | + /** Returned global name */ | ||
4876 | + uint32_t name; | ||
4877 | +}; | ||
4878 | + | ||
4879 | +/** DRM_IOCTL_GEM_OPEN ioctl argument type */ | ||
4880 | +struct drm_gem_open { | ||
4881 | + /** Name of object being opened */ | ||
4882 | + uint32_t name; | ||
4883 | + | ||
4884 | + /** Returned handle for the object */ | ||
4885 | + uint32_t handle; | ||
4886 | + | ||
4887 | + /** Returned size of the object */ | ||
4888 | + uint64_t size; | ||
4889 | +}; | ||
4890 | + | ||
4891 | #define DRM_IOCTL_BASE 'd' | ||
4892 | #define DRM_IO(nr) _IO(DRM_IOCTL_BASE,nr) | ||
4893 | #define DRM_IOR(nr,type) _IOR(DRM_IOCTL_BASE,nr,type) | ||
4894 | @@ -585,6 +613,9 @@ struct drm_set_version { | ||
4895 | #define DRM_IOCTL_GET_STATS DRM_IOR( 0x06, struct drm_stats) | ||
4896 | #define DRM_IOCTL_SET_VERSION DRM_IOWR(0x07, struct drm_set_version) | ||
4897 | #define DRM_IOCTL_MODESET_CTL DRM_IOW(0x08, struct drm_modeset_ctl) | ||
4898 | +#define DRM_IOCTL_GEM_CLOSE DRM_IOW (0x09, struct drm_gem_close) | ||
4899 | +#define DRM_IOCTL_GEM_FLINK DRM_IOWR(0x0a, struct drm_gem_flink) | ||
4900 | +#define DRM_IOCTL_GEM_OPEN DRM_IOWR(0x0b, struct drm_gem_open) | ||
4901 | |||
4902 | #define DRM_IOCTL_SET_UNIQUE DRM_IOW( 0x10, struct drm_unique) | ||
4903 | #define DRM_IOCTL_AUTH_MAGIC DRM_IOW( 0x11, struct drm_auth) | ||
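
[Editor's note: the three new generic ioctls give GEM objects a cross-process naming scheme — FLINK publishes a global name for a handle, OPEN converts that name back into a handle in another file description, and GEM_CLOSE drops a handle. A minimal sketch, not part of the patch, assuming both sides already hold an open DRM file descriptor:]

#include <sys/ioctl.h>
#include <stdint.h>
#include "drm.h"

/* Exporter: turn a local GEM handle into a global name (nonzero on success). */
static uint32_t gem_export_name(int fd, uint32_t handle)
{
	struct drm_gem_flink flink = { .handle = handle, .name = 0 };

	if (ioctl(fd, DRM_IOCTL_GEM_FLINK, &flink) != 0)
		return 0;
	return flink.name;
}

/* Importer: turn a global name into a local handle; the size comes back too. */
static uint32_t gem_import_handle(int fd, uint32_t name, uint64_t *size)
{
	struct drm_gem_open open_arg = { .name = name };

	if (ioctl(fd, DRM_IOCTL_GEM_OPEN, &open_arg) != 0)
		return 0;
	*size = open_arg.size;
	return open_arg.handle;
}
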
4904 | diff --git a/include/drm/drmP.h b/include/drm/drmP.h | ||
4905 | index e79ce07..1469a1b 100644 | ||
4906 | --- a/include/drm/drmP.h | ||
4907 | +++ b/include/drm/drmP.h | ||
4908 | @@ -104,6 +104,7 @@ struct drm_device; | ||
4909 | #define DRIVER_DMA_QUEUE 0x200 | ||
4910 | #define DRIVER_FB_DMA 0x400 | ||
4911 | #define DRIVER_IRQ_VBL2 0x800 | ||
4912 | +#define DRIVER_GEM 0x1000 | ||
4913 | |||
4914 | /***********************************************************************/ | ||
4915 | /** \name Begin the DRM... */ | ||
4916 | @@ -387,6 +388,10 @@ struct drm_file { | ||
4917 | struct drm_minor *minor; | ||
4918 | int remove_auth_on_close; | ||
4919 | unsigned long lock_count; | ||
4920 | + /** Mapping of mm object handles to object pointers. */ | ||
4921 | + struct idr object_idr; | ||
4922 | + /** Lock for synchronization of access to object_idr. */ | ||
4923 | + spinlock_t table_lock; | ||
4924 | struct file *filp; | ||
4925 | void *driver_priv; | ||
4926 | }; | ||
4927 | @@ -558,6 +563,56 @@ struct drm_ati_pcigart_info { | ||
4928 | }; | ||
4929 | |||
4930 | /** | ||
4937 4931 | + * This structure defines the generic GEM buffer object, which will be used | ||
4938 4932 | + * by the DRM for its buffer objects. | ||
4933 | + */ | ||
4934 | +struct drm_gem_object { | ||
4935 | + /** Reference count of this object */ | ||
4936 | + struct kref refcount; | ||
4937 | + | ||
4938 | + /** Handle count of this object. Each handle also holds a reference */ | ||
4939 | + struct kref handlecount; | ||
4940 | + | ||
4941 | + /** Related drm device */ | ||
4942 | + struct drm_device *dev; | ||
4943 | + | ||
4944 | + /** File representing the shmem storage */ | ||
4945 | + struct file *filp; | ||
4946 | + | ||
4947 | + /** | ||
4948 | + * Size of the object, in bytes. Immutable over the object's | ||
4949 | + * lifetime. | ||
4950 | + */ | ||
4951 | + size_t size; | ||
4952 | + | ||
4953 | + /** | ||
4954 | + * Global name for this object, starts at 1. 0 means unnamed. | ||
4955 | + * Access is covered by the object_name_lock in the related drm_device | ||
4956 | + */ | ||
4957 | + int name; | ||
4958 | + | ||
4959 | + /** | ||
4960 | + * Memory domains. These monitor which caches contain read/write data | ||
4961 | + * related to the object. When transitioning from one set of domains | ||
4962 | + * to another, the driver is called to ensure that caches are suitably | ||
4963 | + * flushed and invalidated | ||
4964 | + */ | ||
4965 | + uint32_t read_domains; | ||
4966 | + uint32_t write_domain; | ||
4967 | + | ||
4968 | + /** | ||
4969 | + * While validating an exec operation, the | ||
4970 | + * new read/write domain values are computed here. | ||
4971 | + * They will be transferred to the above values | ||
4972 | + * at the point that any cache flushing occurs | ||
4973 | + */ | ||
4974 | + uint32_t pending_read_domains; | ||
4975 | + uint32_t pending_write_domain; | ||
4976 | + | ||
4977 | + void *driver_private; | ||
4978 | +}; | ||
4979 | + | ||
4980 | +/** | ||
4981 | * DRM driver structure. This structure represent the common code for | ||
4982 | * a family of cards. There will one drm_device for each card present | ||
4983 | * in this family | ||
4984 | @@ -657,6 +712,18 @@ struct drm_driver { | ||
4985 | void (*set_version) (struct drm_device *dev, | ||
4986 | struct drm_set_version *sv); | ||
4987 | |||
4988 | + int (*proc_init)(struct drm_minor *minor); | ||
4989 | + void (*proc_cleanup)(struct drm_minor *minor); | ||
4990 | + | ||
4991 | + /** | ||
4992 | + * Driver-specific constructor for drm_gem_objects, to set up | ||
4993 | + * obj->driver_private. | ||
4994 | + * | ||
4995 | + * Returns 0 on success. | ||
4996 | + */ | ||
4997 | + int (*gem_init_object) (struct drm_gem_object *obj); | ||
4998 | + void (*gem_free_object) (struct drm_gem_object *obj); | ||
4999 | + | ||
5000 | int major; | ||
5001 | int minor; | ||
5002 | int patchlevel; | ||
5003 | @@ -830,6 +897,22 @@ struct drm_device { | ||
5004 | spinlock_t drw_lock; | ||
5005 | struct idr drw_idr; | ||
5006 | /*@} */ | ||
5007 | + | ||
5008 | + /** \name GEM information */ | ||
5009 | + /*@{ */ | ||
5010 | + spinlock_t object_name_lock; | ||
5011 | + struct idr object_name_idr; | ||
5012 | + atomic_t object_count; | ||
5013 | + atomic_t object_memory; | ||
5014 | + atomic_t pin_count; | ||
5015 | + atomic_t pin_memory; | ||
5016 | + atomic_t gtt_count; | ||
5017 | + atomic_t gtt_memory; | ||
5018 | + uint32_t gtt_total; | ||
5019 | + uint32_t invalidate_domains; /* domains pending invalidation */ | ||
5020 | + uint32_t flush_domains; /* domains pending flush */ | ||
5021 | + /*@} */ | ||
5022 | + | ||
5023 | }; | ||
5024 | |||
5025 | static __inline__ int drm_core_check_feature(struct drm_device *dev, | ||
5026 | @@ -926,6 +1009,10 @@ extern void *drm_realloc(void *oldpt, size_t oldsize, size_t size, int area); | ||
5027 | extern DRM_AGP_MEM *drm_alloc_agp(struct drm_device *dev, int pages, u32 type); | ||
5028 | extern int drm_free_agp(DRM_AGP_MEM * handle, int pages); | ||
5029 | extern int drm_bind_agp(DRM_AGP_MEM * handle, unsigned int start); | ||
5030 | +extern DRM_AGP_MEM *drm_agp_bind_pages(struct drm_device *dev, | ||
5031 | + struct page **pages, | ||
5032 | + unsigned long num_pages, | ||
5033 | + uint32_t gtt_offset); | ||
5034 | extern int drm_unbind_agp(DRM_AGP_MEM * handle); | ||
5035 | |||
5036 | /* Misc. IOCTL support (drm_ioctl.h) */ | ||
5037 | @@ -988,6 +1075,9 @@ extern int drm_getmagic(struct drm_device *dev, void *data, | ||
5038 | extern int drm_authmagic(struct drm_device *dev, void *data, | ||
5039 | struct drm_file *file_priv); | ||
5040 | |||
5041 | +/* Cache management (drm_cache.c) */ | ||
5042 | +void drm_clflush_pages(struct page *pages[], unsigned long num_pages); | ||
5043 | + | ||
5044 | /* Locking IOCTL support (drm_lock.h) */ | ||
5045 | extern int drm_lock(struct drm_device *dev, void *data, | ||
5046 | struct drm_file *file_priv); | ||
5047 | @@ -1094,6 +1184,7 @@ extern DRM_AGP_MEM *drm_agp_allocate_memory(struct agp_bridge_data *bridge, size | ||
5048 | extern int drm_agp_free_memory(DRM_AGP_MEM * handle); | ||
5049 | extern int drm_agp_bind_memory(DRM_AGP_MEM * handle, off_t start); | ||
5050 | extern int drm_agp_unbind_memory(DRM_AGP_MEM * handle); | ||
5051 | +extern void drm_agp_chipset_flush(struct drm_device *dev); | ||
5052 | |||
5053 | /* Stub support (drm_stub.h) */ | ||
5054 | extern int drm_get_dev(struct pci_dev *pdev, const struct pci_device_id *ent, | ||
5055 | @@ -1156,6 +1247,66 @@ extern unsigned long drm_mm_tail_space(struct drm_mm *mm); | ||
5056 | extern int drm_mm_remove_space_from_tail(struct drm_mm *mm, unsigned long size); | ||
5057 | extern int drm_mm_add_space_to_tail(struct drm_mm *mm, unsigned long size); | ||
5058 | |||
5059 | +/* Graphics Execution Manager library functions (drm_gem.c) */ | ||
5060 | +int drm_gem_init(struct drm_device *dev); | ||
5061 | +void drm_gem_object_free(struct kref *kref); | ||
5062 | +struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev, | ||
5063 | + size_t size); | ||
5064 | +void drm_gem_object_handle_free(struct kref *kref); | ||
5065 | + | ||
5066 | +static inline void | ||
5067 | +drm_gem_object_reference(struct drm_gem_object *obj) | ||
5068 | +{ | ||
5069 | + kref_get(&obj->refcount); | ||
5070 | +} | ||
5071 | + | ||
5072 | +static inline void | ||
5073 | +drm_gem_object_unreference(struct drm_gem_object *obj) | ||
5074 | +{ | ||
5075 | + if (obj == NULL) | ||
5076 | + return; | ||
5077 | + | ||
5078 | + kref_put(&obj->refcount, drm_gem_object_free); | ||
5079 | +} | ||
5080 | + | ||
5081 | +int drm_gem_handle_create(struct drm_file *file_priv, | ||
5082 | + struct drm_gem_object *obj, | ||
5083 | + int *handlep); | ||
5084 | + | ||
5085 | +static inline void | ||
5086 | +drm_gem_object_handle_reference(struct drm_gem_object *obj) | ||
5087 | +{ | ||
5088 | + drm_gem_object_reference(obj); | ||
5089 | + kref_get(&obj->handlecount); | ||
5090 | +} | ||
5091 | + | ||
5092 | +static inline void | ||
5093 | +drm_gem_object_handle_unreference(struct drm_gem_object *obj) | ||
5094 | +{ | ||
5095 | + if (obj == NULL) | ||
5096 | + return; | ||
5097 | + | ||
5098 | + /* | ||
5099 | + * Must bump handle count first as this may be the last | ||
5100 | + * ref, in which case the object would disappear before we | ||
5101 | + * checked for a name | ||
5102 | + */ | ||
5103 | + kref_put(&obj->handlecount, drm_gem_object_handle_free); | ||
5104 | + drm_gem_object_unreference(obj); | ||
5105 | +} | ||
5106 | + | ||
5107 | +struct drm_gem_object *drm_gem_object_lookup(struct drm_device *dev, | ||
5108 | + struct drm_file *filp, | ||
5109 | + int handle); | ||
5110 | +int drm_gem_close_ioctl(struct drm_device *dev, void *data, | ||
5111 | + struct drm_file *file_priv); | ||
5112 | +int drm_gem_flink_ioctl(struct drm_device *dev, void *data, | ||
5113 | + struct drm_file *file_priv); | ||
5114 | +int drm_gem_open_ioctl(struct drm_device *dev, void *data, | ||
5115 | + struct drm_file *file_priv); | ||
5116 | +void drm_gem_open(struct drm_device *dev, struct drm_file *file_private); | ||
5117 | +void drm_gem_release(struct drm_device *dev, struct drm_file *file_private); | ||
5118 | + | ||
5119 | extern void drm_core_ioremap(struct drm_map *map, struct drm_device *dev); | ||
5120 | extern void drm_core_ioremap_wc(struct drm_map *map, struct drm_device *dev); | ||
5121 | extern void drm_core_ioremapfree(struct drm_map *map, struct drm_device *dev); | ||
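
[Editor's note: the helpers declared above imply a simple kernel-side discipline, visible in i915_gem_set_tiling() earlier in this patch: drm_gem_object_lookup() returns a referenced object, so every successful lookup must be balanced by drm_gem_object_unreference(). A hedged sketch of that pairing in a hypothetical driver function — the function itself is illustrative only, not part of the patch:]

static int example_gem_size_query(struct drm_device *dev,
				  struct drm_file *file_priv, int handle)
{
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(dev, file_priv, handle);
	if (obj == NULL)
		return -EINVAL;		/* no such handle for this file */

	/* obj->size is immutable over the object's lifetime, so it can be
	 * read without further locking. */
	DRM_DEBUG("object %d is %zu bytes\n", handle, obj->size);

	drm_gem_object_unreference(obj);	/* balance the lookup */
	return 0;
}
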
5122 | diff --git a/include/drm/i915_drm.h b/include/drm/i915_drm.h | ||
5123 | index 05c66cf..59d08fc 100644 | ||
5124 | --- a/include/drm/i915_drm.h | ||
5125 | +++ b/include/drm/i915_drm.h | ||
5126 | @@ -143,6 +143,22 @@ typedef struct _drm_i915_sarea { | ||
5127 | #define DRM_I915_GET_VBLANK_PIPE 0x0e | ||
5128 | #define DRM_I915_VBLANK_SWAP 0x0f | ||
5129 | #define DRM_I915_HWS_ADDR 0x11 | ||
5130 | +#define DRM_I915_GEM_INIT 0x13 | ||
5131 | +#define DRM_I915_GEM_EXECBUFFER 0x14 | ||
5132 | +#define DRM_I915_GEM_PIN 0x15 | ||
5133 | +#define DRM_I915_GEM_UNPIN 0x16 | ||
5134 | +#define DRM_I915_GEM_BUSY 0x17 | ||
5135 | +#define DRM_I915_GEM_THROTTLE 0x18 | ||
5136 | +#define DRM_I915_GEM_ENTERVT 0x19 | ||
5137 | +#define DRM_I915_GEM_LEAVEVT 0x1a | ||
5138 | +#define DRM_I915_GEM_CREATE 0x1b | ||
5139 | +#define DRM_I915_GEM_PREAD 0x1c | ||
5140 | +#define DRM_I915_GEM_PWRITE 0x1d | ||
5141 | +#define DRM_I915_GEM_MMAP 0x1e | ||
5142 | +#define DRM_I915_GEM_SET_DOMAIN 0x1f | ||
5143 | +#define DRM_I915_GEM_SW_FINISH 0x20 | ||
5144 | +#define DRM_I915_GEM_SET_TILING 0x21 | ||
5145 | +#define DRM_I915_GEM_GET_TILING 0x22 | ||
5146 | |||
5147 | #define DRM_IOCTL_I915_INIT DRM_IOW( DRM_COMMAND_BASE + DRM_I915_INIT, drm_i915_init_t) | ||
5148 | #define DRM_IOCTL_I915_FLUSH DRM_IO ( DRM_COMMAND_BASE + DRM_I915_FLUSH) | ||
5149 | @@ -160,6 +176,20 @@ typedef struct _drm_i915_sarea { | ||
5150 | #define DRM_IOCTL_I915_SET_VBLANK_PIPE DRM_IOW( DRM_COMMAND_BASE + DRM_I915_SET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
5151 | #define DRM_IOCTL_I915_GET_VBLANK_PIPE DRM_IOR( DRM_COMMAND_BASE + DRM_I915_GET_VBLANK_PIPE, drm_i915_vblank_pipe_t) | ||
5152 | #define DRM_IOCTL_I915_VBLANK_SWAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_VBLANK_SWAP, drm_i915_vblank_swap_t) | ||
5153 | +#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin) | ||
5154 | +#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin) | ||
5155 | +#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy) | ||
5156 | +#define DRM_IOCTL_I915_GEM_THROTTLE DRM_IO ( DRM_COMMAND_BASE + DRM_I915_GEM_THROTTLE) | ||
5157 | +#define DRM_IOCTL_I915_GEM_ENTERVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_ENTERVT) | ||
5158 | +#define DRM_IOCTL_I915_GEM_LEAVEVT DRM_IO(DRM_COMMAND_BASE + DRM_I915_GEM_LEAVEVT) | ||
5159 | +#define DRM_IOCTL_I915_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_CREATE, struct drm_i915_gem_create) | ||
5160 | +#define DRM_IOCTL_I915_GEM_PREAD DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PREAD, struct drm_i915_gem_pread) | ||
5161 | +#define DRM_IOCTL_I915_GEM_PWRITE DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_PWRITE, struct drm_i915_gem_pwrite) | ||
5162 | +#define DRM_IOCTL_I915_GEM_MMAP DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_MMAP, struct drm_i915_gem_mmap) | ||
5163 | +#define DRM_IOCTL_I915_GEM_SET_DOMAIN DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SET_DOMAIN, struct drm_i915_gem_set_domain) | ||
5164 | +#define DRM_IOCTL_I915_GEM_SW_FINISH DRM_IOW (DRM_COMMAND_BASE + DRM_I915_GEM_SW_FINISH, struct drm_i915_gem_sw_finish) | ||
5165 | +#define DRM_IOCTL_I915_GEM_SET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_SET_TILING, struct drm_i915_gem_set_tiling) | ||
5166 | +#define DRM_IOCTL_I915_GEM_GET_TILING DRM_IOWR (DRM_COMMAND_BASE + DRM_I915_GEM_GET_TILING, struct drm_i915_gem_get_tiling) | ||
5167 | |||
5168 | /* Allow drivers to submit batchbuffers directly to hardware, relying | ||
5169 | * on the security mechanisms provided by hardware. | ||
5170 | @@ -200,6 +230,7 @@ typedef struct drm_i915_irq_wait { | ||
5171 | #define I915_PARAM_IRQ_ACTIVE 1 | ||
5172 | #define I915_PARAM_ALLOW_BATCHBUFFER 2 | ||
5173 | #define I915_PARAM_LAST_DISPATCH 3 | ||
5174 | +#define I915_PARAM_HAS_GEM 5 | ||
5175 | |||
5176 | typedef struct drm_i915_getparam { | ||
5177 | int param; | ||
5178 | @@ -267,4 +298,305 @@ typedef struct drm_i915_hws_addr { | ||
5179 | uint64_t addr; | ||
5180 | } drm_i915_hws_addr_t; | ||
5181 | |||
5182 | +struct drm_i915_gem_init { | ||
5183 | + /** | ||
5184 | + * Beginning offset in the GTT to be managed by the DRM memory | ||
5185 | + * manager. | ||
5186 | + */ | ||
5187 | + uint64_t gtt_start; | ||
5188 | + /** | ||
5189 | + * Ending offset in the GTT to be managed by the DRM memory | ||
5190 | + * manager. | ||
5191 | + */ | ||
5192 | + uint64_t gtt_end; | ||
5193 | +}; | ||
5194 | + | ||
5195 | +struct drm_i915_gem_create { | ||
5196 | + /** | ||
5197 | + * Requested size for the object. | ||
5198 | + * | ||
5199 | + * The (page-aligned) allocated size for the object will be returned. | ||
5200 | + */ | ||
5201 | + uint64_t size; | ||
5202 | + /** | ||
5203 | + * Returned handle for the object. | ||
5204 | + * | ||
5205 | + * Object handles are nonzero. | ||
5206 | + */ | ||
5207 | + uint32_t handle; | ||
5208 | + uint32_t pad; | ||
5209 | +}; | ||
5210 | + | ||
5211 | +struct drm_i915_gem_pread { | ||
5212 | + /** Handle for the object being read. */ | ||
5213 | + uint32_t handle; | ||
5214 | + uint32_t pad; | ||
5215 | + /** Offset into the object to read from */ | ||
5216 | + uint64_t offset; | ||
5217 | + /** Length of data to read */ | ||
5218 | + uint64_t size; | ||
5219 | + /** | ||
5220 | + * Pointer to write the data into. | ||
5221 | + * | ||
5222 | + * This is a fixed-size type for 32/64 compatibility. | ||
5223 | + */ | ||
5224 | + uint64_t data_ptr; | ||
5225 | +}; | ||
5226 | + | ||
5227 | +struct drm_i915_gem_pwrite { | ||
5228 | + /** Handle for the object being written to. */ | ||
5229 | + uint32_t handle; | ||
5230 | + uint32_t pad; | ||
5231 | + /** Offset into the object to write to */ | ||
5232 | + uint64_t offset; | ||
5233 | + /** Length of data to write */ | ||
5234 | + uint64_t size; | ||
5235 | + /** | ||
5236 | + * Pointer to read the data from. | ||
5237 | + * | ||
5238 | + * This is a fixed-size type for 32/64 compatibility. | ||
5239 | + */ | ||
5240 | + uint64_t data_ptr; | ||
5241 | +}; | ||
5242 | + | ||
5243 | +struct drm_i915_gem_mmap { | ||
5244 | + /** Handle for the object being mapped. */ | ||
5245 | + uint32_t handle; | ||
5246 | + uint32_t pad; | ||
5247 | + /** Offset in the object to map. */ | ||
5248 | + uint64_t offset; | ||
5249 | + /** | ||
5250 | + * Length of data to map. | ||
5251 | + * | ||
5252 | + * The value will be page-aligned. | ||
5253 | + */ | ||
5254 | + uint64_t size; | ||
5255 | + /** | ||
5256 | + * Returned pointer the data was mapped at. | ||
5257 | + * | ||
5258 | + * This is a fixed-size type for 32/64 compatibility. | ||
5259 | + */ | ||
5260 | + uint64_t addr_ptr; | ||
5261 | +}; | ||
5262 | + | ||
5263 | +struct drm_i915_gem_set_domain { | ||
5264 | + /** Handle for the object */ | ||
5265 | + uint32_t handle; | ||
5266 | + | ||
5267 | + /** New read domains */ | ||
5268 | + uint32_t read_domains; | ||
5269 | + | ||
5270 | + /** New write domain */ | ||
5271 | + uint32_t write_domain; | ||
5272 | +}; | ||
5273 | + | ||
5274 | +struct drm_i915_gem_sw_finish { | ||
5275 | + /** Handle for the object */ | ||
5276 | + uint32_t handle; | ||
5277 | +}; | ||
5278 | + | ||
5279 | +struct drm_i915_gem_relocation_entry { | ||
5280 | + /** | ||
5281 | + * Handle of the buffer being pointed to by this relocation entry. | ||
5282 | + * | ||
5283 | + * It's appealing to make this be an index into the mm_validate_entry | ||
5284 | + * list to refer to the buffer, but this allows the driver to create | ||
5285 | + * a relocation list for state buffers and not re-write it per | ||
5286 | + * exec using the buffer. | ||
5287 | + */ | ||
5288 | + uint32_t target_handle; | ||
5289 | + | ||
5290 | + /** | ||
5291 | + * Value to be added to the offset of the target buffer to make up | ||
5292 | + * the relocation entry. | ||
5293 | + */ | ||
5294 | + uint32_t delta; | ||
5295 | + | ||
5296 | + /** Offset in the buffer the relocation entry will be written into */ | ||
5297 | + uint64_t offset; | ||
5298 | + | ||
5299 | + /** | ||
5300 | + * Offset value of the target buffer that the relocation entry was last | ||
5301 | + * written as. | ||
5302 | + * | ||
5303 | + * If the buffer has the same offset as last time, we can skip syncing | ||
5304 | + * and writing the relocation. This value is written back out by | ||
5305 | + * the execbuffer ioctl when the relocation is written. | ||
5306 | + */ | ||
5307 | + uint64_t presumed_offset; | ||
5308 | + | ||
5309 | + /** | ||
5310 | + * Target memory domains read by this operation. | ||
5311 | + */ | ||
5312 | + uint32_t read_domains; | ||
5313 | + | ||
5314 | + /** | ||
5315 | + * Target memory domains written by this operation. | ||
5316 | + * | ||
5317 | + * Note that only one domain may be written by the whole | ||
5318 | + * execbuffer operation, so that where there are conflicts, | ||
5319 | + * the application will get -EINVAL back. | ||
5320 | + */ | ||
5321 | + uint32_t write_domain; | ||
5322 | +}; | ||
5323 | + | ||
5324 | +/** @{ | ||
5325 | + * Intel memory domains | ||
5326 | + * | ||
5327 | + * Most of these just align with the various caches in | ||
5328 | + * the system and are used to flush and invalidate as | ||
5329 | + * objects end up cached in different domains. | ||
5330 | + */ | ||
5331 | +/** CPU cache */ | ||
5332 | +#define I915_GEM_DOMAIN_CPU 0x00000001 | ||
5333 | +/** Render cache, used by 2D and 3D drawing */ | ||
5334 | +#define I915_GEM_DOMAIN_RENDER 0x00000002 | ||
5335 | +/** Sampler cache, used by texture engine */ | ||
5336 | +#define I915_GEM_DOMAIN_SAMPLER 0x00000004 | ||
5337 | +/** Command queue, used to load batch buffers */ | ||
5338 | +#define I915_GEM_DOMAIN_COMMAND 0x00000008 | ||
5339 | +/** Instruction cache, used by shader programs */ | ||
5340 | +#define I915_GEM_DOMAIN_INSTRUCTION 0x00000010 | ||
5341 | +/** Vertex address cache */ | ||
5342 | +#define I915_GEM_DOMAIN_VERTEX 0x00000020 | ||
5343 | +/** GTT domain - aperture and scanout */ | ||
5344 | +#define I915_GEM_DOMAIN_GTT 0x00000040 | ||
5345 | +/** @} */ | ||
5346 | + | ||
5347 | +struct drm_i915_gem_exec_object { | ||
5348 | + /** | ||
5349 | + * User's handle for a buffer to be bound into the GTT for this | ||
5350 | + * operation. | ||
5351 | + */ | ||
5352 | + uint32_t handle; | ||
5353 | + | ||
5354 | + /** Number of relocations to be performed on this buffer */ | ||
5355 | + uint32_t relocation_count; | ||
5356 | + /** | ||
5357 | + * Pointer to array of struct drm_i915_gem_relocation_entry containing | ||
5358 | + * the relocations to be performed in this buffer. | ||
5359 | + */ | ||
5360 | + uint64_t relocs_ptr; | ||
5361 | + | ||
5362 | + /** Required alignment in graphics aperture */ | ||
5363 | + uint64_t alignment; | ||
5364 | + | ||
5365 | + /** | ||
5366 | + * Returned value of the updated offset of the object, for future | ||
5367 | + * presumed_offset writes. | ||
5368 | + */ | ||
5369 | + uint64_t offset; | ||
5370 | +}; | ||
5371 | + | ||
5372 | +struct drm_i915_gem_execbuffer { | ||
5373 | + /** | ||
5374 | + * List of buffers to be validated with their relocations to be | ||
5381 5375 | + * performed on them. | ||
5376 | + * | ||
5383 5377 | + * This is a pointer to an array of struct drm_i915_gem_exec_object. | ||
5378 | + * | ||
5379 | + * These buffers must be listed in an order such that all relocations | ||
5380 | + * a buffer is performing refer to buffers that have already appeared | ||
5381 | + * in the validate list. | ||
5382 | + */ | ||
5383 | + uint64_t buffers_ptr; | ||
5384 | + uint32_t buffer_count; | ||
5385 | + | ||
5386 | + /** Offset in the batchbuffer to start execution from. */ | ||
5387 | + uint32_t batch_start_offset; | ||
5388 | + /** Bytes used in batchbuffer from batch_start_offset */ | ||
5389 | + uint32_t batch_len; | ||
5390 | + uint32_t DR1; | ||
5391 | + uint32_t DR4; | ||
5392 | + uint32_t num_cliprects; | ||
5393 | + /** This is a struct drm_clip_rect *cliprects */ | ||
5394 | + uint64_t cliprects_ptr; | ||
5395 | +}; | ||
5396 | + | ||
5397 | +struct drm_i915_gem_pin { | ||
5398 | + /** Handle of the buffer to be pinned. */ | ||
5399 | + uint32_t handle; | ||
5400 | + uint32_t pad; | ||
5401 | + | ||
5402 | + /** alignment required within the aperture */ | ||
5403 | + uint64_t alignment; | ||
5404 | + | ||
5405 | + /** Returned GTT offset of the buffer. */ | ||
5406 | + uint64_t offset; | ||
5407 | +}; | ||
5408 | + | ||
5409 | +struct drm_i915_gem_unpin { | ||
5410 | + /** Handle of the buffer to be unpinned. */ | ||
5411 | + uint32_t handle; | ||
5412 | + uint32_t pad; | ||
5413 | +}; | ||
5414 | + | ||
5415 | +struct drm_i915_gem_busy { | ||
5416 | + /** Handle of the buffer to check for busy */ | ||
5417 | + uint32_t handle; | ||
5418 | + | ||
5419 | + /** Return busy status (1 if busy, 0 if idle) */ | ||
5420 | + uint32_t busy; | ||
5421 | +}; | ||
5422 | + | ||
5423 | +#define I915_TILING_NONE 0 | ||
5424 | +#define I915_TILING_X 1 | ||
5425 | +#define I915_TILING_Y 2 | ||
5426 | + | ||
5427 | +#define I915_BIT_6_SWIZZLE_NONE 0 | ||
5428 | +#define I915_BIT_6_SWIZZLE_9 1 | ||
5429 | +#define I915_BIT_6_SWIZZLE_9_10 2 | ||
5430 | +#define I915_BIT_6_SWIZZLE_9_11 3 | ||
5431 | +#define I915_BIT_6_SWIZZLE_9_10_11 4 | ||
5432 | +/* Not seen by userland */ | ||
5433 | +#define I915_BIT_6_SWIZZLE_UNKNOWN 5 | ||
5434 | + | ||
5435 | +struct drm_i915_gem_set_tiling { | ||
5436 | + /** Handle of the buffer to have its tiling state updated */ | ||
5437 | + uint32_t handle; | ||
5438 | + | ||
5439 | + /** | ||
5440 | + * Tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
5441 | + * I915_TILING_Y). | ||
5442 | + * | ||
5443 | + * This value is to be set on request, and will be updated by the | ||
5444 | + * kernel on successful return with the actual chosen tiling layout. | ||
5445 | + * | ||
5446 | + * The tiling mode may be demoted to I915_TILING_NONE when the system | ||
5447 | + * has bit 6 swizzling that can't be managed correctly by GEM. | ||
5448 | + * | ||
5449 | + * Buffer contents become undefined when changing tiling_mode. | ||
5450 | + */ | ||
5451 | + uint32_t tiling_mode; | ||
5452 | + | ||
5453 | + /** | ||
5454 | + * Stride in bytes for the object when in I915_TILING_X or | ||
5455 | + * I915_TILING_Y. | ||
5456 | + */ | ||
5457 | + uint32_t stride; | ||
5458 | + | ||
5459 | + /** | ||
5460 | + * Returned address bit 6 swizzling required for CPU access through | ||
5461 | + * mmap mapping. | ||
5462 | + */ | ||
5463 | + uint32_t swizzle_mode; | ||
5464 | +}; | ||
5465 | + | ||
5466 | +struct drm_i915_gem_get_tiling { | ||
5467 | + /** Handle of the buffer to get tiling state for. */ | ||
5468 | + uint32_t handle; | ||
5469 | + | ||
5470 | + /** | ||
5471 | + * Current tiling mode for the object (I915_TILING_NONE, I915_TILING_X, | ||
5472 | + * I915_TILING_Y). | ||
5473 | + */ | ||
5474 | + uint32_t tiling_mode; | ||
5475 | + | ||
5476 | + /** | ||
5477 | + * Returned address bit 6 swizzling required for CPU access through | ||
5478 | + * mmap mapping. | ||
5479 | + */ | ||
5480 | + uint32_t swizzle_mode; | ||
5481 | +}; | ||
5482 | + | ||
5483 | #endif /* _I915_DRM_H_ */ | ||
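
[Editor's note: a hedged userland sketch, not part of the patch, of how the execution structures above fit together — one batch buffer that patches a single pointer to one target buffer. The DRM file descriptor, the two GEM handles, and the batch length are assumed to exist already. libdrm's drmCommandWrite() is used as the ioctl wrapper (an assumption about the userland stack; the hunk shown here only defines the DRM_I915_GEM_EXECBUFFER command number). A real client would also feed the returned offsets back into presumed_offset on later submissions.]

#include <string.h>
#include <stdint.h>
#include <xf86drm.h>		/* drmCommandWrite(), from libdrm */
#include "i915_drm.h"

static int submit_batch(int fd, uint32_t batch_handle, uint32_t target_handle,
			uint32_t batch_len)
{
	struct drm_i915_gem_relocation_entry reloc;
	struct drm_i915_gem_exec_object objs[2];
	struct drm_i915_gem_execbuffer execbuf;

	memset(&reloc, 0, sizeof(reloc));
	reloc.target_handle = target_handle;
	reloc.offset = 4;			/* byte in the batch to patch */
	reloc.delta = 0;			/* point at the target's start */
	reloc.read_domains = I915_GEM_DOMAIN_RENDER;
	reloc.write_domain = I915_GEM_DOMAIN_RENDER;

	memset(objs, 0, sizeof(objs));
	objs[0].handle = target_handle;		/* targets must precede users */
	objs[1].handle = batch_handle;
	objs[1].relocation_count = 1;
	objs[1].relocs_ptr = (uintptr_t)&reloc;

	memset(&execbuf, 0, sizeof(execbuf));
	execbuf.buffers_ptr = (uintptr_t)objs;
	execbuf.buffer_count = 2;
	execbuf.batch_start_offset = 0;
	execbuf.batch_len = batch_len;

	return drmCommandWrite(fd, DRM_I915_GEM_EXECBUFFER,
			       &execbuf, sizeof(execbuf));
}
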