Print this page
2976 remove useless offsetof() macros
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/drm/drmP.h
+++ new/usr/src/uts/common/io/drm/drmP.h
1 1 /*
2 2 * drmP.h -- Private header for Direct Rendering Manager -*- linux-c -*-
3 3 * Created: Mon Jan 4 10:05:05 1999 by faith@precisioninsight.com
4 4 */
5 5 /*
6 6 * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
7 7 * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
8 8 * Copyright (c) 2009, Intel Corporation.
9 9 * All rights reserved.
10 10 *
11 11 * Permission is hereby granted, free of charge, to any person obtaining a
12 12 * copy of this software and associated documentation files (the "Software"),
13 13 * to deal in the Software without restriction, including without limitation
14 14 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
15 15 * and/or sell copies of the Software, and to permit persons to whom the
16 16 * Software is furnished to do so, subject to the following conditions:
17 17 *
18 18 * The above copyright notice and this permission notice (including the next
19 19 * paragraph) shall be included in all copies or substantial portions of the
20 20 * Software.
21 21 *
22 22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
25 25 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
26 26 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
27 27 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
28 28 * OTHER DEALINGS IN THE SOFTWARE.
29 29 *
30 30 * Authors:
31 31 * Rickard E. (Rik) Faith <faith@valinux.com>
32 32 * Gareth Hughes <gareth@valinux.com>
33 33 *
34 34 */
35 35
36 36 /*
37 37 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
38 38 * Use is subject to license terms.
39 39 */
40 40
41 41 #ifndef _DRMP_H
42 42 #define _DRMP_H
43 43
44 44 #include <sys/sysmacros.h>
45 45 #include <sys/types.h>
46 46 #include <sys/conf.h>
47 47 #include <sys/modctl.h>
48 48 #include <sys/stat.h>
↓ open down ↓ |
48 lines elided |
↑ open up ↑ |
49 49 #include <sys/file.h>
50 50 #include <sys/cmn_err.h>
51 51 #include <sys/varargs.h>
52 52 #include <sys/pci.h>
53 53 #include <sys/ddi.h>
54 54 #include <sys/sunddi.h>
55 55 #include <sys/sunldi.h>
56 56 #include <sys/pmem.h>
57 57 #include <sys/agpgart.h>
58 58 #include <sys/time.h>
/*
 * NOTE(review): the #include <sys/sysmacros.h> that this change added
 * here was removed again — <sys/sysmacros.h> is already included at the
 * top of this header's include list, so repeating it is redundant
 * (harmless because of its include guard, but it is noise in the diff).
 */
59 60 #include "drm_atomic.h"
60 61 #include "drm.h"
61 62 #include "queue.h"
62 63 #include "drm_linux_list.h"
63 64
/*
 * Compiler compatibility shims: provide GCC-style __inline__ and
 * __FUNCTION__ when the compiler does not.  On pre-C99 compilers
 * __FUNCTION__ degrades to a blank string rather than the function name.
 */
#ifndef __inline__
#define	__inline__	inline
#endif

#if !defined(__FUNCTION__)
#if defined(C99)
#define	__FUNCTION__	__func__
#else
#define	__FUNCTION__	" "
#endif
#endif
75 76
/*
 * DRM space units.  DRM_PAGE_SHIFT tracks the platform page shift
 * (PAGESHIFT, from the Solaris system headers), so the derived sizes
 * follow the system page size.
 */
#define	DRM_PAGE_SHIFT			PAGESHIFT
#define	DRM_PAGE_SIZE			(1 << DRM_PAGE_SHIFT)
#define	DRM_PAGE_OFFSET			(DRM_PAGE_SIZE - 1)
/* Now fully parenthesized so the mask composes safely in any expression. */
#define	DRM_PAGE_MASK			(~(DRM_PAGE_SIZE - 1))
/* 1 MB == 256 pages; NOTE(review): assumes 4K pages — confirm for non-4K. */
#define	DRM_MB2PAGES(x)			((x) << 8)
#define	DRM_PAGES2BYTES(x)		((x) << DRM_PAGE_SHIFT)
#define	DRM_BYTES2PAGES(x)		((x) >> DRM_PAGE_SHIFT)
/* pages -> kilobytes; NOTE(review): likewise assumes 4K pages. */
#define	DRM_PAGES2KB(x)			((x) << 2)
#define	DRM_ALIGNED(offset)		(((offset) & DRM_PAGE_OFFSET) == 0)

/* Linux-compatibility aliases used throughout the drm code. */
#define	PAGE_SHIFT			DRM_PAGE_SHIFT
#define	PAGE_SIZE			DRM_PAGE_SIZE
/* Driver instance / device-node constants. */
#define	DRM_MAX_INSTANCES	8
#define	DRM_DEVNODE		"drm"
#define	DRM_UNOPENED		0
#define	DRM_OPENED		1

#define	DRM_HASH_SIZE		16	/* Size of key hash table */
#define	DRM_KERNEL_CONTEXT	0	/* Change drm_resctx if changed */
#define	DRM_RESERVED_CONTEXTS	1	/* Change drm_resctx if changed */

/*
 * Allocation-area indices; NOTE(review): presumably consumed by the
 * memory bookkeeping in drm_memory.c — confirm against that file.
 */
#define	DRM_MEM_DMA		0
#define	DRM_MEM_SAREA		1
#define	DRM_MEM_DRIVER		2
#define	DRM_MEM_MAGIC		3
#define	DRM_MEM_IOCTLS		4
#define	DRM_MEM_MAPS		5
#define	DRM_MEM_BUFS		6
#define	DRM_MEM_SEGS		7
#define	DRM_MEM_PAGES		8
#define	DRM_MEM_FILES		9
#define	DRM_MEM_QUEUES		10
#define	DRM_MEM_CMDS		11
#define	DRM_MEM_MAPPINGS	12
#define	DRM_MEM_BUFLISTS	13
#define	DRM_MEM_DRMLISTS	14
#define	DRM_MEM_TOTALDRM	15
#define	DRM_MEM_BOUNDDRM	16
#define	DRM_MEM_CTXBITMAP	17
#define	DRM_MEM_STUB		18
#define	DRM_MEM_SGLISTS		19
#define	DRM_MEM_AGPLISTS	20
#define	DRM_MEM_CTXLIST		21
#define	DRM_MEM_MM		22
#define	DRM_MEM_HASHTAB		23
#define	DRM_MEM_OBJECTS		24

/* Context-bitmap and map/GEM hashing parameters. */
#define	DRM_MAX_CTXBITMAP	(PAGE_SIZE * 8)
#define	DRM_MAP_HASH_OFFSET	0x10000000
#define	DRM_MAP_HASH_ORDER	12
#define	DRM_OBJECT_HASH_ORDER	12
/* mmap offset range reserved for per-file (GEM) mappings. */
#define	DRM_FILE_PAGE_OFFSET_START	((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)
#define	DRM_FILE_PAGE_OFFSET_SIZE	((0xFFFFFFFFUL >> PAGE_SHIFT) * 16)
#define	DRM_MM_INIT_MAX_PAGES	256

133 134
/* Internal types and structures */
/* Element count of a true array — never use on a pointer/parameter. */
#define	DRM_ARRAY_SIZE(x)	(sizeof (x) / sizeof (x[0]))
/* NOTE: arguments are evaluated twice; avoid side effects. */
#define	DRM_MIN(a, b)		((a) < (b) ? (a) : (b))
#define	DRM_MAX(a, b)		((a) > (b) ? (a) : (b))

/*
 * Pack an interface version as (major << 16) | minor.  The arguments are
 * now parenthesized so expression arguments (e.g. DRM_IF_VERSION(v + 1, 0))
 * expand correctly; the old expansion applied << and | to the raw tokens.
 */
#define	DRM_IF_VERSION(maj, min)	((maj) << 16 | (min))
140 141
/* This port always builds with AGP support available. */
#define	__OS_HAS_AGP	1

/* Permissions and ownership of the /devices drm minor nodes. */
#define	DRM_DEV_MOD	(S_IRUSR|S_IWUSR|S_IRGRP|S_IWGRP)
#define	DRM_DEV_UID	0
#define	DRM_DEV_GID	0
146 147
#define	DRM_CURRENTPID			ddi_get_pid()
/*
 * Linux-style locking shims mapped onto Solaris kernel mutexes.  The
 * irqsave/irqrestore "flag" argument is ignored.  DRM_LOCK/DRM_UNLOCK/
 * DRM_LOCK_OWNED expect a variable named `dev' to be in scope at the
 * point of use.
 */
#define	DRM_SPINLOCK(l)			mutex_enter(l)
#define	DRM_SPINUNLOCK(u)		mutex_exit(u)
#define	DRM_SPINLOCK_ASSERT(l)		/* intentionally a no-op */
#define	DRM_LOCK()			mutex_enter(&dev->dev_lock)
#define	DRM_UNLOCK()			mutex_exit(&dev->dev_lock)
#define	DRM_LOCK_OWNED()		ASSERT(mutex_owned(&dev->dev_lock))
#define	spin_lock_irqsave(l, flag)	mutex_enter(l)
#define	spin_unlock_irqrestore(u, flag)	mutex_exit(u)
#define	spin_lock(l)			mutex_enter(l)
#define	spin_unlock(u)			mutex_exit(u)
158 159
159 160
/*
 * Delay helper.  The argument is now parenthesized so expressions such
 * as DRM_UDELAY(a + b) scale correctly; the old expansion was
 * `sec *1000', which bound * only to the last token.
 * NOTE(review): the parameter is named `sec' but is multiplied by 1000
 * before drv_usectohz() — the unit looks like milliseconds, not
 * seconds/microseconds; behavior kept as-is, confirm against callers.
 */
#define	DRM_UDELAY(sec)		delay(drv_usectohz((sec) * 1000))
#define	DRM_MEMORYBARRIER()	/* intentionally a no-op on this port */
162 163
/* Forward typedefs for the core objects defined later in this header. */
typedef struct drm_file		drm_file_t;
typedef struct drm_device	drm_device_t;
typedef struct drm_driver_info	drm_driver_t;

/* Binds the conventional `dev' name; expects `dev1' in scope (see below). */
#define	DRM_DEVICE	drm_device_t *dev = dev1
/* Canonical ioctl handler signature used by all drm_ioctl_t functions. */
#define	DRM_IOCTL_ARGS	\
	drm_device_t *dev1, intptr_t data, drm_file_t *fpriv, int mode
170 171
/*
 * User copy-in/copy-out helpers.  The *_WITH_RETURN forms execute
 * `return (EFAULT)' from the *calling* function on failure; they are now
 * wrapped in do { } while (0) so each behaves as a single statement and
 * is safe inside an unbraced if/else (CERT PRE10-C).  Callers already
 * terminate them with a semicolon, so this is source-compatible.
 */
#define	DRM_COPYFROM_WITH_RETURN(dest, src, size)		\
	do {							\
		if (ddi_copyin((src), (dest), (size), 0)) {	\
			DRM_ERROR("%s: copy from user failed", __func__); \
			return (EFAULT);			\
		}						\
	} while (0)

#define	DRM_COPYTO_WITH_RETURN(dest, src, size)			\
	do {							\
		if (ddi_copyout((src), (dest), (size), 0)) {	\
			DRM_ERROR("%s: copy to user failed", __func__);	\
			return (EFAULT);			\
		}						\
	} while (0)

/* Expression forms: evaluate to ddi_copyin/out's result (0 on success). */
#define	DRM_COPY_FROM_USER(dest, src, size) \
	ddi_copyin((src), (dest), (size), 0) /* flag for src */

#define	DRM_COPY_TO_USER(dest, src, size) \
	ddi_copyout((src), (dest), (size), 0) /* flags for dest */

#define	DRM_COPY_FROM_USER_UNCHECKED(arg1, arg2, arg3)	\
	ddi_copyin((arg2), (arg1), (arg3), 0)

/* arg1/arg3 are now parenthesized like the FROM_USER variant above. */
#define	DRM_COPY_TO_USER_UNCHECKED(arg1, arg2, arg3)	\
	ddi_copyout((arg2), (arg1), (arg3), 0)
194 195
/*
 * MMIO accessors: read/write a device register at `offset' within a
 * mapping's kernel virtual address (map->dev_addr).  volatile prevents
 * the compiler from caching or eliding device accesses.
 * NOTE(review): these dereference the address directly rather than going
 * through the DDI access handle (ddi_get*/ddi_put*), so no byte-order or
 * access-attribute translation is applied — confirm this is intended for
 * all supported platforms.
 */
#define	DRM_READ8(map, offset) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ16(map, offset) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_READ32(map, offset) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset))
#define	DRM_WRITE8(map, offset, val) \
	*(volatile uint8_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE16(map, offset, val) \
	*(volatile uint16_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
#define	DRM_WRITE32(map, offset, val) \
	*(volatile uint32_t *)((uintptr_t)((map)->dev_addr) + (offset)) = (val)
207 208
/*
 * Minimal Linux wait_queue_head_t replacement: a condition variable
 * protected by its own mutex.  Managed via the DRM_*_WAITQUEUE /
 * DRM_WAKEUP / DRM_WAIT* macros below.
 */
typedef struct drm_wait_queue {
	kcondvar_t	cv;	/* signalled by DRM_WAKEUP */
	kmutex_t	lock;	/* protects the cv and the waited condition */
}wait_queue_head_t;
212 213
/*
 * Wait-queue lifecycle helpers.  Now wrapped in do { } while (0) instead
 * of bare braces so each expands to a single statement (safe in unbraced
 * if/else, CERT PRE10-C); callers already append a semicolon.
 */

/* Initialize a wait queue; `pri' is the interrupt priority for the mutex. */
#define	DRM_INIT_WAITQUEUE(q, pri)	\
	do {	\
		mutex_init(&(q)->lock, NULL, MUTEX_DRIVER, (pri));	\
		cv_init(&(q)->cv, NULL, CV_DRIVER, NULL);	\
	} while (0)

/* Tear down a wait queue created by DRM_INIT_WAITQUEUE. */
#define	DRM_FINI_WAITQUEUE(q)	\
	do {	\
		mutex_destroy(&(q)->lock);	\
		cv_destroy(&(q)->cv);	\
	} while (0)

/* Wake every thread blocked on the queue. */
#define	DRM_WAKEUP(q)	\
	do {	\
		mutex_enter(&(q)->lock);	\
		cv_broadcast(&(q)->cv);	\
		mutex_exit(&(q)->lock);	\
	} while (0)
231 232
/* Linux `jiffies' mapped to the Solaris lbolt tick counter. */
#define	jiffies	ddi_get_lbolt()

/*
 * Block until `condition' becomes true or `timeout' (ticks) elapses.
 * On exit `ret' is: 0 if the condition became true, EBUSY on timeout
 * (cv_reltimedwait_sig returned -1), EINTR if interrupted by a signal
 * (returned 0).  Multi-statement macro, NOT a single statement — do not
 * use inside an unbraced if/else.
 */
#define	DRM_WAIT_ON(ret, q, timeout, condition)  \
	mutex_enter(&(q)->lock); \
	while (!(condition)) {	\
		ret = cv_reltimedwait_sig(&(q)->cv, &(q)->lock, timeout,\
		    TR_CLOCK_TICK);	\
		if (ret == -1) {	\
			ret = EBUSY;	\
			break;	\
		} else if (ret == 0) {	\
			ret = EINTR;  \
			break;	\
		} else {	\
			ret = 0;	\
		}	\
	}	\
	mutex_exit(&(q)->lock);

/*
 * Wait up to 30 * DRM_HZ ticks (~12s) for `condition'.  On exit `ret'
 * is: 0 if woken or signalled, -2 if the wait timed out with the
 * condition still false (GPU presumed hung).
 * NOTE(review): when the wait times out (-1) but the condition has
 * meanwhile become true, `ret' is left at -1 — confirm callers treat
 * that as success or consider resetting it to 0.
 */
#define	DRM_WAIT(ret, q, condition)  \
	mutex_enter(&(q)->lock); \
	if (!(condition)) {	\
		ret = cv_timedwait_sig(&(q)->cv, &(q)->lock, jiffies + 30 * DRM_HZ); \
		if (ret == -1) {	\
			/* gfx maybe hang */ \
			if (!(condition))	\
				ret = -2;	\
		} else {	\
			ret = 0;	\
		}	\
	}	\
	mutex_exit(&(q)->lock);
264 265
265 266
/*
 * Locate the shared-memory SAREA map (the _DRM_SHM map flagged
 * _DRM_CONTAINS_LOCK) and store it in dev_priv->sarea.  Expects `dev'
 * and `dev_priv' in scope; dev->dev_lock must be held.
 */
#define	DRM_GETSAREA()  \
{ \
	drm_local_map_t *map; \
	DRM_SPINLOCK_ASSERT(&dev->dev_lock); \
	TAILQ_FOREACH(map, &dev->maplist, link) { \
		if (map->type == _DRM_SHM && \
			map->flags & _DRM_CONTAINS_LOCK) { \
			dev_priv->sarea = map; \
			break; \
		} \
	} \
}

/*
 * Return EINVAL from the calling function unless `fpriv' currently holds
 * the hardware lock.  Expands to a bare if-statement: beware the
 * dangling-else hazard if used inside an unbraced if/else.
 */
#define	LOCK_TEST_WITH_RETURN(dev, fpriv)  \
	if (!_DRM_LOCK_IS_HELD(dev->lock.hw_lock->lock) || \
	    dev->lock.filp != fpriv) { \
		DRM_DEBUG("%s called without lock held", __func__); \
		return (EINVAL); \
	}
285 286
/* Linux-style interrupt-handler signature/result mapped to the DDI. */
#define	DRM_IRQ_ARGS		caddr_t arg
#define	IRQ_HANDLED		DDI_INTR_CLAIMED
#define	IRQ_NONE		DDI_INTR_UNCLAIMED
289 290
/* Result of probing whether a device sits on the AGP bus. */
enum {
	DRM_IS_NOT_AGP,		/* definitely not AGP */
	DRM_IS_AGP,		/* definitely AGP */
	DRM_MIGHT_BE_AGP	/* could not determine; treat cautiously */
};
295 296
/* Capabilities taken from src/sys/dev/pci/pcireg.h. */
#ifndef PCIY_AGP
#define	PCIY_AGP		0x02	/* PCI capability ID: AGP */
#endif

#ifndef PCIY_EXPRESS
#define	PCIY_EXPRESS		0x10	/* PCI capability ID: PCI Express */
#endif

/* Round `addr' up to the next page boundary. */
#define	PAGE_ALIGN(addr)	(((addr) + DRM_PAGE_SIZE - 1) & DRM_PAGE_MASK)
/*
 * "Super-user" check on a credential.
 * NOTE(review): this grants privilege when the saved *gid* is 0 as well
 * as uid 0 — gid 0 implying root privilege is unusual; confirm intended.
 */
#define	DRM_SUSER(p)		(crgetsgid(p) == 0 || crgetsuid(p) == 0)

/* GEM handle hash: iterate every entry across all hash buckets. */
#define	DRM_GEM_OBJIDR_HASHNODE	1024
#define	idr_list_for_each(entry, head) \
	for (int key = 0; key < DRM_GEM_OBJIDR_HASHNODE; key++) \
		list_for_each(entry, &(head)->next[key])

/*
 * wait for 400 milliseconds
 */
#define	DRM_HZ			drv_usectohz(400000)
317 318
/*
 * Linux-compatibility scalar types.
 * NOTE(review): dma_addr_t as `unsigned long' is only 32 bits on a
 * 32-bit kernel — confirm no mapped device memory lives above 4G there.
 */
typedef unsigned long dma_addr_t;
typedef uint64_t u64;
typedef uint32_t u32;
typedef uint16_t u16;
typedef uint8_t u8;
typedef uint_t irqreturn_t;
324 325
/* Values for the drm_supported field of struct drm_device. */
#define	DRM_SUPPORT	1
#define	DRM_UNSUPPORT	0

/*
 * NOTE(review): a second, identical `#define __OS_HAS_AGP 1' used to sit
 * here; it duplicated the definition earlier in this header and has been
 * removed (benign redefinition, but dead weight).
 */
329 330
/* One entry in a driver's table of supported PCI vendor/device IDs. */
typedef struct drm_pci_id_list
{
	int vendor;		/* PCI vendor ID */
	int device;		/* PCI device ID */
	long driver_private;	/* opaque per-chip data for the driver */
	char *name;		/* human-readable chip name */
} drm_pci_id_list_t;
337 338
/* Access-control flags for drm_ioctl_desc_t.flags (bitmask). */
#define	DRM_AUTH	0x1	/* caller must be authenticated */
#define	DRM_MASTER	0x2	/* caller must be the DRM master */
#define	DRM_ROOT_ONLY	0x4	/* caller must pass DRM_SUSER() */
typedef int drm_ioctl_t(DRM_IOCTL_ARGS);
/* One slot in an ioctl dispatch table: handler plus its access flags. */
typedef struct drm_ioctl_desc {
	int (*func)(DRM_IOCTL_ARGS);
	int flags;
} drm_ioctl_desc_t;
346 347
/* Node of the authentication-magic hash chain: magic -> opening file. */
typedef struct drm_magic_entry {
	drm_magic_t	magic;		/* the magic cookie */
	struct drm_file	*priv;		/* file that owns this magic */
	struct drm_magic_entry *next;	/* next entry in the same bucket */
} drm_magic_entry_t;

/* Head/tail of one bucket in drm_device.magiclist. */
typedef struct drm_magic_head {
	struct drm_magic_entry *head;
	struct drm_magic_entry *tail;
} drm_magic_head_t;
357 358
/* A single DMA buffer managed by the legacy buffer-map code. */
typedef struct drm_buf {
	int		idx;		/* Index into master buflist */
	int		total;		/* Buffer size */
	int		order;		/* log-base-2(total) */
	int		used;		/* Amount of buffer in use (for DMA) */
	unsigned long	offset;		/* Byte offset (used internally) */
	void		*address;	/* Address of buffer */
	unsigned long	bus_address;	/* Bus address of buffer */
	struct drm_buf	*next;		/* Kernel-only: used for free list */
	volatile int	pending;	/* On hardware DMA queue */
	drm_file_t	*filp;
				/* Uniq. identifier of holding process */
	int		context;	/* Kernel queue for this buffer */
	enum {
		DRM_LIST_NONE	= 0,
		DRM_LIST_FREE	= 1,
		DRM_LIST_WAIT	= 2,
		DRM_LIST_PEND	= 3,
		DRM_LIST_PRIO	= 4,
		DRM_LIST_RECLAIM = 5
	}		list;	/* Which list we're on */

	int		dev_priv_size;	/* Size of buffer private storage */
	void		*dev_private;	/* Per-buffer private storage */
} drm_buf_t;
383 384
/* Free-list bookkeeping for buffers of one size class. */
typedef struct drm_freelist {
	int		initialized;	/* Freelist in use */
	uint32_t	count;		/* Number of free buffers */
	drm_buf_t	*next;		/* End pointer */

	int		low_mark;	/* Low water mark */
	int		high_mark;	/* High water mark */
} drm_freelist_t;

/* All buffers of one power-of-two size class, plus their segments. */
typedef struct drm_buf_entry {
	int		buf_size;	/* size of each buffer, bytes */
	int		buf_count;	/* number of buffers */
	drm_buf_t	*buflist;	/* array of buf_count buffers */
	int		seg_count;	/* number of backing segments */
	int		page_order;	/* log2 pages per segment */

	uint32_t	*seglist;	/* per-segment handles */
	unsigned long	*seglist_bus;	/* per-segment bus addresses */

	drm_freelist_t	freelist;
} drm_buf_entry_t;
405 406
/* List of all drm_file structures for the open files of one device. */
typedef TAILQ_HEAD(drm_file_list, drm_file) drm_file_list_t;

/* BEGIN CSTYLED */
/* One mappable region (registers, framebuffer, SAREA, ...) of a device. */
typedef struct drm_local_map {
	unsigned long	offset;  /* Physical address (0 for SAREA) */
	unsigned long	size;	 /* Physical size (bytes) */
	drm_map_type_t	type;	 /* Type of memory mapped */
	drm_map_flags_t	flags;	 /* Flags */
	void		*handle; /* User-space: "Handle" to pass to mmap */
				 /* Kernel-space: kernel-virtual address */
	int		mtrr;	 /* Boolean: MTRR used */
	/* Private data */
	int		rid;	 /* PCI resource ID for bus_space */
	int		kernel_owned; /* Boolean: 1= initmapped, 0= addmapped */
	caddr_t		dev_addr;	  /* base device address */
	ddi_acc_handle_t  dev_handle;	  /* The data access handle */
	ddi_umem_cookie_t drm_umem_cookie; /* For SAREA alloc and free */
	TAILQ_ENTRY(drm_local_map) link;  /* linkage in dev->maplist */
} drm_local_map_t;
/* END CSTYLED */
426 427
/*
 * This structure defines the drm_mm memory object, which will be used by the
 * DRM for its buffer objects.
 */
struct drm_gem_object {
	/* Reference count of this object */
	atomic_t	refcount;

	/* Handle count of this object. Each handle also holds a reference */
	atomic_t	handlecount;

	/* Related drm device */
	struct drm_device	*dev;

	int	flink;	/* NOTE(review): presumably the flink (global) name state; confirm */
	/*
	 * Size of the object, in bytes.  Immutable over the object's
	 * lifetime.
	 */
	size_t	size;

	/*
	 * Global name for this object, starts at 1. 0 means unnamed.
	 * Access is covered by the object_name_lock in the related drm_device
	 */
	int	name;

	/*
	 * Memory domains. These monitor which caches contain read/write data
	 * related to the object. When transitioning from one set of domains
	 * to another, the driver is called to ensure that caches are suitably
	 * flushed and invalidated
	 */
	uint32_t	read_domains;
	uint32_t	write_domain;

	/*
	 * While validating an exec operation, the
	 * new read/write domain values are computed here.
	 * They will be transferred to the above values
	 * at the point that any cache flushing occurs
	 */
	uint32_t	pending_read_domains;
	uint32_t	pending_write_domain;

	void	*driver_private;

	/* Solaris-specific backing storage for the object: */
	drm_local_map_t		*map;	   /* mapping of the object, if any */
	ddi_dma_handle_t	dma_hdl;   /* DDI DMA handle */
	ddi_acc_handle_t	acc_hdl;   /* DDI access handle */
	caddr_t			kaddr;	   /* kernel virtual address */
	size_t			real_size; /* real size of memory */
	pfn_t			*pfnarray; /* page frame numbers of the pages */
};
481 482
/*
 * Hash-bucket node mapping a GEM handle to its object — a minimal
 * replacement for the Linux idr.  Iterated with idr_list_for_each().
 */
struct idr_list {
	struct idr_list		*next, *prev;	/* doubly-linked bucket chain */
	struct drm_gem_object	*obj;		/* object for this handle */
	uint32_t		handle;		/* the GEM handle */
	caddr_t			contain_ptr;	/* NOTE(review): appears to point back at a containing object; confirm against users */
};
488 489
/* Per-open-file state, one per clone minor. */
struct drm_file {
	TAILQ_ENTRY(drm_file) link;	/* linkage in dev->files */
	int		authenticated;	/* passed magic authentication */
	int		master;		/* this open is the DRM master */
	int		minor;		/* clone minor number */
	pid_t		pid;		/* opening process */
	uid_t		uid;		/* opening user */
	int		refs;		/* reference count */
	drm_magic_t	magic;		/* auth magic handed to this file */
	unsigned long	ioctl_count;	/* number of ioctls issued */
	void		*driver_priv;	/* hardware driver per-file data */
	/* Mapping of mm object handles to object pointers. */
	struct idr_list	object_idr;
	/* Lock for synchronization of access to object_idr. */
	kmutex_t	table_lock;

	dev_t		dev;		/* device this file opened */
	cred_t		*credp;		/* credential captured at open */
};
508 509
/* State of the hardware lock shared with userland through the SAREA. */
typedef struct drm_lock_data {
	drm_hw_lock_t	*hw_lock;	/* Hardware lock */
	drm_file_t	*filp;
			/* Uniq. identifier of holding process */
	kcondvar_t	lock_cv;	/* lock queue - SOLARIS Specific */
	kmutex_t	lock_mutex;	/* lock - SOLARIS Specific */
	unsigned long	lock_time;	/* Time of last lock in clock ticks */
} drm_lock_data_t;
517 518
/*
 * This structure, in drm_device_t, is always initialized while the device
 * is open.  dev->dma_lock protects the incrementing of dev->buf_use, which
 * when set marks that no further bufs may be allocated until device teardown
 * occurs (when the last open of the device has closed).  The high/low
 * watermarks of bufs are only touched by the X Server, and thus not
 * concurrently accessed, so no locking is needed.
 */
typedef struct drm_device_dma {
	drm_buf_entry_t	bufs[DRM_MAX_ORDER+1];	/* one entry per size order */
	int		buf_count;		/* total buffers */
	drm_buf_t	**buflist;	/* Vector of pointers info bufs */
	int		seg_count;		/* total backing segments */
	int		page_count;		/* total pages */
	unsigned long	*pagelist;		/* page addresses */
	unsigned long	byte_count;		/* total bytes */
	enum {
		_DRM_DMA_USE_AGP = 0x01,	/* buffers live in AGP space */
		_DRM_DMA_USE_SG	 = 0x02		/* buffers use scatter/gather */
	} flags;
} drm_device_dma_t;
539 540
/* One AGP allocation made on behalf of userland. */
typedef struct drm_agp_mem {
	void		*handle;	/* agpgart allocation handle */
	unsigned long	bound;		/* address */
	int		pages;		/* size in pages */
	caddr_t		phys_addr;	/* physical address */
	struct drm_agp_mem *prev;	/* doubly-linked allocation list */
	struct drm_agp_mem *next;
} drm_agp_mem_t;

/* Per-device AGP state; talks to the agpgart driver through LDI. */
typedef struct drm_agp_head {
	agp_info_t	agp_info;	/* info reported by agpgart */
	const char	*chipset;	/* chipset name */
	drm_agp_mem_t	*memory;	/* list of outstanding allocations */
	unsigned long	mode;		/* AGP mode bits */
	int		enabled;	/* AGP has been enabled */
	int		acquired;	/* agpgart has been acquired */
	unsigned long	base;		/* aperture base */
	int		mtrr;		/* MTRR in use for the aperture */
	int		cant_use_aperture;
	unsigned long	page_mask;
	ldi_ident_t	agpgart_li;	/* LDI identity for agpgart */
	ldi_handle_t	agpgart_lh;	/* LDI handle to agpgart */
} drm_agp_head_t;
563 564
564 565
/* A DDI DMA allocation: handles plus resolved addresses. */
typedef struct drm_dma_handle {
	ddi_dma_handle_t	dma_hdl;	/* DMA handle */
	ddi_acc_handle_t	acc_hdl;	/* access handle */
	ddi_dma_cookie_t	cookie;		/* first DMA cookie */
	uint_t			cookie_num;	/* number of cookies */
	uintptr_t		vaddr;	/* virtual addr */
	uintptr_t		paddr;	/* physical addr */
	size_t			real_sz; /* real size of memory */
} drm_dma_handle_t;

/* Scatter/gather memory handed to the device (drm_scatter.c). */
typedef struct drm_sg_mem {
	unsigned long		handle;		/* user-visible handle */
	void			*virtual;	/* kernel virtual address */
	int			pages;		/* size in pages */
	dma_addr_t		*busaddr;	/* per-page bus addresses */
	ddi_umem_cookie_t	*umem_cookie;	/* backing umem cookies */
	drm_dma_handle_t	*dmah_sg;	/* s/g DMA handle */
	drm_dma_handle_t	*dmah_gart;	/* Handle to PCI memory */
} drm_sg_mem_t;
584 585
/*
 * Generic memory manager structs
 */

/* One extent in the manager; sits on both the free and the memory list. */
struct drm_mm_node {
	struct list_head fl_entry;	/* linkage in the free list */
	struct list_head ml_entry;	/* linkage in the full memory list */
	int free;			/* nonzero when unallocated */
	unsigned long start;		/* first unit of this extent */
	unsigned long size;		/* extent length */
	struct drm_mm *mm;		/* owning manager */
	void *private;			/* user of this node */
};

/* Range manager: list heads for the free and memory node lists. */
struct drm_mm {
	struct list_head fl_entry;
	struct list_head ml_entry;
};
603 604
/* List of all mappable regions of a device (see drm_local_map_t). */
typedef TAILQ_HEAD(drm_map_list, drm_local_map) drm_map_list_t;

/* Signal to deliver to a process when a vblank sequence is reached. */
typedef TAILQ_HEAD(drm_vbl_sig_list, drm_vbl_sig) drm_vbl_sig_list_t;
typedef struct drm_vbl_sig {
	TAILQ_ENTRY(drm_vbl_sig) link;
	unsigned int	sequence;	/* vblank count to fire at */
	int		signo;		/* signal number to send */
	int		pid;		/* target process */
} drm_vbl_sig_t;


/* used for clone device */
typedef TAILQ_HEAD(drm_cminor_list, drm_cminor) drm_cminor_list_t;
typedef struct drm_cminor {
	TAILQ_ENTRY(drm_cminor) link;
	drm_file_t	*fpriv;		/* file state of this clone open */
	int		minor;		/* clone minor number */
} drm_cminor_t;
622 623
/* location of GART table */
#define	DRM_ATI_GART_MAIN	1	/* table lives in main memory */
#define	DRM_ATI_GART_FB		2	/* table lives in framebuffer memory */

/* ATI PCI GART state (Radeon-family address translation table). */
typedef struct ati_pcigart_info {
	int gart_table_location;	/* DRM_ATI_GART_MAIN or _FB */
	int is_pcie;			/* PCIe (vs plain PCI) GART format */
	void *addr;			/* kernel address of the table */
	dma_addr_t bus_addr;		/* bus address of the table */
	drm_local_map_t mapping;	/* mapping covering the table */
} drm_ati_pcigart_info;
634 635
/* DRM device structure */
struct drm_device;

/*
 * Vector of driver-supplied entry points and capability flags; the drm
 * core calls through it for all hardware-specific behavior.
 * NOTE(review): whether each entry may be NULL depends on the caller —
 * confirm per entry point before omitting one.
 */
struct drm_driver_info {
	/* attach / open / close lifecycle */
	int (*load)(struct drm_device *, unsigned long);
	int (*firstopen)(struct drm_device *);
	int (*open)(struct drm_device *, drm_file_t *);
	void (*preclose)(struct drm_device *, drm_file_t *);
	void (*postclose)(struct drm_device *, drm_file_t *);
	void (*lastclose)(struct drm_device *);
	int (*unload)(struct drm_device *);
	void (*reclaim_buffers_locked)(struct drm_device *, drm_file_t *);
	int (*presetup)(struct drm_device *);
	int (*postsetup)(struct drm_device *);
	int (*open_helper)(struct drm_device *, drm_file_t *);
	void (*free_filp_priv)(struct drm_device *, drm_file_t *);
	void (*release)(struct drm_device *, void *);

	/* DMA engine hooks */
	int (*dma_ioctl)(DRM_IOCTL_ARGS);
	void (*dma_ready)(struct drm_device *);
	int (*dma_quiescent)(struct drm_device *);
	int (*dma_flush_block_and_flush)(struct drm_device *,
			int, drm_lock_flags_t);
	int (*dma_flush_unblock)(struct drm_device *, int,
			drm_lock_flags_t);

	/* hardware context management */
	int (*context_ctor)(struct drm_device *, int);
	int (*context_dtor)(struct drm_device *, int);
	int (*kernel_context_switch)(struct drm_device *, int, int);
	int (*kernel_context_switch_unlock)(struct drm_device *);

	/* AGP probing, interrupts and vblank handling */
	int (*device_is_agp) (struct drm_device *);
	int (*irq_preinstall)(struct drm_device *);
	void (*irq_postinstall)(struct drm_device *);
	void (*irq_uninstall)(struct drm_device *dev);
	uint_t (*irq_handler)(DRM_IRQ_ARGS);
	int (*vblank_wait)(struct drm_device *, unsigned int *);
	int (*vblank_wait2)(struct drm_device *, unsigned int *);
	/* added for intel minimized vblank */
	u32 (*get_vblank_counter)(struct drm_device *dev, int crtc);
	int (*enable_vblank)(struct drm_device *dev, int crtc);
	void (*disable_vblank)(struct drm_device *dev, int crtc);

	/*
	 * Driver-specific constructor for drm_gem_objects, to set up
	 * obj->driver_private.
	 *
	 * Returns 0 on success.
	 */
	int (*gem_init_object) (struct drm_gem_object *obj);
	void (*gem_free_object) (struct drm_gem_object *obj);


	/* driver-private ioctl table */
	drm_ioctl_desc_t *driver_ioctls;
	int	max_driver_ioctl;

	int	buf_priv_size;		/* size of per-buffer private storage */
	int	driver_major;
	int	driver_minor;
	int	driver_patchlevel;
	const char *driver_name;	/* Simple driver name */
	const char *driver_desc;	/* Longer driver name */
	const char *driver_date;	/* Date of last major changes. */

	/* capability bits: which features this hardware/driver uses */
	unsigned use_agp :1;
	unsigned require_agp :1;
	unsigned use_sg :1;
	unsigned use_dma :1;
	unsigned use_pci_dma :1;
	unsigned use_dma_queue :1;
	unsigned use_irq :1;
	unsigned use_vbl_irq :1;
	unsigned use_vbl_irq2 :1;
	unsigned use_mtrr :1;
	unsigned use_gem;	/* full word, unlike the bitfields above */
};
707 708
/*
 * hardware-specific code needs to initialize mutexes which
 * can be used in interrupt context, so they need to know
 * the interrupt priority. Interrupt cookie in drm_device
 * structure is the intr_block field.
 */
/* Interrupt priority suitable for mutex_init() of interrupt-side locks. */
#define	DRM_INTR_PRI(dev) \
	DDI_INTR_PRI((dev)->intr_block)
716 717
/* Per-instance soft state for one DRM device. */
struct drm_device {
	drm_driver_t	*driver;	/* driver entry-point vector */
	drm_cminor_list_t	minordevs;	/* clone minors of this device */
	dev_info_t *dip;		/* DDI device node */
	void	*drm_handle;
	int	drm_supported;		/* DRM_SUPPORT / DRM_UNSUPPORT */
	const char	*desc; /* current driver description */
	kmutex_t	*irq_mutex;
	kcondvar_t	*irq_cv;

	ddi_iblock_cookie_t intr_block;	/* see DRM_INTR_PRI() */
	uint32_t	pci_device;	/* PCI device id */
	uint32_t	pci_vendor;
	char	*unique;	/* Unique identifier: e.g., busid */
	int	unique_len;	/* Length of unique field */
	int	if_version;	/* Highest interface version set */
	int	flags;	/* Flags to open(2) */

	/* Locks */
	kmutex_t	vbl_lock;	/* protects vblank operations */
	kmutex_t	dma_lock;	/* protects dev->dma */
	kmutex_t	irq_lock;	/* protects irq condition checks */
	kmutex_t	dev_lock;	/* protects everything else */
	drm_lock_data_t	lock;	/* Information on hardware lock */
	kmutex_t	struct_mutex;	/* < For others */

	/* Usage Counters */
	int	open_count;	/* Outstanding files open */
	int	buf_use;	/* Buffers in use -- cannot alloc */

	/* Performance counters */
	unsigned long	counters;
	drm_stat_type_t	types[15];
	uint32_t	counts[15];

	/* Authentication */
	drm_file_list_t	files;
	drm_magic_head_t magiclist[DRM_HASH_SIZE];

	/* Linked list of mappable regions. Protected by dev_lock */
	drm_map_list_t	maplist;

	drm_local_map_t	**context_sareas;	/* per-context SAREA maps */
	int	max_context;

	/* DMA queues (contexts) */
	drm_device_dma_t	*dma;	/* Optional pointer for DMA support */

	/* Context support */
	int	irq;	/* Interrupt used by board */
	int	irq_enabled;	/* True if the irq handler is enabled */
	int	pci_domain;
	int	pci_bus;
	int	pci_slot;
	int	pci_func;
	atomic_t	context_flag;	/* Context swapping flag */
	int	last_context;	/* Last current context */

	/* Only used for Radeon */
	atomic_t	vbl_received;
	atomic_t	vbl_received2;

	drm_vbl_sig_list_t vbl_sig_list;
	drm_vbl_sig_list_t vbl_sig_list2;
	/*
	 * At load time, disabling the vblank interrupt won't be allowed since
	 * old clients may not call the modeset ioctl and therefore misbehave.
	 * Once the modeset ioctl *has* been called though, we can safely
	 * disable them when unused.
	 */
	int vblank_disable_allowed;

	wait_queue_head_t	vbl_queue;	/* vbl wait channel */
	/* vbl wait channel array */
	wait_queue_head_t	*vbl_queues;

	/* number of VBLANK interrupts */
	/* (driver must alloc the right number of counters) */
	atomic_t	*_vblank_count;
	/* signal list to send on VBLANK */
	struct drm_vbl_sig_list	*vbl_sigs;

	/* number of signals pending on all crtcs */
	atomic_t	vbl_signal_pending;
	/* number of users of vblank interrupts per crtc */
	atomic_t	*vblank_refcount;
	/* protected by dev->vbl_lock, used for wraparound handling */
	u32	*last_vblank;
	/* so we don't call enable more than */
	atomic_t	*vblank_enabled;
	/* Display driver is setting mode */
	int	*vblank_inmodeset;
	/* Don't wait while crtc is likely disabled */
	int	*vblank_suspend;
	/* size of vblank counter register */
	u32	max_vblank_count;
	int	num_crtcs;
	kmutex_t	tasklet_lock;
	void (*locked_tasklet_func)(struct drm_device *dev);

	pid_t	buf_pgid;
	drm_agp_head_t	*agp;		/* AGP state, NULL if no AGP */
	drm_sg_mem_t	*sg;	/* Scatter gather memory */
	uint32_t	*ctx_bitmap;	/* allocated context ids */
	void	*dev_private;		/* hardware driver private state */
	unsigned int	agp_buffer_token;
	drm_local_map_t	*agp_buffer_map;

	kstat_t	*asoft_ksp; /* kstat support */

	/* name Drawable information */
	kmutex_t	drw_lock;
	unsigned int	drw_bitfield_length;
	u32	*drw_bitfield;
	unsigned int	drw_info_length;
	drm_drawable_info_t	**drw_info;

	/* \name GEM information */
	/* @{ */
	kmutex_t	object_name_lock;	/* protects object_name_idr */
	struct idr_list	object_name_idr;	/* flink name -> object */
	atomic_t	object_count;
	atomic_t	object_memory;
	atomic_t	pin_count;
	atomic_t	pin_memory;
	atomic_t	gtt_count;
	atomic_t	gtt_memory;
	uint32_t	gtt_total;
	uint32_t invalidate_domains;    /* domains pending invalidation */
	uint32_t flush_domains;         /* domains pending flush */
	/* @} */

	/*
	 * Saving S3 context
	 */
	void		*s3_private;
};
854 855
855 856 /* Memory management support (drm_memory.c) */
856 857 void drm_mem_init(void);
857 858 void drm_mem_uninit(void);
858 859 void *drm_alloc(size_t, int);
859 860 void *drm_calloc(size_t, size_t, int);
860 861 void *drm_realloc(void *, size_t, size_t, int);
861 862 void drm_free(void *, size_t, int);
862 863 int drm_ioremap(drm_device_t *, drm_local_map_t *);
863 864 void drm_ioremapfree(drm_local_map_t *);
864 865
865 866 void drm_core_ioremap(struct drm_local_map *, struct drm_device *);
866 867 void drm_core_ioremapfree(struct drm_local_map *, struct drm_device *);
867 868
868 869 void drm_pci_free(drm_device_t *, drm_dma_handle_t *);
869 870 void *drm_pci_alloc(drm_device_t *, size_t, size_t, dma_addr_t, int);
870 871
871 872 struct drm_local_map *drm_core_findmap(struct drm_device *, unsigned long);
872 873
873 874 int drm_context_switch(drm_device_t *, int, int);
874 875 int drm_context_switch_complete(drm_device_t *, int);
875 876 int drm_ctxbitmap_init(drm_device_t *);
876 877 void drm_ctxbitmap_cleanup(drm_device_t *);
877 878 void drm_ctxbitmap_free(drm_device_t *, int);
878 879 int drm_ctxbitmap_next(drm_device_t *);
879 880
880 881 /* Locking IOCTL support (drm_lock.c) */
881 882 int drm_lock_take(drm_lock_data_t *, unsigned int);
882 883 int drm_lock_transfer(drm_device_t *,
883 884 drm_lock_data_t *, unsigned int);
884 885 int drm_lock_free(drm_device_t *,
885 886 volatile unsigned int *, unsigned int);
886 887
887 888 /* Buffer management support (drm_bufs.c) */
888 889 unsigned long drm_get_resource_start(drm_device_t *, unsigned int);
889 890 unsigned long drm_get_resource_len(drm_device_t *, unsigned int);
890 891 int drm_initmap(drm_device_t *, unsigned long, unsigned long,
891 892 unsigned int, int, int);
892 893 void drm_rmmap(drm_device_t *, drm_local_map_t *);
893 894 int drm_addmap(drm_device_t *, unsigned long, unsigned long,
894 895 drm_map_type_t, drm_map_flags_t, drm_local_map_t **);
895 896 int drm_order(unsigned long);
896 897
897 898 /* DMA support (drm_dma.c) */
898 899 int drm_dma_setup(drm_device_t *);
899 900 void drm_dma_takedown(drm_device_t *);
900 901 void drm_free_buffer(drm_device_t *, drm_buf_t *);
901 902 void drm_reclaim_buffers(drm_device_t *, drm_file_t *);
902 903 #define drm_core_reclaim_buffers drm_reclaim_buffers
903 904
904 905 /* IRQ support (drm_irq.c) */
905 906 int drm_irq_install(drm_device_t *);
906 907 int drm_irq_uninstall(drm_device_t *);
907 908 uint_t drm_irq_handler(DRM_IRQ_ARGS);
908 909 void drm_driver_irq_preinstall(drm_device_t *);
909 910 void drm_driver_irq_postinstall(drm_device_t *);
910 911 void drm_driver_irq_uninstall(drm_device_t *);
911 912 int drm_vblank_wait(drm_device_t *, unsigned int *);
912 913 void drm_vbl_send_signals(drm_device_t *);
913 914 void drm_handle_vblank(struct drm_device *dev, int crtc);
914 915 u32 drm_vblank_count(struct drm_device *dev, int crtc);
915 916 int drm_vblank_get(struct drm_device *dev, int crtc);
916 917 void drm_vblank_put(struct drm_device *dev, int crtc);
917 918 int drm_vblank_init(struct drm_device *dev, int num_crtcs);
918 919 void drm_vblank_cleanup(struct drm_device *dev);
919 920 int drm_modeset_ctl(DRM_IOCTL_ARGS);
920 921
921 922 /* AGP/GART support (drm_agpsupport.c) */
922 923 int drm_device_is_agp(drm_device_t *);
923 924 int drm_device_is_pcie(drm_device_t *);
924 925 drm_agp_head_t *drm_agp_init(drm_device_t *);
925 926 void drm_agp_fini(drm_device_t *);
926 927 int drm_agp_do_release(drm_device_t *);
927 928 void *drm_agp_allocate_memory(size_t pages,
928 929 uint32_t type, drm_device_t *dev);
929 930 int drm_agp_free_memory(agp_allocate_t *handle, drm_device_t *dev);
930 931 int drm_agp_bind_memory(unsigned int, uint32_t, drm_device_t *);
931 932 int drm_agp_unbind_memory(unsigned long, drm_device_t *);
932 933 int drm_agp_bind_pages(drm_device_t *dev,
933 934 pfn_t *pages,
934 935 unsigned long num_pages,
935 936 uint32_t gtt_offset);
936 937 int drm_agp_unbind_pages(drm_device_t *dev,
937 938 unsigned long num_pages,
938 939 uint32_t gtt_offset,
939 940 uint32_t type);
940 941 void drm_agp_chipset_flush(struct drm_device *dev);
941 942 void drm_agp_rebind(struct drm_device *dev);
942 943
943 944 /* kstat support (drm_kstats.c) */
944 945 int drm_init_kstats(drm_device_t *);
945 946 void drm_fini_kstats(drm_device_t *);
946 947
947 948 /* Scatter Gather Support (drm_scatter.c) */
948 949 void drm_sg_cleanup(drm_device_t *, drm_sg_mem_t *);
949 950
950 951 /* ATI PCIGART support (ati_pcigart.c) */
951 952 int drm_ati_pcigart_init(drm_device_t *, drm_ati_pcigart_info *);
952 953 int drm_ati_pcigart_cleanup(drm_device_t *, drm_ati_pcigart_info *);
953 954
954 955 /* Locking IOCTL support (drm_drv.c) */
955 956 int drm_lock(DRM_IOCTL_ARGS);
956 957 int drm_unlock(DRM_IOCTL_ARGS);
957 958 int drm_version(DRM_IOCTL_ARGS);
958 959 int drm_setversion(DRM_IOCTL_ARGS);
959 960 /* Cache management (drm_cache.c) */
960 961 void drm_clflush_pages(caddr_t *pages, unsigned long num_pages);
961 962
962 963 /* Misc. IOCTL support (drm_ioctl.c) */
963 964 int drm_irq_by_busid(DRM_IOCTL_ARGS);
964 965 int drm_getunique(DRM_IOCTL_ARGS);
965 966 int drm_setunique(DRM_IOCTL_ARGS);
966 967 int drm_getmap(DRM_IOCTL_ARGS);
967 968 int drm_getclient(DRM_IOCTL_ARGS);
968 969 int drm_getstats(DRM_IOCTL_ARGS);
969 970 int drm_noop(DRM_IOCTL_ARGS);
970 971
971 972 /* Context IOCTL support (drm_context.c) */
972 973 int drm_resctx(DRM_IOCTL_ARGS);
973 974 int drm_addctx(DRM_IOCTL_ARGS);
974 975 int drm_modctx(DRM_IOCTL_ARGS);
975 976 int drm_getctx(DRM_IOCTL_ARGS);
976 977 int drm_switchctx(DRM_IOCTL_ARGS);
977 978 int drm_newctx(DRM_IOCTL_ARGS);
978 979 int drm_rmctx(DRM_IOCTL_ARGS);
979 980 int drm_setsareactx(DRM_IOCTL_ARGS);
980 981 int drm_getsareactx(DRM_IOCTL_ARGS);
981 982
982 983 /* Drawable IOCTL support (drm_drawable.c) */
983 984 int drm_adddraw(DRM_IOCTL_ARGS);
984 985 int drm_rmdraw(DRM_IOCTL_ARGS);
985 986 int drm_update_draw(DRM_IOCTL_ARGS);
986 987
987 988 /* Authentication IOCTL support (drm_auth.c) */
988 989 int drm_getmagic(DRM_IOCTL_ARGS);
989 990 int drm_authmagic(DRM_IOCTL_ARGS);
990 991 int drm_remove_magic(drm_device_t *, drm_magic_t);
991 992 drm_file_t *drm_find_file(drm_device_t *, drm_magic_t);
992 993 /* Buffer management support (drm_bufs.c) */
993 994 int drm_addmap_ioctl(DRM_IOCTL_ARGS);
994 995 int drm_rmmap_ioctl(DRM_IOCTL_ARGS);
995 996 int drm_addbufs_ioctl(DRM_IOCTL_ARGS);
996 997 int drm_infobufs(DRM_IOCTL_ARGS);
997 998 int drm_markbufs(DRM_IOCTL_ARGS);
998 999 int drm_freebufs(DRM_IOCTL_ARGS);
999 1000 int drm_mapbufs(DRM_IOCTL_ARGS);
1000 1001
1001 1002 /* DMA support (drm_dma.c) */
1002 1003 int drm_dma(DRM_IOCTL_ARGS);
1003 1004
1004 1005 /* IRQ support (drm_irq.c) */
1005 1006 int drm_control(DRM_IOCTL_ARGS);
1006 1007 int drm_wait_vblank(DRM_IOCTL_ARGS);
1007 1008
1008 1009 /* AGP/GART support (drm_agpsupport.c) */
1009 1010 int drm_agp_acquire(DRM_IOCTL_ARGS);
1010 1011 int drm_agp_release(DRM_IOCTL_ARGS);
1011 1012 int drm_agp_enable(DRM_IOCTL_ARGS);
1012 1013 int drm_agp_info(DRM_IOCTL_ARGS);
1013 1014 int drm_agp_alloc(DRM_IOCTL_ARGS);
1014 1015 int drm_agp_free(DRM_IOCTL_ARGS);
1015 1016 int drm_agp_unbind(DRM_IOCTL_ARGS);
1016 1017 int drm_agp_bind(DRM_IOCTL_ARGS);
1017 1018
1018 1019 /* Scatter Gather Support (drm_scatter.c) */
1019 1020 int drm_sg_alloc(DRM_IOCTL_ARGS);
1020 1021 int drm_sg_free(DRM_IOCTL_ARGS);
1021 1022
1022 1023 /* drm_mm.c */
1023 1024 struct drm_mm_node *drm_mm_get_block(struct drm_mm_node *parent,
1024 1025 unsigned long size, unsigned alignment);
1025 1026 struct drm_mm_node *drm_mm_search_free(const struct drm_mm *mm,
1026 1027 unsigned long size,
1027 1028 unsigned alignment, int best_match);
1028 1029
1029 1030 extern void drm_mm_clean_ml(const struct drm_mm *mm);
1030 1031 extern int drm_debug_flag;
1031 1032
1032 1033 /* We add function to support DRM_DEBUG,DRM_ERROR,DRM_INFO */
1033 1034 extern void drm_debug(const char *fmt, ...);
1034 1035 extern void drm_error(const char *fmt, ...);
1035 1036 extern void drm_info(const char *fmt, ...);
1036 1037
/*
 * Logging macros.
 *
 * DRM_DEBUG fires when drm_debug_flag >= 2, DRM_INFO when
 * drm_debug_flag >= 1; DRM_ERROR is always active.
 *
 * The DEBUG-build forms are function-like variadic macros wrapped in
 * do { } while (0) so that each invocation expands to exactly one
 * statement.  The previous object-like form
 * (`#define DRM_DEBUG if (drm_debug_flag >= 2) drm_debug`) expanded to
 * an unbraced `if`, which silently captured a following `else`
 * (dangling-else) and misbehaved as the body of an unbraced
 * `if`/`else`.  All call sites already use function-call syntax
 * (required by the non-DEBUG variadic definitions below), so this
 * change is source-compatible.
 */
#ifdef DEBUG
#define	DRM_DEBUG(...)						\
	do {							\
		if (drm_debug_flag >= 2)			\
			drm_debug(__VA_ARGS__);			\
	} while (0)
#define	DRM_INFO(...)						\
	do {							\
		if (drm_debug_flag >= 1)			\
			drm_info(__VA_ARGS__);			\
	} while (0)
#else
#define	DRM_DEBUG(...)	do { } while (0)
#define	DRM_INFO(...)	do { } while (0)
#endif

#define	DRM_ERROR	drm_error
1046 1047
1047 1048
/*
 * Maximum number of DRM device instances supported by this driver.
 * NOTE(review): presumably bounds the instance/minor numbering used by
 * drm_dev_to_instance()/drm_dev_to_minor() -- confirm at call sites.
 */
#define MAX_INSTNUMS 16
1049 1050
1050 1051 extern int drm_dev_to_instance(dev_t);
1051 1052 extern int drm_dev_to_minor(dev_t);
1052 1053 extern void *drm_supp_register(dev_info_t *, drm_device_t *);
1053 1054 extern int drm_supp_unregister(void *);
1054 1055
1055 1056 extern int drm_open(drm_device_t *, drm_cminor_t *, int, int, cred_t *);
1056 1057 extern int drm_close(drm_device_t *, int, int, int, cred_t *);
1057 1058 extern int drm_attach(drm_device_t *);
1058 1059 extern int drm_detach(drm_device_t *);
1059 1060 extern int drm_probe(drm_device_t *, drm_pci_id_list_t *);
1060 1061
1061 1062 extern int drm_pci_init(drm_device_t *);
1062 1063 extern void drm_pci_end(drm_device_t *);
1063 1064 extern int pci_get_info(drm_device_t *, int *, int *, int *);
1064 1065 extern int pci_get_irq(drm_device_t *);
1065 1066 extern int pci_get_vendor(drm_device_t *);
1066 1067 extern int pci_get_device(drm_device_t *);
1067 1068
1068 1069 extern struct drm_drawable_info *drm_get_drawable_info(drm_device_t *,
1069 1070 drm_drawable_t);
1070 1071 /* File Operations helpers (drm_fops.c) */
1071 1072 extern drm_file_t *drm_find_file_by_proc(drm_device_t *, cred_t *);
1072 1073 extern drm_cminor_t *drm_find_file_by_minor(drm_device_t *, int);
1073 1074 extern int drm_open_helper(drm_device_t *, drm_cminor_t *, int, int,
1074 1075 cred_t *);
1075 1076
1076 1077 /* Graphics Execution Manager library functions (drm_gem.c) */
1077 1078 int drm_gem_init(struct drm_device *dev);
1078 1079 void drm_gem_object_free(struct drm_gem_object *obj);
1079 1080 struct drm_gem_object *drm_gem_object_alloc(struct drm_device *dev,
1080 1081 size_t size);
1081 1082 void drm_gem_object_handle_free(struct drm_gem_object *obj);
1082 1083
1083 1084 void drm_gem_object_reference(struct drm_gem_object *obj);
1084 1085 void drm_gem_object_unreference(struct drm_gem_object *obj);
1085 1086
1086 1087 int drm_gem_handle_create(struct drm_file *file_priv,
1087 1088 struct drm_gem_object *obj,
1088 1089 int *handlep);
1089 1090 void drm_gem_object_handle_reference(struct drm_gem_object *obj);
1090 1091
1091 1092 void drm_gem_object_handle_unreference(struct drm_gem_object *obj);
1092 1093
1093 1094 struct drm_gem_object *drm_gem_object_lookup(struct drm_file *filp,
1094 1095 int handle);
1095 1096 int drm_gem_close_ioctl(DRM_IOCTL_ARGS);
1096 1097 int drm_gem_flink_ioctl(DRM_IOCTL_ARGS);
1097 1098 int drm_gem_open_ioctl(DRM_IOCTL_ARGS);
1098 1099 void drm_gem_open(struct drm_file *file_private);
1099 1100 void drm_gem_release(struct drm_device *dev, struct drm_file *file_private);
1100 1101
1101 1102
1102 1103 #endif /* _DRMP_H */
↓ open down ↓ |
1034 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX