4888 Undocument dma_req(9s)
4884 EOF scsi_hba_attach
4886 EOF ddi_dmae_getlim
4887 EOF ddi_iomin
4634 undocument scsi_hba_attach() and ddi_dma_lim(9s)
4630 clean stale references to ddi_iopb_alloc and ddi_iopb_free
--- old/usr/src/uts/common/sys/ddidmareq.h
+++ new/usr/src/uts/common/sys/ddidmareq.h
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 - * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
25 + * Copyright 2014 Garrett D'Amore <garrett@damore.org>
26 26 */
27 27
28 28 #ifndef _SYS_DDIDMAREQ_H
29 29 #define _SYS_DDIDMAREQ_H
30 30
31 31 #ifdef __cplusplus
32 32 extern "C" {
33 33 #endif
34 34
35 35 /*
36 36 * Memory Objects
37 37 *
38 38 * Definitions of structures that can describe
39 39 * an object that can be mapped for DMA.
40 40 */
41 41
42 42 /*
43 43 * Structure describing a virtual address
44 44 */
45 45 struct v_address {
46 46 caddr_t v_addr; /* base virtual address */
47 47 struct as *v_as; /* pointer to address space */
48 48 void *v_priv; /* priv data for shadow I/O */
49 49 };
50 50
51 51 /*
52 52 * Structure describing a page-based address
53 53 */
54 54 struct pp_address {
55 55 /*
56 56 * A pointer to a circularly linked list of page structures.
57 57 */
58 58 struct page *pp_pp;
59 59 uint_t pp_offset; /* offset within first page */
60 60 };
61 61
62 62 /*
63 63 * Structure to describe a physical memory address.
64 64 */
65 65 struct phy_address {
66 66 ulong_t p_addr; /* base physical address */
67 67 ulong_t p_memtype; /* memory type */
68 68 };
69 69
70 70 /*
71 71 * Structure to describe an array of DVMA addresses.
72 72 * Under normal circumstances, dv_nseg will be 1.
73 73 * dvs_start is always page aligned.
74 74 */
75 75 struct dvma_address {
76 76 size_t dv_off;
77 77 size_t dv_nseg;
78 78 struct dvmaseg {
79 79 uint64_t dvs_start;
80 80 size_t dvs_len;
81 81 } *dv_seg;
82 82 };
83 83
84 84 /*
85 85 * A union of all of the above structures.
86 86 *
87 87 * This union describes the relationship between
88 88 * the kind of an address description and an object.
89 89 */
90 90 typedef union {
91 91 struct v_address virt_obj; /* Some virtual address */
92 92 struct pp_address pp_obj; /* Some page-based address */
93 93 struct phy_address phys_obj; /* Some physical address */
94 94 struct dvma_address dvma_obj;
95 95 } ddi_dma_aobj_t;
96 96
97 97 /*
98 98 * DMA object types - used to select how the object
99 99 * being mapped is being addressed by the IU.
100 100 */
101 101 typedef enum {
102 102 DMA_OTYP_VADDR = 0, /* enforce starting value of zero */
103 103 DMA_OTYP_PAGES,
104 104 DMA_OTYP_PADDR,
105 105 DMA_OTYP_BUFVADDR,
106 106 DMA_OTYP_DVADDR
107 107 } ddi_dma_atyp_t;
108 108
109 109 /*
110 110 * A compact package to describe an object that is to be mapped for DMA.
111 111 */
112 112 typedef struct {
113 113 uint_t dmao_size; /* size, in bytes, of the object */
114 114 ddi_dma_atyp_t dmao_type; /* type of object */
115 115 ddi_dma_aobj_t dmao_obj; /* the object described */
116 116 } ddi_dma_obj_t;
117 117
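
For illustration, a minimal sketch of describing a kernel virtual buffer with ddi_dma_obj_t; the variables len and kaddr are hypothetical, and v_as == NULL is assumed here to denote the kernel address space:

	ddi_dma_obj_t dmao;

	dmao.dmao_size = len;			/* object size in bytes */
	dmao.dmao_type = DMA_OTYP_VADDR;	/* virtual-address object */
	dmao.dmao_obj.virt_obj.v_addr = kaddr;	/* base virtual address */
	dmao.dmao_obj.virt_obj.v_as = NULL;	/* kernel address space */
	dmao.dmao_obj.virt_obj.v_priv = NULL;	/* no shadow I/O data */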
118 118 /*
119 119 * DMA addressing limits.
120 120 *
121 121 * This structure describes the constraints that a particular device's
122 122 * DMA engine has to its parent so that the parent may correctly set
123 123 * things up for a DMA mapping. Each parent may in turn modify the
124 124 * constraints listed in a DMA request structure in order to describe
125 125 * to its parent any changed or additional constraints. The rules
126 126 * are that each parent may modify a constraint in order to further
127 127 * constrain things (e.g., picking a more limited address range than
128 128 * that permitted by the child), but that the parent may not ignore
129 129 * a child's constraints.
130 130 *
131 131 * A particular constraint that we do *not* address is whether or not
132 132 * a requested mapping is too large for a DMA engine's counter to
133 133 * correctly track. It is still up to each driver to explicitly handle
134 134 * transfers that are too large for its own hardware to deal with directly.
135 135 *
136 136 * The mapping routines that are cognizant of this structure will
137 137 * copy any user defined limits structure if they need to modify
138 138 * the fields (as alluded to above).
139 139 *
140 140 * A note as to how to define constraints:
141 141 *
142 142 * How you define the constraints for your device depends on how you
143 143 * define your device. For example, you may have an SBus card with a
144 144 * device on it that addresses only the bottom 16 MB of virtual DMA space.
145 145 * However, if the card also has ancillary circuitry that pulls the high 8
146 146 * bits of the address lines high, the more correct expression for your device
147 147 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
148 148 */
149 149 #if defined(__sparc)
150 150 typedef struct ddi_dma_lim {
151 151
152 152 /*
153 153 * Low range of 32 bit addressing capability.
154 154 */
155 155 uint_t dlim_addr_lo;
156 156
157 157 /*
158 158 * Upper inclusive bound of addressing capability. It is an
159 159 * inclusive boundary limit to allow for the addressing range
160 160 * [0..0xffffffff] to be specified in preference to [0..0].
161 161 */
162 162 uint_t dlim_addr_hi;
163 163
164 164 /*
165 165 * Inclusive upper bound with which the DMA engine's counter acts as
166 166 * a register.
167 167 *
168 168 * This handles the case where an upper portion of a DMA address
169 169 * register is a latch instead of being a full 32 bit register
170 170 * (e.g., the upper 8 bits may remain constant while the lower
171 171 * 24 bits are the real address register).
172 172 *
173 173 * This essentially gives a hint about segment limitations
174 174 * to the mapping routines.
175 175 */
176 176 uint_t dlim_cntr_max;
177 177
178 178 /*
179 179 * DMA burst sizes.
180 180 *
181 181 * At the time of a mapping request, this tag defines the possible
182 182 * DMA burst cycle sizes that the requestor's DMA engine can
183 183 * emit. The format of the data is binary encoding of burst sizes
184 184 * assumed to be powers of two. That is, if a DMA engine is capable
185 185 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
186 186 *
187 187 * As the mapping request is handled by intervening nexi, the
188 188 * burstsizes value may be modified. Prior to enabling DMA for
189 189 * the specific device, the driver that owns the DMA engine should
190 190 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
191 191 * have become and program their DMA engine appropriately.
192 192 */
193 193 uint_t dlim_burstsizes;
194 194
195 195 /*
196 196 * Minimum effective DMA transfer size, in units of bytes.
197 197 *
198 198 * This value specifies the minimum effective granularity of the
199 199 * DMA engine. It is distinct from dlim_burstsizes in that it
200 200 * describes the minimum amount of access a DMA transfer will
201 201 * effect. dlim_burstsizes describes in what electrical fashion
202 202 * the DMA engine might perform its accesses, while dlim_minxfer
203 203 * describes the minimum amount of memory that can be touched by
204 204 * the DMA transfer.
205 205 *
206 206 * As the mapping request is handled by intervening nexi, the
207 207 * dlim_minxfer value may be modified contingent upon the presence
208 208 * (and use) of I/O caches and DMA write buffers in between the
209 209 * DMA engine and the object that DMA is being performed on.
210 210 *
211 211 */
212 212 uint_t dlim_minxfer;
213 213
214 214 /*
215 215 * Expected average data rate for this DMA engine
216 216 * while transferring data.
217 217 *
218 218 * This is used as a hint for a number of operations that might
219 219 * want to know the possible optimal latency requirements of this
220 220 * device. A value of zero will be interpreted as a 'do not care'.
221 221 */
222 222 uint_t dlim_dmaspeed;
223 223
224 224 } ddi_dma_lim_t;
225 225
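The burstsizes encoding works out as follows for the 1-, 2-, 4- and 16-byte example given in the dlim_burstsizes comment above (a sketch, not part of the header):

	/* bit n set => 2^n-byte bursts supported */
	uint_t burstsizes = 0x01 | 0x02 | 0x04 | 0x10;	/* 1, 2, 4, 16 => 0x17 */
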
226 226 #elif defined(__x86)
227 227
228 228 /*
229 229 * values for dlim_minxfer
230 230 */
231 231 #define DMA_UNIT_8 1
232 232 #define DMA_UNIT_16 2
233 233 #define DMA_UNIT_32 4
234 234
235 235 /*
236 236 * Version number
237 237 */
238 238 #define DMALIM_VER0 ((0x86000000) + 0)
239 239
240 240 typedef struct ddi_dma_lim {
241 241
242 242 /*
243 243 * Low range of 32 bit addressing capability.
244 244 */
245 245 uint_t dlim_addr_lo;
246 246
247 247 /*
248 248 * Upper inclusive bound of 32 bit addressing capability.
249 249 *
250 250 * The ISA nexus restricts this to 0x00ffffff, since this bus has
251 251 * only 24 address lines. This enforces the 16 MB address limitation.
252 252 * The EISA nexus restricts this to 0xffffffff.
253 253 */
254 254 uint_t dlim_addr_hi;
255 255
256 256 /*
257 257 * DMA engine counter not used; set to 0
258 258 */
259 259 uint_t dlim_cntr_max;
260 260
261 261 /*
262 262 * DMA burst sizes not used; set to 1
263 263 */
264 264 uint_t dlim_burstsizes;
265 265
266 266 /*
267 267 * Minimum effective DMA transfer size.
268 268 *
269 269 * This value specifies the minimum effective granularity of the
270 270 * DMA engine. It is distinct from dlim_burstsizes in that it
271 271 * describes the minimum amount of access a DMA transfer will
272 272 * effect. dlim_burstsizes describes in what electrical fashion
273 273 * the DMA engine might perform its accesses, while dlim_minxfer
274 274 * describes the minimum amount of memory that can be touched by
275 275 * the DMA transfer.
276 276 *
277 277 * This value also implies the required address alignment.
278 278 * The number of bytes transferred is assumed to be
279 279 * dlim_minxfer * (DMA engine count)
280 280 *
281 281 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
282 282 */
283 283 uint_t dlim_minxfer;
284 284
285 285 /*
286 286 * Expected average data rate for this DMA engine
287 287 * while transferring data.
288 288 *
289 289 * This is used as a hint for a number of operations that might
290 290 * want to know the possible optimal latency requirements of this
291 291 * device. A value of zero will be interpreted as a 'do not care'.
292 292 */
293 293 uint_t dlim_dmaspeed;
294 294
295 295
296 296 /*
297 297 * Version number of this structure
298 298 */
299 299 uint_t dlim_version; /* = 0x86 << 24 + 0 */
300 300
301 301 /*
302 302 * Inclusive upper bound with which the DMA engine's Address acts as
303 303 * a register.
304 304 * This handles the case where an upper portion of a DMA address
305 305 * register is a latch instead of being a full 32 bit register
306 306 * (e.g., the upper 16 bits remain constant while the lower 16 bits
307 307 * are incremented for each DMA transfer).
308 308 *
309 309 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
310 310 * since the ISA DMA engine has a 16-bit register for low address and
311 311 * an 8-bit latch for high address. This enforces the first 64 KB
312 312 * limitation (address boundary).
313 313 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
314 314 */
315 315 uint_t dlim_adreg_max;
316 316
317 317 /*
318 318 * Maximum transfer count that the DMA engine can handle.
319 319 *
320 320 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
321 321 * since the ISA DMA engine has a 16-bit register for counting.
322 322 * This enforces the other 64 KB limitation (count size).
323 323 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
324 324 * since the EISA DMA engine has a 24-bit register for counting.
325 325 *
326 326 * This transfer count limitation is a per segment limitation.
327 327 * It can also be used to restrict the size of segments.
328 328 *
329 329 * This is used as a bit mask, so it must be a power of 2, minus 1.
330 330 */
331 331 uint_t dlim_ctreg_max;
332 332
333 333 /*
334 334 * Granularity of DMA transfer, in units of bytes.
335 335 *
336 336 * Breakup sizes must be multiples of this value.
337 337 * If no scatter/gather capability is specified, then the size of
338 338 * each DMA transfer must be a multiple of this value.
339 339 *
340 340 * If there is scatter/gather capability, then a single cookie cannot
341 341 * be smaller in size than the minimum xfer value, and may be less
342 342 * than the granularity value. The total transfer length of the
343 343 * scatter/gather list should be a multiple of the granularity value;
344 344 * use dlim_sgllen to specify the length of the scatter/gather list.
345 345 *
346 346 * This value should be equal to the sector size of the device.
347 347 */
348 348 uint_t dlim_granular;
349 349
350 350 /*
351 351 * Length of scatter/gather list
352 352 *
353 353 * This value specifies the number of segments or cookies that a DMA
354 354 * engine can consume in one i/o request to the device. For 3rd-party
355 355 * DMA that uses the bus nexus this should be set to 1. Devices with
356 356 * 1st-party DMA capability should specify the number of entries in
357 357 * its scatter/gather list. The breakup routine will ensure that each
358 358 * group of dlim_sgllen cookies (within a DMA window) will have a
359 359 * total transfer length that is a multiple of dlim_granular.
360 360 *
361 361 * < 0 : tbd
362 362 * = 0 : breakup is for PIO.
363 363 * = 1 : breakup is for DMA engine with no scatter/gather
364 364 * capability.
365 365 * >= 2 : breakup is for DMA engine with scatter/gather
366 366 * capability; value is max number of entries in list.
367 367 *
368 368 * Note that this list length is not dependent on the DMA window
369 369 * size. The size of the DMA window is based on resources consumed,
370 370 * such as intermediate buffers. Several s/g lists may exist within
371 371 * a window. But the end of a window does imply the end of the s/g
372 372 * list.
373 373 */
374 374 short dlim_sgllen;
375 375
376 376 /*
377 377 * Size of device i/o request
378 378 *
379 379 * This value indicates the maximum number of bytes the device
380 380 * can transmit/receive for one i/o command. This limitation is
381 381 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
382 382 */
383 383 uint_t dlim_reqsize;
384 384
385 385 } ddi_dma_lim_t;
386 386
387 387 #else
388 388 #error "struct ddi_dma_lim not defined for this architecture"
389 389 #endif /* defined(__sparc) */
390 390
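As a historical sketch of how the legacy x86 limits structure above might be filled in for an ISA-style device, using the bounds quoted in its comments; the sector size and request size here are assumptions, and the xx_ name is hypothetical:

	static ddi_dma_lim_t xx_dma_lim = {
		0x00000000,	/* dlim_addr_lo */
		0x00ffffff,	/* dlim_addr_hi: 24 ISA address lines */
		0,		/* dlim_cntr_max: not used, set to 0 */
		1,		/* dlim_burstsizes: not used, set to 1 */
		DMA_UNIT_8,	/* dlim_minxfer */
		0,		/* dlim_dmaspeed: do not care */
		DMALIM_VER0,	/* dlim_version */
		0x0000ffff,	/* dlim_adreg_max: 16-bit address register */
		0x0000ffff,	/* dlim_ctreg_max: 16-bit count register */
		512,		/* dlim_granular: assumed sector size */
		1,		/* dlim_sgllen: no scatter/gather */
		0x00010000	/* dlim_reqsize: assumed 64 KB per request */
	};
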
391 391 /*
392 392 * Flags definition for dma_attr_flags
393 393 */
394 394
395 395 /*
396 396 * return physical DMA address on platforms
397 397 * which support DVMA
398 398 */
399 399 #define DDI_DMA_FORCE_PHYSICAL 0x0100
400 400
401 401 /*
402 402 * An error will be flagged for DMA data path errors
403 403 */
404 404 #define DDI_DMA_FLAGERR 0x200
405 405
406 406 /*
407 407 * Enable relaxed ordering
408 408 */
409 409 #define DDI_DMA_RELAXED_ORDERING 0x400
410 410
411 411
412 412 /*
413 413 * Consolidation private x86 only flag which will cause a bounce buffer
414 414 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
415 415 * operation contains pages both above and below dma_attr_seg. If this flag
416 416 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
417 417 */
418 418 #define _DDI_DMA_BOUNCE_ON_SEG 0x8000
419 419
420 420 #define DMA_ATTR_V0 0
421 421 #define DMA_ATTR_VERSION DMA_ATTR_V0
422 422
423 423 typedef struct ddi_dma_attr {
424 424 uint_t dma_attr_version; /* version number */
425 425 uint64_t dma_attr_addr_lo; /* low DMA address range */
426 426 uint64_t dma_attr_addr_hi; /* high DMA address range */
427 427 uint64_t dma_attr_count_max; /* DMA counter register */
428 428 uint64_t dma_attr_align; /* DMA address alignment */
429 429 uint_t dma_attr_burstsizes; /* DMA burstsizes */
430 430 uint32_t dma_attr_minxfer; /* min effective DMA size */
431 431 uint64_t dma_attr_maxxfer; /* max DMA xfer size */
432 432 uint64_t dma_attr_seg; /* segment boundary */
433 433 int dma_attr_sgllen; /* s/g length */
434 434 uint32_t dma_attr_granular; /* granularity of device */
435 435 uint_t dma_attr_flags; /* Bus specific DMA flags */
436 436 } ddi_dma_attr_t;
437 437
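A typical static initializer for ddi_dma_attr_t, the current replacement for the limits structure above; all of the values below are hypothetical device properties, not taken from this change:

	static ddi_dma_attr_t xx_dma_attr = {
		DMA_ATTR_V0,		/* dma_attr_version */
		0x0000000000000000ull,	/* dma_attr_addr_lo */
		0x00000000ffffffffull,	/* dma_attr_addr_hi: 32-bit device */
		0x0000000000ffffffull,	/* dma_attr_count_max */
		0x0000000000000004ull,	/* dma_attr_align: 4-byte alignment */
		0x17,			/* dma_attr_burstsizes: 1/2/4/16 */
		1,			/* dma_attr_minxfer */
		0x0000000000ffffffull,	/* dma_attr_maxxfer */
		0x00000000ffffffffull,	/* dma_attr_seg */
		17,			/* dma_attr_sgllen */
		512,			/* dma_attr_granular */
		0			/* dma_attr_flags */
	};
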
438 438 /*
439 439 * Handy macro to set a maximum bit value (should be elsewhere)
440 440 *
441 441 * Clear off all bits lower than 'mybit' in val; if there are no
442 442 * bits higher than or equal to mybit in val then set mybit. Assumes
443 443 * mybit equals some power of 2 and is not zero.
444 444 */
445 445 #define maxbit(val, mybit) \
446 446 (((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0))
447 447
448 448 /*
449 449 * Handy macro to set a minimum bit value (should be elsewhere)
450 450 *
451 451 * Clear off all bits higher than 'mybit' in val; if there are no
452 452 * bits lower than or equal to mybit in val then set mybit. Assumes
453 453 * mybit equals some power of 2 and is not zero.
454 454 */
455 455 #define minbit(val, mybit) \
456 456 (((val)&((mybit)|((mybit)-1))) | \
457 457 ((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
458 458
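Worked examples of the two macros, with values chosen to match the burstsizes example earlier:

	maxbit(0x17, 0x4)	/* == 0x14: bits below 0x4 cleared */
	minbit(0x17, 0x4)	/* == 0x07: bits above 0x4 cleared */
	minbit(0x10, 0x4)	/* == 0x04: no bits at or below 0x4, so 0x4 set */
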
459 459 /*
460 460 * Structure of a request to map an object for DMA.
461 461 */
462 462 typedef struct ddi_dma_req {
463 463 /*
464 464 * Caller's DMA engine constraints.
465 465 *
466 466 * If there are no particular constraints to the caller's DMA
467 467 * engine, this field may be set to NULL. The implementation DMA
468 468 * setup functions will then select a set of standard beginning
469 469 * constraints.
470 470 *
471 471 * In either case, as the mapping proceeds, the initial DMA
472 472 * constraints may become more restrictive as each intervening
473 473 * nexus might add further restrictions.
474 474 */
475 475 ddi_dma_lim_t *dmar_limits;
476 476
477 477 /*
478 478 * Contains the information passed to the DMA mapping allocation
479 479 * routine(s).
480 480 */
481 481 uint_t dmar_flags;
482 482
483 483 /*
484 484 * Callback function. A caller of the DMA mapping functions must
485 485 * specify by filling in this field whether the allocation routines
486 486 * can sleep awaiting mapping resources, must *not* sleep awaiting
487 487 * resources, or may *not* sleep awaiting any resources and must
488 488 * call the function specified by dmar_fp with the argument
489 489 * dmar_arg when resources might have become available at a future
490 490 * time.
491 491 */
492 492 int (*dmar_fp)();
493 493
494 494 caddr_t dmar_arg; /* Callback function argument */
495 495
496 496 /*
497 497 * Description of the object to be mapped for DMA.
498 498 * Must be last in this structure in case the
499 499 * union within ddi_dma_obj_t changes in the future.
500 500 */
501 501 ddi_dma_obj_t dmar_object;
502 502
503 503 } ddi_dma_req_t;
504 504
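Putting the pieces together, a sketch of filling in a request; dmao is the object sketch from earlier, and DDI_DMA_DONTWAIT is defined just below:

	ddi_dma_req_t dmareq;

	dmareq.dmar_limits = NULL;		/* default constraints */
	dmareq.dmar_flags = DDI_DMA_READ;	/* device -> memory */
	dmareq.dmar_fp = DDI_DMA_DONTWAIT;	/* fail rather than block */
	dmareq.dmar_arg = NULL;			/* no callback argument */
	dmareq.dmar_object = dmao;		/* object to be mapped */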
505 505 /*
506 506 * Defines for the DMA mapping allocation functions
507 507 *
508 508 * If a DMA callback function is set to anything other than the following
509 509 * defines then it is assumed that one wishes a callback and is providing
510 510 * a function address.
511 511 */
512 512 #ifdef __STDC__
513 513 #define DDI_DMA_DONTWAIT ((int (*)(caddr_t))0)
514 514 #define DDI_DMA_SLEEP ((int (*)(caddr_t))1)
515 515 #else
516 516 #define DDI_DMA_DONTWAIT ((int (*)())0)
517 517 #define DDI_DMA_SLEEP ((int (*)())1)
518 518 #endif
519 519
520 520 /*
521 521 * Return values from callback functions.
522 522 */
523 523 #define DDI_DMA_CALLBACK_RUNOUT 0
524 524 #define DDI_DMA_CALLBACK_DONE 1
525 525
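A sketch of a resource callback as described for dmar_fp above; the xx_ names are hypothetical driver code:

	static int
	xx_dma_callback(caddr_t arg)
	{
		struct xx_state *xsp = (struct xx_state *)arg;

		if (xx_retry_dma_setup(xsp) != DDI_SUCCESS)
			return (DDI_DMA_CALLBACK_RUNOUT);	/* call again later */

		return (DDI_DMA_CALLBACK_DONE);
	}
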
526 526 /*
527 527 * Flag definitions for the allocation functions.
528 528 */
529 529 #define DDI_DMA_WRITE 0x0001 /* Direction memory --> IO */
530 530 #define DDI_DMA_READ 0x0002 /* Direction IO --> memory */
531 531 #define DDI_DMA_RDWR (DDI_DMA_READ | DDI_DMA_WRITE)
532 532
533 533 /*
534 534 * If possible, establish an MMU redzone after the mapping (to protect
535 535 * against cheap DMA hardware that might get out of control).
536 536 */
537 537 #define DDI_DMA_REDZONE 0x0004
538 538
539 539 /*
540 540 * A partial allocation is allowed. That is, if the size of the object
541 541 * exceeds the mapping resources available, only map a portion of the
542 542 * object and return status indicating that this took place. The caller
543 543 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
544 544 * change, at a later point, the actual mapped portion of the object.
545 545 *
546 546 * The mapped portion begins at offset 0 of the object.
547 547 *
548 548 */
549 549 #define DDI_DMA_PARTIAL 0x0008
550 550
551 551 /*
552 552 * Map the object for byte consistent access. Note that explicit
553 553 * synchronization (via ddi_dma_sync(9F)) will still be required.
554 554 * Consider this flag to be a hint to the mapping routines as to
555 555 * the intended use of the mapping.
556 556 *
557 557 * Normal data transfers can usually be considered to use 'streaming'
558 558 * modes of operation. They start at a specific point, transfer a
559 559 * fairly large amount of data sequentially, and then stop (usually
560 560 * on a well aligned boundary).
561 561 *
562 562 * Control mode data transfers (for memory resident device control blocks,
563 563 * e.g., ethernet message descriptors) do not access memory in such
564 564 * a streaming sequential fashion. Instead, they tend to modify a few
565 565 * words or bytes, move around and maybe modify a few more.
566 566 *
567 567 * There are many machine implementations that make this difficult to
568 568 * control in a generic and seamless fashion. Therefore, explicit synch-
569 569 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
570 570 * ask for a byte-consistent mapping) in order to make the view of the
571 571 * memory object shared between a CPU and a DMA master consistent.
572 572 * However, judicious use of this flag can give sufficient hints to
573 573 * the mapping routines to attempt to pick the most efficacious mapping
574 574 * such that the synchronization steps are as efficient as possible.
575 575 *
576 576 */
577 577 #define DDI_DMA_CONSISTENT 0x0010
578 578
579 579 /*
580 580 * Some DMA mappings have to be 'exclusive' access.
581 581 */
582 582 #define DDI_DMA_EXCLUSIVE 0x0020
583 583
584 584 /*
585 585 * Sequential, unidirectional, block-sized and block aligned transfers
586 586 */
587 587 #define DDI_DMA_STREAMING 0x0040
588 588
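The usual split suggested by the comments above, expressed as flag combinations (illustrative only):

	/* descriptor rings, mailboxes: byte-consistent access */
	uint_t ctrl_flags = DDI_DMA_RDWR | DDI_DMA_CONSISTENT;

	/* bulk, sequential data buffers: streaming access */
	uint_t data_flags = DDI_DMA_READ | DDI_DMA_STREAMING;
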
589 589 /*
590 590 * Support for 64-bit SBus devices
591 591 */
592 592 #define DDI_DMA_SBUS_64BIT 0x2000
593 593
594 594 /*
595 595 * Return values from the mapping allocation functions.
596 596 */
597 597
598 598 /*
599 599 * succeeded in satisfying request
600 600 */
601 601 #define DDI_DMA_MAPPED 0
602 602
603 603 /*
604 604 * Mapping is legitimate (for advisory calls).
605 605 */
606 606 #define DDI_DMA_MAPOK 0
607 607
608 608 /*
609 609 * Succeeded in mapping a portion of the request.
610 610 */
611 611 #define DDI_DMA_PARTIAL_MAP 1
612 612
613 613 /*
614 614 * indicates end of window/segment list
615 615 */
616 616 #define DDI_DMA_DONE 2
617 617
618 618 /*
619 619 * No resources to map request.
620 620 */
621 621 #define DDI_DMA_NORESOURCES -1
622 622
623 623 /*
624 624 * Can't establish a mapping to the specified object
625 625 * (no specific reason).
626 626 */
627 627 #define DDI_DMA_NOMAPPING -2
628 628
629 629 /*
630 630 * The request is too big to be mapped.
631 631 */
632 632 #define DDI_DMA_TOOBIG -3
633 633
634 634 /*
635 635 * The request is too small to be mapped.
636 636 */
637 637 #define DDI_DMA_TOOSMALL -4
638 638
639 639 /*
640 640 * The request cannot be mapped because the object
641 641 * is locked against mapping by another DMA master.
642 642 */
643 643 #define DDI_DMA_LOCKED -5
644 644
645 645 /*
646 646 * The request cannot be mapped because the limits
647 647 * structure has bogus values.
648 648 */
649 649 #define DDI_DMA_BADLIMITS -6
650 650
651 651 /*
652 652 * the segment/window pointer is stale
653 653 */
654 654 #define DDI_DMA_STALE -7
655 655
656 656 /*
657 657 * The system can't allocate DMA resources using
658 658 * the given DMA attributes
659 659 */
660 660 #define DDI_DMA_BADATTR -8
661 661
662 662 /*
663 663 * A DMA handle is already used for a DMA
664 664 */
665 665 #define DDI_DMA_INUSE -9
666 666
667 667
668 668 /*
669 669 * DVMA disabled or not supported; use physical DMA
670 670 */
671 671 #define DDI_DMA_USE_PHYSICAL -10
672 672
673 673
674 674 /*
675 675 * In order for the access to a memory object to be consistent
676 676 * between a device and a CPU, the function ddi_dma_sync(9F)
677 677 * must be called upon the DMA handle. The following flags
678 678 * define whose view of the object should be made consistent.
679 679 * There are different flags here because on different machines
680 680 * there are definite performance implications of how long
681 681 * such synchronization takes.
682 682 *
683 683 * DDI_DMA_SYNC_FORDEV makes all device references to the object
684 684 * mapped by the DMA handle up to date. It should be used by a
685 685 * driver after a cpu modifies the memory object (over the range
686 686 * specified by the other arguments to the ddi_dma_sync(9F) call).
687 687 *
688 688 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
689 689 * mapped by the DMA handle up to date. It should be used
690 690 * by a driver after the receipt of data from the device to
691 691 * the memory object is done (over the range specified by
692 692 * the other arguments to the ddi_dma_sync(9F) call).
693 693 *
694 694 * If the only mapping that concerns the driver is one for the
695 695 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
696 696 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
697 697 * system that if it can synchronize the kernel's view faster
698 698 * than the CPU's view, it can do so; otherwise it acts the
699 699 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
700 700 * speed up the synchronization of kernel mappings in case of
701 701 * non IO-coherent CPU caches.
702 702 */
703 703 #define DDI_DMA_SYNC_FORDEV 0x0
704 704 #define DDI_DMA_SYNC_FORCPU 0x1
705 705 #define DDI_DMA_SYNC_FORKERNEL 0x2
706 706
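Typical ddi_dma_sync(9F) calls corresponding to the two directions described above; handle, offset and length are assumed to be in scope:

	/* CPU wrote the object; make the device's view current */
	(void) ddi_dma_sync(handle, offset, length, DDI_DMA_SYNC_FORDEV);

	/* device wrote the object; make the CPU's view current */
	(void) ddi_dma_sync(handle, offset, length, DDI_DMA_SYNC_FORCPU);
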
707 707 /*
708 708 * Bus nexus control functions for DMA
709 709 */
710 710
711 711 /*
712 712 * Control operations, defined here so that devops.h can be included
713 713 * by drivers without having to include a specific SYSDDI implementation
714 714 * header file.
715 715 */
716 716
717 717 enum ddi_dma_ctlops {
718 718 DDI_DMA_FREE, /* obsolete - do not use */
719 719 DDI_DMA_SYNC, /* obsolete - do not use */
720 720 DDI_DMA_HTOC, /* obsolete - do not use */
721 721 DDI_DMA_KVADDR, /* obsolete - do not use */
722 722 DDI_DMA_MOVWIN, /* obsolete - do not use */
723 723 DDI_DMA_REPWIN, /* obsolete - do not use */
724 724 DDI_DMA_GETERR, /* obsolete - do not use */
725 725 DDI_DMA_COFF, /* obsolete - do not use */
726 726 DDI_DMA_NEXTWIN, /* obsolete - do not use */
727 727 DDI_DMA_NEXTSEG, /* obsolete - do not use */
728 728 DDI_DMA_SEGTOC, /* obsolete - do not use */
729 729 DDI_DMA_RESERVE, /* reserve some DVMA range */
730 730 DDI_DMA_RELEASE, /* free preallocated DVMA range */
731 731 DDI_DMA_RESETH, /* obsolete - do not use */
732 732 DDI_DMA_CKSYNC, /* obsolete - do not use */
733 - DDI_DMA_IOPB_ALLOC, /* get contiguous DMA-able memory */
734 - DDI_DMA_IOPB_FREE, /* return contiguous DMA-able memory */
735 - DDI_DMA_SMEM_ALLOC, /* get contiguous DMA-able memory */
736 - DDI_DMA_SMEM_FREE, /* return contiguous DMA-able memory */
733 + DDI_DMA_IOPB_ALLOC, /* obsolete - do not use */
734 + DDI_DMA_IOPB_FREE, /* obsolete - do not use */
735 + DDI_DMA_SMEM_ALLOC, /* obsolete - do not use */
736 + DDI_DMA_SMEM_FREE, /* obsolete - do not use */
737 737 DDI_DMA_SET_SBUS64, /* 64 bit SBus support */
738 - DDI_DMA_REMAP, /* remap DMA buffers after relocation */
738 + DDI_DMA_REMAP, /* remap DVMA buffers after relocation */
739 739
740 740 /*
741 741 * control ops for DMA engine on motherboard
742 742 */
743 743 DDI_DMA_E_ACQUIRE, /* get channel for exclusive use */
744 744 DDI_DMA_E_FREE, /* release channel */
745 745 DDI_DMA_E_1STPTY, /* setup channel for 1st party DMA */
746 746 DDI_DMA_E_GETCB, /* get control block for DMA engine */
747 747 DDI_DMA_E_FREECB, /* free control blk for DMA engine */
748 748 DDI_DMA_E_PROG, /* program channel of DMA engine */
749 749 DDI_DMA_E_SWSETUP, /* setup channel for software control */
750 750 DDI_DMA_E_SWSTART, /* software operation of DMA channel */
751 751 DDI_DMA_E_ENABLE, /* enable channel of DMA engine */
752 752 DDI_DMA_E_STOP, /* stop a channel of DMA engine */
753 753 DDI_DMA_E_DISABLE, /* disable channel of DMA engine */
754 754 DDI_DMA_E_GETCNT, /* get remaining xfer count */
755 - DDI_DMA_E_GETLIM, /* get DMA engine limits */
755 + DDI_DMA_E_GETLIM, /* obsolete - do not use */
756 756 DDI_DMA_E_GETATTR /* get DMA engine attributes */
757 757 };
758 758
759 759 /*
760 760 * Cache attribute flags:
761 761 *
762 762 * IOMEM_DATA_CACHED
763 763 * The CPU can cache the data it fetches and push it to memory at a later
764 764 * time. This is the default attribute, used if no cache attribute is
765 765 * specified.
766 766 *
767 767 * IOMEM_DATA_UC_WR_COMBINE
768 768 * The CPU never caches the data but writes may occur out of order or be
769 769 * combined. It implies re-ordering.
770 770 *
771 771 * IOMEM_DATA_UNCACHED
772 772 * The CPU never caches the data and has uncacheable access to memory.
773 773 * It also implies strict ordering.
774 774 *
775 775 * The cache attributes are mutually exclusive, and any combination of the
776 776 * values leads to a failure. On the sparc architecture, only IOMEM_DATA_CACHED
777 777 * is meaningful; the others lead to a failure.
778 778 */
779 779 #define IOMEM_DATA_CACHED 0x10000 /* data is cached */
780 780 #define IOMEM_DATA_UC_WR_COMBINE 0x20000 /* data is not cached, but */
781 781 /* writes might be combined */
782 782 #define IOMEM_DATA_UNCACHED 0x40000 /* data is not cached. */
783 783 #define IOMEM_DATA_MASK 0xF0000 /* cache attrs mask */
784 784
785 785 /*
786 786 * Check if either uncacheable or write-combining is specified (the two flags
787 787 * are mutually exclusive). This macro is used to override hat attributes if
788 788 * either one is set.
789 789 */
790 790 #define OVERRIDE_CACHE_ATTR(attr) \
791 791 (attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))
792 792
793 793 /*
794 794 * Get the cache attribute from flags. If no attributes are set,
795 795 * return IOMEM_DATA_CACHED (default attribute).
796 796 */
797 797 #define IOMEM_CACHE_ATTR(flags) \
798 798 ((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
799 799 IOMEM_DATA_CACHED)
800 800
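For example, with a hypothetical flags variable:

	uint_t cattr = IOMEM_CACHE_ATTR(flags);	/* IOMEM_DATA_CACHED by default */

	if (OVERRIDE_CACHE_ATTR(flags)) {
		/* uncached or write-combining requested; override hat attrs */
	}
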
801 801 #ifdef __cplusplus
802 802 }
803 803 #endif
804 804
805 805 #endif /* _SYS_DDIDMAREQ_H */