/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>.  All rights reserved.
 */

#ifndef _SYS_DDIDMAREQ_H
#define _SYS_DDIDMAREQ_H

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
        caddr_t         v_addr;         /* base virtual address */
        struct  as      *v_as;          /* pointer to address space */
        void            *v_priv;        /* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
        /*
         * A pointer to a circularly linked list of page structures.
         */
        struct page *pp_pp;
        uint_t pp_offset;       /* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
        ulong_t p_addr;         /* base physical address */
        ulong_t p_memtype;      /* memory type */
};

/*
 * Structure to describe an array of DVMA addresses.
 * Under normal circumstances, dv_nseg will be 1.
 * dvs_start is always page aligned.
 */
struct dvma_address {
        size_t dv_off;
        size_t dv_nseg;
        struct dvmaseg {
                uint64_t dvs_start;
                size_t dvs_len;
        } *dv_seg;
};

/*
 * A union of all of the above structures.
 *
 * This union ties together the kind of address description
 * in use and the object it describes.
 */
typedef union {
        struct v_address virt_obj;      /* Some virtual address         */
        struct pp_address pp_obj;       /* Some page-based address      */
        struct phy_address phys_obj;    /* Some physical address        */
        struct dvma_address dvma_obj;
} ddi_dma_aobj_t;

/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
        DMA_OTYP_VADDR = 0,     /* enforce starting value of zero */
        DMA_OTYP_PAGES,
        DMA_OTYP_PADDR,
        DMA_OTYP_BUFVADDR,
        DMA_OTYP_DVADDR
} ddi_dma_atyp_t;

/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
        uint_t          dmao_size;      /* size, in bytes, of the object */
        ddi_dma_atyp_t  dmao_type;      /* type of object */
        ddi_dma_aobj_t  dmao_obj;       /* the object described */
} ddi_dma_obj_t;
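
/*
 * For illustration only: a hedged, hypothetical sketch (not part of the
 * DDI) of describing a kernel virtual buffer with the structures above;
 * "kaddr" and "len" are assumed names.
 *
 *      ddi_dma_obj_t obj;
 *
 *      obj.dmao_size = len;                    // object length in bytes
 *      obj.dmao_type = DMA_OTYP_VADDR;         // addressed virtually
 *      obj.dmao_obj.virt_obj.v_addr = kaddr;   // kernel virtual base
 *      obj.dmao_obj.virt_obj.v_as = &kas;      // kernel address space
 *      obj.dmao_obj.virt_obj.v_priv = NULL;    // no shadow I/O data
 */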

/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16MB of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of the address lines high, the more correct expression for your
 * device is that it addresses [0xff000000..0xffffffff] rather than
 * [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

        /*
         * Low range of 32 bit addressing capability.
         */
        uint_t  dlim_addr_lo;

        /*
         * Upper inclusive bound of addressing capability. It is an
         * inclusive boundary limit to allow for the addressing range
         * [0..0xffffffff] to be specified in preference to [0..0].
         */
        uint_t  dlim_addr_hi;

        /*
         * Inclusive upper bound with which the DMA engine's counter acts as
         * a register.
         *
         * This handles the case where an upper portion of a DMA address
         * register is a latch instead of being a full 32 bit register
         * (e.g., the upper 8 bits may remain constant while the lower
         * 24 bits are the real address register).
         *
         * This essentially gives a hint about segment limitations
         * to the mapping routines.
         */
        uint_t  dlim_cntr_max;

        /*
         * DMA burst sizes.
         *
         * At the time of a mapping request, this tag defines the possible
         * DMA burst cycle sizes that the requestor's DMA engine can
         * emit. The format of the data is binary encoding of burst sizes
         * assumed to be powers of two. That is, if a DMA engine is capable
         * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
         *
         * As the mapping request is handled by intervening nexi, the
         * burstsizes value may be modified. Prior to enabling DMA for
         * the specific device, the driver that owns the DMA engine should
         * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
         * have become and program their DMA engine appropriately.
         */
        uint_t  dlim_burstsizes;

        /*
         * Minimum effective DMA transfer size, in units of bytes.
         *
         * This value specifies the minimum effective granularity of the
         * DMA engine. It is distinct from dlim_burstsizes in that it
         * describes the minimum amount of access a DMA transfer will
         * effect. dlim_burstsizes describes in what electrical fashion
         * the DMA engine might perform its accesses, while dlim_minxfer
         * describes the minimum amount of memory that can be touched by
         * the DMA transfer.
         *
         * As the mapping request is handled by intervening nexi, the
         * dlim_minxfer value may be modified contingent upon the presence
         * (and use) of I/O caches and DMA write buffers in between the
         * DMA engine and the object that DMA is being performed on.
         */
        uint_t  dlim_minxfer;

        /*
         * Expected average data rate for this DMA engine
         * while transferring data.
         *
         * This is used as a hint for a number of operations that might
         * want to know the possible optimal latency requirements of this
         * device. A value of zero will be interpreted as a 'do not care'.
         */
        uint_t  dlim_dmaspeed;

} ddi_dma_lim_t;
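
/*
 * A hedged example (not from any real driver) of limits for a hypothetical
 * SBus device whose DMA engine can address the full 32-bit DVMA space,
 * latches the upper 8 address bits, and can burst 1, 2, 4 and 16 bytes
 * (binary-encoded as 0x17 = 0x01|0x02|0x04|0x10):
 *
 *      static ddi_dma_lim_t xx_limits = {
 *              0x00000000,     // dlim_addr_lo
 *              0xffffffff,     // dlim_addr_hi
 *              0x00ffffff,     // dlim_cntr_max: lower 24 bits increment
 *              0x17,           // dlim_burstsizes: 1, 2, 4 and 16 bytes
 *              1,              // dlim_minxfer: byte granularity
 *              0               // dlim_dmaspeed: don't care
 *      };
 */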

#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define DMA_UNIT_8  1
#define DMA_UNIT_16 2
#define DMA_UNIT_32 4

/*
 * Version number
 */
#define DMALIM_VER0     ((0x86000000) + 0)

typedef struct ddi_dma_lim {

        /*
         * Low range of 32 bit addressing capability.
         */
        uint_t  dlim_addr_lo;

        /*
         * Upper inclusive bound of 32 bit addressing capability.
         *
         * The ISA nexus restricts this to 0x00ffffff, since this bus has
         * only 24 address lines.  This enforces the 16 Mb address limitation.
         * The EISA nexus restricts this to 0xffffffff.
         */
        uint_t  dlim_addr_hi;

        /*
         * DMA engine counter not used; set to 0
         */
        uint_t  dlim_cntr_max;

        /*
         * DMA burst sizes not used; set to 1
         */
        uint_t  dlim_burstsizes;

        /*
         * Minimum effective DMA transfer size.
         *
         * This value specifies the minimum effective granularity of the
         * DMA engine. It is distinct from dlim_burstsizes in that it
         * describes the minimum amount of access a DMA transfer will
         * effect. dlim_burstsizes describes in what electrical fashion
         * the DMA engine might perform its accesses, while dlim_minxfer
         * describes the minimum amount of memory that can be touched by
         * the DMA transfer.
         *
         * This value also implies the required address alignment.
         * The number of bytes transferred is assumed to be
         *      dlim_minxfer * (DMA engine count)
         *
         * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
         */
        uint_t  dlim_minxfer;

        /*
         * Expected average data rate for this DMA engine
         * while transferring data.
         *
         * This is used as a hint for a number of operations that might
         * want to know the possible optimal latency requirements of this
         * device. A value of zero will be interpreted as a 'do not care'.
         */
        uint_t  dlim_dmaspeed;

        /*
         * Version number of this structure
         */
        uint_t  dlim_version;   /* = 0x86 << 24 + 0 */

        /*
         * Inclusive upper bound with which the DMA engine's address acts as
         * a register.
         * This handles the case where an upper portion of a DMA address
         * register is a latch instead of being a full 32 bit register
         * (e.g., the upper 16 bits remain constant while the lower 16 bits
         * are incremented for each DMA transfer).
         *
         * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
         * since the ISA DMA engine has a 16-bit register for the low address
         * and an 8-bit latch for the high address.  This enforces the first
         * 64 Kb limitation (address boundary).
         * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
         */
        uint_t  dlim_adreg_max;

        /*
         * Maximum transfer count that the DMA engine can handle.
         *
         * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
         * since the ISA DMA engine has a 16-bit register for counting.
         * This enforces the other 64 Kb limitation (count size).
         * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
         * since the EISA DMA engine has a 24-bit register for counting.
         *
         * This transfer count limitation is a per-segment limitation.
         * It can also be used to restrict the size of segments.
         *
         * This is used as a bit mask, so it must be a power of 2, minus 1.
         */
        uint_t  dlim_ctreg_max;

        /*
         * Granularity of DMA transfer, in units of bytes.
         *
         * Breakup sizes must be multiples of this value.
         * If no scatter/gather capability is specified, then the size of
         * each DMA transfer must be a multiple of this value.
         *
         * If there is scatter/gather capability, then a single cookie cannot
         * be smaller in size than the minimum xfer value, and may be less
         * than the granularity value.  The total transfer length of the
         * scatter/gather list should be a multiple of the granularity value;
         * use dlim_sgllen to specify the length of the scatter/gather list.
         *
         * This value should be equal to the sector size of the device.
         */
        uint_t  dlim_granular;

        /*
         * Length of scatter/gather list
         *
         * This value specifies the number of segments or cookies that a DMA
         * engine can consume in one i/o request to the device.  For 3rd-party
         * DMA that uses the bus nexus this should be set to 1.  Devices with
         * 1st-party DMA capability should specify the number of entries in
         * their scatter/gather list.  The breakup routine will ensure that
         * each group of dlim_sgllen cookies (within a DMA window) will have
         * a total transfer length that is a multiple of dlim_granular.
         *
         *      < 0  :  tbd
         *      = 0  :  breakup is for PIO.
         *      = 1  :  breakup is for DMA engine with no scatter/gather
         *              capability.
         *      >= 2 :  breakup is for DMA engine with scatter/gather
         *              capability; value is max number of entries in list.
         *
         * Note that this list length is not dependent on the DMA window
         * size.  The size of the DMA window is based on resources consumed,
         * such as intermediate buffers.  Several s/g lists may exist within
         * a window.  But the end of a window does imply the end of the s/g
         * list.
         */
        short   dlim_sgllen;

        /*
         * Size of device i/o request
         *
         * This value indicates the maximum number of bytes the device
         * can transmit/receive for one i/o command.  This limitation is
         * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
         */
        uint_t  dlim_reqsize;

} ddi_dma_lim_t;
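
/*
 * A hedged example of limits for a hypothetical device using ISA 3rd-party
 * DMA, following the ISA restrictions described above (values illustrative):
 *
 *      static ddi_dma_lim_t xx_limits = {
 *              0x00000000,     // dlim_addr_lo
 *              0x00ffffff,     // dlim_addr_hi: 24 address lines (16 Mb)
 *              0,              // dlim_cntr_max: not used
 *              1,              // dlim_burstsizes: not used
 *              DMA_UNIT_8,     // dlim_minxfer: 8-bit transfers
 *              0,              // dlim_dmaspeed: don't care
 *              DMALIM_VER0,    // dlim_version
 *              0x0000ffff,     // dlim_adreg_max: 64 Kb address boundary
 *              0x0000ffff,     // dlim_ctreg_max: 16-bit count register
 *              512,            // dlim_granular: sector size of the device
 *              1,              // dlim_sgllen: no scatter/gather
 *              0x0000ffff      // dlim_reqsize: one count register's worth
 *      };
 */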

#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif  /* defined(__sparc) */

/*
 * Flags definition for dma_attr_flags
 */

/*
 * Return physical DMA address on platforms
 * which support DVMA.
 */
#define DDI_DMA_FORCE_PHYSICAL          0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define DDI_DMA_FLAGERR                 0x200

/*
 * Enable relaxed ordering
 */
#define DDI_DMA_RELAXED_ORDERING        0x400


/*
 * Consolidation-private x86-only flag which will cause a bounce buffer
 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
 * operation contains pages both above and below dma_attr_seg. If this flag
 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
 */
#define _DDI_DMA_BOUNCE_ON_SEG          0x8000

#define DMA_ATTR_V0             0
#define DMA_ATTR_VERSION        DMA_ATTR_V0

typedef struct ddi_dma_attr {
        uint_t          dma_attr_version;       /* version number */
        uint64_t        dma_attr_addr_lo;       /* low DMA address range */
        uint64_t        dma_attr_addr_hi;       /* high DMA address range */
        uint64_t        dma_attr_count_max;     /* DMA counter register */
        uint64_t        dma_attr_align;         /* DMA address alignment */
        uint_t          dma_attr_burstsizes;    /* DMA burstsizes */
        uint32_t        dma_attr_minxfer;       /* min effective DMA size */
        uint64_t        dma_attr_maxxfer;       /* max DMA xfer size */
        uint64_t        dma_attr_seg;           /* segment boundary */
        int             dma_attr_sgllen;        /* s/g length */
        uint32_t        dma_attr_granular;      /* granularity of device */
        uint_t          dma_attr_flags;         /* Bus specific DMA flags */
} ddi_dma_attr_t;
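
/*
 * A minimal, hedged sketch of how a leaf driver typically initializes this
 * structure; the device capabilities shown (32-bit addressing, bursts of
 * 1-64 bytes, a single cookie) are hypothetical:
 *
 *      static ddi_dma_attr_t xx_dma_attr = {
 *              DMA_ATTR_V0,            // dma_attr_version
 *              0x0000000000000000ull,  // dma_attr_addr_lo
 *              0x00000000ffffffffull,  // dma_attr_addr_hi
 *              0x00000000ffffffffull,  // dma_attr_count_max
 *              0x1000,                 // dma_attr_align: 4 Kb alignment
 *              0x7f,                   // dma_attr_burstsizes: 1-64 bytes
 *              1,                      // dma_attr_minxfer
 *              0x00000000ffffffffull,  // dma_attr_maxxfer
 *              0x00000000ffffffffull,  // dma_attr_seg
 *              1,                      // dma_attr_sgllen
 *              512,                    // dma_attr_granular
 *              0                       // dma_attr_flags
 *      };
 */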

/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define maxbit(val, mybit)      \
        (((val) & ~((mybit)-1)) | \
        ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0))

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define minbit(val, mybit)      \
        (((val)&((mybit)|((mybit)-1))) | \
        ((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
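
/*
 * For example, with mybit == 0x4 (a power of 2):
 *
 *      maxbit(0x16, 0x4) == 0x14       // bits below 0x4 cleared
 *      maxbit(0x03, 0x4) == 0x04       // nothing >= 0x4, so 0x4 is set
 *      minbit(0x16, 0x4) == 0x06       // bits above 0x4 cleared
 *      minbit(0x08, 0x4) == 0x04       // nothing <= 0x4, so 0x4 is set
 */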

/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
        /*
         * Caller's DMA engine constraints.
         *
         * If there are no particular constraints to the caller's DMA
         * engine, this field may be set to NULL. The implementation DMA
         * setup functions will then select a set of standard beginning
         * constraints.
         *
         * In either case, as the mapping proceeds, the initial DMA
         * constraints may become more restrictive as each intervening
         * nexus might add further restrictions.
         */
        ddi_dma_lim_t   *dmar_limits;

        /*
         * Contains the information passed to the DMA mapping allocation
         * routine(s).
         */
        uint_t          dmar_flags;

        /*
         * Callback function. A caller of the DMA mapping functions must
         * specify by filling in this field whether the allocation routines
         * can sleep awaiting mapping resources, must *not* sleep awaiting
         * resources, or may *not* sleep awaiting any resources and must
         * call the function specified by dmar_fp with the argument
         * dmar_arg when resources might have become available at a future
         * time.
         */
        int             (*dmar_fp)();

        caddr_t         dmar_arg;       /* Callback function argument */

        /*
         * Description of the object to be mapped for DMA.
         * Must be last in this structure in case the
         * union ddi_dma_obj_t changes in the future.
         */
        ddi_dma_obj_t   dmar_object;

} ddi_dma_req_t;

/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef __STDC__
#define DDI_DMA_DONTWAIT        ((int (*)(caddr_t))0)
#define DDI_DMA_SLEEP           ((int (*)(caddr_t))1)
#else
#define DDI_DMA_DONTWAIT        ((int (*)())0)
#define DDI_DMA_SLEEP           ((int (*)())1)
#endif
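
/*
 * For example, a driver that can block while allocating a DMA handle would
 * pass DDI_DMA_SLEEP to ddi_dma_alloc_handle(9F).  A hedged sketch; the
 * softc pointer "xxp" and attribute structure are hypothetical:
 *
 *      if (ddi_dma_alloc_handle(dip, &xx_dma_attr, DDI_DMA_SLEEP,
 *          NULL, &xxp->xx_dma_handle) != DDI_SUCCESS)
 *              return (DDI_FAILURE);
 */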

/*
 * Return values from callback functions.
 */
#define DDI_DMA_CALLBACK_RUNOUT 0
#define DDI_DMA_CALLBACK_DONE   1
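
/*
 * A hedged sketch of a resource callback; xx_dma_callback, xx_softc and
 * xx_retry_bind are hypothetical names.  A callback returns
 * DDI_DMA_CALLBACK_DONE once it has (or no longer needs) the resources,
 * or DDI_DMA_CALLBACK_RUNOUT to be called again later:
 *
 *      static int
 *      xx_dma_callback(caddr_t arg)
 *      {
 *              struct xx_softc *xxp = (struct xx_softc *)arg;
 *
 *              if (xx_retry_bind(xxp) != DDI_SUCCESS)
 *                      return (DDI_DMA_CALLBACK_RUNOUT);  // call me again
 *              return (DDI_DMA_CALLBACK_DONE);
 *      }
 */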

/*
 * Flag definitions for the allocation functions.
 */
#define DDI_DMA_WRITE           0x0001  /* Direction memory --> IO   */
#define DDI_DMA_READ            0x0002  /* Direction IO --> memory   */
#define DDI_DMA_RDWR            (DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish an MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define DDI_DMA_REDZONE         0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define DDI_DMA_PARTIAL         0x0008
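
/*
 * A hedged sketch of walking the windows of a partial mapping with
 * ddi_dma_numwin(9F) and ddi_dma_getwin(9F); error handling is elided
 * and "handle" is an already-bound DMA handle:
 *
 *      ddi_dma_cookie_t cookie;
 *      uint_t nwin, ccount, win;
 *      off_t off;
 *      size_t len;
 *
 *      (void) ddi_dma_numwin(handle, &nwin);
 *      for (win = 0; win < nwin; win++) {
 *              // activate window 'win'; cookie/ccount describe its mapping
 *              (void) ddi_dma_getwin(handle, win, &off, &len,
 *                  &cookie, &ccount);
 *      }
 */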

/*
 * Map the object for byte-consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit synch-
 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
 * ask for a byte-consistent mapping) in order to make the view of the
 * memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define DDI_DMA_CONSISTENT      0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define DDI_DMA_EXCLUSIVE       0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define DDI_DMA_STREAMING       0x0040
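
/*
 * For example, memory for a shared descriptor ring would typically be
 * allocated and bound with DDI_DMA_CONSISTENT, while a bulk data buffer
 * would use DDI_DMA_STREAMING.  A hedged sketch using ddi_dma_mem_alloc(9F);
 * the access attributes and softc names are hypothetical:
 *
 *      if (ddi_dma_mem_alloc(xxp->xx_dma_handle, size, &xx_acc_attr,
 *          DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *          &kaddr, &real_len, &xxp->xx_acc_handle) != DDI_SUCCESS)
 *              return (DDI_FAILURE);
 */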

/*
 * Support for 64-bit SBus devices
 */
#define DDI_DMA_SBUS_64BIT      0x2000

/*
 * Return values from the mapping allocation functions.
 */

/*
 * Succeeded in satisfying the request.
 */
#define DDI_DMA_MAPPED          0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define DDI_DMA_MAPOK           0

/*
 * Succeeded in mapping a portion of the request.
 */
#define DDI_DMA_PARTIAL_MAP     1

/*
 * Indicates the end of the window/segment list.
 */
#define DDI_DMA_DONE            2

/*
 * No resources to map the request.
 */
#define DDI_DMA_NORESOURCES     -1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define DDI_DMA_NOMAPPING       -2

/*
 * The request is too big to be mapped.
 */
#define DDI_DMA_TOOBIG          -3

/*
 * The request is too small to be mapped.
 */
#define DDI_DMA_TOOSMALL        -4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define DDI_DMA_LOCKED          -5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define DDI_DMA_BADLIMITS       -6

/*
 * The segment/window pointer is stale.
 */
#define DDI_DMA_STALE           -7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes.
 */
#define DDI_DMA_BADATTR         -8

/*
 * A DMA handle is already used for a DMA.
 */
#define DDI_DMA_INUSE           -9

/*
 * DVMA is disabled or not supported; use physical DMA.
 */
#define DDI_DMA_USE_PHYSICAL            -10
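
/*
 * A hedged sketch of interpreting these values after a bind request made
 * with DDI_DMA_PARTIAL via ddi_dma_addr_bind_handle(9F); variable names
 * are illustrative:
 *
 *      rv = ddi_dma_addr_bind_handle(handle, NULL, kaddr, len,
 *          DDI_DMA_READ | DDI_DMA_PARTIAL, DDI_DMA_SLEEP, NULL,
 *          &cookie, &ccount);
 *      switch (rv) {
 *      case DDI_DMA_MAPPED:            // whole object mapped
 *              break;
 *      case DDI_DMA_PARTIAL_MAP:       // only a window is mapped
 *              (void) ddi_dma_numwin(handle, &nwin);
 *              break;
 *      default:                        // DDI_DMA_NORESOURCES, etc.
 *              return (DDI_FAILURE);
 *      }
 */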

/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define DDI_DMA_SYNC_FORDEV     0x0
#define DDI_DMA_SYNC_FORCPU     0x1
#define DDI_DMA_SYNC_FORKERNEL  0x2
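
/*
 * For example (a hedged sketch): sync for the device after the CPU fills
 * a buffer the device will read, and sync for the CPU after the device
 * has deposited data the CPU will read:
 *
 *      (void) ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORDEV);
 *      // ... start the transfer and wait for it to complete ...
 *      (void) ddi_dma_sync(handle, 0, len, DDI_DMA_SYNC_FORCPU);
 */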

/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
        DDI_DMA_FREE,           /* obsolete - do not use                */
        DDI_DMA_SYNC,           /* obsolete - do not use                */
        DDI_DMA_HTOC,           /* obsolete - do not use                */
        DDI_DMA_KVADDR,         /* obsolete - do not use                */
        DDI_DMA_MOVWIN,         /* obsolete - do not use                */
        DDI_DMA_REPWIN,         /* obsolete - do not use                */
        DDI_DMA_GETERR,         /* obsolete - do not use                */
        DDI_DMA_COFF,           /* obsolete - do not use                */
        DDI_DMA_NEXTWIN,        /* obsolete - do not use                */
        DDI_DMA_NEXTSEG,        /* obsolete - do not use                */
        DDI_DMA_SEGTOC,         /* obsolete - do not use                */
        DDI_DMA_RESERVE,        /* reserve some DVMA range              */
        DDI_DMA_RELEASE,        /* free preallocated DVMA range         */
        DDI_DMA_RESETH,         /* obsolete - do not use                */
        DDI_DMA_CKSYNC,         /* obsolete - do not use                */
        DDI_DMA_IOPB_ALLOC,     /* get contiguous DMA-able memory       */
        DDI_DMA_IOPB_FREE,      /* return contiguous DMA-able memory    */
        DDI_DMA_SMEM_ALLOC,     /* get contiguous DMA-able memory       */
        DDI_DMA_SMEM_FREE,      /* return contiguous DMA-able memory    */
        DDI_DMA_SET_SBUS64,     /* 64 bit SBus support                  */
        DDI_DMA_REMAP,          /* remap DMA buffers after relocation   */

        /*
         * control ops for DMA engine on motherboard
         */
        DDI_DMA_E_ACQUIRE,      /* get channel for exclusive use        */
        DDI_DMA_E_FREE,         /* release channel                      */
        DDI_DMA_E_1STPTY,       /* setup channel for 1st party DMA      */
        DDI_DMA_E_GETCB,        /* get control block for DMA engine     */
        DDI_DMA_E_FREECB,       /* free control blk for DMA engine      */
        DDI_DMA_E_PROG,         /* program channel of DMA engine        */
        DDI_DMA_E_SWSETUP,      /* setup channel for software control   */
        DDI_DMA_E_SWSTART,      /* software operation of DMA channel    */
        DDI_DMA_E_ENABLE,       /* enable channel of DMA engine         */
        DDI_DMA_E_STOP,         /* stop a channel of DMA engine         */
        DDI_DMA_E_DISABLE,      /* disable channel of DMA engine        */
        DDI_DMA_E_GETCNT,       /* get remaining xfer count             */
        DDI_DMA_E_GETLIM,       /* get DMA engine limits                */
        DDI_DMA_E_GETATTR       /* get DMA engine attributes            */
};

/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 *   The CPU can cache the data it fetches and push it to memory at a later
 *   time. This is the default attribute and is used if no cache attribute
 *   is specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 *   The CPU never caches the data, but writes may occur out of order or be
 *   combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 *   The CPU never caches the data and has uncacheable access to memory.
 *   It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive; any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful, and the others lead to a failure.
 */
#define IOMEM_DATA_CACHED               0x10000 /* data is cached */
#define IOMEM_DATA_UC_WR_COMBINE        0x20000 /* data is not cached, but */
                                                /* writes might be combined */
#define IOMEM_DATA_UNCACHED             0x40000 /* data is not cached. */
#define IOMEM_DATA_MASK                 0xF0000 /* cache attrs mask */

/*
 * Check if either uncacheable or write-combining is specified (these flags
 * are mutually exclusive). This macro is used to override hat attributes
 * if either one is set.
 */
#define OVERRIDE_CACHE_ATTR(attr)       \
        (attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If there are no attributes,
 * return IOMEM_DATA_CACHED (the default attribute).
 */
#define IOMEM_CACHE_ATTR(flags) \
        ((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
            IOMEM_DATA_CACHED)
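
/*
 * For example, given flags == (IOMEM_DATA_UC_WR_COMBINE | DDI_DMA_READ):
 *
 *      IOMEM_CACHE_ATTR(flags) == IOMEM_DATA_UC_WR_COMBINE
 *      OVERRIDE_CACHE_ATTR(flags) != 0         // hat attributes overridden
 *
 * With no cache attribute bits set, IOMEM_CACHE_ATTR(flags) evaluates to
 * IOMEM_DATA_CACHED and OVERRIDE_CACHE_ATTR(flags) is zero.
 */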

#ifdef  __cplusplus
}
#endif

#endif  /* _SYS_DDIDMAREQ_H */