/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
 *
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#ifndef _SYS_DDIDMAREQ_H
#define _SYS_DDIDMAREQ_H

#ifdef  __cplusplus
extern "C" {
#endif

/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
        caddr_t         v_addr;         /* base virtual address */
        struct  as      *v_as;          /* pointer to address space */
        void            *v_priv;        /* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
        /*
         * A pointer to a circularly linked list of page structures.
         */
        struct page *pp_pp;
        uint_t pp_offset;       /* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
        ulong_t p_addr;         /* base physical address */
        ulong_t p_memtype;      /* memory type */
};
/*
 * Structure to describe an array of DVMA addresses.
 * Under normal circumstances, dv_nseg will be 1.
 * dvs_start is always page aligned.
 */
struct dvma_address {
        size_t dv_off;
        size_t dv_nseg;
        struct dvmaseg {
                uint64_t dvs_start;
                size_t dvs_len;
        } *dv_seg;
};

/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
        struct v_address virt_obj;      /* Some virtual address         */
        struct pp_address pp_obj;       /* Some page-based address      */
        struct phy_address phys_obj;    /* Some physical address        */
        struct dvma_address dvma_obj;
} ddi_dma_aobj_t;

/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
        DMA_OTYP_VADDR = 0,     /* enforce starting value of zero */
        DMA_OTYP_PAGES,
        DMA_OTYP_PADDR,
        DMA_OTYP_BUFVADDR,
        DMA_OTYP_DVADDR
} ddi_dma_atyp_t;

/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
        uint_t          dmao_size;      /* size, in bytes, of the object */
        ddi_dma_atyp_t  dmao_type;      /* type of object */
        ddi_dma_aobj_t  dmao_obj;       /* the object described */
} ddi_dma_obj_t;
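
/*
 * Illustrative usage sketch (not part of the original header): describing
 * a kernel virtual buffer for DMA.  The names "kaddr" and "len" are
 * hypothetical; a v_as of NULL is conventionally taken to mean the
 * kernel's address space.
 *
 *      ddi_dma_obj_t obj;
 *
 *      obj.dmao_size = len;                    (size of buffer, in bytes)
 *      obj.dmao_type = DMA_OTYP_VADDR;         (addressed by virtual address)
 *      obj.dmao_obj.virt_obj.v_addr = kaddr;
 *      obj.dmao_obj.virt_obj.v_as = NULL;
 *      obj.dmao_obj.virt_obj.v_priv = NULL;
 */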

/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16mb of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

        /*
         * Low range of 32 bit addressing capability.
         */
        uint_t  dlim_addr_lo;

        /*
         * Upper inclusive bound of addressing capability. It is an
         * inclusive boundary limit to allow for the addressing range
         * [0..0xffffffff] to be specified in preference to [0..0].
         */
        uint_t  dlim_addr_hi;

        /*
         * Inclusive upper bound with which the DMA engine's counter acts as
         * a register.
         *
         * This handles the case where an upper portion of a DMA address
         * register is a latch instead of being a full 32 bit register
         * (e.g., the upper 8 bits may remain constant while the lower
         * 24 bits are the real address register).
         *
         * This essentially gives a hint about segment limitations
         * to the mapping routines.
         */
        uint_t  dlim_cntr_max;

        /*
         * DMA burst sizes.
         *
         * At the time of a mapping request, this tag defines the possible
         * DMA burst cycle sizes that the requestor's DMA engine can
         * emit. The format of the data is binary encoding of burst sizes
         * assumed to be powers of two. That is, if a DMA engine is capable
         * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
         *
         * As the mapping request is handled by intervening nexi, the
         * burstsizes value may be modified. Prior to enabling DMA for
         * the specific device, the driver that owns the DMA engine should
         * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
         * have become and program their DMA engine appropriately.
         */
        uint_t  dlim_burstsizes;

        /*
         * Minimum effective DMA transfer size, in units of bytes.
         *
         * This value specifies the minimum effective granularity of the
         * DMA engine. It is distinct from dlim_burstsizes in that it
         * describes the minimum amount of access a DMA transfer will
         * effect. dlim_burstsizes describes in what electrical fashion
         * the DMA engine might perform its accesses, while dlim_minxfer
         * describes the minimum amount of memory that can be touched by
         * the DMA transfer.
         *
         * As the mapping request is handled by intervening nexi, the
         * dlim_minxfer value may be modified contingent upon the presence
         * (and use) of I/O caches and DMA write buffers in between the
         * DMA engine and the object that DMA is being performed on.
         */
        uint_t  dlim_minxfer;

        /*
         * Expected average data rate for this DMA engine
         * while transferring data.
         *
         * This is used as a hint for a number of operations that might
         * want to know the possible optimal latency requirements of this
         * device. A value of zero will be interpreted as a 'do not care'.
         */
        uint_t  dlim_dmaspeed;

} ddi_dma_lim_t;

#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define DMA_UNIT_8  1
#define DMA_UNIT_16 2
#define DMA_UNIT_32 4

/*
 * Version number
 */
#define DMALIM_VER0     ((0x86000000) + 0)

typedef struct ddi_dma_lim {

        /*
         * Low range of 32 bit addressing capability.
         */
        uint_t  dlim_addr_lo;

        /*
         * Upper inclusive bound of 32 bit addressing capability.
         *
         * The ISA nexus restricts this to 0x00ffffff, since this bus has
         * only 24 address lines.  This enforces the 16 Mb address limitation.
         * The EISA nexus restricts this to 0xffffffff.
         */
        uint_t  dlim_addr_hi;

        /*
         * DMA engine counter not used; set to 0
         */
        uint_t  dlim_cntr_max;

        /*
         * DMA burst sizes not used; set to 1
         */
        uint_t  dlim_burstsizes;

        /*
         * Minimum effective DMA transfer size.
         *
         * This value specifies the minimum effective granularity of the
         * DMA engine. It is distinct from dlim_burstsizes in that it
         * describes the minimum amount of access a DMA transfer will
         * effect. dlim_burstsizes describes in what electrical fashion
         * the DMA engine might perform its accesses, while dlim_minxfer
         * describes the minimum amount of memory that can be touched by
         * the DMA transfer.
         *
         * This value also implies the required address alignment.
         * The number of bytes transferred is assumed to be
         *      dlim_minxfer * (DMA engine count)
         *
         * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
         */
        uint_t  dlim_minxfer;

        /*
         * Expected average data rate for this DMA engine
         * while transferring data.
         *
         * This is used as a hint for a number of operations that might
         * want to know the possible optimal latency requirements of this
         * device. A value of zero will be interpreted as a 'do not care'.
         */
        uint_t  dlim_dmaspeed;

        /*
         * Version number of this structure
         */
        uint_t  dlim_version;   /* = 0x86 << 24 + 0 */

        /*
         * Inclusive upper bound with which the DMA engine's address acts as
         * a register.
         * This handles the case where an upper portion of a DMA address
         * register is a latch instead of being a full 32 bit register
         * (e.g., the upper 16 bits remain constant while the lower 16 bits
         * are incremented for each DMA transfer).
         *
         * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
         * since the ISA DMA engine has a 16-bit register for low address and
         * an 8-bit latch for high address.  This enforces the first 64 Kb
         * limitation (address boundary).
         * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
         */
        uint_t  dlim_adreg_max;

        /*
         * Maximum transfer count that the DMA engine can handle.
         *
         * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
         * since the ISA DMA engine has a 16-bit register for counting.
         * This enforces the other 64 Kb limitation (count size).
         * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
         * since the EISA DMA engine has a 24-bit register for counting.
         *
         * This transfer count limitation is a per segment limitation.
         * It can also be used to restrict the size of segments.
         *
         * This is used as a bit mask, so it must be a power of 2, minus 1.
         */
        uint_t  dlim_ctreg_max;

        /*
         * Granularity of DMA transfer, in units of bytes.
         *
         * Breakup sizes must be multiples of this value.
         * If no scatter/gather capability is specified, then the size of
         * each DMA transfer must be a multiple of this value.
         *
         * If there is scatter/gather capability, then a single cookie cannot
         * be smaller in size than the minimum xfer value, and may be less
         * than the granularity value.  The total transfer length of the
         * scatter/gather list should be a multiple of the granularity value;
         * use dlim_sgllen to specify the length of the scatter/gather list.
         *
         * This value should be equal to the sector size of the device.
         */
        uint_t  dlim_granular;

        /*
         * Length of scatter/gather list
         *
         * This value specifies the number of segments or cookies that a DMA
         * engine can consume in one i/o request to the device.  For 3rd-party
         * DMA that uses the bus nexus this should be set to 1.  Devices with
         * 1st-party DMA capability should specify the number of entries in
         * its scatter/gather list.  The breakup routine will ensure that each
         * group of dlim_sgllen cookies (within a DMA window) will have a
         * total transfer length that is a multiple of dlim_granular.
         *
         *      < 0  :  tbd
         *      = 0  :  breakup is for PIO.
         *      = 1  :  breakup is for DMA engine with no scatter/gather
         *              capability.
         *      >= 2 :  breakup is for DMA engine with scatter/gather
         *              capability; value is max number of entries in list.
         *
         * Note that this list length is not dependent on the DMA window
         * size.  The size of the DMA window is based on resources consumed,
         * such as intermediate buffers.  Several s/g lists may exist within
         * a window.  But the end of a window does imply the end of the s/g
         * list.
         */
        short   dlim_sgllen;

        /*
         * Size of device i/o request
         *
         * This value indicates the maximum number of bytes the device
         * can transmit/receive for one i/o command.  This limitation is
         * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
         */
        uint_t  dlim_reqsize;

} ddi_dma_lim_t;
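
/*
 * Illustrative sketch (not part of the original header): a limits
 * structure for a hypothetical EISA bus master with 1st-party DMA,
 * a 16-entry scatter/gather list and 512-byte sectors.  All values
 * are examples only, chosen to be consistent with the field
 * descriptions above.
 *
 *      static ddi_dma_lim_t xx_dma_lim = {
 *              .dlim_addr_lo = 0x00000000,
 *              .dlim_addr_hi = 0xffffffff,
 *              .dlim_cntr_max = 0,             (not used)
 *              .dlim_burstsizes = 1,           (not used)
 *              .dlim_minxfer = DMA_UNIT_8,
 *              .dlim_dmaspeed = 0,             (do not care)
 *              .dlim_version = DMALIM_VER0,
 *              .dlim_adreg_max = 0xffffffff,
 *              .dlim_ctreg_max = 0x00ffffff,   (power of 2, minus 1)
 *              .dlim_granular = 512,           (device sector size)
 *              .dlim_sgllen = 16,
 *              .dlim_reqsize = 0x00ffffff,
 *      };
 */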

#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif  /* defined(__sparc) */

/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define DDI_DMA_FORCE_PHYSICAL          0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define DDI_DMA_FLAGERR                 0x200

/*
 * Enable relaxed ordering
 */
#define DDI_DMA_RELAXED_ORDERING        0x400

/*
 * Consolidation private x86 only flag which will cause a bounce buffer
 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
 * operation contains pages both above and below dma_attr_seg. If this flag
 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
 */
#define _DDI_DMA_BOUNCE_ON_SEG          0x8000

#define DMA_ATTR_V0             0
#define DMA_ATTR_VERSION        DMA_ATTR_V0

typedef struct ddi_dma_attr {
        uint_t          dma_attr_version;       /* version number */
        uint64_t        dma_attr_addr_lo;       /* low DMA address range */
        uint64_t        dma_attr_addr_hi;       /* high DMA address range */
        uint64_t        dma_attr_count_max;     /* DMA counter register */
        uint64_t        dma_attr_align;         /* DMA address alignment */
        uint_t          dma_attr_burstsizes;    /* DMA burstsizes */
        uint32_t        dma_attr_minxfer;       /* min effective DMA size */
        uint64_t        dma_attr_maxxfer;       /* max DMA xfer size */
        uint64_t        dma_attr_seg;           /* segment boundary */
        int             dma_attr_sgllen;        /* s/g length */
        uint32_t        dma_attr_granular;      /* granularity of device */
        uint_t          dma_attr_flags;         /* Bus specific DMA flags */
} ddi_dma_attr_t;
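
/*
 * Illustrative sketch (not part of the original header): attributes for
 * a hypothetical device that can address 32 bits, bursts 1 to 64 bytes,
 * has a 24-bit transfer counter, cannot cross a 4 Gb boundary, and
 * supports an 8-entry scatter/gather list.  All values are examples only.
 *
 *      static ddi_dma_attr_t xx_dma_attr = {
 *              .dma_attr_version = DMA_ATTR_V0,
 *              .dma_attr_addr_lo = 0x0000000000000000ull,
 *              .dma_attr_addr_hi = 0x00000000ffffffffull,
 *              .dma_attr_count_max = 0x00ffffffull,    (24-bit counter)
 *              .dma_attr_align = 0x1000,               (4 Kb alignment)
 *              .dma_attr_burstsizes = 0x7f,            (1..64 byte bursts)
 *              .dma_attr_minxfer = 1,
 *              .dma_attr_maxxfer = 0x00ffffffull,
 *              .dma_attr_seg = 0xffffffffull,          (4 Gb boundary)
 *              .dma_attr_sgllen = 8,
 *              .dma_attr_granular = 1,
 *              .dma_attr_flags = 0,
 *      };
 */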

/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define maxbit(val, mybit)      \
        ((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define minbit(val, mybit)      \
        (((val)&((mybit)|((mybit)-1))) | \
        ((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
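
/*
 * Worked examples (illustrative): with val == 0x17 (bursts of 1, 2, 4
 * and 16 bytes) and mybit == 0x4:
 *
 *      maxbit(0x17, 0x4) == 0x14       (bits below 0x4 cleared)
 *      minbit(0x17, 0x4) == 0x07       (bits above 0x4 cleared)
 *
 * and when no bit >= mybit is set, mybit itself is substituted:
 *
 *      maxbit(0x03, 0x4) == 0x04
 */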

/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
        /*
         * Caller's DMA engine constraints.
         *
         * If there are no particular constraints to the caller's DMA
         * engine, this field may be set to NULL. The implementation DMA
         * setup functions will then select a set of standard beginning
         * constraints.
         *
         * In either case, as the mapping proceeds, the initial DMA
         * constraints may become more restrictive as each intervening
         * nexus might add further restrictions.
         */
        ddi_dma_lim_t   *dmar_limits;

        /*
         * Contains the information passed to the DMA mapping allocation
         * routine(s).
         */
        uint_t          dmar_flags;

        /*
         * Callback function. A caller of the DMA mapping functions must
         * specify by filling in this field whether the allocation routines
         * can sleep awaiting mapping resources, must *not* sleep awaiting
         * resources, or may *not* sleep awaiting any resources and must
         * call the function specified by dmar_fp with the argument
         * dmar_arg when resources might have become available at a future
         * time.
         */
        int             (*dmar_fp)();

        caddr_t         dmar_arg;       /* Callback function argument */

        /*
         * Description of the object to be mapped for DMA.
         * Must be last in this structure in case the
         * union ddi_dma_obj_t changes in the future.
         */
        ddi_dma_obj_t   dmar_object;

} ddi_dma_req_t;

/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#define DDI_DMA_DONTWAIT        ((int (*)(caddr_t))0)
#define DDI_DMA_SLEEP           ((int (*)(caddr_t))1)

/*
 * Return values from callback functions.
 */
#define DDI_DMA_CALLBACK_RUNOUT 0
#define DDI_DMA_CALLBACK_DONE   1
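
/*
 * Illustrative sketch (not part of the original header): a resource
 * callback of the form taken by dmar_fp.  The names xx_softc and
 * xx_start_io are hypothetical.  The callback retries the deferred
 * work and returns DDI_DMA_CALLBACK_DONE if it made progress, or
 * DDI_DMA_CALLBACK_RUNOUT to request another callback when resources
 * may again be available.
 *
 *      static int
 *      xx_dma_callback(caddr_t arg)
 *      {
 *              struct xx_softc *sc = (struct xx_softc *)arg;
 *
 *              if (xx_start_io(sc) == DDI_SUCCESS)
 *                      return (DDI_DMA_CALLBACK_DONE);
 *              return (DDI_DMA_CALLBACK_RUNOUT);
 *      }
 */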

/*
 * Flag definitions for the allocation functions.
 */
#define DDI_DMA_WRITE           0x0001  /* Direction memory --> IO   */
#define DDI_DMA_READ            0x0002  /* Direction IO --> memory   */
#define DDI_DMA_RDWR            (DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish an MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define DDI_DMA_REDZONE         0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define DDI_DMA_PARTIAL         0x0008

/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit
 * synchronization steps (via ddi_dma_sync(9F)) are still required (even
 * if you ask for a byte-consistent mapping) in order to make the view of
 * the memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define DDI_DMA_CONSISTENT      0x0010
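
/*
 * Illustrative sketch (not part of the original header): binding a
 * descriptor ring for byte-consistent access with the modern interface;
 * see ddi_dma_addr_bind_handle(9F).  The names "hdl", "ring" and
 * "ringsize" are hypothetical.
 *
 *      ddi_dma_cookie_t cookie;
 *      uint_t ccount;
 *
 *      if (ddi_dma_addr_bind_handle(hdl, NULL, ring, ringsize,
 *          DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *          &cookie, &ccount) != DDI_DMA_MAPPED)
 *              return (DDI_FAILURE);
 */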

/*
 * Some DMA mappings require 'exclusive' access.
 */
#define DDI_DMA_EXCLUSIVE       0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define DDI_DMA_STREAMING       0x0040

/*
 * Support for 64-bit SBus devices
 */
#define DDI_DMA_SBUS_64BIT      0x2000

/*
 * Return values from the mapping allocation functions.
 */

/*
 * Succeeded in satisfying request.
 */
#define DDI_DMA_MAPPED          0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define DDI_DMA_MAPOK           0

/*
 * Succeeded in mapping a portion of the request.
 */
#define DDI_DMA_PARTIAL_MAP     1

/*
 * Indicates end of window/segment list.
 */
#define DDI_DMA_DONE            2

/*
 * No resources to map request.
 */
#define DDI_DMA_NORESOURCES     -1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define DDI_DMA_NOMAPPING       -2

/*
 * The request is too big to be mapped.
 */
#define DDI_DMA_TOOBIG          -3

/*
 * The request is too small to be mapped.
 */
#define DDI_DMA_TOOSMALL        -4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define DDI_DMA_LOCKED          -5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define DDI_DMA_BADLIMITS       -6

/*
 * The segment/window pointer is stale.
 */
#define DDI_DMA_STALE           -7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes.
 */
#define DDI_DMA_BADATTR         -8

/*
 * The DMA handle is already in use for a DMA.
 */
#define DDI_DMA_INUSE           -9

/*
 * DVMA is disabled or not supported; use physical DMA.
 */
#define DDI_DMA_USE_PHYSICAL            -10

/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define DDI_DMA_SYNC_FORDEV     0x0
#define DDI_DMA_SYNC_FORCPU     0x1
#define DDI_DMA_SYNC_FORKERNEL  0x2
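
/*
 * Illustrative sketch (not part of the original header): typical use of
 * ddi_dma_sync(9F) around a shared descriptor.  The names "hdl",
 * "desc_off" and xx_desc_t are hypothetical.
 *
 *      Before handing the descriptor to the device:
 *
 *              (void) ddi_dma_sync(hdl, desc_off, sizeof (xx_desc_t),
 *                  DDI_DMA_SYNC_FORDEV);
 *
 *      After the device signals completion, before the cpu reads it:
 *
 *              (void) ddi_dma_sync(hdl, desc_off, sizeof (xx_desc_t),
 *                  DDI_DMA_SYNC_FORCPU);
 */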

/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
        DDI_DMA_FREE,           /* obsolete - do not use                */
        DDI_DMA_SYNC,           /* obsolete - do not use                */
        DDI_DMA_HTOC,           /* obsolete - do not use                */
        DDI_DMA_KVADDR,         /* obsolete - do not use                */
        DDI_DMA_MOVWIN,         /* obsolete - do not use                */
        DDI_DMA_REPWIN,         /* obsolete - do not use                */
        DDI_DMA_GETERR,         /* obsolete - do not use                */
        DDI_DMA_COFF,           /* obsolete - do not use                */
        DDI_DMA_NEXTWIN,        /* obsolete - do not use                */
        DDI_DMA_NEXTSEG,        /* obsolete - do not use                */
        DDI_DMA_SEGTOC,         /* obsolete - do not use                */
        DDI_DMA_RESERVE,        /* reserve some DVMA range              */
        DDI_DMA_RELEASE,        /* free preallocated DVMA range         */
        DDI_DMA_RESETH,         /* obsolete - do not use                */
        DDI_DMA_CKSYNC,         /* obsolete - do not use                */
        DDI_DMA_IOPB_ALLOC,     /* obsolete - do not use                */
        DDI_DMA_IOPB_FREE,      /* obsolete - do not use                */
        DDI_DMA_SMEM_ALLOC,     /* obsolete - do not use                */
        DDI_DMA_SMEM_FREE,      /* obsolete - do not use                */
        DDI_DMA_SET_SBUS64,     /* 64 bit SBus support                  */
        DDI_DMA_REMAP,          /* remap DVMA buffers after relocation  */

        /*
         * control ops for DMA engine on motherboard
         */
        DDI_DMA_E_ACQUIRE,      /* get channel for exclusive use        */
        DDI_DMA_E_FREE,         /* release channel                      */
        DDI_DMA_E_1STPTY,       /* setup channel for 1st party DMA      */
        DDI_DMA_E_GETCB,        /* get control block for DMA engine     */
        DDI_DMA_E_FREECB,       /* free control blk for DMA engine      */
        DDI_DMA_E_PROG,         /* program channel of DMA engine        */
        DDI_DMA_E_SWSETUP,      /* setup channel for software control   */
        DDI_DMA_E_SWSTART,      /* software operation of DMA channel    */
        DDI_DMA_E_ENABLE,       /* enable channel of DMA engine         */
        DDI_DMA_E_STOP,         /* stop a channel of DMA engine         */
        DDI_DMA_E_DISABLE,      /* disable channel of DMA engine        */
        DDI_DMA_E_GETCNT,       /* get remaining xfer count             */
        DDI_DMA_E_GETLIM,       /* obsolete - do not use                */
        DDI_DMA_E_GETATTR       /* get DMA engine attributes            */
};

/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 *   The CPU can cache the data it fetches and push it to memory at a later
 *   time. This is the default attribute and is used if no cache attribute
 *   is specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 *   The CPU never caches the data but writes may occur out of order or be
 *   combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 *   The CPU never caches the data and has uncacheable access to memory.
 *   It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive, and any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful; the others lead to a failure.
 */
#define IOMEM_DATA_CACHED               0x10000 /* data is cached */
#define IOMEM_DATA_UC_WR_COMBINE        0x20000 /* data is not cached, but */
                                                /* writes might be combined */
#define IOMEM_DATA_UNCACHED             0x40000 /* data is not cached. */
#define IOMEM_DATA_MASK                 0xF0000 /* cache attrs mask */

/*
 * Check if either uncacheable or write-combining is specified (those flags
 * are mutually exclusive). This macro is used to override hat attributes
 * if either one is set.
 */
#define OVERRIDE_CACHE_ATTR(attr)       \
        (attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If there are no attributes,
 * return IOMEM_DATA_CACHED (default attribute).
 */
#define IOMEM_CACHE_ATTR(flags) \
        ((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
            IOMEM_DATA_CACHED)
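
/*
 * Worked examples (illustrative):
 *
 *      IOMEM_CACHE_ATTR(0)                     == IOMEM_DATA_CACHED
 *      IOMEM_CACHE_ATTR(IOMEM_DATA_UNCACHED)   == IOMEM_DATA_UNCACHED
 *      OVERRIDE_CACHE_ATTR(IOMEM_DATA_CACHED)  == 0    (no override)
 */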

#ifdef  __cplusplus
}
#endif

#endif  /* _SYS_DDIDMAREQ_H */