1 /*
   2  * Copyright (c) 2009, Intel Corporation.
   3  * All Rights Reserved.
   4  */
   5 
   6 /*
   7  * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
   8  * Use is subject to license terms.
   9  */
  10 /*
  11  * Portions Philip Brown phil@bolthole.com Dec 2001
  12  */
  13 
  14 
  15 /*
  16  * agpgart driver
  17  *
 * This driver is primarily targeted at providing memory support for Intel
 * AGP devices, Intel memoryless video cards, and AMD64 CPU GART devices.
 * It therefore knows about four main architectures: ARC_IGD810, ARC_IGD830,
 * ARC_INTELAGP and ARC_AMD64AGP. The memory interfaces are the same for all
 * of these architectures; they differ only in how the hardware GART table
 * is managed.
 *
 * For large memory allocations, this driver maps the memory directly into
 * the userland application's address space to save kernel virtual memory.
  27  */
  28 
  29 #include <sys/types.h>
  30 #include <sys/pci.h>
  31 #include <sys/systm.h>
  32 #include <sys/conf.h>
  33 #include <sys/file.h>
  34 #include <sys/kstat.h>
  35 #include <sys/stat.h>
  36 #include <sys/modctl.h>
  37 #include <sys/ddi.h>
  38 #include <sys/sunddi.h>
  39 #include <sys/sunldi.h>
  40 #include <sys/policy.h>
  41 #include <sys/ddidevmap.h>
  42 #include <vm/seg_dev.h>
  43 #include <sys/pmem.h>
  44 #include <sys/agpgart.h>
  45 #include <sys/agp/agpdefs.h>
  46 #include <sys/agp/agpgart_impl.h>
  47 #include <sys/agp/agpamd64gart_io.h>
  48 #include <sys/agp/agpmaster_io.h>
  49 #include <sys/agp/agptarget_io.h>
  50 
  51 /* Dynamic debug support */
  52 int agp_debug_var = 0;
  53 #define AGPDB_PRINT1(fmt)       if (agp_debug_var == 1) cmn_err fmt
  54 #define AGPDB_PRINT2(fmt)       if (agp_debug_var >= 1) cmn_err fmt
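/*
 * Callers pass the full cmn_err() argument list in an extra set of
 * parentheses, e.g. AGPDB_PRINT2((CE_WARN, "message")), so the whole
 * argument list can travel through a single macro parameter.
 */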
  55 
  56 /* Driver global softstate handle */
  57 static void *agpgart_glob_soft_handle;
  58 
  59 #define MAX_INSTNUM                     16
  60 
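/*
 * The driver instance number lives in the upper bits of the minor
 * number: instance = minor >> 4, so instance 1 owns minor numbers
 * 16 through 31.
 */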
  61 #define AGP_DEV2INST(devt)      (getminor((devt)) >> 4)
  62 #define AGP_INST2MINOR(instance)        ((instance) << 4)
  63 #define IS_INTEL_830(type)      ((type) == ARC_IGD830)
  64 #define IS_TRUE_AGP(type)       (((type) == ARC_INTELAGP) || \
  65         ((type) == ARC_AMD64AGP))
  66 
  67 #define AGP_HASH_NODE   1024
  68 
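/*
 * The list_head helpers below implement a small hash table of doubly
 * linked lists used to track igd_gtt_seg_t records; a record's bucket
 * is selected by igs_pgstart % AGP_HASH_NODE.
 */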
  69 static void
list_head_init(struct list_head *head)
{
        struct list_head        *entry, *tmp;

        /* Hash buckets, used to accelerate lookups */
        entry = kmem_zalloc(AGP_HASH_NODE *
            sizeof (struct list_head), KM_SLEEP);
        head->next = entry;
        for (int i = 0; i < AGP_HASH_NODE; i++) {
                tmp = &entry[i];
                tmp->next = tmp;
                tmp->prev = tmp;
                tmp->gttseg = NULL;
        }
  82 }
  83 
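/* Insert a new igd_gtt_seg_t record at the head of its hash bucket */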
  84 static void
  85 list_head_add_new(struct list_head      *head,
  86                 igd_gtt_seg_t   *gttseg)
  87 {
  88         struct list_head  *entry, *tmp;
  89         int key;
  90         entry = kmem_zalloc(sizeof (*entry), KM_SLEEP);
  91         key = gttseg->igs_pgstart % AGP_HASH_NODE;
  92         tmp = &head->next[key];
  93         tmp->next->prev = entry;
  94         entry->next = tmp->next;
  95         entry->prev = tmp;
  96         tmp->next = entry;
  97         entry->gttseg = gttseg;
  98 }
  99 
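/*
 * Unlink a record from its hash bucket; the record itself is not
 * freed here.
 */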
 100 static void
list_head_del(struct list_head *entry)
{
        entry->next->prev = entry->prev;
        entry->prev->next = entry->next;
        entry->gttseg = NULL;
 105 }
 106 
 107 #define list_head_for_each_safe(entry,  temp,   head)   \
 108         for (int key = 0; key < AGP_HASH_NODE; key++)        \
 109         for (entry = (&(head)->next[key])->next, temp = (entry)->next;     \
 110                 entry != &(head)->next[key];     \
 111                 entry = temp, temp = temp->next)
 112 
 113 
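/*
 * Down-convert the native agp info structure to its 32-bit ioctl
 * layout; the aperture base and size are truncated to 32 bits.
 */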
 114 #define agpinfo_default_to_32(v, v32)   \
 115         {       \
 116                 (v32).agpi32_version = (v).agpi_version;        \
 117                 (v32).agpi32_devid = (v).agpi_devid;    \
 118                 (v32).agpi32_mode = (v).agpi_mode;      \
 119                 (v32).agpi32_aperbase = (uint32_t)(v).agpi_aperbase;    \
 120                 (v32).agpi32_apersize = (uint32_t)(v).agpi_apersize;    \
 121                 (v32).agpi32_pgtotal = (v).agpi_pgtotal;        \
 122                 (v32).agpi32_pgsystem = (v).agpi_pgsystem;      \
 123                 (v32).agpi32_pgused = (v).agpi_pgused;  \
 124         }
 125 
 126 static ddi_dma_attr_t agpgart_dma_attr = {
 127         DMA_ATTR_V0,
 128         0U,                             /* dma_attr_addr_lo */
 129         0xffffffffU,                    /* dma_attr_addr_hi */
 130         0xffffffffU,                    /* dma_attr_count_max */
 131         (uint64_t)AGP_PAGE_SIZE,        /* dma_attr_align */
 132         1,                              /* dma_attr_burstsizes */
 133         1,                              /* dma_attr_minxfer */
 134         0xffffffffU,                    /* dma_attr_maxxfer */
 135         0xffffffffU,                    /* dma_attr_seg */
 136         1,                              /* dma_attr_sgllen, variable */
 137         4,                              /* dma_attr_granular */
 138         0                               /* dma_attr_flags */
 139 };
 140 
 141 /*
 142  * AMD64 supports gart table above 4G. See alloc_gart_table.
 143  */
 144 static ddi_dma_attr_t garttable_dma_attr = {
 145         DMA_ATTR_V0,
 146         0U,                             /* dma_attr_addr_lo */
 147         0xffffffffU,                    /* dma_attr_addr_hi */
 148         0xffffffffU,                    /* dma_attr_count_max */
 149         (uint64_t)AGP_PAGE_SIZE,        /* dma_attr_align */
 150         1,                              /* dma_attr_burstsizes */
 151         1,                              /* dma_attr_minxfer */
 152         0xffffffffU,                    /* dma_attr_maxxfer */
 153         0xffffffffU,                    /* dma_attr_seg */
 154         1,                              /* dma_attr_sgllen, variable */
 155         4,                              /* dma_attr_granular */
 156         0                               /* dma_attr_flags */
 157 };
 158 
 159 /*
 * The AGPGART table needs physically contiguous memory. To ensure that
 * each access to the gart table is strongly ordered and uncacheable,
 * we use DDI_STRICTORDER_ACC.
 163  */
 164 static ddi_device_acc_attr_t gart_dev_acc_attr = {
 165         DDI_DEVICE_ATTR_V0,
 166         DDI_NEVERSWAP_ACC,
 167         DDI_STRICTORDER_ACC     /* must be DDI_STRICTORDER_ACC */
 168 };
 169 
 170 /*
 * AGP memory is usually used as texture memory or for a framebuffer, so we
 * could set the memory attribute to write combining. Video drivers will
 * determine the framebuffer attributes, for example whether the memory is
 * write combining or non-cacheable. However, the interface between Xorg and
 * the agpgart driver to support attribute selection doesn't exist yet, so we
 * set AGP memory to non-cacheable by default for now. This attribute might
 * be overridden by the MTRRs on x86.
 178  */
 179 static ddi_device_acc_attr_t mem_dev_acc_attr = {
 180         DDI_DEVICE_ATTR_V0,
 181         DDI_NEVERSWAP_ACC,
 182         DDI_STRICTORDER_ACC     /* Can be DDI_MERGING_OK_ACC */
 183 };
 184 
 185 static keytable_ent_t *
 186 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset);
 187 static void
 188 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts);
 189 
 190 
 191 static void
 192 agp_devmap_unmap(devmap_cookie_t handle, void *devprivate,
 193     offset_t off, size_t len, devmap_cookie_t new_handle1,
 194     void **new_devprivate1, devmap_cookie_t new_handle2,
 195     void **new_devprivate2)
 196 {
 197 
 198         struct keytable_ent *mementry;
 199         agpgart_softstate_t *softstate;
 200         agpgart_ctx_t *ctxp, *newctxp1, *newctxp2;
 201 
 202         ASSERT(AGP_ALIGNED(len) && AGP_ALIGNED(off));
 203         ASSERT(devprivate);
 204         ASSERT(handle);
 205 
 206         ctxp = (agpgart_ctx_t *)devprivate;
 207         softstate = ctxp->actx_sc;
 208         ASSERT(softstate);
 209 
 210         if (new_handle1 != NULL) {
 211                 newctxp1 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
 212                 newctxp1->actx_sc = softstate;
 213                 newctxp1->actx_off = ctxp->actx_off;
 214                 *new_devprivate1 = newctxp1;
 215         }
 216 
 217         if (new_handle2 != NULL) {
 218                 newctxp2 = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
 219                 newctxp2->actx_sc = softstate;
 220                 newctxp2->actx_off = off + len;
 221                 *new_devprivate2 = newctxp2;
 222         }
 223 
 224         mutex_enter(&softstate->asoft_instmutex);
 225         if ((new_handle1 == NULL) && (new_handle2 == NULL)) {
 226                 mementry =
 227                     agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
 228                 ASSERT(mementry);
 229                 mementry->kte_refcnt--;
 230         } else if ((new_handle1 != NULL) && (new_handle2 != NULL)) {
 231                 mementry =
 232                     agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(off));
 233                 ASSERT(mementry);
 234                 mementry->kte_refcnt++;
 235         }
 236         ASSERT(mementry->kte_refcnt >= 0);
 237         mutex_exit(&softstate->asoft_instmutex);
 238         kmem_free(ctxp, sizeof (struct agpgart_ctx));
 239 }
 240 
 241 /*ARGSUSED*/
 242 static int
 243 agp_devmap_map(devmap_cookie_t handle, dev_t dev,
 244     uint_t flags, offset_t offset, size_t len, void **new_devprivate)
 245 {
 246         agpgart_softstate_t *softstate;
 247         int instance;
 248         struct keytable_ent *mementry;
 249         agpgart_ctx_t *newctxp;
 250 
 251         ASSERT(handle);
 252         instance = AGP_DEV2INST(dev);
 253         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
 254         if (softstate == NULL) {
 255                 AGPDB_PRINT2((CE_WARN, "agp_devmap_map: get soft state err"));
 256                 return (ENXIO);
 257         }
 258 
 259         ASSERT(softstate);
 260         ASSERT(mutex_owned(&softstate->asoft_instmutex));
 261         ASSERT(len);
 262         ASSERT(AGP_ALIGNED(offset) && AGP_ALIGNED(len));
 263 
 264         mementry =
 265             agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
 266         ASSERT(mementry);
 267         mementry->kte_refcnt++;
 268         ASSERT(mementry->kte_refcnt >= 0);
 269         newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
 270         newctxp->actx_off = offset;
 271         newctxp->actx_sc = softstate;
 272         *new_devprivate = newctxp;
 273 
 274         return (0);
 275 }
 276 
 277 /*ARGSUSED*/
 278 static int agp_devmap_dup(devmap_cookie_t handle, void *devprivate,
 279     devmap_cookie_t new_handle, void **new_devprivate)
 280 {
 281         struct keytable_ent *mementry;
 282         agpgart_ctx_t *newctxp, *ctxp;
 283         agpgart_softstate_t *softstate;
 284 
 285         ASSERT(devprivate);
 286         ASSERT(handle && new_handle);
 287 
 288         ctxp = (agpgart_ctx_t *)devprivate;
 289         ASSERT(AGP_ALIGNED(ctxp->actx_off));
 290 
 291         newctxp = kmem_zalloc(sizeof (agpgart_ctx_t), KM_SLEEP);
 292         newctxp->actx_off = ctxp->actx_off;
 293         newctxp->actx_sc = ctxp->actx_sc;
 294         softstate = (agpgart_softstate_t *)newctxp->actx_sc;
 295 
 296         mutex_enter(&softstate->asoft_instmutex);
 297         mementry = agp_find_bound_keyent(softstate,
 298             AGP_BYTES2PAGES(newctxp->actx_off));
 299         mementry->kte_refcnt++;
 300         ASSERT(mementry->kte_refcnt >= 0);
 301         mutex_exit(&softstate->asoft_instmutex);
 302         *new_devprivate = newctxp;
 303 
 304         return (0);
 305 }
 306 
 307 struct devmap_callback_ctl agp_devmap_cb = {
 308         DEVMAP_OPS_REV,         /* rev */
 309         agp_devmap_map,         /* map */
 310         NULL,                   /* access */
 311         agp_devmap_dup,         /* dup */
 312         agp_devmap_unmap,       /* unmap */
 313 };
 314 
 315 /*
 316  * agp_master_regis_byname()
 317  *
 318  * Description:
 319  *      Open the AGP master device node by device path name and
 320  *      register the device handle for later operations.
 *      We check all possible driver instances from 0
 *      to MAX_INSTNUM because the master device could be
 *      at any instance number. Only one AGP master is supported.
 *
 * Arguments:
 *      master_hdlp             AGP master device LDI handle pointer
 *      agpgart_li              AGPGART driver LDI identifier
 328  *
 329  * Returns:
 330  *      -1                      failed
 331  *      0                       success
 332  */
 333 static int
 334 agp_master_regis_byname(ldi_handle_t *master_hdlp, ldi_ident_t agpgart_li)
 335 {
 336         int     i;
 337         char    buf[MAXPATHLEN];
 338 
 339         ASSERT(master_hdlp);
 340         ASSERT(agpgart_li);
 341 
 342         /*
 343          * Search all possible instance numbers for the agp master device.
 344          * Only one master device is supported now, so the search ends
 345          * when one master device is found.
 346          */
 347         for (i = 0; i < MAX_INSTNUM; i++) {
 348                 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPMASTER_DEVLINK, i);
 349                 if ((ldi_open_by_name(buf, 0, kcred,
 350                     master_hdlp, agpgart_li)))
 351                         continue;
 352                 AGPDB_PRINT1((CE_NOTE,
 353                     "master device found: instance number=%d", i));
 354                 break;
 355 
 356         }
 357 
 358         /* AGP master device not found */
 359         if (i == MAX_INSTNUM)
 360                 return (-1);
 361 
 362         return (0);
 363 }
 364 
 365 /*
 366  * agp_target_regis_byname()
 367  *
 368  * Description:
 *      This function opens the AGP bridge device node by
 *      device path name and registers the device handle
 *      for later operations.
 *      We check driver instances from 0 to MAX_INSTNUM
 *      because the target device could be at any instance
 *      number. Only one AGP target is supported.
 *
 * Arguments:
 *      target_hdlp             AGP target device LDI handle pointer
 *      agpgart_li              AGPGART driver LDI identifier
 380  *
 381  * Returns:
 382  *      -1                      failed
 383  *      0                       success
 384  */
 385 static int
 386 agp_target_regis_byname(ldi_handle_t *target_hdlp, ldi_ident_t agpgart_li)
 387 {
 388         int     i;
 389         char    buf[MAXPATHLEN];
 390 
 391         ASSERT(target_hdlp);
 392         ASSERT(agpgart_li);
 393 
 394         for (i = 0; i < MAX_INSTNUM; i++) {
 395                 (void) snprintf(buf, MAXPATHLEN, "%s%d", AGPTARGET_DEVLINK, i);
 396                 if ((ldi_open_by_name(buf, 0, kcred,
 397                     target_hdlp, agpgart_li)))
 398                         continue;
 399 
 400                 AGPDB_PRINT1((CE_NOTE,
 401                     "bridge device found: instance number=%d", i));
 402                 break;
 403 
 404         }
 405 
 406         /* AGP bridge device not found */
 407         if (i == MAX_INSTNUM) {
 408                 AGPDB_PRINT2((CE_WARN, "bridge device not found"));
 409                 return (-1);
 410         }
 411 
 412         return (0);
 413 }
 414 
 415 /*
 416  * amd64_gart_regis_byname()
 417  *
 418  * Description:
 *      Open all amd64 gart device nodes by device path name and
 *      register the device handles for later operations. Each cpu
 *      has its own amd64 gart device.
 *
 * Arguments:
 *      cpu_garts               cpu garts device list header
 *      agpgart_li              AGPGART driver LDI identifier
 426  *
 427  * Returns:
 428  *      -1                      failed
 429  *      0                       success
 430  */
 431 static int
 432 amd64_gart_regis_byname(amd64_garts_dev_t *cpu_garts, ldi_ident_t agpgart_li)
 433 {
 434         amd64_gart_dev_list_t   *gart_list;
 435         int                     i;
 436         char                    buf[MAXPATHLEN];
 437         ldi_handle_t            gart_hdl;
 438         int                     ret;
 439 
 440         ASSERT(cpu_garts);
 441         ASSERT(agpgart_li);
 442 
 443         /*
 444          * Search all possible instance numbers for the gart devices.
         * There can be multiple on-cpu gart devices on an Opteron server.
 446          */
 447         for (i = 0; i < MAX_INSTNUM; i++) {
 448                 (void) snprintf(buf, MAXPATHLEN, "%s%d", CPUGART_DEVLINK, i);
 449                 ret = ldi_open_by_name(buf, 0, kcred,
 450                     &gart_hdl, agpgart_li);
 451 
 452                 if (ret == ENODEV)
 453                         continue;
 454                 else if (ret != 0) { /* There was an error opening the device */
 455                         amd64_gart_unregister(cpu_garts);
 456                         return (ret);
 457                 }
 458 
 459                 AGPDB_PRINT1((CE_NOTE,
 460                     "amd64 gart device found: instance number=%d", i));
 461 
 462                 gart_list = (amd64_gart_dev_list_t *)
 463                     kmem_zalloc(sizeof (amd64_gart_dev_list_t), KM_SLEEP);
 464 
 465                 /* Add new item to the head of the gart device list */
 466                 gart_list->gart_devhdl = gart_hdl;
 467                 gart_list->next = cpu_garts->gart_dev_list_head;
 468                 cpu_garts->gart_dev_list_head = gart_list;
 469                 cpu_garts->gart_device_num++;
 470         }
 471 
 472         if (cpu_garts->gart_device_num == 0)
 473                 return (ENODEV);
 474         return (0);
 475 }
 476 
 477 /*
 478  * Unregister agp master device handle
 479  */
 480 static void
 481 agp_master_unregister(ldi_handle_t *master_hdlp)
 482 {
 483         ASSERT(master_hdlp);
 484 
 485         if (master_hdlp) {
 486                 (void) ldi_close(*master_hdlp, 0, kcred);
 487                 *master_hdlp = NULL;
 488         }
 489 }
 490 
 491 /*
 492  * Unregister agp bridge device handle
 493  */
 494 static void
 495 agp_target_unregister(ldi_handle_t *target_hdlp)
 496 {
 497         if (target_hdlp) {
 498                 (void) ldi_close(*target_hdlp, 0, kcred);
 499                 *target_hdlp = NULL;
 500         }
 501 }
 502 
 503 /*
 504  * Unregister all amd64 gart device handles
 505  */
 506 static void
 507 amd64_gart_unregister(amd64_garts_dev_t *cpu_garts)
 508 {
 509         amd64_gart_dev_list_t   *gart_list;
 510         amd64_gart_dev_list_t   *next;
 511 
 512         ASSERT(cpu_garts);
 513 
 514         for (gart_list = cpu_garts->gart_dev_list_head;
 515             gart_list; gart_list = next) {
 516 
 517                 ASSERT(gart_list->gart_devhdl);
 518                 (void) ldi_close(gart_list->gart_devhdl, 0, kcred);
 519                 next = gart_list->next;
 520                 /* Free allocated memory */
 521                 kmem_free(gart_list, sizeof (amd64_gart_dev_list_t));
 522         }
 523         cpu_garts->gart_dev_list_head = NULL;
 524         cpu_garts->gart_device_num = 0;
 525 }
 526 
 527 /*
 528  * lyr_detect_master_type()
 529  *
 530  * Description:
 531  *      This function gets agp master type by querying agp master device.
 532  *
 533  * Arguments:
 534  *      master_hdlp             agp master device ldi handle pointer
 535  *
 536  * Returns:
 537  *      -1                      unsupported device
 538  *      DEVICE_IS_I810          i810 series
 *      DEVICE_IS_I830          i830 series
 540  *      DEVICE_IS_AGP           true agp master
 541  */
 542 static int
 543 lyr_detect_master_type(ldi_handle_t *master_hdlp)
 544 {
 545         int vtype;
 546         int err;
 547 
 548         ASSERT(master_hdlp);
 549 
 550         /* ldi_ioctl(agpmaster) */
 551         err = ldi_ioctl(*master_hdlp, DEVICE_DETECT,
 552             (intptr_t)&vtype, FKIOCTL, kcred, 0);
 553         if (err) /* Unsupported graphics device */
 554                 return (-1);
 555         return (vtype);
 556 }
 557 
 558 /*
 * lyr_detect_target_type()
 560  *
 561  * Description:
 562  *      This function gets the host bridge chipset type by querying the agp
 563  *      target device.
 564  *
 565  * Arguments:
 566  *      target_hdlp             agp target device LDI handle pointer
 567  *
 568  * Returns:
 569  *      CHIP_IS_INTEL           Intel agp chipsets
 570  *      CHIP_IS_AMD             AMD agp chipset
 571  *      -1                      unsupported chipset
 572  */
 573 static int
 574 lyr_detect_target_type(ldi_handle_t *target_hdlp)
 575 {
 576         int btype;
 577         int err;
 578 
 579         ASSERT(target_hdlp);
 580 
 581         err = ldi_ioctl(*target_hdlp, CHIP_DETECT, (intptr_t)&btype,
 582             FKIOCTL, kcred, 0);
 583         if (err)        /* Unsupported bridge device */
 584                 return (-1);
 585         return (btype);
 586 }
 587 
 588 /*
 589  * lyr_init()
 590  *
 591  * Description:
 *      This function detects the graphics system architecture and
 *      registers all relevant device handles in a global structure
 *      "agp_regdev". Then it stores the system arc type in the driver
 *      soft state.
 *
 * Arguments:
 *      agp_regdev              AGP devices registration struct pointer
 *      agpgart_li              AGPGART driver LDI identifier
 *
 * Returns:
 *      0       System arc supported and agp devices registration succeeded.
 603  *      -1      System arc not supported or device registration failed.
 604  */
 605 int
 606 lyr_init(agp_registered_dev_t *agp_regdev, ldi_ident_t agpgart_li)
 607 {
 608         ldi_handle_t *master_hdlp;
 609         ldi_handle_t *target_hdlp;
 610         amd64_garts_dev_t *garts_dev;
 611         int card_type, chip_type;
 612         int ret;
 613 
 614         ASSERT(agp_regdev);
 615 
 616         bzero(agp_regdev, sizeof (agp_registered_dev_t));
 617         agp_regdev->agprd_arctype = ARC_UNKNOWN;
 618         /*
 619          * Register agp devices, assuming all instances attached, and
         * detect which agp architecture this server belongs to. This
 621          * must be done before the agpgart driver starts to use layered
 622          * driver interfaces.
 623          */
 624         master_hdlp = &agp_regdev->agprd_masterhdl;
 625         target_hdlp = &agp_regdev->agprd_targethdl;
 626         garts_dev = &agp_regdev->agprd_cpugarts;
 627 
 628         /* Check whether the system is amd64 arc */
 629         if ((ret = amd64_gart_regis_byname(garts_dev, agpgart_li)) == ENODEV) {
 630                 /* No amd64 gart devices */
 631                 AGPDB_PRINT1((CE_NOTE,
 632                     "lyr_init: this is not an amd64 system"));
 633                 if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
 634                         AGPDB_PRINT2((CE_WARN,
 635                             "lyr_init: register master device unsuccessful"));
 636                         goto err1;
 637                 }
 638                 if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
 639                         AGPDB_PRINT2((CE_WARN,
 640                             "lyr_init: register target device unsuccessful"));
 641                         goto err2;
 642                 }
 643                 card_type = lyr_detect_master_type(master_hdlp);
 644                 /*
                 * Detect system arc by master device. If it is an Intel
 646                  * integrated device, finish the detection successfully.
 647                  */
 648                 switch (card_type) {
 649                 case DEVICE_IS_I810:    /* I810 likewise graphics */
 650                         AGPDB_PRINT1((CE_NOTE,
 651                             "lyr_init: the system is Intel 810 arch"));
 652                         agp_regdev->agprd_arctype = ARC_IGD810;
 653                         return (0);
 654                 case DEVICE_IS_I830:    /* I830 likewise graphics */
 655                         AGPDB_PRINT1((CE_NOTE,
 656                             "lyr_init: the system is Intel 830 arch"));
 657                         agp_regdev->agprd_arctype = ARC_IGD830;
 658                         return (0);
 659                 case DEVICE_IS_AGP:     /* AGP graphics */
 660                         break;
 661                 default:                /* Non IGD/AGP graphics */
 662                         AGPDB_PRINT2((CE_WARN,
 663                             "lyr_init: non-supported master device"));
 664                         goto err3;
 665                 }
 666 
 667                 chip_type = lyr_detect_target_type(target_hdlp);
 668 
 669                 /* Continue to detect AGP arc by target device */
 670                 switch (chip_type) {
 671                 case CHIP_IS_INTEL:     /* Intel chipset */
 672                         AGPDB_PRINT1((CE_NOTE,
 673                             "lyr_init: Intel AGP arch detected"));
 674                         agp_regdev->agprd_arctype = ARC_INTELAGP;
 675                         return (0);
 676                 case CHIP_IS_AMD:       /* AMD chipset */
 677                         AGPDB_PRINT2((CE_WARN,
 678                             "lyr_init: no cpu gart, but have AMD64 chipsets"));
 679                         goto err3;
 680                 default:                /* Non supported chipset */
 681                         AGPDB_PRINT2((CE_WARN,
 682                             "lyr_init: detection can not continue"));
 683                         goto err3;
 684                 }
 685 
 686         }
 687 
 688         if (ret)
                return (-1); /* Errors opening amd64 cpu gart devices */
 690 
 691         /*
         * An AMD64 cpu gart device exists, continue detection
 693          */
 694         if (agp_master_regis_byname(master_hdlp, agpgart_li)) {
 695                 AGPDB_PRINT1((CE_NOTE, "lyr_init: no AGP master in amd64"));
 696                 goto err1;
 697         }
 698 
 699         if (agp_target_regis_byname(target_hdlp, agpgart_li)) {
 700                 AGPDB_PRINT1((CE_NOTE,
 701                     "lyr_init: no AGP bridge"));
 702                 goto err2;
 703         }
 704 
 705         AGPDB_PRINT1((CE_NOTE,
 706             "lyr_init: the system is AMD64 AGP architecture"));
 707 
 708         agp_regdev->agprd_arctype = ARC_AMD64AGP;
 709 
 710         return (0); /* Finished successfully */
 711 
 712 err3:
 713         agp_target_unregister(&agp_regdev->agprd_targethdl);
 714 err2:
 715         agp_master_unregister(&agp_regdev->agprd_masterhdl);
 716 err1:
 717         /* AMD64 CPU gart registered ? */
 718         if (ret == 0) {
 719                 amd64_gart_unregister(garts_dev);
 720         }
 721         agp_regdev->agprd_arctype = ARC_UNKNOWN;
 722         return (-1);
 723 }
 724 
 725 void
 726 lyr_end(agp_registered_dev_t *agp_regdev)
 727 {
 728         ASSERT(agp_regdev);
 729 
 730         switch (agp_regdev->agprd_arctype) {
 731         case ARC_IGD810:
 732         case ARC_IGD830:
 733         case ARC_INTELAGP:
 734                 agp_master_unregister(&agp_regdev->agprd_masterhdl);
 735                 agp_target_unregister(&agp_regdev->agprd_targethdl);
 736 
 737                 return;
 738         case ARC_AMD64AGP:
 739                 agp_master_unregister(&agp_regdev->agprd_masterhdl);
 740                 agp_target_unregister(&agp_regdev->agprd_targethdl);
 741                 amd64_gart_unregister(&agp_regdev->agprd_cpugarts);
 742 
 743                 return;
 744         default:
 745                 ASSERT(0);
 746                 return;
 747         }
 748 }
 749 
 750 int
 751 lyr_get_info(agp_kern_info_t *info, agp_registered_dev_t *agp_regdev)
 752 {
 753         ldi_handle_t hdl;
 754         igd_info_t value1;
 755         i_agp_info_t value2;
 756         size_t prealloc_size;
 757         int err;
 758 
 759         ASSERT(info);
 760         ASSERT(agp_regdev);
 761 
 762         switch (agp_regdev->agprd_arctype) {
 763         case ARC_IGD810:
 764                 hdl = agp_regdev->agprd_masterhdl;
 765                 err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
 766                     FKIOCTL, kcred, 0);
 767                 if (err)
 768                         return (-1);
 769                 info->agpki_mdevid = value1.igd_devid;
 770                 info->agpki_aperbase = value1.igd_aperbase;
 771                 info->agpki_apersize = (uint32_t)value1.igd_apersize;
 772 
 773                 hdl = agp_regdev->agprd_targethdl;
 774                 err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
 775                     (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
 776                 if (err)
 777                         return (-1);
 778                 info->agpki_presize = prealloc_size;
 779 
 780                 break;
 781 
 782         case ARC_IGD830:
 783                 hdl = agp_regdev->agprd_masterhdl;
 784                 err = ldi_ioctl(hdl, I8XX_GET_INFO, (intptr_t)&value1,
 785                     FKIOCTL, kcred, 0);
 786                 if (err)
 787                         return (-1);
 788                 info->agpki_mdevid = value1.igd_devid;
 789                 info->agpki_aperbase = value1.igd_aperbase;
 790                 info->agpki_apersize = (uint32_t)value1.igd_apersize;
 791 
 792                 hdl = agp_regdev->agprd_targethdl;
 793                 err = ldi_ioctl(hdl, I8XX_GET_PREALLOC_SIZE,
 794                     (intptr_t)&prealloc_size, FKIOCTL, kcred, 0);
 795                 if (err)
 796                         return (-1);
 797 
 798                 /*
 799                  * Assume all units are kilobytes unless explicitly
 800                  * stated below:
 801                  * preallocated GTT memory = preallocated memory - GTT size
 802                  *      - scratch page size
 803                  *
 804                  * scratch page size = 4
 805                  * GTT size (KB) = aperture size (MB)
 806                  * this algorithm came from Xorg source code
 807                  */
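                /*
                 * For example, with 8192KB of preallocated memory and a
                 * 128MB aperture, the GTT occupies 128KB, so the usable
                 * preallocated memory is 8192 - 128 - 4 = 8060KB.
                 */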
 808                 if (prealloc_size > (info->agpki_apersize + 4))
 809                         prealloc_size =
 810                             prealloc_size - info->agpki_apersize - 4;
 811                 else {
 812                         AGPDB_PRINT2((CE_WARN, "lyr_get_info: "
 813                             "pre-allocated memory too small, setting to zero"));
 814                         prealloc_size = 0;
 815                 }
 816                 info->agpki_presize = prealloc_size;
 817                 AGPDB_PRINT2((CE_NOTE,
 818                     "lyr_get_info: prealloc_size = %ldKB, apersize = %dMB",
 819                     prealloc_size, info->agpki_apersize));
 820                 break;
 821         case ARC_INTELAGP:
 822         case ARC_AMD64AGP:
 823                 /* AGP devices */
 824                 hdl = agp_regdev->agprd_masterhdl;
 825                 err = ldi_ioctl(hdl, AGP_MASTER_GETINFO,
 826                     (intptr_t)&value2, FKIOCTL, kcred, 0);
 827                 if (err)
 828                         return (-1);
 829                 info->agpki_mdevid = value2.iagp_devid;
 830                 info->agpki_mver = value2.iagp_ver;
 831                 info->agpki_mstatus = value2.iagp_mode;
 832                 hdl = agp_regdev->agprd_targethdl;
 833                 err = ldi_ioctl(hdl, AGP_TARGET_GETINFO,
 834                     (intptr_t)&value2, FKIOCTL, kcred, 0);
 835                 if (err)
 836                         return (-1);
 837                 info->agpki_tdevid = value2.iagp_devid;
 838                 info->agpki_tver = value2.iagp_ver;
 839                 info->agpki_tstatus = value2.iagp_mode;
 840                 info->agpki_aperbase = value2.iagp_aperbase;
 841                 info->agpki_apersize = (uint32_t)value2.iagp_apersize;
 842                 break;
 843         default:
 844                 AGPDB_PRINT2((CE_WARN,
 845                     "lyr_get_info: function doesn't work for unknown arc"));
 846                 return (-1);
 847         }
 848         if ((info->agpki_apersize >= MAXAPERMEGAS) ||
 849             (info->agpki_apersize == 0) ||
 850             (info->agpki_aperbase == 0)) {
 851                 AGPDB_PRINT2((CE_WARN,
 852                     "lyr_get_info: aperture is not programmed correctly!"));
 853                 return (-1);
 854         }
 855 
 856         return (0);
 857 }
 858 
 859 /*
 860  * lyr_i8xx_add_to_gtt()
 861  *
 862  * Description:
 863  *      This function sets up the integrated video device gtt table
 *      via an ioctl to the AGP master driver.
 *
 * Arguments:
 *      pg_offset       The start entry to be set up
 868  *      keyent          Keytable entity pointer
 869  *      agp_regdev      AGP devices registration struct pointer
 870  *
 871  * Returns:
 872  *      0               success
 873  *      -1              invalid operations
 874  */
 875 int
 876 lyr_i8xx_add_to_gtt(uint32_t pg_offset, keytable_ent_t *keyent,
 877     agp_registered_dev_t *agp_regdev)
 878 {
 879         int err = 0;
 880         int rval;
 881         ldi_handle_t hdl;
 882         igd_gtt_seg_t gttseg;
 883         uint32_t *addrp, i;
 884         uint32_t npages;
 885 
 886         ASSERT(keyent);
 887         ASSERT(agp_regdev);
 888         gttseg.igs_pgstart =  pg_offset;
 889         npages = keyent->kte_pages;
 890         gttseg.igs_npage = npages;
 891         gttseg.igs_type = keyent->kte_type;
 892         gttseg.igs_phyaddr = (uint32_t *)kmem_zalloc
 893             (sizeof (uint32_t) * gttseg.igs_npage, KM_SLEEP);
 894 
 895         addrp = gttseg.igs_phyaddr;
 896         for (i = 0; i < npages; i++, addrp++) {
 897                 *addrp =
 898                     (uint32_t)((keyent->kte_pfnarray[i]) << GTT_PAGE_SHIFT);
 899         }
 900 
 901         hdl = agp_regdev->agprd_masterhdl;
 902         if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)&gttseg, FKIOCTL,
 903             kcred, &rval)) {
 904                 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: ldi_ioctl error"));
 905                 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pg_start=0x%x",
 906                     gttseg.igs_pgstart));
 907                 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: pages=0x%x",
 908                     gttseg.igs_npage));
 909                 AGPDB_PRINT2((CE_WARN, "lyr_i8xx_add_to_gtt: type=0x%x",
 910                     gttseg.igs_type));
 911                 err = -1;
 912         }
 913         kmem_free(gttseg.igs_phyaddr, sizeof (uint32_t) * gttseg.igs_npage);
 914         return (err);
 915 }
 916 
 917 /*
 918  * lyr_i8xx_remove_from_gtt()
 919  *
 920  * Description:
 921  *      This function clears the integrated video device gtt table via
 922  *      an ioctl to the agp master device.
 923  *
 924  * Arguments:
 925  *      pg_offset       The starting entry to be cleared
 926  *      npage           The number of entries to be cleared
 927  *      agp_regdev      AGP devices struct pointer
 928  *
 929  * Returns:
 930  *      0               success
 931  *      -1              invalid operations
 932  */
 933 int
 934 lyr_i8xx_remove_from_gtt(uint32_t pg_offset, uint32_t npage,
 935     agp_registered_dev_t *agp_regdev)
 936 {
 937         int                     rval;
 938         ldi_handle_t            hdl;
 939         igd_gtt_seg_t           gttseg;
 940 
 941         gttseg.igs_pgstart =  pg_offset;
 942         gttseg.igs_npage = npage;
 943 
 944         hdl = agp_regdev->agprd_masterhdl;
 945         if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)&gttseg, FKIOCTL,
 946             kcred, &rval))
 947                 return (-1);
 948 
 949         return (0);
 950 }
 951 
 952 /*
 953  * lyr_set_gart_addr()
 954  *
 955  * Description:
 956  *      This function puts the gart table physical address in the
 957  *      gart base register.
 *      Please refer to the GART and GTT table base register format
 *      in agpdefs.h.
 960  *
 961  * Arguments:
 962  *      phy_base        The base physical address of gart table
 963  *      agp_regdev      AGP devices registration struct pointer
 964  *
 965  * Returns:
 966  *      0               success
 967  *      -1              failed
 968  *
 969  */
 970 
 971 int
 972 lyr_set_gart_addr(uint64_t phy_base, agp_registered_dev_t *agp_regdev)
 973 {
 974         amd64_gart_dev_list_t   *gart_list;
 975         ldi_handle_t            hdl;
 976         int                     err = 0;
 977 
 978         ASSERT(agp_regdev);
 979         switch (agp_regdev->agprd_arctype) {
 980         case ARC_IGD810:
 981         {
 982                 uint32_t base;
 983 
 984                 ASSERT((phy_base & I810_POINTER_MASK) == 0);
 985                 base = (uint32_t)phy_base;
 986 
 987                 hdl = agp_regdev->agprd_masterhdl;
 988                 err = ldi_ioctl(hdl, I810_SET_GTT_BASE,
 989                     (intptr_t)&base, FKIOCTL, kcred, 0);
 990                 break;
 991         }
 992         case ARC_INTELAGP:
 993         {
 994                 uint32_t addr;
 995                 addr = (uint32_t)phy_base;
 996 
 997                 ASSERT((phy_base & GTT_POINTER_MASK) == 0);
 998                 hdl = agp_regdev->agprd_targethdl;
 999                 err = ldi_ioctl(hdl, AGP_TARGET_SET_GATTADDR,
1000                     (intptr_t)&addr, FKIOCTL, kcred, 0);
1001                 break;
1002         }
1003         case ARC_AMD64AGP:
1004         {
1005                 uint32_t addr;
1006 
1007                 ASSERT((phy_base & AMD64_POINTER_MASK) == 0);
1008                 addr = (uint32_t)((phy_base >> AMD64_GARTBASE_SHIFT)
1009                     & AMD64_GARTBASE_MASK);
1010 
1011                 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1012                     gart_list;
1013                     gart_list = gart_list->next) {
1014                         hdl = gart_list->gart_devhdl;
1015                         if (ldi_ioctl(hdl, AMD64_SET_GART_ADDR,
1016                             (intptr_t)&addr, FKIOCTL, kcred, 0)) {
1017                                 err = -1;
1018                                 break;
1019                         }
1020                 }
1021                 break;
1022         }
1023         default:
1024                 err = -1;
1025         }
1026 
1027         if (err)
1028                 return (-1);
1029 
1030         return (0);
1031 }
1032 
1033 int
1034 lyr_set_agp_cmd(uint32_t cmd, agp_registered_dev_t *agp_regdev)
1035 {
1036         ldi_handle_t hdl;
1037         uint32_t command;
1038 
1039         ASSERT(agp_regdev);
1040         command = cmd;
1041         hdl = agp_regdev->agprd_targethdl;
1042         if (ldi_ioctl(hdl, AGP_TARGET_SETCMD,
1043             (intptr_t)&command, FKIOCTL, kcred, 0))
1044                 return (-1);
1045         hdl = agp_regdev->agprd_masterhdl;
1046         if (ldi_ioctl(hdl, AGP_MASTER_SETCMD,
1047             (intptr_t)&command, FKIOCTL, kcred, 0))
1048                 return (-1);
1049 
1050         return (0);
1051 }
1052 
1053 int
1054 lyr_config_devices(agp_registered_dev_t *agp_regdev)
1055 {
1056         amd64_gart_dev_list_t   *gart_list;
1057         ldi_handle_t            hdl;
1058         int                     rc = 0;
1059 
1060         ASSERT(agp_regdev);
1061         switch (agp_regdev->agprd_arctype) {
1062         case ARC_IGD830:
1063         case ARC_IGD810:
1064                 break;
1065         case ARC_INTELAGP:
1066         {
1067                 hdl = agp_regdev->agprd_targethdl;
1068                 rc = ldi_ioctl(hdl, AGP_TARGET_CONFIGURE,
1069                     0, FKIOCTL, kcred, 0);
1070                 break;
1071         }
1072         case ARC_AMD64AGP:
1073         {
1074                 /*
                 * The BIOS always shadows registers such as the Aperture Base
                 * Register and the Aperture Size Register from the AGP bridge
                 * to the AMD64 CPU host bridge. If future BIOSes are broken
                 * in this regard, we may need to shadow these registers
                 * in the driver.
1080                  */
1081 
1082                 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1083                     gart_list;
1084                     gart_list = gart_list->next) {
1085                         hdl = gart_list->gart_devhdl;
1086                         if (ldi_ioctl(hdl, AMD64_CONFIGURE,
1087                             0, FKIOCTL, kcred, 0)) {
1088                                 rc = -1;
1089                                 break;
1090                         }
1091                 }
1092                 break;
1093         }
1094         default:
1095                 rc = -1;
1096         }
1097 
1098         if (rc)
1099                 return (-1);
1100 
1101         return (0);
1102 }
1103 
1104 int
1105 lyr_unconfig_devices(agp_registered_dev_t *agp_regdev)
1106 {
1107         amd64_gart_dev_list_t   *gart_list;
1108         ldi_handle_t            hdl;
1109         int                     rc = 0;
1110 
1111         ASSERT(agp_regdev);
1112         switch (agp_regdev->agprd_arctype) {
1113         case ARC_IGD830:
1114         case ARC_IGD810:
1115         {
1116                 hdl = agp_regdev->agprd_masterhdl;
1117                 rc = ldi_ioctl(hdl, I8XX_UNCONFIG, 0, FKIOCTL, kcred, 0);
1118                 break;
1119         }
1120         case ARC_INTELAGP:
1121         {
1122                 hdl = agp_regdev->agprd_targethdl;
1123                 rc = ldi_ioctl(hdl, AGP_TARGET_UNCONFIG,
1124                     0, FKIOCTL, kcred, 0);
1125                 break;
1126         }
1127         case ARC_AMD64AGP:
1128         {
1129                 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1130                     gart_list; gart_list = gart_list->next) {
1131                         hdl = gart_list->gart_devhdl;
1132                         if (ldi_ioctl(hdl, AMD64_UNCONFIG,
1133                             0, FKIOCTL, kcred, 0)) {
1134                                 rc = -1;
1135                                 break;
1136                         }
1137                 }
1138                 break;
1139         }
1140         default:
1141                 rc = -1;
1142         }
1143 
1144         if (rc)
1145                 return (-1);
1146 
1147         return (0);
1148 }
1149 
1150 /*
1151  * lyr_flush_gart_cache()
1152  *
1153  * Description:
1154  *      This function flushes the GART translation look-aside buffer. All
1155  *      GART translation caches will be flushed after this operation.
1156  *
1157  * Arguments:
1158  *      agp_regdev      AGP devices struct pointer
1159  */
1160 void
1161 lyr_flush_gart_cache(agp_registered_dev_t *agp_regdev)
1162 {
1163         amd64_gart_dev_list_t   *gart_list;
1164         ldi_handle_t            hdl;
1165 
1166         ASSERT(agp_regdev);
1167         if (agp_regdev->agprd_arctype == ARC_AMD64AGP) {
1168                 for (gart_list = agp_regdev->agprd_cpugarts.gart_dev_list_head;
1169                     gart_list; gart_list = gart_list->next) {
1170                         hdl = gart_list->gart_devhdl;
1171                         (void) ldi_ioctl(hdl, AMD64_FLUSH_GTLB,
1172                             0, FKIOCTL, kcred, 0);
1173                 }
1174         } else if (agp_regdev->agprd_arctype == ARC_INTELAGP) {
1175                 hdl = agp_regdev->agprd_targethdl;
1176                 (void) ldi_ioctl(hdl, AGP_TARGET_FLUSH_GTLB, 0,
1177                     FKIOCTL, kcred, 0);
1178         }
1179 }
1180 
1181 /*
1182  * get_max_pages()
1183  *
1184  * Description:
 *      This function computes the total number of pages allowed for the agp
 *      aperture based on the amount of physical memory.
 *      The algorithm is: compare the aperture size with 1/4 of the total
 *      physical pages, and use the smaller of the two as the maximum number
 *      of available pages. The minimum video memory should be 192M, however.
1190  *
1191  * Arguments:
1192  *      aper_size       system agp aperture size (in MB)
1193  *
1194  * Returns:
1195  *      The max possible number of agp memory pages available to users
1196  */
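/*
 * For example, assuming 4KB AGP pages, a 256MB aperture allows
 * AGP_MB2PAGES(256) = 65536 pages; with 1GB of physical memory
 * (262144 pages), physmem >> 2 is also 65536, so the cap would be
 * 65536 pages, subject to the MINAPERMEGAS floor below.
 */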
1197 static uint32_t
1198 get_max_pages(uint32_t aper_size)
1199 {
1200         uint32_t i, j, size;
1201 
1202         ASSERT(aper_size <= MAXAPERMEGAS);
1203 
1204         i = AGP_MB2PAGES(aper_size);
1205         j = (physmem >> 2);
1206 
1207         size = ((i < j) ? i : j);
1208 
1209         if (size < AGP_MB2PAGES(MINAPERMEGAS))
1210                 size = AGP_MB2PAGES(MINAPERMEGAS);
1211         return (size);
1212 }
1213 
1214 /*
1215  * agp_fill_empty_keyent()
1216  *
1217  * Description:
 *      This function finds an empty key table slot and
 *      fills it with a new entity.
 *
 * Arguments:
 *      softstate       driver soft state pointer
1223  *      entryp          new entity data pointer
1224  *
1225  * Returns:
1226  *      NULL    no key table slot available
1227  *      entryp  the new entity slot pointer
1228  */
1229 static keytable_ent_t *
1230 agp_fill_empty_keyent(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1231 {
1232         int key;
1233         keytable_ent_t *newentryp;
1234 
1235         ASSERT(softstate);
1236         ASSERT(entryp);
1237         ASSERT(entryp->kte_memhdl);
1238         ASSERT(entryp->kte_pfnarray);
1239         ASSERT(mutex_owned(&softstate->asoft_instmutex));
1240 
1241         for (key = 0; key < AGP_MAXKEYS; key++) {
1242                 newentryp = &softstate->asoft_table[key];
1243                 if (newentryp->kte_memhdl == NULL) {
1244                         break;
1245                 }
1246         }
1247 
1248         if (key >= AGP_MAXKEYS) {
1249                 AGPDB_PRINT2((CE_WARN,
1250                     "agp_fill_empty_keyent: key table exhausted"));
1251                 return (NULL);
1252         }
1253 
1254         ASSERT(newentryp->kte_pfnarray == NULL);
1255         bcopy(entryp, newentryp, sizeof (keytable_ent_t));
1256         newentryp->kte_key = key;
1257 
1258         return (newentryp);
1259 }
1260 
1261 /*
1262  * agp_find_bound_keyent()
1263  *
1264  * Description:
1265  *      This function finds the key table entity by agp aperture page offset.
1266  *      Every keytable entity will have an agp aperture range after the binding
1267  *      operation.
1268  *
1269  * Arguments:
 *      softstate       driver soft state pointer
1271  *      pg_offset       agp aperture page offset
1272  *
1273  * Returns:
1274  *      NULL            no such keytable entity
1275  *      pointer         key table entity pointer found
1276  */
1277 static keytable_ent_t *
1278 agp_find_bound_keyent(agpgart_softstate_t *softstate, uint32_t pg_offset)
1279 {
1280         int keycount;
1281         keytable_ent_t *entryp;
1282 
1283         ASSERT(softstate);
1284         ASSERT(mutex_owned(&softstate->asoft_instmutex));
1285 
1286         for (keycount = 0; keycount < AGP_MAXKEYS; keycount++) {
1287                 entryp = &softstate->asoft_table[keycount];
1288                 if (entryp->kte_bound == 0) {
1289                         continue;
1290                 }
1291 
1292                 if (pg_offset < entryp->kte_pgoff)
1293                         continue;
1294                 if (pg_offset >= (entryp->kte_pgoff + entryp->kte_pages))
1295                         continue;
1296 
1297                 ASSERT(entryp->kte_memhdl);
1298                 ASSERT(entryp->kte_pfnarray);
1299 
1300                 return (entryp);
1301         }
1302 
1303         return (NULL);
1304 }
1305 
1306 /*
1307  * agp_check_off()
1308  *
1309  * Description:
1310  *      This function checks whether an AGP aperture range to be bound
 *      overlaps with any AGP range already bound.
 *
 * Arguments:
 *      entryp          key table start entry pointer
 *      pg_start        AGP range start page offset
 *      pg_num          number of pages to be bound
1317  *
1318  * Returns:
1319  *      0               Does not overlap
1320  *      -1              Overlaps
1321  */
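/*
 * Two half-open ranges [pg_start, pg_end) and [kte_pgoff, kpg_end) are
 * disjoint only if one ends at or before the other begins; the loop
 * below breaks out on the first bound entry that overlaps.
 */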
1322 
1323 static int
1324 agp_check_off(keytable_ent_t *entryp, uint32_t pg_start, uint32_t pg_num)
1325 {
1326         int key;
1327         uint64_t pg_end;
1328         uint64_t kpg_end;
1329 
1330         ASSERT(entryp);
1331 
1332         pg_end = pg_start + pg_num;
1333         for (key = 0; key < AGP_MAXKEYS; key++) {
1334                 if (!entryp[key].kte_bound)
1335                         continue;
1336 
1337                 kpg_end = entryp[key].kte_pgoff + entryp[key].kte_pages;
1338                 if (!((pg_end <= entryp[key].kte_pgoff) ||
1339                     (pg_start >= kpg_end)))
1340                         break;
1341         }
1342 
1343         if (key == AGP_MAXKEYS)
1344                 return (0);
1345         else
1346                 return (-1);
1347 }
1348 
1349 static int
1350 is_controlling_proc(agpgart_softstate_t *st)
1351 {
1352         ASSERT(st);
1353 
1354         if (!st->asoft_acquired) {
1355                 AGPDB_PRINT2((CE_WARN,
1356                     "ioctl_agpgart_setup: gart not acquired"));
1357                 return (-1);
1358         }
1359         if (st->asoft_curpid != ddi_get_pid()) {
1360                 AGPDB_PRINT2((CE_WARN,
                    "ioctl_agpgart_release: not the controlling process"));
1362                 return (-1);
1363         }
1364 
1365         return (0);
1366 }
1367 
static void
release_control(agpgart_softstate_t *st)
1369 {
1370         st->asoft_curpid = 0;
1371         st->asoft_acquired = 0;
1372 }
1373 
static void
acquire_control(agpgart_softstate_t *st)
1375 {
1376         st->asoft_curpid = ddi_get_pid();
1377         st->asoft_acquired = 1;
1378 }
1379 
1380 /*
1381  * agp_remove_from_gart()
1382  *
1383  * Description:
 *      This function clears a range of gart table entries, removing the
 *      translation from those agp aperture pages to physical memory pages,
 *      and syncs the table for the device.
 * Arguments:
 *      pg_offset       Starting aperture page to be unbound
 *      entries         the number of pages to be unbound
 *      dma_hdl         GART table DMA memory handle
1391  *      tablep          GART table kernel virtual address
1392  */
1393 static void
1394 agp_remove_from_gart(
1395     uint32_t pg_offset,
1396     uint32_t entries,
1397     ddi_dma_handle_t dma_hdl,
1398     uint32_t *tablep)
1399 {
1400         uint32_t items = 0;
1401         uint32_t *entryp;
1402 
1403         entryp = tablep + pg_offset;
1404         while (items < entries) {
1405                 *(entryp + items) = 0;
1406                 items++;
1407         }
1408         (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
1409             entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
1410 }
1411 
1412 /*
1413  * agp_unbind_key()
1414  *
1415  * Description:
1416  *      This function unbinds AGP memory from the gart table. It will clear
1417  *      all the gart entries related to this agp memory.
1418  *
1419  * Arguments:
1420  *      softstate               driver soft state pointer
1421  *      entryp                  key table entity pointer
1422  *
1423  * Returns:
1424  *      EINVAL          invalid key table entity pointer
1425  *      0               success
1426  *
1427  */
1428 static int
1429 agp_unbind_key(agpgart_softstate_t *softstate, keytable_ent_t *entryp)
1430 {
1431         int retval = 0;
1432 
1433         ASSERT(entryp);
1434         ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
1435 
1436         if (!entryp->kte_bound) {
1437                 AGPDB_PRINT2((CE_WARN,
1438                     "agp_unbind_key: key = 0x%x, not bound",
1439                     entryp->kte_key));
1440                 return (EINVAL);
1441         }
1442         if (entryp->kte_refcnt) {
1443                 AGPDB_PRINT2((CE_WARN,
1444                     "agp_unbind_key: memory is exported to users"));
1445                 return (EINVAL);
1446         }
1447 
1448         ASSERT((entryp->kte_pgoff + entryp->kte_pages) <=
1449             AGP_MB2PAGES(softstate->asoft_info.agpki_apersize));
1450         ASSERT((softstate->asoft_devreg.agprd_arctype != ARC_UNKNOWN));
1451 
1452         switch (softstate->asoft_devreg.agprd_arctype) {
1453         case ARC_IGD810:
1454         case ARC_IGD830:
1455                 retval = lyr_i8xx_remove_from_gtt(
1456                     entryp->kte_pgoff, entryp->kte_pages,
1457                     &softstate->asoft_devreg);
1458                 if (retval) {
1459                         AGPDB_PRINT2((CE_WARN,
1460                             "agp_unbind_key: Key = 0x%x, clear table error",
1461                             entryp->kte_key));
1462                         return (EIO);
1463                 }
1464                 break;
1465         case ARC_INTELAGP:
1466         case ARC_AMD64AGP:
1467                 agp_remove_from_gart(entryp->kte_pgoff,
1468                     entryp->kte_pages,
1469                     softstate->gart_dma_handle,
1470                     (uint32_t *)softstate->gart_vbase);
1471                 /* Flush GTLB table */
1472                 lyr_flush_gart_cache(&softstate->asoft_devreg);
1473 
1474                 break;
1475         }
1476 
1477         entryp->kte_bound = 0;
1478 
1479         return (0);
1480 }
1481 
1482 /*
1483  * agp_dealloc_kmem()
1484  *
1485  * Description:
1486  *      This function deallocates dma memory resources for userland
1487  *      applications.
1488  *
1489  * Arguments:
1490  *      entryp          keytable entity pointer
1491  */
1492 static void
1493 agp_dealloc_kmem(keytable_ent_t *entryp)
1494 {
1495         kmem_free(entryp->kte_pfnarray, sizeof (pfn_t) * entryp->kte_pages);
1496         entryp->kte_pfnarray = NULL;
1497 
1498         (void) ddi_dma_unbind_handle(KMEMP(entryp->kte_memhdl)->kmem_handle);
1499         KMEMP(entryp->kte_memhdl)->kmem_cookies_num = 0;
1500         ddi_dma_mem_free(&KMEMP(entryp->kte_memhdl)->kmem_acchdl);
1501         KMEMP(entryp->kte_memhdl)->kmem_acchdl = NULL;
1502         KMEMP(entryp->kte_memhdl)->kmem_reallen = 0;
1503         KMEMP(entryp->kte_memhdl)->kmem_kvaddr = NULL;
1504 
1505         ddi_dma_free_handle(&(KMEMP(entryp->kte_memhdl)->kmem_handle));
1506         KMEMP(entryp->kte_memhdl)->kmem_handle = NULL;
1507 
1508         kmem_free(entryp->kte_memhdl, sizeof (agp_kmem_handle_t));
1509         entryp->kte_memhdl = NULL;
1510 }
1511 
1512 /*
1513  * agp_dealloc_mem()
1514  *
1515  * Description:
1516  *      This function deallocates physical memory resources allocated for
1517  *      userland applications.
1518  *
1519  * Arguments:
1520  *      st              driver soft state pointer
1521  *      entryp          key table entity pointer
1522  *
1523  * Returns:
1524  *      -1              not a valid memory type or the memory is mapped by
 *                      userland applications
1526  *      0               success
1527  */
1528 static int
1529 agp_dealloc_mem(agpgart_softstate_t *st, keytable_ent_t *entryp)
1530 {
1531 
1532         ASSERT(entryp);
1533         ASSERT(st);
1534         ASSERT(entryp->kte_memhdl);
1535         ASSERT(mutex_owned(&st->asoft_instmutex));
1536 
1537         /* auto unbind here */
1538         if (entryp->kte_bound && !entryp->kte_refcnt) {
1539                 AGPDB_PRINT2((CE_WARN,
1540                     "agp_dealloc_mem: key=0x%x, auto unbind",
1541                     entryp->kte_key));
1542 
1543                 /*
1544                  * agp_dealloc_mem may be called indirectly by agp_detach.
1545                  * In that case agpgart_close has already been called and
1546                  * has freed the gart table. agp_unbind_key will panic if
1547                  * no valid gart table exists, so test whether the gart
1548                  * table still exists here.
1549                  */
1550                 if (st->asoft_opened)
1551                         (void) agp_unbind_key(st, entryp);
1552         }
1553         if (entryp->kte_refcnt) {
1554                 AGPDB_PRINT2((CE_WARN,
1555                     "agp_dealloc_mem: memory is exported to users"));
1556                 return (-1);
1557         }
1558 
1559         switch (entryp->kte_type) {
1560         case AGP_NORMAL:
1561         case AGP_PHYSICAL:
1562                 agp_dealloc_kmem(entryp);
1563                 break;
1564         default:
1565                 return (-1);
1566         }
1567 
1568         return (0);
1569 }
1570 
1571 /*
1572  * agp_del_allkeys()
1573  *
1574  * Description:
1575  *      This function calls agp_dealloc_mem to release all the agp memory
1576  *      resource allocated.
1577  *
1578  * Arguments:
1579  *      softstate       driver soft state pointer
1580  * Returns:
1581  *      -1              cannot free all agp memory
1582  *      0               success
1583  *
1584  */
1585 static int
1586 agp_del_allkeys(agpgart_softstate_t *softstate)
1587 {
1588         int key;
1589         int ret = 0;
1590 
1591         ASSERT(softstate);
1592         for (key = 0; key < AGP_MAXKEYS; key++) {
1593                 if (softstate->asoft_table[key].kte_memhdl != NULL) {
1594                         /*
1595                          * Check if we can free agp memory now.
1596                          * If agp memory is exported to user
1597                          * applications, agp_dealloc_mem will fail.
1598                          */
1599                         if (agp_dealloc_mem(softstate,
1600                             &softstate->asoft_table[key]))
1601                                 ret = -1;
1602                 }
1603         }
1604 
1605         return (ret);
1606 }
1607 
1608 /*
1609  * pfn2gartentry()
1610  *
1611  * Description:
1612  *      This function converts a physical address to a GART entry.
1613  *      For AMD64, the hardware only supports addresses below 40 bits,
1614  *      about 1024G of physical address space, so the largest pfn
1615  *      number is below 28 bits. Please refer to the GART and GTT entry
1616  *      format table in agpdefs.h for the entry format. Intel IGD only
1617  *      supports GTT entries below 1G. Intel AGP only supports
1618  *      GART entries below 4G.
1619  *
1620  * Arguments:
1621  *      arc_type                system agp arc type
1622  *      pfn                     page frame number
1623  *      itemv                   the entry item to be returned
1624  * Returns:
1625  *      -1                      not a valid page frame
1626  *      0                       conversion success
1627  */
1628 static int
1629 pfn2gartentry(agp_arc_type_t arc_type, pfn_t pfn, uint32_t *itemv)
1630 {
1631         uint64_t paddr;
1632 
1633         paddr = (uint64_t)pfn << AGP_PAGE_SHIFT;
1634         AGPDB_PRINT1((CE_NOTE, "checking pfn number %lu for type %d",
1635             pfn, arc_type));
1636 
1637         switch (arc_type) {
1638         case ARC_INTELAGP:
1639         {
1640                 /* Only support 32-bit hardware address */
1641                 if ((paddr & AGP_INTEL_POINTER_MASK) != 0) {
1642                         AGPDB_PRINT2((CE_WARN,
1643                             "INTEL AGP hardware only supports 32 bits"));
1644                         return (-1);
1645                 }
1646                 *itemv =  (pfn << AGP_PAGE_SHIFT) | AGP_ENTRY_VALID;
1647 
1648                 break;
1649         }
1650         case ARC_AMD64AGP:
1651         {
1652                 uint32_t value1, value2;
1653                 /* Physaddr should not exceed 40-bit */
1654                 if ((paddr & AMD64_POINTER_MASK) != 0) {
1655                         AGPDB_PRINT2((CE_WARN,
1656                             "AMD64 GART hardware only supports 40 bits"));
1657                         return (-1);
1658                 }
1659                 value1 = (uint32_t)pfn >> 20;
1660                 value1 <<= 4;
1661                 value2 = (uint32_t)pfn << 12;
1662 
1663                 *itemv = value1 | value2 | AMD64_ENTRY_VALID;
1664                 break;
1665         }
1666         case ARC_IGD810:
1667                 if ((paddr & I810_POINTER_MASK) != 0) {
1668                         AGPDB_PRINT2((CE_WARN,
1669                             "Intel i810 only supports 30 bits"));
1670                         return (-1);
1671                 }
1672                 break;
1673 
1674         case ARC_IGD830:
1675                 if ((paddr & GTT_POINTER_MASK) != 0) {
1676                         AGPDB_PRINT2((CE_WARN,
1677                             "Intel IGD only supports 32 bits"));
1678                         return (-1);
1679                 }
1680                 break;
1681         default:
1682                 AGPDB_PRINT2((CE_WARN,
1683                     "pfn2gartentry: arc type = %d, not support", arc_type));
1684                 return (-1);
1685         }
1686         return (0);
1687 }
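
/*
 * Illustrative sketch only (hypothetical pfn value, not driver code):
 * for an AMD64 pfn of 0x1234567, the entry assembled above is
 *
 *      value1 = (0x1234567 >> 20) << 4;                -> 0x120
 *      value2 = (uint32_t)0x1234567 << 12;             -> 0x34567000
 *      *itemv = value1 | value2 | AMD64_ENTRY_VALID;
 *
 * so pfn bits 19:0 land in entry bits 31:12 and pfn bits 27:20 land in
 * entry bits 11:4, as described in the entry format table in agpdefs.h.
 */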
1688 
1689 /*
1690  * Check allocated physical pages validity, only called in DEBUG
1691  * mode.
1692  */
1693 static int
1694 agp_check_pfns(agp_arc_type_t arc_type, pfn_t *pfnarray, int items)
1695 {
1696         int count;
1697         uint32_t ret;
1698 
1699         for (count = 0; count < items; count++) {
1700                 if (pfn2gartentry(arc_type, pfnarray[count], &ret))
1701                         break;
1702         }
1703         if (count < items)
1704                 return (-1);
1705         else
1706                 return (0);
1707 }
1708 
1709 /*
1710  * kmem_getpfns()
1711  *
1712  * Description:
1713  *      This function gets page frame numbers from dma handle.
1714  *
1715  * Arguments:
1716  *      dma_handle              dma handle allocated by ddi_dma_alloc_handle
1717  *      dma_cookiep             dma cookie pointer
1718  *      cookies_num             cookies number
1719  *      pfnarray                array to store page frames
1720  *
1721  * Returns:
1722  *      0               success
1723  */
1724 static int
1725 kmem_getpfns(
1726     ddi_dma_handle_t dma_handle,
1727     ddi_dma_cookie_t *dma_cookiep,
1728     int cookies_num,
1729     pfn_t *pfnarray)
1730 {
1731         int     num_cookies;
1732         int     index = 0;
1733 
1734         num_cookies = cookies_num;
1735 
1736         while (num_cookies > 0) {
1737                 uint64_t ck_startaddr, ck_length, ck_end;
1738                 ck_startaddr = dma_cookiep->dmac_address;
1739                 ck_length = dma_cookiep->dmac_size;
1740 
1741                 ck_end = ck_startaddr + ck_length;
1742                 while (ck_startaddr < ck_end) {
1743                         pfnarray[index] = (pfn_t)ck_startaddr >> AGP_PAGE_SHIFT;
1744                         ck_startaddr += AGP_PAGE_SIZE;
1745                         index++;
1746                 }
1747 
1748                 num_cookies--;
1749                 if (num_cookies > 0) {
1750                         ddi_dma_nextcookie(dma_handle, dma_cookiep);
1751                 }
1752         }
1753 
1754         return (0);
1755 }
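
/*
 * For example (hypothetical cookie values, assuming the usual 4K AGP page
 * size, i.e. AGP_PAGE_SHIFT of 12): a single cookie with
 * dmac_address = 0x10000000 and dmac_size = 0x3000 expands to three
 * entries in pfnarray, one per AGP_PAGE_SIZE chunk:
 *
 *      pfnarray[0] = 0x10000000 >> AGP_PAGE_SHIFT;     -> 0x10000
 *      pfnarray[1] = 0x10001000 >> AGP_PAGE_SHIFT;     -> 0x10001
 *      pfnarray[2] = 0x10002000 >> AGP_PAGE_SHIFT;     -> 0x10002
 */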
1756 
1757 static int
1758 copyinfo(agpgart_softstate_t *softstate, agp_info_t *info)
1759 {
1760         switch (softstate->asoft_devreg.agprd_arctype) {
1761         case ARC_IGD810:
1762         case ARC_IGD830:
1763                 info->agpi_version.agpv_major = 0;
1764                 info->agpi_version.agpv_minor = 0;
1765                 info->agpi_devid = softstate->asoft_info.agpki_mdevid;
1766                 info->agpi_mode = 0;
1767                 break;
1768         case ARC_INTELAGP:
1769         case ARC_AMD64AGP:
1770                 info->agpi_version = softstate->asoft_info.agpki_tver;
1771                 info->agpi_devid = softstate->asoft_info.agpki_tdevid;
1772                 info->agpi_mode = softstate->asoft_info.agpki_tstatus;
1773                 break;
1774         default:
1775                 AGPDB_PRINT2((CE_WARN, "copyinfo: UNKNOWN ARC"));
1776                 return (-1);
1777         }
1778         /*
1779          * 64bit->32bit conversion possible
1780          */
1781         info->agpi_aperbase = softstate->asoft_info.agpki_aperbase;
1782         info->agpi_apersize = softstate->asoft_info.agpki_apersize;
1783         info->agpi_pgtotal = softstate->asoft_pgtotal;
1784         info->agpi_pgsystem = info->agpi_pgtotal;
1785         info->agpi_pgused = softstate->asoft_pgused;
1786 
1787         return (0);
1788 }
1789 
1790 static uint32_t
1791 agp_v2_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1792 {
1793         uint32_t cmd;
1794         int rq, sba, over4g, fw, rate;
1795 
1796         /*
1797          * tstatus: target device status
1798          * mstatus: master device status
1799          * mode: the agp mode to be sent
1800          */
1801 
1802         /*
1803          * RQ - Request Queue size
1804          * Set RQ to the min of mode and tstatus.
1805          * If mode requests a larger RQ than the hardware can support,
1806          * use the max RQ the hardware can support.
1807          * tstatus & AGPSTAT_RQ_MASK is the max RQ the hardware can support.
1808          * The corelogic will enqueue agp transactions.
1809          */
1810         rq = mode & AGPSTAT_RQ_MASK;
1811         if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1812                 rq = tstatus & AGPSTAT_RQ_MASK;
1813 
1814         /*
1815          * SBA - Sideband Addressing
1816          *
1817          * Sideband Addressing provides an additional bus to pass requests
1818          * (address and command) to the target from the master.
1819          *
1820          * set SBA if all three support it
1821          */
1822         sba = (tstatus & AGPSTAT_SBA) & (mstatus & AGPSTAT_SBA)
1823             & (mode & AGPSTAT_SBA);
1824 
1825         /* set OVER4G  if all three support it */
1826         over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1827             & (mode & AGPSTAT_OVER4G);
1828 
1829         /*
1830          * FW - fast write
1831          *
1832          * acceleration of memory write transactions from the corelogic to the
1833          * A.G.P. master device acting like a PCI target.
1834          *
1835          * set FW if all three support it
1836          */
1837         fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1838             & (mode & AGPSTAT_FW);
1839 
1840         /*
1841          * figure out the max rate
1842          * AGP v2 support: 4X, 2X, 1X speed
1843          * status bits          meaning
1844          * ---------------------------------------------
1845          * 7:4                  others
1846          * 3                    0 stands for AGP v2 support
1847          * 2:0                  001:1X, 010:2X, 100:4X
1848          * ----------------------------------------------
1849          */
1850         rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1851             & (mode & AGPSTAT_RATE_MASK);
1852         if (rate & AGP2_RATE_4X)
1853                 rate = AGP2_RATE_4X;
1854         else if (rate & AGP2_RATE_2X)
1855                 rate = AGP2_RATE_2X;
1856         else
1857                 rate = AGP2_RATE_1X;
1858 
1859         cmd = rq | sba | over4g | fw | rate;
1860         /* enable agp mode */
1861         cmd |= AGPCMD_AGPEN;
1862 
1863         return (cmd);
1864 }
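
/*
 * A minimal worked example with hypothetical status values: if the target
 * advertises rates 1X|2X|4X (0x7), the master advertises 1X|2X (0x3) and
 * the requested mode asks for all rates (0x7), then
 *
 *      rate = 0x7 & 0x3 & 0x7 = 0x3
 *
 * and the code above picks the highest common rate, 2X. If the mode had
 * asked for 4X only (0x4), the AND would be 0 and the code would fall
 * back to 1X.
 */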
1865 
1866 static uint32_t
1867 agp_v3_setup(uint32_t tstatus, uint32_t mstatus, uint32_t mode)
1868 {
1869         uint32_t cmd = 0;
1870         uint32_t rq, arqsz, cal, sba, over4g, fw, rate;
1871 
1872         /*
1873          * tstatus: target device status
1874          * mstatus: master device status
1875          * mode: the agp mode to be set
1876          */
1877 
1878         /*
1879          * RQ - Request Queue size
1880          * Set RQ to the min of mode and tstatus
1881          * If mode requests a larger RQ than the hardware can support,
1882          * use the max RQ the hardware can support.
1883          * tstatus & AGPSTAT_RQ_MASK is the max RQ the hardware can support.
1884          * The corelogic will enqueue agp transactions.
1885          */
1886         rq = mode & AGPSTAT_RQ_MASK;
1887         if ((tstatus & AGPSTAT_RQ_MASK) < rq)
1888                 rq = tstatus & AGPSTAT_RQ_MASK;
1889 
1890         /*
1891          * ARQSZ - Asynchronous Request Queue size
1892          * Set the value equal to tstatus.
1893          * Don't allow the mode register to override values
1894          */
1895         arqsz = tstatus & AGPSTAT_ARQSZ_MASK;
1896 
1897         /*
1898          * CAL - Calibration cycle
1899          * Set to the min of tstatus and mstatus
1900          * Don't allow override by mode register
1901          */
1902         cal = tstatus & AGPSTAT_CAL_MASK;
1903         if ((mstatus & AGPSTAT_CAL_MASK) < cal)
1904                 cal = mstatus & AGPSTAT_CAL_MASK;
1905 
1906         /*
1907          * SBA - Sideband Addressing
1908          *
1909          * Sideband Addressing provides an additional bus to pass requests
1910          * (address and command) to the target from the master.
1911          *
1912          * SBA in agp v3.0 must be set
1913          */
1914         sba = AGPCMD_SBAEN;
1915 
1916         /* GART64B is not set since no hardware supports it now */
1917 
1918         /* Set OVER4G if all three support it */
1919         over4g = (tstatus & AGPSTAT_OVER4G) & (mstatus & AGPSTAT_OVER4G)
1920             & (mode & AGPSTAT_OVER4G);
1921 
1922         /*
1923          * FW - fast write
1924          *
1925          * Acceleration of memory write transactions from the corelogic to the
1926          * A.G.P. master device acting like a PCI target.
1927          *
1928          * Set FW if the target, master and mode all support it
1929          */
1930         fw = (tstatus & AGPSTAT_FW) & (mstatus & AGPSTAT_FW)
1931             & (mode & AGPSTAT_FW);
1932 
1933         /*
1934          * Figure out the max rate
1935          *
1936          * AGP v3 support: 8X, 4X speed
1937          *
1938          * status bits          meaning
1939          * ---------------------------------------------
1940          * 7:4                  others
1941          * 3                    1 stands for AGP v3 support
1942          * 2:0                  001:4X, 010:8X, 011:4X,8X
1943          * ----------------------------------------------
1944          */
1945         rate = (tstatus & AGPSTAT_RATE_MASK) & (mstatus & AGPSTAT_RATE_MASK)
1946             & (mode & AGPSTAT_RATE_MASK);
1947         if (rate & AGP3_RATE_8X)
1948                 rate = AGP3_RATE_8X;
1949         else
1950                 rate = AGP3_RATE_4X;
1951 
1952         cmd = rq | arqsz | cal | sba | over4g | fw | rate;
1953         /* Enable AGP mode */
1954         cmd |= AGPCMD_AGPEN;
1955 
1956         return (cmd);
1957 }
1958 
1959 static int
1960 agp_setup(agpgart_softstate_t *softstate, uint32_t mode)
1961 {
1962         uint32_t tstatus, mstatus;
1963         uint32_t agp_mode;
1964 
1965         tstatus = softstate->asoft_info.agpki_tstatus;
1966         mstatus = softstate->asoft_info.agpki_mstatus;
1967 
1968         /*
1969          * There are three kinds of AGP mode: AGP mode 1.0, 2.0 and 3.0.
1970          * AGP mode 2.0 is fully compatible with AGP mode 1.0, so we
1971          * only check 2.0 and 3.0 mode here. An AGP 3.0 device can work
1972          * in either AGP 2.0 or AGP 3.0 mode. By checking the AGP status
1973          * register, we can tell which mode it is working in. The working
1974          * modes of the AGP master and AGP target must be consistent, that
1975          * is, both of them must work in AGP 3.0 mode or in AGP 2.0 mode.
1976          */
1977         if ((softstate->asoft_info.agpki_tver.agpv_major == 3) &&
1978             (tstatus & AGPSTAT_MODE3)) {
1979                 /* Master device should be 3.0 mode, too */
1980                 if ((softstate->asoft_info.agpki_mver.agpv_major != 3) ||
1981                     ((mstatus & AGPSTAT_MODE3) == 0))
1982                         return (EIO);
1983 
1984                 agp_mode = agp_v3_setup(tstatus, mstatus, mode);
1985                 /* Write to the AGPCMD register of target and master devices */
1986                 if (lyr_set_agp_cmd(agp_mode,
1987                     &softstate->asoft_devreg))
1988                         return (EIO);
1989 
1990                 softstate->asoft_mode = agp_mode;
1991 
1992                 return (0);
1993         }
1994 
1995         /*
1996          * If the agp target device doesn't work in AGP 3.0 mode,
1997          * it must work in AGP 2.0 mode, and we must make sure the
1998          * master device works in AGP 2.0 mode too.
1999          */
2000         if ((softstate->asoft_info.agpki_mver.agpv_major == 3) &&
2001             (mstatus & AGPSTAT_MODE3))
2002                 return (EIO);
2003 
2004         agp_mode = agp_v2_setup(tstatus, mstatus, mode);
2005         if (lyr_set_agp_cmd(agp_mode, &softstate->asoft_devreg))
2006                 return (EIO);
2007         softstate->asoft_mode = agp_mode;
2008 
2009         return (0);
2010 }
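
/*
 * Summary of the mode selection above (T = target, M = master; "v3" means
 * the device reports major version 3 and has AGPSTAT_MODE3 set):
 *
 *      T v3    M v3    result
 *      ---------------------------------------
 *      yes     yes     agp_v3_setup() command
 *      yes     no      EIO
 *      no      yes     EIO
 *      no      no      agp_v2_setup() command
 */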
2011 
2012 /*
2013  * agp_alloc_kmem()
2014  *
2015  * Description:
2016  *      This function allocates physical memory for userland applications
2017  *      through ddi interfaces. It can also be called to allocate small
2018  *      physically contiguous pages, usually tens of kilobytes.
2019  *
2020  * Arguments:
2021  *      softstate       driver soft state pointer
2022  *      length          memory size
2023  *
2024  * Returns:
2025  *      entryp          new keytable entity pointer
2026  *      NULL            no keytable slot available or no physical
2027  *                      memory available
2028  */
2029 static keytable_ent_t *
2030 agp_alloc_kmem(agpgart_softstate_t *softstate, size_t length, int type)
2031 {
2032         keytable_ent_t  keyentry;
2033         keytable_ent_t  *entryp;
2034         int             ret;
2035 
2036         ASSERT(AGP_ALIGNED(length));
2037 
2038         bzero(&keyentry, sizeof (keytable_ent_t));
2039 
2040         keyentry.kte_pages = AGP_BYTES2PAGES(length);
2041         keyentry.kte_type = type;
2042 
2043         /*
2044          * Set dma_attr_sgllen to assure contiguous physical pages
2045          */
2046         if (type == AGP_PHYSICAL)
2047                 agpgart_dma_attr.dma_attr_sgllen = 1;
2048         else
2049                 agpgart_dma_attr.dma_attr_sgllen = (int)keyentry.kte_pages;
2050 
2051         /* 4k size pages */
2052         keyentry.kte_memhdl = kmem_zalloc(sizeof (agp_kmem_handle_t), KM_SLEEP);
2053 
2054         if (ddi_dma_alloc_handle(softstate->asoft_dip,
2055             &agpgart_dma_attr,
2056             DDI_DMA_SLEEP, NULL,
2057             &(KMEMP(keyentry.kte_memhdl)->kmem_handle))) {
2058                 AGPDB_PRINT2((CE_WARN,
2059                     "agp_alloc_kmem: ddi_dma_alloc_handle error"));
2060                 goto err4;
2061         }
2062 
2063         if ((ret = ddi_dma_mem_alloc(
2064             KMEMP(keyentry.kte_memhdl)->kmem_handle,
2065             length,
2066             &gart_dev_acc_attr,
2067             DDI_DMA_CONSISTENT,
2068             DDI_DMA_SLEEP, NULL,
2069             &KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2070             &KMEMP(keyentry.kte_memhdl)->kmem_reallen,
2071             &KMEMP(keyentry.kte_memhdl)->kmem_acchdl)) != 0) {
2072                 AGPDB_PRINT2((CE_WARN,
2073                     "agp_alloc_kmem: ddi_dma_mem_alloc error"));
2074 
2075                 goto err3;
2076         }
2077 
2078         ret = ddi_dma_addr_bind_handle(
2079             KMEMP(keyentry.kte_memhdl)->kmem_handle,
2080             NULL,
2081             KMEMP(keyentry.kte_memhdl)->kmem_kvaddr,
2082             length,
2083             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2084             DDI_DMA_SLEEP,
2085             NULL,
2086             &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2087             &KMEMP(keyentry.kte_memhdl)->kmem_cookies_num);
2088 
2089         /*
2090          * Even if dma_attr_sgllen is 1, ddi_dma_addr_bind_handle may return
2091          * more than one cookie, so we check for this in the if statement.
2092          */
2093 
2094         if ((ret != DDI_DMA_MAPPED) ||
2095             ((agpgart_dma_attr.dma_attr_sgllen == 1) &&
2096             (KMEMP(keyentry.kte_memhdl)->kmem_cookies_num != 1))) {
2097                 AGPDB_PRINT2((CE_WARN,
2098                     "agp_alloc_kmem: can not alloc physical memory properly"));
2099                 goto err2;
2100         }
2101 
2102         keyentry.kte_pfnarray = (pfn_t *)kmem_zalloc(sizeof (pfn_t) *
2103             keyentry.kte_pages, KM_SLEEP);
2104 
2105         if (kmem_getpfns(
2106             KMEMP(keyentry.kte_memhdl)->kmem_handle,
2107             &KMEMP(keyentry.kte_memhdl)->kmem_dcookie,
2108             KMEMP(keyentry.kte_memhdl)->kmem_cookies_num,
2109             keyentry.kte_pfnarray)) {
2110                 AGPDB_PRINT2((CE_WARN, "agp_alloc_kmem: get pfn array error"));
2111                 goto err1;
2112         }
2113 
2114         ASSERT(!agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2115             keyentry.kte_pfnarray, keyentry.kte_pages));
2116         if (agp_check_pfns(softstate->asoft_devreg.agprd_arctype,
2117             keyentry.kte_pfnarray, keyentry.kte_pages))
2118                 goto err1;
2119         entryp = agp_fill_empty_keyent(softstate, &keyentry);
2120         if (!entryp) {
2121                 AGPDB_PRINT2((CE_WARN,
2122                     "agp_alloc_kmem: agp_fill_empty_keyent error"));
2123 
2124                 goto err1;
2125         }
2126         ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2127 
2128         return (entryp);
2129 
2130 err1:
2131         kmem_free(keyentry.kte_pfnarray, sizeof (pfn_t) * keyentry.kte_pages);
2132         keyentry.kte_pfnarray = NULL;
2133         (void) ddi_dma_unbind_handle(KMEMP(keyentry.kte_memhdl)->kmem_handle);
2134         KMEMP(keyentry.kte_memhdl)->kmem_cookies_num = 0;
2135 err2:
2136         ddi_dma_mem_free(&KMEMP(keyentry.kte_memhdl)->kmem_acchdl);
2137         KMEMP(keyentry.kte_memhdl)->kmem_acchdl = NULL;
2138         KMEMP(keyentry.kte_memhdl)->kmem_reallen = 0;
2139         KMEMP(keyentry.kte_memhdl)->kmem_kvaddr = NULL;
2140 err3:
2141         ddi_dma_free_handle(&(KMEMP(keyentry.kte_memhdl)->kmem_handle));
2142         KMEMP(keyentry.kte_memhdl)->kmem_handle = NULL;
2143 err4:
2144         kmem_free(keyentry.kte_memhdl, sizeof (agp_kmem_handle_t));
2145         keyentry.kte_memhdl = NULL;
2146         return (NULL);
2147 
2148 }
2149 
2150 /*
2151  * agp_alloc_mem()
2152  *
2153  * Description:
2154  *      This function allocates physical memory for userland applications.
2155  *      In order to save kernel virtual space, we use the direct mapping
2156  *      memory interface if it is available.
2157  *
2158  * Arguments:
2159  *      st              driver soft state pointer
2160  *      length          memory size
2161  *      type            AGP_NORMAL: normal agp memory, AGP_PHYSICAL: special
2162  *                      memory type for the intel i810 IGD
2163  *
2164  * Returns:
2165  *      NULL    Invalid memory type or can not allocate memory
2166  *      Keytable entry pointer returned by agp_alloc_kmem
2167  */
2168 static keytable_ent_t *
2169 agp_alloc_mem(agpgart_softstate_t *st, size_t length, int type)
2170 {
2171 
2172         /*
2173          * The AGP_PHYSICAL type requires contiguous physical pages exported
2174          * to X drivers, like the i810 HW cursor and ARGB cursor. The number
2175          * of pages needed is usually small and contiguous, 4K or 16K, so we
2176          * use the DDI interface to allocate such memory. X then uses the
2177          * xsvc driver to map this memory into its own address space.
2178          */
2179         ASSERT(st);
2180 
2181         switch (type) {
2182         case AGP_NORMAL:
2183         case AGP_PHYSICAL:
2184                 return (agp_alloc_kmem(st, length, type));
2185         default:
2186                 return (NULL);
2187         }
2188 }
2189 
2190 /*
2191  * free_gart_table()
2192  *
2193  * Description:
2194  *      This function frees the gart table memory allocated by driver.
2195  *      Must disable gart table before calling this function.
2196  *
2197  * Arguments:
2198  *      softstate               driver soft state pointer
2199  *
2200  */
2201 static void
2202 free_gart_table(agpgart_softstate_t *st)
2203 {
2204 
2205         if (st->gart_dma_handle == NULL)
2206                 return;
2207 
2208         (void) ddi_dma_unbind_handle(st->gart_dma_handle);
2209         ddi_dma_mem_free(&st->gart_dma_acc_handle);
2210         st->gart_dma_acc_handle = NULL;
2211         ddi_dma_free_handle(&st->gart_dma_handle);
2212         st->gart_dma_handle = NULL;
2213         st->gart_vbase = 0;
2214         st->gart_size = 0;
2215 }
2216 
2217 /*
2218  * alloc_gart_table()
2219  *
2220  * Description:
2221  *      This function allocates one physically contiguous gart table.
2222  *      INTEL integrated video devices except the i810 have their own
2223  *      video BIOS; no gart table needs to be allocated for them.
2224  *
2225  * Arguments:
2226  *      st              driver soft state pointer
2227  *
2228  * Returns:
2229  *      0               success
2230  *      -1              cannot allocate gart table
2231  */
2232 static int
2233 alloc_gart_table(agpgart_softstate_t *st)
2234 {
2235         int                     num_pages;
2236         size_t                  table_size;
2237         int                     ret = DDI_SUCCESS;
2238         ddi_dma_cookie_t        cookie;
2239         uint32_t                num_cookies;
2240 
2241         num_pages = AGP_MB2PAGES(st->asoft_info.agpki_apersize);
2242 
2243         /*
2244          * Only 40-bit maximum physical memory is supported by today's
2245          * AGP hardware (32-bit gart tables can hold 40-bit memory addresses).
2246          * No one supports 64-bit gart entries now, so the size of gart
2247          * entries defaults to 32-bit though AGP3.0 specifies the possibility
2248          * of 64-bit gart entries.
2249          */
2250 
2251         table_size = num_pages * (sizeof (uint32_t));
2252 
2253         /*
2254          * Only AMD64 can put gart table above 4G, 40 bits at maximum
2255          */
2256         if (st->asoft_devreg.agprd_arctype == ARC_AMD64AGP)
2257                 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffffLL;
2258         else
2259                 garttable_dma_attr.dma_attr_addr_hi = 0xffffffffU;
2260         /* Allocate physically contiguous page frames for the gart table */
2261         if (ret = ddi_dma_alloc_handle(st->asoft_dip,
2262             &garttable_dma_attr,
2263             DDI_DMA_SLEEP,
2264             NULL, &st->gart_dma_handle)) {
2265                 AGPDB_PRINT2((CE_WARN,
2266                     "alloc_gart_table: ddi_dma_alloc_handle failed"));
2267                 goto err3;
2268         }
2269 
2270         if (ret = ddi_dma_mem_alloc(st->gart_dma_handle,
2271             table_size,
2272             &gart_dev_acc_attr,
2273             DDI_DMA_CONSISTENT,
2274             DDI_DMA_SLEEP, NULL,
2275             &st->gart_vbase,
2276             &st->gart_size,
2277             &st->gart_dma_acc_handle)) {
2278                 AGPDB_PRINT2((CE_WARN,
2279                     "alloc_gart_table: ddi_dma_mem_alloc failed"));
2280                 goto err2;
2281 
2282         }
2283 
2284         ret = ddi_dma_addr_bind_handle(st->gart_dma_handle,
2285             NULL, st->gart_vbase,
2286             table_size,
2287             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2288             DDI_DMA_SLEEP, NULL,
2289             &cookie,  &num_cookies);
2290 
2291         st->gart_pbase = cookie.dmac_address;
2292 
2293         if ((ret != DDI_DMA_MAPPED) || (num_cookies != 1)) {
2294                 if (num_cookies > 1)
2295                         (void) ddi_dma_unbind_handle(st->gart_dma_handle);
2296                 AGPDB_PRINT2((CE_WARN,
2297                     "alloc_gart_table: alloc contiguous phys memory failed"));
2298                 goto err1;
2299         }
2300 
2301         return (0);
2302 err1:
2303         ddi_dma_mem_free(&st->gart_dma_acc_handle);
2304         st->gart_dma_acc_handle = NULL;
2305 err2:
2306         ddi_dma_free_handle(&st->gart_dma_handle);
2307         st->gart_dma_handle = NULL;
2308 err3:
2309         st->gart_pbase = 0;
2310         st->gart_size = 0;
2311         st->gart_vbase = 0;
2312 
2313         return (-1);
2314 }
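
/*
 * Sizing example (hypothetical aperture size, 4K aperture pages): a 64MB
 * aperture covers AGP_MB2PAGES(64) = 16384 aperture pages, so the gart
 * table above needs 16384 * sizeof (uint32_t) = 64KB of physically
 * contiguous memory, placed below 4G unless the arc type is ARC_AMD64AGP
 * (40-bit limit).
 */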
2315 
2316 /*
2317  * agp_add_to_gart()
2318  *
2319  * Description:
2320  *      This function fills the gart table entries from a given page frame
2321  *      number array and sets up the agp aperture page to physical memory
2322  *      page translation.
2323  * Arguments:
2324  *      type            valid system arc types ARC_INTELAGP or
2325  *                      ARC_AMD64AGP
2326  *      pfnarray        allocated physical page frame number array
2327  *      pg_offset       agp aperture start page to be bound
2328  *      entries         the number of pages to be bound
2329  *      dma_hdl         gart table dma memory handle
2330  *      tablep          gart table kernel virtual address
2331  * Returns:
2332  *      -1              failed
2333  *      0               success
2334  */
2335 static int
2336 agp_add_to_gart(
2337     agp_arc_type_t type,
2338     pfn_t *pfnarray,
2339     uint32_t pg_offset,
2340     uint32_t entries,
2341     ddi_dma_handle_t dma_hdl,
2342     uint32_t *tablep)
2343 {
2344         int items = 0;
2345         uint32_t *entryp;
2346         uint32_t itemv;
2347 
2348         entryp = tablep + pg_offset;
2349         while (items < entries) {
2350                 if (pfn2gartentry(type, pfnarray[items], &itemv))
2351                         break;
2352                 *(entryp + items) = itemv;
2353                 items++;
2354         }
2355         if (items < entries)
2356                 return (-1);
2357 
2358         (void) ddi_dma_sync(dma_hdl, pg_offset * sizeof (uint32_t),
2359             entries * sizeof (uint32_t), DDI_DMA_SYNC_FORDEV);
2360 
2361         return (0);
2362 }
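
/*
 * For instance (hypothetical values): binding 4 pages at aperture page
 * offset 0x100 writes gart entries tablep[0x100] through tablep[0x103]
 * and then syncs only those 16 bytes, starting at byte offset 0x400 of
 * the table, out to the device.
 */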
2363 
2364 /*
2365  * agp_bind_key()
2366  *
2367  * Description:
2368  *      This function calls low level gart table access functions to
2369  *      set up the gart table translation. It also does some sanity
2370  *      checking on the key table entry.
2371  *
2372  * Arguments:
2373  *      softstate               driver soft state pointer
2374  *      keyent                  key table entity pointer to be bound
2375  *      pg_offset               aperture start page to be bound
2376  * Returns:
2377  *      EINVAL                  not a valid operation
2378  */
2379 static int
2380 agp_bind_key(agpgart_softstate_t *softstate,
2381     keytable_ent_t  *keyent, uint32_t  pg_offset)
2382 {
2383         uint64_t pg_end;
2384         int ret = 0;
2385 
2386         ASSERT(keyent);
2387         ASSERT((keyent->kte_key >= 0) && (keyent->kte_key < AGP_MAXKEYS));
2388         ASSERT(mutex_owned(&softstate->asoft_instmutex));
2389 
2390         pg_end = pg_offset + keyent->kte_pages;
2391 
2392         if (pg_end > AGP_MB2PAGES(softstate->asoft_info.agpki_apersize)) {
2393                 AGPDB_PRINT2((CE_WARN,
2394                     "agp_bind_key: key=0x%x, exceeds aperture range",
2395                     keyent->kte_key));
2396 
2397                 return (EINVAL);
2398         }
2399 
2400         if (agp_check_off(softstate->asoft_table,
2401             pg_offset, keyent->kte_pages)) {
2402                 AGPDB_PRINT2((CE_WARN,
2403                     "agp_bind_key: pg_offset=0x%x, pages=0x%lx overlapped",
2404                     pg_offset, keyent->kte_pages));
2405                 return (EINVAL);
2406         }
2407 
2408         ASSERT(keyent->kte_pfnarray != NULL);
2409 
2410         switch (softstate->asoft_devreg.agprd_arctype) {
2411         case ARC_IGD810:
2412         case ARC_IGD830:
2413                 ret = lyr_i8xx_add_to_gtt(pg_offset, keyent,
2414                     &softstate->asoft_devreg);
2415                 if (ret)
2416                         return (EIO);
2417                 break;
2418         case ARC_INTELAGP:
2419         case ARC_AMD64AGP:
2420                 ret =  agp_add_to_gart(
2421                     softstate->asoft_devreg.agprd_arctype,
2422                     keyent->kte_pfnarray,
2423                     pg_offset,
2424                     keyent->kte_pages,
2425                     softstate->gart_dma_handle,
2426                     (uint32_t *)softstate->gart_vbase);
2427                 if (ret)
2428                         return (EINVAL);
2429                 /* Flush GTLB table */
2430                 lyr_flush_gart_cache(&softstate->asoft_devreg);
2431                 break;
2432         default:
2433                 AGPDB_PRINT2((CE_WARN,
2434                     "agp_bind_key: arc type = 0x%x unsupported",
2435                     softstate->asoft_devreg.agprd_arctype));
2436                 return (EINVAL);
2437         }
2438         return (0);
2439 }
2440 
2441 static int
2442 agpgart_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2443 {
2444         int instance;
2445         agpgart_softstate_t *softstate;
2446 
2447         if (cmd != DDI_ATTACH) {
2448                 AGPDB_PRINT2((CE_WARN,
2449                     "agpgart_attach: only attach op supported"));
2450                 return (DDI_FAILURE);
2451         }
2452         instance = ddi_get_instance(dip);
2453 
2454         if (ddi_soft_state_zalloc(agpgart_glob_soft_handle, instance)
2455             != DDI_SUCCESS) {
2456                 AGPDB_PRINT2((CE_WARN,
2457                     "agpgart_attach: soft state zalloc failed"));
2458                 goto err1;
2459 
2460         }
2461         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2462         mutex_init(&softstate->asoft_instmutex, NULL, MUTEX_DRIVER, NULL);
2463         softstate->asoft_dip = dip;
2464         /*
2465          * Allocate an LDI identifier for the agpgart driver;
2466          * the agpgart driver is the kernel consumer.
2467          */
2468         if (ldi_ident_from_dip(dip, &softstate->asoft_li)) {
2469                 AGPDB_PRINT2((CE_WARN,
2470                     "agpgart_attach: LDI identifier allocation failed"));
2471                 goto err2;
2472         }
2473 
2474         softstate->asoft_devreg.agprd_arctype = ARC_UNKNOWN;
2475         /* Install agp kstat */
2476         if (agp_init_kstats(softstate)) {
2477                 AGPDB_PRINT2((CE_WARN, "agpgart_attach: init kstats error"));
2478                 goto err3;
2479         }
2480         /*
2481          * devfs will create /dev/agpgart
2482          * and  /devices/agpgart:agpgart
2483          */
2484 
2485         if (ddi_create_minor_node(dip, AGPGART_DEVNODE, S_IFCHR,
2486             AGP_INST2MINOR(instance),
2487             DDI_NT_AGP_PSEUDO, 0)) {
2488                 AGPDB_PRINT2((CE_WARN,
2489                     "agpgart_attach: Can not create minor node"));
2490                 goto err4;
2491         }
2492 
2493         softstate->asoft_table = kmem_zalloc(
2494             AGP_MAXKEYS * (sizeof (keytable_ent_t)),
2495             KM_SLEEP);
2496 
2497         list_head_init(&softstate->mapped_list);
2498 
2499         return (DDI_SUCCESS);
2500 err4:
2501         agp_fini_kstats(softstate);
2502 err3:
2503         ldi_ident_release(softstate->asoft_li);
2504 err2:
2505         ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2506 err1:
2507         return (DDI_FAILURE);
2508 }
2509 
2510 static int
2511 agpgart_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2512 {
2513         int instance;
2514         agpgart_softstate_t *st;
2515 
2516         instance = ddi_get_instance(dip);
2517 
2518         st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2519 
2520         if (cmd != DDI_DETACH)
2521                 return (DDI_FAILURE);
2522 
2523         /*
2524          * The caller should explicitly free all the memory it allocated.
2525          * Here we release any memory the caller allocated but never
2526          * properly freed. Taking the mutex here makes the mutex-ownership
2527          * assertion in agp_dealloc_mem succeed.
2528          */
2529         mutex_enter(&st->asoft_instmutex);
2530         if (agp_del_allkeys(st)) {
2531                 AGPDB_PRINT2((CE_WARN, "agpgart_detach: agp_del_allkeys err"));
2532                 AGPDB_PRINT2((CE_WARN,
2533                     "you might free agp memory exported to your applications"));
2534 
2535                 mutex_exit(&st->asoft_instmutex);
2536                 return (DDI_FAILURE);
2537         }
2538         mutex_exit(&st->asoft_instmutex);
2539         if (st->asoft_table) {
2540                 kmem_free(st->asoft_table,
2541                     AGP_MAXKEYS * (sizeof (keytable_ent_t)));
2542                 st->asoft_table = 0;
2543         }
2544 
2545         struct list_head        *entry, *temp,  *head;
2546         igd_gtt_seg_t   *gttseg;
2547         list_head_for_each_safe(entry, temp, &st->mapped_list) {
2548                 gttseg = entry->gttseg;
2549                 list_head_del(entry);
2550                 kmem_free(entry, sizeof (*entry));
2551                 kmem_free(gttseg->igs_phyaddr,
2552                     sizeof (uint32_t) * gttseg->igs_npage);
2553                 kmem_free(gttseg, sizeof (igd_gtt_seg_t));
2554         }
2555         head = &st->mapped_list;
2556         kmem_free(head->next,
2557             AGP_HASH_NODE * sizeof (struct list_head));
2558         head->next = NULL;
2559 
2560         ddi_remove_minor_node(dip, AGPGART_DEVNODE);
2561         agp_fini_kstats(st);
2562         ldi_ident_release(st->asoft_li);
2563         mutex_destroy(&st->asoft_instmutex);
2564         ddi_soft_state_free(agpgart_glob_soft_handle, instance);
2565 
2566         return (DDI_SUCCESS);
2567 }
2568 
2569 /*ARGSUSED*/
2570 static int
2571 agpgart_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
2572     void **resultp)
2573 {
2574         agpgart_softstate_t *st;
2575         int instance, rval = DDI_FAILURE;
2576         dev_t dev;
2577 
2578         switch (cmd) {
2579         case DDI_INFO_DEVT2DEVINFO:
2580                 dev = (dev_t)arg;
2581                 instance = AGP_DEV2INST(dev);
2582                 st = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2583                 if (st != NULL) {
2584                         mutex_enter(&st->asoft_instmutex);
2585                         *resultp = st->asoft_dip;
2586                         mutex_exit(&st->asoft_instmutex);
2587                         rval = DDI_SUCCESS;
2588                 } else
2589                         *resultp = NULL;
2590 
2591                 break;
2592         case DDI_INFO_DEVT2INSTANCE:
2593                 dev = (dev_t)arg;
2594                 instance = AGP_DEV2INST(dev);
2595                 *resultp = (void *)(uintptr_t)instance;
2596                 rval = DDI_SUCCESS;
2597 
2598                 break;
2599         default:
2600                 break;
2601         }
2602 
2603         return (rval);
2604 }
2605 
2606 /*
2607  * agpgart_open()
2608  *
2609  * Description:
2610  *      This function is the driver open entry point. If it is the
2611  *      first time the agpgart driver is opened, the driver will
2612  *      open other agp related layered drivers and set up the agpgart
2613  *      table properly.
2614  *
2615  * Arguments:
2616  *      dev                     device number pointer
2617  *      openflags               open flags
2618  *      otyp                    OTYP_BLK, OTYP_CHR
2619  *      credp                   user's credential's struct pointer
2620  *
2621  * Returns:
2622  *      ENXIO                   operation error
2623  *      EAGAIN                  resource temporarily unavailable
2624  *      0                       success
2625  */
2626 /*ARGSUSED*/
2627 static int
2628 agpgart_open(dev_t *dev, int openflags, int otyp, cred_t *credp)
2629 {
2630         int instance = AGP_DEV2INST(*dev);
2631         agpgart_softstate_t *softstate;
2632         int rc = 0;
2633         uint32_t devid;
2634 
2635         if (secpolicy_gart_access(credp)) {
2636                 AGPDB_PRINT2((CE_WARN, "agpgart_open: permission denied"));
2637                 return (EPERM);
2638         }
2639         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2640         if (softstate == NULL) {
2641                 AGPDB_PRINT2((CE_WARN, "agpgart_open: get soft state err"));
2642                 return (ENXIO);
2643         }
2644 
2645         mutex_enter(&softstate->asoft_instmutex);
2646 
2647         if (softstate->asoft_opened) {
2648                 softstate->asoft_opened++;
2649                 mutex_exit(&softstate->asoft_instmutex);
2650                 return (0);
2651         }
2652 
2653         /*
2654          * The driver is being opened for the first time, so we initialize
2655          * the layered driver interface and softstate members here.
2656          */
2657         softstate->asoft_pgused = 0;
2658         if (lyr_init(&softstate->asoft_devreg, softstate->asoft_li)) {
2659                 AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_init failed"));
2660                 mutex_exit(&softstate->asoft_instmutex);
2661                 return (EAGAIN);
2662         }
2663 
2664         /* Call into layered driver */
2665         if (lyr_get_info(&softstate->asoft_info, &softstate->asoft_devreg)) {
2666                 AGPDB_PRINT2((CE_WARN, "agpgart_open: lyr_get_info error"));
2667                 lyr_end(&softstate->asoft_devreg);
2668                 mutex_exit(&softstate->asoft_instmutex);
2669                 return (EIO);
2670         }
2671 
2672         /*
2673          * The BIOS has already set up the GTT table for ARC_IGD830
2674          */
2675         if (IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2676                 softstate->asoft_opened++;
2677 
2678                 softstate->asoft_pgtotal =
2679                     get_max_pages(softstate->asoft_info.agpki_apersize);
2680 
2681                 if (lyr_config_devices(&softstate->asoft_devreg)) {
2682                         AGPDB_PRINT2((CE_WARN,
2683                             "agpgart_open: lyr_config_devices error"));
2684                         lyr_end(&softstate->asoft_devreg);
2685                         mutex_exit(&softstate->asoft_instmutex);
2686 
2687                         return (EIO);
2688                 }
2689                 devid = softstate->asoft_info.agpki_mdevid;
2690                 if (IS_INTEL_915(devid) ||
2691                     IS_INTEL_965(devid) ||
2692                     IS_INTEL_X33(devid) ||
2693                     IS_INTEL_G4X(devid)) {
2694                         rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
2695                             INTEL_CHIPSET_FLUSH_SETUP, 0, FKIOCTL, kcred, 0);
2696                 }
2697                 if (rc) {
2698                         AGPDB_PRINT2((CE_WARN,
2699                             "agpgart_open: Intel chipset flush setup error"));
2700                         lyr_end(&softstate->asoft_devreg);
2701                         mutex_exit(&softstate->asoft_instmutex);
2702                         return (EIO);
2703                 }
2704                 mutex_exit(&softstate->asoft_instmutex);
2705                 return (0);
2706         }
2707 
2708         rc = alloc_gart_table(softstate);
2709 
2710         /*
2711          * Allocate physically contiguous pages for the AGP arc or
2712          * i810 arc. If that fails, halve the aperture size to
2713          * reduce the gart table size, down to a 4 megabyte
2714          * aperture. This is just a workaround for systems with
2715          * very little physically contiguous memory.
2716          */
2717         if (rc) {
2718                 while ((softstate->asoft_info.agpki_apersize >= 4) &&
2719                     (alloc_gart_table(softstate))) {
2720                         softstate->asoft_info.agpki_apersize >>= 1;
2721                 }
2722                 if (softstate->asoft_info.agpki_apersize >= 4)
2723                         rc = 0;
2724         }
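
        /*
         * For example (hypothetical sizes): if the table for a 256MB
         * aperture (a 256KB gart table) cannot be allocated contiguously,
         * the loop above keeps halving the aperture size and retrying,
         * down to a 4MB aperture, before giving up with EAGAIN below.
         */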
2725 
2726         if (rc != 0) {
2727                 AGPDB_PRINT2((CE_WARN,
2728                     "agpgart_open: alloc gart table failed"));
2729                 lyr_end(&softstate->asoft_devreg);
2730                 mutex_exit(&softstate->asoft_instmutex);
2731                 return (EAGAIN);
2732         }
2733 
2734         softstate->asoft_pgtotal =
2735             get_max_pages(softstate->asoft_info.agpki_apersize);
2736         /*
2737          * The BIOS doesn't initialize the GTT for the i810,
2738          * so the i810 GTT must be created by the driver.
2739          *
2740          * Set up gart table and enable it.
2741          */
2742         if (lyr_set_gart_addr(softstate->gart_pbase,
2743             &softstate->asoft_devreg)) {
2744                 AGPDB_PRINT2((CE_WARN,
2745                     "agpgart_open: set gart table addr failed"));
2746                 free_gart_table(softstate);
2747                 lyr_end(&softstate->asoft_devreg);
2748                 mutex_exit(&softstate->asoft_instmutex);
2749                 return (EIO);
2750         }
2751         if (lyr_config_devices(&softstate->asoft_devreg)) {
2752                 AGPDB_PRINT2((CE_WARN,
2753                     "agpgart_open: lyr_config_devices failed"));
2754                 free_gart_table(softstate);
2755                 lyr_end(&softstate->asoft_devreg);
2756                 mutex_exit(&softstate->asoft_instmutex);
2757                 return (EIO);
2758         }
2759 
2760         softstate->asoft_opened++;
2761         mutex_exit(&softstate->asoft_instmutex);
2762 
2763         return (0);
2764 }
2765 
2766 /*
2767  * agpgart_close()
2768  *
2769  * Description:
2770  *      agpgart_close will release resources allocated in the first open
2771  *      and close other open layered drivers. Also it frees the memory
2772  *      allocated by ioctls.
2773  *
2774  * Arguments:
2775  *      dev                     device number
2776  *      flag                    file status flag
2777  *      otyp                    OTYP_BLK, OTYP_CHR
2778  *      credp                   user's credential's struct pointer
2779  *
2780  * Returns:
2781  *      ENXIO                   not an error, to support "deferred attach"
2782  *      0                       success
2783  */
2784 /*ARGSUSED*/
2785 static int
2786 agpgart_close(dev_t dev, int flag, int otyp, cred_t *credp)
2787 {
2788         int instance = AGP_DEV2INST(dev);
2789         agpgart_softstate_t *softstate;
2790         int rc = 0;
2791         uint32_t devid;
2792 
2793         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
2794         if (softstate == NULL) {
2795                 AGPDB_PRINT2((CE_WARN, "agpgart_close: get soft state err"));
2796                 return (ENXIO);
2797         }
2798 
2799         mutex_enter(&softstate->asoft_instmutex);
2800         ASSERT(softstate->asoft_opened);
2801 
2802 
2803         /*
2804          * If the last process to close this device is not the controlling
2805          * process, also release control over the agpgart driver here, in
2806          * case the controlling process failed to release control before
2807          * it closed the driver.
2808          */
2809         if (softstate->asoft_acquired == 1) {
2810                 AGPDB_PRINT2((CE_WARN,
2811                     "agpgart_close: auto release control over driver"));
2812                 release_control(softstate);
2813         }
2814 
2815         devid = softstate->asoft_info.agpki_mdevid;
2816         if (IS_INTEL_915(devid) ||
2817             IS_INTEL_965(devid) ||
2818             IS_INTEL_X33(devid) ||
2819             IS_INTEL_G4X(devid)) {
2820                 rc = ldi_ioctl(softstate->asoft_devreg.agprd_targethdl,
2821                     INTEL_CHIPSET_FLUSH_FREE, 0, FKIOCTL, kcred, 0);
2822         }
2823         if (rc) {
2824                 AGPDB_PRINT2((CE_WARN,
2825                     "agpgart_close: Intel chipset flush free error"));
2826         }
2827 
2828         if (lyr_unconfig_devices(&softstate->asoft_devreg)) {
2829                 AGPDB_PRINT2((CE_WARN,
2830                     "agpgart_close: lyr_unconfig_device error"));
2831                 mutex_exit(&softstate->asoft_instmutex);
2832                 return (EIO);
2833         }
2834         softstate->asoft_agpen = 0;
2835 
2836         if (!IS_INTEL_830(softstate->asoft_devreg.agprd_arctype)) {
2837                 free_gart_table(softstate);
2838         }
2839 
2840         lyr_end(&softstate->asoft_devreg);
2841 
2842         /*
2843          * This statement must come before agp_del_allkeys:
2844          * agp_dealloc_mem, called indirectly by agp_del_allkeys,
2845          * tests this variable.
2846          */
2847         softstate->asoft_opened = 0;
2848 
2849         /*
2850          * Free the memory allocated by user applications which
2851          * was never deallocated.
2852          */
2853         (void) agp_del_allkeys(softstate);
2854 
2855         mutex_exit(&softstate->asoft_instmutex);
2856 
2857         return (0);
2858 }
2859 
2860 static int
2861 ioctl_agpgart_info(agpgart_softstate_t  *softstate, void  *arg, int flags)
2862 {
2863         agp_info_t infostruct;
2864 #ifdef _MULTI_DATAMODEL
2865         agp_info32_t infostruct32;
2866 #endif
2867 
2868         bzero(&infostruct, sizeof (agp_info_t));
2869 
2870 #ifdef _MULTI_DATAMODEL
2871         bzero(&infostruct32, sizeof (agp_info32_t));
2872         if (ddi_model_convert_from(flags & FMODELS) == DDI_MODEL_ILP32) {
2873                 if (copyinfo(softstate, &infostruct))
2874                         return (EINVAL);
2875 
2876                 agpinfo_default_to_32(infostruct, infostruct32);
2877                 if (ddi_copyout(&infostruct32, arg,
2878                     sizeof (agp_info32_t), flags) != 0)
2879                         return (EFAULT);
2880 
2881                 return (0);
2882         }
2883 #endif /* _MULTI_DATAMODEL */
2884         if (copyinfo(softstate, &infostruct))
2885                 return (EINVAL);
2886 
2887         if (ddi_copyout(&infostruct, arg, sizeof (agp_info_t), flags) != 0) {
2888                 return (EFAULT);
2889         }
2890 
2891         return (0);
2892 }
2893 
2894 static int
2895 ioctl_agpgart_acquire(agpgart_softstate_t  *st)
2896 {
2897         if (st->asoft_acquired) {
2898                 AGPDB_PRINT2((CE_WARN,
2899                     "ioctl_agpgart_acquire: already acquired"));
2899                 return (EBUSY);
2900         }
2901         acquire_control(st);
2902         return (0);
2903 }
2904 
2905 static int
2906 ioctl_agpgart_release(agpgart_softstate_t  *st)
2907 {
2908         if (is_controlling_proc(st) < 0) {
2909                 AGPDB_PRINT2((CE_WARN,
2910                     "ioctl_agpgart_release: not a controlling process"));
2911                 return (EPERM);
2912         }
2913         release_control(st);
2914         return (0);
2915 }
2916 
2917 static int
2918 ioctl_agpgart_setup(agpgart_softstate_t  *st, void  *arg, int flags)
2919 {
2920         agp_setup_t data;
2921         int rc = 0;
2922 
2923         if (is_controlling_proc(st) < 0) {
2924                 AGPDB_PRINT2((CE_WARN,
2925                     "ioctl_agpgart_setup: not a controlling process"));
2926                 return (EPERM);
2927         }
2928 
2929         if (!IS_TRUE_AGP(st->asoft_devreg.agprd_arctype)) {
2930                 AGPDB_PRINT2((CE_WARN,
2931                     "ioctl_agpgart_setup: no true agp bridge"));
2932                 return (EINVAL);
2933         }
2934 
2935         if (ddi_copyin(arg, &data, sizeof (agp_setup_t), flags) != 0)
2936                 return (EFAULT);
2937 
2938         if (rc = agp_setup(st, data.agps_mode))
2939                 return (rc);
2940         /* Store agp mode status for kstat */
2941         st->asoft_agpen = 1;
2942         return (0);
2943 }
2944 
2945 static int
2946 ioctl_agpgart_alloc(agpgart_softstate_t  *st, void  *arg, int flags)
2947 {
2948         agp_allocate_t  alloc_info;
2949         keytable_ent_t  *entryp;
2950         size_t          length;
2951         uint64_t        pg_num;
2952 
2953         if (is_controlling_proc(st) < 0) {
2954                 AGPDB_PRINT2((CE_WARN,
2955                     "ioctl_agpgart_alloc: not a controlling process"));
2956                 return (EPERM);
2957         }
2958 
2959         if (ddi_copyin(arg, &alloc_info,
2960             sizeof (agp_allocate_t), flags) != 0) {
2961                 return (EFAULT);
2962         }
2963         pg_num = st->asoft_pgused + alloc_info.agpa_pgcount;
2964         if (pg_num > st->asoft_pgtotal) {
2965                 AGPDB_PRINT2((CE_WARN,
2966                     "ioctl_agpgart_alloc: exceeding the memory pages limit"));
2967                 AGPDB_PRINT2((CE_WARN,
2968                     "ioctl_agpgart_alloc: request %x pages failed",
2969                     alloc_info.agpa_pgcount));
2970                 AGPDB_PRINT2((CE_WARN,
2971                     "ioctl_agpgart_alloc: pages used %x total is %x",
2972                     st->asoft_pgused, st->asoft_pgtotal));
2973 
2974                 return (EINVAL);
2975         }
2976 
2977         length = AGP_PAGES2BYTES(alloc_info.agpa_pgcount);
2978         entryp = agp_alloc_mem(st, length, alloc_info.agpa_type);
2979         if (!entryp) {
2980                 AGPDB_PRINT2((CE_WARN,
2981                     "ioctl_agpgart_alloc: allocate 0x%lx bytes failed",
2982                     length));
2983                 return (ENOMEM);
2984         }
2985         ASSERT((entryp->kte_key >= 0) && (entryp->kte_key < AGP_MAXKEYS));
2986         alloc_info.agpa_key = entryp->kte_key;
2987         if (alloc_info.agpa_type == AGP_PHYSICAL) {
2988                 alloc_info.agpa_physical =
2989                     (uint32_t)(entryp->kte_pfnarray[0] << AGP_PAGE_SHIFT);
2990         }
2991         /* Update the memory pages used */
2992         st->asoft_pgused += alloc_info.agpa_pgcount;
2993 
2994         if (ddi_copyout(&alloc_info, arg,
2995             sizeof (agp_allocate_t), flags) != 0) {
2996 
2997                 return (EFAULT);
2998         }
2999 
3000         return (0);
3001 }
3002 
3003 static int
3004 ioctl_agpgart_dealloc(agpgart_softstate_t  *st, intptr_t arg)
3005 {
3006         int key;
3007         keytable_ent_t  *keyent;
3008 
3009         if (is_controlling_proc(st) < 0) {
3010                 AGPDB_PRINT2((CE_WARN,
3011                     "ioctl_agpgart_dealloc: not a controlling process"));
3012                 return (EPERM);
3013         }
3014         key = (int)arg;
3015         if ((key >= AGP_MAXKEYS) || key < 0) {
3016                 return (EINVAL);
3017         }
3018         keyent = &st->asoft_table[key];
3019         if (!keyent->kte_memhdl) {
3020                 return (EINVAL);
3021         }
3022 
3023         if (agp_dealloc_mem(st, keyent))
3024                 return (EINVAL);
3025 
3026         /* Update the memory pages used */
3027         st->asoft_pgused -= keyent->kte_pages;
3028         bzero(keyent, sizeof (keytable_ent_t));
3029 
3030         return (0);
3031 }
3032 
3033 static int
3034 ioctl_agpgart_bind(agpgart_softstate_t  *st, void  *arg, int flags)
3035 {
3036         agp_bind_t      bind_info;
3037         keytable_ent_t  *keyent;
3038         int             key;
3039         uint32_t        pg_offset;
3040         int             retval = 0;
3041 
3042         if (is_controlling_proc(st) < 0) {
3043                 AGPDB_PRINT2((CE_WARN,
3044                     "ioctl_agpgart_bind: not a controlling process"));
3045                 return (EPERM);
3046         }
3047 
3048         if (ddi_copyin(arg, &bind_info, sizeof (agp_bind_t), flags) != 0) {
3049                 return (EFAULT);
3050         }
3051 
3052         key = bind_info.agpb_key;
3053         if ((key >= AGP_MAXKEYS) || key < 0) {
3054                 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_bind: invalid key"));
3055                 return (EINVAL);
3056         }
3057 
3058         if (IS_INTEL_830(st->asoft_devreg.agprd_arctype)) {
3059                 if (AGP_PAGES2KB(bind_info.agpb_pgstart) <
3060                     st->asoft_info.agpki_presize) {
3061                         AGPDB_PRINT2((CE_WARN,
3062                             "ioctl_agpgart_bind: bind to prealloc area "
3063                             "pgstart = %dKB < presize = %ldKB",
3064                             AGP_PAGES2KB(bind_info.agpb_pgstart),
3065                             st->asoft_info.agpki_presize));
3066                         return (EINVAL);
3067                 }
3068         }
3069 
3070         pg_offset = bind_info.agpb_pgstart;
3071         keyent = &st->asoft_table[key];
3072         if (!keyent->kte_memhdl) {
3073                 AGPDB_PRINT2((CE_WARN,
3074                     "ioctl_agpgart_bind: Key = 0x%x can't get keyent",
3075                     key));
3076                 return (EINVAL);
3077         }
3078 
3079         if (keyent->kte_bound != 0) {
3080                 AGPDB_PRINT2((CE_WARN,
3081                     "ioctl_agpgart_bind: Key = 0x%x already bound",
3082                     key));
3083                 return (EINVAL);
3084         }
3085         retval = agp_bind_key(st, keyent, pg_offset);
3086 
3087         if (retval == 0) {
3088                 keyent->kte_pgoff = pg_offset;
3089                 keyent->kte_bound = 1;
3090         }
3091 
3092         return (retval);
3093 }
3094 
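/*
 * ioctl_agpgart_unbind()
 *
 * Description:
 * 	Removes a previously bound key from the GART table.
 *
 * Arguments:
 * 	st	driver soft state pointer
 * 	arg	userland pointer to an agp_unbind_t
 * 	flags	ioctl flags (data model)
 *
 * Returns:
 * 	EPERM	not the controlling process
 * 	EFAULT	copyin failed
 * 	EINVAL	bad key or key not bound
 * 	0	success
 */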
3095 static int
3096 ioctl_agpgart_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3097 {
3098         int key, retval = 0;
3099         agp_unbind_t unbindinfo;
3100         keytable_ent_t *keyent;
3101 
3102         if (is_controlling_proc(st) < 0) {
3103                 AGPDB_PRINT2((CE_WARN,
3104                     "ioctl_agpgart_unbind: not a controlling process"));
3105                 return (EPERM);
3106         }
3107 
3108         if (ddi_copyin(arg, &unbindinfo, sizeof (unbindinfo), flags) != 0) {
3109                 return (EFAULT);
3110         }
3111         key = unbindinfo.agpu_key;
3112         if ((key >= AGP_MAXKEYS) || key < 0) {
3113                 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_unbind: invalid key"));
3114                 return (EINVAL);
3115         }
3116         keyent = &st->asoft_table[key];
3117         if (!keyent->kte_bound) {
3118                 return (EINVAL);
3119         }
3120 
3121         if ((retval = agp_unbind_key(st, keyent)) != 0)
3122                 return (retval);
3123 
3124         return (0);
3125 }
3126 
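/*
 * ioctl_agpgart_flush_chipset()
 *
 * Description:
 * 	Forwards an INTEL_CHIPSET_FLUSH ioctl to the target driver for
 * 	Intel 915/965/X33/G4X class devices; other devices return success
 * 	without doing anything.
 */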
3127 static int
3128 ioctl_agpgart_flush_chipset(agpgart_softstate_t *st)
3129 {
3130         ldi_handle_t    hdl;
3131         uint32_t devid;
3132         int rc = 0;
3133         devid = st->asoft_info.agpki_mdevid;
3134         hdl = st->asoft_devreg.agprd_targethdl;
3135         if (IS_INTEL_915(devid) ||
3136             IS_INTEL_965(devid) ||
3137             IS_INTEL_X33(devid) ||
3138             IS_INTEL_G4X(devid)) {
3139                 rc = ldi_ioctl(hdl, INTEL_CHIPSET_FLUSH, 0, FKIOCTL, kcred, 0);
3140         }
3141         return  (rc);
3142 }
3143 
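/*
 * ioctl_agpgart_pages_bind()
 *
 * Description:
 * 	Writes a caller-supplied page frame list into the IGD GTT starting
 * 	at the requested page offset (I8XX_ADD2GTT on the master driver),
 * 	and records the segment on the softstate mapped_list so it can be
 * 	rebound after a VT switch.
 *
 * Arguments:
 * 	st	driver soft state pointer
 * 	arg	userland pointer to an agp_bind_pages_t
 * 	flags	ioctl flags (data model)
 *
 * Returns:
 * 	EFAULT	copyin failed
 * 	-1	GTT insertion failed
 * 	0	success
 */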
3144 static int
3145 ioctl_agpgart_pages_bind(agpgart_softstate_t  *st, void  *arg, int flags)
3146 {
3147         agp_bind_pages_t        bind_info;
3148         uint32_t        pg_offset;
3149         int err = 0;
3150         ldi_handle_t hdl;
3151         uint32_t npages;
3152         igd_gtt_seg_t *gttseg;
3153         uint32_t i;
3154         int rval;
3155         if (ddi_copyin(arg, &bind_info,
3156             sizeof (agp_bind_pages_t), flags) != 0) {
3157                 return (EFAULT);
3158         }
3159 
3160         gttseg = (igd_gtt_seg_t *)kmem_zalloc(sizeof (igd_gtt_seg_t),
3161             KM_SLEEP);
3162 
3163         pg_offset = bind_info.agpb_pgstart;
3164 
3165         gttseg->igs_pgstart =  pg_offset;
3166         npages = (uint32_t)bind_info.agpb_pgcount;
3167         gttseg->igs_npage = npages;
3168 
3169         gttseg->igs_type = AGP_NORMAL;
3170         gttseg->igs_phyaddr = (uint32_t *)kmem_zalloc
3171             (sizeof (uint32_t) * gttseg->igs_npage, KM_SLEEP);
3172 
3173         for (i = 0; i < npages; i++) {
3174                 gttseg->igs_phyaddr[i] = bind_info.agpb_pages[i] <<
3175                     GTT_PAGE_SHIFT;
3176         }
3177 
3178         hdl = st->asoft_devreg.agprd_masterhdl;
3179         if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL,
3180             kcred, &rval)) {
3181                 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: start=0x%x",
3182                     gttseg->igs_pgstart));
3183                 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: pages=0x%x",
3184                     gttseg->igs_npage));
3185                 AGPDB_PRINT2((CE_WARN, "ioctl_agpgart_pages_bind: type=0x%x",
3186                     gttseg->igs_type));
3187                 err = -1;
3188         }
3189 
3190         list_head_add_new(&st->mapped_list, gttseg);
3191         return (err);
3192 }
3193 
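/*
 * ioctl_agpgart_pages_unbind()
 *
 * Description:
 * 	Removes a page segment from the IGD GTT (I8XX_REM_GTT). A zero
 * 	agpb_type marks the VT-switch case: the segment stays on
 * 	mapped_list so AGPIOC_PAGES_REBIND can restore it later. A
 * 	non-zero type removes the segment from the list and frees it.
 *
 * Arguments:
 * 	st	driver soft state pointer
 * 	arg	userland pointer to an agp_unbind_pages_t
 * 	flags	ioctl flags (data model)
 *
 * Returns:
 * 	EFAULT	copyin failed
 * 	EINVAL	no segment bound at the given page offset
 * 	-1	GTT removal failed
 * 	0	success
 */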
3194 static int
3195 ioctl_agpgart_pages_unbind(agpgart_softstate_t  *st, void  *arg, int flags)
3196 {
3197         agp_unbind_pages_t unbind_info;
3198         int     rval;
3199         ldi_handle_t    hdl;
3200         igd_gtt_seg_t   *gttseg = NULL;
3201 
3202         if (ddi_copyin(arg, &unbind_info, sizeof (unbind_info), flags) != 0) {
3203                 return (EFAULT);
3204         }
3205 
3206         struct list_head  *entry, *temp;
3207         list_head_for_each_safe(entry, temp, &st->mapped_list) {
3208                 if (entry->gttseg->igs_pgstart == unbind_info.agpb_pgstart) {
3209                         gttseg = entry->gttseg;
3210                         /* not unbind if VT switch */
3211                         if (unbind_info.agpb_type) {
3212                                 list_head_del(entry);
3213                                 kmem_free(entry, sizeof (*entry));
3214                         }
3215                         break;
3216                 }
3217         }
3218         if (gttseg == NULL) {
                 AGPDB_PRINT2((CE_WARN,
                     "ioctl_agpgart_pages_unbind: no segment bound at 0x%x",
                     unbind_info.agpb_pgstart));
                 return (EINVAL);
         }
3219         gttseg->igs_pgstart =  unbind_info.agpb_pgstart;
3220         ASSERT(gttseg->igs_npage == unbind_info.agpb_pgcount);
3221 
3222         hdl = st->asoft_devreg.agprd_masterhdl;
3223         if (ldi_ioctl(hdl, I8XX_REM_GTT, (intptr_t)gttseg, FKIOCTL,
3224             kcred, &rval))
3225                 return (-1);
3226 
3227         if (unbind_info.agpb_type) {
3228                 kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) *
3229                     gttseg->igs_npage);
3230                 kmem_free(gttseg, sizeof (igd_gtt_seg_t));
3231         }
3232 
3233         return (0);
3234 }
3235 
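/*
 * ioctl_agpgart_pages_rebind()
 *
 * Description:
 * 	Replays every segment remembered on mapped_list back into the IGD
 * 	GTT after a VT switch. Each list entry is released as it is
 * 	processed; the segment memory itself is freed only once it has
 * 	been rebound successfully.
 *
 * Returns:
 * 	-1	a GTT insertion failed
 * 	0	success
 */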
3236 static int
3237 ioctl_agpgart_pages_rebind(agpgart_softstate_t  *st)
3238 {
3239         int     rval;
3240         ldi_handle_t    hdl;
3241         igd_gtt_seg_t   *gttseg;
3242         int err = 0;
3243 
3244         hdl = st->asoft_devreg.agprd_masterhdl;
3245         struct list_head  *entry, *temp;
3246         list_head_for_each_safe(entry, temp, &st->mapped_list) {
3247                 gttseg = entry->gttseg;
3248                 list_head_del(entry);
3249                 kmem_free(entry, sizeof (*entry));
3250                 if (ldi_ioctl(hdl, I8XX_ADD2GTT, (intptr_t)gttseg, FKIOCTL,
3251                     kcred, &rval)) {
3252                         AGPDB_PRINT2((CE_WARN, "agpgart_pages_rebind error"));
3253                         err = -1;
3254                         break;
3255                 }
3256                 kmem_free(gttseg->igs_phyaddr, sizeof (uint32_t) *
3257                     gttseg->igs_npage);
3258                 kmem_free(gttseg, sizeof (igd_gtt_seg_t));
3259 
3260         }
3261         return (err);
3262 
3263 }
3264 
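/*
 * agpgart_ioctl()
 *
 * Description:
 * 	Main ioctl entry point. Looks up the per-instance soft state from
 * 	the minor number and dispatches each AGPIOC_* command to its
 * 	handler while holding the instance mutex.
 *
 * A minimal userland sketch of the usual call sequence is shown below.
 * It is illustrative only and makes a few assumptions not confirmed by
 * this file: the device path (/dev/agpgart), the name of the setup
 * structure (agp_setup_t, omitted here), and that AGP_NORMAL is the
 * ordinary allocation type.
 *
 *	#include <sys/types.h>
 *	#include <sys/mman.h>
 *	#include <sys/agpgart.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = open("/dev/agpgart", O_RDWR);	   assumed device path
 *	(void) ioctl(fd, AGPIOC_ACQUIRE);	   become controlling process
 *	 ... AGPIOC_SETUP with a setup structure would normally follow ...
 *
 *	agp_allocate_t alloc = { 0 };
 *	alloc.agpa_pgcount = 16;
 *	alloc.agpa_type = AGP_NORMAL;		   assumed allocation type
 *	(void) ioctl(fd, AGPIOC_ALLOCATE, &alloc); fills in alloc.agpa_key
 *
 *	uint32_t pgstart = 256;			   aperture page offset; on
 *						   830-class IGDs it must lie
 *						   past the prealloc area
 *	agp_bind_t bind = { 0 };
 *	bind.agpb_key = alloc.agpa_key;
 *	bind.agpb_pgstart = pgstart;
 *	(void) ioctl(fd, AGPIOC_BIND, &bind);
 *
 *	 The mmap offset is the bound page start converted to bytes, which
 *	 is how agpgart_devmap() below locates the key table entry.
 *	void *p = mmap(NULL, 16 * 4096, PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, (off_t)pgstart * 4096);
 *
 *	(void) munmap(p, 16 * 4096);
 *	agp_unbind_t ub = { 0 };
 *	ub.agpu_key = alloc.agpa_key;
 *	(void) ioctl(fd, AGPIOC_UNBIND, &ub);
 *	(void) ioctl(fd, AGPIOC_DEALLOCATE, alloc.agpa_key);
 *	(void) ioctl(fd, AGPIOC_RELEASE);
 *	(void) close(fd);
 */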
3265 /*ARGSUSED*/
3266 static int
3267 agpgart_ioctl(dev_t dev, int cmd, intptr_t intarg, int flags,
3268     cred_t *credp, int *rvalp)
3269 {
3270         int instance;
3271         int retval = 0;
3272         void *arg = (void*)intarg;
3273 
3274         agpgart_softstate_t *softstate;
3275 
3276         instance = AGP_DEV2INST(dev);
3277         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3278         if (softstate == NULL) {
3279                 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: get soft state err"));
3280                 return (ENXIO);
3281         }
3282 
3283         mutex_enter(&softstate->asoft_instmutex);
3284 
3285         switch (cmd) {
3286         case AGPIOC_INFO:
3287                 retval = ioctl_agpgart_info(softstate, arg, flags);
3288                 break;
3289         case AGPIOC_ACQUIRE:
3290                 retval = ioctl_agpgart_acquire(softstate);
3291                 break;
3292         case AGPIOC_RELEASE:
3293                 retval = ioctl_agpgart_release(softstate);
3294                 break;
3295         case AGPIOC_SETUP:
3296                 retval = ioctl_agpgart_setup(softstate, arg, flags);
3297                 break;
3298         case AGPIOC_ALLOCATE:
3299                 retval = ioctl_agpgart_alloc(softstate, arg, flags);
3300                 break;
3301         case AGPIOC_DEALLOCATE:
3302                 retval = ioctl_agpgart_dealloc(softstate, intarg);
3303                 break;
3304         case AGPIOC_BIND:
3305                 retval = ioctl_agpgart_bind(softstate, arg, flags);
3306                 break;
3307         case AGPIOC_UNBIND:
3308                 retval = ioctl_agpgart_unbind(softstate, arg, flags);
3309                 break;
3310         case AGPIOC_FLUSHCHIPSET:
3311                 retval = ioctl_agpgart_flush_chipset(softstate);
3312                 break;
3313         case AGPIOC_PAGES_BIND:
3314                 retval = ioctl_agpgart_pages_bind(softstate, arg, flags);
3315                 break;
3316         case AGPIOC_PAGES_UNBIND:
3317                 retval = ioctl_agpgart_pages_unbind(softstate, arg, flags);
3318                 break;
3319         case AGPIOC_PAGES_REBIND:
3320                 retval = ioctl_agpgart_pages_rebind(softstate);
3321                 break;
3322         default:
3323                 AGPDB_PRINT2((CE_WARN, "agpgart_ioctl: unknown command"));
3324                 retval = ENXIO;
3325                 break;
3326         }
3327 
3328         mutex_exit(&softstate->asoft_instmutex);
3329         return (retval);
3330 }
3331 
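/*
 * agpgart_segmap()
 *
 * Description:
 * 	segmap(9E) entry point. Validates that the requested length is
 * 	AGP-page aligned and hands the work to devmap_setup(9F), which in
 * 	turn calls agpgart_devmap() below.
 */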
3332 static int
3333 agpgart_segmap(dev_t dev, off_t off, struct as *asp,
3334     caddr_t *addrp, off_t len, unsigned int prot,
3335     unsigned int maxprot, unsigned int flags, cred_t *credp)
3336 {
3337 
3338         struct agpgart_softstate *softstate;
3339         int instance;
3340         int rc = 0;
3341 
3342         instance = AGP_DEV2INST(dev);
3343         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3344         if (softstate == NULL) {
3345                 AGPDB_PRINT2((CE_WARN, "agpgart_segmap: get soft state err"));
3346                 return (ENXIO);
3347         }
3348         if (!AGP_ALIGNED(len))
3349                 return (EINVAL);
3350 
3351         mutex_enter(&softstate->asoft_instmutex);
3352 
3353         rc = devmap_setup(dev, (offset_t)off, asp, addrp,
3354             (size_t)len, prot, maxprot, flags, credp);
3355 
3356         mutex_exit(&softstate->asoft_instmutex);
3357         return (rc);
3358 }
3359 
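/*
 * agpgart_devmap()
 *
 * Description:
 * 	devmap(9E) entry point. The mapping offset, interpreted in
 * 	aperture bytes, selects the bound key table entry; the length is
 * 	clipped to the end of that allocation, and the pages are exported
 * 	to userland with devmap_pmem_setup(9F) using uncached,
 * 	write-combining access attributes.
 */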
3360 /*ARGSUSED*/
3361 static int
3362 agpgart_devmap(dev_t dev, devmap_cookie_t cookie, offset_t offset, size_t len,
3363     size_t *mappedlen, uint_t model)
3364 {
3365         struct agpgart_softstate *softstate;
3366         int instance, status;
3367         struct keytable_ent *mementry;
3368         offset_t local_offset;
3369 
3370         instance = AGP_DEV2INST(dev);
3371         softstate = ddi_get_soft_state(agpgart_glob_soft_handle, instance);
3372         if (softstate == NULL) {
3373                 AGPDB_PRINT2((CE_WARN, "agpgart_devmap: get soft state err"));
3374                 return (ENXIO);
3375         }
3376 
3377 
3378         if (offset > MB2BYTES(softstate->asoft_info.agpki_apersize)) {
3379                 AGPDB_PRINT2((CE_WARN, "agpgart_devmap: offset is too large"));
3380                 return (EINVAL);
3381         }
3382 
3383         /*
3384          * Fail if no bound memory entry covers this aperture offset.
3385          */
3386 
3387         mementry = agp_find_bound_keyent(softstate, AGP_BYTES2PAGES(offset));
3388 
3389         if (mementry == NULL) {
3390                 AGPDB_PRINT2((CE_WARN,
3391                     "agpgart_devmap: can not find the proper keyent"));
3392                 return (EINVAL);
3393         }
3394 
3395         local_offset = offset - AGP_PAGES2BYTES(mementry->kte_pgoff);
3396 
3397         if (len > (AGP_PAGES2BYTES(mementry->kte_pages) - local_offset)) {
3398                 len = AGP_PAGES2BYTES(mementry->kte_pages) - local_offset;
3399         }
3400 
3401         switch (mementry->kte_type) {
3402         case AGP_NORMAL:
3403                 if (PMEMP(mementry->kte_memhdl)->pmem_cookie) {
3404                         status = devmap_pmem_setup(cookie,
3405                             softstate->asoft_dip,
3406                             &agp_devmap_cb,
3407                             PMEMP(mementry->kte_memhdl)->pmem_cookie,
3408                             local_offset,
3409                             len, PROT_ALL,
3410                             (DEVMAP_DEFAULTS|IOMEM_DATA_UC_WR_COMBINE),
3411                             &mem_dev_acc_attr);
3412                 } else {
3413                         AGPDB_PRINT2((CE_WARN,
3414                             "agpgart_devmap: no pmem cookie for this entry"));
3415                         return (EINVAL);
3416 
3417                 }
3418 
3419                 break;
3420         default:
3421                 AGPDB_PRINT2((CE_WARN,
3422                     "agpgart_devmap: not a valid memory type"));
3423                 return (EINVAL);
3424         }
3425 
3426 
3427         if (status == 0) {
3428                 *mappedlen = len;
3429         } else {
3430                 *mappedlen = 0;
3431                 AGPDB_PRINT2((CE_WARN,
3432                     "agpgart_devmap: devmap interface failed"));
3433                 return (EINVAL);
3434         }
3435 
3436         return (0);
3437 }
3438 
3439 static struct cb_ops    agpgart_cb_ops = {
3440         agpgart_open,           /* open() */
3441         agpgart_close,          /* close() */
3442         nodev,                  /* strategy() */
3443         nodev,                  /* print routine */
3444         nodev,                  /* no dump routine */
3445         nodev,                  /* read() */
3446         nodev,                  /* write() */
3447         agpgart_ioctl,          /* agpgart_ioctl */
3448         agpgart_devmap,         /* devmap routine */
3449         nodev,                  /* mmap routine, no longer used */
3450         agpgart_segmap,         /* system segmap routine */
3451         nochpoll,               /* no chpoll routine */
3452         ddi_prop_op,            /* system prop operations */
3453         0,                      /* not a STREAMS driver */
3454         D_DEVMAP | D_MP,        /* safe for multi-thread/multi-processor */
3455         CB_REV,                 /* cb_ops revision */
3456         nodev,                  /* cb_aread() */
3457         nodev,                  /* cb_awrite() */
3458 };
3459 
3460 static struct dev_ops agpgart_ops = {
3461         DEVO_REV,               /* devo_rev */
3462         0,                      /* devo_refcnt */
3463         agpgart_getinfo,        /* devo_getinfo */
3464         nulldev,                /* devo_identify */
3465         nulldev,                /* devo_probe */
3466         agpgart_attach,         /* devo_attach */
3467         agpgart_detach,         /* devo_detach */
3468         nodev,                  /* devo_reset */
3469         &agpgart_cb_ops,    /* devo_cb_ops */
3470         (struct bus_ops *)0,    /* devo_bus_ops */
3471         NULL,                   /* devo_power */
3472         ddi_quiesce_not_needed, /* devo_quiesce */
3473 };
3474 
3475 static  struct modldrv modldrv = {
3476         &mod_driverops,
3477         "AGP driver",
3478         &agpgart_ops,
3479 };
3480 
3481 static struct modlinkage modlinkage = {
3482         MODREV_1,               /* MODREV_1 as required by the DDI manual */
3483         {&modldrv, NULL, NULL, NULL}
3484 };
3487 
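/*
 * Loadable module entry points. _init() sets up the per-instance soft
 * state list and registers the driver with the kernel, _fini() reverses
 * that, and _info() reports module information.
 */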
3488 int
3489 _init(void)
3490 {
3491         int ret = DDI_SUCCESS;
3492 
3493         ret = ddi_soft_state_init(&agpgart_glob_soft_handle,
3494             sizeof (agpgart_softstate_t),
3495             AGPGART_MAX_INSTANCES);
3496 
3497         if (ret != 0) {
3498                 AGPDB_PRINT2((CE_WARN,
3499                     "_init: soft state init error code=0x%x", ret));
3500                 return (ret);
3501         }
3502 
3503         if ((ret = mod_install(&modlinkage)) != 0) {
3504                 AGPDB_PRINT2((CE_WARN,
3505                     "_init: mod install error code=0x%x", ret));
3506                 ddi_soft_state_fini(&agpgart_glob_soft_handle);
3507                 return (ret);
3508         }
3509 
3510         return (DDI_SUCCESS);
3511 }
3512 
3513 int
3514 _info(struct modinfo *modinfop)
3515 {
3516         return (mod_info(&modlinkage, modinfop));
3517 }
3518 
3519 int
3520 _fini(void)
3521 {
3522         int ret;
3523 
3524         if ((ret = mod_remove(&modlinkage)) == 0) {
3525                 ddi_soft_state_fini(&agpgart_glob_soft_handle);
3526         }
3527 
3528         return (ret);
3529 }