3644 Add virtio-net support into the Illumos
Reviewed by: Alexey Zaytsev, alexey.zaytsev@gmail.com
Reviewed by: Yuri Pankov, yuri.pankov@nexenta.com
Reviewed by: David Hoppner, 0xffea@gmail.com


  54 #include <sys/conf.h>
  55 #include <sys/kmem.h>
  56 #include <sys/debug.h>
  57 #include <sys/modctl.h>
  58 #include <sys/autoconf.h>
  59 #include <sys/ddi_impldefs.h>
  60 #include <sys/ddi.h>
  61 #include <sys/sunddi.h>
  62 #include <sys/sunndi.h>
  63 #include <sys/avintr.h>
  64 #include <sys/spl.h>
  65 #include <sys/promif.h>
  66 #include <sys/list.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/sysmacros.h>
  70 #include <sys/pci.h>
  71 
  72 #include "virtiovar.h"
  73 #include "virtioreg.h"

  74 #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  75 #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  76 #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  77             ~(VIRTIO_PAGE_SIZE-1))
  78 
  79 void
  80 virtio_set_status(struct virtio_softc *sc, unsigned int status)
  81 {
  82         int old = 0;
  83 
  84         if (status != 0)
  85                 old = ddi_get8(sc->sc_ioh,
  86                     (uint8_t *)(sc->sc_io_addr +
  87                     VIRTIO_CONFIG_DEVICE_STATUS));

  88 
  89         ddi_put8(sc->sc_ioh,
  90             (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
  91             status | old);
  92 }
  93 
  94 /*
  95  * Negotiate features, save the result in sc->sc_features
  96  */
  97 uint32_t
  98 virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99 {
 100         uint32_t host_features;
 101         uint32_t features;
 102 
 103         host_features = ddi_get32(sc->sc_ioh,
 104             /* LINTED E_BAD_PTR_CAST_ALIGN */
 105             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106 
 107         dev_debug(sc->sc_dev, CE_NOTE,
 108             "host features: %x, guest features: %x",
 109             host_features, guest_features);
 110 
 111         features = host_features & guest_features;
 112         ddi_put32(sc->sc_ioh,
 113             /* LINTED E_BAD_PTR_CAST_ALIGN */
 114             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 115             features);
 116 
 117         sc->sc_features = features;
 118 
 119         return (host_features);
 120 }
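/*
 * A minimal usage sketch, assuming a hypothetical leaf driver that only
 * offers the generic INDIRECT_DESC bit; the function name is made up.
 */
static void
example_negotiate(struct virtio_softc *sc)
{
        char buf[128];

        /* Offer only the features this driver understands. */
        (void) virtio_negotiate_features(sc, VIRTIO_F_RING_INDIRECT_DESC);

        /* sc->sc_features now holds the negotiated set; log it. */
        (void) virtio_show_features(sc->sc_features, buf, sizeof (buf));
        dev_debug(sc->sc_dev, CE_NOTE, "%s", buf);
}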
 121 
 122 size_t
 123 virtio_show_features(uint32_t features,
 124     char *buf, size_t len)
 125 {
 126         char *orig_buf = buf;
 127         char *bufend = buf + len;
 128 
 129         /* LINTED E_PTRDIFF_OVERFLOW */
 130         buf += snprintf(buf, bufend - buf, "Generic ( ");
 131         if (features & VIRTIO_F_RING_INDIRECT_DESC)
 132                 /* LINTED E_PTRDIFF_OVERFLOW */
 133                 buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 134 
 135         /* LINTED E_PTRDIFF_OVERFLOW */
 136         buf += snprintf(buf, bufend - buf, ") ");
 137 
 138         /* LINTED E_PTRDIFF_OVERFLOW */
 139         return (buf - orig_buf);
 140 }
 141 
 142 boolean_t
 143 virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 144 {


 177 uint64_t
 178 virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 179 {
 180         uint64_t r;
 181 
 182         ASSERT(sc->sc_config_offset);
 183         r = ddi_get32(sc->sc_ioh,
 184             /* LINTED E_BAD_PTR_CAST_ALIGN */
 185             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 186             index + sizeof (uint32_t)));
 187 
 188         r <<= 32;
 189 
 190         r += ddi_get32(sc->sc_ioh,
 191             /* LINTED E_BAD_PTR_CAST_ALIGN */
 192             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 193         return (r);
 194 }
 195 
 196 void
 197 virtio_write_device_config_1(struct virtio_softc *sc,
 198     unsigned int index, uint8_t value)
 199 {
 200         ASSERT(sc->sc_config_offset);
 201         ddi_put8(sc->sc_ioh,
 202             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 203 }
 204 
 205 void
 206 virtio_write_device_config_2(struct virtio_softc *sc,
 207     unsigned int index, uint16_t value)
 208 {
 209         ASSERT(sc->sc_config_offset);
 210         ddi_put16(sc->sc_ioh,
 211             /* LINTED E_BAD_PTR_CAST_ALIGN */
 212             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 213 }
 214 
 215 void
 216 virtio_write_device_config_4(struct virtio_softc *sc,
 217     unsigned int index, uint32_t value)
 218 {
 219         ASSERT(sc->sc_config_offset);
 220         ddi_put32(sc->sc_ioh,
 221             /* LINTED E_BAD_PTR_CAST_ALIGN */
 222             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 223 }
 224 
 225 void
 226 virtio_write_device_config_8(struct virtio_softc *sc,
 227     unsigned int index, uint64_t value)
 228 {
 229         ASSERT(sc->sc_config_offset);
 230         ddi_put32(sc->sc_ioh,
 231             /* LINTED E_BAD_PTR_CAST_ALIGN */
 232             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 233             value & 0xFFFFFFFF);
 234         ddi_put32(sc->sc_ioh,
 235             /* LINTED E_BAD_PTR_CAST_ALIGN */
 236             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 237             index + sizeof (uint32_t)), value >> 32);
 238 }
 239 
 240 /*
 241  * Start/stop vq interrupt.  No guarantee.
 242  */
 243 void
 244 virtio_stop_vq_intr(struct virtqueue *vq)
 245 {
 246         vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 247 }
 248 
 249 void
 250 virtio_start_vq_intr(struct virtqueue *vq)
 251 {
 252         vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 253 }
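/*
 * A poll-mode sketch, assuming a hypothetical driver that wants to drain a
 * queue without taking an interrupt per buffer; the function name is made
 * up and the drain step is only indicated by a comment.
 */
static void
example_poll(struct virtqueue *vq)
{
        virtio_stop_vq_intr(vq);
        /* drain the used ring here, e.g. with virtio_pull_chain() */
        virtio_start_vq_intr(vq);
}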
 254 
 255 static ddi_dma_attr_t virtio_vq_dma_attr = {
 256         DMA_ATTR_V0,    /* Version number */
 257         0,              /* low address */
 258         /*
 259          * high address. Has to fit into 32 bits
 260          * after page-shifting
 261          */
 262         0x00000FFFFFFFFFFF,
 263         0xFFFFFFFF,     /* counter register max */
 264         VIRTIO_PAGE_SIZE, /* page alignment required */
 265         0x3F,           /* burst sizes: 1 - 32 */
 266         0x1,            /* minimum transfer size */
 267         0xFFFFFFFF,     /* max transfer size */
 268         0xFFFFFFFF,     /* address register max */
 269         1,              /* no scatter-gather */
 270         1,              /* device operates on bytes */
 271         0,              /* attr flag: set to 0 */
 272 };
 273 
 274 static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 275         DMA_ATTR_V0,    /* Version number */
 276         0,              /* low address */
 277         0xFFFFFFFFFFFFFFFF, /* high address */
 278         0xFFFFFFFF,     /* counter register max */
 279         1,              /* No specific alignment */
 280         0x3F,           /* burst sizes: 1 - 32 */
 281         0x1,            /* minimum transfer size */
 282         0xFFFFFFFF,     /* max transfer size */


 306 }
 307 
 308 
 309 static int
 310 virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 311 {
 312         int allocsize, num;
 313         size_t len;
 314         unsigned int ncookies;
 315         int ret;
 316 
 317         num = entry->qe_queue->vq_indirect_num;
 318         ASSERT(num > 1);
 319 
 320         allocsize = sizeof (struct vring_desc) * num;
 321 
 322         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 323             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 324         if (ret != DDI_SUCCESS) {
 325                 dev_err(sc->sc_dev, CE_WARN,
 326                     "Failed to allocate dma handle for indirect descriptors,"
 327                     " entry %d, vq %d", entry->qe_index,
 328                     entry->qe_queue->vq_index);
 329                 goto out_alloc_handle;
 330         }
 331 
 332         ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
 333             allocsize, &virtio_vq_devattr,
 334             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 335             (caddr_t *)&entry->qe_indirect_descs, &len,
 336             &entry->qe_indirect_dma_acch);
 337         if (ret != DDI_SUCCESS) {
 338                 dev_err(sc->sc_dev, CE_WARN,
  339                     "Failed to allocate dma memory for indirect descriptors,"
 340                     " entry %d, vq %d,", entry->qe_index,
 341                     entry->qe_queue->vq_index);
 342                 goto out_alloc;
 343         }
 344 
 345         (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 346 
 347         ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 348             (caddr_t)entry->qe_indirect_descs, len,
 349             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 350             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
 351         if (ret != DDI_DMA_MAPPED) {
 352                 dev_err(sc->sc_dev, CE_WARN,
  353                     "Failed to bind dma memory for indirect descriptors, "
 354                     "entry %d, vq %d", entry->qe_index,
 355                     entry->qe_queue->vq_index);
 356                 goto out_bind;
 357         }
 358 
 359         /* We asked for a single segment */
 360         ASSERT(ncookies == 1);
 361 
 362         return (0);
 363 
 364 out_bind:
 365         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 366 out_alloc:
 367         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 368 out_alloc_handle:
 369 
 370         return (ret);
 371 }
 372 
 373 /*


 382         int indirect_num = vq->vq_indirect_num;
 383 
 384         /* free slot management */
 385         list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 386             offsetof(struct vq_entry, qe_list));
 387 
 388         for (i = 0; i < vq_size; i++) {
 389                 struct vq_entry *entry = &vq->vq_entries[i];
 390                 list_insert_tail(&vq->vq_freelist, entry);
 391                 entry->qe_index = i;
 392                 entry->qe_desc = &vq->vq_descs[i];
 393                 entry->qe_queue = vq;
 394 
 395                 if (indirect_num) {
 396                         ret = virtio_alloc_indirect(sc, entry);
 397                         if (ret)
 398                                 goto out_indirect;
 399                 }
 400         }
 401 
 402         mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
 403             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 404         mutex_init(&vq->vq_avail_lock, "virtio-avail",
 405             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 406         mutex_init(&vq->vq_used_lock, "virtio-used",
 407             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 408 
 409         return (0);
 410 
 411 out_indirect:
 412         for (i = 0; i < vq_size; i++) {
 413                 struct vq_entry *entry = &vq->vq_entries[i];
 414                 if (entry->qe_indirect_descs)
 415                         virtio_free_indirect(entry);
 416         }
 417 
 418         return (ret);
 419 }
 420 
 421 
 422 
 423 /*
 424  * Allocate/free a vq.
 425  */
 426 struct virtqueue *
 427 virtio_alloc_vq(struct virtio_softc *sc,
 428     unsigned int index,
 429     unsigned int size,
 430     unsigned int indirect_num,
 431     const char *name)
 432 {
 433         int vq_size, allocsize1, allocsize2, allocsize = 0;
 434         int ret;
 435         unsigned int ncookies;
 436         size_t len;
 437         struct virtqueue *vq;
 438 
 439 
 440         ddi_put16(sc->sc_ioh,
 441             /* LINTED E_BAD_PTR_CAST_ALIGN */
 442             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 443         vq_size = ddi_get16(sc->sc_ioh,
 444             /* LINTED E_BAD_PTR_CAST_ALIGN */
 445             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 446         if (vq_size == 0) {
 447                 dev_err(sc->sc_dev, CE_WARN,
  448                     "virtqueue does not exist, index %d for %s\n", index, name);
 449                 goto out;
 450         }
 451 
 452         vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 453 
 454         /* size 0 => use native vq size, good for receive queues. */
 455         if (size)
 456                 vq_size = MIN(vq_size, size);
 457 
 458         /* allocsize1: descriptor table + avail ring + pad */
 459         allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 460             sizeof (struct vring_avail) +
 461             sizeof (uint16_t) * vq_size);
 462         /* allocsize2: used ring + pad */
 463         allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
 464             + sizeof (struct vring_used_elem) * vq_size);
 465 
 466         allocsize = allocsize1 + allocsize2;
 467 
 468         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 469             DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 470         if (ret != DDI_SUCCESS) {
 471                 dev_err(sc->sc_dev, CE_WARN,
 472                     "Failed to allocate dma handle for vq %d", index);
 473                 goto out_alloc_handle;
 474         }
 475 
 476         ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 477             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 478             (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 479         if (ret != DDI_SUCCESS) {
 480                 dev_err(sc->sc_dev, CE_WARN,
  481                     "Failed to allocate dma memory for vq %d", index);
 482                 goto out_alloc;
 483         }
 484 
 485 
 486         ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 487             (caddr_t)vq->vq_vaddr, len,
 488             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 489             DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 490         if (ret != DDI_DMA_MAPPED) {
 491                 dev_err(sc->sc_dev, CE_WARN,
 492                     "Failed to bind dma memory for vq %d", index);
 493                 goto out_bind;
 494         }
 495 
 496         /* We asked for a single segment */
 497         ASSERT(ncookies == 1);
  498         /* and page-aligned buffers. */
 499         ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 500 
 501         (void) memset(vq->vq_vaddr, 0, allocsize);
 502 
 503         /* Make sure all zeros hit the buffer before we point the host to it */
 504         membar_producer();
 505 
 506         /* set the vq address */
 507         ddi_put32(sc->sc_ioh,
 508             /* LINTED E_BAD_PTR_CAST_ALIGN */


 515         vq->vq_index = index;
 516         vq->vq_descs = vq->vq_vaddr;
 517         vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 518         vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 519         vq->vq_usedoffset = allocsize1;
 520         vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 521 
 522         ASSERT(indirect_num == 0 ||
 523             virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 524         vq->vq_indirect_num = indirect_num;
 525 
 526         /* free slot management */
 527         vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 528             KM_SLEEP);
 529 
 530         ret = virtio_init_vq(sc, vq);
 531         if (ret)
 532                 goto out_init;
 533 
 534         dev_debug(sc->sc_dev, CE_NOTE,
  535             "Allocated %d entries for vq %d:%s (%d indirect descs)",
 536             vq_size, index, name, indirect_num * vq_size);
 537 
 538         return (vq);
 539 
 540 out_init:
 541         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 542         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 543 out_bind:
 544         ddi_dma_mem_free(&vq->vq_dma_acch);
 545 out_alloc:
 546         ddi_dma_free_handle(&vq->vq_dma_handle);
 547 out_alloc_handle:
 548         kmem_free(vq, sizeof (struct virtqueue));
 549 out:
 550         return (NULL);
 551 }
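/*
 * An allocation sketch, assuming a hypothetical leaf driver with a single
 * receive queue at virtqueue index 0; the function name is made up.
 */
static int
example_alloc_rx_vq(struct virtio_softc *sc, struct virtqueue **vqp)
{
        /* size 0: accept whatever queue size the device advertises */
        *vqp = virtio_alloc_vq(sc, 0, 0, 0, "rx");
        if (*vqp == NULL)
                return (DDI_FAILURE);

        return (DDI_SUCCESS);
}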
 552 
 553 
 554 void
 555 virtio_free_vq(struct virtqueue *vq)
 556 {
 557         struct virtio_softc *sc = vq->vq_owner;
 558         int i;
 559 
 560         /* tell device that there's no virtqueue any longer */
 561         ddi_put16(sc->sc_ioh,
 562             /* LINTED E_BAD_PTR_CAST_ALIGN */
 563             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 564             vq->vq_index);
 565         ddi_put32(sc->sc_ioh,
 566             /* LINTED E_BAD_PTR_CAST_ALIGN */
 567             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 568 
 569         /* Free the indirect descriptors, if any. */
 570         for (i = 0; i < vq->vq_num; i++) {
 571                 struct vq_entry *entry = &vq->vq_entries[i];
 572                 if (entry->qe_indirect_descs)
 573                         virtio_free_indirect(entry);


 631  */
 632 uint_t
 633 vq_num_used(struct virtqueue *vq)
 634 {
 635         /* vq->vq_freelist_lock would not help here. */
 636         return (vq->vq_used_entries);
 637 }
 638 
 639 static inline void
 640 virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 641     boolean_t write)
 642 {
 643         desc->addr = paddr;
 644         desc->len = len;
 645         desc->next = 0;
 646         desc->flags = 0;
 647 
 648         /* 'write' - from the driver's point of view */
 649         if (!write)
 650                 desc->flags = VRING_DESC_F_WRITE;
 651 
 652 
 653 }
 654 
 655 void
 656 virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 657     boolean_t write)
 658 {
 659         virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 660 }
 661 






 662 void
 663 virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 664     boolean_t write)
 665 {
 666         struct vring_desc *indirect_desc;
 667 
 668         ASSERT(qe->qe_queue->vq_indirect_num);
 669         ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 670 
 671         indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 672         virtio_ve_set_desc(indirect_desc, paddr, len, write);
 673         qe->qe_indirect_next++;
 674 }
 675 
 676 void
 677 virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 678     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 679 {
 680         int i;
 681 


 685                 ddi_dma_nextcookie(dma_handle, &dma_cookie);
 686         }
 687 }
 688 
 689 void
 690 virtio_sync_vq(struct virtqueue *vq)
 691 {
 692         struct virtio_softc *vsc = vq->vq_owner;
 693 
 694         /* Make sure the avail ring update hit the buffer */
 695         membar_producer();
 696 
 697         vq->vq_avail->idx = vq->vq_avail_idx;
 698 
 699         /* Make sure the avail idx update hits the buffer */
 700         membar_producer();
 701 
 702         /* Make sure we see the flags update */
 703         membar_consumer();
 704 
 705         if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
 706                 ddi_put16(vsc->sc_ioh,
 707                     /* LINTED E_BAD_PTR_CAST_ALIGN */
 708                     (uint16_t *)(vsc->sc_io_addr +
 709                     VIRTIO_CONFIG_QUEUE_NOTIFY),
 710                     vq->vq_index);

 711 }
 712 
 713 void
 714 virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 715 {
 716         struct virtqueue *vq = qe->qe_queue;
 717         struct vq_entry *head = qe;
 718         struct vring_desc *desc;
 719         int idx;
 720 
 721         ASSERT(qe);
 722 
 723         /*
 724          * Bind the descs together, paddr and len should be already
 725          * set with virtio_ve_set
 726          */
 727         do {
 728                 /* Bind the indirect descriptors */
 729                 if (qe->qe_indirect_next > 1) {
 730                         uint16_t i = 0;


 756                 }
 757 
 758                 qe = qe->qe_next;
 759         } while (qe);
 760 
 761         mutex_enter(&vq->vq_avail_lock);
 762         idx = vq->vq_avail_idx;
 763         vq->vq_avail_idx++;
 764 
 765         /* Make sure the bits hit the descriptor(s) */
 766         membar_producer();
 767         vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 768 
 769         /* Notify the device, if needed. */
 770         if (sync)
 771                 virtio_sync_vq(vq);
 772 
 773         mutex_exit(&vq->vq_avail_lock);
 774 }
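/*
 * A submit-side sketch, assuming vq_alloc_entry() is the free-slot
 * allocator paired with the vq_free_entry() used by virtio_free_chain()
 * below, and that paddr/len describe an already DMA-bound buffer.
 */
static void
example_submit_buf(struct virtqueue *vq, uint64_t paddr, uint32_t len)
{
        struct vq_entry *qe;

        qe = vq_alloc_entry(vq);
        if (qe == NULL)
                return;

        /* One device-readable buffer ('write' from the driver's side). */
        virtio_ve_set(qe, paddr, len, B_TRUE);

        /* Put the chain on the avail ring and notify the host. */
        virtio_push_chain(qe, B_TRUE);
}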
 775 
 776 /* Get a chain of descriptors from the used ring, if one is available. */


 777 struct vq_entry *
 778 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 779 {
 780         struct vq_entry *head;
 781         int slot;
 782         int usedidx;
 783 
 784         mutex_enter(&vq->vq_used_lock);
 785 
 786         /* No used entries? Bye. */
 787         if (vq->vq_used_idx == vq->vq_used->idx) {
 788                 mutex_exit(&vq->vq_used_lock);
 789                 return (NULL);
 790         }
 791 
 792         usedidx = vq->vq_used_idx;
 793         vq->vq_used_idx++;
 794         mutex_exit(&vq->vq_used_lock);
 795 
 796         usedidx %= vq->vq_num;


 802         *len = vq->vq_used->ring[usedidx].len;
 803 
 804         head = &vq->vq_entries[slot];
 805 
 806         return (head);
 807 }
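/*
 * A completion-side sketch, e.g. from a queue interrupt handler; the
 * function name and the per-buffer processing are placeholders.
 */
static void
example_drain_used(struct virtqueue *vq)
{
        struct vq_entry *qe;
        uint32_t len;

        while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
                /* 'len' bytes were written by the device; process them here */
                virtio_free_chain(qe);
        }
}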
 808 
 809 void
 810 virtio_free_chain(struct vq_entry *qe)
 811 {
 812         struct vq_entry *tmp;
 813         struct virtqueue *vq = qe->qe_queue;
 814 
 815         ASSERT(qe);
 816 
 817         do {
 818                 ASSERT(qe->qe_queue == vq);
 819                 tmp = qe->qe_next;
 820                 vq_free_entry(vq, qe);
 821                 qe = tmp;
 822         } while (tmp);
 823 }
 824 
 825 void
 826 virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 827 {
 828         first->qe_next = second;
 829 }
 830 
 831 static int
 832 virtio_register_msi(struct virtio_softc *sc,
 833     struct virtio_int_handler *config_handler,
 834     struct virtio_int_handler vq_handlers[],
 835     int intr_types)
 836 {
 837         int count, actual;
 838         int int_type;
 839         int i;
 840         int handler_count;
 841         int ret;
 842 
 843         /* If both MSI and MSI-x are reported, prefer MSI-x. */
 844         int_type = DDI_INTR_TYPE_MSI;
 845         if (intr_types & DDI_INTR_TYPE_MSIX)
 846                 int_type = DDI_INTR_TYPE_MSIX;
 847 
 848         /* Walk the handler table to get the number of handlers. */
 849         for (handler_count = 0;
 850             vq_handlers && vq_handlers[handler_count].vh_func;
 851             handler_count++)
 852                 ;
 853 
 854         /* +1 if there is a config change handler. */
 855         if (config_handler)
 856                 handler_count++;
 857 
 858         /* Number of MSIs supported by the device. */
 859         ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 860         if (ret != DDI_SUCCESS) {
 861                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 862                 return (ret);
 863         }
 864 
 865         /*
  866          * Those who try to register more handlers than the device
 867          * supports shall suffer.
 868          */
 869         ASSERT(handler_count <= count);
 870 
 871         sc->sc_intr_htable = kmem_zalloc(
 872             sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);
 873 
 874         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 875             handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 876         if (ret != DDI_SUCCESS) {
 877                 dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 878                 goto out_msi_alloc;
 879         }
 880 
 881         if (actual != handler_count) {
 882                 dev_err(sc->sc_dev, CE_WARN,
 883                     "Not enough MSI available: need %d, available %d",
 884                     handler_count, actual);
 885                 goto out_msi_available;
 886         }
 887 
 888         sc->sc_intr_num = handler_count;
 889         sc->sc_intr_config = B_FALSE;
 890         if (config_handler) {
 891                 sc->sc_intr_config = B_TRUE;
 892         }
 893 
  894         /* Assume they are all the same priority */
 895         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 896         if (ret != DDI_SUCCESS) {
 897                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 898                 goto out_msi_prio;
 899         }
 900 
 901         /* Add the vq handlers */
 902         for (i = 0; vq_handlers[i].vh_func; i++) {
 903                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 904                     vq_handlers[i].vh_func,
 905                     sc, vq_handlers[i].vh_priv);
 906                 if (ret != DDI_SUCCESS) {
 907                         dev_err(sc->sc_dev, CE_WARN,
 908                             "ddi_intr_add_handler failed");
 909                         /* Remove the handlers that succeeded. */
 910                         while (--i >= 0) {
 911                                 (void) ddi_intr_remove_handler(
 912                                     sc->sc_intr_htable[i]);
 913                         }
 914                         goto out_add_handlers;
 915                 }
 916         }
 917 
 918         /* Don't forget the config handler */
 919         if (config_handler) {
 920                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 921                     config_handler->vh_func,
 922                     sc, config_handler->vh_priv);
 923                 if (ret != DDI_SUCCESS) {
 924                         dev_err(sc->sc_dev, CE_WARN,
 925                             "ddi_intr_add_handler failed");
 926                         /* Remove the handlers that succeeded. */
 927                         while (--i >= 0) {
 928                                 (void) ddi_intr_remove_handler(
 929                                     sc->sc_intr_htable[i]);
 930                         }
 931                         goto out_add_handlers;
 932                 }
 933         }
 934 
 935         /* We know we are using MSI, so set the config offset. */
 936         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 937 
 938         ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
 939             &sc->sc_intr_cap);
 940         /* Just in case. */
 941         if (ret != DDI_SUCCESS)
 942                 sc->sc_intr_cap = 0;
  943         return (0);
 944 out_add_handlers:
 945 out_msi_prio:
 946 out_msi_available:
 947         for (i = 0; i < actual; i++)
 948                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
 949 out_msi_alloc:
  950         kmem_free(sc->sc_intr_htable,
  951             sizeof (ddi_intr_handle_t) * handler_count);
 952         return (ret);
 953 }
 954 
 955 struct virtio_handler_container {
 956         int nhandlers;
 957         struct virtio_int_handler config_handler;
 958         struct virtio_int_handler vq_handlers[];
 959 };
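/*
 * A sketch of the NULL-terminated vq handler table consumed by the
 * registration code; the rx interrupt routine is an assumption.  arg1 is
 * the softc passed by ddi_intr_add_handler() above, arg2 is vh_priv.
 */
static uint_t
example_rx_intr(caddr_t arg1, caddr_t arg2)
{
        /* pull and process completed rx chains here */
        return (DDI_INTR_CLAIMED);
}

static struct virtio_int_handler example_vq_handlers[] = {
        { .vh_func = example_rx_intr },
        { .vh_func = NULL },
};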


  991  * config_handler and vq_handlers may be allocated on the stack.
  992  * Take precautions not to lose them.
 993  */
 994 static int
 995 virtio_register_intx(struct virtio_softc *sc,
 996     struct virtio_int_handler *config_handler,
 997     struct virtio_int_handler vq_handlers[])
 998 {
 999         int vq_handler_count;
1000         int config_handler_count = 0;
1001         int actual;
1002         struct virtio_handler_container *vhc;
1003         int ret = DDI_FAILURE;
1004 
1005         /* Walk the handler table to get the number of handlers. */
1006         for (vq_handler_count = 0;
1007             vq_handlers && vq_handlers[vq_handler_count].vh_func;
1008             vq_handler_count++)
1009                 ;
1010 
1011         if (config_handler)
1012                 config_handler_count = 1;
1013 
1014         vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1015             sizeof (struct virtio_int_handler) * vq_handler_count,
1016             KM_SLEEP);
1017 
1018         vhc->nhandlers = vq_handler_count;
1019         (void) memcpy(vhc->vq_handlers, vq_handlers,
1020             sizeof (struct virtio_int_handler) * vq_handler_count);
1021 
1022         if (config_handler) {
1023                 (void) memcpy(&vhc->config_handler, config_handler,
1024                     sizeof (struct virtio_int_handler));
1025         }
1026 
1027         /* Just a single entry for a single interrupt. */
1028         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1029 
1030         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1031             DDI_INTR_TYPE_FIXED, 0, 1, &actual,
1032             DDI_INTR_ALLOC_NORMAL);
1033         if (ret != DDI_SUCCESS) {
1034                 dev_err(sc->sc_dev, CE_WARN,
1035                     "Failed to allocate a fixed interrupt: %d", ret);
1036                 goto out_int_alloc;
1037         }
1038 
1039         ASSERT(actual == 1);
1040         sc->sc_intr_num = 1;
1041 
1042         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1043         if (ret != DDI_SUCCESS) {
1044                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1045                 goto out_prio;
1046         }
1047 
1048         ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1049             virtio_intx_dispatch, sc, vhc);
1050         if (ret != DDI_SUCCESS) {
1051                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1052                 goto out_add_handlers;


1094                 if (!ret)
1095                         return (0);
1096         }
1097 
1098         /* Fall back to old-fashioned interrupts. */
1099         if (intr_types & DDI_INTR_TYPE_FIXED) {
1100                 dev_debug(sc->sc_dev, CE_WARN,
1101                     "Using legacy interrupts");
1102 
1103                 return (virtio_register_intx(sc, config_handler, vq_handlers));
1104         }
1105 
1106         dev_err(sc->sc_dev, CE_WARN,
1107             "MSI failed and fixed interrupts not supported. Giving up.");
1108         ret = DDI_FAILURE;
1109 
1110 out_inttype:
1111         return (ret);
1112 }
1113 
1114 
1115 static int
1116 virtio_enable_msi(struct virtio_softc *sc)
1117 {
1118         int ret, i;
1119         int vq_handler_count = sc->sc_intr_num;
1120 
1121         /* Number of handlers, not counting the config. */
1122         if (sc->sc_intr_config)
1123                 vq_handler_count--;
1124 
1125         /* Enable the interrupts. Either the whole block, or one by one. */
1126         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1127                 ret = ddi_intr_block_enable(sc->sc_intr_htable,
1128                     sc->sc_intr_num);
1129                 if (ret != DDI_SUCCESS) {
1130                         dev_err(sc->sc_dev, CE_WARN,
1131                             "Failed to enable MSI, falling back to INTx");
1132                         goto out_enable;
1133                 }
1134         } else {
1135                 for (i = 0; i < sc->sc_intr_num; i++) {
1136                         ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1137                         if (ret != DDI_SUCCESS) {
1138                                 dev_err(sc->sc_dev, CE_WARN,
1139                                     "Failed to enable MSI %d, "
1140                                     "falling back to INTx", i);
1141 
1142                                 while (--i >= 0) {
1143                                         (void) ddi_intr_disable(
1144                                             sc->sc_intr_htable[i]);
1145                                 }
1146                                 goto out_enable;
1147                         }
1148                 }
1149         }
1150 
1151         /* Bind the allocated MSI to the queues and config */
1152         for (i = 0; i < vq_handler_count; i++) {
1153                 int check;

1154                 ddi_put16(sc->sc_ioh,
1155                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1156                     (uint16_t *)(sc->sc_io_addr +
1157                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1158 
1159                 ddi_put16(sc->sc_ioh,
1160                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1161                     (uint16_t *)(sc->sc_io_addr +
1162                     VIRTIO_CONFIG_QUEUE_VECTOR), i);
1163 
1164                 check = ddi_get16(sc->sc_ioh,
1165                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1166                     (uint16_t *)(sc->sc_io_addr +
1167                     VIRTIO_CONFIG_QUEUE_VECTOR));
1168                 if (check != i) {
1169                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1170                             "for VQ %d, MSI %d. Check = %x", i, i, check);
1171                         ret = ENODEV;
1172                         goto out_bind;
1173                 }
1174         }
1175 
1176         if (sc->sc_intr_config) {
1177                 int check;

1178                 ddi_put16(sc->sc_ioh,
1179                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1180                     (uint16_t *)(sc->sc_io_addr +
1181                     VIRTIO_CONFIG_CONFIG_VECTOR), i);
1182 
1183                 check = ddi_get16(sc->sc_ioh,
1184                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1185                     (uint16_t *)(sc->sc_io_addr +
1186                     VIRTIO_CONFIG_CONFIG_VECTOR));
1187                 if (check != i) {
1188                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1189                             "for Config updates, MSI %d", i);
1190                         ret = ENODEV;
1191                         goto out_bind;
1192                 }
1193         }
1194 
1195         return (DDI_SUCCESS);
1196 
1197 out_bind:


1202                     (uint16_t *)(sc->sc_io_addr +
1203                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1204 
1205                 ddi_put16(sc->sc_ioh,
1206                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1207                     (uint16_t *)(sc->sc_io_addr +
1208                     VIRTIO_CONFIG_QUEUE_VECTOR),
1209                     VIRTIO_MSI_NO_VECTOR);
1210         }
1211         /* And the config */
1212         /* LINTED E_BAD_PTR_CAST_ALIGN */
1213         ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1214             VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1215 
1216         ret = DDI_FAILURE;
1217 
1218 out_enable:
1219         return (ret);
1220 }
1221 
1222 static int virtio_enable_intx(struct virtio_softc *sc)

1223 {
1224         int ret;
1225 
1226         ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1227         if (ret != DDI_SUCCESS)
1228                 dev_err(sc->sc_dev, CE_WARN,
1229                     "Failed to enable interrupt: %d", ret);


1230         return (ret);
1231 }
1232 
1233 /*
1234  * We can't enable/disable individual handlers in the INTx case, so do
1235  * the whole bunch even in the MSI case.
1236  */
1237 int
1238 virtio_enable_ints(struct virtio_softc *sc)
1239 {
1240 
1241         /* See if we are using MSI. */
1242         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1243                 return (virtio_enable_msi(sc));
1244 
1245         ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1246 
1247         return (virtio_enable_intx(sc));
1248 }
1249 


1265                         ddi_put16(sc->sc_ioh,
1266                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1267                             (uint16_t *)(sc->sc_io_addr +
1268                             VIRTIO_CONFIG_QUEUE_VECTOR),
1269                             VIRTIO_MSI_NO_VECTOR);
1270                 }
1271                 /* And the config */
1272                 /* LINTED E_BAD_PTR_CAST_ALIGN */
1273                 ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1274                     VIRTIO_CONFIG_CONFIG_VECTOR),
1275                     VIRTIO_MSI_NO_VECTOR);
1276 
1277         }
1278 
1279         /* Disable the interrupts. Either the whole block, or one by one. */
1280         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1281                 ret = ddi_intr_block_disable(sc->sc_intr_htable,
1282                     sc->sc_intr_num);
1283                 if (ret != DDI_SUCCESS) {
1284                         dev_err(sc->sc_dev, CE_WARN,
1285                             "Failed to disable MSIs, won't be able to "
1286                             "reuse next time");
1287                 }
1288         } else {
1289                 for (i = 0; i < sc->sc_intr_num; i++) {
1290                         ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1291                         if (ret != DDI_SUCCESS) {
1292                                 dev_err(sc->sc_dev, CE_WARN,
1293                                     "Failed to disable interrupt %d, "
1294                                     "won't be able to reuse", i);
1295 
1296                         }
1297                 }
1298         }
1299 
1300 
1301         for (i = 0; i < sc->sc_intr_num; i++) {
1302                 (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1303         }
1304 
1305         for (i = 0; i < sc->sc_intr_num; i++)
1306                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
1307 
1308         kmem_free(sc->sc_intr_htable,
1309             sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
1310 
1311 
1312         /* After disabling interrupts, the config offset is non-MSI. */
1313         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1314 }
1315 
1316 /*
1317  * Module linkage information for the kernel.
1318  */
1319 static struct modlmisc modlmisc = {
1320         &mod_miscops, /* Type of module */
1321         "VirtIO common library module",
1322 };
1323 
1324 static struct modlinkage modlinkage = {
1325         MODREV_1,
1326         {
1327                 (void *)&modlmisc,
1328                 NULL
1329         }
1330 };
1331 


  54 #include <sys/conf.h>
  55 #include <sys/kmem.h>
  56 #include <sys/debug.h>
  57 #include <sys/modctl.h>
  58 #include <sys/autoconf.h>
  59 #include <sys/ddi_impldefs.h>
  60 #include <sys/ddi.h>
  61 #include <sys/sunddi.h>
  62 #include <sys/sunndi.h>
  63 #include <sys/avintr.h>
  64 #include <sys/spl.h>
  65 #include <sys/promif.h>
  66 #include <sys/list.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/sysmacros.h>
  70 #include <sys/pci.h>
  71 
  72 #include "virtiovar.h"
  73 #include "virtioreg.h"
  74 
  75 #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  76 #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  77 #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  78             ~(VIRTIO_PAGE_SIZE-1))
  79 
  80 void
  81 virtio_set_status(struct virtio_softc *sc, unsigned int status)
  82 {
  83         int old = 0;
  84 
  85         if (status != 0) {
  86                 old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +

  87                     VIRTIO_CONFIG_DEVICE_STATUS));
  88         }
  89 
  90         ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  91             VIRTIO_CONFIG_DEVICE_STATUS), status | old);

  92 }
  93 
  94 /*
  95  * Negotiate features, save the result in sc->sc_features
  96  */
  97 uint32_t
  98 virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99 {
 100         uint32_t host_features;
 101         uint32_t features;
 102 
 103         host_features = ddi_get32(sc->sc_ioh,
 104             /* LINTED E_BAD_PTR_CAST_ALIGN */
 105             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106 
 107         dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",

 108             host_features, guest_features);
 109 
 110         features = host_features & guest_features;
 111         ddi_put32(sc->sc_ioh,
 112             /* LINTED E_BAD_PTR_CAST_ALIGN */
 113             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 114             features);
 115 
 116         sc->sc_features = features;
 117 
 118         return (host_features);
 119 }
 120 
 121 size_t
 122 virtio_show_features(uint32_t features, char *buf, size_t len)

 123 {
 124         char *orig_buf = buf;
 125         char *bufend = buf + len;
 126 
 127         /* LINTED E_PTRDIFF_OVERFLOW */
 128         buf += snprintf(buf, bufend - buf, "Generic ( ");
 129         if (features & VIRTIO_F_RING_INDIRECT_DESC)
 130                 /* LINTED E_PTRDIFF_OVERFLOW */
 131                 buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 132 
 133         /* LINTED E_PTRDIFF_OVERFLOW */
 134         buf += snprintf(buf, bufend - buf, ") ");
 135 
 136         /* LINTED E_PTRDIFF_OVERFLOW */
 137         return (buf - orig_buf);
 138 }
 139 
 140 boolean_t
 141 virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 142 {


 175 uint64_t
 176 virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 177 {
 178         uint64_t r;
 179 
 180         ASSERT(sc->sc_config_offset);
 181         r = ddi_get32(sc->sc_ioh,
 182             /* LINTED E_BAD_PTR_CAST_ALIGN */
 183             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 184             index + sizeof (uint32_t)));
 185 
 186         r <<= 32;
 187 
 188         r += ddi_get32(sc->sc_ioh,
 189             /* LINTED E_BAD_PTR_CAST_ALIGN */
 190             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 191         return (r);
 192 }
 193 
 194 void
 195 virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
 196     uint8_t value)
 197 {
 198         ASSERT(sc->sc_config_offset);
 199         ddi_put8(sc->sc_ioh,
 200             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 201 }
 202 
 203 void
 204 virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
 205     uint16_t value)
 206 {
 207         ASSERT(sc->sc_config_offset);
 208         ddi_put16(sc->sc_ioh,
 209             /* LINTED E_BAD_PTR_CAST_ALIGN */
 210             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 211 }
 212 
 213 void
 214 virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
 215     uint32_t value)
 216 {
 217         ASSERT(sc->sc_config_offset);
 218         ddi_put32(sc->sc_ioh,
 219             /* LINTED E_BAD_PTR_CAST_ALIGN */
 220             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 221 }
 222 
 223 void
 224 virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
 225     uint64_t value)
 226 {
 227         ASSERT(sc->sc_config_offset);
 228         ddi_put32(sc->sc_ioh,
 229             /* LINTED E_BAD_PTR_CAST_ALIGN */
 230             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 231             value & 0xFFFFFFFF);
 232         ddi_put32(sc->sc_ioh,
 233             /* LINTED E_BAD_PTR_CAST_ALIGN */
 234             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 235             index + sizeof (uint32_t)), value >> 32);
 236 }
 237 
 238 /*
 239  * Start/stop vq interrupt.  No guarantee.
 240  */
 241 void
 242 virtio_stop_vq_intr(struct virtqueue *vq)
 243 {
 244         vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 245 }
 246 
 247 void
 248 virtio_start_vq_intr(struct virtqueue *vq)
 249 {
 250         vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 251 }
 252 
 253 static ddi_dma_attr_t virtio_vq_dma_attr = {
 254         DMA_ATTR_V0,            /* Version number */
 255         0,                      /* low address */
 256         0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
 257                                 /* after page-shifting */



 258         0xFFFFFFFF,             /* counter register max */
 259         VIRTIO_PAGE_SIZE,       /* page alignment required */
 260         0x3F,                   /* burst sizes: 1 - 32 */
 261         0x1,                    /* minimum transfer size */
 262         0xFFFFFFFF,             /* max transfer size */
 263         0xFFFFFFFF,             /* address register max */
 264         1,                      /* no scatter-gather */
 265         1,                      /* device operates on bytes */
 266         0,                      /* attr flag: set to 0 */
 267 };
 268 
 269 static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 270         DMA_ATTR_V0,            /* Version number */
 271         0,                      /* low address */
 272         0xFFFFFFFFFFFFFFFF,     /* high address */
 273         0xFFFFFFFF,             /* counter register max */
 274         1,                      /* No specific alignment */
 275         0x3F,                   /* burst sizes: 1 - 32 */
 276         0x1,                    /* minimum transfer size */
 277         0xFFFFFFFF,             /* max transfer size */


 301 }
 302 
 303 
 304 static int
 305 virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 306 {
 307         int allocsize, num;
 308         size_t len;
 309         unsigned int ncookies;
 310         int ret;
 311 
 312         num = entry->qe_queue->vq_indirect_num;
 313         ASSERT(num > 1);
 314 
 315         allocsize = sizeof (struct vring_desc) * num;
 316 
 317         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 318             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 319         if (ret != DDI_SUCCESS) {
 320                 dev_err(sc->sc_dev, CE_WARN,
 321                     "Failed to allocate dma handle for indirect descriptors, "
 322                     "entry %d, vq %d", entry->qe_index,
 323                     entry->qe_queue->vq_index);
 324                 goto out_alloc_handle;
 325         }
 326 
 327         ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
 328             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,

 329             (caddr_t *)&entry->qe_indirect_descs, &len,
 330             &entry->qe_indirect_dma_acch);
 331         if (ret != DDI_SUCCESS) {
 332                 dev_err(sc->sc_dev, CE_WARN,
 333                     "Failed to allocate dma memory for indirect descriptors, "
 334                     "entry %d, vq %d,", entry->qe_index,
 335                     entry->qe_queue->vq_index);
 336                 goto out_alloc;
 337         }
 338 
 339         (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 340 
 341         ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 342             (caddr_t)entry->qe_indirect_descs, len,
 343             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 344             &entry->qe_indirect_dma_cookie, &ncookies);
 345         if (ret != DDI_DMA_MAPPED) {
 346                 dev_err(sc->sc_dev, CE_WARN,
 347                     "Failed to bind dma memory for indirect descriptors, "
 348                     "entry %d, vq %d", entry->qe_index,
 349                     entry->qe_queue->vq_index);
 350                 goto out_bind;
 351         }
 352 
 353         /* We asked for a single segment */
 354         ASSERT(ncookies == 1);
 355 
 356         return (0);
 357 
 358 out_bind:
 359         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 360 out_alloc:
 361         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 362 out_alloc_handle:
 363 
 364         return (ret);
 365 }
 366 
 367 /*


 376         int indirect_num = vq->vq_indirect_num;
 377 
 378         /* free slot management */
 379         list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 380             offsetof(struct vq_entry, qe_list));
 381 
 382         for (i = 0; i < vq_size; i++) {
 383                 struct vq_entry *entry = &vq->vq_entries[i];
 384                 list_insert_tail(&vq->vq_freelist, entry);
 385                 entry->qe_index = i;
 386                 entry->qe_desc = &vq->vq_descs[i];
 387                 entry->qe_queue = vq;
 388 
 389                 if (indirect_num) {
 390                         ret = virtio_alloc_indirect(sc, entry);
 391                         if (ret)
 392                                 goto out_indirect;
 393                 }
 394         }
 395 
 396         mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
 397             DDI_INTR_PRI(sc->sc_intr_prio));
 398         mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
 399             DDI_INTR_PRI(sc->sc_intr_prio));
 400         mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
 401             DDI_INTR_PRI(sc->sc_intr_prio));
 402 
 403         return (0);
 404 
 405 out_indirect:
 406         for (i = 0; i < vq_size; i++) {
 407                 struct vq_entry *entry = &vq->vq_entries[i];
 408                 if (entry->qe_indirect_descs)
 409                         virtio_free_indirect(entry);
 410         }
 411 
 412         return (ret);
 413 }
 414 


 415 /*
 416  * Allocate/free a vq.
 417  */
 418 struct virtqueue *
 419 virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
 420     unsigned int indirect_num, const char *name)



 421 {
 422         int vq_size, allocsize1, allocsize2, allocsize = 0;
 423         int ret;
 424         unsigned int ncookies;
 425         size_t len;
 426         struct virtqueue *vq;
 427 

 428         ddi_put16(sc->sc_ioh,
 429             /* LINTED E_BAD_PTR_CAST_ALIGN */
 430             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 431         vq_size = ddi_get16(sc->sc_ioh,
 432             /* LINTED E_BAD_PTR_CAST_ALIGN */
 433             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 434         if (vq_size == 0) {
 435                 dev_err(sc->sc_dev, CE_WARN,
  436                     "virtqueue does not exist, index %d for %s\n", index, name);
 437                 goto out;
 438         }
 439 
 440         vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 441 
 442         /* size 0 => use native vq size, good for receive queues. */
 443         if (size)
 444                 vq_size = MIN(vq_size, size);
 445 
 446         /* allocsize1: descriptor table + avail ring + pad */
 447         allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 448             sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);

 449         /* allocsize2: used ring + pad */
 450         allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
 451             sizeof (struct vring_used_elem) * vq_size);
 452 
 453         allocsize = allocsize1 + allocsize2;
 454 
 455         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 456             DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 457         if (ret != DDI_SUCCESS) {
 458                 dev_err(sc->sc_dev, CE_WARN,
 459                     "Failed to allocate dma handle for vq %d", index);
 460                 goto out_alloc_handle;
 461         }
 462 
 463         ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 464             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 465             (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 466         if (ret != DDI_SUCCESS) {
 467                 dev_err(sc->sc_dev, CE_WARN,
 468                     "Failed to allocate dma memory for vq %d", index);
 469                 goto out_alloc;
 470         }
 471 

 472         ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 473             (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,

 474             DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 475         if (ret != DDI_DMA_MAPPED) {
 476                 dev_err(sc->sc_dev, CE_WARN,
 477                     "Failed to bind dma memory for vq %d", index);
 478                 goto out_bind;
 479         }
 480 
 481         /* We asked for a single segment */
 482         ASSERT(ncookies == 1);
 483         /* and page-ligned buffers. */
 484         ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 485 
 486         (void) memset(vq->vq_vaddr, 0, allocsize);
 487 
 488         /* Make sure all zeros hit the buffer before we point the host to it */
 489         membar_producer();
 490 
 491         /* set the vq address */
 492         ddi_put32(sc->sc_ioh,
 493             /* LINTED E_BAD_PTR_CAST_ALIGN */


 500         vq->vq_index = index;
 501         vq->vq_descs = vq->vq_vaddr;
 502         vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 503         vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 504         vq->vq_usedoffset = allocsize1;
 505         vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 506 
 507         ASSERT(indirect_num == 0 ||
 508             virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 509         vq->vq_indirect_num = indirect_num;
 510 
 511         /* free slot management */
 512         vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 513             KM_SLEEP);
 514 
 515         ret = virtio_init_vq(sc, vq);
 516         if (ret)
 517                 goto out_init;
 518 
 519         dev_debug(sc->sc_dev, CE_NOTE,
 520             "Allocated %d entries for vq %d:%s (%d indirect descs)",
 521             vq_size, index, name, indirect_num * vq_size);
 522 
 523         return (vq);
 524 
 525 out_init:
 526         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 527         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 528 out_bind:
 529         ddi_dma_mem_free(&vq->vq_dma_acch);
 530 out_alloc:
 531         ddi_dma_free_handle(&vq->vq_dma_handle);
 532 out_alloc_handle:
 533         kmem_free(vq, sizeof (struct virtqueue));
 534 out:
 535         return (NULL);
 536 }
 537 

 538 void
 539 virtio_free_vq(struct virtqueue *vq)
 540 {
 541         struct virtio_softc *sc = vq->vq_owner;
 542         int i;
 543 
 544         /* tell device that there's no virtqueue any longer */
 545         ddi_put16(sc->sc_ioh,
 546             /* LINTED E_BAD_PTR_CAST_ALIGN */
 547             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 548             vq->vq_index);
 549         ddi_put32(sc->sc_ioh,
 550             /* LINTED E_BAD_PTR_CAST_ALIGN */
 551             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 552 
 553         /* Free the indirect descriptors, if any. */
 554         for (i = 0; i < vq->vq_num; i++) {
 555                 struct vq_entry *entry = &vq->vq_entries[i];
 556                 if (entry->qe_indirect_descs)
 557                         virtio_free_indirect(entry);


 615  */
 616 uint_t
 617 vq_num_used(struct virtqueue *vq)
 618 {
 619         /* vq->vq_freelist_lock would not help here. */
 620         return (vq->vq_used_entries);
 621 }
 622 
 623 static inline void
 624 virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 625     boolean_t write)
 626 {
 627         desc->addr = paddr;
 628         desc->len = len;
 629         desc->next = 0;
 630         desc->flags = 0;
 631 
 632         /* 'write' - from the driver's point of view */
 633         if (!write)
 634                 desc->flags = VRING_DESC_F_WRITE;


 635 }
 636 
 637 void
 638 virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 639     boolean_t write)
 640 {
 641         virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 642 }
 643 
 644 unsigned int
 645 virtio_ve_indirect_available(struct vq_entry *qe)
 646 {
 647         return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
 648 }
 649 
 650 void
 651 virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 652     boolean_t write)
 653 {
 654         struct vring_desc *indirect_desc;
 655 
 656         ASSERT(qe->qe_queue->vq_indirect_num);
 657         ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 658 
 659         indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 660         virtio_ve_set_desc(indirect_desc, paddr, len, write);
 661         qe->qe_indirect_next++;
 662 }
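/*
 * A sketch of guarding against indirect-table overflow before adding one
 * more segment; the caller and its DMA cookie are assumed.
 */
static boolean_t
example_add_seg(struct vq_entry *qe, const ddi_dma_cookie_t *cookie)
{
        if (virtio_ve_indirect_available(qe) == 0)
                return (B_FALSE);

        virtio_ve_add_indirect_buf(qe, cookie->dmac_laddress,
            cookie->dmac_size, B_TRUE);

        return (B_TRUE);
}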
 663 
 664 void
 665 virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 666     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 667 {
 668         int i;
 669 


 673                 ddi_dma_nextcookie(dma_handle, &dma_cookie);
 674         }
 675 }
 676 
 677 void
 678 virtio_sync_vq(struct virtqueue *vq)
 679 {
 680         struct virtio_softc *vsc = vq->vq_owner;
 681 
 682         /* Make sure the avail ring update hit the buffer */
 683         membar_producer();
 684 
 685         vq->vq_avail->idx = vq->vq_avail_idx;
 686 
 687         /* Make sure the avail idx update hits the buffer */
 688         membar_producer();
 689 
 690         /* Make sure we see the flags update */
 691         membar_consumer();
 692 
 693         if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 694                 ddi_put16(vsc->sc_ioh,
 695                     /* LINTED E_BAD_PTR_CAST_ALIGN */
 696                     (uint16_t *)(vsc->sc_io_addr +
 697                     VIRTIO_CONFIG_QUEUE_NOTIFY),
 698                     vq->vq_index);
 699         }
 700 }
 701 
 702 void
 703 virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 704 {
 705         struct virtqueue *vq = qe->qe_queue;
 706         struct vq_entry *head = qe;
 707         struct vring_desc *desc;
 708         int idx;
 709 
 710         ASSERT(qe);
 711 
 712         /*
 713          * Bind the descs together; paddr and len should already be
 714          * set with virtio_ve_set().
 715          */
 716         do {
 717                 /* Bind the indirect descriptors */
 718                 if (qe->qe_indirect_next > 1) {
 719                         uint16_t i = 0;


 745                 }
 746 
 747                 qe = qe->qe_next;
 748         } while (qe);
 749 
 750         mutex_enter(&vq->vq_avail_lock);
 751         idx = vq->vq_avail_idx;
 752         vq->vq_avail_idx++;
 753 
 754         /* Make sure the bits hit the descriptor(s) */
 755         membar_producer();
 756         vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 757 
 758         /* Notify the device, if needed. */
 759         if (sync)
 760                 virtio_sync_vq(vq);
 761 
 762         mutex_exit(&vq->vq_avail_lock);
 763 }
 764 
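Combined with virtio_ve_set(), a minimal submission path might look like the
sketch below, assuming an entry allocator along the lines of vq_alloc_entry()
(not shown in this excerpt) and an already DMA-mapped buffer at buf_paddr:

        struct vq_entry *qe;

        if ((qe = vq_alloc_entry(vq)) == NULL)
                return (EBUSY);         /* ring is currently full */

        /* One buffer, already DMA-mapped; the device will read it. */
        virtio_ve_set(qe, buf_paddr, buf_len, B_TRUE);

        /* Publish the chain and kick the device if it asked for kicks. */
        virtio_push_chain(qe, B_TRUE);
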
 765 /*
 766  * Get a chain of descriptors from the used ring, if one is available.
 767  */
 768 struct vq_entry *
 769 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 770 {
 771         struct vq_entry *head;
 772         int slot;
 773         int usedidx;
 774 
 775         mutex_enter(&vq->vq_used_lock);
 776 
 777         /* No used entries? Bye. */
 778         if (vq->vq_used_idx == vq->vq_used->idx) {
 779                 mutex_exit(&vq->vq_used_lock);
 780                 return (NULL);
 781         }
 782 
 783         usedidx = vq->vq_used_idx;
 784         vq->vq_used_idx++;
 785         mutex_exit(&vq->vq_used_lock);
 786 
 787         usedidx %= vq->vq_num;


 793         *len = vq->vq_used->ring[usedidx].len;
 794 
 795         head = &vq->vq_entries[slot];
 796 
 797         return (head);
 798 }
 799 
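A typical consumer, e.g. a queue interrupt handler, drains the used ring in a
loop and hands each chain back with virtio_free_chain() (defined just below);
the processing step is left as a placeholder:

        struct vq_entry *qe;
        uint32_t len;

        while ((qe = virtio_pull_chain(vq, &len)) != NULL) {
                /* ... hand the 'len' bytes written by the device upstream ... */
                virtio_free_chain(qe);
        }
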
 800 void
 801 virtio_free_chain(struct vq_entry *qe)
 802 {
 803         struct vq_entry *tmp;
 804         struct virtqueue *vq = qe->qe_queue;
 805 
 806         ASSERT(qe);
 807 
 808         do {
 809                 ASSERT(qe->qe_queue == vq);
 810                 tmp = qe->qe_next;
 811                 vq_free_entry(vq, qe);
 812                 qe = tmp;
 813         } while (tmp != NULL);
 814 }
 815 
 816 void
 817 virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 818 {
 819         first->qe_next = second;
 820 }
 821 
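For example, a request whose header and payload live in two entries can be
linked into one chain before it is pushed; the hdr_/data_ names are
placeholders:

        virtio_ve_set(hdr_qe, hdr_paddr, hdr_len, B_TRUE);
        virtio_ve_set(data_qe, data_paddr, data_len, B_TRUE);
        virtio_ventry_stick(hdr_qe, data_qe);
        virtio_push_chain(hdr_qe, B_TRUE);
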
 822 static int
 823 virtio_register_msi(struct virtio_softc *sc,
 824     struct virtio_int_handler *config_handler,
 825     struct virtio_int_handler vq_handlers[], int intr_types)
 826 {
 827         int count, actual;
 828         int int_type;
 829         int i;
 830         int handler_count;
 831         int ret;
 832 
 833         /* If both MSI and MSI-X are reported, prefer MSI-X. */
 834         int_type = DDI_INTR_TYPE_MSI;
 835         if (intr_types & DDI_INTR_TYPE_MSIX)
 836                 int_type = DDI_INTR_TYPE_MSIX;
 837 
 838         /* Walk the handler table to get the number of handlers. */
 839         for (handler_count = 0;
 840             vq_handlers && vq_handlers[handler_count].vh_func;
 841             handler_count++)
 842                 ;
 843 
 844         /* +1 if there is a config change handler. */
 845         if (config_handler != NULL)
 846                 handler_count++;
 847 
 848         /* Number of MSIs supported by the device. */
 849         ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 850         if (ret != DDI_SUCCESS) {
 851                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 852                 return (ret);
 853         }
 854 
 855         /*
 856          * Those who try to register more handlers than the device
 857          * supports shall suffer.
 858          */
 859         ASSERT(handler_count <= count);
 860 
 861         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
 862             handler_count, KM_SLEEP);
 863 
 864         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 865             handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 866         if (ret != DDI_SUCCESS) {
 867                 dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 868                 goto out_msi_alloc;
 869         }
 870 
 871         if (actual != handler_count) {
 872                 dev_err(sc->sc_dev, CE_WARN,
 873                     "Not enough MSI available: need %d, available %d",
 874                     handler_count, actual);
 875                 goto out_msi_available;
 876         }
 877 
 878         sc->sc_intr_num = handler_count;
 879         sc->sc_intr_config = B_FALSE;
 880         if (config_handler != NULL) {
 881                 sc->sc_intr_config = B_TRUE;
 882         }
 883 
 884         /* Assume they all have the same priority. */
 885         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 886         if (ret != DDI_SUCCESS) {
 887                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 888                 goto out_msi_prio;
 889         }
 890 
 891         /* Add the vq handlers */
 892         for (i = 0; vq_handlers[i].vh_func; i++) {
 893                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 894                     vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 895                 if (ret != DDI_SUCCESS) {
 896                         dev_err(sc->sc_dev, CE_WARN,
 897                             "ddi_intr_add_handler failed");
 898                         /* Remove the handlers that succeeded. */
 899                         while (--i >= 0) {
 900                                 (void) ddi_intr_remove_handler(
 901                                     sc->sc_intr_htable[i]);
 902                         }
 903                         goto out_add_handlers;
 904                 }
 905         }
 906 
 907         /* Don't forget the config handler */
 908         if (config_handler != NULL) {
 909                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 910                     config_handler->vh_func, sc, config_handler->vh_priv);
 911                 if (ret != DDI_SUCCESS) {
 912                         dev_err(sc->sc_dev, CE_WARN,
 913                             "ddi_intr_add_handler failed");
 914                         /* Remove the handlers that succeeded. */
 915                         while (--i >= 0) {
 916                                 (void) ddi_intr_remove_handler(
 917                                     sc->sc_intr_htable[i]);
 918                         }
 919                         goto out_add_handlers;
 920                 }
 921         }
 922 
 923         /* We know we are using MSI, so set the config offset. */
 924         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 925 
 926         ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 927         /* Just in case. */
 928         if (ret != DDI_SUCCESS)
 929                 sc->sc_intr_cap = 0;
 930 
             return (0);
 931 out_add_handlers:
 932 out_msi_prio:
 933 out_msi_available:
 934         for (i = 0; i < actual; i++)
 935                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
 936 out_msi_alloc:
 937         kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) * handler_count);
 938 
 939         return (ret);
 940 }
 941 
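To illustrate the calling convention, a leaf driver builds a NULL-terminated
handler table and passes it to the registration entry point exported by this
module (virtio_register_ints() in virtiovar.h). The vioif_* handler names, the
leaf soft state ld, and its embedded ld_virtio (a struct virtio_softc) are
placeholders. Queue handlers are bound to MSI vectors in table order, with the
config handler (if any) taking the vector after them, matching
virtio_enable_msi() below:

        struct virtio_int_handler vioif_vq_h[] = {
                { .vh_func = vioif_rx_handler, .vh_priv = ld },
                { .vh_func = vioif_tx_handler, .vh_priv = ld },
                { .vh_func = NULL }             /* terminates the table */
        };
        struct virtio_int_handler vioif_config_h = {
                .vh_func = vioif_config_handler, .vh_priv = ld
        };

        ret = virtio_register_ints(&ld->ld_virtio, &vioif_config_h,
            vioif_vq_h);
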
 942 struct virtio_handler_container {
 943         int nhandlers;
 944         struct virtio_int_handler config_handler;
 945         struct virtio_int_handler vq_handlers[];
 946 };


 978  * config_handler and vq_handlers may be allocated on stack.
 979  * Take precautions not to lose them.
 980  */
 981 static int
 982 virtio_register_intx(struct virtio_softc *sc,
 983     struct virtio_int_handler *config_handler,
 984     struct virtio_int_handler vq_handlers[])
 985 {
 986         int vq_handler_count;
 987         int config_handler_count = 0;
 988         int actual;
 989         struct virtio_handler_container *vhc;
 990         int ret = DDI_FAILURE;
 991 
 992         /* Walk the handler table to get the number of handlers. */
 993         for (vq_handler_count = 0;
 994             vq_handlers && vq_handlers[vq_handler_count].vh_func;
 995             vq_handler_count++)
 996                 ;
 997 
 998         if (config_handler != NULL)
 999                 config_handler_count = 1;
1000 
1001         vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1002             sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1003 
1004         vhc->nhandlers = vq_handler_count;
1005         (void) memcpy(vhc->vq_handlers, vq_handlers,
1006             sizeof (struct virtio_int_handler) * vq_handler_count);
1007 
1008         if (config_handler != NULL) {
1009                 (void) memcpy(&vhc->config_handler, config_handler,
1010                     sizeof (struct virtio_int_handler));
1011         }
1012 
1013         /* Just a single entry for a single interrupt. */
1014         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1015 
1016         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1017             DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1018         if (ret != DDI_SUCCESS) {
1019                 dev_err(sc->sc_dev, CE_WARN,
1020                     "Failed to allocate a fixed interrupt: %d", ret);
1021                 goto out_int_alloc;
1022         }
1023 
1024         ASSERT(actual == 1);
1025         sc->sc_intr_num = 1;
1026 
1027         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1028         if (ret != DDI_SUCCESS) {
1029                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1030                 goto out_prio;
1031         }
1032 
1033         ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1034             virtio_intx_dispatch, sc, vhc);
1035         if (ret != DDI_SUCCESS) {
1036                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1037                 goto out_add_handlers;


1079                 if (!ret)
1080                         return (0);
1081         }
1082 
1083         /* Fall back to old-fashioned interrupts. */
1084         if (intr_types & DDI_INTR_TYPE_FIXED) {
1085                 dev_debug(sc->sc_dev, CE_WARN,
1086                     "Using legacy interrupts");
1087 
1088                 return (virtio_register_intx(sc, config_handler, vq_handlers));
1089         }
1090 
1091         dev_err(sc->sc_dev, CE_WARN,
1092             "MSI failed and fixed interrupts not supported. Giving up.");
1093         ret = DDI_FAILURE;
1094 
1095 out_inttype:
1096         return (ret);
1097 }
1098 
1099 static int
1100 virtio_enable_msi(struct virtio_softc *sc)
1101 {
1102         int ret, i;
1103         int vq_handler_count = sc->sc_intr_num;
1104 
 1105         /* Number of handlers, not counting the config. */
1106         if (sc->sc_intr_config)
1107                 vq_handler_count--;
1108 
 1109         /* Enable the interrupts. Either the whole block, or one by one. */
1110         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111                 ret = ddi_intr_block_enable(sc->sc_intr_htable,
1112                     sc->sc_intr_num);
1113                 if (ret != DDI_SUCCESS) {
1114                         dev_err(sc->sc_dev, CE_WARN,
1115                             "Failed to enable MSI, falling back to INTx");
1116                         goto out_enable;
1117                 }
1118         } else {
1119                 for (i = 0; i < sc->sc_intr_num; i++) {
1120                         ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1121                         if (ret != DDI_SUCCESS) {
1122                                 dev_err(sc->sc_dev, CE_WARN,
1123                                     "Failed to enable MSI %d, "
1124                                     "falling back to INTx", i);
1125 
1126                                 while (--i >= 0) {
1127                                         (void) ddi_intr_disable(
1128                                             sc->sc_intr_htable[i]);
1129                                 }
1130                                 goto out_enable;
1131                         }
1132                 }
1133         }
1134 
1135         /* Bind the allocated MSI to the queues and config */
1136         for (i = 0; i < vq_handler_count; i++) {
1137                 int check;
1138 
1139                 ddi_put16(sc->sc_ioh,
1140                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1141                     (uint16_t *)(sc->sc_io_addr +
1142                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1143 
1144                 ddi_put16(sc->sc_ioh,
1145                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1146                     (uint16_t *)(sc->sc_io_addr +
1147                     VIRTIO_CONFIG_QUEUE_VECTOR), i);
1148 
1149                 check = ddi_get16(sc->sc_ioh,
1150                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1151                     (uint16_t *)(sc->sc_io_addr +
1152                     VIRTIO_CONFIG_QUEUE_VECTOR));
1153                 if (check != i) {
1154                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1155                             "for VQ %d, MSI %d. Check = %x", i, i, check);
1156                         ret = ENODEV;
1157                         goto out_bind;
1158                 }
1159         }
1160 
1161         if (sc->sc_intr_config) {
1162                 int check;
1163 
1164                 ddi_put16(sc->sc_ioh,
1165                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1166                     (uint16_t *)(sc->sc_io_addr +
1167                     VIRTIO_CONFIG_CONFIG_VECTOR), i);
1168 
1169                 check = ddi_get16(sc->sc_ioh,
1170                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1171                     (uint16_t *)(sc->sc_io_addr +
1172                     VIRTIO_CONFIG_CONFIG_VECTOR));
1173                 if (check != i) {
1174                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1175                             "for Config updates, MSI %d", i);
1176                         ret = ENODEV;
1177                         goto out_bind;
1178                 }
1179         }
1180 
1181         return (DDI_SUCCESS);
1182 
1183 out_bind:


1188                     (uint16_t *)(sc->sc_io_addr +
1189                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1190 
1191                 ddi_put16(sc->sc_ioh,
1192                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1193                     (uint16_t *)(sc->sc_io_addr +
1194                     VIRTIO_CONFIG_QUEUE_VECTOR),
1195                     VIRTIO_MSI_NO_VECTOR);
1196         }
1197         /* And the config */
1198         /* LINTED E_BAD_PTR_CAST_ALIGN */
1199         ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1200             VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1201 
1202         ret = DDI_FAILURE;
1203 
1204 out_enable:
1205         return (ret);
1206 }
1207 
1208 static int
1209 virtio_enable_intx(struct virtio_softc *sc)
1210 {
1211         int ret;
1212 
1213         ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1214         if (ret != DDI_SUCCESS) {
1215                 dev_err(sc->sc_dev, CE_WARN,
1216                     "Failed to enable interrupt: %d", ret);
1217         }
1218 
1219         return (ret);
1220 }
1221 
1222 /*
 1223  * We can't enable/disable individual handlers in the INTx case, so we do
 1224  * the whole bunch even in the MSI case.
1225  */
1226 int
1227 virtio_enable_ints(struct virtio_softc *sc)
1228 {
1229 
1230         /* See if we are using MSI. */
1231         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1232                 return (virtio_enable_msi(sc));
1233 
1234         ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1235 
1236         return (virtio_enable_intx(sc));
1237 }
1238 
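In a leaf driver's attach path this is normally the last interrupt step before
telling the device it is driven. A rough sketch, where ld_virtio is a
placeholder for the embedded struct virtio_softc, the error label is
hypothetical, and the status macro is the usual DRIVER_OK value from
virtioreg.h:

        if (virtio_enable_ints(&ld->ld_virtio) != DDI_SUCCESS)
                goto exit_enable_ints;          /* placeholder error label */

        virtio_set_status(&ld->ld_virtio,
            VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
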


1254                         ddi_put16(sc->sc_ioh,
1255                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1256                             (uint16_t *)(sc->sc_io_addr +
1257                             VIRTIO_CONFIG_QUEUE_VECTOR),
1258                             VIRTIO_MSI_NO_VECTOR);
1259                 }
1260                 /* And the config */
1261                 /* LINTED E_BAD_PTR_CAST_ALIGN */
1262                 ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1263                     VIRTIO_CONFIG_CONFIG_VECTOR),
1264                     VIRTIO_MSI_NO_VECTOR);
1265 
1266         }
1267 
 1268         /* Disable the interrupts. Either the whole block, or one by one. */
1269         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1270                 ret = ddi_intr_block_disable(sc->sc_intr_htable,
1271                     sc->sc_intr_num);
1272                 if (ret != DDI_SUCCESS) {
1273                         dev_err(sc->sc_dev, CE_WARN,
1274                             "Failed to disable MSIs, won't be able to "
1275                             "reuse next time");
1276                 }
1277         } else {
1278                 for (i = 0; i < sc->sc_intr_num; i++) {
1279                         ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1280                         if (ret != DDI_SUCCESS) {
1281                                 dev_err(sc->sc_dev, CE_WARN,
1282                                     "Failed to disable interrupt %d, "
1283                                     "won't be able to reuse", i);
1284                         }
1285                 }
1286         }
1287 
1288 
1289         for (i = 0; i < sc->sc_intr_num; i++) {
1290                 (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1291         }
1292 
1293         for (i = 0; i < sc->sc_intr_num; i++)
1294                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
1295 
1296         kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
1297             sc->sc_intr_num);
1298 
1299         /* After disabling interrupts, the config offset is non-MSI. */
1300         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1301 }
1302 
1303 /*
1304  * Module linkage information for the kernel.
1305  */
1306 static struct modlmisc modlmisc = {
1307         &mod_miscops,       /* Type of module */
1308         "VirtIO common library module",
1309 };
1310 
1311 static struct modlinkage modlinkage = {
1312         MODREV_1,
1313         {
1314                 (void *)&modlmisc,
1315                 NULL
1316         }
1317 };
1318