1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2012 Nexenta Systems, Inc.
  24  * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
  25  */
  26 
  27 /* Based on the NetBSD virtio driver by Minoura Makoto. */
  28 /*
  29  * Copyright (c) 2010 Minoura Makoto.
  30  * All rights reserved.
  31  *
  32  * Redistribution and use in source and binary forms, with or without
  33  * modification, are permitted provided that the following conditions
  34  * are met:
  35  * 1. Redistributions of source code must retain the above copyright
  36  *    notice, this list of conditions and the following disclaimer.
  37  * 2. Redistributions in binary form must reproduce the above copyright
  38  *    notice, this list of conditions and the following disclaimer in the
  39  *    documentation and/or other materials provided with the distribution.
  40  *
  41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  42  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  44  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  45  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  46  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  50  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51  *
  52  */
  53 
  54 #include <sys/conf.h>
  55 #include <sys/kmem.h>
  56 #include <sys/debug.h>
  57 #include <sys/modctl.h>
  58 #include <sys/autoconf.h>
  59 #include <sys/ddi_impldefs.h>
  60 #include <sys/ddi.h>
  61 #include <sys/sunddi.h>
  62 #include <sys/sunndi.h>
  63 #include <sys/avintr.h>
  64 #include <sys/spl.h>
  65 #include <sys/promif.h>
  66 #include <sys/list.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/sysmacros.h>
  70 #include <sys/pci.h>
  71 
  72 #include "virtiovar.h"
  73 #include "virtioreg.h"
  74 #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  75 #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  76 #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  77             ~(VIRTIO_PAGE_SIZE-1))
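
     /*
      * VIRTQUEUE_ALIGN() rounds up to the next virtio page boundary; e.g.,
      * assuming VIRTIO_PAGE_SIZE is 4096, VIRTQUEUE_ALIGN(2310) == 4096 and
      * VIRTQUEUE_ALIGN(4096) == 4096.
      */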
  78 
  79 void
  80 virtio_set_status(struct virtio_softc *sc, unsigned int status)
  81 {
  82         int old = 0;
  83 
  84         if (status != 0)
  85                 old = ddi_get8(sc->sc_ioh,
  86                     (uint8_t *)(sc->sc_io_addr +
  87                     VIRTIO_CONFIG_DEVICE_STATUS));
  88 
  89         ddi_put8(sc->sc_ioh,
  90             (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
  91             status | old);
  92 }
  93 
  94 /*
  95  * Negotiate features: save the accepted set in sc->sc_features and
  96  * return the host's feature bits.
  96  */
  97 uint32_t
  98 virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99 {
 100         uint32_t host_features;
 101         uint32_t features;
 102 
 103         host_features = ddi_get32(sc->sc_ioh,
 104             /* LINTED E_BAD_PTR_CAST_ALIGN */
 105             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106 
 107         dev_debug(sc->sc_dev, CE_NOTE,
 108             "host features: %x, guest features: %x",
 109             host_features, guest_features);
 110 
 111         features = host_features & guest_features;
 112         ddi_put32(sc->sc_ioh,
 113             /* LINTED E_BAD_PTR_CAST_ALIGN */
 114             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 115             features);
 116 
 117         sc->sc_features = features;
 118 
 119         return (host_features);
 120 }
 121 
 122 size_t
 123 virtio_show_features(uint32_t features,
 124     char *buf, size_t len)
 125 {
 126         char *orig_buf = buf;
 127         char *bufend = buf + len;
 128 
 129         /* LINTED E_PTRDIFF_OVERFLOW */
 130         buf += snprintf(buf, bufend - buf, "Generic ( ");
 131         if (features & VIRTIO_F_RING_INDIRECT_DESC)
 132                 /* LINTED E_PTRDIFF_OVERFLOW */
 133                 buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 134 
 135         /* LINTED E_PTRDIFF_OVERFLOW */
 136         buf += snprintf(buf, bufend - buf, ") ");
 137 
 138         /* LINTED E_PTRDIFF_OVERFLOW */
 139         return (buf - orig_buf);
 140 }
 141 
 142 boolean_t
 143 virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 144 {
 145         return ((sc->sc_features & feature) != 0);
 146 }
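
     /*
      * Illustrative sketch (not part of this module): a leaf driver normally
      * negotiates features once at attach time and then tests individual bits
      * with virtio_has_feature().  The "sc_virtio" member and the MYDRV_*
      * names below are hypothetical.
      *
      *         uint32_t host_features;
      *
      *         host_features = virtio_negotiate_features(&sc->sc_virtio,
      *             VIRTIO_F_RING_INDIRECT_DESC | MYDRV_GUEST_FEATURES);
      *
      *         if (virtio_has_feature(&sc->sc_virtio,
      *             VIRTIO_F_RING_INDIRECT_DESC))
      *                 indirect_num = MYDRV_INDIRECT_SEGS;
      */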
 147 
 148 /*
 149  * Device configuration registers.
 150  */
 151 uint8_t
 152 virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
 153 {
 154         ASSERT(sc->sc_config_offset);
 155         return ddi_get8(sc->sc_ioh,
 156             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 157 }
 158 
 159 uint16_t
 160 virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
 161 {
 162         ASSERT(sc->sc_config_offset);
 163         return ddi_get16(sc->sc_ioh,
 164             /* LINTED E_BAD_PTR_CAST_ALIGN */
 165             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 166 }
 167 
 168 uint32_t
 169 virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
 170 {
 171         ASSERT(sc->sc_config_offset);
 172         return ddi_get32(sc->sc_ioh,
 173             /* LINTED E_BAD_PTR_CAST_ALIGN */
 174             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 175 }
 176 
 177 uint64_t
 178 virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 179 {
 180         uint64_t r;
 181 
 182         ASSERT(sc->sc_config_offset);
 183         r = ddi_get32(sc->sc_ioh,
 184             /* LINTED E_BAD_PTR_CAST_ALIGN */
 185             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 186             index + sizeof (uint32_t)));
 187 
 188         r <<= 32;
 189 
 190         r += ddi_get32(sc->sc_ioh,
 191             /* LINTED E_BAD_PTR_CAST_ALIGN */
 192             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 193         return (r);
 194 }
 195 
 196 void
 197 virtio_write_device_config_1(struct virtio_softc *sc,
 198     unsigned int index, uint8_t value)
 199 {
 200         ASSERT(sc->sc_config_offset);
 201         ddi_put8(sc->sc_ioh,
 202             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 203 }
 204 
 205 void
 206 virtio_write_device_config_2(struct virtio_softc *sc,
 207     unsigned int index, uint16_t value)
 208 {
 209         ASSERT(sc->sc_config_offset);
 210         ddi_put16(sc->sc_ioh,
 211             /* LINTED E_BAD_PTR_CAST_ALIGN */
 212             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 213 }
 214 
 215 void
 216 virtio_write_device_config_4(struct virtio_softc *sc,
 217     unsigned int index, uint32_t value)
 218 {
 219         ASSERT(sc->sc_config_offset);
 220         ddi_put32(sc->sc_ioh,
 221             /* LINTED E_BAD_PTR_CAST_ALIGN */
 222             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 223 }
 224 
 225 void
 226 virtio_write_device_config_8(struct virtio_softc *sc,
 227     unsigned int index, uint64_t value)
 228 {
 229         ASSERT(sc->sc_config_offset);
 230         ddi_put32(sc->sc_ioh,
 231             /* LINTED E_BAD_PTR_CAST_ALIGN */
 232             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 233             value & 0xFFFFFFFF);
 234         ddi_put32(sc->sc_ioh,
 235             /* LINTED E_BAD_PTR_CAST_ALIGN */
 236             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 237             index + sizeof (uint32_t)), value >> 32);
 238 }
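
     /*
      * Illustrative sketch: a leaf driver reads a 64-bit device-specific
      * config field (a capacity value, say) through the accessors above.
      * The offset name is hypothetical.
      *
      *         uint64_t capacity;
      *
      *         capacity = virtio_read_device_config_8(&sc->sc_virtio,
      *             MYDRV_CONFIG_CAPACITY_OFFSET);
      */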
 239 
 240 /*
 241  * Start/stop vq interrupts.  These are only hints; the host may ignore them.
 242  */
 243 void
 244 virtio_stop_vq_intr(struct virtqueue *vq)
 245 {
 246         vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 247 }
 248 
 249 void
 250 virtio_start_vq_intr(struct virtqueue *vq)
 251 {
 252         vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 253 }
 254 
 255 static ddi_dma_attr_t virtio_vq_dma_attr = {
 256         DMA_ATTR_V0,    /* Version number */
 257         0,              /* low address */
 258         /*
 259          * high address. Has to fit into 32 bits
 260          * after page-shifting
 261          */
 262         0x00000FFFFFFFFFFF,
 263         0xFFFFFFFF,     /* counter register max */
 264         VIRTIO_PAGE_SIZE, /* page alignment required */
 265         0x3F,           /* burst sizes: 1 - 32 */
 266         0x1,            /* minimum transfer size */
 267         0xFFFFFFFF,     /* max transfer size */
 268         0xFFFFFFFF,     /* address register max */
 269         1,              /* no scatter-gather */
 270         1,              /* device operates on bytes */
 271         0,              /* attr flag: set to 0 */
 272 };
 273 
 274 static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 275         DMA_ATTR_V0,    /* Version number */
 276         0,              /* low address */
 277         0xFFFFFFFFFFFFFFFF, /* high address */
 278         0xFFFFFFFF,     /* counter register max */
 279         1,              /* No specific alignment */
 280         0x3F,           /* burst sizes: 1 - 32 */
 281         0x1,            /* minimum transfer size */
 282         0xFFFFFFFF,     /* max transfer size */
 283         0xFFFFFFFF,     /* address register max */
 284         1,              /* no scatter-gather */
 285         1,              /* device operates on bytes */
 286         0,              /* attr flag: set to 0 */
 287 };
 288 
 289 /* Same for direct and indirect descriptors. */
 290 static ddi_device_acc_attr_t virtio_vq_devattr = {
 291         DDI_DEVICE_ATTR_V0,
 292         DDI_NEVERSWAP_ACC,
 293         DDI_STORECACHING_OK_ACC,
 294         DDI_DEFAULT_ACC
 295 };
 296 
 297 static void
 298 virtio_free_indirect(struct vq_entry *entry)
 299 {
 301         (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
 302         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 303         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 304 
 305         entry->qe_indirect_descs = NULL;
 306 }
 307 
 309 static int
 310 virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 311 {
 312         int allocsize, num;
 313         size_t len;
 314         unsigned int ncookies;
 315         int ret;
 316 
 317         num = entry->qe_queue->vq_indirect_num;
 318         ASSERT(num > 1);
 319 
 320         allocsize = sizeof (struct vring_desc) * num;
 321 
 322         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 323             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 324         if (ret != DDI_SUCCESS) {
 325                 dev_err(sc->sc_dev, CE_WARN,
 326                     "Failed to allocate dma handle for indirect descriptors,"
 327                     " entry %d, vq %d", entry->qe_index,
 328                     entry->qe_queue->vq_index);
 329                 goto out_alloc_handle;
 330         }
 331 
 332         ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
 333             allocsize, &virtio_vq_devattr,
 334             DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 335             (caddr_t *)&entry->qe_indirect_descs, &len,
 336             &entry->qe_indirect_dma_acch);
 337         if (ret != DDI_SUCCESS) {
 338                 dev_err(sc->sc_dev, CE_WARN,
 339                     "Failed to allocate dma memory for indirect descriptors,"
 340                     " entry %d, vq %d", entry->qe_index,
 341                     entry->qe_queue->vq_index);
 342                 goto out_alloc;
 343         }
 344 
 345         (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 346 
 347         ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 348             (caddr_t)entry->qe_indirect_descs, len,
 349             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 350             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
 351         if (ret != DDI_DMA_MAPPED) {
 352                 dev_err(sc->sc_dev, CE_WARN,
 353                     "Failed to bind dma memory for indirect descriptors,"
 354                     " entry %d, vq %d", entry->qe_index,
 355                     entry->qe_queue->vq_index);
 356                 goto out_bind;
 357         }
 358 
 359         /* We asked for a single segment */
 360         ASSERT(ncookies == 1);
 361 
 362         return (0);
 363 
 364 out_bind:
 365         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 366         /* Don't let the cleanup in virtio_init_vq() free this entry twice. */
 367         entry->qe_indirect_descs = NULL;
 366 out_alloc:
 367         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 368 out_alloc_handle:
 369 
 370         return (ret);
 371 }
 372 
 373 /*
 374  * Initialize the vq structure.
 375  */
 376 static int
 377 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
 378 {
 379         int ret;
 380         uint16_t i;
 381         int vq_size = vq->vq_num;
 382         int indirect_num = vq->vq_indirect_num;
 383 
 384         /* free slot management */
 385         list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 386             offsetof(struct vq_entry, qe_list));
 387 
 388         for (i = 0; i < vq_size; i++) {
 389                 struct vq_entry *entry = &vq->vq_entries[i];
 390                 list_insert_tail(&vq->vq_freelist, entry);
 391                 entry->qe_index = i;
 392                 entry->qe_desc = &vq->vq_descs[i];
 393                 entry->qe_queue = vq;
 394 
 395                 if (indirect_num) {
 396                         ret = virtio_alloc_indirect(sc, entry);
 397                         if (ret)
 398                                 goto out_indirect;
 399                 }
 400         }
 401 
 402         mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
 403             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 404         mutex_init(&vq->vq_avail_lock, "virtio-avail",
 405             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 406         mutex_init(&vq->vq_used_lock, "virtio-used",
 407             MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 408 
 409         return (0);
 410 
 411 out_indirect:
 412         for (i = 0; i < vq_size; i++) {
 413                 struct vq_entry *entry = &vq->vq_entries[i];
 414                 if (entry->qe_indirect_descs)
 415                         virtio_free_indirect(entry);
 416         }
 417 
 418         return (ret);
 419 }
 420 
 423 /*
 424  * Allocate/free a vq.
 425  */
 426 struct virtqueue *
 427 virtio_alloc_vq(struct virtio_softc *sc,
 428     unsigned int index,
 429     unsigned int size,
 430     unsigned int indirect_num,
 431     const char *name)
 432 {
 433         int vq_size, allocsize1, allocsize2, allocsize = 0;
 434         int ret;
 435         unsigned int ncookies;
 436         size_t len;
 437         struct virtqueue *vq;
 438 
 440         ddi_put16(sc->sc_ioh,
 441             /* LINTED E_BAD_PTR_CAST_ALIGN */
 442             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 443         vq_size = ddi_get16(sc->sc_ioh,
 444             /* LINTED E_BAD_PTR_CAST_ALIGN */
 445             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 446         if (vq_size == 0) {
 447                 dev_err(sc->sc_dev, CE_WARN,
 448                     "virtqueue does not exist, index %d for %s", index, name);
 449                 goto out;
 450         }
 451 
 452         vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 453 
 454         /* size 0 => use native vq size, good for receive queues. */
 455         if (size)
 456                 vq_size = MIN(vq_size, size);
 457 
 458         /* allocsize1: descriptor table + avail ring + pad */
 459         allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 460             sizeof (struct vring_avail) +
 461             sizeof (uint16_t) * vq_size);
 462         /* allocsize2: used ring + pad */
 463         allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
 464             + sizeof (struct vring_used_elem) * vq_size);
 465 
 466         allocsize = allocsize1 + allocsize2;
 467 
 468         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 469             DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 470         if (ret != DDI_SUCCESS) {
 471                 dev_err(sc->sc_dev, CE_WARN,
 472                     "Failed to allocate dma handle for vq %d", index);
 473                 goto out_alloc_handle;
 474         }
 475 
 476         ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 477             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 478             (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 479         if (ret != DDI_SUCCESS) {
 480                 dev_err(sc->sc_dev, CE_WARN,
 481                     "Failed to allocate dma memory for vq %d", index);
 482                 goto out_alloc;
 483         }
 484 
 486         ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 487             (caddr_t)vq->vq_vaddr, len,
 488             DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 489             DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 490         if (ret != DDI_DMA_MAPPED) {
 491                 dev_err(sc->sc_dev, CE_WARN,
 492                     "Failed to bind dma memory for vq %d", index);
 493                 goto out_bind;
 494         }
 495 
 496         /* We asked for a single segment */
 497         ASSERT(ncookies == 1);
 498         /* and page-aligned buffers. */
 499         ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 500 
 501         (void) memset(vq->vq_vaddr, 0, allocsize);
 502 
 503         /* Make sure all zeros hit the buffer before we point the host to it */
 504         membar_producer();
 505 
 506         /* set the vq address */
 507         ddi_put32(sc->sc_ioh,
 508             /* LINTED E_BAD_PTR_CAST_ALIGN */
 509             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
 510             (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
 511 
 512         /* remember addresses and offsets for later use */
 513         vq->vq_owner = sc;
 514         vq->vq_num = vq_size;
 515         vq->vq_index = index;
 516         vq->vq_descs = vq->vq_vaddr;
 517         vq->vq_availoffset = sizeof (struct vring_desc)*vq_size;
 518         vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 519         vq->vq_usedoffset = allocsize1;
 520         vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 521 
 522         ASSERT(indirect_num == 0 ||
 523             virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 524         vq->vq_indirect_num = indirect_num;
 525 
 526         /* free slot management */
 527         vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 528             KM_SLEEP);
 529 
 530         ret = virtio_init_vq(sc, vq);
 531         if (ret)
 532                 goto out_init;
 533 
 534         dev_debug(sc->sc_dev, CE_NOTE,
 535             "Allocated %d entries for vq %d:%s (%d indirect descs)",
 536             vq_size, index, name, indirect_num * vq_size);
 537 
 538         return (vq);
 539 
 540 out_init:
 541         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 542         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 543 out_bind:
 544         ddi_dma_mem_free(&vq->vq_dma_acch);
 545 out_alloc:
 546         ddi_dma_free_handle(&vq->vq_dma_handle);
 547 out_alloc_handle:
 548         kmem_free(vq, sizeof (struct virtqueue));
 549 out:
 550         return (NULL);
 551 }
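
     /*
      * Illustrative sketch: queues are allocated after virtio_register_ints()
      * has run (virtio_init_vq() needs sc_intr_prio for its mutexes) and are
      * torn down with virtio_free_vq() on detach.  The queue index, size,
      * indirect count and name below are hypothetical.
      *
      *         sc->sc_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0,
      *             MYDRV_INDIRECT_SEGS, "mydrv_rx");
      *         if (sc->sc_vq == NULL)
      *                 goto fail;
      */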
 552 
 554 void
 555 virtio_free_vq(struct virtqueue *vq)
 556 {
 557         struct virtio_softc *sc = vq->vq_owner;
 558         int i;
 559 
 560         /* tell device that there's no virtqueue any longer */
 561         ddi_put16(sc->sc_ioh,
 562             /* LINTED E_BAD_PTR_CAST_ALIGN */
 563             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 564             vq->vq_index);
 565         ddi_put32(sc->sc_ioh,
 566             /* LINTED E_BAD_PTR_CAST_ALIGN */
 567             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 568 
 569         /* Free the indirect descriptors, if any. */
 570         for (i = 0; i < vq->vq_num; i++) {
 571                 struct vq_entry *entry = &vq->vq_entries[i];
 572                 if (entry->qe_indirect_descs)
 573                         virtio_free_indirect(entry);
 574         }
 575 
 576         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
 577 
 578         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 579         ddi_dma_mem_free(&vq->vq_dma_acch);
 580         ddi_dma_free_handle(&vq->vq_dma_handle);
 581 
 582         mutex_destroy(&vq->vq_used_lock);
 583         mutex_destroy(&vq->vq_avail_lock);
 584         mutex_destroy(&vq->vq_freelist_lock);
 585 
 586         kmem_free(vq, sizeof (struct virtqueue));
 587 }
 588 
 589 /*
 590  * Free descriptor management.
 591  */
 592 struct vq_entry *
 593 vq_alloc_entry(struct virtqueue *vq)
 594 {
 595         struct vq_entry *qe;
 596 
 597         mutex_enter(&vq->vq_freelist_lock);
 598         if (list_is_empty(&vq->vq_freelist)) {
 599                 mutex_exit(&vq->vq_freelist_lock);
 600                 return (NULL);
 601         }
 602         qe = list_remove_head(&vq->vq_freelist);
 603 
 604         ASSERT(vq->vq_used_entries >= 0);
 605         vq->vq_used_entries++;
 606 
 607         mutex_exit(&vq->vq_freelist_lock);
 608 
 609         qe->qe_next = NULL;
 610         qe->qe_indirect_next = 0;
 611         (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
 612 
 613         return (qe);
 614 }
 615 
 616 void
 617 vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
 618 {
 619         mutex_enter(&vq->vq_freelist_lock);
 620 
 621         list_insert_head(&vq->vq_freelist, qe);
 622         vq->vq_used_entries--;
 623         ASSERT(vq->vq_used_entries >= 0);
 624         mutex_exit(&vq->vq_freelist_lock);
 625 }
 626 
 627 /*
 628  * We (intentionally) don't have a global vq mutex, so you are
 629  * responsible for external locking to avoid allocating/freeing any
 630  * entries before using the returned value. Have fun.
 631  */
 632 uint_t
 633 vq_num_used(struct virtqueue *vq)
 634 {
 635         /* vq->vq_freelist_lock would not help here. */
 636         return (vq->vq_used_entries);
 637 }
 638 
 639 static inline void
 640 virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 641     boolean_t write)
 642 {
 643         desc->addr = paddr;
 644         desc->len = len;
 645         desc->next = 0;
 646         desc->flags = 0;
 647 
 648         /*
 649          * 'write' is from the driver's point of view: if !write, the
 650          * device is allowed to write into the buffer (VRING_DESC_F_WRITE).
 651          */
 652         if (!write)
 653                 desc->flags = VRING_DESC_F_WRITE;
 653 }
 654 
 655 void
 656 virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 657     boolean_t write)
 658 {
 659         virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 660 }
 661 
 662 void
 663 virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 664     boolean_t write)
 665 {
 666         struct vring_desc *indirect_desc;
 667 
 668         ASSERT(qe->qe_queue->vq_indirect_num);
 669         ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 670 
 671         indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 672         virtio_ve_set_desc(indirect_desc, paddr, len, write);
 673         qe->qe_indirect_next++;
 674 }
 675 
 676 void
 677 virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 678     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 679 {
 680         int i;
 681 
 682         for (i = 0; i < ncookies; i++) {
 683                 virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
 684                     dma_cookie.dmac_size, write);
 685                 ddi_dma_nextcookie(dma_handle, &dma_cookie);
 686         }
 687 }
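
     /*
      * Illustrative sketch: submitting a request made of a driver-written
      * header plus a payload described by DMA cookies, all through indirect
      * descriptors.  "vq" is the driver's virtqueue; the cookie variables and
      * header type are hypothetical, and error handling is omitted.
      *
      *         struct vq_entry *ve;
      *
      *         ve = vq_alloc_entry(vq);
      *         if (ve == NULL)
      *                 return (EAGAIN);
      *
      *         virtio_ve_add_indirect_buf(ve, hdr_cookie.dmac_laddress,
      *             sizeof (struct mydrv_hdr), B_TRUE);
      *         virtio_ve_add_cookie(ve, payload_dmah, payload_cookie,
      *             payload_ncookies, B_FALSE);
      *
      *         virtio_push_chain(ve, B_TRUE);
      */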
 688 
 689 void
 690 virtio_sync_vq(struct virtqueue *vq)
 691 {
 692         struct virtio_softc *vsc = vq->vq_owner;
 693 
 694         /* Make sure the avail ring update hit the buffer */
 695         membar_producer();
 696 
 697         vq->vq_avail->idx = vq->vq_avail_idx;
 698 
 699         /* Make sure the avail idx update hits the buffer */
 700         membar_producer();
 701 
 702         /* Make sure we see the flags update */
 703         membar_consumer();
 704 
 705         if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
 706                 ddi_put16(vsc->sc_ioh,
 707                     /* LINTED E_BAD_PTR_CAST_ALIGN */
 708                     (uint16_t *)(vsc->sc_io_addr +
 709                     VIRTIO_CONFIG_QUEUE_NOTIFY),
 710                     vq->vq_index);
 711 }
 712 
 713 void
 714 virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 715 {
 716         struct virtqueue *vq;
 717         struct vq_entry *head = qe;
 718         struct vring_desc *desc;
 719         int idx;
 720 
 721         ASSERT(qe);
 722         vq = qe->qe_queue;
 722 
 723         /*
 724          * Bind the descriptors together; paddr and len should already have
 725          * been set with virtio_ve_set().
 726          */
 727         do {
 728                 /* Bind the indirect descriptors */
 729                 if (qe->qe_indirect_next > 1) {
 730                         uint16_t i = 0;
 731 
 732                         /*
 733                          * Set the pointer/flags to the
 734                          * first indirect descriptor
 735                          */
 736                         virtio_ve_set_desc(qe->qe_desc,
 737                             qe->qe_indirect_dma_cookie.dmac_laddress,
 738                             sizeof (struct vring_desc) * qe->qe_indirect_next,
 739                             B_FALSE);
 740                         qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
 741 
 742                         /* For all but the last one, add the next index/flag */
 743                         do {
 744                                 desc = &qe->qe_indirect_descs[i];
 745                                 i++;
 746 
 747                                 desc->flags |= VRING_DESC_F_NEXT;
 748                                 desc->next = i;
 749                         } while (i < qe->qe_indirect_next - 1);
 750 
 751                 }
 752 
 753                 if (qe->qe_next) {
 754                         qe->qe_desc->flags |= VRING_DESC_F_NEXT;
 755                         qe->qe_desc->next = qe->qe_next->qe_index;
 756                 }
 757 
 758                 qe = qe->qe_next;
 759         } while (qe);
 760 
 761         mutex_enter(&vq->vq_avail_lock);
 762         idx = vq->vq_avail_idx;
 763         vq->vq_avail_idx++;
 764 
 765         /* Make sure the bits hit the descriptor(s) */
 766         membar_producer();
 767         vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 768 
 769         /* Notify the device, if needed. */
 770         if (sync)
 771                 virtio_sync_vq(vq);
 772 
 773         mutex_exit(&vq->vq_avail_lock);
 774 }
 775 
 776 /* Get a chain of descriptors from the used ring, if one is available. */
 777 struct vq_entry *
 778 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 779 {
 780         struct vq_entry *head;
 781         int slot;
 782         int usedidx;
 783 
 784         mutex_enter(&vq->vq_used_lock);
 785 
 786         /* No used entries? Bye. */
 787         if (vq->vq_used_idx == vq->vq_used->idx) {
 788                 mutex_exit(&vq->vq_used_lock);
 789                 return (NULL);
 790         }
 791 
 792         usedidx = vq->vq_used_idx;
 793         vq->vq_used_idx++;
 794         mutex_exit(&vq->vq_used_lock);
 795 
 796         usedidx %= vq->vq_num;
 797 
 798         /* Make sure we do the next step _after_ checking the idx. */
 799         membar_consumer();
 800 
 801         slot = vq->vq_used->ring[usedidx].id;
 802         *len = vq->vq_used->ring[usedidx].len;
 803 
 804         head = &vq->vq_entries[slot];
 805 
 806         return (head);
 807 }
 808 
 809 void
 810 virtio_free_chain(struct vq_entry *qe)
 811 {
 812         struct vq_entry *tmp;
 813         struct virtqueue *vq;
 814 
 815         ASSERT(qe);
 816         vq = qe->qe_queue;
 816 
 817         do {
 818                 ASSERT(qe->qe_queue == vq);
 819                 tmp = qe->qe_next;
 820                 vq_free_entry(vq, qe);
 821                 qe = tmp;
 822         } while (tmp);
 823 }
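
     /*
      * Illustrative sketch: a vq interrupt handler drains the used ring by
      * pulling completed chains and returning their entries to the free
      * list.  Processing of the completed request is driver-specific and
      * omitted here.
      *
      *         struct vq_entry *ve;
      *         uint32_t len;
      *
      *         while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
      *                 (process the chain headed by ve; len bytes were used)
      *                 virtio_free_chain(ve);
      *         }
      */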
 824 
 825 void
 826 virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 827 {
 828         first->qe_next = second;
 829 }
 830 
 831 static int
 832 virtio_register_msi(struct virtio_softc *sc,
 833     struct virtio_int_handler *config_handler,
 834     struct virtio_int_handler vq_handlers[],
 835     int intr_types)
 836 {
 837         int count, actual;
 838         int int_type;
 839         int i;
 840         int handler_count;
 841         int ret;
 842 
 843         /* If both MSI and MSI-x are reported, prefer MSI-x. */
 844         int_type = DDI_INTR_TYPE_MSI;
 845         if (intr_types & DDI_INTR_TYPE_MSIX)
 846                 int_type = DDI_INTR_TYPE_MSIX;
 847 
 848         /* Walk the handler table to get the number of handlers. */
 849         for (handler_count = 0;
 850             vq_handlers && vq_handlers[handler_count].vh_func;
 851             handler_count++)
 852                 ;
 853 
 854         /* +1 if there is a config change handler. */
 855         if (config_handler)
 856                 handler_count++;
 857 
 858         /* Number of MSIs supported by the device. */
 859         ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 860         if (ret != DDI_SUCCESS) {
 861                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 862                 return (ret);
 863         }
 864 
 865         /*
 866          * Those who try to register more handlers than the device
 867          * supports shall suffer.
 868          */
 869         ASSERT(handler_count <= count);
 870 
 871         sc->sc_intr_htable = kmem_zalloc(
 872             sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);
 873 
 874         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 875             handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 876         if (ret != DDI_SUCCESS) {
 877                 dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 878                 goto out_msi_alloc;
 879         }
 880 
 881         if (actual != handler_count) {
 882                 dev_err(sc->sc_dev, CE_WARN,
 883                     "Not enough MSI available: need %d, available %d",
 884                     handler_count, actual);
 885                 ret = DDI_FAILURE;
 886                 goto out_msi_available;
 886         }
 887 
 888         sc->sc_intr_num = handler_count;
 889         sc->sc_intr_config = B_FALSE;
 890         if (config_handler) {
 891                 sc->sc_intr_config = B_TRUE;
 892         }
 893 
 894         /* Assume they all have the same priority */
 895         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 896         if (ret != DDI_SUCCESS) {
 897                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 898                 goto out_msi_prio;
 899         }
 900 
 901         /* Add the vq handlers */
 902         for (i = 0; vq_handlers[i].vh_func; i++) {
 903                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 904                     vq_handlers[i].vh_func,
 905                     sc, vq_handlers[i].vh_priv);
 906                 if (ret != DDI_SUCCESS) {
 907                         dev_err(sc->sc_dev, CE_WARN,
 908                             "ddi_intr_add_handler failed");
 909                         /* Remove the handlers that succeeded. */
 910                         while (--i >= 0) {
 911                                 (void) ddi_intr_remove_handler(
 912                                     sc->sc_intr_htable[i]);
 913                         }
 914                         goto out_add_handlers;
 915                 }
 916         }
 917 
 918         /* Don't forget the config handler */
 919         if (config_handler) {
 920                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 921                     config_handler->vh_func,
 922                     sc, config_handler->vh_priv);
 923                 if (ret != DDI_SUCCESS) {
 924                         dev_err(sc->sc_dev, CE_WARN,
 925                             "ddi_intr_add_handler failed");
 926                         /* Remove the handlers that succeeded. */
 927                         while (--i >= 0) {
 928                                 (void) ddi_intr_remove_handler(
 929                                     sc->sc_intr_htable[i]);
 930                         }
 931                         goto out_add_handlers;
 932                 }
 933         }
 934 
 935         /* We know we are using MSI, so set the config offset. */
 936         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 937 
 938         ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
 939             &sc->sc_intr_cap);
 940         /* Just in case. */
 941         if (ret != DDI_SUCCESS)
 942                 sc->sc_intr_cap = 0;
 943 
 944         return (0);
 945 
 944 out_add_handlers:
 945 out_msi_prio:
 946 out_msi_available:
 947         for (i = 0; i < actual; i++)
 948                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
 949 out_msi_alloc:
 950         kmem_free(sc->sc_intr_htable,
 951             sizeof (ddi_intr_handle_t) * handler_count);
 951 
 952         return (ret);
 953 }
 954 
 955 struct virtio_handler_container {
 956         int nhandlers;
 957         struct virtio_int_handler config_handler;
 958         struct virtio_int_handler vq_handlers[];
 959 };
 960 
 961 uint_t
 962 virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
 963 {
 964         struct virtio_softc *sc = (void *)arg1;
 965         struct virtio_handler_container *vhc = (void *)arg2;
 966         uint8_t isr_status;
 967         int i;
 968 
 969         isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
 970             VIRTIO_CONFIG_ISR_STATUS));
 971 
 972         if (!isr_status)
 973                 return (DDI_INTR_UNCLAIMED);
 974 
 975         if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
 976             vhc->config_handler.vh_func) {
 977                 vhc->config_handler.vh_func((void *)sc,
 978                     vhc->config_handler.vh_priv);
 979         }
 980 
 981         /* Notify all handlers */
 982         for (i = 0; i < vhc->nhandlers; i++) {
 983                 vhc->vq_handlers[i].vh_func((void *)sc,
 984                     vhc->vq_handlers[i].vh_priv);
 985         }
 986 
 987         return (DDI_INTR_CLAIMED);
 988 }
 989 
 990 /*
 991  * config_handler and vq_handlers may be allocated on stack.
 992  * Take precautions not to lose them.
 993  */
 994 static int
 995 virtio_register_intx(struct virtio_softc *sc,
 996     struct virtio_int_handler *config_handler,
 997     struct virtio_int_handler vq_handlers[])
 998 {
 999         int vq_handler_count;
1001         int actual;
1002         struct virtio_handler_container *vhc;
1003         int ret = DDI_FAILURE;
1004 
1005         /* Walk the handler table to get the number of handlers. */
1006         for (vq_handler_count = 0;
1007             vq_handlers && vq_handlers[vq_handler_count].vh_func;
1008             vq_handler_count++)
1009                 ;
1010 
1014         vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1015             sizeof (struct virtio_int_handler) * vq_handler_count,
1016             KM_SLEEP);
1017 
1018         vhc->nhandlers = vq_handler_count;
1019         (void) memcpy(vhc->vq_handlers, vq_handlers,
1020             sizeof (struct virtio_int_handler) * vq_handler_count);
1021 
1022         if (config_handler) {
1023                 (void) memcpy(&vhc->config_handler, config_handler,
1024                     sizeof (struct virtio_int_handler));
1025         }
1026 
1027         /* Just a single entry for a single interrupt. */
1028         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1029 
1030         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1031             DDI_INTR_TYPE_FIXED, 0, 1, &actual,
1032             DDI_INTR_ALLOC_NORMAL);
1033         if (ret != DDI_SUCCESS) {
1034                 dev_err(sc->sc_dev, CE_WARN,
1035                     "Failed to allocate a fixed interrupt: %d", ret);
1036                 goto out_int_alloc;
1037         }
1038 
1039         ASSERT(actual == 1);
1040         sc->sc_intr_num = 1;
1041 
1042         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1043         if (ret != DDI_SUCCESS) {
1044                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1045                 goto out_prio;
1046         }
1047 
1048         ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1049             virtio_intx_dispatch, sc, vhc);
1050         if (ret != DDI_SUCCESS) {
1051                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1052                 goto out_add_handlers;
1053         }
1054 
1055         /* We know we are not using MSI, so set the config offset. */
1056         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1057 
1058         return (DDI_SUCCESS);
1059 
1060 out_add_handlers:
1061 out_prio:
1062         (void) ddi_intr_free(sc->sc_intr_htable[0]);
1063 out_int_alloc:
1064         kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
1065         kmem_free(vhc, sizeof (struct virtio_handler_container) +
1066             sizeof (struct virtio_int_handler) * vq_handler_count);
1067         return (ret);
1068 }
1069 
1070 /*
1071  * We find out whether MSI is supported during this call, and the
1072  * register layout depends on whether MSI is in use (doh). Don't access
1073  * the device-specific bits in BAR 0 before calling it!
1074  */
1075 int
1076 virtio_register_ints(struct virtio_softc *sc,
1077     struct virtio_int_handler *config_handler,
1078     struct virtio_int_handler vq_handlers[])
1079 {
1080         int ret;
1081         int intr_types;
1082 
1083         /* Determine which types of interrupts are supported */
1084         ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1085         if (ret != DDI_SUCCESS) {
1086                 dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1087                 goto out_inttype;
1088         }
1089 
1090         /* If we have MSI, use it. */
1091         if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1092                 ret = virtio_register_msi(sc, config_handler,
1093                     vq_handlers, intr_types);
1094                 if (!ret)
1095                         return (0);
1096         }
1097 
1098         /* Fall back to old-fashioned interrupts. */
1099         if (intr_types & DDI_INTR_TYPE_FIXED) {
1100                 dev_debug(sc->sc_dev, CE_WARN,
1101                     "Using legacy interrupts");
1102 
1103                 return (virtio_register_intx(sc, config_handler, vq_handlers));
1104         }
1105 
1106         dev_err(sc->sc_dev, CE_WARN,
1107             "MSI failed and fixed interrupts not supported. Giving up.");
1108         ret = DDI_FAILURE;
1109 
1110 out_inttype:
1111         return (ret);
1112 }
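
     /*
      * Illustrative sketch: the vq_handlers table passed in must be
      * terminated by an entry whose vh_func is NULL, since the walkers above
      * stop on it.  The handler names are hypothetical.
      *
      *         struct virtio_int_handler mydrv_conf_h = {
      *                 .vh_func = mydrv_config_change, .vh_priv = sc
      *         };
      *         struct virtio_int_handler mydrv_vq_h[] = {
      *                 { .vh_func = mydrv_rx_intr, .vh_priv = sc },
      *                 { .vh_func = mydrv_tx_intr, .vh_priv = sc },
      *                 { .vh_func = NULL }
      *         };
      *
      *         ret = virtio_register_ints(&sc->sc_virtio, &mydrv_conf_h,
      *             mydrv_vq_h);
      */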
1113 
1115 static int
1116 virtio_enable_msi(struct virtio_softc *sc)
1117 {
1118         int ret, i;
1119         int vq_handler_count = sc->sc_intr_num;
1120 
1121         /* Number of handlers, not counting the config handler. */
1122         if (sc->sc_intr_config)
1123                 vq_handler_count--;
1124 
1125         /* Enable the interrupts. Either the whole block, or one by one. */
1126         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1127                 ret = ddi_intr_block_enable(sc->sc_intr_htable,
1128                     sc->sc_intr_num);
1129                 if (ret != DDI_SUCCESS) {
1130                         dev_err(sc->sc_dev, CE_WARN,
1131                             "Failed to enable MSI, falling back to INTx");
1132                         goto out_enable;
1133                 }
1134         } else {
1135                 for (i = 0; i < sc->sc_intr_num; i++) {
1136                         ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1137                         if (ret != DDI_SUCCESS) {
1138                                 dev_err(sc->sc_dev, CE_WARN,
1139                                     "Failed to enable MSI %d, "
1140                                     "falling back to INTx", i);
1141 
1142                                 while (--i >= 0) {
1143                                         (void) ddi_intr_disable(
1144                                             sc->sc_intr_htable[i]);
1145                                 }
1146                                 goto out_enable;
1147                         }
1148                 }
1149         }
1150 
1151         /* Bind the allocated MSI to the queues and config */
1152         for (i = 0; i < vq_handler_count; i++) {
1153                 int check;
1154                 ddi_put16(sc->sc_ioh,
1155                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1156                     (uint16_t *)(sc->sc_io_addr +
1157                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1158 
1159                 ddi_put16(sc->sc_ioh,
1160                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1161                     (uint16_t *)(sc->sc_io_addr +
1162                     VIRTIO_CONFIG_QUEUE_VECTOR), i);
1163 
1164                 check = ddi_get16(sc->sc_ioh,
1165                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1166                     (uint16_t *)(sc->sc_io_addr +
1167                     VIRTIO_CONFIG_QUEUE_VECTOR));
1168                 if (check != i) {
1169                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1170                             "for VQ %d, MSI %d. Check = %x", i, i, check);
1171                         ret = ENODEV;
1172                         goto out_bind;
1173                 }
1174         }
1175 
1176         if (sc->sc_intr_config) {
1177                 int check;
1178                 ddi_put16(sc->sc_ioh,
1179                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1180                     (uint16_t *)(sc->sc_io_addr +
1181                     VIRTIO_CONFIG_CONFIG_VECTOR), i);
1182 
1183                 check = ddi_get16(sc->sc_ioh,
1184                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1185                     (uint16_t *)(sc->sc_io_addr +
1186                     VIRTIO_CONFIG_CONFIG_VECTOR));
1187                 if (check != i) {
1188                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1189                             "for Config updates, MSI %d", i);
1190                         ret = ENODEV;
1191                         goto out_bind;
1192                 }
1193         }
1194 
1195         return (DDI_SUCCESS);
1196 
1197 out_bind:
1198         /* Unbind the vqs */
1199         for (i = 0; i < vq_handler_count - 1; i++) {
1200                 ddi_put16(sc->sc_ioh,
1201                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1202                     (uint16_t *)(sc->sc_io_addr +
1203                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1204 
1205                 ddi_put16(sc->sc_ioh,
1206                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1207                     (uint16_t *)(sc->sc_io_addr +
1208                     VIRTIO_CONFIG_QUEUE_VECTOR),
1209                     VIRTIO_MSI_NO_VECTOR);
1210         }
1211         /* And the config */
1212         /* LINTED E_BAD_PTR_CAST_ALIGN */
1213         ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1214             VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1215 
1216         ret = DDI_FAILURE;
1217 
1218 out_enable:
1219         return (ret);
1220 }
1221 
1222 static int
1223 virtio_enable_intx(struct virtio_softc *sc)
1223 {
1224         int ret;
1225 
1226         ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1227         if (ret != DDI_SUCCESS)
1228                 dev_err(sc->sc_dev, CE_WARN,
1229                     "Failed to enable interrupt: %d", ret);
1230         return (ret);
1231 }
1232 
1233 /*
1234  * We can't enable/disable individual handlers in the INTx case, so
1235  * enable/disable the whole bunch even in the MSI case.
1236  */
1237 int
1238 virtio_enable_ints(struct virtio_softc *sc)
1239 {
1241         /* See if we are using MSI. */
1242         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1243                 return (virtio_enable_msi(sc));
1244 
1245         ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1246 
1247         return (virtio_enable_intx(sc));
1248 }
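
     /*
      * Illustrative sketch of the ordering a leaf driver typically follows
      * at attach time (the status constants are assumed to come from
      * virtioreg.h; error handling is omitted):
      *
      *         virtio_set_status(&sc->sc_virtio,
      *             VIRTIO_CONFIG_DEVICE_STATUS_ACK);
      *         virtio_set_status(&sc->sc_virtio,
      *             VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
      *         (void) virtio_negotiate_features(&sc->sc_virtio, features);
      *         ret = virtio_register_ints(&sc->sc_virtio, &conf_h, vq_h);
      *         sc->sc_vq = virtio_alloc_vq(&sc->sc_virtio, 0, 0, 0, "mydrv");
      *         ret = virtio_enable_ints(&sc->sc_virtio);
      *         virtio_set_status(&sc->sc_virtio,
      *             VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
      */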
1249 
1250 void
1251 virtio_release_ints(struct virtio_softc *sc)
1252 {
1253         int i;
1254         int ret;
1255 
1256         /* We were running with MSI, unbind them. */
1257         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1258                 /* Unbind all vqs */
1259                 for (i = 0; i < sc->sc_nvqs; i++) {
1260                         ddi_put16(sc->sc_ioh,
1261                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1262                             (uint16_t *)(sc->sc_io_addr +
1263                             VIRTIO_CONFIG_QUEUE_SELECT), i);
1264 
1265                         ddi_put16(sc->sc_ioh,
1266                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1267                             (uint16_t *)(sc->sc_io_addr +
1268                             VIRTIO_CONFIG_QUEUE_VECTOR),
1269                             VIRTIO_MSI_NO_VECTOR);
1270                 }
1271                 /* And the config */
1272                 /* LINTED E_BAD_PTR_CAST_ALIGN */
1273                 ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1274                     VIRTIO_CONFIG_CONFIG_VECTOR),
1275                     VIRTIO_MSI_NO_VECTOR);
1276 
1277         }
1278 
1279         /* Disable the interrupts. Either the whole block, or one by one. */
1280         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1281                 ret = ddi_intr_block_disable(sc->sc_intr_htable,
1282                     sc->sc_intr_num);
1283                 if (ret != DDI_SUCCESS) {
1284                         dev_err(sc->sc_dev, CE_WARN,
1285                             "Failed to disable MSIs, won't be able to "
1286                             "reuse them next time");
1287                 }
1288         } else {
1289                 for (i = 0; i < sc->sc_intr_num; i++) {
1290                         ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1291                         if (ret != DDI_SUCCESS) {
1292                                 dev_err(sc->sc_dev, CE_WARN,
1293                                     "Failed to disable interrupt %d, "
1294                                     "won't be able to reuse", i);
1295 
1296                         }
1297                 }
1298         }
1299 
1301         for (i = 0; i < sc->sc_intr_num; i++) {
1302                 (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1303         }
1304 
1305         for (i = 0; i < sc->sc_intr_num; i++)
1306                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
1307 
1308         kmem_free(sc->sc_intr_htable,
1309             sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
1310 
1312         /* After disabling interrupts, the config offset is non-MSI. */
1313         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1314 }
1315 
1316 /*
1317  * Module linkage information for the kernel.
1318  */
1319 static struct modlmisc modlmisc = {
1320         &mod_miscops, /* Type of module */
1321         "VirtIO common library module",
1322 };
1323 
1324 static struct modlinkage modlinkage = {
1325         MODREV_1,
1326         {
1327                 (void *)&modlmisc,
1328                 NULL
1329         }
1330 };
1331 
1332 int
1333 _init(void)
1334 {
1335         return (mod_install(&modlinkage));
1336 }
1337 
1338 int
1339 _fini(void)
1340 {
1341         return (mod_remove(&modlinkage));
1342 }
1343 
1344 int
1345 _info(struct modinfo *modinfop)
1346 {
1347         return (mod_info(&modlinkage, modinfop));
1348 }