1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright 2012 Nexenta Systems, Inc.
  24  * Copyright 2012 Alexey Zaytsev <alexey.zaytsev@gmail.com>
  25  */
  26 
  27 /* Based on the NetBSD virtio driver by Minoura Makoto. */
  28 /*
  29  * Copyright (c) 2010 Minoura Makoto.
  30  * All rights reserved.
  31  *
  32  * Redistribution and use in source and binary forms, with or without
  33  * modification, are permitted provided that the following conditions
  34  * are met:
  35  * 1. Redistributions of source code must retain the above copyright
  36  *    notice, this list of conditions and the following disclaimer.
  37  * 2. Redistributions in binary form must reproduce the above copyright
  38  *    notice, this list of conditions and the following disclaimer in the
  39  *    documentation and/or other materials provided with the distribution.
  40  *
  41  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
  42  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
  43  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
  44  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
  45  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
  46  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
  47  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
  48  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
  49  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
  50  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  51  *
  52  */
  53 
  54 #include <sys/conf.h>
  55 #include <sys/kmem.h>
  56 #include <sys/debug.h>
  57 #include <sys/modctl.h>
  58 #include <sys/autoconf.h>
  59 #include <sys/ddi_impldefs.h>
  60 #include <sys/ddi.h>
  61 #include <sys/sunddi.h>
  62 #include <sys/sunndi.h>
  63 #include <sys/avintr.h>
  64 #include <sys/spl.h>
  65 #include <sys/promif.h>
  66 #include <sys/list.h>
  67 #include <sys/bootconf.h>
  68 #include <sys/bootsvcs.h>
  69 #include <sys/sysmacros.h>
  70 #include <sys/pci.h>
  71 
  72 #include "virtiovar.h"
  73 #include "virtioreg.h"
  74 
  75 #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  76 #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  77 #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  78             ~(VIRTIO_PAGE_SIZE-1))
  79 
  80 void
  81 virtio_set_status(struct virtio_softc *sc, unsigned int status)
  82 {
  83         int old = 0;
  84 
  85         if (status != 0) {
  86                 old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  87                     VIRTIO_CONFIG_DEVICE_STATUS));
  88         }
  89 
  90         ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  91             VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  92 }
  93 
  94 /*
  95  * Negotiate features, save the result in sc->sc_features
  96  */
  97 uint32_t
  98 virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99 {
 100         uint32_t host_features;
 101         uint32_t features;
 102 
 103         host_features = ddi_get32(sc->sc_ioh,
 104             /* LINTED E_BAD_PTR_CAST_ALIGN */
 105             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106 
 107         dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
 108             host_features, guest_features);
 109 
 110         features = host_features & guest_features;
 111         ddi_put32(sc->sc_ioh,
 112             /* LINTED E_BAD_PTR_CAST_ALIGN */
 113             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 114             features);
 115 
 116         sc->sc_features = features;
 117 
 118         return (host_features);
 119 }
 120 
 121 size_t
 122 virtio_show_features(uint32_t features, char *buf, size_t len)
 123 {
 124         char *orig_buf = buf;
 125         char *bufend = buf + len;
 126 
 127         /* LINTED E_PTRDIFF_OVERFLOW */
 128         buf += snprintf(buf, bufend - buf, "Generic ( ");
 129         if (features & VIRTIO_F_RING_INDIRECT_DESC)
 130                 /* LINTED E_PTRDIFF_OVERFLOW */
 131                 buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 132 
 133         /* LINTED E_PTRDIFF_OVERFLOW */
 134         buf += snprintf(buf, bufend - buf, ") ");
 135 
 136         /* LINTED E_PTRDIFF_OVERFLOW */
 137         return (buf - orig_buf);
 138 }
 139 
 140 boolean_t
 141 virtio_has_feature(struct virtio_softc *sc, uint32_t feature)
 142 {
 143         return (sc->sc_features & feature);
 144 }
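
      /*
       * A minimal sketch of how a client driver is expected to use the status
       * and feature helpers during attach. The status constant names
       * (VIRTIO_CONFIG_DEVICE_STATUS_*) are assumed to come from virtioreg.h,
       * and MYDEV_WANTED_FEATURES is a hypothetical feature mask:
       *
       *        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_ACK);
       *        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER);
       *
       *        (void) virtio_negotiate_features(sc, MYDEV_WANTED_FEATURES);
       *        if (!virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC))
       *                ... fall back to direct descriptors only ...
       *
       *        ... allocate queues, register and enable interrupts ...
       *
       *        virtio_set_status(sc, VIRTIO_CONFIG_DEVICE_STATUS_DRIVER_OK);
       */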
 145 
 146 /*
 147  * Device configuration registers.
 148  */
 149 uint8_t
 150 virtio_read_device_config_1(struct virtio_softc *sc, unsigned int index)
 151 {
 152         ASSERT(sc->sc_config_offset);
 153         return ddi_get8(sc->sc_ioh,
 154             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 155 }
 156 
 157 uint16_t
 158 virtio_read_device_config_2(struct virtio_softc *sc, unsigned int index)
 159 {
 160         ASSERT(sc->sc_config_offset);
 161         return ddi_get16(sc->sc_ioh,
 162             /* LINTED E_BAD_PTR_CAST_ALIGN */
 163             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 164 }
 165 
 166 uint32_t
 167 virtio_read_device_config_4(struct virtio_softc *sc, unsigned int index)
 168 {
 169         ASSERT(sc->sc_config_offset);
 170         return ddi_get32(sc->sc_ioh,
 171             /* LINTED E_BAD_PTR_CAST_ALIGN */
 172             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 173 }
 174 
 175 uint64_t
 176 virtio_read_device_config_8(struct virtio_softc *sc, unsigned int index)
 177 {
 178         uint64_t r;
 179 
 180         ASSERT(sc->sc_config_offset);
 181         r = ddi_get32(sc->sc_ioh,
 182             /* LINTED E_BAD_PTR_CAST_ALIGN */
 183             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 184             index + sizeof (uint32_t)));
 185 
 186         r <<= 32;
 187 
 188         r += ddi_get32(sc->sc_ioh,
 189             /* LINTED E_BAD_PTR_CAST_ALIGN */
 190             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 191         return (r);
 192 }
 193 
 194 void
 195 virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
 196     uint8_t value)
 197 {
 198         ASSERT(sc->sc_config_offset);
 199         ddi_put8(sc->sc_ioh,
 200             (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 201 }
 202 
 203 void
 204 virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
 205     uint16_t value)
 206 {
 207         ASSERT(sc->sc_config_offset);
 208         ddi_put16(sc->sc_ioh,
 209             /* LINTED E_BAD_PTR_CAST_ALIGN */
 210             (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 211 }
 212 
 213 void
 214 virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
 215     uint32_t value)
 216 {
 217         ASSERT(sc->sc_config_offset);
 218         ddi_put32(sc->sc_ioh,
 219             /* LINTED E_BAD_PTR_CAST_ALIGN */
 220             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 221 }
 222 
 223 void
 224 virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
 225     uint64_t value)
 226 {
 227         ASSERT(sc->sc_config_offset);
 228         ddi_put32(sc->sc_ioh,
 229             /* LINTED E_BAD_PTR_CAST_ALIGN */
 230             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 231             value & 0xFFFFFFFF);
 232         ddi_put32(sc->sc_ioh,
 233             /* LINTED E_BAD_PTR_CAST_ALIGN */
 234             (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 235             index + sizeof (uint32_t)), value >> 32);
 236 }
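
      /*
       * Example (sketch): a client reads device-specific configuration through
       * these helpers. The field offset below is hypothetical; real offsets
       * come from the device's own header.
       *
       *        uint64_t capacity =
       *            virtio_read_device_config_8(sc, MYDEV_CONFIG_CAPACITY);
       */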
 237 
 238 /*
  239  * Start/stop vq interrupt.  This is only a hint; the device may ignore it.
 240  */
 241 void
 242 virtio_stop_vq_intr(struct virtqueue *vq)
 243 {
 244         vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 245 }
 246 
 247 void
 248 virtio_start_vq_intr(struct virtqueue *vq)
 249 {
 250         vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 251 }
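
      /*
       * One common usage pattern (a sketch, not a requirement of this API):
       * mask the queue interrupt while draining completions, then unmask it.
       * A careful driver re-checks the used ring after unmasking, to close the
       * race with a completion that arrived in between.
       *
       *        virtio_stop_vq_intr(vq);
       *        while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
       *                ... handle the completed chain ...
       *                virtio_free_chain(ve);
       *        }
       *        virtio_start_vq_intr(vq);
       */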
 252 
 253 static ddi_dma_attr_t virtio_vq_dma_attr = {
 254         DMA_ATTR_V0,            /* Version number */
 255         0,                      /* low address */
 256         0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
 257                                 /* after page-shifting */
 258         0xFFFFFFFF,             /* counter register max */
 259         VIRTIO_PAGE_SIZE,       /* page alignment required */
 260         0x3F,                   /* burst sizes: 1 - 32 */
 261         0x1,                    /* minimum transfer size */
 262         0xFFFFFFFF,             /* max transfer size */
 263         0xFFFFFFFF,             /* address register max */
 264         1,                      /* no scatter-gather */
 265         1,                      /* device operates on bytes */
 266         0,                      /* attr flag: set to 0 */
 267 };
 268 
 269 static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 270         DMA_ATTR_V0,            /* Version number */
 271         0,                      /* low address */
 272         0xFFFFFFFFFFFFFFFF,     /* high address */
 273         0xFFFFFFFF,             /* counter register max */
 274         1,                      /* No specific alignment */
 275         0x3F,                   /* burst sizes: 1 - 32 */
 276         0x1,                    /* minimum transfer size */
 277         0xFFFFFFFF,             /* max transfer size */
 278         0xFFFFFFFF,             /* address register max */
 279         1,                      /* no scatter-gather */
 280         1,                      /* device operates on bytes */
 281         0,                      /* attr flag: set to 0 */
 282 };
 283 
 284 /* Same for direct and indirect descriptors. */
 285 static ddi_device_acc_attr_t virtio_vq_devattr = {
 286         DDI_DEVICE_ATTR_V0,
 287         DDI_NEVERSWAP_ACC,
 288         DDI_STORECACHING_OK_ACC,
 289         DDI_DEFAULT_ACC
 290 };
 291 
 292 static void
 293 virtio_free_indirect(struct vq_entry *entry)
 294 {
 295 
 296         (void) ddi_dma_unbind_handle(entry->qe_indirect_dma_handle);
 297         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 298         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 299 
 300         entry->qe_indirect_descs = NULL;
 301 }
 302 
 303 
 304 static int
 305 virtio_alloc_indirect(struct virtio_softc *sc, struct vq_entry *entry)
 306 {
 307         int allocsize, num;
 308         size_t len;
 309         unsigned int ncookies;
 310         int ret;
 311 
 312         num = entry->qe_queue->vq_indirect_num;
 313         ASSERT(num > 1);
 314 
 315         allocsize = sizeof (struct vring_desc) * num;
 316 
 317         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 318             DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 319         if (ret != DDI_SUCCESS) {
 320                 dev_err(sc->sc_dev, CE_WARN,
 321                     "Failed to allocate dma handle for indirect descriptors, "
 322                     "entry %d, vq %d", entry->qe_index,
 323                     entry->qe_queue->vq_index);
 324                 goto out_alloc_handle;
 325         }
 326 
 327         ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
 328             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 329             (caddr_t *)&entry->qe_indirect_descs, &len,
 330             &entry->qe_indirect_dma_acch);
 331         if (ret != DDI_SUCCESS) {
 332                 dev_err(sc->sc_dev, CE_WARN,
 333                     "Failed to allocate dma memory for indirect descriptors, "
  334                     "entry %d, vq %d", entry->qe_index,
 335                     entry->qe_queue->vq_index);
 336                 goto out_alloc;
 337         }
 338 
 339         (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 340 
 341         ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 342             (caddr_t)entry->qe_indirect_descs, len,
 343             DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 344             &entry->qe_indirect_dma_cookie, &ncookies);
 345         if (ret != DDI_DMA_MAPPED) {
 346                 dev_err(sc->sc_dev, CE_WARN,
 347                     "Failed to bind dma memory for indirect descriptors, "
 348                     "entry %d, vq %d", entry->qe_index,
 349                     entry->qe_queue->vq_index);
 350                 goto out_bind;
 351         }
 352 
 353         /* We asked for a single segment */
 354         ASSERT(ncookies == 1);
 355 
 356         return (0);
 357 
 358 out_bind:
 359         ddi_dma_mem_free(&entry->qe_indirect_dma_acch);
 360 out_alloc:
 361         ddi_dma_free_handle(&entry->qe_indirect_dma_handle);
 362 out_alloc_handle:
 363 
 364         return (ret);
 365 }
 366 
 367 /*
 368  * Initialize the vq structure.
 369  */
 370 static int
 371 virtio_init_vq(struct virtio_softc *sc, struct virtqueue *vq)
 372 {
 373         int ret;
 374         uint16_t i;
 375         int vq_size = vq->vq_num;
 376         int indirect_num = vq->vq_indirect_num;
 377 
 378         /* free slot management */
 379         list_create(&vq->vq_freelist, sizeof (struct vq_entry),
 380             offsetof(struct vq_entry, qe_list));
 381 
 382         for (i = 0; i < vq_size; i++) {
 383                 struct vq_entry *entry = &vq->vq_entries[i];
 384                 list_insert_tail(&vq->vq_freelist, entry);
 385                 entry->qe_index = i;
 386                 entry->qe_desc = &vq->vq_descs[i];
 387                 entry->qe_queue = vq;
 388 
 389                 if (indirect_num) {
 390                         ret = virtio_alloc_indirect(sc, entry);
 391                         if (ret)
 392                                 goto out_indirect;
 393                 }
 394         }
 395 
 396         mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
 397             DDI_INTR_PRI(sc->sc_intr_prio));
 398         mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
 399             DDI_INTR_PRI(sc->sc_intr_prio));
 400         mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
 401             DDI_INTR_PRI(sc->sc_intr_prio));
 402 
 403         return (0);
 404 
 405 out_indirect:
 406         for (i = 0; i < vq_size; i++) {
 407                 struct vq_entry *entry = &vq->vq_entries[i];
 408                 if (entry->qe_indirect_descs)
 409                         virtio_free_indirect(entry);
 410         }
 411 
 412         return (ret);
 413 }
 414 
 415 /*
 416  * Allocate/free a vq.
 417  */
 418 struct virtqueue *
 419 virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
 420     unsigned int indirect_num, const char *name)
 421 {
 422         int vq_size, allocsize1, allocsize2, allocsize = 0;
 423         int ret;
 424         unsigned int ncookies;
 425         size_t len;
 426         struct virtqueue *vq;
 427 
 428         ddi_put16(sc->sc_ioh,
 429             /* LINTED E_BAD_PTR_CAST_ALIGN */
 430             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 431         vq_size = ddi_get16(sc->sc_ioh,
 432             /* LINTED E_BAD_PTR_CAST_ALIGN */
 433             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 434         if (vq_size == 0) {
 435                 dev_err(sc->sc_dev, CE_WARN,
  436                     "virtqueue does not exist, index %d for %s\n", index, name);
 437                 goto out;
 438         }
 439 
 440         vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 441 
 442         /* size 0 => use native vq size, good for receive queues. */
 443         if (size)
 444                 vq_size = MIN(vq_size, size);
 445 
 446         /* allocsize1: descriptor table + avail ring + pad */
 447         allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 448             sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
 449         /* allocsize2: used ring + pad */
 450         allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
 451             sizeof (struct vring_used_elem) * vq_size);
 452 
 453         allocsize = allocsize1 + allocsize2;
 454 
 455         ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 456             DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 457         if (ret != DDI_SUCCESS) {
 458                 dev_err(sc->sc_dev, CE_WARN,
 459                     "Failed to allocate dma handle for vq %d", index);
 460                 goto out_alloc_handle;
 461         }
 462 
 463         ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 464             &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 465             (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 466         if (ret != DDI_SUCCESS) {
 467                 dev_err(sc->sc_dev, CE_WARN,
 468                     "Failed to allocate dma memory for vq %d", index);
 469                 goto out_alloc;
 470         }
 471 
 472         ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 473             (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 474             DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 475         if (ret != DDI_DMA_MAPPED) {
 476                 dev_err(sc->sc_dev, CE_WARN,
 477                     "Failed to bind dma memory for vq %d", index);
 478                 goto out_bind;
 479         }
 480 
 481         /* We asked for a single segment */
 482         ASSERT(ncookies == 1);
  483         /* and page-aligned buffers. */
 484         ASSERT(vq->vq_dma_cookie.dmac_laddress % VIRTIO_PAGE_SIZE == 0);
 485 
 486         (void) memset(vq->vq_vaddr, 0, allocsize);
 487 
 488         /* Make sure all zeros hit the buffer before we point the host to it */
 489         membar_producer();
 490 
 491         /* set the vq address */
 492         ddi_put32(sc->sc_ioh,
 493             /* LINTED E_BAD_PTR_CAST_ALIGN */
 494             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS),
 495             (vq->vq_dma_cookie.dmac_laddress / VIRTIO_PAGE_SIZE));
 496 
 497         /* remember addresses and offsets for later use */
 498         vq->vq_owner = sc;
 499         vq->vq_num = vq_size;
 500         vq->vq_index = index;
 501         vq->vq_descs = vq->vq_vaddr;
  502         vq->vq_availoffset = sizeof (struct vring_desc) * vq_size;
 503         vq->vq_avail = (void *)(((char *)vq->vq_descs) + vq->vq_availoffset);
 504         vq->vq_usedoffset = allocsize1;
 505         vq->vq_used = (void *)(((char *)vq->vq_descs) + vq->vq_usedoffset);
 506 
 507         ASSERT(indirect_num == 0 ||
 508             virtio_has_feature(sc, VIRTIO_F_RING_INDIRECT_DESC));
 509         vq->vq_indirect_num = indirect_num;
 510 
 511         /* free slot management */
 512         vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 513             KM_SLEEP);
 514 
 515         ret = virtio_init_vq(sc, vq);
 516         if (ret)
 517                 goto out_init;
 518 
 519         dev_debug(sc->sc_dev, CE_NOTE,
 520             "Allocated %d entries for vq %d:%s (%d indirect descs)",
 521             vq_size, index, name, indirect_num * vq_size);
 522 
 523         return (vq);
 524 
 525 out_init:
 526         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 527         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 528 out_bind:
 529         ddi_dma_mem_free(&vq->vq_dma_acch);
 530 out_alloc:
 531         ddi_dma_free_handle(&vq->vq_dma_handle);
 532 out_alloc_handle:
 533         kmem_free(vq, sizeof (struct virtqueue));
 534 out:
 535         return (NULL);
 536 }
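
      /*
       * Usage sketch: queues are normally allocated after virtio_register_ints()
       * (the vq mutexes are initialized with sc_intr_prio, which is only known
       * once interrupts are registered) and before virtio_enable_ints(). The
       * queue indexes, sizes and names below are made up:
       *
       *        sc->sc_rx_vq = virtio_alloc_vq(sc, 0, 0, 0, "rx");
       *        sc->sc_tx_vq = virtio_alloc_vq(sc, 1, 0, MYDEV_MAX_INDIRECT, "tx");
       *        if (sc->sc_rx_vq == NULL || sc->sc_tx_vq == NULL)
       *                ... clean up with virtio_free_vq() and fail attach ...
       */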
 537 
 538 void
 539 virtio_free_vq(struct virtqueue *vq)
 540 {
 541         struct virtio_softc *sc = vq->vq_owner;
 542         int i;
 543 
  544         /* tell the device that there is no virtqueue any longer */
 545         ddi_put16(sc->sc_ioh,
 546             /* LINTED E_BAD_PTR_CAST_ALIGN */
 547             (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
 548             vq->vq_index);
 549         ddi_put32(sc->sc_ioh,
 550             /* LINTED E_BAD_PTR_CAST_ALIGN */
 551             (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_ADDRESS), 0);
 552 
 553         /* Free the indirect descriptors, if any. */
 554         for (i = 0; i < vq->vq_num; i++) {
 555                 struct vq_entry *entry = &vq->vq_entries[i];
 556                 if (entry->qe_indirect_descs)
 557                         virtio_free_indirect(entry);
 558         }
 559 
 560         kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq->vq_num);
 561 
 562         (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 563         ddi_dma_mem_free(&vq->vq_dma_acch);
 564         ddi_dma_free_handle(&vq->vq_dma_handle);
 565 
 566         mutex_destroy(&vq->vq_used_lock);
 567         mutex_destroy(&vq->vq_avail_lock);
 568         mutex_destroy(&vq->vq_freelist_lock);
 569 
 570         kmem_free(vq, sizeof (struct virtqueue));
 571 }
 572 
 573 /*
 574  * Free descriptor management.
 575  */
 576 struct vq_entry *
 577 vq_alloc_entry(struct virtqueue *vq)
 578 {
 579         struct vq_entry *qe;
 580 
 581         mutex_enter(&vq->vq_freelist_lock);
 582         if (list_is_empty(&vq->vq_freelist)) {
 583                 mutex_exit(&vq->vq_freelist_lock);
 584                 return (NULL);
 585         }
 586         qe = list_remove_head(&vq->vq_freelist);
 587 
 588         ASSERT(vq->vq_used_entries >= 0);
 589         vq->vq_used_entries++;
 590 
 591         mutex_exit(&vq->vq_freelist_lock);
 592 
 593         qe->qe_next = NULL;
 594         qe->qe_indirect_next = 0;
 595         (void) memset(qe->qe_desc, 0, sizeof (struct vring_desc));
 596 
 597         return (qe);
 598 }
 599 
 600 void
 601 vq_free_entry(struct virtqueue *vq, struct vq_entry *qe)
 602 {
 603         mutex_enter(&vq->vq_freelist_lock);
 604 
 605         list_insert_head(&vq->vq_freelist, qe);
 606         vq->vq_used_entries--;
 607         ASSERT(vq->vq_used_entries >= 0);
 608         mutex_exit(&vq->vq_freelist_lock);
 609 }
 610 
 611 /*
 612  * We (intentionally) don't have a global vq mutex, so you are
  613  * responsible for external locking to avoid allocating/freeing any
 614  * entries before using the returned value. Have fun.
 615  */
 616 uint_t
 617 vq_num_used(struct virtqueue *vq)
 618 {
 619         /* vq->vq_freelist_lock would not help here. */
 620         return (vq->vq_used_entries);
 621 }
 622 
 623 static inline void
 624 virtio_ve_set_desc(struct vring_desc *desc, uint64_t paddr, uint32_t len,
 625     boolean_t write)
 626 {
 627         desc->addr = paddr;
 628         desc->len = len;
 629         desc->next = 0;
 630         desc->flags = 0;
 631 
 632         /* 'write' - from the driver's point of view */
 633         if (!write)
 634                 desc->flags = VRING_DESC_F_WRITE;
 635 }
 636 
 637 void
 638 virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 639     boolean_t write)
 640 {
 641         virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 642 }
 643 
 644 unsigned int
 645 virtio_ve_indirect_available(struct vq_entry *qe)
 646 {
 647         return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
 648 }
 649 
 650 void
 651 virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 652     boolean_t write)
 653 {
 654         struct vring_desc *indirect_desc;
 655 
 656         ASSERT(qe->qe_queue->vq_indirect_num);
 657         ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 658 
 659         indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
 660         virtio_ve_set_desc(indirect_desc, paddr, len, write);
 661         qe->qe_indirect_next++;
 662 }
 663 
 664 void
 665 virtio_ve_add_cookie(struct vq_entry *qe, ddi_dma_handle_t dma_handle,
 666     ddi_dma_cookie_t dma_cookie, unsigned int ncookies, boolean_t write)
 667 {
 668         int i;
 669 
 670         for (i = 0; i < ncookies; i++) {
 671                 virtio_ve_add_indirect_buf(qe, dma_cookie.dmac_laddress,
 672                     dma_cookie.dmac_size, write);
 673                 ddi_dma_nextcookie(dma_handle, &dma_cookie);
 674         }
 675 }
 676 
 677 void
 678 virtio_sync_vq(struct virtqueue *vq)
 679 {
 680         struct virtio_softc *vsc = vq->vq_owner;
 681 
 682         /* Make sure the avail ring update hit the buffer */
 683         membar_producer();
 684 
 685         vq->vq_avail->idx = vq->vq_avail_idx;
 686 
 687         /* Make sure the avail idx update hits the buffer */
 688         membar_producer();
 689 
 690         /* Make sure we see the flags update */
 691         membar_consumer();
 692 
 693         if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 694                 ddi_put16(vsc->sc_ioh,
 695                     /* LINTED E_BAD_PTR_CAST_ALIGN */
 696                     (uint16_t *)(vsc->sc_io_addr +
 697                     VIRTIO_CONFIG_QUEUE_NOTIFY),
 698                     vq->vq_index);
 699         }
 700 }
 701 
 702 void
 703 virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 704 {
 705         struct virtqueue *vq = qe->qe_queue;
 706         struct vq_entry *head = qe;
 707         struct vring_desc *desc;
 708         int idx;
 709 
 710         ASSERT(qe);
 711 
 712         /*
 713          * Bind the descs together, paddr and len should be already
 714          * set with virtio_ve_set
 715          */
 716         do {
 717                 /* Bind the indirect descriptors */
 718                 if (qe->qe_indirect_next > 1) {
 719                         uint16_t i = 0;
 720 
 721                         /*
 722                          * Set the pointer/flags to the
 723                          * first indirect descriptor
 724                          */
 725                         virtio_ve_set_desc(qe->qe_desc,
 726                             qe->qe_indirect_dma_cookie.dmac_laddress,
 727                             sizeof (struct vring_desc) * qe->qe_indirect_next,
 728                             B_FALSE);
 729                         qe->qe_desc->flags |= VRING_DESC_F_INDIRECT;
 730 
 731                         /* For all but the last one, add the next index/flag */
 732                         do {
 733                                 desc = &qe->qe_indirect_descs[i];
 734                                 i++;
 735 
 736                                 desc->flags |= VRING_DESC_F_NEXT;
 737                                 desc->next = i;
 738                         } while (i < qe->qe_indirect_next - 1);
 739 
 740                 }
 741 
 742                 if (qe->qe_next) {
 743                         qe->qe_desc->flags |= VRING_DESC_F_NEXT;
 744                         qe->qe_desc->next = qe->qe_next->qe_index;
 745                 }
 746 
 747                 qe = qe->qe_next;
 748         } while (qe);
 749 
 750         mutex_enter(&vq->vq_avail_lock);
 751         idx = vq->vq_avail_idx;
 752         vq->vq_avail_idx++;
 753 
 754         /* Make sure the bits hit the descriptor(s) */
 755         membar_producer();
 756         vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 757 
 758         /* Notify the device, if needed. */
 759         if (sync)
 760                 virtio_sync_vq(vq);
 761 
 762         mutex_exit(&vq->vq_avail_lock);
 763 }
 764 
 765 /*
 766  * Get a chain of descriptors from the used ring, if one is available.
 767  */
 768 struct vq_entry *
 769 virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 770 {
 771         struct vq_entry *head;
 772         int slot;
 773         int usedidx;
 774 
 775         mutex_enter(&vq->vq_used_lock);
 776 
 777         /* No used entries? Bye. */
 778         if (vq->vq_used_idx == vq->vq_used->idx) {
 779                 mutex_exit(&vq->vq_used_lock);
 780                 return (NULL);
 781         }
 782 
 783         usedidx = vq->vq_used_idx;
 784         vq->vq_used_idx++;
 785         mutex_exit(&vq->vq_used_lock);
 786 
 787         usedidx %= vq->vq_num;
 788 
 789         /* Make sure we do the next step _after_ checking the idx. */
 790         membar_consumer();
 791 
 792         slot = vq->vq_used->ring[usedidx].id;
 793         *len = vq->vq_used->ring[usedidx].len;
 794 
 795         head = &vq->vq_entries[slot];
 796 
 797         return (head);
 798 }
 799 
 800 void
 801 virtio_free_chain(struct vq_entry *qe)
 802 {
 803         struct vq_entry *tmp;
 804         struct virtqueue *vq = qe->qe_queue;
 805 
 806         ASSERT(qe);
 807 
 808         do {
 809                 ASSERT(qe->qe_queue == vq);
 810                 tmp = qe->qe_next;
 811                 vq_free_entry(vq, qe);
 812                 qe = tmp;
 813         } while (tmp != NULL);
 814 }
 815 
 816 void
 817 virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 818 {
 819         first->qe_next = second;
 820 }
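
      /*
       * A rough sketch of the descriptor chain life cycle, using only the
       * functions above. The buffer address and length are placeholders; a
       * real driver takes them from its own DMA cookies (see
       * virtio_ve_add_cookie() for the multi-cookie case).
       *
       * Submission, e.g. in a transmit routine:
       *
       *        struct vq_entry *ve = vq_alloc_entry(vq);
       *        if (ve == NULL)
       *                ... ring is full, back off and retry later ...
       *        virtio_ve_set(ve, buf_paddr, buf_len, B_TRUE);
       *        virtio_push_chain(ve, B_TRUE);
       *
       * Completion, e.g. in the queue's interrupt handler:
       *
       *        while ((ve = virtio_pull_chain(vq, &len)) != NULL) {
       *                ... process the completed buffer ...
       *                virtio_free_chain(ve);
       *        }
       */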
 821 
 822 static int
 823 virtio_register_msi(struct virtio_softc *sc,
 824     struct virtio_int_handler *config_handler,
 825     struct virtio_int_handler vq_handlers[], int intr_types)
 826 {
 827         int count, actual;
 828         int int_type;
 829         int i;
 830         int handler_count;
 831         int ret;
 832 
  833         /* If both MSI and MSI-X are reported, prefer MSI-X. */
 834         int_type = DDI_INTR_TYPE_MSI;
 835         if (intr_types & DDI_INTR_TYPE_MSIX)
 836                 int_type = DDI_INTR_TYPE_MSIX;
 837 
 838         /* Walk the handler table to get the number of handlers. */
 839         for (handler_count = 0;
 840             vq_handlers && vq_handlers[handler_count].vh_func;
 841             handler_count++)
 842                 ;
 843 
 844         /* +1 if there is a config change handler. */
 845         if (config_handler != NULL)
 846                 handler_count++;
 847 
 848         /* Number of MSIs supported by the device. */
 849         ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 850         if (ret != DDI_SUCCESS) {
 851                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 852                 return (ret);
 853         }
 854 
 855         /*
  856          * Those who try to register more handlers than the device
 857          * supports shall suffer.
 858          */
 859         ASSERT(handler_count <= count);
 860 
 861         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
 862             handler_count, KM_SLEEP);
 863 
 864         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 865             handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 866         if (ret != DDI_SUCCESS) {
 867                 dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 868                 goto out_msi_alloc;
 869         }
 870 
 871         if (actual != handler_count) {
 872                 dev_err(sc->sc_dev, CE_WARN,
 873                     "Not enough MSI available: need %d, available %d",
 874                     handler_count, actual);
                      ret = DDI_FAILURE;
  875                 goto out_msi_available;
 876         }
 877 
 878         sc->sc_intr_num = handler_count;
 879         sc->sc_intr_config = B_FALSE;
 880         if (config_handler != NULL) {
 881                 sc->sc_intr_config = B_TRUE;
 882         }
 883 
  884         /* Assume they all have the same priority */
 885         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 886         if (ret != DDI_SUCCESS) {
 887                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 888                 goto out_msi_prio;
 889         }
 890 
 891         /* Add the vq handlers */
 892         for (i = 0; vq_handlers[i].vh_func; i++) {
 893                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 894                     vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 895                 if (ret != DDI_SUCCESS) {
 896                         dev_err(sc->sc_dev, CE_WARN,
 897                             "ddi_intr_add_handler failed");
 898                         /* Remove the handlers that succeeded. */
 899                         while (--i >= 0) {
 900                                 (void) ddi_intr_remove_handler(
 901                                     sc->sc_intr_htable[i]);
 902                         }
 903                         goto out_add_handlers;
 904                 }
 905         }
 906 
 907         /* Don't forget the config handler */
 908         if (config_handler != NULL) {
 909                 ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 910                     config_handler->vh_func, sc, config_handler->vh_priv);
 911                 if (ret != DDI_SUCCESS) {
 912                         dev_err(sc->sc_dev, CE_WARN,
 913                             "ddi_intr_add_handler failed");
 914                         /* Remove the handlers that succeeded. */
 915                         while (--i >= 0) {
 916                                 (void) ddi_intr_remove_handler(
 917                                     sc->sc_intr_htable[i]);
 918                         }
 919                         goto out_add_handlers;
 920                 }
 921         }
 922 
 923         /* We know we are using MSI, so set the config offset. */
 924         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 925 
 926         ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 927         /* Just in case. */
 928         if (ret != DDI_SUCCESS)
 929                 sc->sc_intr_cap = 0;
  930 
              return (0);

 931 out_add_handlers:
 932 out_msi_prio:
 933 out_msi_available:
 934         for (i = 0; i < actual; i++)
 935                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
 936 out_msi_alloc:
  937         kmem_free(sc->sc_intr_htable,
                  sizeof (ddi_intr_handle_t) * handler_count);
 938 
 939         return (ret);
 940 }
 941 
 942 struct virtio_handler_container {
 943         int nhandlers;
 944         struct virtio_int_handler config_handler;
 945         struct virtio_int_handler vq_handlers[];
 946 };
 947 
 948 uint_t
 949 virtio_intx_dispatch(caddr_t arg1, caddr_t arg2)
 950 {
 951         struct virtio_softc *sc = (void *)arg1;
 952         struct virtio_handler_container *vhc = (void *)arg2;
 953         uint8_t isr_status;
 954         int i;
 955 
 956         isr_status = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
 957             VIRTIO_CONFIG_ISR_STATUS));
 958 
 959         if (!isr_status)
 960                 return (DDI_INTR_UNCLAIMED);
 961 
 962         if ((isr_status & VIRTIO_CONFIG_ISR_CONFIG_CHANGE) &&
 963             vhc->config_handler.vh_func) {
 964                 vhc->config_handler.vh_func((void *)sc,
 965                     vhc->config_handler.vh_priv);
 966         }
 967 
 968         /* Notify all handlers */
 969         for (i = 0; i < vhc->nhandlers; i++) {
 970                 vhc->vq_handlers[i].vh_func((void *)sc,
 971                     vhc->vq_handlers[i].vh_priv);
 972         }
 973 
 974         return (DDI_INTR_CLAIMED);
 975 }
 976 
 977 /*
  978  * config_handler and vq_handlers may be allocated on the stack.
  979  * Take precautions not to lose them.
 980  */
 981 static int
 982 virtio_register_intx(struct virtio_softc *sc,
 983     struct virtio_int_handler *config_handler,
 984     struct virtio_int_handler vq_handlers[])
 985 {
 986         int vq_handler_count;
 988         int actual;
 989         struct virtio_handler_container *vhc;
 990         int ret = DDI_FAILURE;
 991 
 992         /* Walk the handler table to get the number of handlers. */
 993         for (vq_handler_count = 0;
 994             vq_handlers && vq_handlers[vq_handler_count].vh_func;
 995             vq_handler_count++)
 996                 ;
 997 
1001         vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1002             sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1003 
1004         vhc->nhandlers = vq_handler_count;
1005         (void) memcpy(vhc->vq_handlers, vq_handlers,
1006             sizeof (struct virtio_int_handler) * vq_handler_count);
1007 
1008         if (config_handler != NULL) {
1009                 (void) memcpy(&vhc->config_handler, config_handler,
1010                     sizeof (struct virtio_int_handler));
1011         }
1012 
1013         /* Just a single entry for a single interrupt. */
1014         sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1015 
1016         ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1017             DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1018         if (ret != DDI_SUCCESS) {
1019                 dev_err(sc->sc_dev, CE_WARN,
1020                     "Failed to allocate a fixed interrupt: %d", ret);
1021                 goto out_int_alloc;
1022         }
1023 
1024         ASSERT(actual == 1);
1025         sc->sc_intr_num = 1;
1026 
1027         ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
1028         if (ret != DDI_SUCCESS) {
1029                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
1030                 goto out_prio;
1031         }
1032 
1033         ret = ddi_intr_add_handler(sc->sc_intr_htable[0],
1034             virtio_intx_dispatch, sc, vhc);
1035         if (ret != DDI_SUCCESS) {
1036                 dev_err(sc->sc_dev, CE_WARN, "ddi_intr_add_handler failed");
1037                 goto out_add_handlers;
1038         }
1039 
1040         /* We know we are not using MSI, so set the config offset. */
1041         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1042 
1043         return (DDI_SUCCESS);
1044 
1045 out_add_handlers:
1046 out_prio:
1047         (void) ddi_intr_free(sc->sc_intr_htable[0]);
1048 out_int_alloc:
1049         kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t));
 1050         kmem_free(vhc, sizeof (struct virtio_handler_container) +
 1051             sizeof (struct virtio_int_handler) * vq_handler_count);
1052         return (ret);
1053 }
1054 
1055 /*
 1056  * We find out whether MSI is supported during this call, and the register
 1057  * layout depends on that (doh). Don't access the device-specific bits in
1058  * BAR 0 before calling it!
1059  */
1060 int
1061 virtio_register_ints(struct virtio_softc *sc,
1062     struct virtio_int_handler *config_handler,
1063     struct virtio_int_handler vq_handlers[])
1064 {
1065         int ret;
1066         int intr_types;
1067 
1068         /* Determine which types of interrupts are supported */
1069         ret = ddi_intr_get_supported_types(sc->sc_dev, &intr_types);
1070         if (ret != DDI_SUCCESS) {
1071                 dev_err(sc->sc_dev, CE_WARN, "Can't get supported int types");
1072                 goto out_inttype;
1073         }
1074 
 1075         /* If MSI is available, use it. */
1076         if (intr_types & (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
1077                 ret = virtio_register_msi(sc, config_handler,
1078                     vq_handlers, intr_types);
1079                 if (!ret)
1080                         return (0);
1081         }
1082 
1083         /* Fall back to old-fashioned interrupts. */
1084         if (intr_types & DDI_INTR_TYPE_FIXED) {
1085                 dev_debug(sc->sc_dev, CE_WARN,
1086                     "Using legacy interrupts");
1087 
1088                 return (virtio_register_intx(sc, config_handler, vq_handlers));
1089         }
1090 
1091         dev_err(sc->sc_dev, CE_WARN,
1092             "MSI failed and fixed interrupts not supported. Giving up.");
1093         ret = DDI_FAILURE;
1094 
1095 out_inttype:
1096         return (ret);
1097 }
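
      /*
       * The vq_handlers argument is a table terminated by an entry whose
       * vh_func is NULL; that is how the handler count is computed above.
       * A sketch, where the mydev_* handler functions are hypothetical
       * driver routines:
       *
       *        struct virtio_int_handler mydev_conf_handler = {
       *                .vh_func = mydev_config_change, .vh_priv = sc
       *        };
       *        struct virtio_int_handler mydev_vq_handlers[] = {
       *                { .vh_func = mydev_rx_intr, .vh_priv = sc },
       *                { .vh_func = mydev_tx_intr, .vh_priv = sc },
       *                { .vh_func = NULL }
       *        };
       *
       *        ret = virtio_register_ints(sc, &mydev_conf_handler,
       *            mydev_vq_handlers);
       */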
1098 
1099 static int
1100 virtio_enable_msi(struct virtio_softc *sc)
1101 {
1102         int ret, i;
1103         int vq_handler_count = sc->sc_intr_num;
1104 
 1105         /* Number of handlers, not counting the config handler. */
1106         if (sc->sc_intr_config)
1107                 vq_handler_count--;
1108 
 1109         /* Enable the interrupts, either as a whole block or one by one. */
1110         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1111                 ret = ddi_intr_block_enable(sc->sc_intr_htable,
1112                     sc->sc_intr_num);
1113                 if (ret != DDI_SUCCESS) {
1114                         dev_err(sc->sc_dev, CE_WARN,
1115                             "Failed to enable MSI, falling back to INTx");
1116                         goto out_enable;
1117                 }
1118         } else {
1119                 for (i = 0; i < sc->sc_intr_num; i++) {
1120                         ret = ddi_intr_enable(sc->sc_intr_htable[i]);
1121                         if (ret != DDI_SUCCESS) {
1122                                 dev_err(sc->sc_dev, CE_WARN,
1123                                     "Failed to enable MSI %d, "
1124                                     "falling back to INTx", i);
1125 
1126                                 while (--i >= 0) {
1127                                         (void) ddi_intr_disable(
1128                                             sc->sc_intr_htable[i]);
1129                                 }
1130                                 goto out_enable;
1131                         }
1132                 }
1133         }
1134 
1135         /* Bind the allocated MSI to the queues and config */
1136         for (i = 0; i < vq_handler_count; i++) {
1137                 int check;
1138 
1139                 ddi_put16(sc->sc_ioh,
1140                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1141                     (uint16_t *)(sc->sc_io_addr +
1142                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1143 
1144                 ddi_put16(sc->sc_ioh,
1145                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1146                     (uint16_t *)(sc->sc_io_addr +
1147                     VIRTIO_CONFIG_QUEUE_VECTOR), i);
1148 
1149                 check = ddi_get16(sc->sc_ioh,
1150                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1151                     (uint16_t *)(sc->sc_io_addr +
1152                     VIRTIO_CONFIG_QUEUE_VECTOR));
1153                 if (check != i) {
1154                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1155                             "for VQ %d, MSI %d. Check = %x", i, i, check);
1156                         ret = ENODEV;
1157                         goto out_bind;
1158                 }
1159         }
1160 
1161         if (sc->sc_intr_config) {
1162                 int check;
1163 
1164                 ddi_put16(sc->sc_ioh,
1165                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1166                     (uint16_t *)(sc->sc_io_addr +
1167                     VIRTIO_CONFIG_CONFIG_VECTOR), i);
1168 
1169                 check = ddi_get16(sc->sc_ioh,
1170                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1171                     (uint16_t *)(sc->sc_io_addr +
1172                     VIRTIO_CONFIG_CONFIG_VECTOR));
1173                 if (check != i) {
1174                         dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1175                             "for Config updates, MSI %d", i);
1176                         ret = ENODEV;
1177                         goto out_bind;
1178                 }
1179         }
1180 
1181         return (DDI_SUCCESS);
1182 
1183 out_bind:
1184         /* Unbind the vqs */
1185         for (i = 0; i < vq_handler_count - 1; i++) {
1186                 ddi_put16(sc->sc_ioh,
1187                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1188                     (uint16_t *)(sc->sc_io_addr +
1189                     VIRTIO_CONFIG_QUEUE_SELECT), i);
1190 
1191                 ddi_put16(sc->sc_ioh,
1192                     /* LINTED E_BAD_PTR_CAST_ALIGN */
1193                     (uint16_t *)(sc->sc_io_addr +
1194                     VIRTIO_CONFIG_QUEUE_VECTOR),
1195                     VIRTIO_MSI_NO_VECTOR);
1196         }
1197         /* And the config */
1198         /* LINTED E_BAD_PTR_CAST_ALIGN */
1199         ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1200             VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1201 
1202         ret = DDI_FAILURE;
1203 
1204 out_enable:
1205         return (ret);
1206 }
1207 
1208 static int
1209 virtio_enable_intx(struct virtio_softc *sc)
1210 {
1211         int ret;
1212 
1213         ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1214         if (ret != DDI_SUCCESS) {
1215                 dev_err(sc->sc_dev, CE_WARN,
1216                     "Failed to enable interrupt: %d", ret);
1217         }
1218 
1219         return (ret);
1220 }
1221 
1222 /*
 1223  * We can't enable/disable individual handlers in the INTx case, so we
 1224  * handle the whole bunch even in the MSI case.
1225  */
1226 int
1227 virtio_enable_ints(struct virtio_softc *sc)
1228 {
1229 
1230         /* See if we are using MSI. */
1231         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI)
1232                 return (virtio_enable_msi(sc));
1233 
1234         ASSERT(sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI);
1235 
1236         return (virtio_enable_intx(sc));
1237 }
1238 
1239 void
1240 virtio_release_ints(struct virtio_softc *sc)
1241 {
1242         int i;
1243         int ret;
1244 
1245         /* We were running with MSI, unbind them. */
1246         if (sc->sc_config_offset == VIRTIO_CONFIG_DEVICE_CONFIG_MSI) {
1247                 /* Unbind all vqs */
1248                 for (i = 0; i < sc->sc_nvqs; i++) {
1249                         ddi_put16(sc->sc_ioh,
1250                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1251                             (uint16_t *)(sc->sc_io_addr +
1252                             VIRTIO_CONFIG_QUEUE_SELECT), i);
1253 
1254                         ddi_put16(sc->sc_ioh,
1255                             /* LINTED E_BAD_PTR_CAST_ALIGN */
1256                             (uint16_t *)(sc->sc_io_addr +
1257                             VIRTIO_CONFIG_QUEUE_VECTOR),
1258                             VIRTIO_MSI_NO_VECTOR);
1259                 }
1260                 /* And the config */
1261                 /* LINTED E_BAD_PTR_CAST_ALIGN */
1262                 ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1263                     VIRTIO_CONFIG_CONFIG_VECTOR),
1264                     VIRTIO_MSI_NO_VECTOR);
1265 
1266         }
1267 
 1268         /* Disable the interrupts, either as a whole block or one by one. */
1269         if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1270                 ret = ddi_intr_block_disable(sc->sc_intr_htable,
1271                     sc->sc_intr_num);
1272                 if (ret != DDI_SUCCESS) {
1273                         dev_err(sc->sc_dev, CE_WARN,
1274                             "Failed to disable MSIs, won't be able to "
1275                             "reuse next time");
1276                 }
1277         } else {
1278                 for (i = 0; i < sc->sc_intr_num; i++) {
1279                         ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1280                         if (ret != DDI_SUCCESS) {
1281                                 dev_err(sc->sc_dev, CE_WARN,
1282                                     "Failed to disable interrupt %d, "
1283                                     "won't be able to reuse", i);
1284                         }
1285                 }
1286         }
1287 
1288 
1289         for (i = 0; i < sc->sc_intr_num; i++) {
1290                 (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1291         }
1292 
1293         for (i = 0; i < sc->sc_intr_num; i++)
1294                 (void) ddi_intr_free(sc->sc_intr_htable[i]);
1295 
1296         kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
1297             sc->sc_intr_num);
1298 
1299         /* After disabling interrupts, the config offset is non-MSI. */
1300         sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1301 }
1302 
1303 /*
1304  * Module linkage information for the kernel.
1305  */
1306 static struct modlmisc modlmisc = {
1307         &mod_miscops,       /* Type of module */
1308         "VirtIO common library module",
1309 };
1310 
1311 static struct modlinkage modlinkage = {
1312         MODREV_1,
1313         {
1314                 (void *)&modlmisc,
1315                 NULL
1316         }
1317 };
1318 
1319 int
1320 _init(void)
1321 {
1322         return (mod_install(&modlinkage));
1323 }
1324 
1325 int
1326 _fini(void)
1327 {
1328         return (mod_remove(&modlinkage));
1329 }
1330 
1331 int
1332 _info(struct modinfo *modinfop)
1333 {
1334         return (mod_info(&modlinkage, modinfop));
1335 }