Print this page
3644 Add virtio-net support into the Illumos
Reviewed by: Alexey Zaytsev, alexey.zaytsev@gmail.com
Reviewed by: Yuri Pankov, yuri.pankov@nexenta.com
Reviewed by: David Hoppner, 0xffea@gmail.com

Split Close
Expand all
Collapse all
          --- old/usr/src/uts/common/io/virtio/virtio.c
          +++ new/usr/src/uts/common/io/virtio/virtio.c
↓ open down ↓ 63 lines elided ↑ open up ↑
  64   64  #include <sys/spl.h>
  65   65  #include <sys/promif.h>
  66   66  #include <sys/list.h>
  67   67  #include <sys/bootconf.h>
  68   68  #include <sys/bootsvcs.h>
  69   69  #include <sys/sysmacros.h>
  70   70  #include <sys/pci.h>
  71   71  
  72   72  #include "virtiovar.h"
  73   73  #include "virtioreg.h"
       74 +
  74   75  #define NDEVNAMES       (sizeof (virtio_device_name) / sizeof (char *))
  75   76  #define MINSEG_INDIRECT 2       /* use indirect if nsegs >= this value */
  76   77  #define VIRTQUEUE_ALIGN(n) (((n)+(VIRTIO_PAGE_SIZE-1)) & \
  77   78              ~(VIRTIO_PAGE_SIZE-1))
  78   79  
  79   80  void
  80   81  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  81   82  {
  82   83          int old = 0;
  83   84  
  84      -        if (status != 0)
  85      -                old = ddi_get8(sc->sc_ioh,
  86      -                    (uint8_t *)(sc->sc_io_addr +
       85 +        if (status != 0) {
       86 +                old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
  87   87                      VIRTIO_CONFIG_DEVICE_STATUS));
       88 +        }
  88   89  
  89      -        ddi_put8(sc->sc_ioh,
  90      -            (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
  91      -            status | old);
       90 +        ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
       91 +            VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  92   92  }
  93   93  
  94   94  /*
  95   95   * Negotiate features, save the result in sc->sc_features
  96   96   */
  97   97  uint32_t
  98   98  virtio_negotiate_features(struct virtio_softc *sc, uint32_t guest_features)
  99   99  {
 100  100          uint32_t host_features;
 101  101          uint32_t features;
 102  102  
 103  103          host_features = ddi_get32(sc->sc_ioh,
 104  104              /* LINTED E_BAD_PTR_CAST_ALIGN */
 105  105              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));
 106  106  
 107      -        dev_debug(sc->sc_dev, CE_NOTE,
 108      -            "host features: %x, guest features: %x",
      107 +        dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
 109  108              host_features, guest_features);
 110  109  
 111  110          features = host_features & guest_features;
 112  111          ddi_put32(sc->sc_ioh,
 113  112              /* LINTED E_BAD_PTR_CAST_ALIGN */
 114  113              (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_GUEST_FEATURES),
 115  114              features);
 116  115  
 117  116          sc->sc_features = features;
 118  117  
 119  118          return (host_features);
 120  119  }
 121  120  
 122  121  size_t
 123      -virtio_show_features(uint32_t features,
 124      -    char *buf, size_t len)
      122 +virtio_show_features(uint32_t features, char *buf, size_t len)
 125  123  {
 126  124          char *orig_buf = buf;
 127  125          char *bufend = buf + len;
 128  126  
 129  127          /* LINTED E_PTRDIFF_OVERFLOW */
 130  128          buf += snprintf(buf, bufend - buf, "Generic ( ");
 131  129          if (features & VIRTIO_F_RING_INDIRECT_DESC)
 132  130                  /* LINTED E_PTRDIFF_OVERFLOW */
 133  131                  buf += snprintf(buf, bufend - buf, "INDIRECT_DESC ");
 134  132  
↓ open down ↓ 52 lines elided ↑ open up ↑
 187  185  
 188  186          r <<= 32;
 189  187  
 190  188          r += ddi_get32(sc->sc_ioh,
 191  189              /* LINTED E_BAD_PTR_CAST_ALIGN */
 192  190              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
 193  191          return (r);
 194  192  }
 195  193  
 196  194  void
 197      -virtio_write_device_config_1(struct virtio_softc *sc,
 198      -    unsigned int index, uint8_t value)
      195 +virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
      196 +    uint8_t value)
 199  197  {
 200  198          ASSERT(sc->sc_config_offset);
 201  199          ddi_put8(sc->sc_ioh,
 202  200              (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 203  201  }
 204  202  
 205  203  void
 206      -virtio_write_device_config_2(struct virtio_softc *sc,
 207      -    unsigned int index, uint16_t value)
      204 +virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
      205 +    uint16_t value)
 208  206  {
 209  207          ASSERT(sc->sc_config_offset);
 210  208          ddi_put16(sc->sc_ioh,
 211  209              /* LINTED E_BAD_PTR_CAST_ALIGN */
 212  210              (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 213  211  }
 214  212  
 215  213  void
 216      -virtio_write_device_config_4(struct virtio_softc *sc,
 217      -    unsigned int index, uint32_t value)
      214 +virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
      215 +    uint32_t value)
 218  216  {
 219  217          ASSERT(sc->sc_config_offset);
 220  218          ddi_put32(sc->sc_ioh,
 221  219              /* LINTED E_BAD_PTR_CAST_ALIGN */
 222  220              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
 223  221  }
 224  222  
 225  223  void
 226      -virtio_write_device_config_8(struct virtio_softc *sc,
 227      -    unsigned int index, uint64_t value)
      224 +virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
      225 +    uint64_t value)
 228  226  {
 229  227          ASSERT(sc->sc_config_offset);
 230  228          ddi_put32(sc->sc_ioh,
 231  229              /* LINTED E_BAD_PTR_CAST_ALIGN */
 232  230              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
 233  231              value & 0xFFFFFFFF);
 234  232          ddi_put32(sc->sc_ioh,
 235  233              /* LINTED E_BAD_PTR_CAST_ALIGN */
 236  234              (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset +
 237  235              index + sizeof (uint32_t)), value >> 32);
↓ open down ↓ 8 lines elided ↑ open up ↑
 246  244          vq->vq_avail->flags |= VRING_AVAIL_F_NO_INTERRUPT;
 247  245  }
 248  246  
 249  247  void
 250  248  virtio_start_vq_intr(struct virtqueue *vq)
 251  249  {
 252  250          vq->vq_avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT;
 253  251  }
 254  252  
 255  253  static ddi_dma_attr_t virtio_vq_dma_attr = {
 256      -        DMA_ATTR_V0,    /* Version number */
 257      -        0,              /* low address */
 258      -        /*
 259      -         * high address. Has to fit into 32 bits
 260      -         * after page-shifting
 261      -         */
 262      -        0x00000FFFFFFFFFFF,
 263      -        0xFFFFFFFF,     /* counter register max */
 264      -        VIRTIO_PAGE_SIZE, /* page alignment required */
 265      -        0x3F,           /* burst sizes: 1 - 32 */
 266      -        0x1,            /* minimum transfer size */
 267      -        0xFFFFFFFF,     /* max transfer size */
 268      -        0xFFFFFFFF,     /* address register max */
 269      -        1,              /* no scatter-gather */
 270      -        1,              /* device operates on bytes */
 271      -        0,              /* attr flag: set to 0 */
      254 +        DMA_ATTR_V0,            /* Version number */
      255 +        0,                      /* low address */
      256 +        0x00000FFFFFFFFFFF,     /* high address. Has to fit into 32 bits */
      257 +                                /* after page-shifting */
      258 +        0xFFFFFFFF,             /* counter register max */
      259 +        VIRTIO_PAGE_SIZE,       /* page alignment required */
      260 +        0x3F,                   /* burst sizes: 1 - 32 */
      261 +        0x1,                    /* minimum transfer size */
      262 +        0xFFFFFFFF,             /* max transfer size */
      263 +        0xFFFFFFFF,             /* address register max */
      264 +        1,                      /* no scatter-gather */
      265 +        1,                      /* device operates on bytes */
      266 +        0,                      /* attr flag: set to 0 */
 272  267  };
 273  268  
 274  269  static ddi_dma_attr_t virtio_vq_indirect_dma_attr = {
 275      -        DMA_ATTR_V0,    /* Version number */
 276      -        0,              /* low address */
 277      -        0xFFFFFFFFFFFFFFFF, /* high address */
 278      -        0xFFFFFFFF,     /* counter register max */
 279      -        1,              /* No specific alignment */
 280      -        0x3F,           /* burst sizes: 1 - 32 */
 281      -        0x1,            /* minimum transfer size */
 282      -        0xFFFFFFFF,     /* max transfer size */
 283      -        0xFFFFFFFF,     /* address register max */
 284      -        1,              /* no scatter-gather */
 285      -        1,              /* device operates on bytes */
 286      -        0,              /* attr flag: set to 0 */
      270 +        DMA_ATTR_V0,            /* Version number */
      271 +        0,                      /* low address */
      272 +        0xFFFFFFFFFFFFFFFF,     /* high address */
      273 +        0xFFFFFFFF,             /* counter register max */
      274 +        1,                      /* No specific alignment */
      275 +        0x3F,                   /* burst sizes: 1 - 32 */
      276 +        0x1,                    /* minimum transfer size */
      277 +        0xFFFFFFFF,             /* max transfer size */
      278 +        0xFFFFFFFF,             /* address register max */
      279 +        1,                      /* no scatter-gather */
      280 +        1,                      /* device operates on bytes */
      281 +        0,                      /* attr flag: set to 0 */
 287  282  };
 288  283  
 289  284  /* Same for direct and indirect descriptors. */
 290  285  static ddi_device_acc_attr_t virtio_vq_devattr = {
 291  286          DDI_DEVICE_ATTR_V0,
 292  287          DDI_NEVERSWAP_ACC,
 293  288          DDI_STORECACHING_OK_ACC,
 294  289          DDI_DEFAULT_ACC
 295  290  };
 296  291  
↓ open down ↓ 19 lines elided ↑ open up ↑
 316  311  
 317  312          num = entry->qe_queue->vq_indirect_num;
 318  313          ASSERT(num > 1);
 319  314  
 320  315          allocsize = sizeof (struct vring_desc) * num;
 321  316  
 322  317          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
 323  318              DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
 324  319          if (ret != DDI_SUCCESS) {
 325  320                  dev_err(sc->sc_dev, CE_WARN,
 326      -                    "Failed to allocate dma handle for indirect descriptors,"
 327      -                    " entry %d, vq %d", entry->qe_index,
      321 +                    "Failed to allocate dma handle for indirect descriptors, "
      322 +                    "entry %d, vq %d", entry->qe_index,
 328  323                      entry->qe_queue->vq_index);
 329  324                  goto out_alloc_handle;
 330  325          }
 331  326  
 332      -        ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
 333      -            allocsize, &virtio_vq_devattr,
 334      -            DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
      327 +        ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
      328 +            &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 335  329              (caddr_t *)&entry->qe_indirect_descs, &len,
 336  330              &entry->qe_indirect_dma_acch);
 337  331          if (ret != DDI_SUCCESS) {
 338  332                  dev_err(sc->sc_dev, CE_WARN,
 339      -                    "Failed to alocate dma memory for indirect descriptors,"
 340      -                    " entry %d, vq %d,", entry->qe_index,
      333 +                    "Failed to allocate dma memory for indirect descriptors, "
      334 +                    "entry %d, vq %d,", entry->qe_index,
 341  335                      entry->qe_queue->vq_index);
 342  336                  goto out_alloc;
 343  337          }
 344  338  
 345  339          (void) memset(entry->qe_indirect_descs, 0xff, allocsize);
 346  340  
 347  341          ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
 348  342              (caddr_t)entry->qe_indirect_descs, len,
 349      -            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 350      -            DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_cookie, &ncookies);
      343 +            DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
      344 +            &entry->qe_indirect_dma_cookie, &ncookies);
 351  345          if (ret != DDI_DMA_MAPPED) {
 352  346                  dev_err(sc->sc_dev, CE_WARN,
 353      -                    "Failed to bind dma memory for indirect descriptors,"
      347 +                    "Failed to bind dma memory for indirect descriptors, "
 354  348                      "entry %d, vq %d", entry->qe_index,
 355  349                      entry->qe_queue->vq_index);
 356  350                  goto out_bind;
 357  351          }
 358  352  
 359  353          /* We asked for a single segment */
 360  354          ASSERT(ncookies == 1);
 361  355  
 362  356          return (0);
 363  357  
↓ open down ↓ 28 lines elided ↑ open up ↑
 392  386                  entry->qe_desc = &vq->vq_descs[i];
 393  387                  entry->qe_queue = vq;
 394  388  
 395  389                  if (indirect_num) {
 396  390                          ret = virtio_alloc_indirect(sc, entry);
 397  391                          if (ret)
 398  392                                  goto out_indirect;
 399  393                  }
 400  394          }
 401  395  
 402      -        mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
 403      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 404      -        mutex_init(&vq->vq_avail_lock, "virtio-avail",
 405      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
 406      -        mutex_init(&vq->vq_used_lock, "virtio-used",
 407      -            MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
      396 +        mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
      397 +            DDI_INTR_PRI(sc->sc_intr_prio));
      398 +        mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
      399 +            DDI_INTR_PRI(sc->sc_intr_prio));
      400 +        mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
      401 +            DDI_INTR_PRI(sc->sc_intr_prio));
 408  402  
 409  403          return (0);
 410  404  
 411  405  out_indirect:
 412  406          for (i = 0; i < vq_size; i++) {
 413  407                  struct vq_entry *entry = &vq->vq_entries[i];
 414  408                  if (entry->qe_indirect_descs)
 415  409                          virtio_free_indirect(entry);
 416  410          }
 417  411  
 418  412          return (ret);
 419  413  }
 420  414  
 421      -
 422      -
 423  415  /*
 424  416   * Allocate/free a vq.
 425  417   */
 426  418  struct virtqueue *
 427      -virtio_alloc_vq(struct virtio_softc *sc,
 428      -    unsigned int index,
 429      -    unsigned int size,
 430      -    unsigned int indirect_num,
 431      -    const char *name)
      419 +virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
      420 +    unsigned int indirect_num, const char *name)
 432  421  {
 433  422          int vq_size, allocsize1, allocsize2, allocsize = 0;
 434  423          int ret;
 435  424          unsigned int ncookies;
 436  425          size_t len;
 437  426          struct virtqueue *vq;
 438  427  
 439      -
 440  428          ddi_put16(sc->sc_ioh,
 441  429              /* LINTED E_BAD_PTR_CAST_ALIGN */
 442  430              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
 443  431          vq_size = ddi_get16(sc->sc_ioh,
 444  432              /* LINTED E_BAD_PTR_CAST_ALIGN */
 445  433              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SIZE));
 446  434          if (vq_size == 0) {
 447  435                  dev_err(sc->sc_dev, CE_WARN,
 448  436                      "virtqueue dest not exist, index %d for %s\n", index, name);
 449  437                  goto out;
 450  438          }
 451  439  
 452  440          vq = kmem_zalloc(sizeof (struct virtqueue), KM_SLEEP);
 453  441  
 454  442          /* size 0 => use native vq size, good for receive queues. */
 455  443          if (size)
 456  444                  vq_size = MIN(vq_size, size);
 457  445  
 458  446          /* allocsize1: descriptor table + avail ring + pad */
 459  447          allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
 460      -            sizeof (struct vring_avail) +
 461      -            sizeof (uint16_t) * vq_size);
      448 +            sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
 462  449          /* allocsize2: used ring + pad */
 463      -        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
 464      -            + sizeof (struct vring_used_elem) * vq_size);
      450 +        allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
      451 +            sizeof (struct vring_used_elem) * vq_size);
 465  452  
 466  453          allocsize = allocsize1 + allocsize2;
 467  454  
 468  455          ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
 469  456              DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
 470  457          if (ret != DDI_SUCCESS) {
 471  458                  dev_err(sc->sc_dev, CE_WARN,
 472  459                      "Failed to allocate dma handle for vq %d", index);
 473  460                  goto out_alloc_handle;
 474  461          }
 475  462  
 476  463          ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
 477  464              &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 478  465              (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
 479  466          if (ret != DDI_SUCCESS) {
 480  467                  dev_err(sc->sc_dev, CE_WARN,
 481      -                    "Failed to alocate dma memory for vq %d", index);
      468 +                    "Failed to allocate dma memory for vq %d", index);
 482  469                  goto out_alloc;
 483  470          }
 484  471  
 485      -
 486  472          ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
 487      -            (caddr_t)vq->vq_vaddr, len,
 488      -            DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
      473 +            (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
 489  474              DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
 490  475          if (ret != DDI_DMA_MAPPED) {
 491  476                  dev_err(sc->sc_dev, CE_WARN,
 492  477                      "Failed to bind dma memory for vq %d", index);
 493  478                  goto out_bind;
 494  479          }
 495  480  
 496  481          /* We asked for a single segment */
 497  482          ASSERT(ncookies == 1);
  498  483          /* and page-aligned buffers. */
↓ open down ↓ 26 lines elided ↑ open up ↑
 525  510  
 526  511          /* free slot management */
 527  512          vq->vq_entries = kmem_zalloc(sizeof (struct vq_entry) * vq_size,
 528  513              KM_SLEEP);
 529  514  
 530  515          ret = virtio_init_vq(sc, vq);
 531  516          if (ret)
 532  517                  goto out_init;
 533  518  
 534  519          dev_debug(sc->sc_dev, CE_NOTE,
 535      -            "Allocated %d entries for vq %d:%s (%d incdirect descs)",
      520 +            "Allocated %d entries for vq %d:%s (%d indirect descs)",
 536  521              vq_size, index, name, indirect_num * vq_size);
 537  522  
 538  523          return (vq);
 539  524  
 540  525  out_init:
 541  526          kmem_free(vq->vq_entries, sizeof (struct vq_entry) * vq_size);
 542  527          (void) ddi_dma_unbind_handle(vq->vq_dma_handle);
 543  528  out_bind:
 544  529          ddi_dma_mem_free(&vq->vq_dma_acch);
 545  530  out_alloc:
 546  531          ddi_dma_free_handle(&vq->vq_dma_handle);
 547  532  out_alloc_handle:
 548  533          kmem_free(vq, sizeof (struct virtqueue));
 549  534  out:
 550  535          return (NULL);
 551  536  }
 552  537  
 553      -
 554  538  void
 555  539  virtio_free_vq(struct virtqueue *vq)
 556  540  {
 557  541          struct virtio_softc *sc = vq->vq_owner;
 558  542          int i;
 559  543  
 560  544          /* tell device that there's no virtqueue any longer */
 561  545          ddi_put16(sc->sc_ioh,
 562  546              /* LINTED E_BAD_PTR_CAST_ALIGN */
 563  547              (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT),
↓ open down ↓ 77 lines elided ↑ open up ↑
 641  625      boolean_t write)
 642  626  {
 643  627          desc->addr = paddr;
 644  628          desc->len = len;
 645  629          desc->next = 0;
 646  630          desc->flags = 0;
 647  631  
 648  632          /* 'write' - from the driver's point of view */
 649  633          if (!write)
 650  634                  desc->flags = VRING_DESC_F_WRITE;
 651      -
 652      -
 653  635  }
 654  636  
 655  637  void
 656  638  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 657  639      boolean_t write)
 658  640  {
 659  641          virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
 660  642  }
 661  643  
      644 +unsigned int
      645 +virtio_ve_indirect_available(struct vq_entry *qe)
      646 +{
      647 +        return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
      648 +}
      649 +
 662  650  void
 663  651  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
 664  652      boolean_t write)
 665  653  {
 666  654          struct vring_desc *indirect_desc;
 667  655  
 668  656          ASSERT(qe->qe_queue->vq_indirect_num);
 669  657          ASSERT(qe->qe_indirect_next < qe->qe_queue->vq_indirect_num);
 670  658  
 671  659          indirect_desc = &qe->qe_indirect_descs[qe->qe_indirect_next];
↓ open down ↓ 23 lines elided ↑ open up ↑
 695  683          membar_producer();
 696  684  
 697  685          vq->vq_avail->idx = vq->vq_avail_idx;
 698  686  
 699  687          /* Make sure the avail idx update hits the buffer */
 700  688          membar_producer();
 701  689  
 702  690          /* Make sure we see the flags update */
 703  691          membar_consumer();
 704  692  
 705      -        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
      693 +        if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
 706  694                  ddi_put16(vsc->sc_ioh,
 707  695                      /* LINTED E_BAD_PTR_CAST_ALIGN */
 708  696                      (uint16_t *)(vsc->sc_io_addr +
 709  697                      VIRTIO_CONFIG_QUEUE_NOTIFY),
 710  698                      vq->vq_index);
      699 +        }
 711  700  }
 712  701  
 713  702  void
 714  703  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
 715  704  {
 716  705          struct virtqueue *vq = qe->qe_queue;
 717  706          struct vq_entry *head = qe;
 718  707          struct vring_desc *desc;
 719  708          int idx;
 720  709  
↓ open down ↓ 45 lines elided ↑ open up ↑
 766  755          membar_producer();
 767  756          vq->vq_avail->ring[idx % vq->vq_num] = head->qe_index;
 768  757  
 769  758          /* Notify the device, if needed. */
 770  759          if (sync)
 771  760                  virtio_sync_vq(vq);
 772  761  
 773  762          mutex_exit(&vq->vq_avail_lock);
 774  763  }
 775  764  
 776      -/* Get a chain of descriptors from the used ring, if one is available. */
      765 +/*
      766 + * Get a chain of descriptors from the used ring, if one is available.
      767 + */
 777  768  struct vq_entry *
 778  769  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
 779  770  {
 780  771          struct vq_entry *head;
 781  772          int slot;
 782  773          int usedidx;
 783  774  
 784  775          mutex_enter(&vq->vq_used_lock);
 785  776  
 786  777          /* No used entries? Bye. */
↓ open down ↓ 25 lines elided ↑ open up ↑
 812  803          struct vq_entry *tmp;
 813  804          struct virtqueue *vq = qe->qe_queue;
 814  805  
 815  806          ASSERT(qe);
 816  807  
 817  808          do {
 818  809                  ASSERT(qe->qe_queue == vq);
 819  810                  tmp = qe->qe_next;
 820  811                  vq_free_entry(vq, qe);
 821  812                  qe = tmp;
 822      -        } while (tmp);
      813 +        } while (tmp != NULL);
 823  814  }
 824  815  
 825  816  void
 826  817  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
 827  818  {
 828  819          first->qe_next = second;
 829  820  }
 830  821  
 831  822  static int
 832  823  virtio_register_msi(struct virtio_softc *sc,
 833  824      struct virtio_int_handler *config_handler,
 834      -    struct virtio_int_handler vq_handlers[],
 835      -    int intr_types)
      825 +    struct virtio_int_handler vq_handlers[], int intr_types)
 836  826  {
 837  827          int count, actual;
 838  828          int int_type;
 839  829          int i;
 840  830          int handler_count;
 841  831          int ret;
 842  832  
 843  833          /* If both MSI and MSI-x are reported, prefer MSI-x. */
 844  834          int_type = DDI_INTR_TYPE_MSI;
 845  835          if (intr_types & DDI_INTR_TYPE_MSIX)
 846  836                  int_type = DDI_INTR_TYPE_MSIX;
 847  837  
 848  838          /* Walk the handler table to get the number of handlers. */
 849  839          for (handler_count = 0;
 850  840              vq_handlers && vq_handlers[handler_count].vh_func;
 851  841              handler_count++)
 852  842                  ;
 853  843  
 854  844          /* +1 if there is a config change handler. */
 855      -        if (config_handler)
      845 +        if (config_handler != NULL)
 856  846                  handler_count++;
 857  847  
 858  848          /* Number of MSIs supported by the device. */
 859  849          ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
 860  850          if (ret != DDI_SUCCESS) {
 861  851                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_nintrs failed");
 862  852                  return (ret);
 863  853          }
 864  854  
 865  855          /*
  866  856           * Those who try to register more handlers than the device
 867  857           * supports shall suffer.
 868  858           */
 869  859          ASSERT(handler_count <= count);
 870  860  
 871      -        sc->sc_intr_htable = kmem_zalloc(
 872      -            sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);
      861 +        sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
      862 +            handler_count, KM_SLEEP);
 873  863  
 874  864          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
 875  865              handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
 876  866          if (ret != DDI_SUCCESS) {
 877  867                  dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
 878  868                  goto out_msi_alloc;
 879  869          }
 880  870  
 881  871          if (actual != handler_count) {
 882  872                  dev_err(sc->sc_dev, CE_WARN,
 883  873                      "Not enough MSI available: need %d, available %d",
 884  874                      handler_count, actual);
 885  875                  goto out_msi_available;
 886  876          }
 887  877  
 888  878          sc->sc_intr_num = handler_count;
 889  879          sc->sc_intr_config = B_FALSE;
 890      -        if (config_handler) {
      880 +        if (config_handler != NULL) {
 891  881                  sc->sc_intr_config = B_TRUE;
 892  882          }
 893  883  
 894  884          /* Assume they are all same priority */
 895  885          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
 896  886          if (ret != DDI_SUCCESS) {
 897  887                  dev_err(sc->sc_dev, CE_WARN, "ddi_intr_get_pri failed");
 898  888                  goto out_msi_prio;
 899  889          }
 900  890  
 901  891          /* Add the vq handlers */
 902  892          for (i = 0; vq_handlers[i].vh_func; i++) {
 903  893                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 904      -                    vq_handlers[i].vh_func,
 905      -                    sc, vq_handlers[i].vh_priv);
      894 +                    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
 906  895                  if (ret != DDI_SUCCESS) {
 907  896                          dev_err(sc->sc_dev, CE_WARN,
 908  897                              "ddi_intr_add_handler failed");
 909  898                          /* Remove the handlers that succeeded. */
 910  899                          while (--i >= 0) {
 911  900                                  (void) ddi_intr_remove_handler(
 912  901                                      sc->sc_intr_htable[i]);
 913  902                          }
 914  903                          goto out_add_handlers;
 915  904                  }
 916  905          }
 917  906  
 918  907          /* Don't forget the config handler */
 919      -        if (config_handler) {
      908 +        if (config_handler != NULL) {
 920  909                  ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
 921      -                    config_handler->vh_func,
 922      -                    sc, config_handler->vh_priv);
      910 +                    config_handler->vh_func, sc, config_handler->vh_priv);
 923  911                  if (ret != DDI_SUCCESS) {
 924  912                          dev_err(sc->sc_dev, CE_WARN,
 925  913                              "ddi_intr_add_handler failed");
 926  914                          /* Remove the handlers that succeeded. */
 927  915                          while (--i >= 0) {
 928  916                                  (void) ddi_intr_remove_handler(
 929  917                                      sc->sc_intr_htable[i]);
 930  918                          }
 931  919                          goto out_add_handlers;
 932  920                  }
 933  921          }
 934  922  
 935  923          /* We know we are using MSI, so set the config offset. */
 936  924          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;
 937  925  
 938      -        ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
 939      -            &sc->sc_intr_cap);
      926 +        ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
 940  927          /* Just in case. */
 941  928          if (ret != DDI_SUCCESS)
 942  929                  sc->sc_intr_cap = 0;
 943  930  
 944  931  out_add_handlers:
 945  932  out_msi_prio:
 946  933  out_msi_available:
 947  934          for (i = 0; i < actual; i++)
 948  935                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
 949  936  out_msi_alloc:
↓ open down ↓ 51 lines elided ↑ open up ↑
1001  988          int actual;
1002  989          struct virtio_handler_container *vhc;
1003  990          int ret = DDI_FAILURE;
1004  991  
1005  992          /* Walk the handler table to get the number of handlers. */
1006  993          for (vq_handler_count = 0;
1007  994              vq_handlers && vq_handlers[vq_handler_count].vh_func;
1008  995              vq_handler_count++)
1009  996                  ;
1010  997  
1011      -        if (config_handler)
      998 +        if (config_handler != NULL)
1012  999                  config_handler_count = 1;
1013 1000  
1014 1001          vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
1015      -            sizeof (struct virtio_int_handler) * vq_handler_count,
1016      -            KM_SLEEP);
     1002 +            sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);
1017 1003  
1018 1004          vhc->nhandlers = vq_handler_count;
1019 1005          (void) memcpy(vhc->vq_handlers, vq_handlers,
1020 1006              sizeof (struct virtio_int_handler) * vq_handler_count);
1021 1007  
1022      -        if (config_handler) {
     1008 +        if (config_handler != NULL) {
1023 1009                  (void) memcpy(&vhc->config_handler, config_handler,
1024 1010                      sizeof (struct virtio_int_handler));
1025 1011          }
1026 1012  
1027 1013          /* Just a single entry for a single interrupt. */
1028 1014          sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);
1029 1015  
1030 1016          ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
1031      -            DDI_INTR_TYPE_FIXED, 0, 1, &actual,
1032      -            DDI_INTR_ALLOC_NORMAL);
     1017 +            DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
1033 1018          if (ret != DDI_SUCCESS) {
1034 1019                  dev_err(sc->sc_dev, CE_WARN,
1035 1020                      "Failed to allocate a fixed interrupt: %d", ret);
1036 1021                  goto out_int_alloc;
1037 1022          }
1038 1023  
1039 1024          ASSERT(actual == 1);
1040 1025          sc->sc_intr_num = 1;
1041 1026  
1042 1027          ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
↓ open down ↓ 61 lines elided ↑ open up ↑
1104 1089          }
1105 1090  
1106 1091          dev_err(sc->sc_dev, CE_WARN,
1107 1092              "MSI failed and fixed interrupts not supported. Giving up.");
1108 1093          ret = DDI_FAILURE;
1109 1094  
1110 1095  out_inttype:
1111 1096          return (ret);
1112 1097  }
1113 1098  
1114      -
1115 1099  static int
1116 1100  virtio_enable_msi(struct virtio_softc *sc)
1117 1101  {
1118 1102          int ret, i;
1119 1103          int vq_handler_count = sc->sc_intr_num;
1120 1104  
1121 1105          /* Number of handlers, not counting the config. */
1122 1106          if (sc->sc_intr_config)
1123 1107                  vq_handler_count--;
1124 1108  
↓ open down ↓ 19 lines elided ↑ open up ↑
1144 1128                                              sc->sc_intr_htable[i]);
1145 1129                                  }
1146 1130                                  goto out_enable;
1147 1131                          }
1148 1132                  }
1149 1133          }
1150 1134  
1151 1135          /* Bind the allocated MSI to the queues and config */
1152 1136          for (i = 0; i < vq_handler_count; i++) {
1153 1137                  int check;
     1138 +
1154 1139                  ddi_put16(sc->sc_ioh,
1155 1140                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1156 1141                      (uint16_t *)(sc->sc_io_addr +
1157 1142                      VIRTIO_CONFIG_QUEUE_SELECT), i);
1158 1143  
1159 1144                  ddi_put16(sc->sc_ioh,
1160 1145                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1161 1146                      (uint16_t *)(sc->sc_io_addr +
1162 1147                      VIRTIO_CONFIG_QUEUE_VECTOR), i);
1163 1148  
1164 1149                  check = ddi_get16(sc->sc_ioh,
1165 1150                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1166 1151                      (uint16_t *)(sc->sc_io_addr +
1167 1152                      VIRTIO_CONFIG_QUEUE_VECTOR));
1168 1153                  if (check != i) {
1169      -                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler"
     1154 +                        dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
1170 1155                              "for VQ %d, MSI %d. Check = %x", i, i, check);
1171 1156                          ret = ENODEV;
1172 1157                          goto out_bind;
1173 1158                  }
1174 1159          }
1175 1160  
1176 1161          if (sc->sc_intr_config) {
1177 1162                  int check;
     1163 +
1178 1164                  ddi_put16(sc->sc_ioh,
1179 1165                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1180 1166                      (uint16_t *)(sc->sc_io_addr +
1181 1167                      VIRTIO_CONFIG_CONFIG_VECTOR), i);
1182 1168  
1183 1169                  check = ddi_get16(sc->sc_ioh,
1184 1170                      /* LINTED E_BAD_PTR_CAST_ALIGN */
1185 1171                      (uint16_t *)(sc->sc_io_addr +
1186 1172                      VIRTIO_CONFIG_CONFIG_VECTOR));
1187 1173                  if (check != i) {
↓ open down ↓ 24 lines elided ↑ open up ↑
1212 1198          /* LINTED E_BAD_PTR_CAST_ALIGN */
1213 1199          ddi_put16(sc->sc_ioh, (uint16_t *)(sc->sc_io_addr +
1214 1200              VIRTIO_CONFIG_CONFIG_VECTOR), VIRTIO_MSI_NO_VECTOR);
1215 1201  
1216 1202          ret = DDI_FAILURE;
1217 1203  
1218 1204  out_enable:
1219 1205          return (ret);
1220 1206  }
1221 1207  
1222      -static int virtio_enable_intx(struct virtio_softc *sc)
     1208 +static int
     1209 +virtio_enable_intx(struct virtio_softc *sc)
1223 1210  {
1224 1211          int ret;
1225 1212  
1226 1213          ret = ddi_intr_enable(sc->sc_intr_htable[0]);
1227      -        if (ret != DDI_SUCCESS)
     1214 +        if (ret != DDI_SUCCESS) {
1228 1215                  dev_err(sc->sc_dev, CE_WARN,
1229 1216                      "Failed to enable interrupt: %d", ret);
     1217 +        }
     1218 +
1230 1219          return (ret);
1231 1220  }
1232 1221  
1233 1222  /*
1234 1223   * We can't enable/disable individual handlers in the INTx case so do
1235 1224   * the whole bunch even in the msi case.
1236 1225   */
1237 1226  int
1238 1227  virtio_enable_ints(struct virtio_softc *sc)
1239 1228  {
↓ open down ↓ 35 lines elided ↑ open up ↑
1275 1264                      VIRTIO_MSI_NO_VECTOR);
1276 1265  
1277 1266          }
1278 1267  
1279 1268          /* Disable the interrupts. Either the whole block, or one by one. */
1280 1269          if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
1281 1270                  ret = ddi_intr_block_disable(sc->sc_intr_htable,
1282 1271                      sc->sc_intr_num);
1283 1272                  if (ret != DDI_SUCCESS) {
1284 1273                          dev_err(sc->sc_dev, CE_WARN,
1285      -                            "Failed to disable MSIs, won't be able to"
     1274 +                            "Failed to disable MSIs, won't be able to "
1286 1275                              "reuse next time");
1287 1276                  }
1288 1277          } else {
1289 1278                  for (i = 0; i < sc->sc_intr_num; i++) {
1290 1279                          ret = ddi_intr_disable(sc->sc_intr_htable[i]);
1291 1280                          if (ret != DDI_SUCCESS) {
1292 1281                                  dev_err(sc->sc_dev, CE_WARN,
1293 1282                                      "Failed to disable interrupt %d, "
1294 1283                                      "won't be able to reuse", i);
1295      -
1296 1284                          }
1297 1285                  }
1298 1286          }
1299 1287  
1300 1288  
1301 1289          for (i = 0; i < sc->sc_intr_num; i++) {
1302 1290                  (void) ddi_intr_remove_handler(sc->sc_intr_htable[i]);
1303 1291          }
1304 1292  
1305 1293          for (i = 0; i < sc->sc_intr_num; i++)
1306 1294                  (void) ddi_intr_free(sc->sc_intr_htable[i]);
1307 1295  
1308      -        kmem_free(sc->sc_intr_htable,
1309      -            sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
     1296 +        kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
     1297 +            sc->sc_intr_num);
1310 1298  
1311      -
1312 1299          /* After disabling interrupts, the config offset is non-MSI. */
1313 1300          sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
1314 1301  }
1315 1302  
1316 1303  /*
1317 1304   * Module linkage information for the kernel.
1318 1305   */
1319 1306  static struct modlmisc modlmisc = {
1320      -        &mod_miscops, /* Type of module */
     1307 +        &mod_miscops,   /* Type of module */
1321 1308          "VirtIO common library module",
1322 1309  };
1323 1310  
1324 1311  static struct modlinkage modlinkage = {
1325 1312          MODREV_1,
1326 1313          {
1327 1314                  (void *)&modlmisc,
1328 1315                  NULL
1329 1316          }
1330 1317  };
↓ open down ↓ 18 lines elided ↑ open up ↑
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX