3644 Add virtio-net support into the Illumos
Reviewed by: Alexey Zaytsev, alexey.zaytsev@gmail.com
Reviewed by: Yuri Pankov, yuri.pankov@nexenta.com
Reviewed by: David Hoppner, 0xffea@gmail.com

*** 69,78 ****
--- 69,79 ----
  #include <sys/sysmacros.h>
  #include <sys/pci.h>

  #include "virtiovar.h"
  #include "virtioreg.h"

+ #define NDEVNAMES	(sizeof (virtio_device_name) / sizeof (char *))
  #define MINSEG_INDIRECT	2	/* use indirect if nsegs >= this value */
  #define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1)) & \
	    ~(VIRTIO_PAGE_SIZE-1))
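A note on the VIRTQUEUE_ALIGN macro above: the ring memory handed to the
device must start and end on VIRTIO_PAGE_SIZE boundaries, so the macro rounds
a byte count up to the next page multiple. A minimal standalone sketch, on the
assumption that VIRTIO_PAGE_SIZE is 4096 as virtioreg.h defines it:

    #include <stdio.h>

    /* Assumption: virtioreg.h defines VIRTIO_PAGE_SIZE as 4096. */
    #define VIRTIO_PAGE_SIZE	4096
    #define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1)) & \
				    ~(VIRTIO_PAGE_SIZE-1))

    int
    main(void)
    {
    	/* An exact page stays put; one byte more rounds up a full page. */
    	printf("%d %d\n", VIRTQUEUE_ALIGN(4096), VIRTQUEUE_ALIGN(4097));
    	return (0);	/* prints: 4096 8192 */
    }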
*** 79,96 ****
  void
  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  {
  	int old = 0;

! 	if (status != 0)
! 		old = ddi_get8(sc->sc_ioh,
! 		    (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS));

! 	ddi_put8(sc->sc_ioh,
! 	    (uint8_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_STATUS),
! 	    status | old);
  }

  /*
   * Negotiate features, save the result in sc->sc_features
   */
--- 80,96 ----
  void
  virtio_set_status(struct virtio_softc *sc, unsigned int status)
  {
  	int old = 0;

! 	if (status != 0) {
! 		old = ddi_get8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
! 		    VIRTIO_CONFIG_DEVICE_STATUS));
+ 	}

! 	ddi_put8(sc->sc_ioh, (uint8_t *)(sc->sc_io_addr +
! 	    VIRTIO_CONFIG_DEVICE_STATUS), status | old);
  }

  /*
   * Negotiate features, save the result in sc->sc_features
   */
*** 102,113 ****
  	host_features = ddi_get32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

! 	dev_debug(sc->sc_dev, CE_NOTE,
! 	    "host features: %x, guest features: %x",
  	    host_features, guest_features);

  	features = host_features & guest_features;
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
--- 102,112 ----
  	host_features = ddi_get32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + VIRTIO_CONFIG_DEVICE_FEATURES));

! 	dev_debug(sc->sc_dev, CE_NOTE, "host features: %x, guest features: %x",
  	    host_features, guest_features);

  	features = host_features & guest_features;
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
*** 118,129 ****
  	return (host_features);
  }

  size_t
! virtio_show_features(uint32_t features,
!     char *buf, size_t len)
  {
  	char *orig_buf = buf;
  	char *bufend = buf + len;

  	/* LINTED E_PTRDIFF_OVERFLOW */
--- 117,127 ----
  	return (host_features);
  }

  size_t
! virtio_show_features(uint32_t features, char *buf, size_t len)
  {
  	char *orig_buf = buf;
  	char *bufend = buf + len;

  	/* LINTED E_PTRDIFF_OVERFLOW */
*** 192,232 ****
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
  	return (r);
  }

  void
! virtio_write_device_config_1(struct virtio_softc *sc,
!     unsigned int index, uint8_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put8(sc->sc_ioh,
  	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_2(struct virtio_softc *sc,
!     unsigned int index, uint16_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_4(struct virtio_softc *sc,
!     unsigned int index, uint32_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_8(struct virtio_softc *sc,
!     unsigned int index, uint64_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
--- 190,230 ----
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index));
  	return (r);
  }

  void
! virtio_write_device_config_1(struct virtio_softc *sc, unsigned int index,
!     uint8_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put8(sc->sc_ioh,
  	    (uint8_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_2(struct virtio_softc *sc, unsigned int index,
!     uint16_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint16_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_4(struct virtio_softc *sc, unsigned int index,
!     uint32_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index), value);
  }

  void
! virtio_write_device_config_8(struct virtio_softc *sc, unsigned int index,
!     uint64_t value)
  {
  	ASSERT(sc->sc_config_offset);
  	ddi_put32(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint32_t *)(sc->sc_io_addr + sc->sc_config_offset + index),
*** 253,267 ****
  }

  static ddi_dma_attr_t virtio_vq_dma_attr = {
  	DMA_ATTR_V0,		/* Version number */
  	0,			/* low address */
! 	/*
! 	 * high address. Has to fit into 32 bits
! 	 * after page-shifting
! 	 */
! 	0x00000FFFFFFFFFFF,
  	0xFFFFFFFF,		/* counter register max */
  	VIRTIO_PAGE_SIZE,	/* page alignment required */
  	0x3F,			/* burst sizes: 1 - 32 */
  	0x1,			/* minimum transfer size */
  	0xFFFFFFFF,		/* max transfer size */
--- 251,262 ----
  }

  static ddi_dma_attr_t virtio_vq_dma_attr = {
  	DMA_ATTR_V0,		/* Version number */
  	0,			/* low address */
! 	0x00000FFFFFFFFFFF,	/* high address. Has to fit into 32 bits */
! 				/* after page-shifting */
  	0xFFFFFFFF,		/* counter register max */
  	VIRTIO_PAGE_SIZE,	/* page alignment required */
  	0x3F,			/* burst sizes: 1 - 32 */
  	0x1,			/* minimum transfer size */
  	0xFFFFFFFF,		/* max transfer size */
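The 0x00000FFFFFFFFFFF high-address limit above follows from the device
interface: the legacy virtio queue address register holds a 32-bit page frame
number, so the highest addressable byte is 32 bits of PFN plus 12 bits of page
offset, 44 bits in all. A quick standalone check of the constant (a sketch,
not driver code):

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	/* 32-bit page frame number plus a 4 KB page offset. */
    	uint64_t high = ((uint64_t)UINT32_MAX << 12) | 0xFFF;

    	/* Prints fffffffffff, i.e. 0x00000FFFFFFFFFFF. */
    	printf("%llx\n", (unsigned long long)high);
    	return (0);
    }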
*** 321,358 ****
  	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
  	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to allocate dma handle for indirect descriptors,"
! 		    " entry %d, vq %d", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_alloc_handle;
  	}

! 	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle,
! 	    allocsize, &virtio_vq_devattr,
! 	    DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
  	    (caddr_t *)&entry->qe_indirect_descs, &len,
  	    &entry->qe_indirect_dma_acch);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to alocate dma memory for indirect descriptors,"
! 		    " entry %d, vq %d,", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_alloc;
  	}

  	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

  	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
  	    (caddr_t)entry->qe_indirect_descs, len,
! 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
! 	    DDI_DMA_SLEEP, NULL,
  	    &entry->qe_indirect_dma_cookie, &ncookies);
  	if (ret != DDI_DMA_MAPPED) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to bind dma memory for indirect descriptors,"
  		    "entry %d, vq %d", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_bind;
  	}
--- 316,352 ----
  	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_indirect_dma_attr,
  	    DDI_DMA_SLEEP, NULL, &entry->qe_indirect_dma_handle);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to allocate dma handle for indirect descriptors, "
! 		    "entry %d, vq %d", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_alloc_handle;
  	}

! 	ret = ddi_dma_mem_alloc(entry->qe_indirect_dma_handle, allocsize,
! 	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
  	    (caddr_t *)&entry->qe_indirect_descs, &len,
  	    &entry->qe_indirect_dma_acch);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to allocate dma memory for indirect descriptors, "
! 		    "entry %d, vq %d,", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_alloc;
  	}

  	(void) memset(entry->qe_indirect_descs, 0xff, allocsize);

  	ret = ddi_dma_addr_bind_handle(entry->qe_indirect_dma_handle, NULL,
  	    (caddr_t)entry->qe_indirect_descs, len,
! 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
! 	    &entry->qe_indirect_dma_cookie, &ncookies);
  	if (ret != DDI_DMA_MAPPED) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to bind dma memory for indirect descriptors, "
  		    "entry %d, vq %d", entry->qe_index,
  		    entry->qe_queue->vq_index);
  		goto out_bind;
  	}
*** 397,412 ****
  			if (ret)
  				goto out_indirect;
  		}
  	}

! 	mutex_init(&vq->vq_freelist_lock, "virtio-freelist",
! 	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
! 	mutex_init(&vq->vq_avail_lock, "virtio-avail",
! 	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));
! 	mutex_init(&vq->vq_used_lock, "virtio-used",
! 	    MUTEX_DRIVER, DDI_INTR_PRI(sc->sc_intr_prio));

  	return (0);

  out_indirect:
  	for (i = 0; i < vq_size; i++) {
--- 391,406 ----
  			if (ret)
  				goto out_indirect;
  		}
  	}

! 	mutex_init(&vq->vq_freelist_lock, "virtio-freelist", MUTEX_DRIVER,
! 	    DDI_INTR_PRI(sc->sc_intr_prio));
! 	mutex_init(&vq->vq_avail_lock, "virtio-avail", MUTEX_DRIVER,
! 	    DDI_INTR_PRI(sc->sc_intr_prio));
! 	mutex_init(&vq->vq_used_lock, "virtio-used", MUTEX_DRIVER,
! 	    DDI_INTR_PRI(sc->sc_intr_prio));

  	return (0);

  out_indirect:
  	for (i = 0; i < vq_size; i++) {
*** 416,444 ****
  	}

  	return (ret);
  }

- 
- 
  /*
   * Allocate/free a vq.
   */
  struct virtqueue *
! virtio_alloc_vq(struct virtio_softc *sc,
!     unsigned int index,
!     unsigned int size,
!     unsigned int indirect_num,
!     const char *name)
  {
  	int vq_size, allocsize1, allocsize2, allocsize = 0;
  	int ret;
  	unsigned int ncookies;
  	size_t len;
  	struct virtqueue *vq;

- 
  	ddi_put16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
  	vq_size = ddi_get16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
--- 410,432 ----
  	}

  	return (ret);
  }

  /*
   * Allocate/free a vq.
   */
  struct virtqueue *
! virtio_alloc_vq(struct virtio_softc *sc, unsigned int index, unsigned int size,
!     unsigned int indirect_num, const char *name)
  {
  	int vq_size, allocsize1, allocsize2, allocsize = 0;
  	int ret;
  	unsigned int ncookies;
  	size_t len;
  	struct virtqueue *vq;

  	ddi_put16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
  	    (uint16_t *)(sc->sc_io_addr + VIRTIO_CONFIG_QUEUE_SELECT), index);
  	vq_size = ddi_get16(sc->sc_ioh,
  	    /* LINTED E_BAD_PTR_CAST_ALIGN */
*** 455,469 ****
  	if (size)
  		vq_size = MIN(vq_size, size);

  	/* allocsize1: descriptor table + avail ring + pad */
  	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
! 	    sizeof (struct vring_avail) +
! 	    sizeof (uint16_t) * vq_size);
  	/* allocsize2: used ring + pad */
! 	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used)
! 	    + sizeof (struct vring_used_elem) * vq_size);

  	allocsize = allocsize1 + allocsize2;

  	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
  	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
--- 443,456 ----
  	if (size)
  		vq_size = MIN(vq_size, size);

  	/* allocsize1: descriptor table + avail ring + pad */
  	allocsize1 = VIRTQUEUE_ALIGN(sizeof (struct vring_desc) * vq_size +
! 	    sizeof (struct vring_avail) + sizeof (uint16_t) * vq_size);
  	/* allocsize2: used ring + pad */
! 	allocsize2 = VIRTQUEUE_ALIGN(sizeof (struct vring_used) +
! 	    sizeof (struct vring_used_elem) * vq_size);

  	allocsize = allocsize1 + allocsize2;

  	ret = ddi_dma_alloc_handle(sc->sc_dev, &virtio_vq_dma_attr,
  	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_handle);
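To make the allocsize1/allocsize2 arithmetic above concrete, here is a worked
sketch using the standard virtio 0.9.5 structure sizes (16-byte vring_desc,
4-byte vring_avail and vring_used headers, 2-byte avail slots, 8-byte
vring_used_elem); the sizes are hard-coded only so the example stands alone:

    #include <stdio.h>

    #define VIRTIO_PAGE_SIZE	4096
    #define VIRTQUEUE_ALIGN(n)	(((n)+(VIRTIO_PAGE_SIZE-1)) & \
				    ~(VIRTIO_PAGE_SIZE-1))

    int
    main(void)
    {
    	int vq_size = 256;	/* hypothetical queue size */

    	/* Descriptor table + avail ring: 16*256 + 4 + 2*256 = 4612. */
    	int allocsize1 = VIRTQUEUE_ALIGN(16 * vq_size + 4 + 2 * vq_size);
    	/* Used ring: 4 + 8*256 = 2052. */
    	int allocsize2 = VIRTQUEUE_ALIGN(4 + 8 * vq_size);

    	/* Prints 8192 + 4096 = 12288. */
    	printf("%d + %d = %d\n", allocsize1, allocsize2,
    	    allocsize1 + allocsize2);
    	return (0);
    }

The two pieces are aligned separately because the device expects the used ring
to begin on its own page boundary within the ring allocation.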
*** 476,493 ****
  	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
  	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
  	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to alocate dma memory for vq %d", index);
  		goto out_alloc;
  	}

- 
  	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
! 	    (caddr_t)vq->vq_vaddr, len,
! 	    DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
  	    &vq->vq_dma_cookie, &ncookies);
  	if (ret != DDI_DMA_MAPPED) {
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to bind dma memory for vq %d", index);
  		goto out_bind;
--- 463,478 ----
  	ret = ddi_dma_mem_alloc(vq->vq_dma_handle, allocsize,
  	    &virtio_vq_devattr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
  	    (caddr_t *)&vq->vq_vaddr, &len, &vq->vq_dma_acch);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
! 		    "Failed to allocate dma memory for vq %d", index);
  		goto out_alloc;
  	}

  	ret = ddi_dma_addr_bind_handle(vq->vq_dma_handle, NULL,
! 	    (caddr_t)vq->vq_vaddr, len, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
! 	    DDI_DMA_SLEEP, NULL, &vq->vq_dma_cookie, &ncookies);
  	if (ret != DDI_DMA_MAPPED) {
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to bind dma memory for vq %d", index);
  		goto out_bind;
*** 530,540 ****
  	ret = virtio_init_vq(sc, vq);
  	if (ret)
  		goto out_init;

  	dev_debug(sc->sc_dev, CE_NOTE,
! 	    "Allocated %d entries for vq %d:%s (%d incdirect descs)",
  	    vq_size, index, name, indirect_num * vq_size);

  	return (vq);

  out_init:
--- 515,525 ----
  	ret = virtio_init_vq(sc, vq);
  	if (ret)
  		goto out_init;

  	dev_debug(sc->sc_dev, CE_NOTE,
! 	    "Allocated %d entries for vq %d:%s (%d indirect descs)",
  	    vq_size, index, name, indirect_num * vq_size);

  	return (vq);

  out_init:
*** 548,558 ****
  	kmem_free(vq, sizeof (struct virtqueue));
  out:
  	return (NULL);
  }

- 
  void
  virtio_free_vq(struct virtqueue *vq)
  {
  	struct virtio_softc *sc = vq->vq_owner;
  	int i;
--- 533,542 ----
*** 646,666 ****
  	desc->flags = 0;

  	/* 'write' - from the driver's point of view */
  	if (!write)
  		desc->flags = VRING_DESC_F_WRITE;
- 
- 
  }

  void
  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
      boolean_t write)
  {
  	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
  }

  void
  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
      boolean_t write)
  {
  	struct vring_desc *indirect_desc;
--- 630,654 ----
  	desc->flags = 0;

  	/* 'write' - from the driver's point of view */
  	if (!write)
  		desc->flags = VRING_DESC_F_WRITE;
  }

  void
  virtio_ve_set(struct vq_entry *qe, uint64_t paddr, uint32_t len,
      boolean_t write)
  {
  	virtio_ve_set_desc(qe->qe_desc, paddr, len, write);
  }

+ unsigned int
+ virtio_ve_indirect_available(struct vq_entry *qe)
+ {
+ 	return (qe->qe_queue->vq_indirect_num - (qe->qe_indirect_next - 1));
+ }
+ 
  void
  virtio_ve_add_indirect_buf(struct vq_entry *qe, uint64_t paddr, uint32_t len,
      boolean_t write)
  {
  	struct vring_desc *indirect_desc;
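The newly added virtio_ve_indirect_available() lets a caller find out how many
indirect descriptor slots remain in a chain before calling
virtio_ve_add_indirect_buf(). A hypothetical caller might look like the sketch
below; the helper name and the idea of walking a bound DMA cookie list are
illustrative assumptions, not part of this changeset:

    /*
     * Hypothetical helper: append every cookie of a bound DMA handle to
     * qe's indirect chain, failing cleanly if the chain is too short.
     * Assumes the usual DDI headers and the virtio declarations above.
     */
    static int
    example_add_cookies(struct vq_entry *qe, ddi_dma_handle_t dmah,
        ddi_dma_cookie_t dmac, unsigned int ncookies, boolean_t write)
    {
    	unsigned int i;

    	if (ncookies > virtio_ve_indirect_available(qe))
    		return (DDI_FAILURE);

    	for (i = 0; i < ncookies; i++) {
    		virtio_ve_add_indirect_buf(qe, dmac.dmac_laddress,
    		    dmac.dmac_size, write);
    		if (i + 1 < ncookies)
    			ddi_dma_nextcookie(dmah, &dmac);
    	}

    	return (DDI_SUCCESS);
    }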
*** 700,715 ****
  	membar_producer();

  	/* Make sure we see the flags update */
  	membar_consumer();

! 	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY))
  		ddi_put16(vsc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(vsc->sc_io_addr +
  		    VIRTIO_CONFIG_QUEUE_NOTIFY), vq->vq_index);
  }

  void
  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
  {
--- 688,704 ----
  	membar_producer();

  	/* Make sure we see the flags update */
  	membar_consumer();

! 	if (!(vq->vq_used->flags & VRING_USED_F_NO_NOTIFY)) {
  		ddi_put16(vsc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(vsc->sc_io_addr +
  		    VIRTIO_CONFIG_QUEUE_NOTIFY), vq->vq_index);
+ 	}
  }

  void
  virtio_push_chain(struct vq_entry *qe, boolean_t sync)
  {
*** 771,781 ****
  		virtio_sync_vq(vq);

  	mutex_exit(&vq->vq_avail_lock);
  }

! /* Get a chain of descriptors from the used ring, if one is available. */
  struct vq_entry *
  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
  {
  	struct vq_entry *head;
  	int slot;
--- 760,772 ----
  		virtio_sync_vq(vq);

  	mutex_exit(&vq->vq_avail_lock);
  }

! /*
!  * Get a chain of descriptors from the used ring, if one is available.
!  */
  struct vq_entry *
  virtio_pull_chain(struct virtqueue *vq, uint32_t *len)
  {
  	struct vq_entry *head;
  	int slot;
*** 817,827 ****
  	do {
  		ASSERT(qe->qe_queue == vq);
  		tmp = qe->qe_next;
  		vq_free_entry(vq, qe);
  		qe = tmp;
! 	} while (tmp);
  }

  void
  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
  {
--- 808,818 ----
  	do {
  		ASSERT(qe->qe_queue == vq);
  		tmp = qe->qe_next;
  		vq_free_entry(vq, qe);
  		qe = tmp;
! 	} while (tmp != NULL);
  }

  void
  virtio_ventry_stick(struct vq_entry *first, struct vq_entry *second)
  {
*** 829,840 ****
  }

  static int
  virtio_register_msi(struct virtio_softc *sc,
      struct virtio_int_handler *config_handler,
!     struct virtio_int_handler vq_handlers[],
!     int intr_types)
  {
  	int count, actual;
  	int int_type;
  	int i;
  	int handler_count;
--- 820,830 ----
  }

  static int
  virtio_register_msi(struct virtio_softc *sc,
      struct virtio_int_handler *config_handler,
!     struct virtio_int_handler vq_handlers[], int intr_types)
  {
  	int count, actual;
  	int int_type;
  	int i;
  	int handler_count;
*** 850,860 ****
  	    vq_handlers && vq_handlers[handler_count].vh_func;
  	    handler_count++)
  		;

  	/* +1 if there is a config change handler. */
! 	if (config_handler)
  		handler_count++;

  	/* Number of MSIs supported by the device. */
  	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
  	if (ret != DDI_SUCCESS) {
--- 840,850 ----
  	    vq_handlers && vq_handlers[handler_count].vh_func;
  	    handler_count++)
  		;

  	/* +1 if there is a config change handler. */
! 	if (config_handler != NULL)
  		handler_count++;

  	/* Number of MSIs supported by the device. */
  	ret = ddi_intr_get_nintrs(sc->sc_dev, int_type, &count);
  	if (ret != DDI_SUCCESS) {
*** 866,877 ****
  	 * Those who try to register more handlers then the device
  	 * supports shall suffer.
  	 */
  	ASSERT(handler_count <= count);

! 	sc->sc_intr_htable = kmem_zalloc(
! 	    sizeof (ddi_intr_handle_t) * handler_count, KM_SLEEP);

  	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
  	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
--- 856,867 ----
  	 * Those who try to register more handlers then the device
  	 * supports shall suffer.
  	 */
  	ASSERT(handler_count <= count);

! 	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t) *
! 	    handler_count, KM_SLEEP);

  	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable, int_type, 0,
  	    handler_count, &actual, DDI_INTR_ALLOC_NORMAL);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN, "Failed to allocate MSI: %d", ret);
*** 885,895 ****
  		goto out_msi_available;
  	}

  	sc->sc_intr_num = handler_count;
  	sc->sc_intr_config = B_FALSE;
! 	if (config_handler) {
  		sc->sc_intr_config = B_TRUE;
  	}

  	/* Assume they are all same priority */
  	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
--- 875,885 ----
  		goto out_msi_available;
  	}

  	sc->sc_intr_num = handler_count;
  	sc->sc_intr_config = B_FALSE;
! 	if (config_handler != NULL) {
  		sc->sc_intr_config = B_TRUE;
  	}

  	/* Assume they are all same priority */
  	ret = ddi_intr_get_pri(sc->sc_intr_htable[0], &sc->sc_intr_prio);
*** 899,910 ****
  	}

  	/* Add the vq handlers */
  	for (i = 0; vq_handlers[i].vh_func; i++) {
  		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
! 		    vq_handlers[i].vh_func,
! 		    sc, vq_handlers[i].vh_priv);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
  			    "ddi_intr_add_handler failed");
  			/* Remove the handlers that succeeded. */
  			while (--i >= 0) {
--- 889,899 ----
  	}

  	/* Add the vq handlers */
  	for (i = 0; vq_handlers[i].vh_func; i++) {
  		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
! 		    vq_handlers[i].vh_func, sc, vq_handlers[i].vh_priv);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
  			    "ddi_intr_add_handler failed");
  			/* Remove the handlers that succeeded. */
  			while (--i >= 0) {
*** 914,927 ****
  			goto out_add_handlers;
  		}
  	}

  	/* Don't forget the config handler */
! 	if (config_handler) {
  		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
! 		    config_handler->vh_func,
! 		    sc, config_handler->vh_priv);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
  			    "ddi_intr_add_handler failed");
  			/* Remove the handlers that succeeded. */
  			while (--i >= 0) {
--- 903,915 ----
  			goto out_add_handlers;
  		}
  	}

  	/* Don't forget the config handler */
! 	if (config_handler != NULL) {
  		ret = ddi_intr_add_handler(sc->sc_intr_htable[i],
! 		    config_handler->vh_func, sc, config_handler->vh_priv);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
  			    "ddi_intr_add_handler failed");
  			/* Remove the handlers that succeeded. */
  			while (--i >= 0) {
*** 933,944 ****
  	}

  	/* We know we are using MSI, so set the config offset. */
  	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;

! 	ret = ddi_intr_get_cap(sc->sc_intr_htable[0],
! 	    &sc->sc_intr_cap);
  	/* Just in case. */
  	if (ret != DDI_SUCCESS)
  		sc->sc_intr_cap = 0;

  out_add_handlers:
--- 921,931 ----
  	}

  	/* We know we are using MSI, so set the config offset. */
  	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_MSI;

! 	ret = ddi_intr_get_cap(sc->sc_intr_htable[0], &sc->sc_intr_cap);
  	/* Just in case. */
  	if (ret != DDI_SUCCESS)
  		sc->sc_intr_cap = 0;

  out_add_handlers:
*** 1006,1037 ****
  	for (vq_handler_count = 0;
  	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
  	    vq_handler_count++)
  		;

! 	if (config_handler)
  		config_handler_count = 1;

  	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
! 	    sizeof (struct virtio_int_handler) * vq_handler_count,
! 	    KM_SLEEP);

  	vhc->nhandlers = vq_handler_count;
  	(void) memcpy(vhc->vq_handlers, vq_handlers,
  	    sizeof (struct virtio_int_handler) * vq_handler_count);

! 	if (config_handler) {
  		(void) memcpy(&vhc->config_handler, config_handler,
  		    sizeof (struct virtio_int_handler));
  	}

  	/* Just a single entry for a single interrupt. */
  	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

  	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
! 	    DDI_INTR_TYPE_FIXED, 0, 1, &actual,
! 	    DDI_INTR_ALLOC_NORMAL);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to allocate a fixed interrupt: %d", ret);
  		goto out_int_alloc;
  	}
--- 993,1022 ----
  	for (vq_handler_count = 0;
  	    vq_handlers && vq_handlers[vq_handler_count].vh_func;
  	    vq_handler_count++)
  		;

! 	if (config_handler != NULL)
  		config_handler_count = 1;

  	vhc = kmem_zalloc(sizeof (struct virtio_handler_container) +
! 	    sizeof (struct virtio_int_handler) * vq_handler_count, KM_SLEEP);

  	vhc->nhandlers = vq_handler_count;
  	(void) memcpy(vhc->vq_handlers, vq_handlers,
  	    sizeof (struct virtio_int_handler) * vq_handler_count);

! 	if (config_handler != NULL) {
  		(void) memcpy(&vhc->config_handler, config_handler,
  		    sizeof (struct virtio_int_handler));
  	}

  	/* Just a single entry for a single interrupt. */
  	sc->sc_intr_htable = kmem_zalloc(sizeof (ddi_intr_handle_t), KM_SLEEP);

  	ret = ddi_intr_alloc(sc->sc_dev, sc->sc_intr_htable,
! 	    DDI_INTR_TYPE_FIXED, 0, 1, &actual, DDI_INTR_ALLOC_NORMAL);
  	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to allocate a fixed interrupt: %d", ret);
  		goto out_int_alloc;
  	}
*** 1109,1119 ****
  out_inttype:
  	return (ret);
  }

- 
  static int
  virtio_enable_msi(struct virtio_softc *sc)
  {
  	int ret, i;
  	int vq_handler_count = sc->sc_intr_num;
--- 1094,1103 ----
*** 1149,1158 ****
--- 1133,1143 ----
  	}

  	/* Bind the allocated MSI to the queues and config */
  	for (i = 0; i < vq_handler_count; i++) {
  		int check;
+ 
  		ddi_put16(sc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(sc->sc_io_addr +
  		    VIRTIO_CONFIG_QUEUE_SELECT), i);
*** 1164,1182 ****
  		check = ddi_get16(sc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(sc->sc_io_addr +
  		    VIRTIO_CONFIG_QUEUE_VECTOR));
  		if (check != i) {
! 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler"
  			    "for VQ %d, MSI %d. Check = %x", i, i, check);
  			ret = ENODEV;
  			goto out_bind;
  		}
  	}

  	if (sc->sc_intr_config) {
  		int check;
  		ddi_put16(sc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(sc->sc_io_addr +
  		    VIRTIO_CONFIG_CONFIG_VECTOR), i);
--- 1149,1168 ----
  		check = ddi_get16(sc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(sc->sc_io_addr +
  		    VIRTIO_CONFIG_QUEUE_VECTOR));
  		if (check != i) {
! 			dev_err(sc->sc_dev, CE_WARN, "Failed to bind handler "
  			    "for VQ %d, MSI %d. Check = %x", i, i, check);
  			ret = ENODEV;
  			goto out_bind;
  		}
  	}

  	if (sc->sc_intr_config) {
  		int check;
+ 
  		ddi_put16(sc->sc_ioh,
  		    /* LINTED E_BAD_PTR_CAST_ALIGN */
  		    (uint16_t *)(sc->sc_io_addr +
  		    VIRTIO_CONFIG_CONFIG_VECTOR), i);
*** 1217,1234 ****
  out_enable:
  	return (ret);
  }

! static int virtio_enable_intx(struct virtio_softc *sc)
  {
  	int ret;

  	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
! 	if (ret != DDI_SUCCESS)
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to enable interrupt: %d", ret);
  	return (ret);
  }

  /*
   * We can't enable/disable individual handlers in the INTx case so do
--- 1203,1223 ----
  out_enable:
  	return (ret);
  }

! static int
! virtio_enable_intx(struct virtio_softc *sc)
  {
  	int ret;

  	ret = ddi_intr_enable(sc->sc_intr_htable[0]);
! 	if (ret != DDI_SUCCESS) {
  		dev_err(sc->sc_dev, CE_WARN,
  		    "Failed to enable interrupt: %d", ret);
+ 	}
+ 
  	return (ret);
  }

  /*
   * We can't enable/disable individual handlers in the INTx case so do
*** 1280,1300 ****
  	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
  		ret = ddi_intr_block_disable(sc->sc_intr_htable,
  		    sc->sc_intr_num);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
! 			    "Failed to disable MSIs, won't be able to"
  			    "reuse next time");
  		}
  	} else {
  		for (i = 0; i < sc->sc_intr_num; i++) {
  			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
  			if (ret != DDI_SUCCESS) {
  				dev_err(sc->sc_dev, CE_WARN,
  				    "Failed to disable interrupt %d, "
  				    "won't be able to reuse", i);
- 
  			}
  		}
  	}
--- 1269,1288 ----
  	if (sc->sc_intr_cap & DDI_INTR_FLAG_BLOCK) {
  		ret = ddi_intr_block_disable(sc->sc_intr_htable,
  		    sc->sc_intr_num);
  		if (ret != DDI_SUCCESS) {
  			dev_err(sc->sc_dev, CE_WARN,
! 			    "Failed to disable MSIs, won't be able to "
  			    "reuse next time");
  		}
  	} else {
  		for (i = 0; i < sc->sc_intr_num; i++) {
  			ret = ddi_intr_disable(sc->sc_intr_htable[i]);
  			if (ret != DDI_SUCCESS) {
  				dev_err(sc->sc_dev, CE_WARN,
  				    "Failed to disable interrupt %d, "
  				    "won't be able to reuse", i);
  			}
  		}
  	}
*** 1303,1316 ****
  	}

  	for (i = 0; i < sc->sc_intr_num; i++)
  		(void) ddi_intr_free(sc->sc_intr_htable[i]);

! 	kmem_free(sc->sc_intr_htable,
! 	    sizeof (ddi_intr_handle_t) * sc->sc_intr_num);
- 
  	/* After disabling interrupts, the config offset is non-MSI. */
  	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
  }

  /*
--- 1291,1303 ----
  	}

  	for (i = 0; i < sc->sc_intr_num; i++)
  		(void) ddi_intr_free(sc->sc_intr_htable[i]);

! 	kmem_free(sc->sc_intr_htable, sizeof (ddi_intr_handle_t) *
! 	    sc->sc_intr_num);

  	/* After disabling interrupts, the config offset is non-MSI. */
  	sc->sc_config_offset = VIRTIO_CONFIG_DEVICE_CONFIG_NOMSI;
  }

  /*