646 TAVOR_UAR_DOORBELL(state, (uint64_t *)&state->ts_uar->eq,
647 doorbell);
648 }
649
650 /*
651 * tavor_eq_poll
652 * Context: Only called from interrupt context (and during panic)
653 */
654 static void
655 tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq)
656 {
657 uint64_t *clr_ecr;
658 tavor_hw_eqe_t *eqe;
659 uint64_t ecr_mask;
660 uint32_t cons_indx, wrap_around_mask;
661 int (*eqfunction)(tavor_state_t *state, tavor_eqhdl_t eq,
662 tavor_hw_eqe_t *eqe);
663
664 TAVOR_TNF_ENTER(tavor_eq_poll);
665
666 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eq))
667
668 /* Find the pointer to the clr_ECR register */
669 clr_ecr = state->ts_cmd_regs.clr_ecr;
670
671 /*
672 * Check for Local Catastrophic Error. If we have this kind of error,
673 * then we don't need to do anything else here, as this kind of
674 * catastrophic error is handled separately. So we call the
675 * catastrophic handler, clear the ECR and then return.
676 */
677 if (eq->eq_evttypemask == TAVOR_EVT_MSK_LOCAL_CAT_ERROR) {
678 /*
679 * Call Catastrophic Error handler
680 */
681 tavor_eq_catastrophic(state);
682
683 /*
684 * Clear the ECR. Specifically, clear the bit corresponding
685 * to the event queue just processed.
686 */
687 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
688 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
689
690 TAVOR_TNF_EXIT(tavor_eq_poll);
691 return;
692 }
693
694 /* Get the consumer pointer index */
695 cons_indx = eq->eq_consindx;
696
697 /*
698 * Calculate the wrap around mask. Note: This operation only works
699 * because all Tavor event queues have power-of-2 sizes
700 */
701 wrap_around_mask = (eq->eq_bufsz - 1);
702
703 /* Calculate the pointer to the first EQ entry */
704 eqe = &eq->eq_buf[cons_indx];
705 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*eqe))
706
707 /*
708 * Sync the current EQE to read
709 * We need to force a ddi_dma_sync() here (independent of how the
710 * EQ was mapped) because it is possible for us to receive the
711 * interrupt, do a read of the ECR, and have each of these
712 * operations complete successfully even though the hardware's DMA
713 * to the EQ has not yet completed.
714 */
715 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU, TAVOR_EQ_SYNC_FORCE);
716
717 /*
718 * Pull the handler function for this EQ from the Tavor Event Queue
719 * handle
720 */
721 eqfunction = eq->eq_func;
722
723 /*
724 * Keep pulling entries from the EQ until we find an entry owned by
725 * the hardware. As long as there are EQE's owned by SW, process
995 * SPIN in the HCR since the event queues are not setup yet, and we
996 * cannot NOSPIN at this point in time.
997 */
998 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
999 mr_attr.mr_len = eq->eq_eqinfo.qa_size;
1000 mr_attr.mr_as = NULL;
1001 mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
1002 dma_xfer_mode = state->ts_cfg_profile->cp_streaming_consistent;
1003 if (dma_xfer_mode == DDI_DMA_STREAMING) {
1004 mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
1005 }
1006 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1007 op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
1008 op.mro_bind_override_addr = 0;
1009 status = tavor_mr_register(state, pd, &mr_attr, &mr, &op);
1010 if (status != DDI_SUCCESS) {
1011 /* Set "status" and "errormsg" and goto failure */
1012 TAVOR_TNF_FAIL(DDI_FAILURE, "failed register mr");
1013 goto eqalloc_fail4;
1014 }
1015 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
1016 addr = mr->mr_bindinfo.bi_addr;
1017 lkey = mr->mr_lkey;
1018
1019 /* Determine if later ddi_dma_sync will be necessary */
1020 eq->eq_sync = TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo);
1021
1022 /* Sync entire EQ for use by the hardware (if necessary) */
1023 if (eq->eq_sync) {
1024 (void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0,
1025 eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
1026 }
1027
1028 /*
1029 * Fill in the EQC entry. This is the final step before passing
1030 * ownership of the EQC entry to the Tavor hardware. We use all of
1031 * the information collected/calculated above to fill in the
1032 * requisite portions of the EQC. Note: We create all EQs in the
1033 * "fired" state. We will arm them later (after our interrupt
1034 * routine has been registered.)
1035 */
|
646 TAVOR_UAR_DOORBELL(state, (uint64_t *)&state->ts_uar->eq,
647 doorbell);
648 }
649
650 /*
651 * tavor_eq_poll
652 * Context: Only called from interrupt context (and during panic)
653 */
654 static void
655 tavor_eq_poll(tavor_state_t *state, tavor_eqhdl_t eq)
656 {
657 uint64_t *clr_ecr;
658 tavor_hw_eqe_t *eqe;
659 uint64_t ecr_mask;
660 uint32_t cons_indx, wrap_around_mask;
661 int (*eqfunction)(tavor_state_t *state, tavor_eqhdl_t eq,
662 tavor_hw_eqe_t *eqe);
663
664 TAVOR_TNF_ENTER(tavor_eq_poll);
665
666 /* Find the pointer to the clr_ECR register */
667 clr_ecr = state->ts_cmd_regs.clr_ecr;
668
669 /*
670 * Check for Local Catastrophic Error. If we have this kind of error,
671 * then we don't need to do anything else here, as this kind of
672 * catastrophic error is handled separately. So we call the
673 * catastrophic handler, clear the ECR and then return.
674 */
675 if (eq->eq_evttypemask == TAVOR_EVT_MSK_LOCAL_CAT_ERROR) {
676 /*
677 * Call Catastrophic Error handler
678 */
679 tavor_eq_catastrophic(state);
680
681 /*
682 * Clear the ECR. Specifically, clear the bit corresponding
683 * to the event queue just processed.
684 */
685 ecr_mask = ((uint64_t)1 << eq->eq_eqnum);
686 ddi_put64(state->ts_reg_cmdhdl, clr_ecr, ecr_mask);
687
688 TAVOR_TNF_EXIT(tavor_eq_poll);
689 return;
690 }
691
692 /* Get the consumer pointer index */
693 cons_indx = eq->eq_consindx;
694
695 /*
696 * Calculate the wrap around mask. Note: This operation only works
697 * because all Tavor event queues have power-of-2 sizes
698 */
699 wrap_around_mask = (eq->eq_bufsz - 1);
700
701 /* Calculate the pointer to the first EQ entry */
702 eqe = &eq->eq_buf[cons_indx];
703
704 /*
705 * Sync the current EQE to read
706 * We need to force a ddi_dma_sync() here (independent of how the
707 * EQ was mapped) because it is possible for us to receive the
708 * interrupt, do a read of the ECR, and have each of these
709 * operations complete successfully even though the hardware's DMA
710 * to the EQ has not yet completed.
711 */
712 tavor_eqe_sync(eq, eqe, DDI_DMA_SYNC_FORCPU, TAVOR_EQ_SYNC_FORCE);
713
714 /*
715 * Pull the handler function for this EQ from the Tavor Event Queue
716 * handle
717 */
718 eqfunction = eq->eq_func;
719
720 /*
721 * Keep pulling entries from the EQ until we find an entry owned by
722 * the hardware. As long as there are EQE's owned by SW, process
992 * SPIN in the HCR since the event queues are not setup yet, and we
993 * cannot NOSPIN at this point in time.
994 */
995 mr_attr.mr_vaddr = (uint64_t)(uintptr_t)buf;
996 mr_attr.mr_len = eq->eq_eqinfo.qa_size;
997 mr_attr.mr_as = NULL;
998 mr_attr.mr_flags = IBT_MR_NOSLEEP | IBT_MR_ENABLE_LOCAL_WRITE;
999 dma_xfer_mode = state->ts_cfg_profile->cp_streaming_consistent;
1000 if (dma_xfer_mode == DDI_DMA_STREAMING) {
1001 mr_attr.mr_flags |= IBT_MR_NONCOHERENT;
1002 }
1003 op.mro_bind_type = state->ts_cfg_profile->cp_iommu_bypass;
1004 op.mro_bind_dmahdl = eq->eq_eqinfo.qa_dmahdl;
1005 op.mro_bind_override_addr = 0;
1006 status = tavor_mr_register(state, pd, &mr_attr, &mr, &op);
1007 if (status != DDI_SUCCESS) {
1008 /* Set "status" and "errormsg" and goto failure */
1009 TAVOR_TNF_FAIL(DDI_FAILURE, "failed register mr");
1010 goto eqalloc_fail4;
1011 }
1012 addr = mr->mr_bindinfo.bi_addr;
1013 lkey = mr->mr_lkey;
1014
1015 /* Determine if later ddi_dma_sync will be necessary */
1016 eq->eq_sync = TAVOR_EQ_IS_SYNC_REQ(state, eq->eq_eqinfo);
1017
1018 /* Sync entire EQ for use by the hardware (if necessary) */
1019 if (eq->eq_sync) {
1020 (void) ddi_dma_sync(mr->mr_bindinfo.bi_dmahdl, 0,
1021 eq->eq_eqinfo.qa_size, DDI_DMA_SYNC_FORDEV);
1022 }
1023
1024 /*
1025 * Fill in the EQC entry. This is the final step before passing
1026 * ownership of the EQC entry to the Tavor hardware. We use all of
1027 * the information collected/calculated above to fill in the
1028 * requisite portions of the EQC. Note: We create all EQs in the
1029 * "fired" state. We will arm them later (after our interrupt
1030 * routine has been registered.)
1031 */
|