52 #include <sys/cred.h>
53 #include <sys/note.h>
54 #include <sys/ib/ibtl/ibti.h>
55 #include <sys/socket.h>
56 #include <netinet/in.h>
57 #include <daplt_if.h>
58 #include <daplt.h>
59
60 /*
61 * The following variables support the debug log buffer scheme.
62 */
63 #ifdef DEBUG
64 static char daplka_dbgbuf[0x80000];
65 #else /* DEBUG */
66 static char daplka_dbgbuf[0x4000];
67 #endif /* DEBUG */
68 static int daplka_dbgsize = sizeof (daplka_dbgbuf);
69 static size_t daplka_dbgnext;
70 static int daplka_dbginit = 0;
71 static kmutex_t daplka_dbglock;
72 _NOTE(MUTEX_PROTECTS_DATA(daplka_dbglock,
73 daplka_dbgbuf
74 daplka_dbgnext))
75
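/*
 * Editorial sketch (not the driver's actual daplka_debug routine, which
 * is not shown in this fragment): one common way such a wrap-around log
 * buffer is filled in, using the variables declared above.
 */
#if 0
static void
daplka_debug_sketch(const char *fmt, ...)
{
	char	tmp[256];
	size_t	len;
	va_list	ap;

	va_start(ap, fmt);
	len = vsnprintf(tmp, sizeof (tmp), fmt, ap);
	va_end(ap);

	mutex_enter(&daplka_dbglock);
	/* wrap to the start of the buffer when the message will not fit */
	if (daplka_dbgnext + len >= daplka_dbgsize)
		daplka_dbgnext = 0;
	bcopy(tmp, &daplka_dbgbuf[daplka_dbgnext], len);
	daplka_dbgnext += len;
	mutex_exit(&daplka_dbglock);
}
#endif
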
76 static int daplka_dbg = 0x0103;
77 static void daplka_console(const char *, ...);
78 static void daplka_debug(const char *, ...);
79 static int daplka_apm = 0x1; /* default enable */
80 static int daplka_failback = 0x1; /* default enable */
81 static int daplka_query_aft_setaltpath = 10;
82
83 #define DERR \
84 if (daplka_dbg & 0x100) \
85 daplka_debug
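/*
 * Usage note (editorial): because DERR expands to a guarded call, a
 * statement such as
 *	DERR("evd_create: copyin error %d", retval);
 * becomes
 *	if (daplka_dbg & 0x100)
 *		daplka_debug("evd_create: copyin error %d", retval);
 * so DERR (and D1 below) are invoked exactly like printf-style functions.
 */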
86
87 #ifdef DEBUG
88
89 #define DINFO \
90 daplka_console
91
92 #define D1 \
93 if (daplka_dbg & 0x01) \
94 daplka_debug
542 static daplka_t *daplka_dev = NULL;
543 static void *daplka_state = NULL;
544
545 /*
546 * global SP hash table
547 */
548 static daplka_hash_table_t daplka_global_sp_htbl;
549
550 /*
551 * timer_info hash table
552 */
553 static daplka_hash_table_t daplka_timer_info_htbl;
554 static uint32_t daplka_timer_hkey = 0;
555
556 /*
557 * shared MR avl tree
558 */
559 static avl_tree_t daplka_shared_mr_tree;
560 static kmutex_t daplka_shared_mr_lock;
561 static int daplka_shared_mr_cmp(const void *, const void *);
562 _NOTE(MUTEX_PROTECTS_DATA(daplka_shared_mr_lock,
563 daplka_shared_mr_tree))
564
565 /*
566 * default kmem flags used by this driver
567 */
568 static int daplka_km_flags = KM_SLEEP;
569
570 /*
571 * taskq used for handling background tasks
572 */
573 static taskq_t *daplka_taskq = NULL;
574
575 /*
576 * daplka_cm_delay is the length of time the active
577 * side needs to wait before timing out on the REP message.
578 */
579 static clock_t daplka_cm_delay = 60000000;
580
581 /*
582 * modunload will fail if pending_close is non-zero
583 */
2195 /* fill in open channel args */
2196 chan_args.oc_path = &path_info;
2197 chan_args.oc_cm_handler = daplka_cm_rc_handler;
2198 chan_args.oc_cm_clnt_private = (void *)ep_rp;
2199 chan_args.oc_rdma_ra_out = hca_attrp->hca_max_rdma_out_chan;
2200 chan_args.oc_rdma_ra_in = hca_attrp->hca_max_rdma_in_chan;
2201 chan_args.oc_path_retry_cnt = 7; /* 3-bit field */
2202 chan_args.oc_path_rnr_retry_cnt = IBT_RNR_INFINITE_RETRY;
2203
2204 ASSERT(args.epc_priv_sz > 0);
2205 priv_data = (void *)args.epc_priv;
2206
2207 chan_args.oc_priv_data_len = args.epc_priv_sz;
2208 chan_args.oc_priv_data = priv_data;
2209
2210 /*
2211 * calculate checksum value of hello message and
2212 * put hello message in networking byte order
2213 */
2214 dp = (DAPL_PRIVATE *)priv_data;
2215 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*dp))
2216 dp->hello_msg.hi_port = htons(dp->hello_msg.hi_port);
2217 dp->hello_msg.hi_checksum = 0;
2218 dp->hello_msg.hi_checksum = htons(daplka_hellomsg_cksum(dp));
2219 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*dp))
2220
2221 if (args.epc_timeout > 0) {
2222 /*
2223 * increment refcnt before passing reference to
2224 * timer_info_alloc.
2225 */
2226 DAPLKA_RS_REF(ep_rp);
2227 timerp = daplka_timer_info_alloc(ep_rp);
2228 if (timerp == NULL) {
2229 DERR("ep_connect: cannot allocate timer\n");
2230 /*
2231 * we need to remove the reference if
2232 * allocation failed.
2233 */
2234 DAPLKA_RS_UNREF(ep_rp);
2235 retval = ENOMEM;
2236 goto cleanup;
2237 }
2238 /*
2239 * We generate our own hkeys so that timer_hkey can fit
2589 ibt_cq_attr_t cq_attr;
2590 dapl_evd_create_t args;
2591 uint64_t evd_hkey = 0;
2592 boolean_t inserted = B_FALSE;
2593 int retval = 0;
2594 ibt_status_t status;
2595
2596 retval = ddi_copyin((void *)arg, &args, sizeof (dapl_evd_create_t),
2597 mode);
2598 if (retval != 0) {
2599 DERR("evd_create: copyin error %d", retval);
2600 return (EFAULT);
2601 }
2602 if ((args.evd_flags &
2603 ~(DAT_EVD_DEFAULT_FLAG | DAT_EVD_SOFTWARE_FLAG)) != 0) {
2604 DERR("evd_create: invalid flags 0x%x\n", args.evd_flags);
2605 return (EINVAL);
2606 }
2607
2608 evd_rp = kmem_zalloc(sizeof (daplka_evd_resource_t), daplka_km_flags);
2609 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*evd_rp))
2610 DAPLKA_RS_INIT(evd_rp, DAPL_TYPE_EVD,
2611 DAPLKA_RS_RNUM(ia_rp), daplka_evd_destroy);
2612
2613 mutex_init(&evd_rp->evd_lock, NULL, MUTEX_DRIVER, NULL);
2614 cv_init(&evd_rp->evd_cv, NULL, CV_DRIVER, NULL);
2615 evd_rp->evd_hca = ia_rp->ia_hca;
2616 evd_rp->evd_flags = args.evd_flags;
2617 evd_rp->evd_hca_hdl = ia_rp->ia_hca_hdl;
2618 evd_rp->evd_cookie = args.evd_cookie;
2619 evd_rp->evd_cno_res = NULL;
2620 evd_rp->evd_cr_events.eel_event_type = DAPLKA_EVD_CM_EVENTS;
2621 evd_rp->evd_conn_events.eel_event_type = DAPLKA_EVD_CM_EVENTS;
2622 evd_rp->evd_async_events.eel_event_type = DAPLKA_EVD_ASYNC_EVENTS;
2623
2624 /*
2625 * if the client specified a non-zero cno_hkey, we
2626 * lookup the cno and save the reference for later use.
2627 */
2628 if (args.evd_cno_hkey > 0) {
2629 daplka_cno_resource_t *cno_rp;
2677 if (status != IBT_SUCCESS) {
2678 DERR("evd_create: ibt_ci_data_out error(%d)", status);
2679 *rvalp = (int)status;
2680 retval = 0;
2681 goto cleanup;
2682 }
2683
2684 args.evd_cq_real_size = evd_rp->evd_cq_real_size;
2685
2686 ibt_set_cq_handler(evd_rp->evd_cq_hdl, daplka_cq_handler,
2687 (void *)evd_rp);
2688 }
2689
2690 retval = daplka_hash_insert(&ia_rp->ia_evd_htbl,
2691 &evd_hkey, (void *)evd_rp);
2692 if (retval != 0) {
2693 DERR("evd_create: cannot insert evd %d\n", retval);
2694 goto cleanup;
2695 }
2696 inserted = B_TRUE;
2697 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*evd_rp))
2698
2699 /*
2700 * If this evd handles async events, we need to add it to the
2701 * IA resource's async evd list.
2702 */
2703 if (evd_rp->evd_flags & DAT_EVD_ASYNC_FLAG) {
2704 async_evd = kmem_zalloc(sizeof (daplka_async_evd_hkey_t),
2705 daplka_km_flags);
2706 /* add the evd to the head of the list */
2707 mutex_enter(&ia_rp->ia_lock);
2708 async_evd->aeh_evd_hkey = evd_hkey;
2709 async_evd->aeh_next = ia_rp->ia_async_evd_hkeys;
2710 ia_rp->ia_async_evd_hkeys = async_evd;
2711 mutex_exit(&ia_rp->ia_lock);
2712 }
2713
2714 args.evd_hkey = evd_hkey;
2715 retval = copyout(&args, (void *)arg, sizeof (dapl_evd_create_t));
2716 if (retval != 0) {
2717 DERR("evd_create: copyout error %d\n", retval);
3471 kmem_free(curr, sizeof (daplka_async_evd_hkey_t));
3472 }
3473
3474 /* UNREF calls the actual free function when refcnt is zero */
3475 DAPLKA_RS_UNREF(evd_rp);
3476 return (0);
3477 }
3478
3479 /*
3480 * destroys EVD resource.
3481 * called when refcnt drops to zero.
3482 */
3483 static int
3484 daplka_evd_destroy(daplka_resource_t *gen_rp)
3485 {
3486 daplka_evd_resource_t *evd_rp = (daplka_evd_resource_t *)gen_rp;
3487 ibt_status_t status;
3488 daplka_evd_event_t *evt;
3489 ibt_priv_data_len_t len;
3490
3491 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*evd_rp))
3492 D3("evd_destroy: entering, evd_rp 0x%p, rnum %d\n",
3493 evd_rp, DAPLKA_RS_RNUM(evd_rp));
3494 /*
3495 * free CQ
3496 */
3497 if (evd_rp->evd_cq_hdl) {
3498 ibt_set_cq_handler(evd_rp->evd_cq_hdl, NULL, NULL);
3499 mutex_enter(&daplka_dev->daplka_mutex);
3500 ibt_set_cq_private(evd_rp->evd_cq_hdl, NULL);
3501 mutex_exit(&daplka_dev->daplka_mutex);
3502
3503 status = daplka_ibt_free_cq(evd_rp, evd_rp->evd_cq_hdl);
3504 if (status != IBT_SUCCESS) {
3505 DERR("evd_destroy: ibt_free_cq returned %d\n", status);
3506 }
3507 evd_rp->evd_cq_hdl = NULL;
3508 D2("evd_destroy: cq freed, rnum %d\n", DAPLKA_RS_RNUM(evd_rp));
3509 }
3510
3511 /*
3696 /*
3697 * allocates a CNO.
3698 * the returned cno_hkey may subsequently be used in evd_create.
3699 */
3700 /* ARGSUSED */
3701 static int
3702 daplka_cno_alloc(daplka_ia_resource_t *ia_rp, intptr_t arg, int mode,
3703 cred_t *cred, int *rvalp)
3704 {
3705 dapl_cno_alloc_t args;
3706 daplka_cno_resource_t *cno_rp = NULL;
3707 uint64_t cno_hkey = 0;
3708 boolean_t inserted = B_FALSE;
3709 int retval = 0;
3710
3711 cno_rp = kmem_zalloc(sizeof (*cno_rp), daplka_km_flags);
3712 if (cno_rp == NULL) {
3713 DERR("cno_alloc: cannot allocate cno resource\n");
3714 return (ENOMEM);
3715 }
3716 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*cno_rp))
3717 DAPLKA_RS_INIT(cno_rp, DAPL_TYPE_CNO,
3718 DAPLKA_RS_RNUM(ia_rp), daplka_cno_destroy);
3719
3720 mutex_init(&cno_rp->cno_lock, NULL, MUTEX_DRIVER, NULL);
3721 cv_init(&cno_rp->cno_cv, NULL, CV_DRIVER, NULL);
3722 cno_rp->cno_evd_cookie = 0;
3723
3724 /* insert into cno hash table */
3725 retval = daplka_hash_insert(&ia_rp->ia_cno_htbl,
3726 &cno_hkey, (void *)cno_rp);
3727 if (retval != 0) {
3728 DERR("cno_alloc: cannot insert cno resource\n");
3729 goto cleanup;
3730 }
3731 inserted = B_TRUE;
3732 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*cno_rp))
3733
3734 /* return hkey to library */
3735 args.cno_hkey = cno_hkey;
3736
3737 retval = ddi_copyout(&args, (void *)arg, sizeof (dapl_cno_alloc_t),
3738 mode);
3739 if (retval != 0) {
3740 DERR("cno_alloc: copyout error %d\n", retval);
3741 retval = EFAULT;
3742 goto cleanup;
3743 }
3744 return (0);
3745
3746 cleanup:;
3747 if (inserted) {
3748 daplka_cno_resource_t *free_rp = NULL;
3749
3750 (void) daplka_hash_remove(&ia_rp->ia_cno_htbl, cno_hkey,
3751 (void **)&free_rp);
3752 if (free_rp != cno_rp) {
4181 DERR("cr_reject: ibt_cm_proceed returned %d\n", status);
4182 *rvalp = (int)status;
4183 retval = 0;
4184 }
4185
4186 cleanup:;
4187 if (sp_rp != NULL) {
4188 DAPLKA_RS_UNREF(sp_rp);
4189 }
4190 return (retval);
4191 }
4192
4193
4194 /*
4195 * daplka_sp_match is used by daplka_hash_walk for finding SPs
4196 */
4197 typedef struct daplka_sp_match_s {
4198 uint64_t spm_conn_qual;
4199 daplka_sp_resource_t *spm_sp_rp;
4200 } daplka_sp_match_t;
4201 _NOTE(SCHEME_PROTECTS_DATA("daplka", daplka_sp_match_s::spm_sp_rp))
4202
4203 static int
4204 daplka_sp_match(void *objp, void *arg)
4205 {
4206 daplka_sp_resource_t *sp_rp = (daplka_sp_resource_t *)objp;
4207
4208 ASSERT(DAPLKA_RS_TYPE(sp_rp) == DAPL_TYPE_SP);
4209 if (sp_rp->sp_conn_qual ==
4210 ((daplka_sp_match_t *)arg)->spm_conn_qual) {
4211 ((daplka_sp_match_t *)arg)->spm_sp_rp = sp_rp;
4212 D2("daplka_sp_match: found sp, conn_qual %016llu\n",
4213 (longlong_t)((daplka_sp_match_t *)arg)->spm_conn_qual);
4214 DAPLKA_RS_REF(sp_rp);
4215 return (1);
4216 }
4217 return (0);
4218 }
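
/*
 * Editorial sketch (not part of the driver): how daplka_sp_match is
 * typically paired with daplka_hash_walk to locate an SP by connection
 * qualifier.  The conn_qual variable is hypothetical; the choice of
 * table and lock mode is an assumption based on this fragment.
 */
#if 0
	daplka_sp_match_t	spm;

	spm.spm_conn_qual = conn_qual;	/* qualifier being searched for */
	spm.spm_sp_rp = NULL;
	daplka_hash_walk(&daplka_global_sp_htbl, daplka_sp_match,
	    (void *)&spm, RW_READER);
	if (spm.spm_sp_rp != NULL) {
		/* found; daplka_sp_match took a reference on the SP */
	}
#endif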
4219
4220 /*
4221 * cr_handoff allows the client to handoff a connection request from
4574 boolean_t inserted = B_FALSE;
4575 daplka_mr_resource_t *mr_rp;
4576 daplka_pd_resource_t *pd_rp;
4577 dapl_mr_register_t args;
4578 ibt_mr_data_in_t mr_cb_data_in;
4579 uint64_t mr_hkey = 0;
4580 ibt_status_t status;
4581 int retval;
4582
4583 retval = ddi_copyin((void *)arg, &args, sizeof (dapl_mr_register_t),
4584 mode);
4585 if (retval != 0) {
4586 DERR("mr_register: copyin error %d\n", retval);
4587 return (EINVAL);
4588 }
4589 mr_rp = kmem_zalloc(sizeof (daplka_mr_resource_t), daplka_km_flags);
4590 if (mr_rp == NULL) {
4591 DERR("mr_register: cannot allocate mr resource\n");
4592 return (ENOMEM);
4593 }
4594 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
4595 DAPLKA_RS_INIT(mr_rp, DAPL_TYPE_MR,
4596 DAPLKA_RS_RNUM(ia_rp), daplka_mr_destroy);
4597
4598 mutex_init(&mr_rp->mr_lock, NULL, MUTEX_DRIVER, NULL);
4599 mr_rp->mr_hca = ia_rp->ia_hca;
4600 mr_rp->mr_hca_hdl = ia_rp->ia_hca_hdl;
4601 mr_rp->mr_next = NULL;
4602 mr_rp->mr_shared_mr = NULL;
4603
4604 /* get pd handle */
4605 pd_rp = (daplka_pd_resource_t *)
4606 daplka_hash_lookup(&ia_rp->ia_pd_htbl, args.mr_pd_hkey);
4607 if (pd_rp == NULL) {
4608 DERR("mr_register: cannot find pd resource\n");
4609 retval = EINVAL;
4610 goto cleanup;
4611 }
4612 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
4613 mr_rp->mr_pd_res = pd_rp;
4614
4642 status = ibt_ci_data_in(ia_rp->ia_hca_hdl,
4643 IBT_CI_NO_FLAGS, IBT_HDL_MR, (void *)mr_rp->mr_hdl,
4644 &mr_cb_data_in, sizeof (mr_cb_data_in));
4645
4646 if (status != IBT_SUCCESS) {
4647 DERR("mr_register: ibt_ci_data_in error(%d) ver(%d)",
4648 status, mr_cb_data_in.mr_rev);
4649 *rvalp = (int)status;
4650 retval = 0;
4651 goto cleanup;
4652 }
4653
4654 /* insert into mr hash table */
4655 retval = daplka_hash_insert(&ia_rp->ia_mr_htbl,
4656 &mr_hkey, (void *)mr_rp);
4657 if (retval != 0) {
4658 DERR("mr_register: cannot insert mr resource into mr_htbl\n");
4659 goto cleanup;
4660 }
4661 inserted = B_TRUE;
4662 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*mr_rp))
4663
4664 args.mr_lkey = mr_rp->mr_desc.md_lkey;
4665 args.mr_rkey = mr_rp->mr_desc.md_rkey;
4666 args.mr_hkey = mr_hkey;
4667
4668 retval = ddi_copyout((void *)&args, (void *)arg,
4669 sizeof (dapl_mr_register_t), mode);
4670 if (retval != 0) {
4671 DERR("mr_register: copyout error %d\n", retval);
4672 retval = EFAULT;
4673 goto cleanup;
4674 }
4675 return (0);
4676
4677 cleanup:;
4678 if (inserted) {
4679 daplka_mr_resource_t *free_rp = NULL;
4680
4681 (void) daplka_hash_remove(&ia_rp->ia_mr_htbl, mr_hkey,
4682 (void **)&free_rp);
4776 } else {
4777 D2("mr_register_shared: cannot find cookie:\n"
4778 "0x%016llx%016llx%016llx%016llx%016llx\n",
4779 (longlong_t)tmp_smr.smr_cookie.mc_uint_arr[4],
4780 (longlong_t)tmp_smr.smr_cookie.mc_uint_arr[3],
4781 (longlong_t)tmp_smr.smr_cookie.mc_uint_arr[2],
4782 (longlong_t)tmp_smr.smr_cookie.mc_uint_arr[1],
4783 (longlong_t)tmp_smr.smr_cookie.mc_uint_arr[0]);
4784
4785 /*
4786 * if we cannot find smrp, we need to create and
4787 * insert one into daplka_shared_mr_tree
4788 */
4789 smrp = kmem_zalloc(sizeof (daplka_shared_mr_t),
4790 daplka_km_flags);
4791 if (smrp == NULL) {
4792 retval = ENOMEM;
4793 mutex_exit(&daplka_shared_mr_lock);
4794 goto cleanup;
4795 }
4796 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*smrp))
4797 smrp->smr_refcnt = 1;
4798 smrp->smr_cookie = args.mrs_shm_cookie;
4799 smrp->smr_state = DAPLKA_SMR_TRANSITIONING;
4800 smrp->smr_mr_list = NULL;
4801 cv_init(&smrp->smr_cv, NULL, CV_DRIVER, NULL);
4802 avl_insert(&daplka_shared_mr_tree, smrp, where);
4803 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*smrp))
4804 }
4805 mutex_exit(&daplka_shared_mr_lock);
4806
4807 mr_rp = kmem_zalloc(sizeof (daplka_mr_resource_t), daplka_km_flags);
4808 if (mr_rp == NULL) {
4809 DERR("mr_register_shared: cannot allocate mr resource\n");
4810 goto cleanup;
4811 }
4812 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
4813 DAPLKA_RS_INIT(mr_rp, DAPL_TYPE_MR,
4814 DAPLKA_RS_RNUM(ia_rp), daplka_mr_destroy);
4815
4816 mutex_init(&mr_rp->mr_lock, NULL, MUTEX_DRIVER, NULL);
4817 mr_rp->mr_hca = ia_rp->ia_hca;
4818 mr_rp->mr_hca_hdl = ia_rp->ia_hca_hdl;
4819 mr_rp->mr_next = NULL;
4820 mr_rp->mr_shared_mr = NULL;
4821
4822 /* get pd handle */
4823 pd_rp = (daplka_pd_resource_t *)
4824 daplka_hash_lookup(&ia_rp->ia_pd_htbl, args.mrs_pd_hkey);
4825 if (pd_rp == NULL) {
4826 DERR("mr_register_shared: cannot find pd resource\n");
4827 retval = EINVAL;
4828 goto cleanup;
4829 }
4830 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
4831 mr_rp->mr_pd_res = pd_rp;
4832
4906 retval = 0;
4907 goto cleanup;
4908 }
4909
4910 /*
4911 * we bump reference of mr_rp and enqueue it onto smrp.
4912 */
4913 DAPLKA_RS_REF(mr_rp);
4914 mr_rp->mr_next = smrp->smr_mr_list;
4915 smrp->smr_mr_list = mr_rp;
4916 mr_rp->mr_shared_mr = smrp;
4917
4918 /* insert into mr hash table */
4919 retval = daplka_hash_insert(&ia_rp->ia_mr_htbl,
4920 &mr_hkey, (void *)mr_rp);
4921 if (retval != 0) {
4922 DERR("mr_register_shared: cannot insert mr resource\n");
4923 goto cleanup;
4924 }
4925 inserted = B_TRUE;
4926 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*mr_rp))
4927
4928 /*
4929 * at this point, there are two references to our mr resource.
4930 * one is kept in ia_mr_htbl. the other is kept in the list
4931 * within this shared mr object (smrp). when we deregister this
4932 * mr or when a callback invalidates this mr, the reference kept
4933 * by this shared mr object will be removed.
4934 */
4935
4936 args.mrs_lkey = mr_rp->mr_desc.md_lkey;
4937 args.mrs_rkey = mr_rp->mr_desc.md_rkey;
4938 args.mrs_hkey = mr_hkey;
4939
4940 retval = ddi_copyout((void *)&args, (void *)arg,
4941 sizeof (dapl_mr_register_shared_t), mode);
4942 if (retval != 0) {
4943 DERR("mr_register_shared: copyout error %d\n", retval);
4944 retval = EFAULT;
4945 goto cleanup;
4946 }
4961 (void) daplka_hash_remove(&ia_rp->ia_mr_htbl, mr_hkey,
4962 (void **)&free_rp);
4963 if (free_rp != mr_rp) {
4964 DERR("mr_register_shared: "
4965 "cannot remove mr from hash table\n");
4966 /*
4967 * we can only get here if another thread
4968 * has completed the cleanup in mr_deregister
4969 */
4970 return (retval);
4971 }
4972 }
4973 if (smrp != NULL) {
4974 mutex_enter(&daplka_shared_mr_lock);
4975 ASSERT(smrp->smr_refcnt > 0);
4976 smrp->smr_refcnt--;
4977
4978 if (smrp->smr_refcnt == 0) {
4979 DERR("mr_register_shared: freeing smrp 0x%p\n", smrp);
4980 avl_remove(&daplka_shared_mr_tree, smrp);
4981 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*smrp))
4982 if (smrp->smr_mr_list != NULL) {
4983 /*
4984 * the refcnt is 0. if there is anything
4985 * left on the list, it must be ours.
4986 */
4987 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
4988 ASSERT(smrp->smr_mr_list == mr_rp);
4989 DAPLKA_RS_UNREF(mr_rp);
4990 smrp->smr_mr_list = NULL;
4991 ASSERT(mr_rp->mr_shared_mr == smrp);
4992 mr_rp->mr_shared_mr = NULL;
4993 ASSERT(mr_rp->mr_next == NULL);
4994 }
4995 smrp->smr_state = DAPLKA_SMR_FREED;
4996 cv_destroy(&smrp->smr_cv);
4997 kmem_free(smrp, sizeof (daplka_shared_mr_t));
4998 } else {
4999 DERR("mr_register_shared: resetting smr_state "
5000 "smrp 0x%p, %d waiters remain\n", smrp,
5001 smrp->smr_refcnt);
5002 ASSERT(smrp->smr_state == DAPLKA_SMR_TRANSITIONING);
5003 if (smrp->smr_mr_list != NULL && mr_rp != NULL) {
5004 daplka_mr_resource_t **mpp;
5005
5006 /*
5007 * search and remove mr_rp from smr_mr_list
5008 */
5009 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
5010 mpp = &smrp->smr_mr_list;
5011 while (*mpp != NULL) {
5012 if (*mpp == mr_rp) {
5013 *mpp = (*mpp)->mr_next;
5014 DAPLKA_RS_UNREF(mr_rp);
5015 ASSERT(mr_rp->mr_shared_mr ==
5016 smrp);
5017 mr_rp->mr_shared_mr = NULL;
5018 mr_rp->mr_next = NULL;
5019 break;
5020 }
5021 mpp = &(*mpp)->mr_next;
5022 }
5023 }
5024 /*
5025 * note that smr_state == READY does not necessarily
5026 * mean that smr_mr_list is non-empty. in this case,
5027 * we are doing cleanup because of a failure. we set
5028 * the state to READY to allow other threads to
5029 * continue.
5061 retval = ddi_copyin((void *)arg, &args,
5062 sizeof (dapl_mr_register_lmr_t), mode);
5063 if (retval != 0) {
5064 DERR("mr_register_lmr: copyin error %d\n", retval);
5065 return (EINVAL);
5066 }
5067 orig_mr_rp = (daplka_mr_resource_t *)
5068 daplka_hash_lookup(&ia_rp->ia_mr_htbl, args.mrl_orig_hkey);
5069 if (orig_mr_rp == NULL) {
5070 DERR("mr_register_lmr: cannot find mr resource\n");
5071 return (EINVAL);
5072 }
5073 ASSERT(DAPLKA_RS_TYPE(orig_mr_rp) == DAPL_TYPE_MR);
5074
5075 mr_rp = kmem_zalloc(sizeof (daplka_mr_resource_t), daplka_km_flags);
5076 if (mr_rp == NULL) {
5077 DERR("mr_register_lmr: cannot allocate mr resource\n");
5078 retval = ENOMEM;
5079 goto cleanup;
5080 }
5081 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
5082 DAPLKA_RS_INIT(mr_rp, DAPL_TYPE_MR,
5083 DAPLKA_RS_RNUM(ia_rp), daplka_mr_destroy);
5084
5085 mutex_init(&mr_rp->mr_lock, NULL, MUTEX_DRIVER, NULL);
5086 mr_rp->mr_hca = ia_rp->ia_hca;
5087 mr_rp->mr_hca_hdl = ia_rp->ia_hca_hdl;
5088 mr_rp->mr_next = NULL;
5089 mr_rp->mr_shared_mr = NULL;
5090
5091 DAPLKA_RS_REF(orig_mr_rp->mr_pd_res);
5092 mr_rp->mr_pd_res = orig_mr_rp->mr_pd_res;
5093 mr_rp->mr_attr = orig_mr_rp->mr_attr;
5094
5095 /* Pass the IO addr that was returned while allocating the orig MR */
5096 mem_sattr.mr_vaddr = orig_mr_rp->mr_desc.md_vaddr;
5097 mem_sattr.mr_flags = args.mrl_flags | IBT_MR_NOSLEEP;
5098
5099 status = daplka_ibt_register_shared_mr(mr_rp, ia_rp->ia_hca_hdl,
5100 orig_mr_rp->mr_hdl, mr_rp->mr_pd_res->pd_hdl, &mem_sattr,
5101 &mr_rp->mr_hdl, &mr_rp->mr_desc);
5119 &mr_cb_data_in, sizeof (mr_cb_data_in));
5120
5121 if (status != IBT_SUCCESS) {
5122 DERR("mr_register_lmr: ibt_ci_data_in error(%d) ver(%d)",
5123 status, mr_cb_data_in.mr_rev);
5124 *rvalp = (int)status;
5125 retval = 0;
5126 goto cleanup;
5127 }
5128 mr_rp->mr_attr.mr_len = orig_mr_rp->mr_attr.mr_len;
5129 mr_rp->mr_attr.mr_flags = mem_sattr.mr_flags;
5130
5131 /* insert into mr hash table */
5132 retval = daplka_hash_insert(&ia_rp->ia_mr_htbl, &mr_hkey,
5133 (void *)mr_rp);
5134 if (retval != 0) {
5135 DERR("mr_register_lmr: cannot insert mr resource into mr_htbl\n");
5136 goto cleanup;
5137 }
5138 inserted = B_TRUE;
5139 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*mr_rp))
5140
5141 args.mrl_lkey = mr_rp->mr_desc.md_lkey;
5142 args.mrl_rkey = mr_rp->mr_desc.md_rkey;
5143 args.mrl_hkey = mr_hkey;
5144
5145 retval = ddi_copyout((void *)&args, (void *)arg,
5146 sizeof (dapl_mr_register_lmr_t), mode);
5147 if (retval != 0) {
5148 DERR("mr_register_lmr: copyout error %d\n", retval);
5149 retval = EFAULT;
5150 goto cleanup;
5151 }
5152 if (orig_mr_rp != NULL) {
5153 DAPLKA_RS_UNREF(orig_mr_rp);
5154 }
5155 return (0);
5156
5157 cleanup:;
5158 if (inserted) {
5159 daplka_mr_resource_t *free_rp = NULL;
5350 if (status != IBT_SUCCESS) {
5351 DERR("mr_sync: ibt_sync_mr error %d\n", status);
5352 *rvalp = (int)status;
5353 }
5354 for (i = 0; i < args.mrs_numseg; i++) {
5355 DAPLKA_RS_UNREF(mr_rp[i]);
5356 }
5357 return (0);
5358 }
5359
5360 /*
5361 * destroys a memory region.
5362 * called when refcnt drops to zero.
5363 */
5364 static int
5365 daplka_mr_destroy(daplka_resource_t *gen_rp)
5366 {
5367 daplka_mr_resource_t *mr_rp = (daplka_mr_resource_t *)gen_rp;
5368 ibt_status_t status;
5369
5370 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr_rp))
5371 ASSERT(DAPLKA_RS_REFCNT(mr_rp) == 0);
5372 ASSERT(mr_rp->mr_shared_mr == NULL);
5373 D3("mr_destroy: entering, mr_rp 0x%p, rnum %d\n",
5374 mr_rp, DAPLKA_RS_RNUM(mr_rp));
5375
5376 /*
5377 * deregister mr
5378 */
5379 if (mr_rp->mr_hdl) {
5380 status = daplka_ibt_deregister_mr(mr_rp, mr_rp->mr_hca_hdl,
5381 mr_rp->mr_hdl);
5382 if (status != IBT_SUCCESS) {
5383 DERR("mr_destroy: ibt_deregister_mr returned %d\n",
5384 status);
5385 }
5386 mr_rp->mr_hdl = NULL;
5387 D3("mr_destroy: mr deregistered\n");
5388 }
5389 mr_rp->mr_attr.mr_vaddr = 0;
5390
5442 /*
5443 * allocates a protection domain.
5444 */
5445 /* ARGSUSED */
5446 static int
5447 daplka_pd_alloc(daplka_ia_resource_t *ia_rp, intptr_t arg, int mode,
5448 cred_t *cred, int *rvalp)
5449 {
5450 dapl_pd_alloc_t args;
5451 daplka_pd_resource_t *pd_rp;
5452 ibt_status_t status;
5453 uint64_t pd_hkey = 0;
5454 boolean_t inserted = B_FALSE;
5455 int retval;
5456
5457 pd_rp = kmem_zalloc(sizeof (*pd_rp), daplka_km_flags);
5458 if (pd_rp == NULL) {
5459 DERR("pd_alloc: cannot allocate pd resource\n");
5460 return (ENOMEM);
5461 }
5462 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pd_rp))
5463 DAPLKA_RS_INIT(pd_rp, DAPL_TYPE_PD,
5464 DAPLKA_RS_RNUM(ia_rp), daplka_pd_destroy);
5465
5466 pd_rp->pd_hca = ia_rp->ia_hca;
5467 pd_rp->pd_hca_hdl = ia_rp->ia_hca_hdl;
5468 status = daplka_ibt_alloc_pd(pd_rp, pd_rp->pd_hca_hdl,
5469 IBT_PD_NO_FLAGS, &pd_rp->pd_hdl);
5470 if (status != IBT_SUCCESS) {
5471 DERR("pd_alloc: ibt_alloc_pd returned %d\n", status);
5472 *rvalp = (int)status;
5473 retval = 0;
5474 goto cleanup;
5475 }
5476
5477 /* insert into pd hash table */
5478 retval = daplka_hash_insert(&ia_rp->ia_pd_htbl,
5479 &pd_hkey, (void *)pd_rp);
5480 if (retval != 0) {
5481 DERR("pd_alloc: cannot insert pd resource into pd_htbl\n");
5482 goto cleanup;
5483 }
5484 inserted = B_TRUE;
5485 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*pd_rp))
5486
5487 /* return hkey to library */
5488 args.pda_hkey = pd_hkey;
5489
5490 retval = ddi_copyout(&args, (void *)arg, sizeof (dapl_pd_alloc_t),
5491 mode);
5492 if (retval != 0) {
5493 DERR("pd_alloc: copyout error %d\n", retval);
5494 retval = EFAULT;
5495 goto cleanup;
5496 }
5497 return (0);
5498
5499 cleanup:;
5500 if (inserted) {
5501 daplka_pd_resource_t *free_rp = NULL;
5502
5503 (void) daplka_hash_remove(&ia_rp->ia_pd_htbl, pd_hkey,
5504 (void **)&free_rp);
5505 if (free_rp != pd_rp) {
5508 * we can only get here if another thread
5509 * has completed the cleanup in pd_free
5510 */
5511 return (retval);
5512 }
5513 }
5514 DAPLKA_RS_UNREF(pd_rp);
5515 return (retval);
5516 }
5517
5518 /*
5519 * destroys a protection domain.
5520 * called when refcnt drops to zero.
5521 */
5522 static int
5523 daplka_pd_destroy(daplka_resource_t *gen_rp)
5524 {
5525 daplka_pd_resource_t *pd_rp = (daplka_pd_resource_t *)gen_rp;
5526 ibt_status_t status;
5527
5528 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*pd_rp))
5529 ASSERT(DAPLKA_RS_REFCNT(pd_rp) == 0);
5530 D3("pd_destroy: entering, pd_rp %p, rnum %d\n",
5531 pd_rp, DAPLKA_RS_RNUM(pd_rp));
5532
5533 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
5534 if (pd_rp->pd_hdl != NULL) {
5535 status = daplka_ibt_free_pd(pd_rp, pd_rp->pd_hca_hdl,
5536 pd_rp->pd_hdl);
5537 if (status != IBT_SUCCESS) {
5538 DERR("pd_destroy: ibt_free_pd returned %d\n", status);
5539 }
5540 }
5541 DAPLKA_RS_FINI(pd_rp);
5542 kmem_free(pd_rp, sizeof (daplka_pd_resource_t));
5543 D3("pd_destroy: exiting, pd_rp %p\n", pd_rp);
5544 return (0);
5545 }
5546
5547 static void
5548 daplka_hash_pd_free(void *obj)
5600 ibt_status_t status;
5601 boolean_t inserted = B_FALSE;
5602 uint64_t mw_hkey;
5603 ibt_rkey_t mw_rkey;
5604 int retval;
5605
5606 retval = ddi_copyin((void *)arg, &args, sizeof (dapl_mw_alloc_t), mode);
5607 if (retval != 0) {
5608 DERR("mw_alloc: copyin error %d\n", retval);
5609 return (EFAULT);
5610 }
5611
5612 /*
5613 * Allocate and initialize a MW resource
5614 */
5615 mw_rp = kmem_zalloc(sizeof (daplka_mw_resource_t), daplka_km_flags);
5616 if (mw_rp == NULL) {
5617 DERR("mw_alloc: cannot allocate mw resource\n");
5618 return (ENOMEM);
5619 }
5620 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mw_rp))
5621 DAPLKA_RS_INIT(mw_rp, DAPL_TYPE_MW,
5622 DAPLKA_RS_RNUM(ia_rp), daplka_mw_destroy);
5623
5624 mutex_init(&mw_rp->mw_lock, NULL, MUTEX_DRIVER, NULL);
5625 mw_rp->mw_hca = ia_rp->ia_hca;
5626 mw_rp->mw_hca_hdl = ia_rp->ia_hca_hdl;
5627
5628 /* get pd handle */
5629 pd_rp = (daplka_pd_resource_t *)
5630 daplka_hash_lookup(&ia_rp->ia_pd_htbl, args.mw_pd_hkey);
5631 if (pd_rp == NULL) {
5632 DERR("mw_alloc: cannot find pd resource\n");
5633 goto cleanup;
5634 }
5635 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
5636
5637 mw_rp->mw_pd_res = pd_rp;
5638
5639 status = daplka_ibt_alloc_mw(mw_rp, mw_rp->mw_hca_hdl,
5640 pd_rp->pd_hdl, IBT_MW_NOSLEEP, &mw_rp->mw_hdl, &mw_rkey);
5681 goto cleanup;
5682 }
5683
5684 /* insert into mw hash table */
5685 mw_hkey = 0;
5686 retval = daplka_hash_insert(&ia_rp->ia_mw_htbl, &mw_hkey,
5687 (void *)mw_rp);
5688 if (retval != 0) {
5689 DERR("mw_alloc: cannot insert mw resource into mw_htbl\n");
5690 mutex_enter(&ia_rp->ia_lock);
5691 ASSERT(ia_rp->ia_state == DAPLKA_IA_MW_ALLOC_IN_PROGRESS);
5692 ia_rp->ia_mw_alloccnt--;
5693 if (ia_rp->ia_mw_alloccnt == 0) {
5694 ia_rp->ia_state = DAPLKA_IA_INIT;
5695 cv_broadcast(&ia_rp->ia_cv);
5696 }
5697 mutex_exit(&ia_rp->ia_lock);
5698 goto cleanup;
5699 }
5700 inserted = B_TRUE;
5701 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*mw_rp))
5702
5703 D3("mw_alloc: ibt_alloc_mw mw_hdl(%p) mw_rkey(0x%llx)\n",
5704 mw_rp->mw_hdl, (longlong_t)mw_rkey);
5705
5706 mutex_enter(&ia_rp->ia_lock);
5707 /*
5708 * We are done with mw_alloc. If this was the last mw_alloc,
5709 * change state back to DAPLKA_IA_INIT and wake up waiters,
5710 * specifically the unlock callback.
5711 */
5712 ASSERT(ia_rp->ia_state == DAPLKA_IA_MW_ALLOC_IN_PROGRESS);
5713 ia_rp->ia_mw_alloccnt--;
5714 if (ia_rp->ia_mw_alloccnt == 0) {
5715 ia_rp->ia_state = DAPLKA_IA_INIT;
5716 cv_broadcast(&ia_rp->ia_cv);
5717 }
5718 mutex_exit(&ia_rp->ia_lock);
5719
5720 args.mw_hkey = mw_hkey;
5721 args.mw_rkey = mw_rkey;
5776 return (EINVAL);
5777 }
5778
5779 ASSERT(DAPLKA_RS_TYPE(mw_rp) == DAPL_TYPE_MW);
5780
5781 /* UNREF calls the actual free function when refcnt is zero */
5782 DAPLKA_RS_UNREF(mw_rp);
5783 return (retval);
5784 }
5785
5786 /*
5787 * destroys the memory window.
5788 * called when refcnt drops to zero.
5789 */
5790 static int
5791 daplka_mw_destroy(daplka_resource_t *gen_rp)
5792 {
5793 daplka_mw_resource_t *mw_rp = (daplka_mw_resource_t *)gen_rp;
5794 ibt_status_t status;
5795
5796 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mw_rp))
5797 ASSERT(DAPLKA_RS_REFCNT(mw_rp) == 0);
5798 D3("mw_destroy: entering, mw_rp 0x%p, rnum %d\n",
5799 mw_rp, DAPLKA_RS_RNUM(mw_rp));
5800
5801 /*
5802 * free memory window
5803 */
5804 if (mw_rp->mw_hdl) {
5805 status = daplka_ibt_free_mw(mw_rp, mw_rp->mw_hca_hdl,
5806 mw_rp->mw_hdl);
5807 if (status != IBT_SUCCESS) {
5808 DERR("mw_destroy: ibt_free_mw returned %d\n", status);
5809 }
5810 mw_rp->mw_hdl = NULL;
5811 D3("mw_destroy: mw freed\n");
5812 }
5813
5814 /*
5815 * release reference on PD
5816 */
6186 ibt_srv_bind_t sb_args;
6187 ibt_status_t status;
6188 ib_svc_id_t retsid = 0;
6189 uint64_t sp_hkey = 0;
6190 boolean_t bumped = B_FALSE;
6191 int backlog_size;
6192 int retval = 0;
6193
6194 retval = ddi_copyin((void *)arg, &args,
6195 sizeof (dapl_service_register_t), mode);
6196 if (retval != 0) {
6197 DERR("service_register: copyin error %d\n", retval);
6198 return (EINVAL);
6199 }
6200
6201 sp_rp = kmem_zalloc(sizeof (*sp_rp), daplka_km_flags);
6202 if (sp_rp == NULL) {
6203 DERR("service_register: cannot allocate sp resource\n");
6204 return (ENOMEM);
6205 }
6206 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sp_rp))
6207 DAPLKA_RS_INIT(sp_rp, DAPL_TYPE_SP,
6208 DAPLKA_RS_RNUM(ia_rp), daplka_sp_destroy);
6209
6210 /* check if evd exists */
6211 evd_rp = (daplka_evd_resource_t *)
6212 daplka_hash_lookup(&ia_rp->ia_evd_htbl, args.sr_evd_hkey);
6213 if (evd_rp == NULL) {
6214 DERR("service_register: evd resource not found\n");
6215 retval = EINVAL;
6216 goto cleanup;
6217 }
6218 /*
6219 * initialize backlog size
6220 */
6221 if (evd_rp && evd_rp->evd_cq_real_size > 0) {
6222 backlog_size = evd_rp->evd_cq_real_size + 1;
6223 } else {
6224 backlog_size = DAPLKA_DEFAULT_SP_BACKLOG;
6225 }
6226 D2("service_register: args.sr_sid = %llu\n", (longlong_t)args.sr_sid);
6280 *rvalp = (int)status;
6281 retval = 0;
6282 goto cleanup;
6283 }
6284
6285 /*
6286 * need to bump refcnt because the global hash table will
6287 * have a reference to sp_rp
6288 */
6289 DAPLKA_RS_REF(sp_rp);
6290 bumped = B_TRUE;
6291
6292 /* insert into global sp hash table */
6293 sp_rp->sp_global_hkey = 0;
6294 retval = daplka_hash_insert(&daplka_global_sp_htbl,
6295 &sp_rp->sp_global_hkey, (void *)sp_rp);
6296 if (retval != 0) {
6297 DERR("service_register: cannot insert sp resource\n");
6298 goto cleanup;
6299 }
6300 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*sp_rp))
6301
6302 /* insert into per-IA sp hash table */
6303 retval = daplka_hash_insert(&ia_rp->ia_sp_htbl,
6304 &sp_hkey, (void *)sp_rp);
6305 if (retval != 0) {
6306 DERR("service_register: cannot insert sp resource\n");
6307 goto cleanup;
6308 }
6309
6310 /* pass index to application */
6311 args.sr_sp_hkey = sp_hkey;
6312 retval = ddi_copyout(&args, (void *)arg,
6313 sizeof (dapl_service_register_t), mode);
6314 if (retval != 0) {
6315 DERR("service_register: copyout error %d\n", retval);
6316 retval = EFAULT;
6317 goto cleanup;
6318 }
6319 return (0);
6320
6408
6409 /* remove the global reference */
6410 if (g_sp_rp == sp_rp) {
6411 DAPLKA_RS_UNREF(g_sp_rp);
6412 }
6413
6414 DAPLKA_RS_UNREF(sp_rp);
6415 return (0);
6416 }
6417
6418 /*
6419 * destroys a service point.
6420 * called when the refcnt drops to zero.
6421 */
6422 static int
6423 daplka_sp_destroy(daplka_resource_t *gen_rp)
6424 {
6425 daplka_sp_resource_t *sp_rp = (daplka_sp_resource_t *)gen_rp;
6426 ibt_status_t status;
6427
6428 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*sp_rp))
6429 ASSERT(DAPLKA_RS_REFCNT(sp_rp) == 0);
6430 D3("sp_destroy: entering, sp_rp %p, rnum %d\n",
6431 sp_rp, DAPLKA_RS_RNUM(sp_rp));
6432
6433 /*
6434 * it is possible for pending connections to remain
6435 * on an SP. We need to clean them up here.
6436 */
6437 if (sp_rp->sp_backlog != NULL) {
6438 ibt_cm_proceed_reply_t proc_reply;
6439 int i, cnt = 0;
6440 void *spcp_sidp;
6441
6442 for (i = 0; i < sp_rp->sp_backlog_size; i++) {
6443 if (sp_rp->sp_backlog[i].spcp_state ==
6444 DAPLKA_SPCP_PENDING) {
6445 cnt++;
6446 if (sp_rp->sp_backlog[i].spcp_sid == NULL) {
6447 DERR("sp_destroy: "
6448 "spcp_sid == NULL!\n");
6566 mutex_enter(&spp->sp_lock);
6567 for (bkl_index = 0; bkl_index < spp->sp_backlog_size; bkl_index++) {
6568 if (spp->sp_backlog[bkl_index].spcp_state == DAPLKA_SPCP_INIT) {
6569 conn = &spp->sp_backlog[bkl_index];
6570 ASSERT(conn->spcp_sid == NULL);
6571 conn->spcp_state = DAPLKA_SPCP_PENDING;
6572 conn->spcp_sid = event->cm_session_id;
6573 break;
6574 }
6575 }
6576 mutex_exit(&spp->sp_lock);
6577
6578 /*
6579 * too many pending connections
6580 */
6581 if (bkl_index == spp->sp_backlog_size) {
6582 DERR("service_req: pending connections exceeded backlog limit %d\n",
6583 spp->sp_backlog_size);
6584 return (IBT_CM_NO_RESOURCE);
6585 }
6586 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*conn))
6587
6588 /*
6589 * save data for cr_handoff
6590 */
6591 if (pr_data != NULL && pr_len > 0) {
6592 int trunc_len = pr_len;
6593
6594 if (trunc_len > DAPL_MAX_PRIVATE_DATA_SIZE) {
6595 DERR("service_req: private data truncated\n");
6596 trunc_len = DAPL_MAX_PRIVATE_DATA_SIZE;
6597 }
6598 conn->spcp_req_len = trunc_len;
6599 bcopy(pr_data, conn->spcp_req_data, trunc_len);
6600 } else {
6601 conn->spcp_req_len = 0;
6602 }
6603 conn->spcp_rdma_ra_in = event->cm_event.req.req_rdma_ra_in;
6604 conn->spcp_rdma_ra_out = event->cm_event.req.req_rdma_ra_out;
6605
6606 /*
6654 daplka_cm_delay, NULL, 0);
6655 if (status != IBT_SUCCESS) {
6656 DERR("service_req: ibt_cm_delay failed %d\n", status);
6657 cm_status = IBT_CM_NO_RESOURCE;
6658 goto cleanup;
6659 }
6660
6661 /*
6662 * enqueue cr_ev onto the cr_events list of the EVD
6663 * corresponding to the SP
6664 */
6665 D2("service_req: enqueue event(%p) evdp(%p) priv_data(%p) "
6666 "priv_len(%d) psep(0x%llx)\n", cr_ev, spp->sp_evd_res,
6667 cr_ev->ee_cmev.ec_cm_ev_priv_data,
6668 (int)cr_ev->ee_cmev.ec_cm_ev_priv_data_len,
6669 (longlong_t)cr_ev->ee_cmev.ec_cm_psep_cookie);
6670
6671 daplka_evd_wakeup(spp->sp_evd_res,
6672 &spp->sp_evd_res->evd_cr_events, cr_ev);
6673
6674 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*conn))
6675 return (IBT_CM_DEFER);
6676
6677 cleanup:;
6678 /*
6679 * free the cr event
6680 */
6681 if (cr_ev != NULL) {
6682 if (cr_ev->ee_cmev.ec_cm_ev_priv_data != NULL) {
6683 kmem_free(cr_ev->ee_cmev.ec_cm_ev_priv_data, pr_len);
6684 cr_ev->ee_cmev.ec_cm_ev_priv_data = NULL;
6685 cr_ev->ee_cmev.ec_cm_ev_priv_data_len = 0;
6686 }
6687 kmem_free(cr_ev, sizeof (daplka_evd_event_t));
6688 }
6689 /*
6690 * release our slot in the backlog array
6691 */
6692 if (conn != NULL) {
6693 mutex_enter(&spp->sp_lock);
6694 ASSERT(conn->spcp_state == DAPLKA_SPCP_PENDING);
6977 }
6978
6979 /*
6980 * this is the passive side CM handler. it gets registered
6981 * when an SP resource is created in daplka_service_register.
6982 */
6983 static ibt_cm_status_t
6984 daplka_cm_service_handler(void *cm_private, ibt_cm_event_t *event,
6985 ibt_cm_return_args_t *ret_args, void *priv_data, ibt_priv_data_len_t len)
6986 {
6987 daplka_sp_resource_t *sp_rp = (daplka_sp_resource_t *)cm_private;
6988
6989 if (sp_rp == NULL) {
6990 DERR("service_handler: sp_rp == NULL\n");
6991 return (IBT_CM_NO_RESOURCE);
6992 }
6993 /*
6994 * default is not to return priv data
6995 */
6996 if (ret_args != NULL) {
6997 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ret_args))
6998 ret_args->cm_ret_len = 0;
6999 }
7000
7001 switch (event->cm_type) {
7002 case IBT_CM_EVENT_REQ_RCV:
7003 D2("service_handler: IBT_CM_EVENT_REQ_RCV\n");
7004 return (daplka_cm_service_req(sp_rp, event, ret_args,
7005 event->cm_priv_data, event->cm_priv_data_len));
7006
7007 case IBT_CM_EVENT_REP_RCV:
7008 /* passive side should not receive this event */
7009 D2("service_handler: IBT_CM_EVENT_REP_RCV\n");
7010 return (IBT_CM_DEFAULT);
7011
7012 case IBT_CM_EVENT_CONN_CLOSED:
7013 D2("service_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
7014 event->cm_event.closed);
7015 return (daplka_cm_service_conn_closed(sp_rp, event, ret_args,
7016 priv_data, len));
7017
7406 }
7407
7408 /*
7409 * This is the active side CM handler. It gets registered when
7410 * ibt_open_rc_channel is called.
7411 */
7412 static ibt_cm_status_t
7413 daplka_cm_rc_handler(void *cm_private, ibt_cm_event_t *event,
7414 ibt_cm_return_args_t *ret_args, void *priv_data, ibt_priv_data_len_t len)
7415 {
7416 daplka_ep_resource_t *ep_rp = (daplka_ep_resource_t *)cm_private;
7417
7418 if (ep_rp == NULL) {
7419 DERR("rc_handler: ep_rp == NULL\n");
7420 return (IBT_CM_NO_CHANNEL);
7421 }
7422 /*
7423 * default is not to return priv data
7424 */
7425 if (ret_args != NULL) {
7426 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ret_args))
7427 ret_args->cm_ret_len = 0;
7428 }
7429
7430 switch (event->cm_type) {
7431 case IBT_CM_EVENT_REQ_RCV:
7432 /* active side should not receive this event */
7433 D2("rc_handler: IBT_CM_EVENT_REQ_RCV\n");
7434 break;
7435
7436 case IBT_CM_EVENT_REP_RCV:
7437 /* connection accepted by passive side */
7438 D2("rc_handler: IBT_CM_EVENT_REP_RCV\n");
7439 return (daplka_cm_rc_rep_rcv(ep_rp, event, ret_args,
7440 priv_data, len));
7441
7442 case IBT_CM_EVENT_CONN_CLOSED:
7443 D2("rc_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
7444 event->cm_event.closed);
7445 return (daplka_cm_rc_conn_closed(ep_rp, event, ret_args,
7446 priv_data, len));
7528 return (EINVAL);
7529 }
7530 hca_hdl = hca->hca_hdl;
7531 if (hca_hdl == NULL) {
7532 DERR("ia_create: hca_hdl == NULL\n");
7533 DAPLKA_RELE_HCA(daplka_dev, hca);
7534 return (EINVAL);
7535 }
7536 status = ibt_query_hca_ports(hca_hdl, (uint8_t)args.ia_port,
7537 &pinfop, &pinfon, &size);
7538 if (status != IBT_SUCCESS) {
7539 DERR("ia_create: ibt_query_hca_ports returned %d\n", status);
7540 *rvalp = (int)status;
7541 DAPLKA_RELE_HCA(daplka_dev, hca);
7542 return (0);
7543 }
7544 sgid = pinfop->p_sgid_tbl[0];
7545 ibt_free_portinfo(pinfop, size);
7546
7547 ia_rp = kmem_zalloc(sizeof (daplka_ia_resource_t), daplka_km_flags);
7548 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ia_rp))
7549 DAPLKA_RS_INIT(ia_rp, DAPL_TYPE_IA, rnum, daplka_ia_destroy);
7550
7551 mutex_init(&ia_rp->ia_lock, NULL, MUTEX_DRIVER, NULL);
7552 cv_init(&ia_rp->ia_cv, NULL, CV_DRIVER, NULL);
7553 ia_rp->ia_hca_hdl = hca_hdl;
7554 ia_rp->ia_hca_sgid = sgid;
7555 ia_rp->ia_hca = hca;
7556 ia_rp->ia_port_num = args.ia_port;
7557 ia_rp->ia_port_pkey = args.ia_pkey;
7558 ia_rp->ia_pid = ddi_get_pid();
7559 ia_rp->ia_async_evd_hkeys = NULL;
7560 ia_rp->ia_ar_registered = B_FALSE;
7561 bcopy(args.ia_sadata, ia_rp->ia_sadata, DAPL_ATS_NBYTES);
7562
7563 /* register Address Record */
7564 ar_s.ar_gid = ia_rp->ia_hca_sgid;
7565 ar_s.ar_pkey = ia_rp->ia_port_pkey;
7566 bcopy(ia_rp->ia_sadata, ar_s.ar_data, DAPL_ATS_NBYTES);
7567 #define UC(b) ar_s.ar_data[(b)]
7568 D3("daplka_ia_create: SA[8] %d.%d.%d.%d\n",
7620 daplka_hash_sp_free, daplka_hash_generic_lookup);
7621 if (retval != 0) {
7622 DERR("ia_create: cannot create sp hash table\n");
7623 goto cleanup;
7624 }
7625 retval = daplka_hash_create(&ia_rp->ia_srq_htbl, DAPLKA_SRQ_HTBL_SZ,
7626 daplka_hash_srq_free, daplka_hash_generic_lookup);
7627 if (retval != 0) {
7628 DERR("ia_create: cannot create srq hash table\n");
7629 goto cleanup;
7630 }
7631 /*
7632 * insert ia_rp into the global resource table
7633 */
7634 retval = daplka_resource_insert(rnum, (daplka_resource_t *)ia_rp);
7635 if (retval != 0) {
7636 DERR("ia_create: cannot insert resource\n");
7637 goto cleanup;
7638 }
7639 inserted = B_TRUE;
7640 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*ia_rp))
7641
7642 args.ia_resnum = rnum;
7643 retval = copyout(&args, (void *)arg, sizeof (dapl_ia_create_t));
7644 if (retval != 0) {
7645 DERR("ia_create: copyout error %d\n", retval);
7646 retval = EFAULT;
7647 goto cleanup;
7648 }
7649 return (0);
7650
7651 cleanup:;
7652 if (inserted) {
7653 tmp_rp = (daplka_ia_resource_t *)daplka_resource_remove(rnum);
7654 if (tmp_rp != ia_rp) {
7655 /*
7656 * we can return here because another thread must
7657 * have freed up the resource
7658 */
7659 DERR("ia_create: cannot remove resource\n");
7660 return (retval);
7661 }
7662 }
7663 DAPLKA_RS_UNREF(ia_rp);
7664 return (retval);
7665 }
7666
7667 /*
7668 * destroys an IA resource
7669 */
7670 static int
7671 daplka_ia_destroy(daplka_resource_t *gen_rp)
7672 {
7673 daplka_ia_resource_t *ia_rp = (daplka_ia_resource_t *)gen_rp;
7674 daplka_async_evd_hkey_t *hkp;
7675 int cnt;
7676 ibt_ar_t ar_s;
7677
7678 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*ia_rp))
7679 D3("ia_destroy: entering, ia_rp 0x%p\n", ia_rp);
7680
7681 /* deregister Address Record */
7682 if (ia_rp->ia_ar_registered) {
7683 ar_s.ar_gid = ia_rp->ia_hca_sgid;
7684 ar_s.ar_pkey = ia_rp->ia_port_pkey;
7685 bcopy(ia_rp->ia_sadata, ar_s.ar_data, DAPL_ATS_NBYTES);
7686 (void) ibt_deregister_ar(daplka_dev->daplka_clnt_hdl, &ar_s);
7687 ia_rp->ia_ar_registered = B_FALSE;
7688 }
7689
7690 /*
7691 * destroy hash tables. make sure resources are destroyed in the
7692 * correct order (dependents such as MRs and MWs go before the PDs they reference).
7693 */
7694 daplka_hash_destroy(&ia_rp->ia_mw_htbl);
7695 daplka_hash_destroy(&ia_rp->ia_mr_htbl);
7696 daplka_hash_destroy(&ia_rp->ia_ep_htbl);
7697 daplka_hash_destroy(&ia_rp->ia_srq_htbl);
7698 daplka_hash_destroy(&ia_rp->ia_evd_htbl);
9066 * len - last valid entry in array.
9067 *
9068 * A search operation based on a resource number is as follows:
9069 * index = rnum / RESOURCE_BLKSZ;
9070 * ASSERT(index < resource_block.len);
9071 * ASSERT(index < resource_block.sz);
9072 * offset = rnum % RESOURCE_BLKSZ;
9073 * ASSERT(offset >= resource_block.root[index]->base);
9074 * ASSERT(offset < resource_block.root[index]->base + RESOURCE_BLKSZ);
9075 * return resource_block.root[index]->blks[offset];
9076 *
9077 * A resource blk is freed when its used count reaches zero.
9078 */
9079
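/*
 * Editorial sketch (not part of the driver): a lookup routine that
 * follows the search steps described in the comment above.  Member
 * names come from the declarations and pseudocode in this fragment;
 * the per-block "blks" array name follows the pseudocode and is an
 * assumption.
 */
#if 0
static daplka_resource_t *
daplka_resource_lookup_sketch(int rnum)
{
	daplka_resource_t	*rp = NULL;
	int			index = rnum / DAPLKA_RC_BLKSZ;
	int			offset = rnum % DAPLKA_RC_BLKSZ;

	rw_enter(&daplka_resource.daplka_rct_lock, RW_READER);
	if (index < daplka_resource.daplka_rc_len &&
	    daplka_resource.daplka_rc_root[index] != NULL) {
		rp = daplka_resource.daplka_rc_root[index]->blks[offset];
	}
	rw_exit(&daplka_resource.daplka_rct_lock);
	return (rp);
}
#endif
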
9080 /*
9081 * initializes the global resource table
9082 */
9083 static void
9084 daplka_resource_init(void)
9085 {
9086 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(daplka_resource))
9087 rw_init(&daplka_resource.daplka_rct_lock, NULL, RW_DRIVER, NULL);
9088 daplka_resource.daplka_rc_len = 0;
9089 daplka_resource.daplka_rc_sz = 0;
9090 daplka_resource.daplka_rc_cnt = 0;
9091 daplka_resource.daplka_rc_flag = 0;
9092 daplka_resource.daplka_rc_root = NULL;
9093 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(daplka_resource))
9094 }
9095
9096 /*
9097 * destroys the global resource table
9098 */
9099 static void
9100 daplka_resource_fini(void)
9101 {
9102 int i;
9103
9104 rw_enter(&daplka_resource.daplka_rct_lock, RW_WRITER);
9105 for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
9106 daplka_resource_blk_t *blk;
9107 int j;
9108
9109 blk = daplka_resource.daplka_rc_root[i];
9110 if (blk == NULL) {
9111 continue;
9112 }
9113 for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
9412 * applied on the object before
9413 * daplka_hash_lookup returns
9414 * output:
9415 * none
9416 *
9417 * return value(s):
9418 * EINVAL nbuckets is not a power of 2
9419 * ENOMEM cannot allocate buckets
9420 * 0 success
9421 */
9422 static int
9423 daplka_hash_create(daplka_hash_table_t *htblp, uint_t nbuckets,
9424 void (*free_func)(void *), void (*lookup_func)(void *))
9425 {
9426 int i;
9427
9428 if ((nbuckets & ~(nbuckets - 1)) != nbuckets) {
9429 DERR("hash_create: nbuckets not power of 2\n");
9430 return (EINVAL);
9431 }
9432 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*htblp))
9433
9434 htblp->ht_buckets =
9435 kmem_zalloc(sizeof (daplka_hash_bucket_t) * nbuckets,
9436 daplka_km_flags);
9437 if (htblp->ht_buckets == NULL) {
9438 DERR("hash_create: cannot allocate buckets\n");
9439 return (ENOMEM);
9440 }
9441 for (i = 0; i < nbuckets; i++) {
9442 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(htblp->ht_buckets[i]))
9443 htblp->ht_buckets[i].hb_count = 0;
9444 htblp->ht_buckets[i].hb_entries = NULL;
9445 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(htblp->ht_buckets[i]))
9446 }
9447 rw_init(&htblp->ht_table_lock, NULL, RW_DRIVER, NULL);
9448 mutex_init(&htblp->ht_key_lock, NULL, MUTEX_DRIVER, NULL);
9449
9450 htblp->ht_count = 0;
9451 htblp->ht_next_hkey = (uint64_t)gethrtime();
9452 htblp->ht_nbuckets = nbuckets;
9453 htblp->ht_free_func = free_func;
9454 htblp->ht_lookup_func = lookup_func;
9455 htblp->ht_initialized = B_TRUE;
9456 D3("hash_create: done, buckets = %d\n", nbuckets);
9457 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*htblp))
9458 return (0);
9459 }
9460
9461 /*
9462 * daplka_hash_insert:
9463 * inserts an object into a hash table
9464 *
9465 * input:
9466 * htblp pointer to hash table
9467 *
9468 * hkeyp pointer to hash key.
9469 * *hkeyp being non-zero means that the caller
9470 * has generated its own hkey. if *hkeyp is zero,
9471 * this function will generate an hkey for the
9472 * caller. it is recommended that the caller
9473 * leave the hkey generation to this function
9474 * because the hkey is more likely to be evenly
9475 * distributed.
9476 *
9477 * objp pointer to object to be inserted into
9631 * allows the caller to choose what type
9632 * of lock to acquire before walking the
9633 * table.
9634 *
9635 * output:
9636 * none
9637 *
9638 * return value(s):
9639 * none
9640 */
9641 static void
9642 daplka_hash_walk(daplka_hash_table_t *htblp, int (*func)(void *, void *),
9643 void *farg, krw_t lockmode)
9644 {
9645 daplka_hash_entry_t *curr_hep;
9646 daplka_hash_bucket_t *hbp;
9647 uint32_t bucket, retval = 0;
9648
9649 ASSERT(lockmode == RW_WRITER || lockmode == RW_READER);
9650
9651 /* needed for warlock */
9652 if (lockmode == RW_WRITER) {
9653 rw_enter(&htblp->ht_table_lock, RW_WRITER);
9654 } else {
9655 rw_enter(&htblp->ht_table_lock, RW_READER);
9656 }
9657 for (bucket = 0; bucket < htblp->ht_nbuckets && retval == 0; bucket++) {
9658 hbp = &htblp->ht_buckets[bucket];
9659 curr_hep = hbp->hb_entries;
9660 while (curr_hep != NULL) {
9661 retval = (*func)(curr_hep->he_objp, farg);
9662 if (retval != 0) {
9663 break;
9664 }
9665 curr_hep = curr_hep->he_next;
9666 }
9667 }
9668 rw_exit(&htblp->ht_table_lock);
9669 }
9670
9671 /*
4889 goto cleanup;
4890 }
4891
4892 /*
4893 * we bump reference of mr_rp and enqueue it onto smrp.
4894 */
4895 DAPLKA_RS_REF(mr_rp);
4896 mr_rp->mr_next = smrp->smr_mr_list;
4897 smrp->smr_mr_list = mr_rp;
4898 mr_rp->mr_shared_mr = smrp;
4899
4900 /* insert into mr hash table */
4901 retval = daplka_hash_insert(&ia_rp->ia_mr_htbl,
4902 &mr_hkey, (void *)mr_rp);
4903 if (retval != 0) {
4904 DERR("mr_register_shared: cannot insert mr resource\n");
4905 goto cleanup;
4906 }
4907 inserted = B_TRUE;
4908
4909 /*
4910 * at this point, there are two references to our mr resource.
4911 * one is kept in ia_mr_htbl. the other is kept in the list
4912 * within this shared mr object (smrp). when we deregister this
4913 * mr or when a callback invalidates this mr, the reference kept
4914 * by this shared mr object will be removed.
4915 */
4916
4917 args.mrs_lkey = mr_rp->mr_desc.md_lkey;
4918 args.mrs_rkey = mr_rp->mr_desc.md_rkey;
4919 args.mrs_hkey = mr_hkey;
4920
4921 retval = ddi_copyout((void *)&args, (void *)arg,
4922 sizeof (dapl_mr_register_shared_t), mode);
4923 if (retval != 0) {
4924 DERR("mr_register_shared: copyout error %d\n", retval);
4925 retval = EFAULT;
4926 goto cleanup;
4927 }
4942 (void) daplka_hash_remove(&ia_rp->ia_mr_htbl, mr_hkey,
4943 (void **)&free_rp);
4944 if (free_rp != mr_rp) {
4945 DERR("mr_register_shared: "
4946 "cannot remove mr from hash table\n");
4947 /*
4948 * we can only get here if another thread
4949 * has completed the cleanup in mr_deregister
4950 */
4951 return (retval);
4952 }
4953 }
4954 if (smrp != NULL) {
4955 mutex_enter(&daplka_shared_mr_lock);
4956 ASSERT(smrp->smr_refcnt > 0);
4957 smrp->smr_refcnt--;
4958
4959 if (smrp->smr_refcnt == 0) {
4960 DERR("mr_register_shared: freeing smrp 0x%p\n", smrp);
4961 avl_remove(&daplka_shared_mr_tree, smrp);
4962 if (smrp->smr_mr_list != NULL) {
4963 /*
4964 * the refcnt is 0. if there is anything
4965 * left on the list, it must be ours.
4966 */
4967 ASSERT(smrp->smr_mr_list == mr_rp);
4968 DAPLKA_RS_UNREF(mr_rp);
4969 smrp->smr_mr_list = NULL;
4970 ASSERT(mr_rp->mr_shared_mr == smrp);
4971 mr_rp->mr_shared_mr = NULL;
4972 ASSERT(mr_rp->mr_next == NULL);
4973 }
4974 smrp->smr_state = DAPLKA_SMR_FREED;
4975 cv_destroy(&smrp->smr_cv);
4976 kmem_free(smrp, sizeof (daplka_shared_mr_t));
4977 } else {
4978 DERR("mr_register_shared: resetting smr_state "
4979 "smrp 0x%p, %d waiters remain\n", smrp,
4980 smrp->smr_refcnt);
4981 ASSERT(smrp->smr_state == DAPLKA_SMR_TRANSITIONING);
4982 if (smrp->smr_mr_list != NULL && mr_rp != NULL) {
4983 daplka_mr_resource_t **mpp;
4984
4985 /*
4986 * search and remove mr_rp from smr_mr_list
4987 */
4988 mpp = &smrp->smr_mr_list;
4989 while (*mpp != NULL) {
4990 if (*mpp == mr_rp) {
4991 *mpp = (*mpp)->mr_next;
4992 DAPLKA_RS_UNREF(mr_rp);
4993 ASSERT(mr_rp->mr_shared_mr ==
4994 smrp);
4995 mr_rp->mr_shared_mr = NULL;
4996 mr_rp->mr_next = NULL;
4997 break;
4998 }
4999 mpp = &(*mpp)->mr_next;
5000 }
5001 }
5002 /*
5003 * note that smr_state == READY does not necessarily
5004 * mean that smr_mr_list is non empty. for this case,
5005 * we are doing cleanup because of a failure. we set
5006 * the state to READY to allow other threads to
5007 * continue.
5039 retval = ddi_copyin((void *)arg, &args,
5040 sizeof (dapl_mr_register_lmr_t), mode);
5041 if (retval != 0) {
5042 DERR("mr_register_lmr: copyin error %d\n", retval);
5043 return (EINVAL);
5044 }
5045 orig_mr_rp = (daplka_mr_resource_t *)
5046 daplka_hash_lookup(&ia_rp->ia_mr_htbl, args.mrl_orig_hkey);
5047 if (orig_mr_rp == NULL) {
5048 DERR("mr_register_lmr: cannot find mr resource\n");
5049 return (EINVAL);
5050 }
5051 ASSERT(DAPLKA_RS_TYPE(orig_mr_rp) == DAPL_TYPE_MR);
5052
5053 mr_rp = kmem_zalloc(sizeof (daplka_mr_resource_t), daplka_km_flags);
5054 if (mr_rp == NULL) {
5055 DERR("mr_register_lmr: cannot allocate mr resource\n");
5056 retval = ENOMEM;
5057 goto cleanup;
5058 }
5059 DAPLKA_RS_INIT(mr_rp, DAPL_TYPE_MR,
5060 DAPLKA_RS_RNUM(ia_rp), daplka_mr_destroy);
5061
5062 mutex_init(&mr_rp->mr_lock, NULL, MUTEX_DRIVER, NULL);
5063 mr_rp->mr_hca = ia_rp->ia_hca;
5064 mr_rp->mr_hca_hdl = ia_rp->ia_hca_hdl;
5065 mr_rp->mr_next = NULL;
5066 mr_rp->mr_shared_mr = NULL;
5067
5068 DAPLKA_RS_REF(orig_mr_rp->mr_pd_res);
5069 mr_rp->mr_pd_res = orig_mr_rp->mr_pd_res;
5070 mr_rp->mr_attr = orig_mr_rp->mr_attr;
5071
5072 /* Pass the IO addr that was returned while allocating the orig MR */
5073 mem_sattr.mr_vaddr = orig_mr_rp->mr_desc.md_vaddr;
5074 mem_sattr.mr_flags = args.mrl_flags | IBT_MR_NOSLEEP;
5075
5076 status = daplka_ibt_register_shared_mr(mr_rp, ia_rp->ia_hca_hdl,
5077 orig_mr_rp->mr_hdl, mr_rp->mr_pd_res->pd_hdl, &mem_sattr,
5078 &mr_rp->mr_hdl, &mr_rp->mr_desc);
5096 &mr_cb_data_in, sizeof (mr_cb_data_in));
5097
5098 if (status != IBT_SUCCESS) {
5099 DERR("mr_register_lmr: ibt_ci_data_in error(%d) ver(%d)",
5100 status, mr_cb_data_in.mr_rev);
5101 *rvalp = (int)status;
5102 retval = 0;
5103 goto cleanup;
5104 }
5105 mr_rp->mr_attr.mr_len = orig_mr_rp->mr_attr.mr_len;
5106 mr_rp->mr_attr.mr_flags = mem_sattr.mr_flags;
5107
5108 /* insert into mr hash table */
5109 retval = daplka_hash_insert(&ia_rp->ia_mr_htbl, &mr_hkey,
5110 (void *)mr_rp);
5111 if (retval != 0) {
5112 DERR("mr_register_lmr: cannot insert mr resource into mr_htbl\n");
5113 goto cleanup;
5114 }
5115 inserted = B_TRUE;
5116
5117 args.mrl_lkey = mr_rp->mr_desc.md_lkey;
5118 args.mrl_rkey = mr_rp->mr_desc.md_rkey;
5119 args.mrl_hkey = mr_hkey;
5120
5121 retval = ddi_copyout((void *)&args, (void *)arg,
5122 sizeof (dapl_mr_register_lmr_t), mode);
5123 if (retval != 0) {
5124 DERR("mr_register_lmr: copyout error %d\n", retval);
5125 retval = EFAULT;
5126 goto cleanup;
5127 }
5128 if (orig_mr_rp != NULL) {
5129 DAPLKA_RS_UNREF(orig_mr_rp);
5130 }
5131 return (0);
5132
5133 cleanup:;
5134 if (inserted) {
5135 daplka_mr_resource_t *free_rp = NULL;
5326 if (status != IBT_SUCCESS) {
5327 DERR("mr_sync: ibt_sync_mr error %d\n", status);
5328 *rvalp = (int)status;
5329 }
5330 for (i = 0; i < args.mrs_numseg; i++) {
5331 DAPLKA_RS_UNREF(mr_rp[i]);
5332 }
5333 return (0);
5334 }
5335
5336 /*
5337 * destroys a memory region.
5338 * called when refcnt drops to zero.
5339 */
5340 static int
5341 daplka_mr_destroy(daplka_resource_t *gen_rp)
5342 {
5343 daplka_mr_resource_t *mr_rp = (daplka_mr_resource_t *)gen_rp;
5344 ibt_status_t status;
5345
5346 ASSERT(DAPLKA_RS_REFCNT(mr_rp) == 0);
5347 ASSERT(mr_rp->mr_shared_mr == NULL);
5348 D3("mr_destroy: entering, mr_rp 0x%p, rnum %d\n",
5349 mr_rp, DAPLKA_RS_RNUM(mr_rp));
5350
5351 /*
5352 * deregister mr
5353 */
5354 if (mr_rp->mr_hdl) {
5355 status = daplka_ibt_deregister_mr(mr_rp, mr_rp->mr_hca_hdl,
5356 mr_rp->mr_hdl);
5357 if (status != IBT_SUCCESS) {
5358 DERR("mr_destroy: ibt_deregister_mr returned %d\n",
5359 status);
5360 }
5361 mr_rp->mr_hdl = NULL;
5362 D3("mr_destroy: mr deregistered\n");
5363 }
5364 mr_rp->mr_attr.mr_vaddr = NULL;
5365
5417 /*
5418 * allocates a protection domain.
5419 */
5420 /* ARGSUSED */
5421 static int
5422 daplka_pd_alloc(daplka_ia_resource_t *ia_rp, intptr_t arg, int mode,
5423 cred_t *cred, int *rvalp)
5424 {
5425 dapl_pd_alloc_t args;
5426 daplka_pd_resource_t *pd_rp;
5427 ibt_status_t status;
5428 uint64_t pd_hkey = 0;
5429 boolean_t inserted = B_FALSE;
5430 int retval;
5431
5432 pd_rp = kmem_zalloc(sizeof (*pd_rp), daplka_km_flags);
5433 if (pd_rp == NULL) {
5434 DERR("pd_alloc: cannot allocate pd resource\n");
5435 return (ENOMEM);
5436 }
5437 DAPLKA_RS_INIT(pd_rp, DAPL_TYPE_PD,
5438 DAPLKA_RS_RNUM(ia_rp), daplka_pd_destroy);
5439
5440 pd_rp->pd_hca = ia_rp->ia_hca;
5441 pd_rp->pd_hca_hdl = ia_rp->ia_hca_hdl;
5442 status = daplka_ibt_alloc_pd(pd_rp, pd_rp->pd_hca_hdl,
5443 IBT_PD_NO_FLAGS, &pd_rp->pd_hdl);
5444 if (status != IBT_SUCCESS) {
5445 DERR("pd_alloc: ibt_alloc_pd returned %d\n", status);
5446 *rvalp = (int)status;
5447 retval = 0;
5448 goto cleanup;
5449 }
5450
5451 /* insert into pd hash table */
5452 retval = daplka_hash_insert(&ia_rp->ia_pd_htbl,
5453 &pd_hkey, (void *)pd_rp);
5454 if (retval != 0) {
5455 DERR("pd_alloc: cannot insert pd resource into pd_htbl\n");
5456 goto cleanup;
5457 }
5458 inserted = B_TRUE;
5459
5460 /* return hkey to library */
5461 args.pda_hkey = pd_hkey;
5462
5463 retval = ddi_copyout(&args, (void *)arg, sizeof (dapl_pd_alloc_t),
5464 mode);
5465 if (retval != 0) {
5466 DERR("pd_alloc: copyout error %d\n", retval);
5467 retval = EFAULT;
5468 goto cleanup;
5469 }
5470 return (0);
5471
5472 cleanup:;
5473 if (inserted) {
5474 daplka_pd_resource_t *free_rp = NULL;
5475
5476 (void) daplka_hash_remove(&ia_rp->ia_pd_htbl, pd_hkey,
5477 (void **)&free_rp);
5478 if (free_rp != pd_rp) {
5481 * we can only get here if another thread
5482 * has completed the cleanup in pd_free
5483 */
5484 return (retval);
5485 }
5486 }
5487 DAPLKA_RS_UNREF(pd_rp);
5488 return (retval);
5489 }
5490
5491 /*
5492 * destroys a protection domain.
5493 * called when refcnt drops to zero.
5494 */
5495 static int
5496 daplka_pd_destroy(daplka_resource_t *gen_rp)
5497 {
5498 daplka_pd_resource_t *pd_rp = (daplka_pd_resource_t *)gen_rp;
5499 ibt_status_t status;
5500
5501 ASSERT(DAPLKA_RS_REFCNT(pd_rp) == 0);
5502 D3("pd_destroy: entering, pd_rp %p, rnum %d\n",
5503 pd_rp, DAPLKA_RS_RNUM(pd_rp));
5504
5505 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
5506 if (pd_rp->pd_hdl != NULL) {
5507 status = daplka_ibt_free_pd(pd_rp, pd_rp->pd_hca_hdl,
5508 pd_rp->pd_hdl);
5509 if (status != IBT_SUCCESS) {
5510 DERR("pd_destroy: ibt_free_pd returned %d\n", status);
5511 }
5512 }
5513 DAPLKA_RS_FINI(pd_rp);
5514 kmem_free(pd_rp, sizeof (daplka_pd_resource_t));
5515 D3("pd_destroy: exiting, pd_rp %p\n", pd_rp);
5516 return (0);
5517 }
5518
5519 static void
5520 daplka_hash_pd_free(void *obj)
5572 ibt_status_t status;
5573 boolean_t inserted = B_FALSE;
5574 uint64_t mw_hkey;
5575 ibt_rkey_t mw_rkey;
5576 int retval;
5577
5578 retval = ddi_copyin((void *)arg, &args, sizeof (dapl_mw_alloc_t), mode);
5579 if (retval != 0) {
5580 DERR("mw_alloc: copyin error %d\n", retval);
5581 return (EFAULT);
5582 }
5583
5584 /*
5585 * Allocate and initialize a MW resource
5586 */
5587 mw_rp = kmem_zalloc(sizeof (daplka_mw_resource_t), daplka_km_flags);
5588 if (mw_rp == NULL) {
5589 DERR("mw_alloc: cannot allocate mw resource\n");
5590 return (ENOMEM);
5591 }
5592 DAPLKA_RS_INIT(mw_rp, DAPL_TYPE_MW,
5593 DAPLKA_RS_RNUM(ia_rp), daplka_mw_destroy);
5594
5595 mutex_init(&mw_rp->mw_lock, NULL, MUTEX_DRIVER, NULL);
5596 mw_rp->mw_hca = ia_rp->ia_hca;
5597 mw_rp->mw_hca_hdl = ia_rp->ia_hca_hdl;
5598
5599 /* get pd handle */
5600 pd_rp = (daplka_pd_resource_t *)
5601 daplka_hash_lookup(&ia_rp->ia_pd_htbl, args.mw_pd_hkey);
5602 if (pd_rp == NULL) {
5603 DERR("mw_alloc: cannot find pd resource\n");
retval = EINVAL;
5604 goto cleanup;
5605 }
5606 ASSERT(DAPLKA_RS_TYPE(pd_rp) == DAPL_TYPE_PD);
5607
5608 mw_rp->mw_pd_res = pd_rp;
5609
5610 status = daplka_ibt_alloc_mw(mw_rp, mw_rp->mw_hca_hdl,
5611 pd_rp->pd_hdl, IBT_MW_NOSLEEP, &mw_rp->mw_hdl, &mw_rkey);
5652 goto cleanup;
5653 }
5654
5655 /* insert into mw hash table */
5656 mw_hkey = 0;
5657 retval = daplka_hash_insert(&ia_rp->ia_mw_htbl, &mw_hkey,
5658 (void *)mw_rp);
5659 if (retval != 0) {
5660 DERR("mw_alloc: cannot insert mw resource into mw_htbl\n");
5661 mutex_enter(&ia_rp->ia_lock);
5662 ASSERT(ia_rp->ia_state == DAPLKA_IA_MW_ALLOC_IN_PROGRESS);
5663 ia_rp->ia_mw_alloccnt--;
5664 if (ia_rp->ia_mw_alloccnt == 0) {
5665 ia_rp->ia_state = DAPLKA_IA_INIT;
5666 cv_broadcast(&ia_rp->ia_cv);
5667 }
5668 mutex_exit(&ia_rp->ia_lock);
5669 goto cleanup;
5670 }
5671 inserted = B_TRUE;
5672
5673 D3("mw_alloc: ibt_alloc_mw mw_hdl(%p) mw_rkey(0x%llx)\n",
5674 mw_rp->mw_hdl, (longlong_t)mw_rkey);
5675
5676 mutex_enter(&ia_rp->ia_lock);
5677 /*
5678 * We are done with mw_alloc. If this was the last pending
5679 * mw_alloc, change the state back to DAPLKA_IA_INIT and wake
5680 * up any waiters, specifically the unlock callback.
5681 */
5682 ASSERT(ia_rp->ia_state == DAPLKA_IA_MW_ALLOC_IN_PROGRESS);
5683 ia_rp->ia_mw_alloccnt--;
5684 if (ia_rp->ia_mw_alloccnt == 0) {
5685 ia_rp->ia_state = DAPLKA_IA_INIT;
5686 cv_broadcast(&ia_rp->ia_cv);
5687 }
5688 mutex_exit(&ia_rp->ia_lock);
5689
5690 args.mw_hkey = mw_hkey;
5691 args.mw_rkey = mw_rkey;
5746 return (EINVAL);
5747 }
5748
5749 ASSERT(DAPLKA_RS_TYPE(mw_rp) == DAPL_TYPE_MW);
5750
5751 /* UNREF calls the actual free function when refcnt is zero */
5752 DAPLKA_RS_UNREF(mw_rp);
5753 return (retval);
5754 }
5755
5756 /*
5757 * destroys the memory window.
5758 * called when refcnt drops to zero.
5759 */
5760 static int
5761 daplka_mw_destroy(daplka_resource_t *gen_rp)
5762 {
5763 daplka_mw_resource_t *mw_rp = (daplka_mw_resource_t *)gen_rp;
5764 ibt_status_t status;
5765
5766 ASSERT(DAPLKA_RS_REFCNT(mw_rp) == 0);
5767 D3("mw_destroy: entering, mw_rp 0x%p, rnum %d\n",
5768 mw_rp, DAPLKA_RS_RNUM(mw_rp));
5769
5770 /*
5771 * free memory window
5772 */
5773 if (mw_rp->mw_hdl) {
5774 status = daplka_ibt_free_mw(mw_rp, mw_rp->mw_hca_hdl,
5775 mw_rp->mw_hdl);
5776 if (status != IBT_SUCCESS) {
5777 DERR("mw_destroy: ibt_free_mw returned %d\n", status);
5778 }
5779 mw_rp->mw_hdl = NULL;
5780 D3("mw_destroy: mw freed\n");
5781 }
5782
5783 /*
5784 * release reference on PD
5785 */
6155 ibt_srv_bind_t sb_args;
6156 ibt_status_t status;
6157 ib_svc_id_t retsid = 0;
6158 uint64_t sp_hkey = 0;
6159 boolean_t bumped = B_FALSE;
6160 int backlog_size;
6161 int retval = 0;
6162
6163 retval = ddi_copyin((void *)arg, &args,
6164 sizeof (dapl_service_register_t), mode);
6165 if (retval != 0) {
6166 DERR("service_register: copyin error %d\n", retval);
6167 return (EINVAL);
6168 }
6169
6170 sp_rp = kmem_zalloc(sizeof (*sp_rp), daplka_km_flags);
6171 if (sp_rp == NULL) {
6172 DERR("service_register: cannot allocate sp resource\n");
6173 return (ENOMEM);
6174 }
6175 DAPLKA_RS_INIT(sp_rp, DAPL_TYPE_SP,
6176 DAPLKA_RS_RNUM(ia_rp), daplka_sp_destroy);
6177
6178 /* check if evd exists */
6179 evd_rp = (daplka_evd_resource_t *)
6180 daplka_hash_lookup(&ia_rp->ia_evd_htbl, args.sr_evd_hkey);
6181 if (evd_rp == NULL) {
6182 DERR("service_register: evd resource not found\n");
6183 retval = EINVAL;
6184 goto cleanup;
6185 }
6186 /*
6187 * initialize backlog size
6188 */
6189 if (evd_rp && evd_rp->evd_cq_real_size > 0) {
6190 backlog_size = evd_rp->evd_cq_real_size + 1;
6191 } else {
6192 backlog_size = DAPLKA_DEFAULT_SP_BACKLOG;
6193 }
6194 D2("service_register: args.sr_sid = %llu\n", (longlong_t)args.sr_sid);
6248 *rvalp = (int)status;
6249 retval = 0;
6250 goto cleanup;
6251 }
6252
6253 /*
6254 * need to bump refcnt because the global hash table will
6255 * have a reference to sp_rp
6256 */
6257 DAPLKA_RS_REF(sp_rp);
6258 bumped = B_TRUE;
6259
6260 /* insert into global sp hash table */
6261 sp_rp->sp_global_hkey = 0;
6262 retval = daplka_hash_insert(&daplka_global_sp_htbl,
6263 &sp_rp->sp_global_hkey, (void *)sp_rp);
6264 if (retval != 0) {
6265 DERR("service_register: cannot insert sp resource\n");
6266 goto cleanup;
6267 }
6268
6269 /* insert into per-IA sp hash table */
6270 retval = daplka_hash_insert(&ia_rp->ia_sp_htbl,
6271 &sp_hkey, (void *)sp_rp);
6272 if (retval != 0) {
6273 DERR("service_register: cannot insert sp resource\n");
6274 goto cleanup;
6275 }
6276
6277 /* pass index to application */
6278 args.sr_sp_hkey = sp_hkey;
6279 retval = ddi_copyout(&args, (void *)arg,
6280 sizeof (dapl_service_register_t), mode);
6281 if (retval != 0) {
6282 DERR("service_register: copyout error %d\n", retval);
6283 retval = EFAULT;
6284 goto cleanup;
6285 }
6286 return (0);
6287
6375
6376 /* remove the global reference */
6377 if (g_sp_rp == sp_rp) {
6378 DAPLKA_RS_UNREF(g_sp_rp);
6379 }
6380
6381 DAPLKA_RS_UNREF(sp_rp);
6382 return (0);
6383 }
6384
6385 /*
6386 * destroys a service point.
6387 * called when the refcnt drops to zero.
6388 */
6389 static int
6390 daplka_sp_destroy(daplka_resource_t *gen_rp)
6391 {
6392 daplka_sp_resource_t *sp_rp = (daplka_sp_resource_t *)gen_rp;
6393 ibt_status_t status;
6394
6395 ASSERT(DAPLKA_RS_REFCNT(sp_rp) == 0);
6396 D3("sp_destroy: entering, sp_rp %p, rnum %d\n",
6397 sp_rp, DAPLKA_RS_RNUM(sp_rp));
6398
6399 /*
6400 * it is possible for pending connections to remain
6401 * on an SP. We need to clean them up here.
6402 */
6403 if (sp_rp->sp_backlog != NULL) {
6404 ibt_cm_proceed_reply_t proc_reply;
6405 int i, cnt = 0;
6406 void *spcp_sidp;
6407
6408 for (i = 0; i < sp_rp->sp_backlog_size; i++) {
6409 if (sp_rp->sp_backlog[i].spcp_state ==
6410 DAPLKA_SPCP_PENDING) {
6411 cnt++;
6412 if (sp_rp->sp_backlog[i].spcp_sid == NULL) {
6413 DERR("sp_destroy: "
6414 "spcp_sid == NULL!\n");
6532 mutex_enter(&spp->sp_lock);
6533 for (bkl_index = 0; bkl_index < spp->sp_backlog_size; bkl_index++) {
6534 if (spp->sp_backlog[bkl_index].spcp_state == DAPLKA_SPCP_INIT) {
6535 conn = &spp->sp_backlog[bkl_index];
6536 ASSERT(conn->spcp_sid == NULL);
6537 conn->spcp_state = DAPLKA_SPCP_PENDING;
6538 conn->spcp_sid = event->cm_session_id;
6539 break;
6540 }
6541 }
6542 mutex_exit(&spp->sp_lock);
6543
6544 /*
6545 * too many pending connections
6546 */
6547 if (bkl_index == spp->sp_backlog_size) {
6548 DERR("service_req: pending connections exceeded backlog limit %d\n",
6549 spp->sp_backlog_size);
6550 return (IBT_CM_NO_RESOURCE);
6551 }
6552
6553 /*
6554 * save data for cr_handoff
6555 */
6556 if (pr_data != NULL && pr_len > 0) {
6557 int trunc_len = pr_len;
6558
6559 if (trunc_len > DAPL_MAX_PRIVATE_DATA_SIZE) {
6560 DERR("service_req: private data truncated\n");
6561 trunc_len = DAPL_MAX_PRIVATE_DATA_SIZE;
6562 }
6563 conn->spcp_req_len = trunc_len;
6564 bcopy(pr_data, conn->spcp_req_data, trunc_len);
6565 } else {
6566 conn->spcp_req_len = 0;
6567 }
6568 conn->spcp_rdma_ra_in = event->cm_event.req.req_rdma_ra_in;
6569 conn->spcp_rdma_ra_out = event->cm_event.req.req_rdma_ra_out;
6570
6619 daplka_cm_delay, NULL, 0);
6620 if (status != IBT_SUCCESS) {
6621 DERR("service_req: ibt_cm_delay failed %d\n", status);
6622 cm_status = IBT_CM_NO_RESOURCE;
6623 goto cleanup;
6624 }
6625
6626 /*
6627 * enqueue cr_ev onto the cr_events list of the EVD
6628 * corresponding to the SP
6629 */
6630 D2("service_req: enqueue event(%p) evdp(%p) priv_data(%p) "
6631 "priv_len(%d) psep(0x%llx)\n", cr_ev, spp->sp_evd_res,
6632 cr_ev->ee_cmev.ec_cm_ev_priv_data,
6633 (int)cr_ev->ee_cmev.ec_cm_ev_priv_data_len,
6634 (longlong_t)cr_ev->ee_cmev.ec_cm_psep_cookie);
6635
6636 daplka_evd_wakeup(spp->sp_evd_res,
6637 &spp->sp_evd_res->evd_cr_events, cr_ev);
6638
6639 return (IBT_CM_DEFER);
6640
6641 cleanup:;
6642 /*
6643 * free the cr event
6644 */
6645 if (cr_ev != NULL) {
6646 if (cr_ev->ee_cmev.ec_cm_ev_priv_data != NULL) {
6647 kmem_free(cr_ev->ee_cmev.ec_cm_ev_priv_data, pr_len);
6648 cr_ev->ee_cmev.ec_cm_ev_priv_data = NULL;
6649 cr_ev->ee_cmev.ec_cm_ev_priv_data_len = 0;
6650 }
6651 kmem_free(cr_ev, sizeof (daplka_evd_event_t));
6652 }
6653 /*
6654 * release our slot in the backlog array
6655 */
6656 if (conn != NULL) {
6657 mutex_enter(&spp->sp_lock);
6658 ASSERT(conn->spcp_state == DAPLKA_SPCP_PENDING);
6941 }
6942
6943 /*
6944 * this is the passive side CM handler. it gets registered
6945 * when an SP resource is created in daplka_service_register.
6946 */
6947 static ibt_cm_status_t
6948 daplka_cm_service_handler(void *cm_private, ibt_cm_event_t *event,
6949 ibt_cm_return_args_t *ret_args, void *priv_data, ibt_priv_data_len_t len)
6950 {
6951 daplka_sp_resource_t *sp_rp = (daplka_sp_resource_t *)cm_private;
6952
6953 if (sp_rp == NULL) {
6954 DERR("service_handler: sp_rp == NULL\n");
6955 return (IBT_CM_NO_RESOURCE);
6956 }
6957 /*
6958 * default is not to return priv data
6959 */
6960 if (ret_args != NULL) {
6961 ret_args->cm_ret_len = 0;
6962 }
6963
6964 switch (event->cm_type) {
6965 case IBT_CM_EVENT_REQ_RCV:
6966 D2("service_handler: IBT_CM_EVENT_REQ_RCV\n");
6967 return (daplka_cm_service_req(sp_rp, event, ret_args,
6968 event->cm_priv_data, event->cm_priv_data_len));
6969
6970 case IBT_CM_EVENT_REP_RCV:
6971 /* passive side should not receive this event */
6972 D2("service_handler: IBT_CM_EVENT_REP_RCV\n");
6973 return (IBT_CM_DEFAULT);
6974
6975 case IBT_CM_EVENT_CONN_CLOSED:
6976 D2("service_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
6977 event->cm_event.closed);
6978 return (daplka_cm_service_conn_closed(sp_rp, event, ret_args,
6979 priv_data, len));
6980
7369 }
7370
7371 /*
7372 * This is the active side CM handler. It gets registered when
7373 * ibt_open_rc_channel is called.
7374 */
7375 static ibt_cm_status_t
7376 daplka_cm_rc_handler(void *cm_private, ibt_cm_event_t *event,
7377 ibt_cm_return_args_t *ret_args, void *priv_data, ibt_priv_data_len_t len)
7378 {
7379 daplka_ep_resource_t *ep_rp = (daplka_ep_resource_t *)cm_private;
7380
7381 if (ep_rp == NULL) {
7382 DERR("rc_handler: ep_rp == NULL\n");
7383 return (IBT_CM_NO_CHANNEL);
7384 }
7385 /*
7386 * default is not to return priv data
7387 */
7388 if (ret_args != NULL) {
7389 ret_args->cm_ret_len = 0;
7390 }
7391
7392 switch (event->cm_type) {
7393 case IBT_CM_EVENT_REQ_RCV:
7394 /* active side should not receive this event */
7395 D2("rc_handler: IBT_CM_EVENT_REQ_RCV\n");
7396 break;
7397
7398 case IBT_CM_EVENT_REP_RCV:
7399 /* connection accepted by passive side */
7400 D2("rc_handler: IBT_CM_EVENT_REP_RCV\n");
7401 return (daplka_cm_rc_rep_rcv(ep_rp, event, ret_args,
7402 priv_data, len));
7403
7404 case IBT_CM_EVENT_CONN_CLOSED:
7405 D2("rc_handler: IBT_CM_EVENT_CONN_CLOSED %d\n",
7406 event->cm_event.closed);
7407 return (daplka_cm_rc_conn_closed(ep_rp, event, ret_args,
7408 priv_data, len));
7490 return (EINVAL);
7491 }
7492 hca_hdl = hca->hca_hdl;
7493 if (hca_hdl == NULL) {
7494 DERR("ia_create: hca_hdl == NULL\n");
7495 DAPLKA_RELE_HCA(daplka_dev, hca);
7496 return (EINVAL);
7497 }
7498 status = ibt_query_hca_ports(hca_hdl, (uint8_t)args.ia_port,
7499 &pinfop, &pinfon, &size);
7500 if (status != IBT_SUCCESS) {
7501 DERR("ia_create: ibt_query_hca_ports returned %d\n", status);
7502 *rvalp = (int)status;
7503 DAPLKA_RELE_HCA(daplka_dev, hca);
7504 return (0);
7505 }
7506 sgid = pinfop->p_sgid_tbl[0];
7507 ibt_free_portinfo(pinfop, size);
7508
7509 ia_rp = kmem_zalloc(sizeof (daplka_ia_resource_t), daplka_km_flags);
7510 DAPLKA_RS_INIT(ia_rp, DAPL_TYPE_IA, rnum, daplka_ia_destroy);
7511
7512 mutex_init(&ia_rp->ia_lock, NULL, MUTEX_DRIVER, NULL);
7513 cv_init(&ia_rp->ia_cv, NULL, CV_DRIVER, NULL);
7514 ia_rp->ia_hca_hdl = hca_hdl;
7515 ia_rp->ia_hca_sgid = sgid;
7516 ia_rp->ia_hca = hca;
7517 ia_rp->ia_port_num = args.ia_port;
7518 ia_rp->ia_port_pkey = args.ia_pkey;
7519 ia_rp->ia_pid = ddi_get_pid();
7520 ia_rp->ia_async_evd_hkeys = NULL;
7521 ia_rp->ia_ar_registered = B_FALSE;
7522 bcopy(args.ia_sadata, ia_rp->ia_sadata, DAPL_ATS_NBYTES);
7523
7524 /* register Address Record */
7525 ar_s.ar_gid = ia_rp->ia_hca_sgid;
7526 ar_s.ar_pkey = ia_rp->ia_port_pkey;
7527 bcopy(ia_rp->ia_sadata, ar_s.ar_data, DAPL_ATS_NBYTES);
7528 #define UC(b) ar_s.ar_data[(b)]
7529 D3("daplka_ia_create: SA[8] %d.%d.%d.%d\n",
7581 daplka_hash_sp_free, daplka_hash_generic_lookup);
7582 if (retval != 0) {
7583 DERR("ia_create: cannot create sp hash table\n");
7584 goto cleanup;
7585 }
7586 retval = daplka_hash_create(&ia_rp->ia_srq_htbl, DAPLKA_SRQ_HTBL_SZ,
7587 daplka_hash_srq_free, daplka_hash_generic_lookup);
7588 if (retval != 0) {
7589 DERR("ia_create: cannot create srq hash table\n");
7590 goto cleanup;
7591 }
7592 /*
7593 * insert ia_rp into the global resource table
7594 */
7595 retval = daplka_resource_insert(rnum, (daplka_resource_t *)ia_rp);
7596 if (retval != 0) {
7597 DERR("ia_create: cannot insert resource\n");
7598 goto cleanup;
7599 }
7600 inserted = B_TRUE;
7601
7602 args.ia_resnum = rnum;
7603 retval = ddi_copyout(&args, (void *)arg, sizeof (dapl_ia_create_t), mode);
7604 if (retval != 0) {
7605 DERR("ia_create: copyout error %d\n", retval);
7606 retval = EFAULT;
7607 goto cleanup;
7608 }
7609 return (0);
7610
7611 cleanup:;
7612 if (inserted) {
7613 tmp_rp = (daplka_ia_resource_t *)daplka_resource_remove(rnum);
7614 if (tmp_rp != ia_rp) {
7615 /*
7616 * we can return here because another thread must
7617 * have freed up the resource
7618 */
7619 DERR("ia_create: cannot remove resource\n");
7620 return (retval);
7621 }
7622 }
7623 DAPLKA_RS_UNREF(ia_rp);
7624 return (retval);
7625 }
7626
7627 /*
7628 * destroys an IA resource
7629 */
7630 static int
7631 daplka_ia_destroy(daplka_resource_t *gen_rp)
7632 {
7633 daplka_ia_resource_t *ia_rp = (daplka_ia_resource_t *)gen_rp;
7634 daplka_async_evd_hkey_t *hkp;
7635 int cnt;
7636 ibt_ar_t ar_s;
7637
7638 D3("ia_destroy: entering, ia_rp 0x%p\n", ia_rp);
7639
7640 /* deregister Address Record */
7641 if (ia_rp->ia_ar_registered) {
7642 ar_s.ar_gid = ia_rp->ia_hca_sgid;
7643 ar_s.ar_pkey = ia_rp->ia_port_pkey;
7644 bcopy(ia_rp->ia_sadata, ar_s.ar_data, DAPL_ATS_NBYTES);
7645 (void) ibt_deregister_ar(daplka_dev->daplka_clnt_hdl, &ar_s);
7646 ia_rp->ia_ar_registered = B_FALSE;
7647 }
7648
7649 /*
7650 * destroy hash tables. make sure resources are
7651 * destroyed in the correct order.
7652 */
7653 daplka_hash_destroy(&ia_rp->ia_mw_htbl);
7654 daplka_hash_destroy(&ia_rp->ia_mr_htbl);
7655 daplka_hash_destroy(&ia_rp->ia_ep_htbl);
7656 daplka_hash_destroy(&ia_rp->ia_srq_htbl);
7657 daplka_hash_destroy(&ia_rp->ia_evd_htbl);
9025 * len - last valid entry in array.
9026 *
9027 * A search operation based on a resource number is as follows:
9028 * index = rnum / RESOURCE_BLKSZ;
9029 * ASSERT(index < resource_block.len);
9030 * ASSERT(index < resource_block.sz);
9031 * offset = rnum % RESOURCE_BLKSZ;
9032 * ASSERT(offset >= resource_block.root[index]->base);
9033 * ASSERT(offset < resource_block.root[index]->base + RESOURCE_BLKSZ);
9034 * return resource_block.root[index]->blks[offset];
9035 *
9036 * A resource blk is freed when its used count reaches zero.
9037 */
9038
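/*
 * Illustrative sketch (not the driver's actual lookup routine): the
 * two-level search described in the comment above, expressed against
 * the globals visible in this file. The block-internal array name
 * daplka_rcblk_blks is an assumption made for illustration only; the
 * real daplka_resource_blk_t layout may differ.
 */
static daplka_resource_t *
resource_lookup_sketch(int rnum)
{
	daplka_resource_blk_t	*blk;
	daplka_resource_t	*rp = NULL;
	int			index, offset;

	index = rnum / DAPLKA_RC_BLKSZ;
	offset = rnum % DAPLKA_RC_BLKSZ;

	rw_enter(&daplka_resource.daplka_rct_lock, RW_READER);
	if (index < daplka_resource.daplka_rc_len &&
	    (blk = daplka_resource.daplka_rc_root[index]) != NULL) {
		/* assumed field name for the per-block slot array */
		rp = blk->daplka_rcblk_blks[offset];
	}
	rw_exit(&daplka_resource.daplka_rct_lock);
	return (rp);
}
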
9039 /*
9040 * initializes the global resource table
9041 */
9042 static void
9043 daplka_resource_init(void)
9044 {
9045 rw_init(&daplka_resource.daplka_rct_lock, NULL, RW_DRIVER, NULL);
9046 daplka_resource.daplka_rc_len = 0;
9047 daplka_resource.daplka_rc_sz = 0;
9048 daplka_resource.daplka_rc_cnt = 0;
9049 daplka_resource.daplka_rc_flag = 0;
9050 daplka_resource.daplka_rc_root = NULL;
9051 }
9052
9053 /*
9054 * destroys the global resource table
9055 */
9056 static void
9057 daplka_resource_fini(void)
9058 {
9059 int i;
9060
9061 rw_enter(&daplka_resource.daplka_rct_lock, RW_WRITER);
9062 for (i = 0; i < daplka_resource.daplka_rc_len; i++) {
9063 daplka_resource_blk_t *blk;
9064 int j;
9065
9066 blk = daplka_resource.daplka_rc_root[i];
9067 if (blk == NULL) {
9068 continue;
9069 }
9070 for (j = 0; j < DAPLKA_RC_BLKSZ; j++) {
9369 * applied on the object before
9370 * daplka_hash_lookup returns
9371 * output:
9372 * none
9373 *
9374 * return value(s):
9375 * EINVAL nbuckets is not a power of 2
9376 * ENOMEM cannot allocate buckets
9377 * 0 success
9378 */
9379 static int
9380 daplka_hash_create(daplka_hash_table_t *htblp, uint_t nbuckets,
9381 void (*free_func)(void *), void (*lookup_func)(void *))
9382 {
9383 int i;
9384
9385 if ((nbuckets & ~(nbuckets - 1)) != nbuckets) {
9386 DERR("hash_create: nbuckets not power of 2\n");
9387 return (EINVAL);
9388 }
9389
9390 htblp->ht_buckets =
9391 kmem_zalloc(sizeof (daplka_hash_bucket_t) * nbuckets,
9392 daplka_km_flags);
9393 if (htblp->ht_buckets == NULL) {
9394 DERR("hash_create: cannot allocate buckets\n");
9395 return (ENOMEM);
9396 }
9397 for (i = 0; i < nbuckets; i++) {
9398 htblp->ht_buckets[i].hb_count = 0;
9399 htblp->ht_buckets[i].hb_entries = NULL;
9400 }
9401 rw_init(&htblp->ht_table_lock, NULL, RW_DRIVER, NULL);
9402 mutex_init(&htblp->ht_key_lock, NULL, MUTEX_DRIVER, NULL);
9403
9404 htblp->ht_count = 0;
9405 htblp->ht_next_hkey = (uint64_t)gethrtime();
9406 htblp->ht_nbuckets = nbuckets;
9407 htblp->ht_free_func = free_func;
9408 htblp->ht_lookup_func = lookup_func;
9409 htblp->ht_initialized = B_TRUE;
9410 D3("hash_create: done, buckets = %d\n", nbuckets);
9411 return (0);
9412 }
9413
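/*
 * Note on the power-of-two check in daplka_hash_create above:
 * (nbuckets & ~(nbuckets - 1)) isolates the lowest set bit of
 * nbuckets, so it equals nbuckets only when exactly one bit is set,
 * i.e. when nbuckets is a power of two. A power-of-two bucket count
 * lets a key be mapped to a bucket with a mask instead of a modulo.
 * A minimal sketch of that mapping (the driver's actual bucket
 * computation lives in the insert/lookup/remove paths, which are not
 * shown in this excerpt):
 */
static uint32_t
hash_bucket_sketch(daplka_hash_table_t *htblp, uint64_t hkey)
{
	/* equivalent to (hkey % ht_nbuckets) when ht_nbuckets is 2^n */
	return ((uint32_t)(hkey & (htblp->ht_nbuckets - 1)));
}
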
9414 /*
9415 * daplka_hash_insert:
9416 * inserts an object into a hash table
9417 *
9418 * input:
9419 * htblp pointer to hash table
9420 *
9421 * hkeyp pointer to hash key.
9422 * *hkeyp being non-zero means that the caller
9423 * has generated its own hkey. if *hkeyp is zero,
9424 * this function will generate an hkey for the
9425 * caller. it is recommended that the caller
9426 * leave the hkey generation to this function
9427 * because the hkey is more likely to be evenly
9428 * distributed.
9429 *
9430 * objp pointer to object to be inserted into
9584 * allows the caller to choose what type
9585 * of lock to acquire before walking the
9586 * table.
9587 *
9588 * output:
9589 * none
9590 *
9591 * return value(s):
9592 * none
9593 */
9594 static void
9595 daplka_hash_walk(daplka_hash_table_t *htblp, int (*func)(void *, void *),
9596 void *farg, krw_t lockmode)
9597 {
9598 daplka_hash_entry_t *curr_hep;
9599 daplka_hash_bucket_t *hbp;
9600 uint32_t bucket, retval = 0;
9601
9602 ASSERT(lockmode == RW_WRITER || lockmode == RW_READER);
9603
9604 if (lockmode == RW_WRITER) {
9605 rw_enter(&htblp->ht_table_lock, RW_WRITER);
9606 } else {
9607 rw_enter(&htblp->ht_table_lock, RW_READER);
9608 }
9609 for (bucket = 0; bucket < htblp->ht_nbuckets && retval == 0; bucket++) {
9610 hbp = &htblp->ht_buckets[bucket];
9611 curr_hep = hbp->hb_entries;
9612 while (curr_hep != NULL) {
9613 retval = (*func)(curr_hep->he_objp, farg);
9614 if (retval != 0) {
9615 break;
9616 }
9617 curr_hep = curr_hep->he_next;
9618 }
9619 }
9620 rw_exit(&htblp->ht_table_lock);
9621 }
9622
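/*
 * Illustrative sketch (hypothetical helpers, not in the driver): a
 * minimal daplka_hash_walk callback that counts the entries in a
 * table. Returning 0 from the callback keeps the walk going; a
 * nonzero return, as in daplka_sp_match earlier, stops the walk at
 * the current entry. RW_READER is sufficient because the callback
 * does not modify the table. (The table already tracks ht_count;
 * this only illustrates the callback contract.)
 */
/* ARGSUSED */
static int
count_entry_cb(void *objp, void *arg)
{
	(*(uint32_t *)arg)++;
	return (0);	/* 0 == keep walking */
}

static uint32_t
hash_count_sketch(daplka_hash_table_t *htblp)
{
	uint32_t cnt = 0;

	daplka_hash_walk(htblp, count_entry_cb, (void *)&cnt, RW_READER);
	return (cnt);
}
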