210 * protection domain reference count.
211 */
212 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
213 if (status != DDI_SUCCESS) {
214 status = IBT_INSUFF_RESOURCE;
215 goto mrshared_fail1;
216 }
217
218 /*
219 * Allocate the software structure for tracking the shared memory
220 * region (i.e. the Hermon Memory Region handle). If we fail here, we
221 * must undo the protection domain reference count and the previous
222 * resource allocation.
223 */
224 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
225 if (status != DDI_SUCCESS) {
226 status = IBT_INSUFF_RESOURCE;
227 goto mrshared_fail2;
228 }
229 mr = (hermon_mrhdl_t)rsrc->hr_addr;
230 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
231
232 /*
233 * Setup and validate the memory region access flags. This means
234 * translating the IBTF's enable flags into the access flags that
235 * will be used in later operations.
236 */
237 mr->mr_accflag = 0;
238 if (mr_attr->mr_flags & IBT_MR_ENABLE_WINDOW_BIND)
239 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
240 if (mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)
241 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
242 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)
243 mr->mr_accflag |= IBT_MR_REMOTE_READ;
244 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE)
245 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
246 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
247 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
248
249 /*
250 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
303 status = IBT_INSUFF_RESOURCE;
304 goto mrshared_fail4;
305 }
306 }
307
308 /*
309 * Copy the MTT resource pointer (and additional parameters) from
310 * the original Hermon Memory Region handle. Note: this is normally
311 * where the hermon_mr_mem_bind() routine would be called, but because
312 * we already have bound and filled-in MTT entries it is simply a
313 * matter here of managing the MTT reference count and grabbing the
314 * address of the MTT table entries (for filling in the shared region's
315 * MPT entry).
316 */
317 mr->mr_mttrsrcp = mrhdl->mr_mttrsrcp;
318 mr->mr_logmttpgsz = mrhdl->mr_logmttpgsz;
319 mr->mr_bindinfo = mrhdl->mr_bindinfo;
320 mr->mr_mttrefcntp = mrhdl->mr_mttrefcntp;
321 mutex_exit(&mrhdl->mr_lock);
322 bind = &mr->mr_bindinfo;
323 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
324 mtt = mr->mr_mttrsrcp;
325
326 /*
327 * Increment the MTT reference count (to reflect the fact that
328 * the MTT is now shared)
329 */
330 (void) hermon_mtt_refcnt_inc(mr->mr_mttrefcntp);
331
332 /*
333 * Update the new "bind" virtual address. Do some extra work here
334 * to ensure proper alignment. That is, make sure that the page
335 * offset for the beginning of the old range is the same as the
336 * offset for this new mapping
337 */
338 pgsize_msk = (((uint64_t)1 << mr->mr_logmttpgsz) - 1);
339 bind->bi_addr = ((mr_attr->mr_vaddr & ~pgsize_msk) |
340 (mr->mr_bindinfo.bi_addr & pgsize_msk));
341
342 /*
343 * Fill in the MPT entry. This is the final step before passing
480 */
481
482 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
483 if (status != DDI_SUCCESS) {
484 status = IBT_INSUFF_RESOURCE;
485 goto fmralloc_fail1;
486 }
487
488 /*
489 * Allocate the software structure for tracking the fmr memory
490 * region (i.e. the Hermon Memory Region handle). If we fail here, we
491 * must undo the protection domain reference count and the previous
492 * resource allocation.
493 */
494 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
495 if (status != DDI_SUCCESS) {
496 status = IBT_INSUFF_RESOURCE;
497 goto fmralloc_fail2;
498 }
499 mr = (hermon_mrhdl_t)rsrc->hr_addr;
500 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
501
502 /*
503 * Setup and validate the memory region access flags. This means
504 * translating the IBTF's enable flags into the access flags that
505 * will be used in later operations.
506 */
507 mr->mr_accflag = 0;
508 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)
509 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
510 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_READ)
511 mr->mr_accflag |= IBT_MR_REMOTE_READ;
512 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE)
513 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
514 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
515 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
516
517 /*
518 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
519 * from a certain number of "constrained" bits (the least significant
520 * bits) and some number of "unconstrained" bits. The constrained
607 * following fields for use in further operations on the MR. Also, set
608 * that this is an FMR region.
609 */
610 mr->mr_mptrsrcp = mpt;
611 mr->mr_mttrsrcp = mtt;
612
613 mr->mr_mpt_type = HERMON_MPT_DMPT;
614 mr->mr_pdhdl = pd;
615 mr->mr_rsrcp = rsrc;
616 mr->mr_is_fmr = 1;
617 mr->mr_lkey = hermon_mr_key_swap(mr->mr_lkey);
618 mr->mr_rkey = hermon_mr_key_swap(mr->mr_rkey);
619 mr->mr_mttaddr = mtt_addr;
620 (void) memcpy(&mr->mr_bindinfo, &bind, sizeof (hermon_bind_info_t));
621
622 /* initialize hr_addr for use during register/deregister/invalidate */
623 icm_table = &state->hs_icm[HERMON_DMPT];
624 rindx = mpt->hr_indx;
625 hermon_index(index1, index2, rindx, icm_table, i);
626 dma_info = icm_table->icm_dma[index1] + index2;
627 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mpt))
628 mpt->hr_addr = (void *)((uintptr_t)(dma_info->vaddr + i * mpt->hr_len));
629
630 *mrhdl = mr;
631
632 return (DDI_SUCCESS);
633
634 /*
635 * The following is cleanup for all possible failure cases in this routine
636 */
637 fmralloc_fail4:
638 kmem_free(mtt, sizeof (hermon_rsrc_t) * nummtt);
639 fmralloc_fail3:
640 hermon_rsrc_free(state, &rsrc);
641 fmralloc_fail2:
642 hermon_rsrc_free(state, &mpt);
643 fmralloc_fail1:
644 hermon_pd_refcnt_dec(pd);
645 fmralloc_fail:
646 return (status);
647 }
974 /* Set the mrhdl pointer to NULL and return success */
975 *mrhdl = NULL;
976
977 return (DDI_SUCCESS);
978 }
979
980
981 /*
982 * hermon_mr_query()
983 * Context: Can be called from interrupt or base context.
984 */
985 /* ARGSUSED */
986 int
987 hermon_mr_query(hermon_state_t *state, hermon_mrhdl_t mr,
988 ibt_mr_query_attr_t *attr)
989 {
990 int status;
991 hermon_hw_dmpt_t mpt_entry;
992 uint32_t lkey;
993
994 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*attr))
995
996 mutex_enter(&mr->mr_lock);
997
998 /*
999 * Check here to see if the memory region has already been partially
1000 * deregistered as a result of a hermon_umap_umemlock_cb() callback.
1001 * If so, this is an error, return failure.
1002 */
1003 if ((mr->mr_is_umem) && (mr->mr_umemcookie == NULL)) {
1004 mutex_exit(&mr->mr_lock);
1005 return (IBT_MR_HDL_INVALID);
1006 }
1007
1008 status = hermon_cmn_query_cmd_post(state, QUERY_MPT, 0,
1009 mr->mr_lkey >> 8, &mpt_entry, sizeof (hermon_hw_dmpt_t),
1010 HERMON_NOSLEEP);
1011 if (status != HERMON_CMD_SUCCESS) {
1012 cmn_err(CE_CONT, "Hermon: QUERY_MPT failed: status %x", status);
1013 mutex_exit(&mr->mr_lock);
1014 return (ibc_get_ci_failure(0));
1015 }
1260 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
1261 if (status != DDI_SUCCESS) {
1262 status = IBT_INSUFF_RESOURCE;
1263 goto mwalloc_fail1;
1264 }
1265
1266 /*
1267 * Allocate the software structure for tracking the memory window (i.e.
1268 * the Hermon Memory Window handle). Note: This is actually the same
1269 * software structure used for tracking memory regions, but since many
1270 * of the same properties are needed, only a single structure is
1271 * necessary. If we fail here, we must undo the protection domain
1272 * reference count and the previous resource allocation.
1273 */
1274 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1275 if (status != DDI_SUCCESS) {
1276 status = IBT_INSUFF_RESOURCE;
1277 goto mwalloc_fail2;
1278 }
1279 mw = (hermon_mwhdl_t)rsrc->hr_addr;
1280 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mw))
1281
1282 /*
1283 * Calculate an "unbound" RKey from MPT index. In much the same way
1284 * as we do for memory regions (above), this key is constructed from
1285 * a "constrained" (which depends on the MPT index) and an
1286 * "unconstrained" portion (which may be arbitrarily chosen).
1287 */
1288 mw->mr_rkey = hermon_mr_keycalc(mpt->hr_indx);
1289
1290 /*
1291 * Fill in the MPT entry. This is the final step before passing
1292 * ownership of the MPT entry to the Hermon hardware. We use all of
1293 * the information collected/calculated above to fill in the
1294 * requisite portions of the MPT. Note: fewer entries in the MPT
1295 * entry are necessary to allocate a memory window.
1296 */
1297 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
1298 mpt_entry.reg_win = HERMON_MPT_IS_WINDOW;
1299 mpt_entry.mem_key = mw->mr_rkey;
1300 mpt_entry.pd = pd->pd_pdnum;
1359 * current thread context (i.e. if we are currently in the interrupt
1360 * context, then we shouldn't be attempting to sleep).
1361 */
1362 if ((sleep == HERMON_SLEEP) &&
1363 (sleep != HERMON_SLEEPFLAG_FOR_CONTEXT())) {
1364 status = IBT_INVALID_PARAM;
1365 return (status);
1366 }
1367
1368 /*
1369 * Pull all the necessary information from the Hermon Memory Window
1370 * handle. This is necessary here because the resource for the
1371 * MW handle is going to be freed up as part of the this operation.
1372 */
1373 mw = *mwhdl;
1374 mutex_enter(&mw->mr_lock);
1375 mpt = mw->mr_mptrsrcp;
1376 rsrc = mw->mr_rsrcp;
1377 pd = mw->mr_pdhdl;
1378 mutex_exit(&mw->mr_lock);
1379 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mw))
1380
1381 /*
1382 * Reclaim the MPT entry from hardware. Note: in general, it is
1383 * unexpected for this operation to return an error.
1384 */
1385 status = hermon_cmn_ownership_cmd_post(state, HW2SW_MPT, NULL,
1386 0, mpt->hr_indx, sleep);
1387 if (status != HERMON_CMD_SUCCESS) {
1388 cmn_err(CE_CONT, "Hermon: HW2SW_MPT command failed: %08x\n",
1389 status);
1390 if (status == HERMON_CMD_INVALID_STATUS) {
1391 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1392 }
1393 return (ibc_get_ci_failure(0));
1394 }
1395
1396 /* Free the Hermon Memory Window handle */
1397 hermon_rsrc_free(state, &rsrc);
1398
1399 /* Free up the MPT entry resource */
1416 * KKKKKKKK IIIIIIII IIIIIIII IIIIIIII
1417 * where K == the arbitrary bits and I == the index
1418 */
1419 uint32_t
1420 hermon_mr_keycalc(uint32_t indx)
1421 {
1422 uint32_t tmp_key, tmp_indx;
1423
1424 /*
1425 * Generate a simple key from counter. Note: We increment this
1426 * static variable _intentionally_ without any kind of mutex around
1427 * it. First, single-threading all operations through a single lock
1428 * would be a bad idea (from a performance point-of-view). Second,
1429 * the upper "unconstrained" bits don't really have to be unique
1430 * because the lower bits are guaranteed to be (although we do make a
1431 * best effort to ensure that they are). Third, the window for the
1432 * race (where both threads read and update the counter at the same
1433 * time) is incredibly small.
1434 * And, lastly, we'd like to make this into a "random" key
1435 */
1436 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(hermon_memkey_cnt))
1437 tmp_key = (hermon_memkey_cnt++) << HERMON_MEMKEY_SHIFT;
1438 tmp_indx = indx & 0xffffff;
1439 return (tmp_key | tmp_indx);
1440 }
1441
1442
1443 /*
1444 * hermon_mr_key_swap()
1445 * Context: Can be called from interrupt or base context.
1446 * NOTE: Produces a key in the form of
1447 * IIIIIIII IIIIIIII IIIIIIII KKKKKKKK
1448 * where K == the arbitrary bits and I == the index
1449 */
1450 uint32_t
1451 hermon_mr_key_swap(uint32_t indx)
1452 {
1453 /*
1454 * The memory key format to pass down to the hardware is
1455 * (key[7:0],index[23:0]), which defines the index to the
1456 * hardware resource. When the driver passes this as a memory
1545 if (status != DDI_SUCCESS) {
1546 status = IBT_INSUFF_RESOURCE;
1547 goto mrcommon_fail1;
1548 }
1549 } else {
1550 mpt = NULL;
1551 }
1552
1553 /*
1554 * Allocate the software structure for tracking the memory region (i.e.
1555 * the Hermon Memory Region handle). If we fail here, we must undo
1556 * the protection domain reference count and the previous resource
1557 * allocation.
1558 */
1559 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1560 if (status != DDI_SUCCESS) {
1561 status = IBT_INSUFF_RESOURCE;
1562 goto mrcommon_fail2;
1563 }
1564 mr = (hermon_mrhdl_t)rsrc->hr_addr;
1565 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
1566
1567 /*
1568 * Setup and validate the memory region access flags. This means
1569 * translating the IBTF's enable flags into the access flags that
1570 * will be used in later operations.
1571 */
1572 mr->mr_accflag = 0;
1573 if (flags & IBT_MR_ENABLE_WINDOW_BIND)
1574 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
1575 if (flags & IBT_MR_ENABLE_LOCAL_WRITE)
1576 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
1577 if (flags & IBT_MR_ENABLE_REMOTE_READ)
1578 mr->mr_accflag |= IBT_MR_REMOTE_READ;
1579 if (flags & IBT_MR_ENABLE_REMOTE_WRITE)
1580 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
1581 if (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
1582 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
1583
1584 /*
1585 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
1599 * Then, if this is userland memory, allocate an entry in the
1600 * "userland resources database". This will later be added to
1601 * the database (after all further memory registration operations are
1602 * successful). If we fail here, we must undo the reference counts
1603 * and the previous resource allocations.
1604 */
1605 mr_is_umem = (((bind->bi_as != NULL) && (bind->bi_as != &kas)) ? 1 : 0);
1606 if (mr_is_umem) {
1607 umem_len = ptob(btopr(bind->bi_len +
1608 ((uintptr_t)bind->bi_addr & PAGEOFFSET)));
1609 umem_addr = (caddr_t)((uintptr_t)bind->bi_addr & ~PAGEOFFSET);
1610 umem_flags = (DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_READ |
1611 DDI_UMEMLOCK_LONGTERM);
1612 status = umem_lockmemory(umem_addr, umem_len, umem_flags,
1613 &umem_cookie, &hermon_umem_cbops, NULL);
1614 if (status != 0) {
1615 status = IBT_INSUFF_RESOURCE;
1616 goto mrcommon_fail3;
1617 }
1618
1619 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
1620 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind->bi_buf))
1621
1622 bind->bi_buf = ddi_umem_iosetup(umem_cookie, 0, umem_len,
1623 B_WRITE, 0, 0, NULL, DDI_UMEM_SLEEP);
1624 if (bind->bi_buf == NULL) {
1625 status = IBT_INSUFF_RESOURCE;
1626 goto mrcommon_fail3;
1627 }
1628 bind->bi_type = HERMON_BINDHDL_UBUF;
1629 bind->bi_buf->b_flags |= B_READ;
1630
1631 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*bind->bi_buf))
1632 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*bind))
1633
1634 umapdb = hermon_umap_db_alloc(state->hs_instance,
1635 (uint64_t)(uintptr_t)umem_cookie, MLNX_UMAP_MRMEM_RSRC,
1636 (uint64_t)(uintptr_t)rsrc);
1637 if (umapdb == NULL) {
1638 status = IBT_INSUFF_RESOURCE;
1639 goto mrcommon_fail4;
1640 }
1641 }
1642
1643 /*
1644 * Setup the bindinfo for the mtt bind call
1645 */
1646 bh = &mr->mr_bindinfo;
1647 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bh))
1648 bcopy(bind, bh, sizeof (hermon_bind_info_t));
1649 bh->bi_bypass = bind_type;
1650 status = hermon_mr_mtt_bind(state, bh, bind_dmahdl, &mtt,
1651 &mtt_pgsize_bits, mpt != NULL);
1652 if (status != DDI_SUCCESS) {
1653 /*
1654 * When mtt_bind fails, freerbuf has already been done,
1655 * so make sure not to call it again.
1656 */
1657 bind->bi_type = bh->bi_type;
1658 goto mrcommon_fail5;
1659 }
1660 mr->mr_logmttpgsz = mtt_pgsize_bits;
1661
1662 /*
1663 * Allocate MTT reference count (to track shared memory regions).
1664 * This reference count resource may never be used on the given
1665 * memory region, but if it is ever later registered as "shared"
1666 * memory region then this resource will be necessary. If we fail
1667 * here, we do pretty much the same as above to clean up.
1668 */
1669 status = hermon_rsrc_alloc(state, HERMON_REFCNT, 1, sleep,
1670 &mtt_refcnt);
1671 if (status != DDI_SUCCESS) {
1672 status = IBT_INSUFF_RESOURCE;
1673 goto mrcommon_fail6;
1674 }
1675 mr->mr_mttrefcntp = mtt_refcnt;
1676 swrc_tmp = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
1677 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrc_tmp))
1678 HERMON_MTT_REFCNT_INIT(swrc_tmp);
1679
1680 mtt_addr = (mtt->hr_indx << HERMON_MTT_SIZE_SHIFT);
1681
1682 /*
1683 * Fill in the MPT entry. This is the final step before passing
1684 * ownership of the MPT entry to the Hermon hardware. We use all of
1685 * the information collected/calculated above to fill in the
1686 * requisite portions of the MPT. Do this ONLY for DMPTs.
1687 */
1688 if (mpt == NULL)
1689 goto no_passown;
1690
1691 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
1692
1693 mpt_entry.status = HERMON_MPT_SW_OWNERSHIP;
1694 mpt_entry.en_bind = (mr->mr_accflag & IBT_MR_WINDOW_BIND) ? 1 : 0;
1695 mpt_entry.atomic = (mr->mr_accflag & IBT_MR_REMOTE_ATOMIC) ? 1 : 0;
1696 mpt_entry.rw = (mr->mr_accflag & IBT_MR_REMOTE_WRITE) ? 1 : 0;
1697 mpt_entry.rr = (mr->mr_accflag & IBT_MR_REMOTE_READ) ? 1 : 0;
1788 /*
1789 * The following is cleanup for all possible failure cases in this routine
1790 */
1791 mrcommon_fail7:
1792 hermon_rsrc_free(state, &mtt_refcnt);
1793 mrcommon_fail6:
1794 hermon_mr_mem_unbind(state, bh);
1795 bind->bi_type = bh->bi_type;
1796 mrcommon_fail5:
1797 if (mr_is_umem) {
1798 hermon_umap_db_free(umapdb);
1799 }
1800 mrcommon_fail4:
1801 if (mr_is_umem) {
1802 /*
1803 * Free up the memory ddi_umem_iosetup() allocates
1804 * internally.
1805 */
1806 if (bind->bi_type == HERMON_BINDHDL_UBUF) {
1807 freerbuf(bind->bi_buf);
1808 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
1809 bind->bi_type = HERMON_BINDHDL_NONE;
1810 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*bind))
1811 }
1812 ddi_umem_unlock(umem_cookie);
1813 }
1814 mrcommon_fail3:
1815 hermon_rsrc_free(state, &rsrc);
1816 mrcommon_fail2:
1817 if (mpt != NULL)
1818 hermon_rsrc_free(state, &mpt);
1819 mrcommon_fail1:
1820 hermon_pd_refcnt_dec(pd);
1821 mrcommon_fail:
1822 return (status);
1823 }
1824
1825 /*
1826 * hermon_dma_mr_register()
1827 * Context: Can be called from base context.
1828 */
1829 int
1830 hermon_dma_mr_register(hermon_state_t *state, hermon_pdhdl_t pd,
1863 * reference count.
1864 */
1865 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
1866 if (status != DDI_SUCCESS) {
1867 status = IBT_INSUFF_RESOURCE;
1868 goto mrcommon_fail1;
1869 }
1870
1871 /*
1872 * Allocate the software structure for tracking the memory region (i.e.
1873 * the Hermon Memory Region handle). If we fail here, we must undo
1874 * the protection domain reference count and the previous resource
1875 * allocation.
1876 */
1877 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1878 if (status != DDI_SUCCESS) {
1879 status = IBT_INSUFF_RESOURCE;
1880 goto mrcommon_fail2;
1881 }
1882 mr = (hermon_mrhdl_t)rsrc->hr_addr;
1883 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
1884 bzero(mr, sizeof (*mr));
1885
1886 /*
1887 * Setup and validate the memory region access flags. This means
1888 * translating the IBTF's enable flags into the access flags that
1889 * will be used in later operations.
1890 */
1891 mr->mr_accflag = 0;
1892 if (flags & IBT_MR_ENABLE_WINDOW_BIND)
1893 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
1894 if (flags & IBT_MR_ENABLE_LOCAL_WRITE)
1895 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
1896 if (flags & IBT_MR_ENABLE_REMOTE_READ)
1897 mr->mr_accflag |= IBT_MR_REMOTE_READ;
1898 if (flags & IBT_MR_ENABLE_REMOTE_WRITE)
1899 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
1900 if (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
1901 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
1902
1903 /*
2042 * The MTTs will get filled in when the FRWR is processed.
2043 */
2044 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
2045 if (status != DDI_SUCCESS) {
2046 status = IBT_INSUFF_RESOURCE;
2047 goto alloclkey_fail1;
2048 }
2049
2050 /*
2051 * Allocate the software structure for tracking the memory region (i.e.
2052 * the Hermon Memory Region handle). If we fail here, we must undo
2053 * the protection domain reference count and the previous resource
2054 * allocation.
2055 */
2056 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
2057 if (status != DDI_SUCCESS) {
2058 status = IBT_INSUFF_RESOURCE;
2059 goto alloclkey_fail2;
2060 }
2061 mr = (hermon_mrhdl_t)rsrc->hr_addr;
2062 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*mr))
2063 bzero(mr, sizeof (*mr));
2064 mr->mr_bindinfo.bi_type = HERMON_BINDHDL_LKEY;
2065
2066 mr->mr_lkey = hermon_mr_keycalc(mpt->hr_indx);
2067
2068 status = hermon_rsrc_alloc(state, HERMON_MTT, nummtt, sleep, &mtt);
2069 if (status != DDI_SUCCESS) {
2070 status = IBT_INSUFF_RESOURCE;
2071 goto alloclkey_fail3;
2072 }
2073 mr->mr_logmttpgsz = PAGESHIFT;
2074
2075 /*
2076 * Allocate MTT reference count (to track shared memory regions).
2077 * This reference count resource may never be used on the given
2078 * memory region, but if it is ever later registered as "shared"
2079 * memory region then this resource will be necessary. If we fail
2080 * here, we do pretty much the same as above to clean up.
2081 */
2082 status = hermon_rsrc_alloc(state, HERMON_REFCNT, 1, sleep,
2083 &mtt_refcnt);
2084 if (status != DDI_SUCCESS) {
2085 status = IBT_INSUFF_RESOURCE;
2086 goto alloclkey_fail4;
2087 }
2088 mr->mr_mttrefcntp = mtt_refcnt;
2089 swrc_tmp = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
2090 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrc_tmp))
2091 HERMON_MTT_REFCNT_INIT(swrc_tmp);
2092
2093 mtt_addr = (mtt->hr_indx << HERMON_MTT_SIZE_SHIFT);
2094
2095 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
2096 mpt_entry.status = HERMON_MPT_FREE;
2097 mpt_entry.lw = 1;
2098 mpt_entry.lr = 1;
2099 mpt_entry.reg_win = HERMON_MPT_IS_REGION;
2100 mpt_entry.entity_sz = mr->mr_logmttpgsz;
2101 mpt_entry.mem_key = mr->mr_lkey;
2102 mpt_entry.pd = pd->pd_pdnum;
2103 mpt_entry.fast_reg_en = 1;
2104 mpt_entry.rem_acc_en = 1;
2105 mpt_entry.en_inval = 1;
2106 if (flags & IBT_KEY_REMOTE) {
2107 mpt_entry.ren_inval = 1;
2108 }
2109 mpt_entry.mtt_size = nummtt;
2110 mpt_entry.mtt_addr_h = mtt_addr >> 32; /* only 8 more bits */
2371
2372
2373 /*
2374 * hermon_mr_common_rereg()
2375 * Context: Can be called from interrupt or base context.
2376 */
2377 static int
2378 hermon_mr_common_rereg(hermon_state_t *state, hermon_mrhdl_t mr,
2379 hermon_pdhdl_t pd, hermon_bind_info_t *bind, hermon_mrhdl_t *mrhdl_new,
2380 hermon_mr_options_t *op)
2381 {
2382 hermon_rsrc_t *mpt;
2383 ibt_mr_attr_flags_t acc_flags_to_use;
2384 ibt_mr_flags_t flags;
2385 hermon_pdhdl_t pd_to_use;
2386 hermon_hw_dmpt_t mpt_entry;
2387 uint64_t mtt_addr_to_use, vaddr_to_use, len_to_use;
2388 uint_t sleep, dereg_level;
2389 int status;
2390
2391 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
2392
2393 /*
2394 * Check here to see if the memory region corresponds to a userland
2395 * mapping. Reregistration of userland memory regions is not
2396 * currently supported. Return failure.
2397 */
2398 if (mr->mr_is_umem) {
2399 status = IBT_MR_HDL_INVALID;
2400 goto mrrereg_fail;
2401 }
2402
2403 mutex_enter(&mr->mr_lock);
2404
2405 /* Pull MPT resource pointer from the Hermon Memory Region handle */
2406 mpt = mr->mr_mptrsrcp;
2407
2408 /* Extract the flags field from the hermon_bind_info_t */
2409 flags = bind->bi_flags;
2410
2411 /*
2412 * Check the sleep flag. Ensure that it is consistent with the
2972 * that all current resources get properly
2973 * freed up. Unnecessary to attempt to regain
2974 * software ownership of the MPT entry as that
2975 * has already been done above (in
2976 * hermon_mr_reregister()). Also unnecessary
2977 * to attempt to unbind the memory.
2978 *
2979 * But we need to unbind the newly bound
2980 * memory and free up the newly allocated MTT
2981 * entries before returning.
2982 */
2983 hermon_mr_mem_unbind(state, bind);
2984 hermon_rsrc_free(state, &mtt);
2985 *dereg_level =
2986 HERMON_MR_DEREG_NO_HW2SW_MPT_OR_UNBIND;
2987
2988 status = IBT_INSUFF_RESOURCE;
2989 goto mrrereghelp_fail;
2990 }
2991 swrc_new = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
2992 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*swrc_new))
2993 HERMON_MTT_REFCNT_INIT(swrc_new);
2994 } else {
2995 mtt_refcnt = mr->mr_mttrefcntp;
2996 }
2997
2998 /*
2999 * Using the new mapping and the new MTT resources, write the
3000 * updated entries to MTT
3001 */
3002 status = hermon_mr_fast_mtt_write(state, mtt, bind,
3003 mtt_pgsize_bits);
3004 if (status != DDI_SUCCESS) {
3005 /*
3006 * Deregister will be called upon returning failure
3007 * from this routine. This will ensure that all
3008 * current resources get properly freed up.
3009 * Unnecessary to attempt to regain software ownership
3010 * of the MPT entry as that has already been done
3011 * above (in hermon_mr_reregister()). Also unnecessary
3012 * to attempt to unbind the memory.
3088 }
3089
3090
3091 /*
3092 * hermon_mr_mem_bind()
3093 * Context: Can be called from interrupt or base context.
3094 */
3095 static int
3096 hermon_mr_mem_bind(hermon_state_t *state, hermon_bind_info_t *bind,
3097 ddi_dma_handle_t dmahdl, uint_t sleep, uint_t is_buffer)
3098 {
3099 ddi_dma_attr_t dma_attr;
3100 int (*callback)(caddr_t);
3101 int status;
3102
3103 /* bi_type must be set to a meaningful value to get a bind handle */
3104 ASSERT(bind->bi_type == HERMON_BINDHDL_VADDR ||
3105 bind->bi_type == HERMON_BINDHDL_BUF ||
3106 bind->bi_type == HERMON_BINDHDL_UBUF);
3107
3108 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
3109
3110 /* Set the callback flag appropriately */
3111 callback = (sleep == HERMON_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3112
3113 /*
3114 * Initialize many of the default DMA attributes. Then, if we're
3115 * bypassing the IOMMU, set the DDI_DMA_FORCE_PHYSICAL flag.
3116 */
3117 if (dmahdl == NULL) {
3118 hermon_dma_attr_init(state, &dma_attr);
3119 #ifdef __sparc
3120 if (bind->bi_bypass == HERMON_BINDMEM_BYPASS) {
3121 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3122 }
3123 #endif
3124
3125 /* set RO if needed - tunable set and 'is_buffer' is non-0 */
3126 if (is_buffer) {
3127 if (! (bind->bi_flags & IBT_MR_DISABLE_RO)) {
3128 if ((bind->bi_type != HERMON_BINDHDL_UBUF) &&
3129 (hermon_kernel_data_ro ==
3175 if (status != DDI_DMA_MAPPED) {
3176 if (bind->bi_free_dmahdl != 0) {
3177 ddi_dma_free_handle(&bind->bi_dmahdl);
3178 }
3179 return (status);
3180 }
3181
3182 return (DDI_SUCCESS);
3183 }
3184
3185
3186 /*
3187 * hermon_mr_mem_unbind()
3188 * Context: Can be called from interrupt or base context.
3189 */
3190 static void
3191 hermon_mr_mem_unbind(hermon_state_t *state, hermon_bind_info_t *bind)
3192 {
3193 int status;
3194
3195 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
3196 /* there is nothing to unbind for alloc_lkey */
3197 if (bind->bi_type == HERMON_BINDHDL_LKEY)
3198 return;
3199
3200 /*
3201 * In case of HERMON_BINDHDL_UBUF, the memory bi_buf points to
3202 * is actually allocated by ddi_umem_iosetup() internally, then
3203 * it's required to free it here. Reset bi_type to HERMON_BINDHDL_NONE
3204 * not to free it again later.
3205 */
3206 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(*bind))
3207 if (bind->bi_type == HERMON_BINDHDL_UBUF) {
3208 freerbuf(bind->bi_buf);
3209 bind->bi_type = HERMON_BINDHDL_NONE;
3210 }
3211 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(*bind))
3212
3213 /*
3214 * Unbind the DMA memory for the region
3215 *
3216 * Note: The only way ddi_dma_unbind_handle() currently
3217 * can return an error is if the handle passed in is invalid.
3218 * Since this should never happen, we choose to return void
3219 * from this function! If this does return an error, however,
3220 * then we print a warning message to the console.
3221 */
3222 status = ddi_dma_unbind_handle(bind->bi_dmahdl);
3223 if (status != DDI_SUCCESS) {
3224 HERMON_WARNING(state, "failed to unbind DMA mapping");
3225 return;
3226 }
3227
3228 /* Free up the DMA handle */
3229 if (bind->bi_free_dmahdl != 0) {
3230 ddi_dma_free_handle(&bind->bi_dmahdl);
3231 }
3306
3307 if ((addr + pagesize > endaddr) &&
3308 (cookie_cnt == 0))
3309 return (DDI_SUCCESS);
3310
3311 hermon_index(index1, index2, rindx, icm_table,
3312 i);
3313 start = i * sizeof (hermon_hw_mtt_t);
3314 dma_info = icm_table->icm_dma[index1] + index2;
3315 mtt_table =
3316 (uint64_t *)(uintptr_t)dma_info->vaddr;
3317
3318 sync_needed = 0;
3319 } else {
3320 sync_needed = 1;
3321 }
3322
3323 addr += pagesize;
3324 if (addr == 0) {
3325 static int do_once = 1;
3326 _NOTE(SCHEME_PROTECTS_DATA("safe sharing",
3327 do_once))
3328 if (do_once) {
3329 do_once = 0;
3330 cmn_err(CE_NOTE, "probable error in "
3331 "dma_cookie address from caller\n");
3332 }
3333 break;
3334 }
3335 }
3336
3337 /*
3338 * When we've reached the end of the current DMA cookie,
3339 * jump to the next cookie (if there are more)
3340 */
3341 if (cookie_cnt != 0) {
3342 ddi_dma_nextcookie(bind->bi_dmahdl, &dmacookie);
3343 }
3344 }
3345
3346 /* done all the cookies, now sync the memory for the device */
3347 if (sync_needed)
|
210 * protection domain reference count.
211 */
212 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
213 if (status != DDI_SUCCESS) {
214 status = IBT_INSUFF_RESOURCE;
215 goto mrshared_fail1;
216 }
217
218 /*
219 * Allocate the software structure for tracking the shared memory
220 * region (i.e. the Hermon Memory Region handle). If we fail here, we
221 * must undo the protection domain reference count and the previous
222 * resource allocation.
223 */
224 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
225 if (status != DDI_SUCCESS) {
226 status = IBT_INSUFF_RESOURCE;
227 goto mrshared_fail2;
228 }
229 mr = (hermon_mrhdl_t)rsrc->hr_addr;
230
231 /*
232 * Setup and validate the memory region access flags. This means
233 * translating the IBTF's enable flags into the access flags that
234 * will be used in later operations.
235 */
236 mr->mr_accflag = 0;
237 if (mr_attr->mr_flags & IBT_MR_ENABLE_WINDOW_BIND)
238 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
239 if (mr_attr->mr_flags & IBT_MR_ENABLE_LOCAL_WRITE)
240 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
241 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_READ)
242 mr->mr_accflag |= IBT_MR_REMOTE_READ;
243 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_WRITE)
244 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
245 if (mr_attr->mr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
246 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
247
248 /*
249 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
302 status = IBT_INSUFF_RESOURCE;
303 goto mrshared_fail4;
304 }
305 }
306
307 /*
308 * Copy the MTT resource pointer (and additional parameters) from
309 * the original Hermon Memory Region handle. Note: this is normally
310 * where the hermon_mr_mem_bind() routine would be called, but because
311 * we already have bound and filled-in MTT entries it is simply a
312 * matter here of managing the MTT reference count and grabbing the
313 * address of the MTT table entries (for filling in the shared region's
314 * MPT entry).
315 */
316 mr->mr_mttrsrcp = mrhdl->mr_mttrsrcp;
317 mr->mr_logmttpgsz = mrhdl->mr_logmttpgsz;
318 mr->mr_bindinfo = mrhdl->mr_bindinfo;
319 mr->mr_mttrefcntp = mrhdl->mr_mttrefcntp;
320 mutex_exit(&mrhdl->mr_lock);
321 bind = &mr->mr_bindinfo;
322 mtt = mr->mr_mttrsrcp;
323
324 /*
325 * Increment the MTT reference count (to reflect the fact that
326 * the MTT is now shared)
327 */
328 (void) hermon_mtt_refcnt_inc(mr->mr_mttrefcntp);
329
330 /*
331 * Update the new "bind" virtual address. Do some extra work here
332 * to ensure proper alignment. That is, make sure that the page
333 * offset for the beginning of the old range is the same as the
334 * offset for this new mapping
335 */
336 pgsize_msk = (((uint64_t)1 << mr->mr_logmttpgsz) - 1);
337 bind->bi_addr = ((mr_attr->mr_vaddr & ~pgsize_msk) |
338 (mr->mr_bindinfo.bi_addr & pgsize_msk));
339
340 /*
341 * Fill in the MPT entry. This is the final step before passing
478 */
479
480 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
481 if (status != DDI_SUCCESS) {
482 status = IBT_INSUFF_RESOURCE;
483 goto fmralloc_fail1;
484 }
485
486 /*
487 * Allocate the software structure for tracking the fmr memory
488 * region (i.e. the Hermon Memory Region handle). If we fail here, we
489 * must undo the protection domain reference count and the previous
490 * resource allocation.
491 */
492 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
493 if (status != DDI_SUCCESS) {
494 status = IBT_INSUFF_RESOURCE;
495 goto fmralloc_fail2;
496 }
497 mr = (hermon_mrhdl_t)rsrc->hr_addr;
498
499 /*
500 * Setup and validate the memory region access flags. This means
501 * translating the IBTF's enable flags into the access flags that
502 * will be used in later operations.
503 */
504 mr->mr_accflag = 0;
505 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_LOCAL_WRITE)
506 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
507 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_READ)
508 mr->mr_accflag |= IBT_MR_REMOTE_READ;
509 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_WRITE)
510 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
511 if (fmr_pool->fmr_flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
512 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
513
514 /*
515 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
516 * from a certain number of "constrained" bits (the least significant
517 * bits) and some number of "unconstrained" bits. The constrained
604 * following fields for use in further operations on the MR. Also, set
605 * that this is an FMR region.
606 */
607 mr->mr_mptrsrcp = mpt;
608 mr->mr_mttrsrcp = mtt;
609
610 mr->mr_mpt_type = HERMON_MPT_DMPT;
611 mr->mr_pdhdl = pd;
612 mr->mr_rsrcp = rsrc;
613 mr->mr_is_fmr = 1;
614 mr->mr_lkey = hermon_mr_key_swap(mr->mr_lkey);
615 mr->mr_rkey = hermon_mr_key_swap(mr->mr_rkey);
616 mr->mr_mttaddr = mtt_addr;
617 (void) memcpy(&mr->mr_bindinfo, &bind, sizeof (hermon_bind_info_t));
618
619 /* initialize hr_addr for use during register/deregister/invalidate */
620 icm_table = &state->hs_icm[HERMON_DMPT];
621 rindx = mpt->hr_indx;
622 hermon_index(index1, index2, rindx, icm_table, i);
623 dma_info = icm_table->icm_dma[index1] + index2;
624 mpt->hr_addr = (void *)((uintptr_t)(dma_info->vaddr + i * mpt->hr_len));
625
626 *mrhdl = mr;
627
628 return (DDI_SUCCESS);
629
630 /*
631 * The following is cleanup for all possible failure cases in this routine
632 */
633 fmralloc_fail4:
634 kmem_free(mtt, sizeof (hermon_rsrc_t) * nummtt);
635 fmralloc_fail3:
636 hermon_rsrc_free(state, &rsrc);
637 fmralloc_fail2:
638 hermon_rsrc_free(state, &mpt);
639 fmralloc_fail1:
640 hermon_pd_refcnt_dec(pd);
641 fmralloc_fail:
642 return (status);
643 }
970 /* Set the mrhdl pointer to NULL and return success */
971 *mrhdl = NULL;
972
973 return (DDI_SUCCESS);
974 }
975
976
977 /*
978 * hermon_mr_query()
979 * Context: Can be called from interrupt or base context.
980 */
981 /* ARGSUSED */
982 int
983 hermon_mr_query(hermon_state_t *state, hermon_mrhdl_t mr,
984 ibt_mr_query_attr_t *attr)
985 {
986 int status;
987 hermon_hw_dmpt_t mpt_entry;
988 uint32_t lkey;
989
990 mutex_enter(&mr->mr_lock);
991
992 /*
993 * Check here to see if the memory region has already been partially
994 * deregistered as a result of a hermon_umap_umemlock_cb() callback.
995 * If so, this is an error, return failure.
996 */
997 if ((mr->mr_is_umem) && (mr->mr_umemcookie == NULL)) {
998 mutex_exit(&mr->mr_lock);
999 return (IBT_MR_HDL_INVALID);
1000 }
1001
1002 status = hermon_cmn_query_cmd_post(state, QUERY_MPT, 0,
1003 mr->mr_lkey >> 8, &mpt_entry, sizeof (hermon_hw_dmpt_t),
1004 HERMON_NOSLEEP);
1005 if (status != HERMON_CMD_SUCCESS) {
1006 cmn_err(CE_CONT, "Hermon: QUERY_MPT failed: status %x", status);
1007 mutex_exit(&mr->mr_lock);
1008 return (ibc_get_ci_failure(0));
1009 }
1254 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
1255 if (status != DDI_SUCCESS) {
1256 status = IBT_INSUFF_RESOURCE;
1257 goto mwalloc_fail1;
1258 }
1259
1260 /*
1261 * Allocate the software structure for tracking the memory window (i.e.
1262 * the Hermon Memory Window handle). Note: This is actually the same
1263 * software structure used for tracking memory regions, but since many
1264 * of the same properties are needed, only a single structure is
1265 * necessary. If we fail here, we must undo the protection domain
1266 * reference count and the previous resource allocation.
1267 */
1268 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1269 if (status != DDI_SUCCESS) {
1270 status = IBT_INSUFF_RESOURCE;
1271 goto mwalloc_fail2;
1272 }
1273 mw = (hermon_mwhdl_t)rsrc->hr_addr;
1274
1275 /*
1276 * Calculate an "unbound" RKey from MPT index. In much the same way
1277 * as we do for memory regions (above), this key is constructed from
1278 * a "constrained" (which depends on the MPT index) and an
1279 * "unconstrained" portion (which may be arbitrarily chosen).
1280 */
1281 mw->mr_rkey = hermon_mr_keycalc(mpt->hr_indx);
1282
1283 /*
1284 * Fill in the MPT entry. This is the final step before passing
1285 * ownership of the MPT entry to the Hermon hardware. We use all of
1286 * the information collected/calculated above to fill in the
1287 * requisite portions of the MPT. Note: fewer entries in the MPT
1288 * entry are necessary to allocate a memory window.
1289 */
1290 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
1291 mpt_entry.reg_win = HERMON_MPT_IS_WINDOW;
1292 mpt_entry.mem_key = mw->mr_rkey;
1293 mpt_entry.pd = pd->pd_pdnum;
1352 * current thread context (i.e. if we are currently in the interrupt
1353 * context, then we shouldn't be attempting to sleep).
1354 */
1355 if ((sleep == HERMON_SLEEP) &&
1356 (sleep != HERMON_SLEEPFLAG_FOR_CONTEXT())) {
1357 status = IBT_INVALID_PARAM;
1358 return (status);
1359 }
1360
1361 /*
1362 * Pull all the necessary information from the Hermon Memory Window
1363 * handle. This is necessary here because the resource for the
1364 * MW handle is going to be freed up as part of the this operation.
1365 */
1366 mw = *mwhdl;
1367 mutex_enter(&mw->mr_lock);
1368 mpt = mw->mr_mptrsrcp;
1369 rsrc = mw->mr_rsrcp;
1370 pd = mw->mr_pdhdl;
1371 mutex_exit(&mw->mr_lock);
1372
1373 /*
1374 * Reclaim the MPT entry from hardware. Note: in general, it is
1375 * unexpected for this operation to return an error.
1376 */
1377 status = hermon_cmn_ownership_cmd_post(state, HW2SW_MPT, NULL,
1378 0, mpt->hr_indx, sleep);
1379 if (status != HERMON_CMD_SUCCESS) {
1380 cmn_err(CE_CONT, "Hermon: HW2SW_MPT command failed: %08x\n",
1381 status);
1382 if (status == HERMON_CMD_INVALID_STATUS) {
1383 hermon_fm_ereport(state, HCA_SYS_ERR, HCA_ERR_SRV_LOST);
1384 }
1385 return (ibc_get_ci_failure(0));
1386 }
1387
1388 /* Free the Hermon Memory Window handle */
1389 hermon_rsrc_free(state, &rsrc);
1390
1391 /* Free up the MPT entry resource */
1408 * KKKKKKKK IIIIIIII IIIIIIII IIIIIIIII
1409 * where K == the arbitrary bits and I == the index
1410 */
1411 uint32_t
1412 hermon_mr_keycalc(uint32_t indx)
1413 {
1414 uint32_t tmp_key, tmp_indx;
1415
1416 /*
1417 * Generate a simple key from counter. Note: We increment this
1418 * static variable _intentionally_ without any kind of mutex around
1419 * it. First, single-threading all operations through a single lock
1420 * would be a bad idea (from a performance point-of-view). Second,
1421 * the upper "unconstrained" bits don't really have to be unique
1422 * because the lower bits are guaranteed to be (although we do make a
1423 * best effort to ensure that they are). Third, the window for the
1424 * race (where both threads read and update the counter at the same
1425 * time) is incredibly small.
1426 * And, lastly, we'd like to make this into a "random" key
1427 */
1428 tmp_key = (hermon_memkey_cnt++) << HERMON_MEMKEY_SHIFT;
1429 tmp_indx = indx & 0xffffff;
1430 return (tmp_key | tmp_indx);
1431 }
1432
1433
1434 /*
1435 * hermon_mr_key_swap()
1436 * Context: Can be called from interrupt or base context.
1437 * NOTE: Produces a key in the form of
1438 * IIIIIIII IIIIIIII IIIIIIIII KKKKKKKK
1439 * where K == the arbitrary bits and I == the index
1440 */
1441 uint32_t
1442 hermon_mr_key_swap(uint32_t indx)
1443 {
1444 /*
1445 * The memory key format to pass down to the hardware is
1446 * (key[7:0],index[23:0]), which defines the index to the
1447 * hardware resource. When the driver passes this as a memory
1536 if (status != DDI_SUCCESS) {
1537 status = IBT_INSUFF_RESOURCE;
1538 goto mrcommon_fail1;
1539 }
1540 } else {
1541 mpt = NULL;
1542 }
1543
1544 /*
1545 * Allocate the software structure for tracking the memory region (i.e.
1546 * the Hermon Memory Region handle). If we fail here, we must undo
1547 * the protection domain reference count and the previous resource
1548 * allocation.
1549 */
1550 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1551 if (status != DDI_SUCCESS) {
1552 status = IBT_INSUFF_RESOURCE;
1553 goto mrcommon_fail2;
1554 }
1555 mr = (hermon_mrhdl_t)rsrc->hr_addr;
1556
1557 /*
1558 * Setup and validate the memory region access flags. This means
1559 * translating the IBTF's enable flags into the access flags that
1560 * will be used in later operations.
1561 */
1562 mr->mr_accflag = 0;
1563 if (flags & IBT_MR_ENABLE_WINDOW_BIND)
1564 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
1565 if (flags & IBT_MR_ENABLE_LOCAL_WRITE)
1566 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
1567 if (flags & IBT_MR_ENABLE_REMOTE_READ)
1568 mr->mr_accflag |= IBT_MR_REMOTE_READ;
1569 if (flags & IBT_MR_ENABLE_REMOTE_WRITE)
1570 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
1571 if (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
1572 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
1573
1574 /*
1575 * Calculate keys (Lkey, Rkey) from MPT index. Each key is formed
1589 * Then, if this is userland memory, allocate an entry in the
1590 * "userland resources database". This will later be added to
1591 * the database (after all further memory registration operations are
1592 * successful). If we fail here, we must undo the reference counts
1593 * and the previous resource allocations.
1594 */
1595 mr_is_umem = (((bind->bi_as != NULL) && (bind->bi_as != &kas)) ? 1 : 0);
1596 if (mr_is_umem) {
1597 umem_len = ptob(btopr(bind->bi_len +
1598 ((uintptr_t)bind->bi_addr & PAGEOFFSET)));
1599 umem_addr = (caddr_t)((uintptr_t)bind->bi_addr & ~PAGEOFFSET);
1600 umem_flags = (DDI_UMEMLOCK_WRITE | DDI_UMEMLOCK_READ |
1601 DDI_UMEMLOCK_LONGTERM);
1602 status = umem_lockmemory(umem_addr, umem_len, umem_flags,
1603 &umem_cookie, &hermon_umem_cbops, NULL);
1604 if (status != 0) {
1605 status = IBT_INSUFF_RESOURCE;
1606 goto mrcommon_fail3;
1607 }
1608
1609 bind->bi_buf = ddi_umem_iosetup(umem_cookie, 0, umem_len,
1610 B_WRITE, 0, 0, NULL, DDI_UMEM_SLEEP);
1611 if (bind->bi_buf == NULL) {
1612 status = IBT_INSUFF_RESOURCE;
1613 goto mrcommon_fail3;
1614 }
1615 bind->bi_type = HERMON_BINDHDL_UBUF;
1616 bind->bi_buf->b_flags |= B_READ;
1617
1618 umapdb = hermon_umap_db_alloc(state->hs_instance,
1619 (uint64_t)(uintptr_t)umem_cookie, MLNX_UMAP_MRMEM_RSRC,
1620 (uint64_t)(uintptr_t)rsrc);
1621 if (umapdb == NULL) {
1622 status = IBT_INSUFF_RESOURCE;
1623 goto mrcommon_fail4;
1624 }
1625 }
1626
1627 /*
1628 * Setup the bindinfo for the mtt bind call
1629 */
1630 bh = &mr->mr_bindinfo;
1631 bcopy(bind, bh, sizeof (hermon_bind_info_t));
1632 bh->bi_bypass = bind_type;
1633 status = hermon_mr_mtt_bind(state, bh, bind_dmahdl, &mtt,
1634 &mtt_pgsize_bits, mpt != NULL);
1635 if (status != DDI_SUCCESS) {
1636 /*
1637 * When mtt_bind fails, freerbuf has already been done,
1638 * so make sure not to call it again.
1639 */
1640 bind->bi_type = bh->bi_type;
1641 goto mrcommon_fail5;
1642 }
1643 mr->mr_logmttpgsz = mtt_pgsize_bits;
1644
1645 /*
1646 * Allocate MTT reference count (to track shared memory regions).
1647 * This reference count resource may never be used on the given
1648 * memory region, but if it is ever later registered as "shared"
1649 * memory region then this resource will be necessary. If we fail
1650 * here, we do pretty much the same as above to clean up.
1651 */
1652 status = hermon_rsrc_alloc(state, HERMON_REFCNT, 1, sleep,
1653 &mtt_refcnt);
1654 if (status != DDI_SUCCESS) {
1655 status = IBT_INSUFF_RESOURCE;
1656 goto mrcommon_fail6;
1657 }
1658 mr->mr_mttrefcntp = mtt_refcnt;
1659 swrc_tmp = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
1660 HERMON_MTT_REFCNT_INIT(swrc_tmp);
1661
1662 mtt_addr = (mtt->hr_indx << HERMON_MTT_SIZE_SHIFT);
1663
1664 /*
1665 * Fill in the MPT entry. This is the final step before passing
1666 * ownership of the MPT entry to the Hermon hardware. We use all of
1667 * the information collected/calculated above to fill in the
1668 * requisite portions of the MPT. Do this ONLY for DMPTs.
1669 */
1670 if (mpt == NULL)
1671 goto no_passown;
1672
1673 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
1674
1675 mpt_entry.status = HERMON_MPT_SW_OWNERSHIP;
1676 mpt_entry.en_bind = (mr->mr_accflag & IBT_MR_WINDOW_BIND) ? 1 : 0;
1677 mpt_entry.atomic = (mr->mr_accflag & IBT_MR_REMOTE_ATOMIC) ? 1 : 0;
1678 mpt_entry.rw = (mr->mr_accflag & IBT_MR_REMOTE_WRITE) ? 1 : 0;
1679 mpt_entry.rr = (mr->mr_accflag & IBT_MR_REMOTE_READ) ? 1 : 0;
1770 /*
1771 * The following is cleanup for all possible failure cases in this routine
1772 */
1773 mrcommon_fail7:
1774 hermon_rsrc_free(state, &mtt_refcnt);
1775 mrcommon_fail6:
1776 hermon_mr_mem_unbind(state, bh);
1777 bind->bi_type = bh->bi_type;
1778 mrcommon_fail5:
1779 if (mr_is_umem) {
1780 hermon_umap_db_free(umapdb);
1781 }
1782 mrcommon_fail4:
1783 if (mr_is_umem) {
1784 /*
1785 * Free up the memory ddi_umem_iosetup() allocates
1786 * internally.
1787 */
1788 if (bind->bi_type == HERMON_BINDHDL_UBUF) {
1789 freerbuf(bind->bi_buf);
1790 bind->bi_type = HERMON_BINDHDL_NONE;
1791 }
1792 ddi_umem_unlock(umem_cookie);
1793 }
1794 mrcommon_fail3:
1795 hermon_rsrc_free(state, &rsrc);
1796 mrcommon_fail2:
1797 if (mpt != NULL)
1798 hermon_rsrc_free(state, &mpt);
1799 mrcommon_fail1:
1800 hermon_pd_refcnt_dec(pd);
1801 mrcommon_fail:
1802 return (status);
1803 }
1804
1805 /*
1806 * hermon_dma_mr_register()
1807 * Context: Can be called from base context.
1808 */
1809 int
1810 hermon_dma_mr_register(hermon_state_t *state, hermon_pdhdl_t pd,
1843 * reference count.
1844 */
1845 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
1846 if (status != DDI_SUCCESS) {
1847 status = IBT_INSUFF_RESOURCE;
1848 goto mrcommon_fail1;
1849 }
1850
1851 /*
1852 * Allocate the software structure for tracking the memory region (i.e.
1853 * the Hermon Memory Region handle). If we fail here, we must undo
1854 * the protection domain reference count and the previous resource
1855 * allocation.
1856 */
1857 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
1858 if (status != DDI_SUCCESS) {
1859 status = IBT_INSUFF_RESOURCE;
1860 goto mrcommon_fail2;
1861 }
1862 mr = (hermon_mrhdl_t)rsrc->hr_addr;
1863 bzero(mr, sizeof (*mr));
1864
1865 /*
1866 * Setup and validate the memory region access flags. This means
1867 * translating the IBTF's enable flags into the access flags that
1868 * will be used in later operations.
1869 */
1870 mr->mr_accflag = 0;
1871 if (flags & IBT_MR_ENABLE_WINDOW_BIND)
1872 mr->mr_accflag |= IBT_MR_WINDOW_BIND;
1873 if (flags & IBT_MR_ENABLE_LOCAL_WRITE)
1874 mr->mr_accflag |= IBT_MR_LOCAL_WRITE;
1875 if (flags & IBT_MR_ENABLE_REMOTE_READ)
1876 mr->mr_accflag |= IBT_MR_REMOTE_READ;
1877 if (flags & IBT_MR_ENABLE_REMOTE_WRITE)
1878 mr->mr_accflag |= IBT_MR_REMOTE_WRITE;
1879 if (flags & IBT_MR_ENABLE_REMOTE_ATOMIC)
1880 mr->mr_accflag |= IBT_MR_REMOTE_ATOMIC;
1881
1882 /*
2021 * The MTTs will get filled in when the FRWR is processed.
2022 */
2023 status = hermon_rsrc_alloc(state, HERMON_DMPT, 1, sleep, &mpt);
2024 if (status != DDI_SUCCESS) {
2025 status = IBT_INSUFF_RESOURCE;
2026 goto alloclkey_fail1;
2027 }
2028
2029 /*
2030 * Allocate the software structure for tracking the memory region (i.e.
2031 * the Hermon Memory Region handle). If we fail here, we must undo
2032 * the protection domain reference count and the previous resource
2033 * allocation.
2034 */
2035 status = hermon_rsrc_alloc(state, HERMON_MRHDL, 1, sleep, &rsrc);
2036 if (status != DDI_SUCCESS) {
2037 status = IBT_INSUFF_RESOURCE;
2038 goto alloclkey_fail2;
2039 }
2040 mr = (hermon_mrhdl_t)rsrc->hr_addr;
2041 bzero(mr, sizeof (*mr));
2042 mr->mr_bindinfo.bi_type = HERMON_BINDHDL_LKEY;
2043
2044 mr->mr_lkey = hermon_mr_keycalc(mpt->hr_indx);
2045
2046 status = hermon_rsrc_alloc(state, HERMON_MTT, nummtt, sleep, &mtt);
2047 if (status != DDI_SUCCESS) {
2048 status = IBT_INSUFF_RESOURCE;
2049 goto alloclkey_fail3;
2050 }
2051 mr->mr_logmttpgsz = PAGESHIFT;
2052
2053 /*
2054 * Allocate MTT reference count (to track shared memory regions).
2055 * This reference count resource may never be used on the given
2056 * memory region, but if it is ever later registered as "shared"
2057 * memory region then this resource will be necessary. If we fail
2058 * here, we do pretty much the same as above to clean up.
2059 */
2060 status = hermon_rsrc_alloc(state, HERMON_REFCNT, 1, sleep,
2061 &mtt_refcnt);
2062 if (status != DDI_SUCCESS) {
2063 status = IBT_INSUFF_RESOURCE;
2064 goto alloclkey_fail4;
2065 }
2066 mr->mr_mttrefcntp = mtt_refcnt;
2067 swrc_tmp = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
2068 HERMON_MTT_REFCNT_INIT(swrc_tmp);
2069
2070 mtt_addr = (mtt->hr_indx << HERMON_MTT_SIZE_SHIFT);
2071
2072 bzero(&mpt_entry, sizeof (hermon_hw_dmpt_t));
2073 mpt_entry.status = HERMON_MPT_FREE;
2074 mpt_entry.lw = 1;
2075 mpt_entry.lr = 1;
2076 mpt_entry.reg_win = HERMON_MPT_IS_REGION;
2077 mpt_entry.entity_sz = mr->mr_logmttpgsz;
2078 mpt_entry.mem_key = mr->mr_lkey;
2079 mpt_entry.pd = pd->pd_pdnum;
2080 mpt_entry.fast_reg_en = 1;
2081 mpt_entry.rem_acc_en = 1;
2082 mpt_entry.en_inval = 1;
2083 if (flags & IBT_KEY_REMOTE) {
2084 mpt_entry.ren_inval = 1;
2085 }
2086 mpt_entry.mtt_size = nummtt;
2087 mpt_entry.mtt_addr_h = mtt_addr >> 32; /* only 8 more bits */
2348
2349
2350 /*
2351 * hermon_mr_common_rereg()
2352 * Context: Can be called from interrupt or base context.
2353 */
2354 static int
2355 hermon_mr_common_rereg(hermon_state_t *state, hermon_mrhdl_t mr,
2356 hermon_pdhdl_t pd, hermon_bind_info_t *bind, hermon_mrhdl_t *mrhdl_new,
2357 hermon_mr_options_t *op)
2358 {
2359 hermon_rsrc_t *mpt;
2360 ibt_mr_attr_flags_t acc_flags_to_use;
2361 ibt_mr_flags_t flags;
2362 hermon_pdhdl_t pd_to_use;
2363 hermon_hw_dmpt_t mpt_entry;
2364 uint64_t mtt_addr_to_use, vaddr_to_use, len_to_use;
2365 uint_t sleep, dereg_level;
2366 int status;
2367
2368 /*
2369 * Check here to see if the memory region corresponds to a userland
2370 * mapping. Reregistration of userland memory regions is not
2371 * currently supported. Return failure.
2372 */
2373 if (mr->mr_is_umem) {
2374 status = IBT_MR_HDL_INVALID;
2375 goto mrrereg_fail;
2376 }
2377
2378 mutex_enter(&mr->mr_lock);
2379
2380 /* Pull MPT resource pointer from the Hermon Memory Region handle */
2381 mpt = mr->mr_mptrsrcp;
2382
2383 /* Extract the flags field from the hermon_bind_info_t */
2384 flags = bind->bi_flags;
2385
2386 /*
2387 * Check the sleep flag. Ensure that it is consistent with the
2947 * that all current resources get properly
2948 * freed up. Unnecessary to attempt to regain
2949 * software ownership of the MPT entry as that
2950 * has already been done above (in
2951 * hermon_mr_reregister()). Also unnecessary
2952 * to attempt to unbind the memory.
2953 *
2954 * But we need to unbind the newly bound
2955 * memory and free up the newly allocated MTT
2956 * entries before returning.
2957 */
2958 hermon_mr_mem_unbind(state, bind);
2959 hermon_rsrc_free(state, &mtt);
2960 *dereg_level =
2961 HERMON_MR_DEREG_NO_HW2SW_MPT_OR_UNBIND;
2962
2963 status = IBT_INSUFF_RESOURCE;
2964 goto mrrereghelp_fail;
2965 }
2966 swrc_new = (hermon_sw_refcnt_t *)mtt_refcnt->hr_addr;
2967 HERMON_MTT_REFCNT_INIT(swrc_new);
2968 } else {
2969 mtt_refcnt = mr->mr_mttrefcntp;
2970 }
2971
2972 /*
2973 * Using the new mapping and the new MTT resources, write the
2974 * updated entries to MTT
2975 */
2976 status = hermon_mr_fast_mtt_write(state, mtt, bind,
2977 mtt_pgsize_bits);
2978 if (status != DDI_SUCCESS) {
2979 /*
2980 * Deregister will be called upon returning failure
2981 * from this routine. This will ensure that all
2982 * current resources get properly freed up.
2983 * Unnecessary to attempt to regain software ownership
2984 * of the MPT entry as that has already been done
2985 * above (in hermon_mr_reregister()). Also unnecessary
2986 * to attempt to unbind the memory.
3062 }
3063
3064
3065 /*
3066 * hermon_mr_mem_bind()
3067 * Context: Can be called from interrupt or base context.
3068 */
3069 static int
3070 hermon_mr_mem_bind(hermon_state_t *state, hermon_bind_info_t *bind,
3071 ddi_dma_handle_t dmahdl, uint_t sleep, uint_t is_buffer)
3072 {
3073 ddi_dma_attr_t dma_attr;
3074 int (*callback)(caddr_t);
3075 int status;
3076
3077 /* bi_type must be set to a meaningful value to get a bind handle */
3078 ASSERT(bind->bi_type == HERMON_BINDHDL_VADDR ||
3079 bind->bi_type == HERMON_BINDHDL_BUF ||
3080 bind->bi_type == HERMON_BINDHDL_UBUF);
3081
3082 /* Set the callback flag appropriately */
3083 callback = (sleep == HERMON_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
3084
3085 /*
3086 * Initialize many of the default DMA attributes. Then, if we're
3087 * bypassing the IOMMU, set the DDI_DMA_FORCE_PHYSICAL flag.
3088 */
3089 if (dmahdl == NULL) {
3090 hermon_dma_attr_init(state, &dma_attr);
3091 #ifdef __sparc
3092 if (bind->bi_bypass == HERMON_BINDMEM_BYPASS) {
3093 dma_attr.dma_attr_flags = DDI_DMA_FORCE_PHYSICAL;
3094 }
3095 #endif
3096
3097 /* set RO if needed - tunable set and 'is_buffer' is non-0 */
3098 if (is_buffer) {
3099 if (! (bind->bi_flags & IBT_MR_DISABLE_RO)) {
3100 if ((bind->bi_type != HERMON_BINDHDL_UBUF) &&
3101 (hermon_kernel_data_ro ==
3147 if (status != DDI_DMA_MAPPED) {
3148 if (bind->bi_free_dmahdl != 0) {
3149 ddi_dma_free_handle(&bind->bi_dmahdl);
3150 }
3151 return (status);
3152 }
3153
3154 return (DDI_SUCCESS);
3155 }
3156
3157
3158 /*
3159 * hermon_mr_mem_unbind()
3160 * Context: Can be called from interrupt or base context.
3161 */
3162 static void
3163 hermon_mr_mem_unbind(hermon_state_t *state, hermon_bind_info_t *bind)
3164 {
3165 int status;
3166
3167 /* there is nothing to unbind for alloc_lkey */
3168 if (bind->bi_type == HERMON_BINDHDL_LKEY)
3169 return;
3170
3171 /*
3172 * In case of HERMON_BINDHDL_UBUF, the memory bi_buf points to
3173 * is actually allocated by ddi_umem_iosetup() internally, then
3174 * it's required to free it here. Reset bi_type to HERMON_BINDHDL_NONE
3175 * not to free it again later.
3176 */
3177 if (bind->bi_type == HERMON_BINDHDL_UBUF) {
3178 freerbuf(bind->bi_buf);
3179 bind->bi_type = HERMON_BINDHDL_NONE;
3180 }
3181
3182 /*
3183 * Unbind the DMA memory for the region
3184 *
3185 * Note: The only way ddi_dma_unbind_handle() currently
3186 * can return an error is if the handle passed in is invalid.
3187 * Since this should never happen, we choose to return void
3188 * from this function! If this does return an error, however,
3189 * then we print a warning message to the console.
3190 */
3191 status = ddi_dma_unbind_handle(bind->bi_dmahdl);
3192 if (status != DDI_SUCCESS) {
3193 HERMON_WARNING(state, "failed to unbind DMA mapping");
3194 return;
3195 }
3196
3197 /* Free up the DMA handle */
3198 if (bind->bi_free_dmahdl != 0) {
3199 ddi_dma_free_handle(&bind->bi_dmahdl);
3200 }
3275
3276 if ((addr + pagesize > endaddr) &&
3277 (cookie_cnt == 0))
3278 return (DDI_SUCCESS);
3279
3280 hermon_index(index1, index2, rindx, icm_table,
3281 i);
3282 start = i * sizeof (hermon_hw_mtt_t);
3283 dma_info = icm_table->icm_dma[index1] + index2;
3284 mtt_table =
3285 (uint64_t *)(uintptr_t)dma_info->vaddr;
3286
3287 sync_needed = 0;
3288 } else {
3289 sync_needed = 1;
3290 }
3291
3292 addr += pagesize;
3293 if (addr == 0) {
3294 static int do_once = 1;
3295 if (do_once) {
3296 do_once = 0;
3297 cmn_err(CE_NOTE, "probable error in "
3298 "dma_cookie address from caller\n");
3299 }
3300 break;
3301 }
3302 }
3303
3304 /*
3305 * When we've reached the end of the current DMA cookie,
3306 * jump to the next cookie (if there are more)
3307 */
3308 if (cookie_cnt != 0) {
3309 ddi_dma_nextcookie(bind->bi_dmahdl, &dmacookie);
3310 }
3311 }
3312
3313 /* done all the cookies, now sync the memory for the device */
3314 if (sync_needed)
|