416 void sdbc_requeue_head_dm_try(_sd_cctl_t *);
417 static _sd_cctl_t *sdbc_alloc_dmc(int, nsc_off_t, nsc_size_t, int *,
418 sdbc_allocbuf_t *, int);
419 static _sd_cctl_t *sdbc_alloc_lru(int, nsc_off_t, int *, int);
420 static _sd_cctl_t *sdbc_alloc_from_dmchain(int, nsc_off_t, sdbc_allocbuf_t *,
421 int);
422 static void sdbc_centry_init_dm(_sd_cctl_t *);
423 static int sdbc_centry_memalloc_dm(_sd_cctl_t *, int, int);
424 static void sdbc_centry_alloc_end(sdbc_allocbuf_t *);
425
426
427
428
429 /* _SD_DEBUG */
430 #if defined(_SD_DEBUG) || defined(DEBUG)
431 static int _sd_cctl_valid(_sd_cctl_t *);
432 #endif
433
434 static
435 nsc_def_t _sdbc_fd_def[] = {
436 "Attach", (uintptr_t)sdbc_fd_attach_cd, 0,
437 "Detach", (uintptr_t)sdbc_fd_detach_cd, 0,
438 "Flush", (uintptr_t)sdbc_fd_flush_cd, 0,
439 0, 0, 0
440 };
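
/*
 * _sdbc_fd_def[] pairs nsctl operation names with the per-descriptor
 * handlers above and is registered with the nsctl framework elsewhere
 * in the driver (not shown here).  The sketch below is a minimal
 * illustration of how such a NULL-terminated, name-keyed nsc_def_t
 * table can be scanned; the guard macro SDBC_DEF_LOOKUP_EXAMPLE and
 * the member names "name" and "func" are assumptions inferred from
 * the initializers, not part of the original source.
 */
#ifdef SDBC_DEF_LOOKUP_EXAMPLE
static uintptr_t
sdbc_def_lookup_example(const nsc_def_t *defs, const char *op)
{
	/* walk the table until the terminating NULL name */
	for (; defs->name != NULL; defs++) {
		if (strcmp(defs->name, op) == 0)
			return (defs->func);
	}
	return ((uintptr_t)NULL);
}
#endif	/* SDBC_DEF_LOOKUP_EXAMPLE */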
441
442
443 /*
444 * _sdbc_cache_configure - initialize cache blocks, queues etc.
445 *
446 * ARGUMENTS:
447 * cblocks - Number of cache blocks
 * kstatus - SPCS status handle used to report the detail of a failure
448 *
449 * RETURNS:
450 * 0 on success.
451 * SDBC_EENABLEFAIL or SDBC_EMEMCONFIG on failure.
452 *
453 */
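
/*
 * Usage sketch (illustrative only, not taken from the enable path):
 * a caller checks the return value against the documented failure
 * codes, e.g.
 *
 *	rc = _sdbc_cache_configure(cblocks, kstatus);
 *	if (rc == SDBC_EENABLEFAIL || rc == SDBC_EMEMCONFIG)
 *		return (rc);	-- kstatus carries the failure detail
 */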
454
455
456
457 int
458 _sdbc_cache_configure(int cblocks, spcs_s_info_t kstatus)
459 {
2083 * skip leading valid or busy entries (data available sooner)
2084 * truncate on busy block (to avoid deadlock)
2085 * release trailing valid entries, adjust length before starting I/O.
2086 */
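
/*
 * Illustrative reading of the strategy above, with V = valid in
 * cache, B = busy with another I/O and "." = must be read: for a
 * chain V B . . B . V the leading V and B are skipped, the I/O is
 * truncated at the second B, and only the two "." blocks in between
 * are read; a chain ending ". . V V" would instead have its valid
 * tail released before the I/O is started.
 */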
2087 static int
2088 _sd_prefetch_buf(int cd, nsc_off_t fba_pos, nsc_size_t fba_len, int flag,
2089 _sd_buf_handle_t *handle, int locked)
2090 {
2091 _sd_cd_info_t *cdi;
2092 nsc_off_t cblk; /* position of temp cache block */
2093 sdbc_cblk_fba_t st_cblk_len; /* FBA len of starting cache block */
2094 sdbc_cblk_fba_t end_cblk_len; /* FBA len of ending cache block */
2095 sdbc_cblk_fba_t st_cblk_off; /* FBA offset into starting cblock */
2096 nsc_off_t io_pos; /* offset in FBA's */
2097 nsc_size_t fba_orig_len;
2098 int sts, stall;
2099 _sd_cctl_t *centry = NULL;
2100 _sd_cctl_t *lentry = NULL;
2101 _sd_cctl_t *ioent = NULL;
2102 _sd_cctl_t *last_ioent = NULL;
2103 sdbc_allocbuf_t alloc_tok = {{(intptr_t)NULL}};
2104 int this_entry_type = 0;
2105 nsc_size_t request_blocks = 0; /* number of cache blocks required */
2106 int pageio;
2107
2108 handle->bh_flag |= NSC_HACTIVE;
2109 ASSERT(cd >= 0);
2110 cdi = &_sd_cache_files[cd];
2111
2112 /* prefetch: truncate if req'd */
2113 if (fba_len > sdbc_max_fbas)
2114 fba_len = sdbc_max_fbas;
2115 if ((fba_pos + fba_len) > cdi->cd_info->sh_filesize) {
2116 if (fba_pos >= cdi->cd_info->sh_filesize) {
2117 sts = EIO;
2118 goto done;
2119 }
2120 fba_len = cdi->cd_info->sh_filesize - fba_pos;
2121 }
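	/*
	 * Worked example (illustrative numbers): with sh_filesize at
	 * 1000 FBAs and sdbc_max_fbas at 256, a 512-FBA prefetch at
	 * fba_pos 900 is first clipped to 256 FBAs and then to the
	 * 100 FBAs remaining on the device; a request starting at or
	 * beyond FBA 1000 fails with EIO above.
	 */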
2122
2123 fba_orig_len = fba_len;
2523 _sd_alloc_buf(blind_t xcd, nsc_off_t fba_pos, nsc_size_t fba_len, int flag,
2524 _sd_buf_handle_t **handle_p)
2525 {
2526 int cd = (int)(unsigned long)xcd;
2527 _sd_cd_info_t *cdi;
2528 _sd_buf_handle_t *handle;
2529 int sts;
2530 nsc_off_t st_cblk, cblk; /* position of start and temp cache block */
2531 sdbc_cblk_fba_t st_cblk_len; /* FBA len of starting cache block */
2532 sdbc_cblk_fba_t end_cblk_len; /* FBA len of ending cache block */
2533 sdbc_cblk_fba_t st_cblk_off; /* FBA offset into starting cblock */
2534 nsc_off_t io_pos; /* offset in FBA's */
2535 _sd_bufvec_t *bufvec;
2536 _sd_cctl_t *centry, *lentry, *ioent = NULL;
2537 nsc_size_t fba_orig_len = fba_len; /* FBA length of orig request */
2538 int stall, pageio;
2539 unsigned char cc_flag;
2540 int this_entry_type;
2541 int locked = 0;
2542 nsc_size_t dmchain_request_blocks; /* size of dmchain in cache blocks */
2543 sdbc_allocbuf_t alloc_tok = {{(intptr_t)NULL}};
2544 int min_frag = 0; /* frag statistics */
2545 int max_frag = 0; /* frag statistics */
2546 int nfrags = 0; /* frag statistics */
2547 #ifdef DEBUG
2548 int err = 0;
2549 #endif
2550
2551
2552 ASSERT(*handle_p != NULL);
2553 handle = *handle_p;
2554
2555 if (_sdbc_shutdown_in_progress)
2556 return (EIO);
2557
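	/*
	 * An anonymous request (NSC_ANON_CD) has no real cache
	 * descriptor; it is mapped to the _CD_NOHASH pseudo-descriptor,
	 * which presumably steers the allocation below away from the
	 * per-descriptor hash lookup.
	 */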
2558 if (xcd == NSC_ANON_CD)
2559 cd = _CD_NOHASH;
2560
2561 KSTAT_RUNQ_ENTER(cd);
2562
2563 /*
4127 }
4128 }
4129
4130 /*
4131 * sdbc_centry_alloc_blks -- allocate cache entries with memory
4132 *
4133 * ARGUMENTS:
4134 * cd - Cache descriptor (from a previous open)
4135 * cblk - cache block number.
4136 * reqblks - number of cache blocks to be allocated
4137 * flag - can be ALLOC_NOWAIT
4138 * RETURNS:
4139 * A cache block chain or NULL if ALLOC_NOWAIT and request fails
4140 *
4141 * Note: caller must check for null return if called with
4142 * ALLOC_NOWAIT set.
4143 */
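
/*
 * Caller sketch (illustrative, assuming only the ALLOC_NOWAIT
 * semantics documented above):
 *
 *	chain = sdbc_centry_alloc_blks(cd, cblk, nblks, ALLOC_NOWAIT);
 *	if (chain == NULL)
 *		return (NULL);	-- nothing allocated, retry later
 */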
4144 _sd_cctl_t *
4145 sdbc_centry_alloc_blks(int cd, nsc_off_t cblk, nsc_size_t reqblks, int flag)
4146 {
4147 sdbc_allocbuf_t alloc_tok = {{(intptr_t)NULL}}; /* must be NULL */
4148 int stall = 0;
4149 _sd_cctl_t *centry = NULL;
4150 _sd_cctl_t *lentry = NULL;
4151 _sd_cctl_t *anchor = NULL;
4152 _sd_cctl_t *next_centry;
4153
4154 ASSERT(reqblks);
4155
4156 while (reqblks) {
4157 centry = sdbc_centry_alloc(cd, cblk, reqblks, &stall,
4158 &alloc_tok, flag);
4159
4160 if (!centry)
4161 break;
4162
4163 centry->cc_chain = NULL;
4164
4165 if (lentry == NULL)
4166 anchor = centry;
4167 else
7068 _sd_lookup_map[i].mi_len = (unsigned char)k;
7069
7070 _sd_lookup_map[i].mi_mask = SDBC_GET_BITS(stpos, len);
7071 }
7072 for (i = 0; i < _SD_MAX_MAP; i++) {
7073 mask = (_sd_bitmap_t)i;
7074 for (j = 0; mask; j++)
7075 SDBC_LOOKUP_MODIFY(mask);
7076
7077 _sd_lookup_map[i].mi_dirty_count = (unsigned char)j;
7078 }
7079 for (i = 0; i < _SD_MAX_MAP; i++) {
7080 _sd_lookup_map[i].mi_io_count = SDBC_LOOKUP_DTCOUNT(i);
7081 mask = ~i;
7082 _sd_lookup_map[i].mi_io_count += SDBC_LOOKUP_DTCOUNT(mask);
7083 }
7084 }
7085
7086
7087 nsc_def_t _sd_sdbc_def[] = {
7088 "Open", (uintptr_t)_sd_open_io, 0,
7089 "Close", (uintptr_t)_sd_close_io, 0,
7090 "Attach", (uintptr_t)_sdbc_io_attach_cd, 0,
7091 "Detach", (uintptr_t)_sdbc_io_detach_cd, 0,
7092 "AllocBuf", (uintptr_t)_sd_alloc_buf, 0,
7093 "FreeBuf", (uintptr_t)_sd_free_buf, 0,
7094 "Read", (uintptr_t)_sd_read, 0,
7095 "Write", (uintptr_t)_sd_write, 0,
7096 "Zero", (uintptr_t)_sd_zero, 0,
7097 "Copy", (uintptr_t)_sd_copy, 0,
7098 "CopyDirect", (uintptr_t)_sd_copy_direct, 0,
7099 "Uncommit", (uintptr_t)_sd_uncommit, 0,
7100 "AllocHandle", (uintptr_t)_sd_alloc_handle, 0,
7101 "FreeHandle", (uintptr_t)_sd_free_handle, 0,
7102 "Discard", (uintptr_t)_sd_discard_pinned, 0,
7103 "Sizes", (uintptr_t)_sd_cache_sizes, 0,
7104 "GetPinned", (uintptr_t)_sd_get_pinned, 0,
7105 "NodeHints", (uintptr_t)_sd_node_hint_caller, 0,
7106 "PartSize", (uintptr_t)_sd_get_partsize, 0,
7107 "MaxFbas", (uintptr_t)_sd_get_maxfbas, 0,
7108 "Control", (uintptr_t)_sd_control, 0,
7109 "Provide", NSC_CACHE, 0,
7110 0, 0, 0
7111 };
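
/*
 * _sd_sdbc_def[] is the cache's I/O entry-point vector: each row binds
 * a generic nsctl operation name to its sdbc implementation, with the
 * "Provide" row advertising NSC_CACHE and a NULL name terminating the
 * table.  It is presumably handed to the nsctl I/O registration code
 * elsewhere in the driver.
 */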
7112
7113 /*
7114 * do the SD_GET_CD_CLUSTER_DATA ioctl (get the global filename data)
7115 */
7116 /* ARGSUSED */
7117 int
7118 sd_get_file_info_data(char *uaddrp)
7119 {
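	/*
	 * Global (cluster) filename data is not provided by this
	 * implementation, so the ioctl is rejected as unsupported.
	 */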
7120 return (ENOTTY);
7121 }
7122
7123 /*
7124 * do the SD_GET_CD_CLUSTER_SIZE ioctl (get size of global filename area)
7125 */
7126 int
7127 sd_get_file_info_size(void *uaddrp)
7128 {
7129 if (copyout(&_sdbc_gl_file_info_size, uaddrp,
7130 sizeof (_sdbc_gl_file_info_size))) {