Print this page
3866 panic in idm module
3867 stmfCreateLu failed: GUID_IN_USE
3868 iscsi target not accepting any new connections
Reviewed by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed by: Jeremy Jones <jeremy@delphix.com>
Reviewed by: Eric Diven <eric.diven@delphix.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/comstar/stmf/stmf.c
+++ new/usr/src/uts/common/io/comstar/stmf/stmf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
26 26 * Copyright (c) 2013 by Delphix. All rights reserved.
27 27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 28 */
29 29
30 30 #include <sys/conf.h>
31 31 #include <sys/file.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/modctl.h>
35 35 #include <sys/scsi/scsi.h>
36 36 #include <sys/scsi/generic/persist.h>
37 37 #include <sys/scsi/impl/scsi_reset_notify.h>
38 38 #include <sys/disp.h>
39 39 #include <sys/byteorder.h>
40 40 #include <sys/atomic.h>
41 41 #include <sys/ethernet.h>
42 42 #include <sys/sdt.h>
43 43 #include <sys/nvpair.h>
44 44 #include <sys/zone.h>
45 45 #include <sys/id_space.h>
46 46
47 47 #include <sys/stmf.h>
48 48 #include <sys/lpif.h>
49 49 #include <sys/portif.h>
50 50 #include <sys/stmf_ioctl.h>
51 51 #include <sys/pppt_ic_if.h>
52 52
53 53 #include "stmf_impl.h"
54 54 #include "lun_map.h"
55 55 #include "stmf_state.h"
56 56 #include "stmf_stats.h"
57 57
58 58 /*
59 59 * Lock order:
60 60 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
61 61 */
62 62
63 63 static uint64_t stmf_session_counter = 0;
64 64 static uint16_t stmf_rtpid_counter = 0;
65 65 /* start messages at 1 */
66 66 static uint64_t stmf_proxy_msg_id = 1;
67 67 #define MSG_ID_TM_BIT 0x8000000000000000
68 68 #define ALIGNED_TO_8BYTE_BOUNDARY(i) (((i) + 7) & ~7)
69 69
70 70 /*
71 71 * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up
72 72 * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman
73 73 * seconds. If it does, we trigger a panic to inform the user of hung I/O
74 74 * blocking us for too long.
75 75 */
76 76 boolean_t stmf_io_deadman_enabled = B_TRUE;
77 77 int stmf_io_deadman = 1000; /* seconds */
78 78
79 79 struct stmf_svc_clocks;
80 80
81 81 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
82 82 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
83 83 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
84 84 void **result);
85 85 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
86 86 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
87 87 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
88 88 cred_t *credp, int *rval);
89 89 static int stmf_get_stmf_state(stmf_state_desc_t *std);
90 90 static int stmf_set_stmf_state(stmf_state_desc_t *std);
91 91 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
92 92 char *info);
93 93 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
94 94 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
95 95
96 96 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
97 97 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
98 98
99 99 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
100 100 static char stmf_ctoi(char c);
101 101 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
102 102 void stmf_svc_init();
103 103 stmf_status_t stmf_svc_fini();
104 104 void stmf_svc(void *arg);
105 105 static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu);
106 106 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
107 107 static void stmf_svc_kill_obj_requests(void *obj);
108 108 static void stmf_svc_timeout(struct stmf_svc_clocks *);
109 109 void stmf_check_freetask();
110 110 void stmf_abort_target_reset(scsi_task_t *task);
111 111 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
112 112 int target_reset);
113 113 void stmf_target_reset_poll(struct scsi_task *task);
114 114 void stmf_handle_lun_reset(scsi_task_t *task);
115 115 void stmf_handle_target_reset(scsi_task_t *task);
116 116 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
117 117 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
118 118 uint32_t *err_ret);
119 119 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
120 120 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
121 121 uint32_t *err_ret);
122 122 void stmf_delete_ppd(stmf_pp_data_t *ppd);
123 123 void stmf_delete_all_ppds();
124 124 void stmf_trace_clear();
125 125 void stmf_worker_init();
126 126 stmf_status_t stmf_worker_fini();
127 127 void stmf_worker_mgmt();
128 128 void stmf_worker_task(void *arg);
129 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
130 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
131 131 uint32_t type);
132 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
133 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
134 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
135 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
136 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
137 137
138 138 /* pppt modhandle */
139 139 ddi_modhandle_t pppt_mod;
140 140
141 141 /* pppt modload imported functions */
142 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
143 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
144 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
145 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
146 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
147 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
148 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
149 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
150 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
151 151 stmf_ic_tx_msg_func_t ic_tx_msg;
152 152 stmf_ic_msg_free_func_t ic_msg_free;
153 153
154 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
155 155 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
156 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
157 157
158 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
159 159 stmf_data_buf_t *dbuf);
160 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
161 161 stmf_data_buf_t *dbuf);
162 162
163 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
164 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
165 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
166 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
167 167
168 168 static int stmf_irport_compare(const void *void_irport1,
169 169 const void *void_irport2);
170 170 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
171 171 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
172 172 static stmf_i_remote_port_t *stmf_irport_register(
173 173 scsi_devid_desc_t *rport_devid);
174 174 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
175 175 scsi_devid_desc_t *rport_devid);
176 176 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
177 177
178 178 static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
179 179 static void stmf_delete_itl_kstat_by_lport(char *);
180 180 static void stmf_delete_itl_kstat_by_guid(char *);
181 181 static int stmf_itl_kstat_compare(const void*, const void*);
182 182 static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
183 183 static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
184 184 char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
185 185
186 186 extern struct mod_ops mod_driverops;
187 187
188 188 /* =====[ Tunables ]===== */
189 189 /* Internal tracing */
190 190 volatile int stmf_trace_on = 1;
191 191 volatile int stmf_trace_buf_size = (1 * 1024 * 1024);
192 192 /*
193 193 * The reason default task timeout is 75 is because we want the
194 194 * host to timeout 1st and mostly host timeout is 60 seconds.
195 195 */
196 196 volatile int stmf_default_task_timeout = 75;
197 197 /*
198 198 * Setting this to one means, you are responsible for config load and keeping
199 199 * things in sync with persistent database.
200 200 */
201 201 volatile int stmf_allow_modunload = 0;
202 202
203 203 volatile int stmf_max_nworkers = 256;
204 204 volatile int stmf_min_nworkers = 4;
205 205 volatile int stmf_worker_scale_down_delay = 20;
206 206
207 207 /* === [ Debugging and fault injection ] === */
208 208 #ifdef DEBUG
209 209 volatile int stmf_drop_task_counter = 0;
210 210 volatile int stmf_drop_buf_counter = 0;
211 211
212 212 #endif
213 213
214 214 stmf_state_t stmf_state;
215 215 static stmf_lu_t *dlun0;
216 216
217 217 static uint8_t stmf_first_zero[] =
218 218 { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
219 219 static uint8_t stmf_first_one[] =
220 220 { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
221 221
222 222 static kmutex_t trace_buf_lock;
223 223 static int trace_buf_size;
224 224 static int trace_buf_curndx;
225 225 caddr_t stmf_trace_buf;
226 226
227 227 static enum {
228 228 STMF_WORKERS_DISABLED = 0,
229 229 STMF_WORKERS_ENABLING,
230 230 STMF_WORKERS_ENABLED
231 231 } stmf_workers_state = STMF_WORKERS_DISABLED;
232 232 static int stmf_i_max_nworkers;
233 233 static int stmf_i_min_nworkers;
234 234 static int stmf_nworkers_cur; /* # of workers currently running */
235 235 static int stmf_nworkers_needed; /* # of workers need to be running */
236 236 static int stmf_worker_sel_counter = 0;
237 237 static uint32_t stmf_cur_ntasks = 0;
238 238 static clock_t stmf_wm_last = 0;
239 239 /*
240 240 * This is equal to stmf_nworkers_cur while we are increasing # workers and
241 241 * stmf_nworkers_needed while we are decreasing the worker count.
242 242 */
243 243 static int stmf_nworkers_accepting_cmds;
244 244 static stmf_worker_t *stmf_workers = NULL;
245 245 static clock_t stmf_worker_mgmt_delay = 2;
246 246 static clock_t stmf_worker_scale_down_timer = 0;
247 247 static int stmf_worker_scale_down_qd = 0;
248 248
249 249 static struct cb_ops stmf_cb_ops = {
250 250 stmf_open, /* open */
251 251 stmf_close, /* close */
252 252 nodev, /* strategy */
253 253 nodev, /* print */
254 254 nodev, /* dump */
255 255 nodev, /* read */
256 256 nodev, /* write */
257 257 stmf_ioctl, /* ioctl */
258 258 nodev, /* devmap */
259 259 nodev, /* mmap */
260 260 nodev, /* segmap */
261 261 nochpoll, /* chpoll */
262 262 ddi_prop_op, /* cb_prop_op */
263 263 0, /* streamtab */
264 264 D_NEW | D_MP, /* cb_flag */
265 265 CB_REV, /* rev */
266 266 nodev, /* aread */
267 267 nodev /* awrite */
268 268 };
269 269
270 270 static struct dev_ops stmf_ops = {
271 271 DEVO_REV,
272 272 0,
273 273 stmf_getinfo,
274 274 nulldev, /* identify */
275 275 nulldev, /* probe */
276 276 stmf_attach,
277 277 stmf_detach,
278 278 nodev, /* reset */
279 279 &stmf_cb_ops,
280 280 NULL, /* bus_ops */
281 281 NULL /* power */
282 282 };
283 283
284 284 #define STMF_NAME "COMSTAR STMF"
285 285 #define STMF_MODULE_NAME "stmf"
286 286
287 287 static struct modldrv modldrv = {
288 288 &mod_driverops,
289 289 STMF_NAME,
290 290 &stmf_ops
291 291 };
292 292
293 293 static struct modlinkage modlinkage = {
294 294 MODREV_1,
295 295 &modldrv,
296 296 NULL
297 297 };
298 298
/*
 * Module load entry point (_init(9E)).
 *
 * Installs the driver with the system, then initializes all global STMF
 * state: the trace buffer, the global state structure and its locks, the
 * remote-port and ITL-kstat AVL trees, the lport/rport instance-ID spaces,
 * and the view/service/worker subsystems.  The teardown in _fini() undoes
 * these steps in reverse.
 *
 * NOTE(review): mod_install() runs before the state, locks, and trace
 * buffer are initialized; presumably attach/open cannot race this during
 * module load — confirm before reordering.
 *
 * Returns 0 on success or the non-zero error from mod_install(9F).
 */
int
_init(void)
{
	int ret;

	ret = mod_install(&modlinkage);
	if (ret)
		return (ret);
	/* Allocate and arm the internal trace ring used by stmf_trace(). */
	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
	trace_buf_size = stmf_trace_buf_size;
	trace_buf_curndx = 0;
	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
	bzero(&stmf_state, sizeof (stmf_state_t));
	/* STMF service is off by default */
	stmf_state.stmf_service_running = 0;
	/* default lu/lport states are online */
	stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
	stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* Seed session IDs from lbolt so they differ across module loads. */
	stmf_session_counter = (uint64_t)ddi_get_lbolt();
	avl_create(&stmf_state.stmf_irportlist,
	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
	    offsetof(stmf_i_remote_port_t, irport_ln));
	stmf_state.stmf_ilport_inst_space =
	    id_space_create("lport-instances", 0, MAX_ILPORT);
	stmf_state.stmf_irport_inst_space =
	    id_space_create("rport-instances", 0, MAX_IRPORT);
	avl_create(&stmf_state.stmf_itl_kstat_list,
	    stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
	    offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
	stmf_view_init();
	stmf_svc_init();
	stmf_dlun_init();
	return (ret);
}
335 335
336 336 int
337 337 _fini(void)
338 338 {
339 339 int ret;
340 340 stmf_i_remote_port_t *irport;
341 341 stmf_i_itl_kstat_t *ks_itl;
342 342 void *avl_dest_cookie = NULL;
343 343
344 344 if (stmf_state.stmf_service_running)
345 345 return (EBUSY);
346 346 if ((!stmf_allow_modunload) &&
347 347 (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
348 348 return (EBUSY);
349 349 }
350 350 if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
351 351 return (EBUSY);
352 352 }
353 353 if (stmf_dlun_fini() != STMF_SUCCESS)
354 354 return (EBUSY);
355 355 if (stmf_worker_fini() != STMF_SUCCESS) {
356 356 stmf_dlun_init();
357 357 return (EBUSY);
358 358 }
359 359 if (stmf_svc_fini() != STMF_SUCCESS) {
360 360 stmf_dlun_init();
361 361 stmf_worker_init();
362 362 return (EBUSY);
363 363 }
364 364
365 365 ret = mod_remove(&modlinkage);
366 366 if (ret) {
367 367 stmf_svc_init();
368 368 stmf_dlun_init();
369 369 stmf_worker_init();
370 370 return (ret);
371 371 }
372 372
373 373 stmf_view_clear_config();
374 374
375 375 while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
376 376 &avl_dest_cookie)) != NULL)
377 377 stmf_irport_destroy(irport);
378 378 avl_destroy(&stmf_state.stmf_irportlist);
379 379 id_space_destroy(stmf_state.stmf_ilport_inst_space);
380 380 id_space_destroy(stmf_state.stmf_irport_inst_space);
381 381
382 382 avl_dest_cookie = NULL;
383 383 while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
384 384 &avl_dest_cookie)) != NULL) {
385 385 stmf_teardown_itl_kstats(ks_itl);
386 386 kmem_free(ks_itl, sizeof (ks_itl));
387 387 }
388 388 avl_destroy(&stmf_state.stmf_itl_kstat_list);
389 389
390 390 kmem_free(stmf_trace_buf, stmf_trace_buf_size);
391 391 mutex_destroy(&trace_buf_lock);
392 392 mutex_destroy(&stmf_state.stmf_lock);
393 393 cv_destroy(&stmf_state.stmf_cv);
394 394 return (ret);
395 395 }
396 396
/*
 * Module information entry point (_info(9E)); reports the module's
 * linkage description via mod_info(9F) for modinfo(1M).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
402 402
403 403 /* ARGSUSED */
404 404 static int
405 405 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
406 406 {
407 407 switch (cmd) {
408 408 case DDI_INFO_DEVT2DEVINFO:
409 409 *result = stmf_state.stmf_dip;
410 410 break;
411 411 case DDI_INFO_DEVT2INSTANCE:
412 412 *result =
413 413 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
414 414 break;
415 415 default:
416 416 return (DDI_FAILURE);
417 417 }
418 418
419 419 return (DDI_SUCCESS);
420 420 }
421 421
422 422 static int
423 423 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
424 424 {
425 425 switch (cmd) {
426 426 case DDI_ATTACH:
427 427 stmf_state.stmf_dip = dip;
428 428
429 429 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
430 430 DDI_NT_STMF, 0) != DDI_SUCCESS) {
431 431 break;
432 432 }
433 433 ddi_report_dev(dip);
434 434 return (DDI_SUCCESS);
435 435 }
436 436
437 437 return (DDI_FAILURE);
438 438 }
439 439
440 440 static int
441 441 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
442 442 {
443 443 switch (cmd) {
444 444 case DDI_DETACH:
445 445 ddi_remove_minor_node(dip, 0);
446 446 return (DDI_SUCCESS);
447 447 }
448 448
449 449 return (DDI_FAILURE);
450 450 }
451 451
452 452 /* ARGSUSED */
453 453 static int
454 454 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
455 455 {
456 456 mutex_enter(&stmf_state.stmf_lock);
457 457 if (stmf_state.stmf_exclusive_open) {
458 458 mutex_exit(&stmf_state.stmf_lock);
459 459 return (EBUSY);
460 460 }
461 461 if (flag & FEXCL) {
462 462 if (stmf_state.stmf_opened) {
463 463 mutex_exit(&stmf_state.stmf_lock);
464 464 return (EBUSY);
465 465 }
466 466 stmf_state.stmf_exclusive_open = 1;
467 467 }
468 468 stmf_state.stmf_opened = 1;
469 469 mutex_exit(&stmf_state.stmf_lock);
470 470 return (0);
471 471 }
472 472
473 473 /* ARGSUSED */
474 474 static int
475 475 stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
476 476 {
477 477 mutex_enter(&stmf_state.stmf_lock);
478 478 stmf_state.stmf_opened = 0;
479 479 if (stmf_state.stmf_exclusive_open &&
480 480 (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
481 481 stmf_state.stmf_config_state = STMF_CONFIG_NONE;
482 482 stmf_delete_all_ppds();
483 483 stmf_view_clear_config();
484 484 stmf_view_init();
485 485 }
486 486 stmf_state.stmf_exclusive_open = 0;
487 487 mutex_exit(&stmf_state.stmf_lock);
488 488 return (0);
489 489 }
490 490
491 491 int
492 492 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
493 493 void **ibuf, void **obuf)
494 494 {
495 495 int ret;
496 496
497 497 *ibuf = NULL;
498 498 *obuf = NULL;
499 499 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
500 500
501 501 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
502 502 if (ret)
503 503 return (EFAULT);
504 504 if ((*iocd)->stmf_version != STMF_VERSION_1) {
505 505 ret = EINVAL;
506 506 goto copyin_iocdata_done;
507 507 }
508 508 if ((*iocd)->stmf_ibuf_size) {
509 509 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
510 510 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
511 511 *ibuf, (*iocd)->stmf_ibuf_size, mode);
512 512 }
513 513 if ((*iocd)->stmf_obuf_size)
514 514 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
515 515
516 516 if (ret == 0)
517 517 return (0);
518 518 ret = EFAULT;
519 519 copyin_iocdata_done:;
520 520 if (*obuf) {
521 521 kmem_free(*obuf, (*iocd)->stmf_obuf_size);
522 522 *obuf = NULL;
523 523 }
524 524 if (*ibuf) {
525 525 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
526 526 *ibuf = NULL;
527 527 }
528 528 kmem_free(*iocd, sizeof (stmf_iocdata_t));
529 529 return (ret);
530 530 }
531 531
532 532 int
533 533 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
534 534 {
535 535 int ret;
536 536
537 537 if (iocd->stmf_obuf_size) {
538 538 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
539 539 iocd->stmf_obuf_size, mode);
540 540 if (ret)
541 541 return (EFAULT);
542 542 }
543 543 ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
544 544 if (ret)
545 545 return (EFAULT);
546 546 return (0);
547 547 }
548 548
549 549 /* ARGSUSED */
550 550 static int
551 551 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
552 552 cred_t *credp, int *rval)
553 553 {
554 554 stmf_iocdata_t *iocd;
555 555 void *ibuf = NULL, *obuf = NULL;
556 556 slist_lu_t *luid_list;
557 557 slist_target_port_t *lportid_list;
558 558 stmf_i_lu_t *ilu;
559 559 stmf_i_local_port_t *ilport;
560 560 stmf_i_scsi_session_t *iss;
561 561 slist_scsi_session_t *iss_list;
562 562 sioc_lu_props_t *lup;
563 563 sioc_target_port_props_t *lportp;
564 564 stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
565 565 uint64_t *ppi_token = NULL;
566 566 uint8_t *p_id, *id;
567 567 stmf_state_desc_t *std;
568 568 stmf_status_t ctl_ret;
569 569 stmf_state_change_info_t ssi;
570 570 int ret = 0;
571 571 uint32_t n;
572 572 int i;
573 573 stmf_group_op_data_t *grp_entry;
574 574 stmf_group_name_t *grpname;
575 575 stmf_view_op_entry_t *ve;
576 576 stmf_id_type_t idtype;
577 577 stmf_id_data_t *id_entry;
578 578 stmf_id_list_t *id_list;
579 579 stmf_view_entry_t *view_entry;
580 580 stmf_set_props_t *stmf_set_props;
581 581 uint32_t veid;
582 582 if ((cmd & 0xff000000) != STMF_IOCTL) {
583 583 return (ENOTTY);
584 584 }
585 585
586 586 if (drv_priv(credp) != 0) {
587 587 return (EPERM);
588 588 }
589 589
590 590 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
591 591 if (ret)
592 592 return (ret);
593 593 iocd->stmf_error = 0;
594 594
595 595 switch (cmd) {
596 596 case STMF_IOCTL_LU_LIST:
597 597 /* retrieves both registered/unregistered */
598 598 mutex_enter(&stmf_state.stmf_lock);
599 599 id_list = &stmf_state.stmf_luid_list;
600 600 n = min(id_list->id_count,
601 601 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
602 602 iocd->stmf_obuf_max_nentries = id_list->id_count;
603 603 luid_list = (slist_lu_t *)obuf;
604 604 id_entry = id_list->idl_head;
605 605 for (i = 0; i < n; i++) {
606 606 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
607 607 id_entry = id_entry->id_next;
608 608 }
609 609
610 610 n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
611 611 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
612 612 id = (uint8_t *)ilu->ilu_lu->lu_id;
613 613 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
614 614 iocd->stmf_obuf_max_nentries++;
615 615 if (i < n) {
616 616 bcopy(id + 4, luid_list[i].lu_guid,
617 617 sizeof (slist_lu_t));
618 618 i++;
619 619 }
620 620 }
621 621 }
622 622 iocd->stmf_obuf_nentries = i;
623 623 mutex_exit(&stmf_state.stmf_lock);
624 624 break;
625 625
626 626 case STMF_IOCTL_REG_LU_LIST:
627 627 mutex_enter(&stmf_state.stmf_lock);
628 628 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
629 629 n = min(stmf_state.stmf_nlus,
630 630 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
631 631 iocd->stmf_obuf_nentries = n;
632 632 ilu = stmf_state.stmf_ilulist;
633 633 luid_list = (slist_lu_t *)obuf;
634 634 for (i = 0; i < n; i++) {
635 635 uint8_t *id;
636 636 id = (uint8_t *)ilu->ilu_lu->lu_id;
637 637 bcopy(id + 4, luid_list[i].lu_guid, 16);
638 638 ilu = ilu->ilu_next;
639 639 }
640 640 mutex_exit(&stmf_state.stmf_lock);
641 641 break;
642 642
643 643 case STMF_IOCTL_VE_LU_LIST:
644 644 mutex_enter(&stmf_state.stmf_lock);
645 645 id_list = &stmf_state.stmf_luid_list;
646 646 n = min(id_list->id_count,
647 647 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
648 648 iocd->stmf_obuf_max_nentries = id_list->id_count;
649 649 iocd->stmf_obuf_nentries = n;
650 650 luid_list = (slist_lu_t *)obuf;
651 651 id_entry = id_list->idl_head;
652 652 for (i = 0; i < n; i++) {
653 653 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
654 654 id_entry = id_entry->id_next;
655 655 }
656 656 mutex_exit(&stmf_state.stmf_lock);
657 657 break;
658 658
659 659 case STMF_IOCTL_TARGET_PORT_LIST:
660 660 mutex_enter(&stmf_state.stmf_lock);
661 661 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
662 662 n = min(stmf_state.stmf_nlports,
663 663 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
664 664 iocd->stmf_obuf_nentries = n;
665 665 ilport = stmf_state.stmf_ilportlist;
666 666 lportid_list = (slist_target_port_t *)obuf;
667 667 for (i = 0; i < n; i++) {
668 668 uint8_t *id;
669 669 id = (uint8_t *)ilport->ilport_lport->lport_id;
670 670 bcopy(id, lportid_list[i].target, id[3] + 4);
671 671 ilport = ilport->ilport_next;
672 672 }
673 673 mutex_exit(&stmf_state.stmf_lock);
674 674 break;
675 675
676 676 case STMF_IOCTL_SESSION_LIST:
677 677 p_id = (uint8_t *)ibuf;
678 678 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
679 679 (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
680 680 ret = EINVAL;
681 681 break;
682 682 }
683 683 mutex_enter(&stmf_state.stmf_lock);
684 684 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
685 685 ilport->ilport_next) {
686 686 uint8_t *id;
687 687 id = (uint8_t *)ilport->ilport_lport->lport_id;
688 688 if ((p_id[3] == id[3]) &&
689 689 (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
690 690 break;
691 691 }
692 692 }
693 693 if (ilport == NULL) {
694 694 mutex_exit(&stmf_state.stmf_lock);
695 695 ret = ENOENT;
696 696 break;
697 697 }
698 698 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
699 699 n = min(ilport->ilport_nsessions,
700 700 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
701 701 iocd->stmf_obuf_nentries = n;
702 702 iss = ilport->ilport_ss_list;
703 703 iss_list = (slist_scsi_session_t *)obuf;
704 704 for (i = 0; i < n; i++) {
705 705 uint8_t *id;
706 706 id = (uint8_t *)iss->iss_ss->ss_rport_id;
707 707 bcopy(id, iss_list[i].initiator, id[3] + 4);
708 708 iss_list[i].creation_time = (uint32_t)
709 709 iss->iss_creation_time;
710 710 if (iss->iss_ss->ss_rport_alias) {
711 711 (void) strncpy(iss_list[i].alias,
712 712 iss->iss_ss->ss_rport_alias, 255);
713 713 iss_list[i].alias[255] = 0;
714 714 } else {
715 715 iss_list[i].alias[0] = 0;
716 716 }
717 717 iss = iss->iss_next;
718 718 }
719 719 mutex_exit(&stmf_state.stmf_lock);
720 720 break;
721 721
722 722 case STMF_IOCTL_GET_LU_PROPERTIES:
723 723 p_id = (uint8_t *)ibuf;
724 724 if ((iocd->stmf_ibuf_size < 16) ||
725 725 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
726 726 (p_id[0] == 0)) {
727 727 ret = EINVAL;
728 728 break;
729 729 }
730 730 mutex_enter(&stmf_state.stmf_lock);
731 731 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
732 732 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
733 733 break;
734 734 }
735 735 if (ilu == NULL) {
736 736 mutex_exit(&stmf_state.stmf_lock);
737 737 ret = ENOENT;
738 738 break;
739 739 }
740 740 lup = (sioc_lu_props_t *)obuf;
741 741 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
742 742 lup->lu_state = ilu->ilu_state & 0x0f;
743 743 lup->lu_present = 1; /* XXX */
744 744 (void) strncpy(lup->lu_provider_name,
745 745 ilu->ilu_lu->lu_lp->lp_name, 255);
746 746 lup->lu_provider_name[254] = 0;
747 747 if (ilu->ilu_lu->lu_alias) {
748 748 (void) strncpy(lup->lu_alias,
749 749 ilu->ilu_lu->lu_alias, 255);
750 750 lup->lu_alias[255] = 0;
751 751 } else {
752 752 lup->lu_alias[0] = 0;
753 753 }
754 754 mutex_exit(&stmf_state.stmf_lock);
755 755 break;
756 756
757 757 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
758 758 p_id = (uint8_t *)ibuf;
759 759 if ((p_id == NULL) ||
760 760 (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
761 761 (iocd->stmf_obuf_size <
762 762 sizeof (sioc_target_port_props_t))) {
763 763 ret = EINVAL;
764 764 break;
765 765 }
766 766 mutex_enter(&stmf_state.stmf_lock);
767 767 for (ilport = stmf_state.stmf_ilportlist; ilport;
768 768 ilport = ilport->ilport_next) {
769 769 uint8_t *id;
770 770 id = (uint8_t *)ilport->ilport_lport->lport_id;
771 771 if ((p_id[3] == id[3]) &&
772 772 (bcmp(p_id+4, id+4, id[3]) == 0))
773 773 break;
774 774 }
775 775 if (ilport == NULL) {
776 776 mutex_exit(&stmf_state.stmf_lock);
777 777 ret = ENOENT;
778 778 break;
779 779 }
780 780 lportp = (sioc_target_port_props_t *)obuf;
781 781 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
782 782 ilport->ilport_lport->lport_id->ident_length + 4);
783 783 lportp->tgt_state = ilport->ilport_state & 0x0f;
784 784 lportp->tgt_present = 1; /* XXX */
785 785 (void) strncpy(lportp->tgt_provider_name,
786 786 ilport->ilport_lport->lport_pp->pp_name, 255);
787 787 lportp->tgt_provider_name[254] = 0;
788 788 if (ilport->ilport_lport->lport_alias) {
789 789 (void) strncpy(lportp->tgt_alias,
790 790 ilport->ilport_lport->lport_alias, 255);
791 791 lportp->tgt_alias[255] = 0;
792 792 } else {
793 793 lportp->tgt_alias[0] = 0;
794 794 }
795 795 mutex_exit(&stmf_state.stmf_lock);
796 796 break;
797 797
798 798 case STMF_IOCTL_SET_STMF_STATE:
799 799 if ((ibuf == NULL) ||
800 800 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
801 801 ret = EINVAL;
802 802 break;
803 803 }
804 804 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
805 805 break;
806 806
807 807 case STMF_IOCTL_GET_STMF_STATE:
808 808 if ((obuf == NULL) ||
809 809 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
810 810 ret = EINVAL;
811 811 break;
812 812 }
813 813 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
814 814 break;
815 815
816 816 case STMF_IOCTL_SET_ALUA_STATE:
817 817 if ((ibuf == NULL) ||
818 818 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
819 819 ret = EINVAL;
820 820 break;
821 821 }
822 822 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
823 823 break;
824 824
825 825 case STMF_IOCTL_GET_ALUA_STATE:
826 826 if ((obuf == NULL) ||
827 827 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
828 828 ret = EINVAL;
829 829 break;
830 830 }
831 831 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
832 832 break;
833 833
834 834 case STMF_IOCTL_SET_LU_STATE:
835 835 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
836 836 ssi.st_additional_info = NULL;
837 837 std = (stmf_state_desc_t *)ibuf;
838 838 if ((ibuf == NULL) ||
839 839 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
840 840 ret = EINVAL;
841 841 break;
842 842 }
843 843 p_id = std->ident;
844 844 mutex_enter(&stmf_state.stmf_lock);
845 845 if (stmf_state.stmf_inventory_locked) {
846 846 mutex_exit(&stmf_state.stmf_lock);
847 847 ret = EBUSY;
848 848 break;
849 849 }
850 850 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
851 851 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
852 852 break;
853 853 }
854 854 if (ilu == NULL) {
855 855 mutex_exit(&stmf_state.stmf_lock);
856 856 ret = ENOENT;
857 857 break;
858 858 }
859 859 stmf_state.stmf_inventory_locked = 1;
860 860 mutex_exit(&stmf_state.stmf_lock);
861 861 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
862 862 STMF_CMD_LU_OFFLINE;
863 863 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
864 864 if (ctl_ret == STMF_ALREADY)
865 865 ret = 0;
866 866 else if (ctl_ret == STMF_BUSY)
867 867 ret = EBUSY;
868 868 else if (ctl_ret != STMF_SUCCESS)
869 869 ret = EIO;
870 870 mutex_enter(&stmf_state.stmf_lock);
871 871 stmf_state.stmf_inventory_locked = 0;
872 872 mutex_exit(&stmf_state.stmf_lock);
873 873 break;
874 874
875 875 case STMF_IOCTL_SET_STMF_PROPS:
876 876 if ((ibuf == NULL) ||
877 877 (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
878 878 ret = EINVAL;
879 879 break;
880 880 }
881 881 stmf_set_props = (stmf_set_props_t *)ibuf;
882 882 mutex_enter(&stmf_state.stmf_lock);
883 883 if ((stmf_set_props->default_lu_state_value ==
884 884 STMF_STATE_OFFLINE) ||
885 885 (stmf_set_props->default_lu_state_value ==
886 886 STMF_STATE_ONLINE)) {
887 887 stmf_state.stmf_default_lu_state =
888 888 stmf_set_props->default_lu_state_value;
889 889 }
890 890 if ((stmf_set_props->default_target_state_value ==
891 891 STMF_STATE_OFFLINE) ||
892 892 (stmf_set_props->default_target_state_value ==
893 893 STMF_STATE_ONLINE)) {
894 894 stmf_state.stmf_default_lport_state =
895 895 stmf_set_props->default_target_state_value;
896 896 }
897 897
898 898 mutex_exit(&stmf_state.stmf_lock);
899 899 break;
900 900
901 901 case STMF_IOCTL_SET_TARGET_PORT_STATE:
902 902 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
903 903 ssi.st_additional_info = NULL;
904 904 std = (stmf_state_desc_t *)ibuf;
905 905 if ((ibuf == NULL) ||
906 906 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
907 907 ret = EINVAL;
908 908 break;
909 909 }
910 910 p_id = std->ident;
911 911 mutex_enter(&stmf_state.stmf_lock);
912 912 if (stmf_state.stmf_inventory_locked) {
913 913 mutex_exit(&stmf_state.stmf_lock);
914 914 ret = EBUSY;
915 915 break;
916 916 }
917 917 for (ilport = stmf_state.stmf_ilportlist; ilport;
918 918 ilport = ilport->ilport_next) {
919 919 uint8_t *id;
920 920 id = (uint8_t *)ilport->ilport_lport->lport_id;
921 921 if ((id[3] == p_id[3]) &&
922 922 (bcmp(id+4, p_id+4, id[3]) == 0)) {
923 923 break;
924 924 }
925 925 }
926 926 if (ilport == NULL) {
927 927 mutex_exit(&stmf_state.stmf_lock);
928 928 ret = ENOENT;
929 929 break;
930 930 }
931 931 stmf_state.stmf_inventory_locked = 1;
932 932 mutex_exit(&stmf_state.stmf_lock);
933 933 cmd = (std->state == STMF_STATE_ONLINE) ?
934 934 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
935 935 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
936 936 if (ctl_ret == STMF_ALREADY)
937 937 ret = 0;
938 938 else if (ctl_ret == STMF_BUSY)
939 939 ret = EBUSY;
940 940 else if (ctl_ret != STMF_SUCCESS)
941 941 ret = EIO;
942 942 mutex_enter(&stmf_state.stmf_lock);
943 943 stmf_state.stmf_inventory_locked = 0;
944 944 mutex_exit(&stmf_state.stmf_lock);
945 945 break;
946 946
947 947 case STMF_IOCTL_ADD_HG_ENTRY:
948 948 idtype = STMF_ID_TYPE_HOST;
949 949 /* FALLTHROUGH */
950 950 case STMF_IOCTL_ADD_TG_ENTRY:
951 951 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
952 952 ret = EACCES;
953 953 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
954 954 break;
955 955 }
956 956 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
957 957 idtype = STMF_ID_TYPE_TARGET;
958 958 }
959 959 grp_entry = (stmf_group_op_data_t *)ibuf;
960 960 if ((ibuf == NULL) ||
961 961 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
962 962 ret = EINVAL;
963 963 break;
964 964 }
965 965 if (grp_entry->group.name[0] == '*') {
966 966 ret = EINVAL;
967 967 break; /* not allowed */
968 968 }
969 969 mutex_enter(&stmf_state.stmf_lock);
970 970 ret = stmf_add_group_member(grp_entry->group.name,
971 971 grp_entry->group.name_size,
972 972 grp_entry->ident + 4,
973 973 grp_entry->ident[3],
974 974 idtype,
975 975 &iocd->stmf_error);
976 976 mutex_exit(&stmf_state.stmf_lock);
977 977 break;
978 978 case STMF_IOCTL_REMOVE_HG_ENTRY:
979 979 idtype = STMF_ID_TYPE_HOST;
980 980 /* FALLTHROUGH */
981 981 case STMF_IOCTL_REMOVE_TG_ENTRY:
982 982 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
983 983 ret = EACCES;
984 984 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
985 985 break;
986 986 }
987 987 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
988 988 idtype = STMF_ID_TYPE_TARGET;
989 989 }
990 990 grp_entry = (stmf_group_op_data_t *)ibuf;
991 991 if ((ibuf == NULL) ||
992 992 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
993 993 ret = EINVAL;
994 994 break;
995 995 }
996 996 if (grp_entry->group.name[0] == '*') {
997 997 ret = EINVAL;
998 998 break; /* not allowed */
999 999 }
1000 1000 mutex_enter(&stmf_state.stmf_lock);
1001 1001 ret = stmf_remove_group_member(grp_entry->group.name,
1002 1002 grp_entry->group.name_size,
1003 1003 grp_entry->ident + 4,
1004 1004 grp_entry->ident[3],
1005 1005 idtype,
1006 1006 &iocd->stmf_error);
1007 1007 mutex_exit(&stmf_state.stmf_lock);
1008 1008 break;
1009 1009 case STMF_IOCTL_CREATE_HOST_GROUP:
1010 1010 idtype = STMF_ID_TYPE_HOST_GROUP;
1011 1011 /* FALLTHROUGH */
1012 1012 case STMF_IOCTL_CREATE_TARGET_GROUP:
1013 1013 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1014 1014 ret = EACCES;
1015 1015 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1016 1016 break;
1017 1017 }
1018 1018 grpname = (stmf_group_name_t *)ibuf;
1019 1019
1020 1020 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1021 1021 idtype = STMF_ID_TYPE_TARGET_GROUP;
1022 1022 if ((ibuf == NULL) ||
1023 1023 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1024 1024 ret = EINVAL;
1025 1025 break;
1026 1026 }
1027 1027 if (grpname->name[0] == '*') {
1028 1028 ret = EINVAL;
1029 1029 break; /* not allowed */
1030 1030 }
1031 1031 mutex_enter(&stmf_state.stmf_lock);
1032 1032 ret = stmf_add_group(grpname->name,
1033 1033 grpname->name_size, idtype, &iocd->stmf_error);
1034 1034 mutex_exit(&stmf_state.stmf_lock);
1035 1035 break;
1036 1036 case STMF_IOCTL_REMOVE_HOST_GROUP:
1037 1037 idtype = STMF_ID_TYPE_HOST_GROUP;
1038 1038 /* FALLTHROUGH */
1039 1039 case STMF_IOCTL_REMOVE_TARGET_GROUP:
1040 1040 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1041 1041 ret = EACCES;
1042 1042 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1043 1043 break;
1044 1044 }
1045 1045 grpname = (stmf_group_name_t *)ibuf;
1046 1046 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1047 1047 idtype = STMF_ID_TYPE_TARGET_GROUP;
1048 1048 if ((ibuf == NULL) ||
1049 1049 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1050 1050 ret = EINVAL;
1051 1051 break;
1052 1052 }
1053 1053 if (grpname->name[0] == '*') {
1054 1054 ret = EINVAL;
1055 1055 break; /* not allowed */
1056 1056 }
1057 1057 mutex_enter(&stmf_state.stmf_lock);
1058 1058 ret = stmf_remove_group(grpname->name,
1059 1059 grpname->name_size, idtype, &iocd->stmf_error);
1060 1060 mutex_exit(&stmf_state.stmf_lock);
1061 1061 break;
1062 1062 case STMF_IOCTL_VALIDATE_VIEW:
1063 1063 case STMF_IOCTL_ADD_VIEW_ENTRY:
1064 1064 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1065 1065 ret = EACCES;
1066 1066 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1067 1067 break;
1068 1068 }
1069 1069 ve = (stmf_view_op_entry_t *)ibuf;
1070 1070 if ((ibuf == NULL) ||
1071 1071 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1072 1072 ret = EINVAL;
1073 1073 break;
1074 1074 }
1075 1075 if (!ve->ve_lu_number_valid)
1076 1076 ve->ve_lu_nbr[2] = 0xFF;
1077 1077 if (ve->ve_all_hosts) {
1078 1078 ve->ve_host_group.name[0] = '*';
1079 1079 ve->ve_host_group.name_size = 1;
1080 1080 }
1081 1081 if (ve->ve_all_targets) {
1082 1082 ve->ve_target_group.name[0] = '*';
1083 1083 ve->ve_target_group.name_size = 1;
1084 1084 }
1085 1085 if (ve->ve_ndx_valid)
1086 1086 veid = ve->ve_ndx;
1087 1087 else
1088 1088 veid = 0xffffffff;
1089 1089 mutex_enter(&stmf_state.stmf_lock);
1090 1090 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1091 1091 ret = stmf_add_ve(ve->ve_host_group.name,
1092 1092 ve->ve_host_group.name_size,
1093 1093 ve->ve_target_group.name,
1094 1094 ve->ve_target_group.name_size,
1095 1095 ve->ve_guid,
1096 1096 &veid,
1097 1097 ve->ve_lu_nbr,
1098 1098 &iocd->stmf_error);
1099 1099 } else { /* STMF_IOCTL_VALIDATE_VIEW */
1100 1100 ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1101 1101 ve->ve_host_group.name_size,
1102 1102 ve->ve_target_group.name,
1103 1103 ve->ve_target_group.name_size,
1104 1104 ve->ve_lu_nbr,
1105 1105 &iocd->stmf_error);
1106 1106 }
1107 1107 mutex_exit(&stmf_state.stmf_lock);
1108 1108 if (ret == 0 &&
1109 1109 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1110 1110 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1111 1111 stmf_view_op_entry_t *ve_ret =
1112 1112 (stmf_view_op_entry_t *)obuf;
1113 1113 iocd->stmf_obuf_nentries = 1;
1114 1114 iocd->stmf_obuf_max_nentries = 1;
1115 1115 if (!ve->ve_ndx_valid) {
1116 1116 ve_ret->ve_ndx = veid;
1117 1117 ve_ret->ve_ndx_valid = 1;
1118 1118 }
1119 1119 if (!ve->ve_lu_number_valid) {
1120 1120 ve_ret->ve_lu_number_valid = 1;
1121 1121 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1122 1122 }
1123 1123 }
1124 1124 break;
1125 1125 case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1126 1126 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1127 1127 ret = EACCES;
1128 1128 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1129 1129 break;
1130 1130 }
1131 1131 ve = (stmf_view_op_entry_t *)ibuf;
1132 1132 if ((ibuf == NULL) ||
1133 1133 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1134 1134 ret = EINVAL;
1135 1135 break;
1136 1136 }
1137 1137 if (!ve->ve_ndx_valid) {
1138 1138 ret = EINVAL;
1139 1139 break;
1140 1140 }
1141 1141 mutex_enter(&stmf_state.stmf_lock);
1142 1142 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1143 1143 &iocd->stmf_error);
1144 1144 mutex_exit(&stmf_state.stmf_lock);
1145 1145 break;
1146 1146 case STMF_IOCTL_GET_HG_LIST:
1147 1147 id_list = &stmf_state.stmf_hg_list;
1148 1148 /* FALLTHROUGH */
1149 1149 case STMF_IOCTL_GET_TG_LIST:
1150 1150 if (cmd == STMF_IOCTL_GET_TG_LIST)
1151 1151 id_list = &stmf_state.stmf_tg_list;
1152 1152 mutex_enter(&stmf_state.stmf_lock);
1153 1153 iocd->stmf_obuf_max_nentries = id_list->id_count;
1154 1154 n = min(id_list->id_count,
1155 1155 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1156 1156 iocd->stmf_obuf_nentries = n;
1157 1157 id_entry = id_list->idl_head;
1158 1158 grpname = (stmf_group_name_t *)obuf;
1159 1159 for (i = 0; i < n; i++) {
1160 1160 if (id_entry->id_data[0] == '*') {
1161 1161 if (iocd->stmf_obuf_nentries > 0) {
1162 1162 iocd->stmf_obuf_nentries--;
1163 1163 }
1164 1164 id_entry = id_entry->id_next;
1165 1165 continue;
1166 1166 }
1167 1167 grpname->name_size = id_entry->id_data_size;
1168 1168 bcopy(id_entry->id_data, grpname->name,
1169 1169 id_entry->id_data_size);
1170 1170 grpname++;
1171 1171 id_entry = id_entry->id_next;
1172 1172 }
1173 1173 mutex_exit(&stmf_state.stmf_lock);
1174 1174 break;
1175 1175 case STMF_IOCTL_GET_HG_ENTRIES:
1176 1176 id_list = &stmf_state.stmf_hg_list;
1177 1177 /* FALLTHROUGH */
1178 1178 case STMF_IOCTL_GET_TG_ENTRIES:
1179 1179 grpname = (stmf_group_name_t *)ibuf;
1180 1180 if ((ibuf == NULL) ||
1181 1181 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1182 1182 ret = EINVAL;
1183 1183 break;
1184 1184 }
1185 1185 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1186 1186 id_list = &stmf_state.stmf_tg_list;
1187 1187 }
1188 1188 mutex_enter(&stmf_state.stmf_lock);
1189 1189 id_entry = stmf_lookup_id(id_list, grpname->name_size,
1190 1190 grpname->name);
1191 1191 if (!id_entry)
1192 1192 ret = ENODEV;
1193 1193 else {
1194 1194 stmf_ge_ident_t *grp_entry;
1195 1195 id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1196 1196 iocd->stmf_obuf_max_nentries = id_list->id_count;
1197 1197 n = min(id_list->id_count,
1198 1198 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1199 1199 iocd->stmf_obuf_nentries = n;
1200 1200 id_entry = id_list->idl_head;
1201 1201 grp_entry = (stmf_ge_ident_t *)obuf;
1202 1202 for (i = 0; i < n; i++) {
1203 1203 bcopy(id_entry->id_data, grp_entry->ident,
1204 1204 id_entry->id_data_size);
1205 1205 grp_entry->ident_size = id_entry->id_data_size;
1206 1206 id_entry = id_entry->id_next;
1207 1207 grp_entry++;
1208 1208 }
1209 1209 }
1210 1210 mutex_exit(&stmf_state.stmf_lock);
1211 1211 break;
1212 1212
1213 1213 case STMF_IOCTL_GET_VE_LIST:
1214 1214 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1215 1215 mutex_enter(&stmf_state.stmf_lock);
1216 1216 ve = (stmf_view_op_entry_t *)obuf;
1217 1217 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1218 1218 id_entry; id_entry = id_entry->id_next) {
1219 1219 for (view_entry = (stmf_view_entry_t *)
1220 1220 id_entry->id_impl_specific; view_entry;
1221 1221 view_entry = view_entry->ve_next) {
1222 1222 iocd->stmf_obuf_max_nentries++;
1223 1223 if (iocd->stmf_obuf_nentries >= n)
1224 1224 continue;
1225 1225 ve->ve_ndx_valid = 1;
1226 1226 ve->ve_ndx = view_entry->ve_id;
1227 1227 ve->ve_lu_number_valid = 1;
1228 1228 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1229 1229 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1230 1230 view_entry->ve_luid->id_data_size);
1231 1231 if (view_entry->ve_hg->id_data[0] == '*') {
1232 1232 ve->ve_all_hosts = 1;
1233 1233 } else {
1234 1234 bcopy(view_entry->ve_hg->id_data,
1235 1235 ve->ve_host_group.name,
1236 1236 view_entry->ve_hg->id_data_size);
1237 1237 ve->ve_host_group.name_size =
1238 1238 view_entry->ve_hg->id_data_size;
1239 1239 }
1240 1240
1241 1241 if (view_entry->ve_tg->id_data[0] == '*') {
1242 1242 ve->ve_all_targets = 1;
1243 1243 } else {
1244 1244 bcopy(view_entry->ve_tg->id_data,
1245 1245 ve->ve_target_group.name,
1246 1246 view_entry->ve_tg->id_data_size);
1247 1247 ve->ve_target_group.name_size =
1248 1248 view_entry->ve_tg->id_data_size;
1249 1249 }
1250 1250 ve++;
1251 1251 iocd->stmf_obuf_nentries++;
1252 1252 }
1253 1253 }
1254 1254 mutex_exit(&stmf_state.stmf_lock);
1255 1255 break;
1256 1256
1257 1257 case STMF_IOCTL_LU_VE_LIST:
1258 1258 p_id = (uint8_t *)ibuf;
1259 1259 if ((iocd->stmf_ibuf_size != 16) ||
1260 1260 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1261 1261 ret = EINVAL;
1262 1262 break;
1263 1263 }
1264 1264
1265 1265 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1266 1266 mutex_enter(&stmf_state.stmf_lock);
1267 1267 ve = (stmf_view_op_entry_t *)obuf;
1268 1268 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1269 1269 id_entry; id_entry = id_entry->id_next) {
1270 1270 if (bcmp(id_entry->id_data, p_id, 16) != 0)
1271 1271 continue;
1272 1272 for (view_entry = (stmf_view_entry_t *)
1273 1273 id_entry->id_impl_specific; view_entry;
1274 1274 view_entry = view_entry->ve_next) {
1275 1275 iocd->stmf_obuf_max_nentries++;
1276 1276 if (iocd->stmf_obuf_nentries >= n)
1277 1277 continue;
1278 1278 ve->ve_ndx_valid = 1;
1279 1279 ve->ve_ndx = view_entry->ve_id;
1280 1280 ve->ve_lu_number_valid = 1;
1281 1281 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1282 1282 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1283 1283 view_entry->ve_luid->id_data_size);
1284 1284 if (view_entry->ve_hg->id_data[0] == '*') {
1285 1285 ve->ve_all_hosts = 1;
1286 1286 } else {
1287 1287 bcopy(view_entry->ve_hg->id_data,
1288 1288 ve->ve_host_group.name,
1289 1289 view_entry->ve_hg->id_data_size);
1290 1290 ve->ve_host_group.name_size =
1291 1291 view_entry->ve_hg->id_data_size;
1292 1292 }
1293 1293
1294 1294 if (view_entry->ve_tg->id_data[0] == '*') {
1295 1295 ve->ve_all_targets = 1;
1296 1296 } else {
1297 1297 bcopy(view_entry->ve_tg->id_data,
1298 1298 ve->ve_target_group.name,
1299 1299 view_entry->ve_tg->id_data_size);
1300 1300 ve->ve_target_group.name_size =
1301 1301 view_entry->ve_tg->id_data_size;
1302 1302 }
1303 1303 ve++;
1304 1304 iocd->stmf_obuf_nentries++;
1305 1305 }
1306 1306 break;
1307 1307 }
1308 1308 mutex_exit(&stmf_state.stmf_lock);
1309 1309 break;
1310 1310
1311 1311 case STMF_IOCTL_LOAD_PP_DATA:
1312 1312 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1313 1313 ret = EACCES;
1314 1314 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1315 1315 break;
1316 1316 }
1317 1317 ppi = (stmf_ppioctl_data_t *)ibuf;
1318 1318 if ((ppi == NULL) ||
1319 1319 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1320 1320 ret = EINVAL;
1321 1321 break;
1322 1322 }
1323 1323 /* returned token */
1324 1324 ppi_token = (uint64_t *)obuf;
1325 1325 if ((ppi_token == NULL) ||
1326 1326 (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1327 1327 ret = EINVAL;
1328 1328 break;
1329 1329 }
1330 1330 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1331 1331 break;
1332 1332
1333 1333 case STMF_IOCTL_GET_PP_DATA:
1334 1334 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1335 1335 ret = EACCES;
1336 1336 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1337 1337 break;
1338 1338 }
1339 1339 ppi = (stmf_ppioctl_data_t *)ibuf;
1340 1340 if (ppi == NULL ||
1341 1341 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1342 1342 ret = EINVAL;
1343 1343 break;
1344 1344 }
1345 1345 ppi_out = (stmf_ppioctl_data_t *)obuf;
1346 1346 if ((ppi_out == NULL) ||
1347 1347 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1348 1348 ret = EINVAL;
1349 1349 break;
1350 1350 }
1351 1351 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1352 1352 break;
1353 1353
1354 1354 case STMF_IOCTL_CLEAR_PP_DATA:
1355 1355 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1356 1356 ret = EACCES;
1357 1357 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1358 1358 break;
1359 1359 }
1360 1360 ppi = (stmf_ppioctl_data_t *)ibuf;
1361 1361 if ((ppi == NULL) ||
1362 1362 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1363 1363 ret = EINVAL;
1364 1364 break;
1365 1365 }
1366 1366 ret = stmf_delete_ppd_ioctl(ppi);
1367 1367 break;
1368 1368
1369 1369 case STMF_IOCTL_CLEAR_TRACE:
1370 1370 stmf_trace_clear();
1371 1371 break;
1372 1372
1373 1373 case STMF_IOCTL_ADD_TRACE:
1374 1374 if (iocd->stmf_ibuf_size && ibuf) {
1375 1375 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1376 1376 stmf_trace("\nstradm", "%s\n", ibuf);
1377 1377 }
1378 1378 break;
1379 1379
1380 1380 case STMF_IOCTL_GET_TRACE_POSITION:
1381 1381 if (obuf && (iocd->stmf_obuf_size > 3)) {
1382 1382 mutex_enter(&trace_buf_lock);
1383 1383 *((int *)obuf) = trace_buf_curndx;
1384 1384 mutex_exit(&trace_buf_lock);
1385 1385 } else {
1386 1386 ret = EINVAL;
1387 1387 }
1388 1388 break;
1389 1389
1390 1390 case STMF_IOCTL_GET_TRACE:
1391 1391 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1392 1392 ret = EINVAL;
1393 1393 break;
1394 1394 }
1395 1395 i = *((int *)ibuf);
1396 1396 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1397 1397 trace_buf_size)) {
1398 1398 ret = EINVAL;
1399 1399 break;
1400 1400 }
1401 1401 mutex_enter(&trace_buf_lock);
1402 1402 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1403 1403 mutex_exit(&trace_buf_lock);
1404 1404 break;
1405 1405
1406 1406 default:
1407 1407 ret = ENOTTY;
1408 1408 }
1409 1409
1410 1410 if (ret == 0) {
1411 1411 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1412 1412 } else if (iocd->stmf_error) {
1413 1413 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1414 1414 }
1415 1415 if (obuf) {
1416 1416 kmem_free(obuf, iocd->stmf_obuf_size);
1417 1417 obuf = NULL;
1418 1418 }
1419 1419 if (ibuf) {
1420 1420 kmem_free(ibuf, iocd->stmf_ibuf_size);
1421 1421 ibuf = NULL;
1422 1422 }
1423 1423 kmem_free(iocd, sizeof (stmf_iocdata_t));
1424 1424 return (ret);
1425 1425 }
1426 1426
1427 1427 static int
1428 1428 stmf_get_service_state()
1429 1429 {
1430 1430 stmf_i_local_port_t *ilport;
1431 1431 stmf_i_lu_t *ilu;
1432 1432 int online = 0;
1433 1433 int offline = 0;
1434 1434 int onlining = 0;
1435 1435 int offlining = 0;
1436 1436
1437 1437 ASSERT(mutex_owned(&stmf_state.stmf_lock));
1438 1438 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1439 1439 ilport = ilport->ilport_next) {
1440 1440 if (ilport->ilport_state == STMF_STATE_OFFLINE)
1441 1441 offline++;
1442 1442 else if (ilport->ilport_state == STMF_STATE_ONLINE)
1443 1443 online++;
1444 1444 else if (ilport->ilport_state == STMF_STATE_ONLINING)
1445 1445 onlining++;
1446 1446 else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1447 1447 offlining++;
1448 1448 }
1449 1449
1450 1450 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1451 1451 ilu = ilu->ilu_next) {
1452 1452 if (ilu->ilu_state == STMF_STATE_OFFLINE)
1453 1453 offline++;
1454 1454 else if (ilu->ilu_state == STMF_STATE_ONLINE)
1455 1455 online++;
1456 1456 else if (ilu->ilu_state == STMF_STATE_ONLINING)
1457 1457 onlining++;
1458 1458 else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1459 1459 offlining++;
1460 1460 }
1461 1461
1462 1462 if (stmf_state.stmf_service_running) {
1463 1463 if (onlining)
1464 1464 return (STMF_STATE_ONLINING);
1465 1465 else
1466 1466 return (STMF_STATE_ONLINE);
1467 1467 }
1468 1468
1469 1469 if (offlining) {
1470 1470 return (STMF_STATE_OFFLINING);
1471 1471 }
1472 1472
1473 1473 return (STMF_STATE_OFFLINE);
1474 1474 }
1475 1475
/*
 * Set the overall STMF service state (and, during initialization, the
 * configuration state) per a user request from the ioctl path.
 *
 * std->state must be STMF_STATE_ONLINE or STMF_STATE_OFFLINE.  Returns
 * 0 on success; EACCES if the caller does not hold the exclusive open
 * (or tries to offline while config init is pending); EBUSY if the
 * inventory is locked or a state transition is already in progress;
 * EINVAL for inconsistent state/config-state combinations.
 *
 * NOTE(review): the ONLINE/OFFLINE fan-out below deliberately drops
 * stmf_lock while calling stmf_ctl() for each port/LU; the inventory is
 * pinned by stmf_inventory_locked during that window.
 */
static int
stmf_set_stmf_state(stmf_state_desc_t *std)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_state_change_info_t ssi;
	int svc_state;

	/* All stmf_ctl() calls below are tagged as user-requested. */
	ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
	ssi.st_additional_info = NULL;

	mutex_enter(&stmf_state.stmf_lock);
	/* Service state changes require the exclusive open of the node. */
	if (!stmf_state.stmf_exclusive_open) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/* Another thread is walking/altering the port/LU inventory. */
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	/* Only the two terminal states may be requested. */
	if ((std->state != STMF_STATE_ONLINE) &&
	    (std->state != STMF_STATE_OFFLINE)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EINVAL);
	}

	/* Reject requests while a previous transition is still settling. */
	svc_state = stmf_get_service_state();
	if ((svc_state == STMF_STATE_OFFLINING) ||
	    (svc_state == STMF_STATE_ONLINING)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EBUSY);
	}

	if (svc_state == STMF_STATE_OFFLINE) {
		/*
		 * Entering config-init: wipe persistent provider data and
		 * the view configuration so userland can rebuild them.
		 * Only legal while remaining offline.
		 */
		if (std->config_state == STMF_CONFIG_INIT) {
			if (std->state != STMF_STATE_OFFLINE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT;
			stmf_delete_all_ppds();
			stmf_view_clear_config();
			stmf_view_init();
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/*
		 * Finishing config-init: the only valid request here is
		 * INIT_DONE.
		 */
		if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
		    (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
			if (std->config_state != STMF_CONFIG_INIT_DONE) {
				mutex_exit(&stmf_state.stmf_lock);
				return (EINVAL);
			}
			stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
		}
		/* Already offline and offline requested: nothing to do. */
		if (std->state == STMF_STATE_OFFLINE) {
			mutex_exit(&stmf_state.stmf_lock);
			return (0);
		}
		/* Cannot go online while configuration is still in init. */
		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Going ONLINE: pin the inventory, mark the service
		 * running, then bring up each port/LU whose default state
		 * is ONLINE.  stmf_ctl() is called without stmf_lock held.
		 */
		stmf_state.stmf_inventory_locked = 1;
		stmf_state.stmf_service_running = 1;
		mutex_exit(&stmf_state.stmf_lock);

		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			if (stmf_state.stmf_default_lport_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
			    ilport->ilport_lport, &ssi);
		}

		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (stmf_state.stmf_default_lu_state !=
			    STMF_STATE_ONLINE)
				continue;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
		}
		mutex_enter(&stmf_state.stmf_lock);
		stmf_state.stmf_inventory_locked = 0;
		mutex_exit(&stmf_state.stmf_lock);
		return (0);
	}

	/* svc_state is STMF_STATE_ONLINE here */
	if ((std->state != STMF_STATE_OFFLINE) ||
	    (std->config_state == STMF_CONFIG_INIT)) {
		mutex_exit(&stmf_state.stmf_lock);
		return (EACCES);
	}

	/*
	 * Going OFFLINE: pin the inventory, mark the service stopped,
	 * then take down every port/LU that is currently ONLINE.
	 */
	stmf_state.stmf_inventory_locked = 1;
	stmf_state.stmf_service_running = 0;

	mutex_exit(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
		    ilport->ilport_lport, &ssi);
	}

	for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
	    ilu = ilu->ilu_next) {
		if (ilu->ilu_state != STMF_STATE_ONLINE)
			continue;
		(void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
	}
	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_inventory_locked = 0;
	mutex_exit(&stmf_state.stmf_lock);
	return (0);
}
1596 1596
1597 1597 static int
1598 1598 stmf_get_stmf_state(stmf_state_desc_t *std)
1599 1599 {
1600 1600 mutex_enter(&stmf_state.stmf_lock);
1601 1601 std->state = stmf_get_service_state();
1602 1602 std->config_state = stmf_state.stmf_config_state;
1603 1603 mutex_exit(&stmf_state.stmf_lock);
1604 1604
1605 1605 return (0);
1606 1606 }
1607 1607 /*
1608 1608 * handles registration message from pppt for a logical unit
1609 1609 */
1610 1610 stmf_status_t
1611 1611 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1612 1612 {
1613 1613 stmf_i_lu_provider_t *ilp;
1614 1614 stmf_lu_provider_t *lp;
1615 1615 mutex_enter(&stmf_state.stmf_lock);
1616 1616 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1617 1617 if (strcmp(msg->icrl_lu_provider_name,
1618 1618 ilp->ilp_lp->lp_name) == 0) {
1619 1619 lp = ilp->ilp_lp;
1620 1620 mutex_exit(&stmf_state.stmf_lock);
1621 1621 lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1622 1622 msg->icrl_cb_arg_len, type);
1623 1623 return (STMF_SUCCESS);
1624 1624 }
1625 1625 }
1626 1626 mutex_exit(&stmf_state.stmf_lock);
1627 1627 return (STMF_SUCCESS);
1628 1628 }
1629 1629
1630 1630 /*
1631 1631 * handles de-registration message from pppt for a logical unit
1632 1632 */
1633 1633 stmf_status_t
1634 1634 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1635 1635 {
1636 1636 stmf_i_lu_provider_t *ilp;
1637 1637 stmf_lu_provider_t *lp;
1638 1638 mutex_enter(&stmf_state.stmf_lock);
1639 1639 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1640 1640 if (strcmp(msg->icrl_lu_provider_name,
1641 1641 ilp->ilp_lp->lp_name) == 0) {
1642 1642 lp = ilp->ilp_lp;
1643 1643 mutex_exit(&stmf_state.stmf_lock);
1644 1644 lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1645 1645 STMF_MSG_LU_DEREGISTER);
1646 1646 return (STMF_SUCCESS);
1647 1647 }
1648 1648 }
1649 1649 mutex_exit(&stmf_state.stmf_lock);
1650 1650 return (STMF_SUCCESS);
1651 1651 }
1652 1652
1653 1653 /*
1654 1654 * helper function to find a task that matches a task_msgid
1655 1655 */
1656 1656 scsi_task_t *
1657 1657 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1658 1658 {
1659 1659 stmf_i_lu_t *ilu;
1660 1660 stmf_i_scsi_task_t *itask;
1661 1661
1662 1662 mutex_enter(&stmf_state.stmf_lock);
1663 1663 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1664 1664 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1665 1665 break;
1666 1666 }
1667 1667 }
1668 1668
1669 1669 if (ilu == NULL) {
1670 1670 mutex_exit(&stmf_state.stmf_lock);
1671 1671 return (NULL);
1672 1672 }
1673 1673
1674 1674 mutex_enter(&ilu->ilu_task_lock);
1675 1675 for (itask = ilu->ilu_tasks; itask != NULL;
1676 1676 itask = itask->itask_lu_next) {
1677 1677 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1678 1678 ITASK_BEING_ABORTED)) {
1679 1679 continue;
1680 1680 }
1681 1681 if (itask->itask_proxy_msg_id == task_msgid) {
1682 1682 break;
1683 1683 }
1684 1684 }
1685 1685 mutex_exit(&ilu->ilu_task_lock);
1686 1686 mutex_exit(&stmf_state.stmf_lock);
1687 1687
1688 1688 if (itask != NULL) {
1689 1689 return (itask->itask_task);
1690 1690 } else {
1691 1691 /* task not found. Likely already aborted. */
1692 1692 return (NULL);
1693 1693 }
1694 1694 }
1695 1695
1696 1696 /*
1697 1697 * message received from pppt/ic
1698 1698 */
1699 1699 stmf_status_t
1700 1700 stmf_msg_rx(stmf_ic_msg_t *msg)
1701 1701 {
1702 1702 mutex_enter(&stmf_state.stmf_lock);
1703 1703 if (stmf_state.stmf_alua_state != 1) {
1704 1704 mutex_exit(&stmf_state.stmf_lock);
1705 1705 cmn_err(CE_WARN, "stmf alua state is disabled");
1706 1706 ic_msg_free(msg);
1707 1707 return (STMF_FAILURE);
1708 1708 }
1709 1709 mutex_exit(&stmf_state.stmf_lock);
1710 1710
1711 1711 switch (msg->icm_msg_type) {
1712 1712 case STMF_ICM_REGISTER_LUN:
1713 1713 (void) stmf_ic_lu_reg(
1714 1714 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1715 1715 STMF_MSG_LU_REGISTER);
1716 1716 break;
1717 1717 case STMF_ICM_LUN_ACTIVE:
1718 1718 (void) stmf_ic_lu_reg(
1719 1719 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1720 1720 STMF_MSG_LU_ACTIVE);
1721 1721 break;
1722 1722 case STMF_ICM_DEREGISTER_LUN:
1723 1723 (void) stmf_ic_lu_dereg(
1724 1724 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1725 1725 break;
1726 1726 case STMF_ICM_SCSI_DATA:
1727 1727 (void) stmf_ic_rx_scsi_data(
1728 1728 (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1729 1729 break;
1730 1730 case STMF_ICM_SCSI_STATUS:
1731 1731 (void) stmf_ic_rx_scsi_status(
1732 1732 (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1733 1733 break;
1734 1734 case STMF_ICM_STATUS:
1735 1735 (void) stmf_ic_rx_status(
1736 1736 (stmf_ic_status_msg_t *)msg->icm_msg);
1737 1737 break;
1738 1738 default:
1739 1739 cmn_err(CE_WARN, "unknown message received %d",
1740 1740 msg->icm_msg_type);
1741 1741 ic_msg_free(msg);
1742 1742 return (STMF_FAILURE);
1743 1743 }
1744 1744 ic_msg_free(msg);
1745 1745 return (STMF_SUCCESS);
1746 1746 }
1747 1747
1748 1748 stmf_status_t
1749 1749 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1750 1750 {
1751 1751 stmf_i_local_port_t *ilport;
1752 1752
1753 1753 if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1754 1754 /* for now, ignore other message status */
1755 1755 return (STMF_SUCCESS);
1756 1756 }
1757 1757
1758 1758 if (msg->ics_status != STMF_SUCCESS) {
1759 1759 return (STMF_SUCCESS);
1760 1760 }
1761 1761
1762 1762 mutex_enter(&stmf_state.stmf_lock);
1763 1763 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1764 1764 ilport = ilport->ilport_next) {
1765 1765 if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1766 1766 ilport->ilport_proxy_registered = 1;
1767 1767 break;
1768 1768 }
1769 1769 }
1770 1770 mutex_exit(&stmf_state.stmf_lock);
1771 1771 return (STMF_SUCCESS);
1772 1772 }
1773 1773
1774 1774 /*
1775 1775 * handles scsi status message from pppt
1776 1776 */
1777 1777 stmf_status_t
1778 1778 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1779 1779 {
1780 1780 scsi_task_t *task;
1781 1781
1782 1782 /* is this a task management command */
1783 1783 if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1784 1784 return (STMF_SUCCESS);
1785 1785 }
1786 1786
1787 1787 task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1788 1788
1789 1789 if (task == NULL) {
1790 1790 return (STMF_SUCCESS);
1791 1791 }
1792 1792
1793 1793 task->task_scsi_status = msg->icss_status;
1794 1794 task->task_sense_data = msg->icss_sense;
1795 1795 task->task_sense_length = msg->icss_sense_len;
1796 1796 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1797 1797
1798 1798 return (STMF_SUCCESS);
1799 1799 }
1800 1800
/*
 * handles scsi data message from pppt
 *
 * Copies the proxied data payload into a freshly allocated
 * stmf_xfer_data_t, attaches it to the task's proxy data buffer (or a
 * newly allocated one) and kicks off the transfer to the remote port.
 * If the task cannot be found, a failure "xfer done" status is sent
 * back to pppt.  On allocation failure the task is aborted.
 */
stmf_status_t
stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
{
	stmf_i_scsi_task_t *itask;
	scsi_task_t *task;
	stmf_xfer_data_t *xd = NULL;
	stmf_data_buf_t *dbuf;
	uint32_t sz, minsz, xd_sz, asz;

	/* is this a task management command */
	if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
		return (STMF_SUCCESS);
	}

	task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
	if (task == NULL) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		/*
		 * Static so the id survives this frame; writes are
		 * serialized by stmf_lock below.
		 */
		static uint64_t data_msg_id;
		stmf_status_t ic_ret = STMF_FAILURE;
		mutex_enter(&stmf_state.stmf_lock);
		data_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/*
		 * send xfer done status to pppt
		 * for now, set the session id to 0 as we cannot
		 * ascertain it since we cannot find the task
		 */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit proxy msg");
			}
		}
		return (STMF_FAILURE);
	}

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	dbuf = itask->itask_proxy_dbuf;

	/* Account for this payload in the task's transfer bookkeeping. */
	task->task_cmd_xfer_length += msg->icsd_data_len;

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	/* Cap the transfer at the smaller of expected vs. commanded. */
	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	/*
	 * Allocate header + payload in one shot; the "- 4" presumably
	 * compensates for the placeholder buf[] bytes already counted in
	 * sizeof (*xd) -- TODO confirm against stmf_xfer_data_t layout.
	 * KM_NOSLEEP because this runs in message-receive context.
	 */
	xd_sz = msg->icsd_data_len;
	asz = xd_sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}

	xd->alloc_size = asz;
	xd->size_left = xd_sz;
	bcopy(msg->icsd_data, xd->buf, xd_sz);

	/* Clamp what we'll actually move to the capped transfer size. */
	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		/* No data buffer available; release xd and abort. */
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_FAILURE);
	}
	/* xd ownership passes to the dbuf from here on. */
	dbuf->db_lu_private = xd;
	dbuf->db_relative_offset = task->task_nbytes_transferred;
	stmf_xd_to_dbuf(dbuf, 0);

	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);
	return (STMF_SUCCESS);
}
1890 1890
/*
 * Forward a SCSI command (or task management function) to the peer
 * node over the pppt interconnect.  Requires ALUA to be enabled and
 * the local port to be proxy-registered.  Returns STMF_SUCCESS once
 * the message has been handed to ic_tx_msg(), STMF_FAILURE otherwise.
 */
stmf_status_t
stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport =
	    (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	stmf_ic_msg_t *ic_cmd_msg;
	stmf_ic_msg_status_t ic_ret;
	stmf_status_t ret = STMF_FAILURE;

	if (stmf_state.stmf_alua_state != 1) {
		cmn_err(CE_WARN, "stmf alua state is disabled");
		return (STMF_FAILURE);
	}

	if (ilport->ilport_proxy_registered == 0) {
		return (STMF_FAILURE);
	}

	/* Allocate a unique proxy message id under the global lock. */
	mutex_enter(&stmf_state.stmf_lock);
	itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
	mutex_exit(&stmf_state.stmf_lock);
	itask->itask_proxy_dbuf = dbuf;

	/*
	 * stmf will now take over the task handling for this task
	 * but it still needs to be treated differently from other
	 * default handled tasks, hence the ITASK_PROXY_TASK.
	 * If this is a task management function, we're really just
	 * duping the command to the peer. Set the TM bit so that
	 * we can recognize this on return since we won't be completing
	 * the proxied task in that case.
	 */
	if (task->task_mgmt_function) {
		itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
	} else {
		/*
		 * Set the proxy flags atomically; bail if the task is
		 * already being aborted.  Retried via CAS because other
		 * threads may update itask_flags concurrently.
		 */
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_FAILURE);
			new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	/*
	 * NOTE(review): when a dbuf is supplied only db_sglist[0] is
	 * forwarded — this appears to assume a single-segment sglist;
	 * confirm against the callers.
	 */
	if (dbuf) {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
		    itask->itask_proxy_msg_id);
	} else {
		ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
		    task, 0, NULL, itask->itask_proxy_msg_id);
	}
	if (ic_cmd_msg) {
		ic_ret = ic_tx_msg(ic_cmd_msg);
		if (ic_ret == STMF_IC_MSG_SUCCESS) {
			ret = STMF_SUCCESS;
		}
	}
	return (ret);
}
1952 1952
1953 1953
1954 1954 stmf_status_t
1955 1955 pppt_modload()
1956 1956 {
1957 1957 int error;
1958 1958
1959 1959 if (pppt_mod == NULL && ((pppt_mod =
1960 1960 ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1961 1961 cmn_err(CE_WARN, "Unable to load pppt");
1962 1962 return (STMF_FAILURE);
1963 1963 }
1964 1964
1965 1965 if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1966 1966 (stmf_ic_reg_port_msg_alloc_func_t)
1967 1967 ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1968 1968 &error)) == NULL)) {
1969 1969 cmn_err(CE_WARN,
1970 1970 "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1971 1971 return (STMF_FAILURE);
1972 1972 }
1973 1973
1974 1974
1975 1975 if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1976 1976 (stmf_ic_dereg_port_msg_alloc_func_t)
1977 1977 ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1978 1978 &error)) == NULL)) {
1979 1979 cmn_err(CE_WARN,
1980 1980 "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1981 1981 return (STMF_FAILURE);
1982 1982 }
1983 1983
1984 1984 if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1985 1985 (stmf_ic_reg_lun_msg_alloc_func_t)
1986 1986 ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1987 1987 &error)) == NULL)) {
1988 1988 cmn_err(CE_WARN,
1989 1989 "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1990 1990 return (STMF_FAILURE);
1991 1991 }
1992 1992
1993 1993 if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1994 1994 (stmf_ic_lun_active_msg_alloc_func_t)
1995 1995 ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1996 1996 &error)) == NULL)) {
1997 1997 cmn_err(CE_WARN,
1998 1998 "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1999 1999 return (STMF_FAILURE);
2000 2000 }
2001 2001
2002 2002 if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
2003 2003 (stmf_ic_dereg_lun_msg_alloc_func_t)
2004 2004 ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
2005 2005 &error)) == NULL)) {
2006 2006 cmn_err(CE_WARN,
2007 2007 "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
2008 2008 return (STMF_FAILURE);
2009 2009 }
2010 2010
2011 2011 if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
2012 2012 (stmf_ic_scsi_cmd_msg_alloc_func_t)
2013 2013 ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
2014 2014 &error)) == NULL)) {
2015 2015 cmn_err(CE_WARN,
2016 2016 "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
2017 2017 return (STMF_FAILURE);
2018 2018 }
2019 2019
2020 2020 if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2021 2021 ((ic_scsi_data_xfer_done_msg_alloc =
2022 2022 (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2023 2023 ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2024 2024 &error)) == NULL)) {
2025 2025 cmn_err(CE_WARN,
2026 2026 "Unable to find symbol -"
2027 2027 "stmf_ic_scsi_data_xfer_done_msg_alloc");
2028 2028 return (STMF_FAILURE);
2029 2029 }
2030 2030
2031 2031 if (ic_session_reg_msg_alloc == NULL &&
2032 2032 ((ic_session_reg_msg_alloc =
2033 2033 (stmf_ic_session_create_msg_alloc_func_t)
2034 2034 ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2035 2035 &error)) == NULL)) {
2036 2036 cmn_err(CE_WARN,
2037 2037 "Unable to find symbol -"
2038 2038 "stmf_ic_session_create_msg_alloc");
2039 2039 return (STMF_FAILURE);
2040 2040 }
2041 2041
2042 2042 if (ic_session_dereg_msg_alloc == NULL &&
2043 2043 ((ic_session_dereg_msg_alloc =
2044 2044 (stmf_ic_session_destroy_msg_alloc_func_t)
2045 2045 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2046 2046 &error)) == NULL)) {
2047 2047 cmn_err(CE_WARN,
2048 2048 "Unable to find symbol -"
2049 2049 "stmf_ic_session_destroy_msg_alloc");
2050 2050 return (STMF_FAILURE);
2051 2051 }
2052 2052
2053 2053 if (ic_tx_msg == NULL && ((ic_tx_msg =
2054 2054 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2055 2055 &error)) == NULL)) {
2056 2056 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2057 2057 return (STMF_FAILURE);
2058 2058 }
2059 2059
2060 2060 if (ic_msg_free == NULL && ((ic_msg_free =
2061 2061 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2062 2062 &error)) == NULL)) {
2063 2063 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2064 2064 return (STMF_FAILURE);
2065 2065 }
2066 2066 return (STMF_SUCCESS);
2067 2067 }
2068 2068
2069 2069 static void
2070 2070 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2071 2071 {
2072 2072 mutex_enter(&stmf_state.stmf_lock);
2073 2073 alua_state->alua_node = stmf_state.stmf_alua_node;
2074 2074 alua_state->alua_state = stmf_state.stmf_alua_state;
2075 2075 mutex_exit(&stmf_state.stmf_lock);
2076 2076 }
2077 2077
2078 2078
/*
 * Enable or disable ALUA (proxy) mode.  When enabling, loads the pppt
 * module and registers every eligible existing local port and active
 * logical unit with the peer.  Returns 0 on success, EINVAL for bad
 * arguments, EIO if pppt cannot be loaded.
 */
static int
stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
{
	stmf_i_local_port_t *ilport;
	stmf_i_lu_t *ilu;
	stmf_lu_t *lu;
	stmf_ic_msg_status_t ic_ret;
	stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
	stmf_local_port_t *lport;
	int ret = 0;

	/* Only two nodes (0/1) and two states (on/off) are supported. */
	if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);
	if (alua_state->alua_state == 1) {
		if (pppt_modload() == STMF_FAILURE) {
			ret = EIO;
			goto err;
		}
		if (alua_state->alua_node != 0) {
			/* reset existing rtpids to new base */
			stmf_rtpid_counter = 255;
		}
		stmf_state.stmf_alua_node = alua_state->alua_node;
		stmf_state.stmf_alua_state = 1;
		/* register existing local ports with ppp */
		for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
		    ilport = ilport->ilport_next) {
			/* skip standby ports and non-alua participants */
			if (ilport->ilport_standby == 1 ||
			    ilport->ilport_alua == 0) {
				continue;
			}
			/* node 1 gets rtpids above the node-0 range */
			if (alua_state->alua_node != 0) {
				ilport->ilport_rtpid =
				    atomic_add_16_nv(&stmf_rtpid_counter, 1);
			}
			lport = ilport->ilport_lport;
			ic_reg_port = ic_reg_port_msg_alloc(
			    lport->lport_id, ilport->ilport_rtpid,
			    0, NULL, stmf_proxy_msg_id);
			if (ic_reg_port) {
				ic_ret = ic_tx_msg(ic_reg_port);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					ilport->ilport_reg_msgid =
					    stmf_proxy_msg_id++;
				} else {
					cmn_err(CE_WARN,
					    "error on port registration "
					    "port - %s",
					    ilport->ilport_kstat_tgt_name);
				}
			}
		}
		/* register existing logical units */
		for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
		    ilu = ilu->ilu_next) {
			if (ilu->ilu_access != STMF_LU_ACTIVE) {
				continue;
			}
			/* register with proxy module */
			lu = ilu->ilu_lu;
			/* only LPIF_REV_2 providers advertise ALUA support */
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the register message */
				ic_reg_lun = ic_reg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name,
				    lu->lu_proxy_reg_arg_len,
				    (uint8_t *)lu->lu_proxy_reg_arg,
				    stmf_proxy_msg_id);
				/* send the message */
				if (ic_reg_lun) {
					ic_ret = ic_tx_msg(ic_reg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						stmf_proxy_msg_id++;
					}
				}
			}
		}
	} else {
		stmf_state.stmf_alua_state = 0;
	}

err:
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
2169 2169
2170 2170
/*
 * Allocation layout used by stmf_alloc()/stmf_free().  Each allocated
 * object consists of a shared (caller-visible) part, an optional
 * caller-private area, and a framework-private trailer (__istmf_t).
 */
typedef struct {
	void *bp;	/* back pointer from internal struct to main struct */
	int alloc_size;	/* total size passed to kmem_alloc/kmem_zalloc */
} __istmf_t;

typedef struct {
	__istmf_t *fp;	/* Framework private */
	void *cp;	/* Caller private */
	void *ss;	/* struct specific */
} __stmf_t;

/*
 * Per-struct-id size table; indexed by stmf_struct_id_t, so the entry
 * order must match that enum (slot 0 is a placeholder).
 */
static struct {
	int shared;
	int fw_private;
} stmf_sizes[] = { { 0, 0 },
	{ GET_STRUCT_SIZE(stmf_lu_provider_t),
	    GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
	{ GET_STRUCT_SIZE(stmf_port_provider_t),
	    GET_STRUCT_SIZE(stmf_i_port_provider_t) },
	{ GET_STRUCT_SIZE(stmf_local_port_t),
	    GET_STRUCT_SIZE(stmf_i_local_port_t) },
	{ GET_STRUCT_SIZE(stmf_lu_t),
	    GET_STRUCT_SIZE(stmf_i_lu_t) },
	{ GET_STRUCT_SIZE(stmf_scsi_session_t),
	    GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
	{ GET_STRUCT_SIZE(scsi_task_t),
	    GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
	{ GET_STRUCT_SIZE(stmf_data_buf_t),
	    GET_STRUCT_SIZE(__istmf_t) },
	{ GET_STRUCT_SIZE(stmf_dbuf_store_t),
	    GET_STRUCT_SIZE(__istmf_t) }

};
2204 2204
/*
 * Allocate a framework object of the given struct_id with
 * additional_size bytes of caller-private space.  Uses KM_NOSLEEP
 * when called from interrupt context or when AF_FORCE_NOSLEEP is set,
 * so the return value must always be checked for NULL.  Free with
 * stmf_free().
 */
void *
stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
{
	int stmf_size;
	int kmem_flag;
	__stmf_t *sh;

	if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
		return (NULL);

	/* Interrupt threads must not block in the allocator. */
	if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
		kmem_flag = KM_NOSLEEP;
	} else {
		kmem_flag = KM_SLEEP;
	}

	/* Round the caller-private area up to an 8-byte boundary. */
	additional_size = (additional_size + 7) & (~7);
	stmf_size = stmf_sizes[struct_id].shared +
	    stmf_sizes[struct_id].fw_private + additional_size;

	if (flags & AF_DONTZERO)
		sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
	else
		sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);

	if (sh == NULL)
		return (NULL);

	/*
	 * In principle, the implementation inside stmf_alloc should not
	 * be changed anyway. But the original order of framework private
	 * data and caller private data does not support sglist in the caller
	 * private data.
	 * To work around this, the memory segments of framework private
	 * data and caller private data are re-ordered here.
	 * A better solution is to provide a specific interface to allocate
	 * the sglist, then we will not need this workaround any more.
	 * But before the new interface is available, the memory segment
	 * ordering should be kept as is.
	 */
	sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
	sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
	    stmf_sizes[struct_id].shared + additional_size);

	sh->fp->bp = sh;
	/* Just store the total size instead of storing additional size */
	sh->fp->alloc_size = stmf_size;

	return (sh);
}
2255 2255
2256 2256 void
2257 2257 stmf_free(void *ptr)
2258 2258 {
2259 2259 __stmf_t *sh = (__stmf_t *)ptr;
2260 2260
2261 2261 /*
2262 2262 * So far we dont need any struct specific processing. If such
2263 2263 * a need ever arises, then store the struct id in the framework
2264 2264 * private section and get it here as sh->fp->struct_id.
2265 2265 */
2266 2266 kmem_free(ptr, sh->fp->alloc_size);
2267 2267 }
2268 2268
2269 2269 /*
2270 2270 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2271 2271 * framework and returns a pointer to framework private data for the lu.
2272 2272 * Returns NULL if the lu was not found.
2273 2273 */
2274 2274 stmf_i_lu_t *
2275 2275 stmf_lookup_lu(stmf_lu_t *lu)
2276 2276 {
2277 2277 stmf_i_lu_t *ilu;
2278 2278 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2279 2279
2280 2280 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2281 2281 if (ilu->ilu_lu == lu)
2282 2282 return (ilu);
2283 2283 }
2284 2284 return (NULL);
2285 2285 }
2286 2286
2287 2287 /*
2288 2288 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2289 2289 * with the framework and returns a pointer to framework private data for
2290 2290 * the lport.
2291 2291 * Returns NULL if the lport was not found.
2292 2292 */
2293 2293 stmf_i_local_port_t *
2294 2294 stmf_lookup_lport(stmf_local_port_t *lport)
2295 2295 {
2296 2296 stmf_i_local_port_t *ilport;
2297 2297 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2298 2298
2299 2299 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2300 2300 ilport = ilport->ilport_next) {
2301 2301 if (ilport->ilport_lport == lport)
2302 2302 return (ilport);
2303 2303 }
2304 2304 return (NULL);
2305 2305 }
2306 2306
/*
 * Register an lu provider with the framework.  If persisted provider
 * data (ppd) exists for this provider name, deliver it via the
 * provider's callback (with the global lock dropped around the call).
 * Only LPIF_REV_1 and LPIF_REV_2 providers are accepted.
 */
stmf_status_t
stmf_register_lu_provider(stmf_lu_provider_t *lp)
{
	stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_next = stmf_state.stmf_ilplist;
	stmf_state.stmf_ilplist = ilp;
	stmf_state.stmf_nlps++;

	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rlp_bail_out;
	}
	ilp->ilp_ppd = ppd;
	ppd->ppd_provider = ilp;
	if (lp->lp_cb == NULL)
		goto rlp_bail_out;
	/*
	 * ilp_cb_in_progress blocks deregistration while the lock is
	 * dropped for the provider callback below.
	 */
	ilp->ilp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ilp->ilp_cb_in_progress = 0;

rlp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2349 2349
2350 2350 stmf_status_t
2351 2351 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2352 2352 {
2353 2353 stmf_i_lu_provider_t **ppilp;
2354 2354 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2355 2355
2356 2356 mutex_enter(&stmf_state.stmf_lock);
2357 2357 if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2358 2358 mutex_exit(&stmf_state.stmf_lock);
2359 2359 return (STMF_BUSY);
2360 2360 }
2361 2361 for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2362 2362 ppilp = &((*ppilp)->ilp_next)) {
2363 2363 if (*ppilp == ilp) {
2364 2364 *ppilp = ilp->ilp_next;
2365 2365 stmf_state.stmf_nlps--;
2366 2366 if (ilp->ilp_ppd) {
2367 2367 ilp->ilp_ppd->ppd_provider = NULL;
2368 2368 ilp->ilp_ppd = NULL;
2369 2369 }
2370 2370 mutex_exit(&stmf_state.stmf_lock);
2371 2371 return (STMF_SUCCESS);
2372 2372 }
2373 2373 }
2374 2374 mutex_exit(&stmf_state.stmf_lock);
2375 2375 return (STMF_NOT_FOUND);
2376 2376 }
2377 2377
/*
 * Register a port provider with the framework.  If persisted provider
 * data (ppd) exists for this provider name, deliver it via the
 * provider's callback (with the global lock dropped around the call).
 * Only PORTIF_REV_1 providers are accepted.
 */
stmf_status_t
stmf_register_port_provider(stmf_port_provider_t *pp)
{
	stmf_i_port_provider_t *ipp =
	    (stmf_i_port_provider_t *)pp->pp_stmf_private;
	stmf_pp_data_t *ppd;
	uint32_t cb_flags;

	if (pp->pp_portif_rev != PORTIF_REV_1)
		return (STMF_FAILURE);

	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_next = stmf_state.stmf_ipplist;
	stmf_state.stmf_ipplist = ipp;
	stmf_state.stmf_npps++;
	/* See if we need to do a callback */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
			break;
		}
	}
	if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
		goto rpp_bail_out;
	}
	ipp->ipp_ppd = ppd;
	ppd->ppd_provider = ipp;
	if (pp->pp_cb == NULL)
		goto rpp_bail_out;
	/*
	 * ipp_cb_in_progress blocks deregistration while the lock is
	 * dropped for the provider callback below.
	 */
	ipp->ipp_cb_in_progress = 1;
	cb_flags = STMF_PCB_PREG_COMPLETE;
	if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
		cb_flags |= STMF_PCB_STMF_ONLINING;
	mutex_exit(&stmf_state.stmf_lock);
	pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
	mutex_enter(&stmf_state.stmf_lock);
	ipp->ipp_cb_in_progress = 0;

rpp_bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
2420 2420
2421 2421 stmf_status_t
2422 2422 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2423 2423 {
2424 2424 stmf_i_port_provider_t *ipp =
2425 2425 (stmf_i_port_provider_t *)pp->pp_stmf_private;
2426 2426 stmf_i_port_provider_t **ppipp;
2427 2427
2428 2428 mutex_enter(&stmf_state.stmf_lock);
2429 2429 if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2430 2430 mutex_exit(&stmf_state.stmf_lock);
2431 2431 return (STMF_BUSY);
2432 2432 }
2433 2433 for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2434 2434 ppipp = &((*ppipp)->ipp_next)) {
2435 2435 if (*ppipp == ipp) {
2436 2436 *ppipp = ipp->ipp_next;
2437 2437 stmf_state.stmf_npps--;
2438 2438 if (ipp->ipp_ppd) {
2439 2439 ipp->ipp_ppd->ppd_provider = NULL;
2440 2440 ipp->ipp_ppd = NULL;
2441 2441 }
2442 2442 mutex_exit(&stmf_state.stmf_lock);
2443 2443 return (STMF_SUCCESS);
2444 2444 }
2445 2445 }
2446 2446 mutex_exit(&stmf_state.stmf_lock);
2447 2447 return (STMF_NOT_FOUND);
2448 2448 }
2449 2449
/*
 * STMF_IOCTL_LOAD_PP_DATA handler: store (persist) provider data for
 * an lu or port provider, creating the ppd record if it does not yet
 * exist.  If the provider is currently registered, its data-updated
 * callback is invoked with the new nvlist.  On success the new write
 * token is returned through ppi_token.  Returns 0 or an errno;
 * *err_ret carries an STMF_IOCERR_* detail code on token mismatch.
 */
int
stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
    uint32_t *err_ret)
{
	stmf_i_port_provider_t *ipp;
	stmf_i_lu_provider_t *ilp;
	stmf_pp_data_t *ppd;
	nvlist_t *nv;
	int s;
	int ret;

	*err_ret = 0;

	/* Exactly one of lu/port provider must be selected. */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	/* Look for an existing ppd record of the right class and name. */
	mutex_enter(&stmf_state.stmf_lock);
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd == NULL) {
		/* New provider */
		s = strlen(ppi->ppi_name);
		if (s > 254) {
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
		/*
		 * Size the record for the name plus NUL; the -7 offsets
		 * the name storage already counted in sizeof
		 * (stmf_pp_data_t) — presumably ppd_name is char[8];
		 * confirm against the struct definition.
		 */
		s += sizeof (stmf_pp_data_t) - 7;

		ppd = kmem_zalloc(s, KM_NOSLEEP);
		if (ppd == NULL) {
			mutex_exit(&stmf_state.stmf_lock);
			return (ENOMEM);
		}
		ppd->ppd_alloc_size = s;
		(void) strcpy(ppd->ppd_name, ppi->ppi_name);

		/* See if this provider already exists */
		if (ppi->ppi_lu_provider) {
			ppd->ppd_lu_provider = 1;
			for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
			    ilp = ilp->ilp_next) {
				if (strcmp(ppi->ppi_name,
				    ilp->ilp_lp->lp_name) == 0) {
					ppd->ppd_provider = ilp;
					ilp->ilp_ppd = ppd;
					break;
				}
			}
		} else {
			ppd->ppd_port_provider = 1;
			for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
			    ipp = ipp->ipp_next) {
				if (strcmp(ppi->ppi_name,
				    ipp->ipp_pp->pp_name) == 0) {
					ppd->ppd_provider = ipp;
					ipp->ipp_ppd = ppd;
					break;
				}
			}
		}

		/* Link this ppd in */
		ppd->ppd_next = stmf_state.stmf_ppdlist;
		stmf_state.stmf_ppdlist = ppd;
	}

	/*
	 * User is requesting that the token be checked.
	 * If there was another set after the user's get
	 * it's an error
	 */
	if (ppi->ppi_token_valid) {
		if (ppi->ppi_token != ppd->ppd_token) {
			*err_ret = STMF_IOCERR_PPD_UPDATED;
			mutex_exit(&stmf_state.stmf_lock);
			return (EINVAL);
		}
	}

	/* Unpack the caller's XDR-encoded nvlist (non-blocking alloc). */
	if ((ret = nvlist_unpack((char *)ppi->ppi_data,
	    (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
		mutex_exit(&stmf_state.stmf_lock);
		return (ret);
	}

	/* Free any existing lists and add this one to the ppd */
	if (ppd->ppd_nv)
		nvlist_free(ppd->ppd_nv);
	ppd->ppd_nv = nv;

	/* set the token for writes */
	ppd->ppd_token++;
	/* return token to caller */
	if (ppi_token) {
		*ppi_token = ppd->ppd_token;
	}

	/* If there is a provider registered, do the notifications */
	if (ppd->ppd_provider) {
		uint32_t cb_flags = 0;

		if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
			cb_flags |= STMF_PCB_STMF_ONLINING;
		if (ppi->ppi_lu_provider) {
			ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
			if (ilp->ilp_lp->lp_cb == NULL)
				goto bail_out;
			/* Block deregistration while the lock is dropped. */
			ilp->ilp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ilp->ilp_lp->lp_cb(ilp->ilp_lp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ilp->ilp_cb_in_progress = 0;
		} else {
			ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
			if (ipp->ipp_pp->pp_cb == NULL)
				goto bail_out;
			/* Block deregistration while the lock is dropped. */
			ipp->ipp_cb_in_progress = 1;
			mutex_exit(&stmf_state.stmf_lock);
			ipp->ipp_pp->pp_cb(ipp->ipp_pp,
			    STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
			mutex_enter(&stmf_state.stmf_lock);
			ipp->ipp_cb_in_progress = 0;
		}
	}

bail_out:
	mutex_exit(&stmf_state.stmf_lock);

	return (0);
}
2592 2592
2593 2593 void
2594 2594 stmf_delete_ppd(stmf_pp_data_t *ppd)
2595 2595 {
2596 2596 stmf_pp_data_t **pppd;
2597 2597
2598 2598 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2599 2599 if (ppd->ppd_provider) {
2600 2600 if (ppd->ppd_lu_provider) {
2601 2601 ((stmf_i_lu_provider_t *)
2602 2602 ppd->ppd_provider)->ilp_ppd = NULL;
2603 2603 } else {
2604 2604 ((stmf_i_port_provider_t *)
2605 2605 ppd->ppd_provider)->ipp_ppd = NULL;
2606 2606 }
2607 2607 ppd->ppd_provider = NULL;
2608 2608 }
2609 2609
2610 2610 for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2611 2611 pppd = &((*pppd)->ppd_next)) {
2612 2612 if (*pppd == ppd)
2613 2613 break;
2614 2614 }
2615 2615
2616 2616 if (*pppd == NULL)
2617 2617 return;
2618 2618
2619 2619 *pppd = ppd->ppd_next;
2620 2620 if (ppd->ppd_nv)
2621 2621 nvlist_free(ppd->ppd_nv);
2622 2622
2623 2623 kmem_free(ppd, ppd->ppd_alloc_size);
2624 2624 }
2625 2625
2626 2626 int
2627 2627 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2628 2628 {
2629 2629 stmf_pp_data_t *ppd;
2630 2630 int ret = ENOENT;
2631 2631
2632 2632 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2633 2633 return (EINVAL);
2634 2634 }
2635 2635
2636 2636 mutex_enter(&stmf_state.stmf_lock);
2637 2637
2638 2638 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2639 2639 if (ppi->ppi_lu_provider) {
2640 2640 if (!ppd->ppd_lu_provider)
2641 2641 continue;
2642 2642 } else if (ppi->ppi_port_provider) {
2643 2643 if (!ppd->ppd_port_provider)
2644 2644 continue;
2645 2645 }
2646 2646 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2647 2647 break;
2648 2648 }
2649 2649
2650 2650 if (ppd) {
2651 2651 ret = 0;
2652 2652 stmf_delete_ppd(ppd);
2653 2653 }
2654 2654 mutex_exit(&stmf_state.stmf_lock);
2655 2655
2656 2656 return (ret);
2657 2657 }
2658 2658
/*
 * STMF_IOCTL_GET_PP_DATA handler: pack the persisted provider data
 * nvlist into the caller's buffer (XDR encoding) and return the
 * current token in ppi_out.  If the buffer is too small, the required
 * size is reported in ppi_data_size and *err_ret is set to
 * STMF_IOCERR_INSUFFICIENT_BUF.  Returns 0 or an errno (ENOENT when
 * no data is stored for the named provider).
 */
int
stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
    uint32_t *err_ret)
{
	stmf_pp_data_t *ppd;
	size_t req_size;
	int ret = ENOENT;
	char *bufp = (char *)ppi_out->ppi_data;

	/* Exactly one of lu/port provider must be selected. */
	if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
		return (EINVAL);
	}

	mutex_enter(&stmf_state.stmf_lock);

	/* Find the ppd record of the right class and name. */
	for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
		if (ppi->ppi_lu_provider) {
			if (!ppd->ppd_lu_provider)
				continue;
		} else if (ppi->ppi_port_provider) {
			if (!ppd->ppd_port_provider)
				continue;
		}
		if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
			break;
	}

	if (ppd && ppd->ppd_nv) {
		/* Token lets the caller detect concurrent updates. */
		ppi_out->ppi_token = ppd->ppd_token;
		if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
		    NV_ENCODE_XDR)) != 0) {
			goto done;
		}
		ppi_out->ppi_data_size = req_size;
		if (req_size > ppi->ppi_data_size) {
			*err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
			ret = EINVAL;
			goto done;
		}

		if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
		    NV_ENCODE_XDR, 0)) != 0) {
			goto done;
		}
		ret = 0;
	}

done:
	mutex_exit(&stmf_state.stmf_lock);

	return (ret);
}
2711 2711
2712 2712 void
2713 2713 stmf_delete_all_ppds()
2714 2714 {
2715 2715 stmf_pp_data_t *ppd, *nppd;
2716 2716
2717 2717 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2718 2718 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2719 2719 nppd = ppd->ppd_next;
2720 2720 stmf_delete_ppd(ppd);
2721 2721 }
2722 2722 }
2723 2723
2724 2724 /*
2725 2725 * 16 is the max string length of a protocol_ident, increase
2726 2726 * the size if needed.
2727 2727 */
2728 2728 #define STMF_KSTAT_LU_SZ (STMF_GUID_INPUT + 1 + 256)
2729 2729 #define STMF_KSTAT_TGT_SZ (256 * 2 + 16)
2730 2730
2731 2731 /*
2732 2732 * This array matches the Protocol Identifier in stmf_ioctl.h
2733 2733 */
2734 2734 #define MAX_PROTO_STR_LEN 32
2735 2735
2736 2736 char *protocol_ident[PROTOCOL_ANY] = {
2737 2737 "Fibre Channel",
2738 2738 "Parallel SCSI",
2739 2739 "SSA",
2740 2740 "IEEE_1394",
2741 2741 "SRP",
2742 2742 "iSCSI",
2743 2743 "SAS",
2744 2744 "ADT",
2745 2745 "ATAPI",
2746 2746 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2747 2747 };
2748 2748
2749 2749 /*
2750 2750 * Update the lun wait/run queue count
2751 2751 */
2752 2752 static void
2753 2753 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2754 2754 {
2755 2755 stmf_i_lu_t *ilu;
2756 2756 kstat_io_t *kip;
2757 2757
2758 2758 if (task->task_lu == dlun0)
2759 2759 return;
2760 2760 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2761 2761 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2762 2762 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2763 2763 if (kip != NULL) {
2764 2764 func(kip);
2765 2765 }
2766 2766 }
2767 2767 }
2768 2768
2769 2769 /*
2770 2770 * Update the target(lport) wait/run queue count
2771 2771 */
2772 2772 static void
2773 2773 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2774 2774 {
2775 2775 stmf_i_local_port_t *ilp;
2776 2776 kstat_io_t *kip;
2777 2777
2778 2778 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2779 2779 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2780 2780 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2781 2781 if (kip != NULL) {
2782 2782 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2783 2783 func(kip);
2784 2784 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2785 2785 }
2786 2786 }
2787 2787 }
2788 2788
2789 2789 static void
2790 2790 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2791 2791 {
2792 2792 stmf_i_local_port_t *ilp;
2793 2793 kstat_io_t *kip;
2794 2794
2795 2795 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2796 2796 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2797 2797 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2798 2798 if (kip != NULL) {
2799 2799 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2800 2800 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2801 2801 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2802 2802 }
2803 2803 }
2804 2804 }
2805 2805
2806 2806 static void
2807 2807 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2808 2808 {
2809 2809 stmf_i_lu_t *ilu;
2810 2810 kstat_io_t *kip;
2811 2811
2812 2812 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2813 2813 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2814 2814 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2815 2815 if (kip != NULL) {
2816 2816 mutex_enter(ilu->ilu_kstat_io->ks_lock);
2817 2817 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2818 2818 mutex_exit(ilu->ilu_kstat_io->ks_lock);
2819 2819 }
2820 2820 }
2821 2821 }
2822 2822
/*
 * Create the "misc" (guid/alias) and "io" kstats for a logical unit.
 * Failures are logged and tolerated: the LU simply operates without
 * statistics.  Also fills in ilu->ilu_ascii_hex_guid as a side effect,
 * which stmf_deregister_lu() later uses to delete ITL kstats.
 */
static void
stmf_create_kstat_lu(stmf_i_lu_t *ilu)
{
	char ks_nm[KSTAT_STRLEN];
	stmf_kstat_lu_info_t *ks_lu;

	/* create kstat lun info */
	ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
	    KM_NOSLEEP);
	if (ks_lu == NULL) {
		cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
		return;
	}

	bzero(ks_nm, sizeof (ks_nm));
	/* kstat name is derived from the ilu's address for uniqueness */
	(void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
		cmn_err(CE_WARN, "STMF: kstat_create lu failed");
		return;
	}

	/* virtual kstat: we own the data buffer, not the kstat framework */
	ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
	ilu->ilu_kstat_info->ks_data = ks_lu;

	kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
	    KSTAT_DATA_STRING);

	/* convert guid to hex string */
	int i;
	uint8_t *p = ilu->ilu_lu->lu_id->ident;
	bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
	}
	kstat_named_setstr(&ks_lu->i_lun_guid,
	    (const char *)ilu->ilu_ascii_hex_guid);
	/*
	 * NOTE(review): lu_alias may be NULL here; presumably
	 * kstat_named_setstr() accepts a NULL string -- confirm.
	 */
	kstat_named_setstr(&ks_lu->i_lun_alias,
	    (const char *)ilu->ilu_lu->lu_alias);
	kstat_install(ilu->ilu_kstat_info);

	/* create kstat lun io */
	bzero(ks_nm, sizeof (ks_nm));
	(void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
	if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
		return;
	}
	/* ks_lock protects the io kstat counters (see the updaters above) */
	mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
	ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
	kstat_install(ilu->ilu_kstat_io);
}
2881 2881
2882 2882 static void
2883 2883 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2884 2884 {
2885 2885 char ks_nm[KSTAT_STRLEN];
2886 2886 stmf_kstat_tgt_info_t *ks_tgt;
2887 2887 int id, len;
2888 2888
2889 2889 /* create kstat lport info */
2890 2890 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2891 2891 KM_NOSLEEP);
2892 2892 if (ks_tgt == NULL) {
2893 2893 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2894 2894 return;
2895 2895 }
2896 2896
2897 2897 bzero(ks_nm, sizeof (ks_nm));
2898 2898 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2899 2899 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2900 2900 0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2901 2901 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2902 2902 KSTAT_FLAG_VIRTUAL)) == NULL) {
2903 2903 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2904 2904 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2905 2905 return;
2906 2906 }
2907 2907
2908 2908 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2909 2909 ilport->ilport_kstat_info->ks_data = ks_tgt;
2910 2910
2911 2911 kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2912 2912 KSTAT_DATA_STRING);
2913 2913 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2914 2914 KSTAT_DATA_STRING);
2915 2915 kstat_named_init(&ks_tgt->i_protocol, "protocol",
2916 2916 KSTAT_DATA_STRING);
2917 2917
2918 2918 /* ident might not be null terminated */
2919 2919 len = ilport->ilport_lport->lport_id->ident_length;
2920 2920 bcopy(ilport->ilport_lport->lport_id->ident,
2921 2921 ilport->ilport_kstat_tgt_name, len);
2922 2922 ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2923 2923 kstat_named_setstr(&ks_tgt->i_tgt_name,
2924 2924 (const char *)ilport->ilport_kstat_tgt_name);
2925 2925 kstat_named_setstr(&ks_tgt->i_tgt_alias,
2926 2926 (const char *)ilport->ilport_lport->lport_alias);
2927 2927 /* protocol */
2928 2928 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2929 2929 cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2930 2930 id = PROTOCOL_ANY;
2931 2931 }
2932 2932 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2933 2933 kstat_install(ilport->ilport_kstat_info);
2934 2934
2935 2935 /* create kstat lport io */
2936 2936 bzero(ks_nm, sizeof (ks_nm));
2937 2937 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2938 2938 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2939 2939 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2940 2940 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2941 2941 return;
2942 2942 }
2943 2943 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2944 2944 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2945 2945 kstat_install(ilport->ilport_kstat_io);
2946 2946 }
2947 2947
2948 2948 /*
2949 2949 * set the asymmetric access state for a logical unit
2950 2950 * caller is responsible for establishing SCSI unit attention on
2951 2951 * state change
2952 2952 */
2953 2953 stmf_status_t
2954 2954 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
2955 2955 {
2956 2956 stmf_i_lu_t *ilu;
2957 2957 uint8_t *p1, *p2;
2958 2958
2959 2959 if ((access_state != STMF_LU_STANDBY) &&
2960 2960 (access_state != STMF_LU_ACTIVE)) {
2961 2961 return (STMF_INVALID_ARG);
2962 2962 }
2963 2963
2964 2964 p1 = &lu->lu_id->ident[0];
2965 2965 mutex_enter(&stmf_state.stmf_lock);
2966 2966 if (stmf_state.stmf_inventory_locked) {
2967 2967 mutex_exit(&stmf_state.stmf_lock);
2968 2968 return (STMF_BUSY);
2969 2969 }
2970 2970
2971 2971 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2972 2972 p2 = &ilu->ilu_lu->lu_id->ident[0];
2973 2973 if (bcmp(p1, p2, 16) == 0) {
2974 2974 break;
2975 2975 }
2976 2976 }
2977 2977
2978 2978 if (!ilu) {
2979 2979 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
2980 2980 } else {
2981 2981 /*
2982 2982 * We're changing access state on an existing logical unit
2983 2983 * Send the proxy registration message for this logical unit
2984 2984 * if we're in alua mode.
2985 2985 * If the requested state is STMF_LU_ACTIVE, we want to register
2986 2986 * this logical unit.
2987 2987 * If the requested state is STMF_LU_STANDBY, we're going to
2988 2988 * abort all tasks for this logical unit.
2989 2989 */
2990 2990 if (stmf_state.stmf_alua_state == 1 &&
2991 2991 access_state == STMF_LU_ACTIVE) {
2992 2992 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
2993 2993 stmf_ic_msg_t *ic_reg_lun;
2994 2994 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2995 2995 lu->lu_lp->lp_alua_support) {
2996 2996 ilu->ilu_alua = 1;
2997 2997 /* allocate the register message */
2998 2998 ic_reg_lun = ic_lun_active_msg_alloc(p1,
2999 2999 lu->lu_lp->lp_name,
3000 3000 lu->lu_proxy_reg_arg_len,
3001 3001 (uint8_t *)lu->lu_proxy_reg_arg,
3002 3002 stmf_proxy_msg_id);
3003 3003 /* send the message */
3004 3004 if (ic_reg_lun) {
3005 3005 ic_ret = ic_tx_msg(ic_reg_lun);
3006 3006 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3007 3007 stmf_proxy_msg_id++;
3008 3008 }
3009 3009 }
3010 3010 }
3011 3011 } else if (stmf_state.stmf_alua_state == 1 &&
3012 3012 access_state == STMF_LU_STANDBY) {
3013 3013 /* abort all tasks for this lu */
3014 3014 stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
3015 3015 }
3016 3016 }
3017 3017
3018 3018 ilu->ilu_access = access_state;
3019 3019
3020 3020 mutex_exit(&stmf_state.stmf_lock);
3021 3021 return (STMF_SUCCESS);
3022 3022 }
3023 3023
3024 3024
/*
 * Register a logical unit with the framework: validate its GUID, link it
 * onto the global LU list, create its kstats, announce it to the ALUA
 * proxy peer when applicable and bring it online if the default LU state
 * calls for it.  Returns STMF_ALREADY for a duplicate GUID, STMF_BUSY
 * while the inventory is locked and STMF_INVALID_ARG for a bad id.
 */
stmf_status_t
stmf_register_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	uint8_t *p1, *p2;
	stmf_state_change_info_t ssci;
	stmf_id_data_t *luid;

	/*
	 * Only NAA (type 3) identifiers of exactly 16 bytes whose first
	 * nibble is 6 are accepted.
	 */
	if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
	    (lu->lu_id->ident_length != 16) ||
	    ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
		return (STMF_INVALID_ARG);
	}
	p1 = &lu->lu_id->ident[0];
	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* reject duplicate GUIDs (all 16 ident bytes must differ) */
	for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
		p2 = &ilu->ilu_lu->lu_id->ident[0];
		if (bcmp(p1, p2, 16) == 0) {
			mutex_exit(&stmf_state.stmf_lock);
			return (STMF_ALREADY);
		}
	}

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	/* bind any persistent LU id data to this LU */
	luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
	    lu->lu_id->ident_length, lu->lu_id->ident);
	if (luid) {
		luid->id_pt_to_object = (void *)ilu;
		ilu->ilu_luid = luid;
	}
	ilu->ilu_alias = NULL;

	/* insert at the head of the global LU list */
	ilu->ilu_next = stmf_state.stmf_ilulist;
	ilu->ilu_prev = NULL;
	if (ilu->ilu_next)
		ilu->ilu_next->ilu_prev = ilu;
	stmf_state.stmf_ilulist = ilu;
	stmf_state.stmf_nlus++;
	if (lu->lu_lp) {
		((stmf_i_lu_provider_t *)
		    (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
	}
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
	STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
	cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL);
	stmf_create_kstat_lu(ilu);
	/*
	 * register with proxy module if available and logical unit
	 * is in active state
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilu->ilu_access == STMF_LU_ACTIVE) {
		stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
		stmf_ic_msg_t *ic_reg_lun;
		if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
		    lu->lu_lp->lp_alua_support) {
			ilu->ilu_alua = 1;
			/* allocate the register message */
			ic_reg_lun = ic_reg_lun_msg_alloc(p1,
			    lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
			    (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
			/* send the message */
			if (ic_reg_lun) {
				ic_ret = ic_tx_msg(ic_reg_lun);
				if (ic_ret == STMF_IC_MSG_SUCCESS) {
					/* msg id advances only on success */
					stmf_proxy_msg_id++;
				}
			}
		}
	}
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * check the default state for lu
	 * NOTE(review): stmf_default_lu_state is read after dropping
	 * stmf_lock; presumably it only changes at configuration time --
	 * confirm.
	 */
	if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
		ilu->ilu_prev_state = STMF_STATE_OFFLINE;
	} else {
		ilu->ilu_prev_state = STMF_STATE_ONLINE;
		if (stmf_state.stmf_service_running) {
			ssci.st_rflags = 0;
			ssci.st_additional_info = NULL;
			(void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
		}
	}

	/* XXX: Generate event */
	return (STMF_SUCCESS);
}
3117 3117
/*
 * Remove a logical unit from the framework.  The LU must already be
 * offline with all of its tasks drained; otherwise STMF_BUSY is
 * returned and the caller must retry.
 */
stmf_status_t
stmf_deregister_lu(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	ilu = stmf_lookup_lu(lu);
	if (ilu == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_INVALID_ARG);
	}
	if (ilu->ilu_state == STMF_STATE_OFFLINE) {
		/* offline implies every task is back on the free list */
		ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
		/* wait out any in-progress deregister stall */
		while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
			cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
		}
		if (ilu->ilu_ntasks) {
			stmf_i_scsi_task_t *itask, *nitask;

			/* free the task structures cached for this LU */
			nitask = ilu->ilu_tasks;
			do {
				itask = nitask;
				nitask = itask->itask_lu_next;
				lu->lu_task_free(itask->itask_task);
				stmf_free(itask->itask_task);
			} while (nitask != NULL);

			ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
			ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
		}
		/* de-register with proxy if available */
		if (ilu->ilu_access == STMF_LU_ACTIVE &&
		    stmf_state.stmf_alua_state == 1) {
			/* de-register with proxy module */
			stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
			stmf_ic_msg_t *ic_dereg_lun;
			if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
			    lu->lu_lp->lp_alua_support) {
				ilu->ilu_alua = 1;
				/* allocate the de-register message */
				ic_dereg_lun = ic_dereg_lun_msg_alloc(
				    lu->lu_id->ident, lu->lu_lp->lp_name, 0,
				    NULL, stmf_proxy_msg_id);
				/* send the message */
				if (ic_dereg_lun) {
					ic_ret = ic_tx_msg(ic_dereg_lun);
					if (ic_ret == STMF_IC_MSG_SUCCESS) {
						/* msg id advances on success */
						stmf_proxy_msg_id++;
					}
				}
			}
		}

		/* unlink from the global LU list */
		if (ilu->ilu_next)
			ilu->ilu_next->ilu_prev = ilu->ilu_prev;
		if (ilu->ilu_prev)
			ilu->ilu_prev->ilu_next = ilu->ilu_next;
		else
			stmf_state.stmf_ilulist = ilu->ilu_next;
		stmf_state.stmf_nlus--;

		/* don't leave the svc thread's cursors pointing at us */
		if (ilu == stmf_state.stmf_svc_ilu_draining) {
			stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		}
		if (ilu == stmf_state.stmf_svc_ilu_timing) {
			stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		}
		if (lu->lu_lp) {
			((stmf_i_lu_provider_t *)
			    (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
		}
		/* break the persistent id data's back-pointer to this LU */
		if (ilu->ilu_luid) {
			((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
			    NULL;
			ilu->ilu_luid = NULL;
		}
		STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
	} else {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down the kstats created at registration time */
	if (ilu->ilu_kstat_info) {
		kmem_free(ilu->ilu_kstat_info->ks_data,
		    ilu->ilu_kstat_info->ks_data_size);
		kstat_delete(ilu->ilu_kstat_info);
	}
	if (ilu->ilu_kstat_io) {
		kstat_delete(ilu->ilu_kstat_io);
		mutex_destroy(&ilu->ilu_kstat_lock);
	}
	stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
	cv_destroy(&ilu->ilu_offline_pending_cv);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3217 3217
3218 3218 void
3219 3219 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3220 3220 {
3221 3221 stmf_i_local_port_t *ilport =
3222 3222 (stmf_i_local_port_t *)lport->lport_stmf_private;
3223 3223 ilport->ilport_rtpid = rtpid;
3224 3224 ilport->ilport_standby = 1;
3225 3225 }
3226 3226
3227 3227 void
3228 3228 stmf_set_port_alua(stmf_local_port_t *lport)
3229 3229 {
3230 3230 stmf_i_local_port_t *ilport =
3231 3231 (stmf_i_local_port_t *)lport->lport_stmf_private;
3232 3232 ilport->ilport_alua = 1;
3233 3233 }
3234 3234
3235 3235 stmf_status_t
3236 3236 stmf_register_local_port(stmf_local_port_t *lport)
3237 3237 {
3238 3238 stmf_i_local_port_t *ilport;
3239 3239 stmf_state_change_info_t ssci;
3240 3240 int start_workers = 0;
3241 3241
3242 3242 mutex_enter(&stmf_state.stmf_lock);
3243 3243 if (stmf_state.stmf_inventory_locked) {
3244 3244 mutex_exit(&stmf_state.stmf_lock);
3245 3245 return (STMF_BUSY);
3246 3246 }
3247 3247 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3248 3248 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3249 3249
3250 3250 ilport->ilport_instance =
3251 3251 id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3252 3252 if (ilport->ilport_instance == -1) {
3253 3253 mutex_exit(&stmf_state.stmf_lock);
3254 3254 return (STMF_FAILURE);
3255 3255 }
3256 3256 ilport->ilport_next = stmf_state.stmf_ilportlist;
3257 3257 ilport->ilport_prev = NULL;
3258 3258 if (ilport->ilport_next)
3259 3259 ilport->ilport_next->ilport_prev = ilport;
3260 3260 stmf_state.stmf_ilportlist = ilport;
3261 3261 stmf_state.stmf_nlports++;
3262 3262 if (lport->lport_pp) {
3263 3263 ((stmf_i_port_provider_t *)
3264 3264 (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3265 3265 }
3266 3266 ilport->ilport_tg =
3267 3267 stmf_lookup_group_for_target(lport->lport_id->ident,
3268 3268 lport->lport_id->ident_length);
3269 3269
3270 3270 /*
3271 3271 * rtpid will/must be set if this is a standby port
3272 3272 * only register ports that are not standby (proxy) ports
3273 3273 * and ports that are alua participants (ilport_alua == 1)
3274 3274 */
3275 3275 if (ilport->ilport_standby == 0) {
3276 3276 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3277 3277 }
3278 3278
3279 3279 if (stmf_state.stmf_alua_state == 1 &&
3280 3280 ilport->ilport_standby == 0 &&
3281 3281 ilport->ilport_alua == 1) {
3282 3282 stmf_ic_msg_t *ic_reg_port;
3283 3283 stmf_ic_msg_status_t ic_ret;
3284 3284 stmf_local_port_t *lport;
3285 3285 lport = ilport->ilport_lport;
3286 3286 ic_reg_port = ic_reg_port_msg_alloc(
3287 3287 lport->lport_id, ilport->ilport_rtpid,
3288 3288 0, NULL, stmf_proxy_msg_id);
3289 3289 if (ic_reg_port) {
3290 3290 ic_ret = ic_tx_msg(ic_reg_port);
3291 3291 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3292 3292 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3293 3293 } else {
3294 3294 cmn_err(CE_WARN, "error on port registration "
3295 3295 "port - %s", ilport->ilport_kstat_tgt_name);
3296 3296 }
3297 3297 }
3298 3298 }
3299 3299 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3300 3300 stmf_create_kstat_lport(ilport);
3301 3301 if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3302 3302 stmf_workers_state = STMF_WORKERS_ENABLING;
3303 3303 start_workers = 1;
3304 3304 }
3305 3305 mutex_exit(&stmf_state.stmf_lock);
3306 3306
3307 3307 if (start_workers)
3308 3308 stmf_worker_init();
3309 3309
3310 3310 /* the default state of LPORT */
3311 3311
3312 3312 if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3313 3313 ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3314 3314 } else {
3315 3315 ilport->ilport_prev_state = STMF_STATE_ONLINE;
3316 3316 if (stmf_state.stmf_service_running) {
3317 3317 ssci.st_rflags = 0;
3318 3318 ssci.st_additional_info = NULL;
3319 3319 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3320 3320 }
3321 3321 }
3322 3322
3323 3323 /* XXX: Generate event */
3324 3324 return (STMF_SUCCESS);
3325 3325 }
3326 3326
/*
 * Remove a local port from the framework.  Fails with STMF_BUSY while
 * any session is still registered on the port; the ALUA proxy peer (if
 * active) is notified before the session check.
 */
stmf_status_t
stmf_deregister_local_port(stmf_local_port_t *lport)
{
	stmf_i_local_port_t *ilport;

	mutex_enter(&stmf_state.stmf_lock);
	if (stmf_state.stmf_inventory_locked) {
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}

	/* dequeue all object requests from active queue */
	stmf_svc_kill_obj_requests(lport);

	ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;

	/*
	 * deregister ports that are not standby (proxy)
	 */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		stmf_ic_msg_t *ic_dereg_port;
		stmf_ic_msg_status_t ic_ret;
		ic_dereg_port = ic_dereg_port_msg_alloc(
		    lport->lport_id, 0, NULL, stmf_proxy_msg_id);
		if (ic_dereg_port) {
			ic_ret = ic_tx_msg(ic_dereg_port);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* msg id advances only on successful send */
				stmf_proxy_msg_id++;
			}
		}
	}

	if (ilport->ilport_nsessions == 0) {
		/* unlink from the global port list */
		if (ilport->ilport_next)
			ilport->ilport_next->ilport_prev = ilport->ilport_prev;
		if (ilport->ilport_prev)
			ilport->ilport_prev->ilport_next = ilport->ilport_next;
		else
			stmf_state.stmf_ilportlist = ilport->ilport_next;
		id_free(stmf_state.stmf_ilport_inst_space,
		    ilport->ilport_instance);
		rw_destroy(&ilport->ilport_lock);
		stmf_state.stmf_nlports--;
		if (lport->lport_pp) {
			((stmf_i_port_provider_t *)
			    (lport->lport_pp->pp_stmf_private))->ipp_npps--;
		}
		ilport->ilport_tg = NULL;
		STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
	} else {
		/* sessions still active; caller must retry later */
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_BUSY);
	}
	/* tear down the kstats created at registration time */
	if (ilport->ilport_kstat_info) {
		kmem_free(ilport->ilport_kstat_info->ks_data,
		    ilport->ilport_kstat_info->ks_data_size);
		kstat_delete(ilport->ilport_kstat_info);
	}
	if (ilport->ilport_kstat_io) {
		kstat_delete(ilport->ilport_kstat_io);
		mutex_destroy(&ilport->ilport_kstat_lock);
	}
	stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
	mutex_exit(&stmf_state.stmf_lock);
	return (STMF_SUCCESS);
}
3395 3395
3396 3396 /*
3397 3397 * Rport id/instance mappings remain valid until STMF is unloaded
3398 3398 */
3399 3399 static int
3400 3400 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3401 3401 {
3402 3402 const stmf_i_remote_port_t *irport1 = void_irport1;
3403 3403 const stmf_i_remote_port_t *irport2 = void_irport2;
3404 3404 int result;
3405 3405
3406 3406 /* Sort by code set then ident */
3407 3407 if (irport1->irport_id->code_set <
3408 3408 irport2->irport_id->code_set) {
3409 3409 return (-1);
3410 3410 } else if (irport1->irport_id->code_set >
3411 3411 irport2->irport_id->code_set) {
3412 3412 return (1);
3413 3413 }
3414 3414
3415 3415 /* Next by ident length */
3416 3416 if (irport1->irport_id->ident_length <
3417 3417 irport2->irport_id->ident_length) {
3418 3418 return (-1);
3419 3419 } else if (irport1->irport_id->ident_length >
3420 3420 irport2->irport_id->ident_length) {
3421 3421 return (1);
3422 3422 }
3423 3423
3424 3424 /* Code set and ident length both match, now compare idents */
3425 3425 result = memcmp(irport1->irport_id->ident,
3426 3426 irport2->irport_id->ident,
3427 3427 irport1->irport_id->ident_length);
3428 3428
3429 3429 if (result < 0) {
3430 3430 return (-1);
3431 3431 } else if (result > 0) {
3432 3432 return (1);
3433 3433 }
3434 3434
3435 3435 return (0);
3436 3436 }
3437 3437
3438 3438 static stmf_i_remote_port_t *
3439 3439 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3440 3440 {
3441 3441 int alloc_len;
3442 3442 stmf_i_remote_port_t *irport;
3443 3443
3444 3444 /*
3445 3445 * Lookup will bump the refcnt if there's an existing rport
3446 3446 * context for this identifier.
3447 3447 */
3448 3448 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3449 3449
3450 3450 alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3451 3451 rport_devid->ident_length - 1;
3452 3452 irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3453 3453 if (irport == NULL) {
3454 3454 return (NULL);
3455 3455 }
3456 3456
3457 3457 irport->irport_instance =
3458 3458 id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3459 3459 if (irport->irport_instance == -1) {
3460 3460 kmem_free(irport, alloc_len);
3461 3461 return (NULL);
3462 3462 }
3463 3463
3464 3464 irport->irport_id =
3465 3465 (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3466 3466 bcopy(rport_devid, irport->irport_id,
3467 3467 sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3468 3468 irport->irport_refcnt = 1;
3469 3469 mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3470 3470
3471 3471 return (irport);
3472 3472 }
3473 3473
3474 3474 static void
3475 3475 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3476 3476 {
3477 3477 id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3478 3478 mutex_destroy(&irport->irport_mutex);
3479 3479 kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3480 3480 irport->irport_id->ident_length - 1);
3481 3481 }
3482 3482
3483 3483 static stmf_i_remote_port_t *
3484 3484 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3485 3485 {
3486 3486 stmf_i_remote_port_t *irport;
3487 3487
3488 3488 mutex_enter(&stmf_state.stmf_lock);
3489 3489
3490 3490 /*
3491 3491 * Lookup will bump the refcnt if there's an existing rport
3492 3492 * context for this identifier.
3493 3493 */
3494 3494 if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3495 3495 mutex_exit(&stmf_state.stmf_lock);
3496 3496 return (irport);
3497 3497 }
3498 3498
3499 3499 irport = stmf_irport_create(rport_devid);
3500 3500 if (irport == NULL) {
3501 3501 mutex_exit(&stmf_state.stmf_lock);
3502 3502 return (NULL);
3503 3503 }
3504 3504
3505 3505 avl_add(&stmf_state.stmf_irportlist, irport);
3506 3506 mutex_exit(&stmf_state.stmf_lock);
3507 3507
3508 3508 return (irport);
3509 3509 }
3510 3510
3511 3511 static stmf_i_remote_port_t *
3512 3512 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3513 3513 {
3514 3514 stmf_i_remote_port_t *irport;
3515 3515 stmf_i_remote_port_t tmp_irport;
3516 3516
3517 3517 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3518 3518 tmp_irport.irport_id = rport_devid;
3519 3519 irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3520 3520 if (irport != NULL) {
3521 3521 mutex_enter(&irport->irport_mutex);
3522 3522 irport->irport_refcnt++;
3523 3523 mutex_exit(&irport->irport_mutex);
3524 3524 }
3525 3525
3526 3526 return (irport);
3527 3527 }
3528 3528
3529 3529 static void
3530 3530 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3531 3531 {
3532 3532 /*
3533 3533 * If we were actually going to remove unreferenced remote ports
3534 3534 * we would want to acquire stmf_state.stmf_lock before getting
3535 3535 * the irport mutex.
3536 3536 *
3537 3537 * Instead we're just going to leave it there even if unreferenced.
3538 3538 */
3539 3539 mutex_enter(&irport->irport_mutex);
3540 3540 irport->irport_refcnt--;
3541 3541 mutex_exit(&irport->irport_mutex);
3542 3542 }
3543 3543
3544 3544 /*
3545 3545 * Port provider has to make sure that register/deregister session and
3546 3546 * port are serialized calls.
3547 3547 */
3548 3548 stmf_status_t
3549 3549 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3550 3550 {
3551 3551 stmf_i_scsi_session_t *iss;
3552 3552 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3553 3553 lport->lport_stmf_private;
3554 3554 uint8_t lun[8];
3555 3555
3556 3556 /*
3557 3557 * Port state has to be online to register a scsi session. It is
3558 3558 * possible that we started an offline operation and a new SCSI
3559 3559 * session started at the same time (in that case also we are going
3560 3560 * to fail the registeration). But any other state is simply
3561 3561 * a bad port provider implementation.
3562 3562 */
3563 3563 if (ilport->ilport_state != STMF_STATE_ONLINE) {
3564 3564 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
3565 3565 stmf_trace(lport->lport_alias, "Port is trying to "
3566 3566 "register a session while the state is neither "
3567 3567 "online nor offlining");
3568 3568 }
3569 3569 return (STMF_FAILURE);
3570 3570 }
3571 3571 bzero(lun, 8);
3572 3572 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3573 3573 if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
3574 3574 stmf_trace(lport->lport_alias, "Could not register "
3575 3575 "remote port during session registration");
3576 3576 return (STMF_FAILURE);
3577 3577 }
3578 3578
3579 3579 iss->iss_flags |= ISS_BEING_CREATED;
3580 3580
3581 3581 if (ss->ss_rport == NULL) {
3582 3582 iss->iss_flags |= ISS_NULL_TPTID;
3583 3583 ss->ss_rport = stmf_scsilib_devid_to_remote_port(
3584 3584 ss->ss_rport_id);
3585 3585 if (ss->ss_rport == NULL) {
3586 3586 iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
3587 3587 stmf_trace(lport->lport_alias, "Device id to "
3588 3588 "remote port conversion failed");
3589 3589 return (STMF_FAILURE);
3590 3590 }
3591 3591 } else {
3592 3592 if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
3593 3593 ss->ss_rport->rport_tptid_sz, NULL)) {
3594 3594 iss->iss_flags &= ~ISS_BEING_CREATED;
3595 3595 stmf_trace(lport->lport_alias, "Remote port "
3596 3596 "transport id validation failed");
3597 3597 return (STMF_FAILURE);
3598 3598 }
3599 3599 }
3600 3600
3601 3601 /* sessions use the ilport_lock. No separate lock is required */
3602 3602 iss->iss_lockp = &ilport->ilport_lock;
3603 3603
3604 3604 if (iss->iss_sm != NULL)
3605 3605 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3606 3606 iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3607 3607 KM_SLEEP);
3608 3608
3609 3609 mutex_enter(&stmf_state.stmf_lock);
3610 3610 rw_enter(&ilport->ilport_lock, RW_WRITER);
3611 3611 (void) stmf_session_create_lun_map(ilport, iss);
3612 3612 ilport->ilport_nsessions++;
3613 3613 iss->iss_next = ilport->ilport_ss_list;
3614 3614 ilport->ilport_ss_list = iss;
3615 3615 rw_exit(&ilport->ilport_lock);
3616 3616 mutex_exit(&stmf_state.stmf_lock);
3617 3617
3618 3618 iss->iss_creation_time = ddi_get_time();
3619 3619 ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
3620 3620 iss->iss_flags &= ~ISS_BEING_CREATED;
3621 3621 /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3622 3622 iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3623 3623 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3624 3624 stmf_scsi_session_t *, ss);
3625 3625 return (STMF_SUCCESS);
3626 3626 }
3627 3627
/*
 * Deregister a SCSI session from a local port: wait out any in-flight
 * event delivery, notify the ALUA proxy peer when applicable, unlink the
 * session from the port's list and tear down its lun map.  Panics if the
 * session is not found on the port.
 */
void
stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
{
	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
	    lport->lport_stmf_private;
	stmf_i_scsi_session_t *iss, **ppss;
	int found = 0;
	stmf_ic_msg_t *ic_session_dereg;
	stmf_status_t ic_ret = STMF_FAILURE;

	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
	    stmf_scsi_session_t *, ss);

	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	if (ss->ss_rport_alias) {
		ss->ss_rport_alias = NULL;
	}

try_dereg_ss_again:
	mutex_enter(&stmf_state.stmf_lock);
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
		/* an event is being delivered to this session; retry */
		mutex_exit(&stmf_state.stmf_lock);
		delay(1);
		goto try_dereg_ss_again;
	}

	/* dereg proxy session if not standby port */
	if (stmf_state.stmf_alua_state == 1 &&
	    ilport->ilport_standby == 0 &&
	    ilport->ilport_alua == 1) {
		ic_session_dereg = ic_session_dereg_msg_alloc(
		    ss, stmf_proxy_msg_id);
		if (ic_session_dereg) {
			ic_ret = ic_tx_msg(ic_session_dereg);
			if (ic_ret == STMF_IC_MSG_SUCCESS) {
				/* msg id advances only on successful send */
				stmf_proxy_msg_id++;
			}
		}
	}

	/* unlink the session from the port's singly-linked list */
	rw_enter(&ilport->ilport_lock, RW_WRITER);
	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
	    ppss = &((*ppss)->iss_next)) {
		if (iss == (*ppss)) {
			*ppss = (*ppss)->iss_next;
			found = 1;
			break;
		}
	}
	if (!found) {
		cmn_err(CE_PANIC, "Deregister session called for non existent"
		    " session");
	}
	ilport->ilport_nsessions--;

	stmf_irport_deregister(iss->iss_irport);
	(void) stmf_session_destroy_lun_map(ilport, iss);
	rw_exit(&ilport->ilport_lock);
	mutex_exit(&stmf_state.stmf_lock);

	/* free the rport we fabricated in stmf_register_scsi_session() */
	if (iss->iss_flags & ISS_NULL_TPTID) {
		stmf_remote_port_free(ss->ss_rport);
	}
}
3694 3694
/*
 * Look up the internal session structure for a given session id by walking
 * every local port's session list.
 *
 * If stay_locked is set and the session is found, this returns with the
 * owning port's ilport_lock still held as RW_WRITER; the caller is
 * responsible for eventually dropping it. stmf_lock is always released
 * before returning. Returns NULL if no session matches.
 */
stmf_i_scsi_session_t *
stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
{
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;

	mutex_enter(&stmf_state.stmf_lock);
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			if (iss->iss_ss->ss_session_id == session_id) {
				if (!stay_locked)
					rw_exit(&ilport->ilport_lock);
				mutex_exit(&stmf_state.stmf_lock);
				return (iss);
			}
		}
		rw_exit(&ilport->ilport_lock);
	}
	mutex_exit(&stmf_state.stmf_lock);
	return (NULL);
}
3719 3719
3720 3720 #define MAX_ALIAS 128
3721 3721
3722 3722 static int
3723 3723 stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3724 3724 {
3725 3725 const stmf_i_itl_kstat_t *kstat_nm1 = itl_kstat_1;
3726 3726 const stmf_i_itl_kstat_t *kstat_nm2 = itl_kstat_2;
3727 3727 int ret;
3728 3728
3729 3729 ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3730 3730 if (ret < 0) {
3731 3731 return (-1);
3732 3732 } else if (ret > 0) {
3733 3733 return (1);
3734 3734 }
3735 3735 return (0);
3736 3736 }
3737 3737
/*
 * Find an existing ITL kstat entry by its kstat name.
 *
 * Caller must hold stmf_lock. Returns the entry, or NULL if none exists.
 * NOTE(review): kstat_nm is copied with strcpy() into a fixed-size field;
 * all callers build the name with snprintf(..., KSTAT_STRLEN, ...), which
 * presumably bounds it — confirm iitl_kstat_nm is at least KSTAT_STRLEN.
 */
static stmf_i_itl_kstat_t *
stmf_itl_kstat_lookup(char *kstat_nm)
{
	/* Stack template used purely as an avl_find() search key. */
	stmf_i_itl_kstat_t tmp;
	stmf_i_itl_kstat_t *itl_kstat;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	(void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
	itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
	return (itl_kstat);
}
3749 3749
3750 3750 static void
3751 3751 stmf_delete_itl_kstat_by_lport(char *tgt)
3752 3752 {
3753 3753 stmf_i_itl_kstat_t *ks_itl, *next;
3754 3754
3755 3755 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3756 3756 ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3757 3757 for (; ks_itl != NULL; ks_itl = next) {
3758 3758 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3759 3759 if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3760 3760 stmf_teardown_itl_kstats(ks_itl);
3761 3761 avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3762 3762 kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3763 3763 }
3764 3764 }
3765 3765 }
3766 3766
3767 3767 static void
3768 3768 stmf_delete_itl_kstat_by_guid(char *guid)
3769 3769 {
3770 3770 stmf_i_itl_kstat_t *ks_itl, *next;
3771 3771
3772 3772 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3773 3773 ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3774 3774 for (; ks_itl != NULL; ks_itl = next) {
3775 3775 next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3776 3776 if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3777 3777 stmf_teardown_itl_kstats(ks_itl);
3778 3778 avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3779 3779 kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3780 3780 }
3781 3781 }
3782 3782 }
3783 3783
/*
 * Create (or return the existing) tracking entry for an ITL's kstats and
 * insert it into stmf_itl_kstat_list. The entry caches the kstat pointers
 * and string buffer owned by the itl so they can be torn down later even
 * after the itl itself is gone.
 *
 * Caller must hold stmf_lock. Returns NULL on allocation failure.
 */
static stmf_i_itl_kstat_t *
stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
    scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
{
	stmf_i_itl_kstat_t *ks_itl;
	int i, len;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));
	/* Already tracked under this name; reuse the existing entry. */
	if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
		return (ks_itl);

	len = sizeof (stmf_i_itl_kstat_t);
	ks_itl = kmem_zalloc(len, KM_NOSLEEP);
	if (ks_itl == NULL)
		return (NULL);

	(void) strcpy(ks_itl->iitl_kstat_nm, nm);
	/* ident is not guaranteed NUL-terminated; copy and terminate. */
	bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
	ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
	/* Render the LU GUID as lowercase hex. */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
		    lun->ident[i]);
	}
	ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
	ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
	ks_itl->iitl_kstat_info = itl->itl_kstat_info;
	ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
	ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
	ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
	avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);

	return (ks_itl);
}
3817 3817
/*
 * Create the per-ITL kstats (info, taskq, lu-xfer, lport-xfer) for itl, or
 * reuse an existing set if an ITL with the same initiator/port/LUN tuple
 * was seen before. On success the kstat handles are cached in both the itl
 * and the global stmf_itl_kstat_list.
 *
 * Returns STMF_SUCCESS, or STMF_ALLOC_FAILURE if any allocation or
 * kstat_create() fails (all partial state is cleaned up in that case).
 */
stmf_status_t
stmf_setup_itl_kstats(stmf_itl_data_t *itl)
{
	char ks_itl_id[32];
	char ks_nm[KSTAT_STRLEN];
	char ks_itl_nm[KSTAT_STRLEN];
	stmf_kstat_itl_info_t *ks_itl;
	stmf_scsi_session_t *ss;
	stmf_i_scsi_session_t *iss;
	stmf_i_local_port_t *ilport;
	char *strbuf;
	int id, len, i;
	char *rport_alias;
	char *lport_alias;
	char *lu_alias;
	stmf_i_itl_kstat_t *tmp_kstat;

	/*
	 * Allocate enough memory in the ITL to hold the relevant
	 * identifiers.
	 * rport and lport identifiers come from the stmf_scsi_session_t.
	 * ident might not be null terminated.
	 */
	ss = itl->itl_session->iss_ss;
	iss = ss->ss_stmf_private;
	ilport = ss->ss_lport->lport_stmf_private;
	/* Unique id: <rport instance>.<lport instance>.<lun number>. */
	(void) snprintf(ks_itl_id, 32, "%d.%d.%d",
	    iss->iss_irport->irport_instance, ilport->ilport_instance,
	    itl->itl_lun);

	(void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
	/*
	 * If this itl_kstat already exists, just share its kstat handles
	 * and string buffer instead of creating new ones.
	 */
	if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
		itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
		itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
		itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
		itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
		itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
		itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
		return (STMF_SUCCESS);
	}

	/* New itl_kstat */
	rport_alias = (ss->ss_rport_alias == NULL) ?
	    "" : ss->ss_rport_alias;
	lport_alias = (ss->ss_lport->lport_alias == NULL) ?
	    "" : ss->ss_lport->lport_alias;
	lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
	    "" : itl->itl_ilu->ilu_lu->lu_alias;

	/*
	 * One backing buffer holds all NUL-terminated strings referenced by
	 * the named kstats below (each term includes room for its '\0').
	 */
	itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
	    (strnlen(rport_alias, MAX_ALIAS) + 1) +
	    (ss->ss_lport->lport_id->ident_length + 1) +
	    (strnlen(lport_alias, MAX_ALIAS) + 1) +
	    (STMF_GUID_INPUT + 1) +
	    (strnlen(lu_alias, MAX_ALIAS) + 1) +
	    MAX_PROTO_STR_LEN;
	itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
	    KM_NOSLEEP);
	if (itl->itl_kstat_strbuf == NULL) {
		return (STMF_ALLOC_FAILURE);
	}

	ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
	    KM_NOSLEEP);
	if (ks_itl == NULL) {
		kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
		return (STMF_ALLOC_FAILURE);
	}

	if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
	    0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
	    sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* Virtual kstat: we supply the data buffer ourselves. */
	itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
	itl->itl_kstat_info->ks_data = ks_itl;

	kstat_named_init(&ks_itl->i_rport_name, "rport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_name, "lport-name",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_protocol, "protocol",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
	    KSTAT_DATA_STRING);
	kstat_named_init(&ks_itl->i_lu_number, "lu-number",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
	    KSTAT_DATA_UINT64);

	/* Pack the identifier strings back-to-back into the backing buffer. */
	strbuf = itl->itl_kstat_strbuf;

	/* Rport */
	len = ss->ss_rport_id->ident_length;
	bcopy(ss->ss_rport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
	strbuf++;

	len = strnlen(rport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, rport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
	strbuf += len + 1;

	/* Lport */
	len = ss->ss_lport->lport_id->ident_length;
	bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
	strbuf += len;
	*strbuf = '\0';
	kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
	strbuf++;

	len = strnlen(lport_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lport_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
	strbuf += len + 1;

	/* Clamp unknown protocol ids to PROTOCOL_ANY for the name lookup. */
	id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
	    PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
	kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);

	/* LU */
	for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
		(void) sprintf(&strbuf[i * 2], "%02x",
		    itl->itl_ilu->ilu_lu->lu_id->ident[i]);
	}
	kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
	strbuf += STMF_GUID_INPUT + 1;

	len = strnlen(lu_alias, MAX_ALIAS);
	(void) strncpy(strbuf, lu_alias, len + 1);
	kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
	strbuf += len + 1;

	ks_itl->i_lu_number.value.ui64 = itl->itl_lun;

	/* Now create the I/O kstats */
	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s", ks_itl_id);
	if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s", ks_itl_id);
	if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	(void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s", ks_itl_id);
	if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
	    ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
		goto itl_kstat_cleanup;
	}

	/* Install all the kstats */
	kstat_install(itl->itl_kstat_info);
	kstat_install(itl->itl_kstat_taskq);
	kstat_install(itl->itl_kstat_lu_xfer);
	kstat_install(itl->itl_kstat_lport_xfer);

	/* Add new itl_kstat to stmf_itl_kstat_list */
	if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
	    itl->itl_ilu->ilu_lu->lu_id) != NULL)
		return (STMF_SUCCESS);

itl_kstat_cleanup:
	/* Undo whatever was created before the failure point. */
	if (itl->itl_kstat_taskq)
		kstat_delete(itl->itl_kstat_taskq);
	if (itl->itl_kstat_lu_xfer)
		kstat_delete(itl->itl_kstat_lu_xfer);
	if (itl->itl_kstat_lport_xfer)
		kstat_delete(itl->itl_kstat_lport_xfer);
	if (itl->itl_kstat_info)
		kstat_delete(itl->itl_kstat_info);
	kmem_free(ks_itl, sizeof (*ks_itl));
	kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
	cmn_err(CE_WARN, "STMF: kstat_create itl failed");
	return (STMF_ALLOC_FAILURE);
}
4022 4022
/*
 * Delete the four kstats tracked by an ITL kstat entry and free the named
 * kstat data and string buffer that stmf_setup_itl_kstats() allocated.
 * Does not free the stmf_i_itl_kstat_t itself; callers do that.
 */
static void
stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
{
	kstat_delete(ks->iitl_kstat_lport_xfer);
	kstat_delete(ks->iitl_kstat_lu_xfer);
	kstat_delete(ks->iitl_kstat_taskq);
	/* ks_data was kmem_zalloc'd by us (virtual kstat); free it first. */
	kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
	kstat_delete(ks->iitl_kstat_info);
	kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
}
4033 4033
/*
 * Final teardown of an ITL: unlink it from the LU's itl list, notify the LU
 * provider that the handle is gone, and free the structure. The itl must
 * already be marked STMF_ITL_BEING_TERMINATED and its refcount drained.
 */
void
stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
{
	stmf_itl_data_t **itlpp;
	stmf_i_lu_t *ilu;

	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	mutex_enter(&ilu->ilu_task_lock);
	/* Locate the itl on the LU's singly-linked list; it must be there. */
	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
	    itlpp = &(*itlpp)->itl_next) {
		if ((*itlpp) == itl)
			break;
	}
	ASSERT((*itlpp) != NULL);
	*itlpp = itl->itl_next;
	mutex_exit(&ilu->ilu_task_lock);
	/* Let the LU provider release its side of the handle. */
	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
	    (uint32_t)itl->itl_hdlrm_reason);

	kmem_free(itl, sizeof (*itl));
}
4057 4057
/*
 * Associate an LU-provider ITL handle with the (session, lun) entry in the
 * session's LUN map and set up the ITL's kstats.
 *
 * Either ss or session_id identifies the session. Returns STMF_NOT_FOUND if
 * the session or LUN entry does not exist, STMF_ALREADY if the entry already
 * has an ITL, STMF_ALLOC_FAILURE on allocation/kstat failure.
 *
 * NOTE(review): when ss == NULL, stmf_session_id_to_issptr(..., 1) returns
 * with the owning port's lock held, and stmf_lock is taken afterwards, then
 * iss_lockp is acquired again below — verify this path's lock ordering and
 * that iss_lockp is not already held at that point.
 */
stmf_status_t
stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_itl_data_t *itl;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_ent_t *lun_map_ent;
	stmf_i_lu_t *ilu;
	uint16_t n;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	if (ss == NULL) {
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	}

	/*
	 * Acquire stmf_lock for stmf_itl_kstat_lookup.
	 */
	mutex_enter(&stmf_state.stmf_lock);
	rw_enter(iss->iss_lockp, RW_WRITER);
	/* Decode the 16-bit LUN number from the SAM-style LUN bytes. */
	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	lun_map_ent = (stmf_lun_map_ent_t *)
	    stmf_get_ent_from_map(iss->iss_sm, n);
	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_NOT_FOUND);
	}
	if (lun_map_ent->ent_itl_datap != NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALREADY);
	}

	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
	if (itl == NULL) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	itl->itl_ilu = ilu;
	itl->itl_session = iss;
	/* Starts at 1: the LUN map entry itself holds a reference. */
	itl->itl_counter = 1;
	itl->itl_lun = n;
	itl->itl_handle = itl_handle;

	if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
		kmem_free(itl, sizeof (*itl));
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		return (STMF_ALLOC_FAILURE);
	}

	/* Link the itl onto the LU's list and publish it in the map entry. */
	mutex_enter(&ilu->ilu_task_lock);
	itl->itl_next = ilu->ilu_itl_list;
	ilu->ilu_itl_list = itl;
	mutex_exit(&ilu->ilu_task_lock);
	lun_map_ent->ent_itl_datap = itl;
	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	return (STMF_SUCCESS);
}
4126 4126
/*
 * Begin deregistration of an ITL: atomically mark it BEING_TERMINATED
 * (exactly one caller wins; later callers return immediately), drop the
 * reference owned by the LUN map entry, and free the itl if and only if
 * the refcount has drained to zero.
 */
void
stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
{
	uint8_t old, new;

	/* CAS loop: set STMF_ITL_BEING_TERMINATED once; lose => bail. */
	do {
		old = new = itl->itl_flags;
		if (old & STMF_ITL_BEING_TERMINATED)
			return;
		new |= STMF_ITL_BEING_TERMINATED;
	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
	itl->itl_hdlrm_reason = hdlrm_reason;

	ASSERT(itl->itl_counter);

	/* Other holders still reference the itl; last one frees it. */
	if (atomic_add_32_nv(&itl->itl_counter, -1))
		return;

	/*
	 * Brief grace period, then re-check: if someone re-referenced the
	 * itl in the window, they become responsible for the release.
	 */
	drv_usecwait(10);
	if (itl->itl_counter)
		return;

	stmf_release_itl_handle(lu, itl);
}
4151 4151
/*
 * Detach and deregister every ITL handle referencing the given LU, across
 * all ports and sessions. The LU's ref count (ilu_ref_cnt) bounds how many
 * ITLs can exist; the scratch array is sized from it outside stmf_lock and
 * the whole scan restarts if the count changed in the meantime.
 *
 * Returns STMF_NOT_FOUND if the LU has no references, else STMF_SUCCESS.
 */
stmf_status_t
stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
{
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *ent;
	uint32_t nmaps, nu;
	stmf_itl_data_t **itl_list;
	int i;

	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;

dereg_itl_start:;
	nmaps = ilu->ilu_ref_cnt;
	if (nmaps == 0)
		return (STMF_NOT_FOUND);
	/* KM_SLEEP allocation is done before taking stmf_lock. */
	itl_list = (stmf_itl_data_t **)kmem_zalloc(
	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
	mutex_enter(&stmf_state.stmf_lock);
	if (nmaps != ilu->ilu_ref_cnt) {
		/* Something changed, start all over */
		mutex_exit(&stmf_state.stmf_lock);
		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
		goto dereg_itl_start;
	}
	/* Collect (and detach) every itl that points at this LU. */
	nu = 0;
	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
	    ilport = ilport->ilport_next) {
		rw_enter(&ilport->ilport_lock, RW_WRITER);
		for (iss = ilport->ilport_ss_list; iss != NULL;
		    iss = iss->iss_next) {
			lm = iss->iss_sm;
			if (!lm)
				continue;
			for (i = 0; i < lm->lm_nentries; i++) {
				if (lm->lm_plus[i] == NULL)
					continue;
				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
				if ((ent->ent_lu == lu) &&
				    (ent->ent_itl_datap)) {
					itl_list[nu++] = ent->ent_itl_datap;
					ent->ent_itl_datap = NULL;
					/* Found all the itls we can have. */
					if (nu == nmaps) {
						rw_exit(&ilport->ilport_lock);
						goto dai_scan_done;
					}
				}
			} /* lun table for a session */
		} /* sessions */
		rw_exit(&ilport->ilport_lock);
	} /* ports */

dai_scan_done:
	mutex_exit(&stmf_state.stmf_lock);

	/* Deregister the collected itls outside all locks. */
	for (i = 0; i < nu; i++) {
		stmf_do_itl_dereg(lu, itl_list[i],
		    STMF_ITL_REASON_DEREG_REQUEST);
	}
	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));

	return (STMF_SUCCESS);
}
4217 4217
/*
 * Deregister a single ITL handle, identified either by its LUN bytes or,
 * when lun is NULL, by scanning the session's LUN map for a matching
 * itl_handle. Either ss or session_id identifies the session (the
 * session_id lookup returns with the session's lock held, matching the
 * rw_exit calls below).
 *
 * Returns STMF_INVALID_ARG for unusable arguments, STMF_NOT_FOUND when no
 * matching ITL exists, else STMF_SUCCESS.
 */
stmf_status_t
stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
{
	stmf_i_scsi_session_t *iss;
	stmf_itl_data_t *itl;
	stmf_lun_map_ent_t *ent;
	stmf_lun_map_t *lm;
	int i;
	uint16_t n;

	if (ss == NULL) {
		if (session_id == STMF_SESSION_ID_NONE)
			return (STMF_INVALID_ARG);
		/* stay_locked: returns with iss_lockp held as writer. */
		iss = stmf_session_id_to_issptr(session_id, 1);
		if (iss == NULL)
			return (STMF_NOT_FOUND);
	} else {
		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
	}
	lm = iss->iss_sm;
	if (lm == NULL) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}

	if (lun) {
		/* Decode the 16-bit LUN number from the SAM-style bytes. */
		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
		ent = (stmf_lun_map_ent_t *)
		    stmf_get_ent_from_map(iss->iss_sm, n);
	} else {
		if (itl_handle == NULL) {
			rw_exit(iss->iss_lockp);
			return (STMF_INVALID_ARG);
		}
		/* No LUN given: search the map for the matching handle. */
		ent = NULL;
		for (i = 0; i < lm->lm_nentries; i++) {
			if (lm->lm_plus[i] == NULL)
				continue;
			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
			if (ent->ent_itl_datap &&
			    (ent->ent_itl_datap->itl_handle == itl_handle)) {
				break;
			}
		}
	}
	if ((ent == NULL) || (ent->ent_lu != lu) ||
	    (ent->ent_itl_datap == NULL)) {
		rw_exit(iss->iss_lockp);
		return (STMF_NOT_FOUND);
	}
	/* Detach the itl from the map entry, then deregister it unlocked. */
	itl = ent->ent_itl_datap;
	ent->ent_itl_datap = NULL;
	rw_exit(iss->iss_lockp);
	stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);

	return (STMF_SUCCESS);
}
4277 4277
4278 4278 stmf_status_t
4279 4279 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4280 4280 uint64_t session_id, void **itl_handle_retp)
4281 4281 {
4282 4282 stmf_i_scsi_session_t *iss;
4283 4283 stmf_lun_map_ent_t *ent;
4284 4284 stmf_lun_map_t *lm;
4285 4285 stmf_status_t ret;
4286 4286 int i;
4287 4287 uint16_t n;
4288 4288
4289 4289 if (ss == NULL) {
4290 4290 iss = stmf_session_id_to_issptr(session_id, 1);
4291 4291 if (iss == NULL)
4292 4292 return (STMF_NOT_FOUND);
4293 4293 } else {
4294 4294 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4295 4295 rw_enter(iss->iss_lockp, RW_WRITER);
4296 4296 }
4297 4297
4298 4298 ent = NULL;
4299 4299 if (lun == NULL) {
4300 4300 lm = iss->iss_sm;
4301 4301 for (i = 0; i < lm->lm_nentries; i++) {
4302 4302 if (lm->lm_plus[i] == NULL)
4303 4303 continue;
4304 4304 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4305 4305 if (ent->ent_lu == lu)
4306 4306 break;
4307 4307 }
4308 4308 } else {
4309 4309 n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4310 4310 ent = (stmf_lun_map_ent_t *)
4311 4311 stmf_get_ent_from_map(iss->iss_sm, n);
4312 4312 if (lu && (ent->ent_lu != lu))
4313 4313 ent = NULL;
4314 4314 }
4315 4315 if (ent && ent->ent_itl_datap) {
4316 4316 *itl_handle_retp = ent->ent_itl_datap->itl_handle;
4317 4317 ret = STMF_SUCCESS;
4318 4318 } else {
4319 4319 ret = STMF_NOT_FOUND;
4320 4320 }
4321 4321
4322 4322 rw_exit(iss->iss_lockp);
4323 4323 return (ret);
4324 4324 }
4325 4325
4326 4326 stmf_data_buf_t *
4327 4327 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4328 4328 uint32_t flags)
4329 4329 {
4330 4330 stmf_i_scsi_task_t *itask =
4331 4331 (stmf_i_scsi_task_t *)task->task_stmf_private;
4332 4332 stmf_local_port_t *lport = task->task_lport;
4333 4333 stmf_data_buf_t *dbuf;
4334 4334 uint8_t ndx;
4335 4335
4336 4336 ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4337 4337 if (ndx == 0xff)
4338 4338 return (NULL);
4339 4339 dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4340 4340 task, size, pminsize, flags);
4341 4341 if (dbuf) {
4342 4342 task->task_cur_nbufs++;
4343 4343 itask->itask_allocated_buf_map |= (1 << ndx);
4344 4344 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4345 4345 dbuf->db_handle = ndx;
4346 4346 return (dbuf);
4347 4347 }
4348 4348
4349 4349 return (NULL);
4350 4350 }
4351 4351
4352 4352 stmf_status_t
4353 4353 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4354 4354 {
4355 4355 stmf_i_scsi_task_t *itask =
4356 4356 (stmf_i_scsi_task_t *)task->task_stmf_private;
4357 4357 stmf_local_port_t *lport = task->task_lport;
4358 4358 uint8_t ndx;
4359 4359 stmf_status_t ret;
4360 4360
4361 4361 ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4362 4362 ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4363 4363 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4364 4364
4365 4365 if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4366 4366 return (STMF_FAILURE);
4367 4367 if (lport->lport_ds->ds_setup_dbuf == NULL)
4368 4368 return (STMF_FAILURE);
4369 4369
4370 4370 ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4371 4371 if (ndx == 0xff)
4372 4372 return (STMF_FAILURE);
4373 4373 ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4374 4374 if (ret == STMF_FAILURE)
4375 4375 return (STMF_FAILURE);
4376 4376 itask->itask_dbufs[ndx] = dbuf;
4377 4377 task->task_cur_nbufs++;
4378 4378 itask->itask_allocated_buf_map |= (1 << ndx);
4379 4379 dbuf->db_handle = ndx;
4380 4380
4381 4381 return (STMF_SUCCESS);
4382 4382 }
4383 4383
/*
 * Release an LU-provided data buffer previously registered with
 * stmf_setup_dbuf(): free its slot in the task, then let the port's data
 * store tear down its side.
 */
void
stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
	ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	/* Release the slot before handing the buffer back to the port. */
	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
}
4399 4399
/*
 * Free a data buffer allocated by stmf_alloc_dbuf(): release its slot in
 * the task and return the buffer to the port's data store.
 */
void
stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_local_port_t *lport = task->task_lport;

	/* Release the slot before handing the buffer back to the port. */
	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
	task->task_cur_nbufs--;
	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
}
4411 4411
4412 4412 stmf_data_buf_t *
4413 4413 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4414 4414 {
4415 4415 stmf_i_scsi_task_t *itask;
4416 4416
4417 4417 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4418 4418 if (h > 3)
4419 4419 return (NULL);
4420 4420 return (itask->itask_dbufs[h]);
4421 4421 }
4422 4422
/* ARGSUSED */
/*
 * Allocate and initialize a scsi_task_t for a new command on the given
 * session/LUN. Tasks are recycled from the LU's free list when one with a
 * large enough CDB buffer is available; otherwise a new one is allocated
 * and linked onto the LU's task list. Commands addressing an unmapped LUN
 * are routed to dlun0 (which generates the proper SCSI response).
 *
 * Returns NULL if the LU has a reset in progress or allocation fails.
 */
struct scsi_task *
stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
    uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
{
	stmf_lu_t *lu;
	stmf_i_scsi_session_t *iss;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_task_t *itask;
	stmf_i_scsi_task_t **ppitask;
	scsi_task_t *task;
	uint8_t *l;
	stmf_lun_map_ent_t *lun_map_ent;
	uint16_t cdb_length;
	uint16_t luNbr;
	uint8_t new_task = 0;

	/*
	 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
	 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
	 * depend upon this alignment.
	 */
	if (cdb_length_in >= 16)
		cdb_length = cdb_length_in + 7;
	else
		cdb_length = 16 + 7;
	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/* Decode the 16-bit LUN number from the SAM-style LUN bytes. */
	luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
	rw_enter(iss->iss_lockp, RW_READER);
	lun_map_ent =
	    (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
	if (!lun_map_ent) {
		/* Unmapped LUN: route the command to the pseudo-LU dlun0. */
		lu = dlun0;
	} else {
		lu = lun_map_ent->ent_lu;
	}
	ilu = lu->lu_stmf_private;
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		return (NULL);
	}
	ASSERT(lu == dlun0 || (ilu->ilu_state != STMF_STATE_OFFLINING &&
	    ilu->ilu_state != STMF_STATE_OFFLINE));
	/*
	 * Try to recycle a task from the LU's free list whose CDB buffer is
	 * big enough; fall back to allocating a new task.
	 */
	do {
		if (ilu->ilu_free_tasks == NULL) {
			new_task = 1;
			break;
		}
		mutex_enter(&ilu->ilu_task_lock);
		for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
		    ((*ppitask)->itask_cdb_buf_size < cdb_length);
		    ppitask = &((*ppitask)->itask_lu_free_next))
			;
		if (*ppitask) {
			itask = *ppitask;
			*ppitask = (*ppitask)->itask_lu_free_next;
			ilu->ilu_ntasks_free--;
			/* Track the low-water mark of the free list. */
			if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
				ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		} else {
			new_task = 1;
		}
		mutex_exit(&ilu->ilu_task_lock);
	/* CONSTCOND */
	} while (0);

	if (!new_task) {
		/*
		 * Save the task_cdb pointer and zero per cmd fields.
		 * We know the task_cdb_length is large enough by task
		 * selection process above.
		 */
		uint8_t *save_cdb;
		uintptr_t t_start, t_end;

		task = itask->itask_task;
		save_cdb = task->task_cdb;	/* save */
		t_start = (uintptr_t)&task->task_flags;
		t_end = (uintptr_t)&task->task_extended_cmd;
		bzero((void *)t_start, (size_t)(t_end - t_start));
		task->task_cdb = save_cdb;	/* restore */
		itask->itask_ncmds = 0;
	} else {
		task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
		    cdb_length, AF_FORCE_NOSLEEP);
		if (task == NULL) {
			rw_exit(iss->iss_lockp);
			return (NULL);
		}
		task->task_lu = lu;
		l = task->task_lun_no;
		l[0] = lun[0];
		l[1] = lun[1];
		l[2] = lun[2];
		l[3] = lun[3];
		l[4] = lun[4];
		l[5] = lun[5];
		l[6] = lun[6];
		l[7] = lun[7];
		/* Round the CDB pointer up to an 8-byte boundary (see above). */
		task->task_cdb = (uint8_t *)task->task_port_private;
		if ((ulong_t)(task->task_cdb) & 7ul) {
			task->task_cdb = (uint8_t *)(((ulong_t)
			    (task->task_cdb) + 7ul) & ~(7ul));
		}
		itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
		itask->itask_cdb_buf_size = cdb_length;
		mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
	}
	/* Per-command initialization, common to new and recycled tasks. */
	task->task_session = ss;
	task->task_lport = lport;
	task->task_cdb_length = cdb_length_in;
	itask->itask_flags = ITASK_IN_TRANSITION;
	itask->itask_waitq_time = 0;
	itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
	itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
	itask->itask_read_xfer = itask->itask_write_xfer = 0;
	itask->itask_audit_index = 0;

	if (new_task) {
		/* Let the LU provider initialize its part of the task. */
		if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		mutex_enter(&ilu->ilu_task_lock);
		/* A reset may have started while we were allocating. */
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			mutex_exit(&ilu->ilu_task_lock);
			rw_exit(iss->iss_lockp);
			stmf_free(task);
			return (NULL);
		}
		itask->itask_lu_next = ilu->ilu_tasks;
		if (ilu->ilu_tasks)
			ilu->ilu_tasks->itask_lu_prev = itask;
		ilu->ilu_tasks = itask;
		/* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
		ilu->ilu_ntasks++;
		mutex_exit(&ilu->ilu_task_lock);
	}

	itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
	atomic_add_32(itask->itask_ilu_task_cntr, 1);
	itask->itask_start_time = ddi_get_lbolt();

	/* Hold a reference on the ITL (if any) for the life of this task. */
	if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
	    lun_map_ent->ent_itl_datap) != NULL)) {
		atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
		task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
	} else {
		itask->itask_itl_datap = NULL;
		task->task_lu_itl_handle = NULL;
	}

	rw_exit(iss->iss_lockp);
	return (task);
}
4579 4579
/*
 * Return a completed task to its LU's free list.  Caller must hold the
 * session's iss_lockp (asserted below); the task must no longer be known
 * to the LU or the target port.
 */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	/* Free-list membership replaces every other itask flag. */
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	/* Push onto the LU's singly-linked free list. */
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	/*
	 * When every task on the LU is free, wake anyone waiting for the
	 * LU to fully drain (offline in progress).
	 */
	if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
		cv_signal(&ilu->ilu_offline_pending_cv);
	mutex_exit(&ilu->ilu_task_lock);
	/* Drop this task's count against the LU's active-task counter. */
	atomic_add_32(itask->itask_ilu_task_cntr, -1);
}
4599 4599
/*
 * Trim the LU's free-task cache.  Releases half of the minimum number of
 * free tasks observed since the last trim (ilu_ntasks_min_free), so the
 * cache shrinks toward actual demand.  ilu_task_lock is dropped around
 * lu_task_free() because the LU provider may block.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t *lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		/* Pop one task off the free list. */
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Let the LU provider release its per-task state. */
		lu->lu_task_free(itask->itask_task);
		/* Unlink from the LU's doubly-linked all-tasks list. */
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4641 4641
/*
 * Called with stmf_lock held.
 *
 * Service-thread pass over the LUs queued for draining: trim each LU's
 * task free list via stmf_task_lu_check_freelist().  stmf_lock is
 * dropped around the per-LU work, with ILU_STALL_DEREGISTER set so the
 * LU cannot be deregistered while unlocked.  The pass is bounded to
 * roughly 10ms of lbolt time per invocation.
 */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			/* Nothing to trim; just reset the low watermark. */
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4673 4673
4674 4674 void
4675 4675 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
4676 4676 {
4677 4677 clock_t l = ddi_get_lbolt();
4678 4678 clock_t ps = drv_usectohz(1000000);
4679 4679 stmf_i_scsi_task_t *itask;
4680 4680 scsi_task_t *task;
4681 4681 uint32_t to;
4682 4682
4683 4683 mutex_enter(&ilu->ilu_task_lock);
4684 4684 for (itask = ilu->ilu_tasks; itask != NULL;
4685 4685 itask = itask->itask_lu_next) {
4686 4686 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
4687 4687 ITASK_BEING_ABORTED)) {
4688 4688 continue;
4689 4689 }
4690 4690 task = itask->itask_task;
4691 4691 if (task->task_timeout == 0)
4692 4692 to = stmf_default_task_timeout;
4693 4693 else
4694 4694 to = task->task_timeout;
4695 4695 if ((itask->itask_start_time + (to * ps)) > l)
4696 4696 continue;
4697 4697 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
4698 4698 STMF_TIMEOUT, NULL);
4699 4699 }
4700 4700 mutex_exit(&ilu->ilu_task_lock);
4701 4701 }
4702 4702
/*
 * Called with stmf_lock held.
 *
 * Service-thread pass that detects LUs whose tasks may be stalled.
 * Each LU has two task counters; the "current" one counts newly
 * allocated tasks.  If the inactive counter has drained to zero since
 * the last pass, the counters are simply swapped.  Otherwise tasks are
 * lingering, so run the per-task timeout check.  stmf_lock is dropped
 * around stmf_do_ilu_timeouts() with ILU_STALL_DEREGISTER set so the LU
 * cannot be deregistered meanwhile.  Bounded to ~10ms per invocation.
 */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				/* Inactive counter drained; swap and go on. */
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4740 4740
4741 4741 /*
4742 4742 * Kills all tasks on a lu except tm_task
4743 4743 */
4744 4744 void
4745 4745 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4746 4746 {
4747 4747 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4748 4748 stmf_i_scsi_task_t *itask;
4749 4749
4750 4750 mutex_enter(&ilu->ilu_task_lock);
4751 4751
4752 4752 for (itask = ilu->ilu_tasks; itask != NULL;
4753 4753 itask = itask->itask_lu_next) {
4754 4754 if (itask->itask_flags & ITASK_IN_FREE_LIST)
4755 4755 continue;
4756 4756 if (itask->itask_task == tm_task)
4757 4757 continue;
4758 4758 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4759 4759 }
4760 4760 mutex_exit(&ilu->ilu_task_lock);
4761 4761 }
4762 4762
/*
 * Release every data buffer still attached to a task.
 * itask_allocated_buf_map carries one bit per dbuf slot (4 slots max).
 * For each allocated buffer: close out any in-flight transfer
 * accounting, then return the buffer either to the LU (DB_LU_DATA_BUF)
 * or to the port provider's data store.
 */
void
stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
{
	int i;
	uint8_t map;

	if ((map = itask->itask_allocated_buf_map) == 0)
		return;
	for (i = 0; i < 4; i++) {
		if (map & 1) {
			stmf_data_buf_t *dbuf;

			dbuf = itask->itask_dbufs[i];
			/* Finish transfer timing if a xfer was started. */
			if (dbuf->db_xfer_start_timestamp) {
				stmf_lport_xfer_done(itask, dbuf);
			}
			if (dbuf->db_flags & DB_LU_DATA_BUF) {
				/*
				 * LU needs to clean up buffer.
				 * LU is required to free the buffer
				 * in the xfer_done handler.
				 */
				scsi_task_t *task = itask->itask_task;
				stmf_lu_t *lu = task->task_lu;

				lu->lu_dbuf_free(task, dbuf);
				ASSERT(((itask->itask_allocated_buf_map>>i)
				    & 1) == 0); /* must be gone */
			} else {
				ASSERT(dbuf->db_lu_private == NULL);
				dbuf->db_lu_private = NULL;
				lport->lport_ds->ds_free_data_buf(
				    lport->lport_ds, dbuf);
			}
		}
		map >>= 1;
	}
	itask->itask_allocated_buf_map = 0;
}
4802 4802
/*
 * Final teardown of a task: free its data buffers, finish ITL/kstat
 * accounting, notify the port provider, drop worker/system task counts,
 * and return the task to its LU's free list.  The session lock is taken
 * as reader so the session cannot disappear during teardown.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop our hold on the ITL data; release it on last reference. */
	if (itask->itask_itl_datap) {
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	/* Only tasks that made it to a worker hold these counts. */
	if (itask->itask_worker) {
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4840 4840
/*
 * Hand a newly allocated task to a worker thread for execution.
 * Selects a worker round-robin (with a one-ahead lower-load check),
 * atomically marks the task known to the target port and queued,
 * appends it to the worker's queue, and wakes the worker if idle.
 * 'dbuf', if non-NULL, is an initial data buffer carried in slot 0.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	/* A task can track at most 4 data buffers. */
	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		int s = nv;
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt. So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/*
	 * Atomic flag transition: task becomes known to the target port
	 * and queued; target resets and REPORT LUNS get stmf's default
	 * handling; the IN_TRANSITION phase ends.
	 */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	stmf_itl_task_start(itask);

	/* Append the task to the worker's queue tail. */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	/* Measure task waitq time */
	itask->itask_waitq_enter_timestamp = gethrtime();
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
		w->worker_signal_timestamp = gethrtime();
		DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
		    scsi_task_t *, task);
		cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4967 4967
4968 4968 static void
4969 4969 stmf_task_audit(stmf_i_scsi_task_t *itask,
4970 4970 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4971 4971 {
4972 4972 stmf_task_audit_rec_t *ar;
4973 4973
4974 4974 mutex_enter(&itask->itask_audit_mutex);
4975 4975 ar = &itask->itask_audit_records[itask->itask_audit_index++];
4976 4976 itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4977 4977 ar->ta_event = te;
4978 4978 ar->ta_cmd_or_iof = cmd_or_iof;
4979 4979 ar->ta_itask_flags = itask->itask_flags;
4980 4980 ar->ta_dbuf = dbuf;
4981 4981 gethrestime(&ar->ta_timestamp);
4982 4982 mutex_exit(&itask->itask_audit_mutex);
4983 4983 }
4984 4984
4985 4985
/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * ITASK_KNOWN_TO_LU may already have been cleared before
 * ITASK_BEING_ABORTED is set.  If it was still set at that point, it
 * cannot be cleared until the LU explicitly calls
 * stmf_task_lu_aborted().  The LU makes that call only if we invoke the
 * LU's abort entry point, and we invoke that entry point only if
 * ITASK_KNOWN_TO_LU was set.
 *
 * The same logic applies to the target port.
 *
 * Also, ITASK_BEING_ABORTED is not allowed to be set once both
 * KNOWN_TO_LU and KNOWN_TO_TGT_PORT have been cleared.
 *
 * +++++++++++++++++++++++++++++++++++++++++++++++
 */
5001 5001
/*
 * Start (or just account for) a data transfer on a task through its
 * target port.  STMF_IOF_LU_DONE atomically clears ITASK_KNOWN_TO_LU
 * first; STMF_IOF_STATS_ONLY updates kstats without issuing a real
 * transfer.  Returns STMF_ABORTED if the task is being aborted,
 * otherwise the port provider's result.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret = STMF_SUCCESS;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef	DEBUG
	/* Fault injection: drop buffers while stmf_drop_buf_counter runs. */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if (ret != STMF_SUCCESS) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		if (dbuf->db_xfer_start_timestamp != 0)
			stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
5054 5054
/*
 * Port-provider completion callback for a data transfer.  Validates the
 * completion against DB_LPORT_XFER_ACTIVE (panics on an unexpected
 * one), then atomically updates the task flags: if the LU still owns
 * the task, the completion is pushed onto the task's command stack and
 * the task is (re)queued to its worker; otherwise, once no one knows
 * the task any more, it is freed.
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);

	/* Guard against unexpected completions from the lport */
	if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
	} else {
		/*
		 * This should never happen.
		 */
		ilport = task->task_lport->lport_stmf_private;
		ilport->ilport_unexpected_comp++;
		cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
		    (void *)task, (void *)dbuf);
		return;
	}

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (update_queue_flags) {
		/* The dbuf handle is encoded into the command byte. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			/* Measure task waitq time */
			itask->itask_waitq_enter_timestamp = gethrtime();
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu = w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}
5152 5152
/*
 * Ask the target port to send SCSI status for a task.  STMF_IOF_LU_DONE
 * atomically clears ITASK_KNOWN_TO_LU first.  Computes the residual
 * (over-/under-run) from the command vs. expected transfer lengths
 * before handing off to the port's lport_send_status entry point.
 * Returns STMF_ABORTED if the task is being aborted; STMF_SUCCESS
 * without sending if the port no longer knows the task.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	if (ioflags & STMF_IOF_LU_DONE) {
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	/* Work out residual/over/under-run status for the response. */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
5199 5199
/*
 * Port-provider callback after SCSI status has been sent.  Atomically
 * updates the task flags: if the LU still owns the task, queue an
 * ITASK_CMD_STATUS_DONE to its worker (a task already queued at this
 * point is an invariant violation and panics); otherwise, once no one
 * knows the task any more, free it.  's' becomes the task's completion
 * status.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;


	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		/* Append the task to the worker's queue tail. */
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		/* Measure task waitq time */
		itask->itask_waitq_enter_timestamp = gethrtime();
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}
5280 5280
/*
 * LU's notification that it is completely done with a task.  Clears
 * ITASK_KNOWN_TO_LU; since this must be the last stage of the task's
 * life, any remaining ownership flag is an invariant violation and
 * panics.  Otherwise the task is freed.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		if (old & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "task_lu_done received"
			    " when task is in worker queue "
			    " task = %p", (void *)task);
		}
		new &= ~ITASK_KNOWN_TO_LU;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	mutex_exit(&w->worker_lock);

	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
}
5315 5315
/*
 * Mark a task as being aborted (with completion status 's') and, if the
 * task already has a worker and is past its transition phase, queue it
 * to that worker so the abort is processed.  A task that is already
 * being aborted, or that neither the LU nor the target port knows,
 * is left alone.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	do {
		old = new = itask->itask_flags;
		if ((old & ITASK_BEING_ABORTED) ||
		    ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_KNOWN_TO_LU)) == 0)) {
			return;
		}
		new |= ITASK_BEING_ABORTED;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;
	/* Restart the clock so abort progress can be timed out too. */
	itask->itask_start_time = ddi_get_lbolt();

	/*
	 * No worker yet, or still in transition: the normal posting path
	 * will notice ITASK_BEING_ABORTED.
	 */
	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		return;
	}

	/* Queue it and get out */
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		mutex_exit(&w->worker_lock);
		return;
	}
	atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);
}
5364 5364
/*
 * Abort dispatcher.  STMF_QUEUE_ABORT_LU aborts every task on an LU
 * (except 'task'); STMF_QUEUE_TASK_ABORT aborts one task.  The
 * STMF_REQUEUE_TASK_ABORT_{LPORT,LU} cases re-arm the corresponding
 * "abort entry point called" flag so the worker will call the LU's or
 * port's abort handler again — but only if the task is still being
 * aborted, still known to that side, and had the called-flag set.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t old, new, f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;
		f = ITASK_KNOWN_TO_TGT_PORT;
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}
	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	f |= ITASK_BEING_ABORTED | rf;
	/* Clear the called-flag only if all required flags are present. */
	do {
		old = new = itask->itask_flags;
		if ((old & f) != f) {
			return;
		}
		new &= ~rf;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
5402 5402
/*
 * LU's response to an abort request.  On a clean abort with
 * STMF_IOF_LU_DONE, the LU's claim on the task is dropped.  Any other
 * outcome (abort failure, or LU not yet finished) takes the task — and
 * the LU — down the offline path via stmf_abort_task_offline().
 */
void
stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;

	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);

	st = s;	/* gcc fix */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but LU is not finished, task ="
		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LU abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
		return;
	}

	stmf_abort_task_offline(task, 1, info);
}
5430 5430
5431 5431 void
5432 5432 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5433 5433 {
5434 5434 char info[STMF_CHANGE_INFO_LEN];
5435 5435 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
5436 5436 unsigned long long st;
5437 5437 uint32_t old, new;
5438 5438
5439 5439 stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);
5440 5440
5441 5441 st = s;
5442 5442 if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5443 5443 (void) snprintf(info, sizeof (info),
5444 5444 "task %p, tgt port failed to abort ret=%llx", (void *)task,
5445 5445 st);
5446 5446 } else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
5447 5447 (void) snprintf(info, sizeof (info),
5448 5448 "Task aborted but tgt port is not finished, "
5449 5449 "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
5450 5450 } else {
5451 5451 /*
5452 5452 * LPORT abort successfully
5453 5453 */
5454 5454 do {
5455 5455 old = new = itask->itask_flags;
5456 5456 if (!(old & ITASK_KNOWN_TO_TGT_PORT))
5457 5457 return;
5458 5458 new &= ~ITASK_KNOWN_TO_TGT_PORT;
5459 5459 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5460 5460 return;
5461 5461 }
5462 5462
5463 5463 stmf_abort_task_offline(task, 0, info);
5464 5464 }
5465 5465
5466 5466 stmf_status_t
5467 5467 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
5468 5468 {
5469 5469 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5470 5470 task->task_stmf_private;
5471 5471 stmf_worker_t *w = itask->itask_worker;
5472 5472 int i;
5473 5473
5474 5474 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
5475 5475 mutex_enter(&w->worker_lock);
5476 5476 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5477 5477 mutex_exit(&w->worker_lock);
5478 5478 return (STMF_BUSY);
5479 5479 }
5480 5480 for (i = 0; i < itask->itask_ncmds; i++) {
5481 5481 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
5482 5482 mutex_exit(&w->worker_lock);
5483 5483 return (STMF_SUCCESS);
5484 5484 }
5485 5485 }
5486 5486 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
5487 5487 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5488 5488 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5489 5489 } else {
5490 5490 clock_t t = drv_usectohz(timeout * 1000);
5491 5491 if (t == 0)
5492 5492 t = 1;
5493 5493 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5494 5494 }
5495 5495 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5496 5496 itask->itask_worker_next = NULL;
5497 5497 if (w->worker_task_tail) {
5498 5498 w->worker_task_tail->itask_worker_next = itask;
5499 5499 } else {
5500 5500 w->worker_task_head = itask;
5501 5501 }
5502 5502 w->worker_task_tail = itask;
5503 5503 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5504 5504 w->worker_max_qdepth_pu = w->worker_queue_depth;
5505 5505 }
5506 5506 atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
5507 5507 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5508 5508 cv_signal(&w->worker_cv);
5509 5509 }
5510 5510 mutex_exit(&w->worker_lock);
5511 5511 return (STMF_SUCCESS);
5512 5512 }
5513 5513
5514 5514 stmf_status_t
5515 5515 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5516 5516 {
5517 5517 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5518 5518 task->task_stmf_private;
5519 5519 stmf_worker_t *w = itask->itask_worker;
5520 5520 int i;
5521 5521
5522 5522 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5523 5523 mutex_enter(&w->worker_lock);
5524 5524 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5525 5525 mutex_exit(&w->worker_lock);
5526 5526 return (STMF_BUSY);
5527 5527 }
5528 5528 for (i = 0; i < itask->itask_ncmds; i++) {
5529 5529 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5530 5530 mutex_exit(&w->worker_lock);
5531 5531 return (STMF_SUCCESS);
5532 5532 }
5533 5533 }
5534 5534 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5535 5535 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5536 5536 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5537 5537 } else {
5538 5538 clock_t t = drv_usectohz(timeout * 1000);
5539 5539 if (t == 0)
5540 5540 t = 1;
5541 5541 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5542 5542 }
5543 5543 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5544 5544 itask->itask_worker_next = NULL;
5545 5545 if (w->worker_task_tail) {
5546 5546 w->worker_task_tail->itask_worker_next = itask;
5547 5547 } else {
5548 5548 w->worker_task_head = itask;
5549 5549 }
5550 5550 w->worker_task_tail = itask;
5551 5551 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5552 5552 w->worker_max_qdepth_pu = w->worker_queue_depth;
5553 5553 }
5554 5554 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5555 5555 cv_signal(&w->worker_cv);
5556 5556 }
5557 5557 mutex_exit(&w->worker_lock);
5558 5558 return (STMF_SUCCESS);
5559 5559 }
5560 5560
5561 5561 void
5562 5562 stmf_do_task_abort(scsi_task_t *task)
5563 5563 {
5564 5564 stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
5565 5565 stmf_lu_t *lu;
5566 5566 stmf_local_port_t *lport;
5567 5567 unsigned long long ret;
5568 5568 uint32_t old, new;
5569 5569 uint8_t call_lu_abort, call_port_abort;
5570 5570 char info[STMF_CHANGE_INFO_LEN];
5571 5571
5572 5572 lu = task->task_lu;
5573 5573 lport = task->task_lport;
5574 5574 do {
5575 5575 old = new = itask->itask_flags;
5576 5576 if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
5577 5577 ITASK_KNOWN_TO_LU) {
5578 5578 new |= ITASK_LU_ABORT_CALLED;
5579 5579 call_lu_abort = 1;
5580 5580 } else {
5581 5581 call_lu_abort = 0;
5582 5582 }
5583 5583 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5584 5584
5585 5585 if (call_lu_abort) {
5586 5586 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
5587 5587 ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5588 5588 } else {
5589 5589 ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5590 5590 }
5591 5591 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5592 5592 stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
5593 5593 } else if (ret == STMF_BUSY) {
5594 5594 atomic_and_32(&itask->itask_flags,
5595 5595 ~ITASK_LU_ABORT_CALLED);
5596 5596 } else if (ret != STMF_SUCCESS) {
5597 5597 (void) snprintf(info, sizeof (info),
5598 5598 "Abort failed by LU %p, ret %llx", (void *)lu, ret);
5599 5599 stmf_abort_task_offline(task, 1, info);
5600 5600 }
5601 5601 } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
5602 5602 if (ddi_get_lbolt() > (itask->itask_start_time +
5603 5603 STMF_SEC2TICK(lu->lu_abort_timeout?
5604 5604 lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
5605 5605 (void) snprintf(info, sizeof (info),
5606 5606 "lu abort timed out");
5607 5607 stmf_abort_task_offline(itask->itask_task, 1, info);
5608 5608 }
5609 5609 }
5610 5610
5611 5611 do {
5612 5612 old = new = itask->itask_flags;
5613 5613 if ((old & (ITASK_KNOWN_TO_TGT_PORT |
5614 5614 ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
5615 5615 new |= ITASK_TGT_PORT_ABORT_CALLED;
5616 5616 call_port_abort = 1;
5617 5617 } else {
5618 5618 call_port_abort = 0;
5619 5619 }
5620 5620 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
5621 5621 if (call_port_abort) {
5622 5622 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
5623 5623 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5624 5624 stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
5625 5625 } else if (ret == STMF_BUSY) {
5626 5626 atomic_and_32(&itask->itask_flags,
5627 5627 ~ITASK_TGT_PORT_ABORT_CALLED);
5628 5628 } else if (ret != STMF_SUCCESS) {
5629 5629 (void) snprintf(info, sizeof (info),
5630 5630 "Abort failed by tgt port %p ret %llx",
5631 5631 (void *)lport, ret);
5632 5632 stmf_abort_task_offline(task, 0, info);
5633 5633 }
5634 5634 } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
5635 5635 if (ddi_get_lbolt() > (itask->itask_start_time +
5636 5636 STMF_SEC2TICK(lport->lport_abort_timeout?
5637 5637 lport->lport_abort_timeout :
5638 5638 ITASK_DEFAULT_ABORT_TIMEOUT))) {
5639 5639 (void) snprintf(info, sizeof (info),
5640 5640 "lport abort timed out");
5641 5641 stmf_abort_task_offline(itask->itask_task, 0, info);
5642 5642 }
5643 5643 }
5644 5644 }
5645 5645
5646 5646 stmf_status_t
5647 5647 stmf_ctl(int cmd, void *obj, void *arg)
5648 5648 {
5649 5649 stmf_status_t ret;
5650 5650 stmf_i_lu_t *ilu;
5651 5651 stmf_i_local_port_t *ilport;
5652 5652 stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;
5653 5653
5654 5654 mutex_enter(&stmf_state.stmf_lock);
5655 5655 ret = STMF_INVALID_ARG;
5656 5656 if (cmd & STMF_CMD_LU_OP) {
5657 5657 ilu = stmf_lookup_lu((stmf_lu_t *)obj);
5658 5658 if (ilu == NULL) {
5659 5659 goto stmf_ctl_lock_exit;
5660 5660 }
5661 5661 DTRACE_PROBE3(lu__state__change,
5662 5662 stmf_lu_t *, ilu->ilu_lu,
5663 5663 int, cmd, stmf_state_change_info_t *, ssci);
5664 5664 } else if (cmd & STMF_CMD_LPORT_OP) {
5665 5665 ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
5666 5666 if (ilport == NULL) {
5667 5667 goto stmf_ctl_lock_exit;
5668 5668 }
5669 5669 DTRACE_PROBE3(lport__state__change,
5670 5670 stmf_local_port_t *, ilport->ilport_lport,
5671 5671 int, cmd, stmf_state_change_info_t *, ssci);
5672 5672 } else {
5673 5673 goto stmf_ctl_lock_exit;
5674 5674 }
5675 5675
5676 5676 switch (cmd) {
5677 5677 case STMF_CMD_LU_ONLINE:
5678 5678 switch (ilu->ilu_state) {
5679 5679 case STMF_STATE_OFFLINE:
5680 5680 ret = STMF_SUCCESS;
5681 5681 break;
5682 5682 case STMF_STATE_ONLINE:
5683 5683 case STMF_STATE_ONLINING:
5684 5684 ret = STMF_ALREADY;
5685 5685 break;
5686 5686 case STMF_STATE_OFFLINING:
5687 5687 ret = STMF_BUSY;
5688 5688 break;
5689 5689 default:
5690 5690 ret = STMF_BADSTATE;
5691 5691 break;
5692 5692 }
5693 5693 if (ret != STMF_SUCCESS)
5694 5694 goto stmf_ctl_lock_exit;
5695 5695
5696 5696 ilu->ilu_state = STMF_STATE_ONLINING;
5697 5697 mutex_exit(&stmf_state.stmf_lock);
5698 5698 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5699 5699 break;
5700 5700
5701 5701 case STMF_CMD_LU_ONLINE_COMPLETE:
5702 5702 if (ilu->ilu_state != STMF_STATE_ONLINING) {
5703 5703 ret = STMF_BADSTATE;
5704 5704 goto stmf_ctl_lock_exit;
5705 5705 }
5706 5706 if (((stmf_change_status_t *)arg)->st_completion_status ==
5707 5707 STMF_SUCCESS) {
5708 5708 ilu->ilu_state = STMF_STATE_ONLINE;
5709 5709 mutex_exit(&stmf_state.stmf_lock);
5710 5710 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5711 5711 STMF_ACK_LU_ONLINE_COMPLETE, arg);
5712 5712 mutex_enter(&stmf_state.stmf_lock);
5713 5713 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5714 5714 } else {
5715 5715 			/* XXX: should throw a message and record more data */
5716 5716 ilu->ilu_state = STMF_STATE_OFFLINE;
5717 5717 }
5718 5718 ret = STMF_SUCCESS;
5719 5719 goto stmf_ctl_lock_exit;
5720 5720
5721 5721 case STMF_CMD_LU_OFFLINE:
5722 5722 switch (ilu->ilu_state) {
5723 5723 case STMF_STATE_ONLINE:
5724 5724 ret = STMF_SUCCESS;
5725 5725 break;
5726 5726 case STMF_STATE_OFFLINE:
5727 5727 case STMF_STATE_OFFLINING:
5728 5728 ret = STMF_ALREADY;
5729 5729 break;
5730 5730 case STMF_STATE_ONLINING:
5731 5731 ret = STMF_BUSY;
5732 5732 break;
5733 5733 default:
5734 5734 ret = STMF_BADSTATE;
5735 5735 break;
5736 5736 }
5737 5737 if (ret != STMF_SUCCESS)
5738 5738 goto stmf_ctl_lock_exit;
5739 5739 ilu->ilu_state = STMF_STATE_OFFLINING;
5740 5740 mutex_exit(&stmf_state.stmf_lock);
5741 5741 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5742 5742 break;
5743 5743
5744 5744 case STMF_CMD_LU_OFFLINE_COMPLETE:
5745 5745 if (ilu->ilu_state != STMF_STATE_OFFLINING) {
5746 5746 ret = STMF_BADSTATE;
5747 5747 goto stmf_ctl_lock_exit;
5748 5748 }
5749 5749 if (((stmf_change_status_t *)arg)->st_completion_status ==
5750 5750 STMF_SUCCESS) {
5751 5751 ilu->ilu_state = STMF_STATE_OFFLINE;
5752 5752 mutex_exit(&stmf_state.stmf_lock);
5753 5753 ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5754 5754 STMF_ACK_LU_OFFLINE_COMPLETE, arg);
5755 5755 mutex_enter(&stmf_state.stmf_lock);
5756 5756 } else {
5757 5757 ilu->ilu_state = STMF_STATE_ONLINE;
5758 5758 stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5759 5759 }
5760 5760 mutex_exit(&stmf_state.stmf_lock);
5761 5761 break;
5762 5762
5763 5763 /*
5764 5764 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
5765 5765 	 * It's related to hardware disable/enable.
5766 5766 */
5767 5767 case STMF_CMD_LPORT_ONLINE:
5768 5768 switch (ilport->ilport_state) {
5769 5769 case STMF_STATE_OFFLINE:
5770 5770 ret = STMF_SUCCESS;
5771 5771 break;
5772 5772 case STMF_STATE_ONLINE:
5773 5773 case STMF_STATE_ONLINING:
5774 5774 ret = STMF_ALREADY;
5775 5775 break;
5776 5776 case STMF_STATE_OFFLINING:
5777 5777 ret = STMF_BUSY;
5778 5778 break;
5779 5779 default:
5780 5780 ret = STMF_BADSTATE;
5781 5781 break;
5782 5782 }
5783 5783 if (ret != STMF_SUCCESS)
5784 5784 goto stmf_ctl_lock_exit;
5785 5785
5786 5786 /*
5787 5787 * Only user request can recover the port from the
5788 5788 * FORCED_OFFLINE state
5789 5789 */
5790 5790 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
5791 5791 if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
5792 5792 ret = STMF_FAILURE;
5793 5793 goto stmf_ctl_lock_exit;
5794 5794 }
5795 5795 }
5796 5796
5797 5797 /*
5798 5798 * Avoid too frequent request to online
5799 5799 */
5800 5800 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5801 5801 ilport->ilport_online_times = 0;
5802 5802 ilport->ilport_avg_interval = 0;
5803 5803 }
5804 5804 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
5805 5805 (ilport->ilport_online_times >= 4)) {
5806 5806 ret = STMF_FAILURE;
5807 5807 ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
5808 5808 stmf_trace(NULL, "stmf_ctl: too frequent request to "
5809 5809 "online the port");
5810 5810 cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
5811 5811 "online the port, set FORCED_OFFLINE now");
5812 5812 goto stmf_ctl_lock_exit;
5813 5813 }
5814 5814 if (ilport->ilport_online_times > 0) {
5815 5815 if (ilport->ilport_online_times == 1) {
5816 5816 ilport->ilport_avg_interval = ddi_get_lbolt() -
5817 5817 ilport->ilport_last_online_clock;
5818 5818 } else {
5819 5819 ilport->ilport_avg_interval =
5820 5820 (ilport->ilport_avg_interval +
5821 5821 ddi_get_lbolt() -
5822 5822 ilport->ilport_last_online_clock) >> 1;
5823 5823 }
5824 5824 }
5825 5825 ilport->ilport_last_online_clock = ddi_get_lbolt();
5826 5826 ilport->ilport_online_times++;
5827 5827
5828 5828 /*
5829 5829 * Submit online service request
5830 5830 */
5831 5831 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
5832 5832 ilport->ilport_state = STMF_STATE_ONLINING;
5833 5833 mutex_exit(&stmf_state.stmf_lock);
5834 5834 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5835 5835 break;
5836 5836
5837 5837 case STMF_CMD_LPORT_ONLINE_COMPLETE:
5838 5838 if (ilport->ilport_state != STMF_STATE_ONLINING) {
5839 5839 ret = STMF_BADSTATE;
5840 5840 goto stmf_ctl_lock_exit;
5841 5841 }
5842 5842 if (((stmf_change_status_t *)arg)->st_completion_status ==
5843 5843 STMF_SUCCESS) {
5844 5844 ilport->ilport_state = STMF_STATE_ONLINE;
5845 5845 mutex_exit(&stmf_state.stmf_lock);
5846 5846 ((stmf_local_port_t *)obj)->lport_ctl(
5847 5847 (stmf_local_port_t *)obj,
5848 5848 STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
5849 5849 mutex_enter(&stmf_state.stmf_lock);
5850 5850 } else {
5851 5851 ilport->ilport_state = STMF_STATE_OFFLINE;
5852 5852 }
5853 5853 ret = STMF_SUCCESS;
5854 5854 goto stmf_ctl_lock_exit;
5855 5855
5856 5856 case STMF_CMD_LPORT_OFFLINE:
5857 5857 switch (ilport->ilport_state) {
5858 5858 case STMF_STATE_ONLINE:
5859 5859 ret = STMF_SUCCESS;
5860 5860 break;
5861 5861 case STMF_STATE_OFFLINE:
5862 5862 case STMF_STATE_OFFLINING:
5863 5863 ret = STMF_ALREADY;
5864 5864 break;
5865 5865 case STMF_STATE_ONLINING:
5866 5866 ret = STMF_BUSY;
5867 5867 break;
5868 5868 default:
5869 5869 ret = STMF_BADSTATE;
5870 5870 break;
5871 5871 }
5872 5872 if (ret != STMF_SUCCESS)
5873 5873 goto stmf_ctl_lock_exit;
5874 5874
5875 5875 ilport->ilport_state = STMF_STATE_OFFLINING;
5876 5876 mutex_exit(&stmf_state.stmf_lock);
5877 5877 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5878 5878 break;
5879 5879
5880 5880 case STMF_CMD_LPORT_OFFLINE_COMPLETE:
5881 5881 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
5882 5882 ret = STMF_BADSTATE;
5883 5883 goto stmf_ctl_lock_exit;
5884 5884 }
5885 5885 if (((stmf_change_status_t *)arg)->st_completion_status ==
5886 5886 STMF_SUCCESS) {
5887 5887 ilport->ilport_state = STMF_STATE_OFFLINE;
5888 5888 mutex_exit(&stmf_state.stmf_lock);
5889 5889 ((stmf_local_port_t *)obj)->lport_ctl(
5890 5890 (stmf_local_port_t *)obj,
5891 5891 STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
5892 5892 mutex_enter(&stmf_state.stmf_lock);
5893 5893 } else {
5894 5894 ilport->ilport_state = STMF_STATE_ONLINE;
5895 5895 }
5896 5896 mutex_exit(&stmf_state.stmf_lock);
5897 5897 break;
5898 5898
5899 5899 default:
5900 5900 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
5901 5901 ret = STMF_INVALID_ARG;
5902 5902 goto stmf_ctl_lock_exit;
5903 5903 }
5904 5904
5905 5905 return (STMF_SUCCESS);
5906 5906
5907 5907 stmf_ctl_lock_exit:;
5908 5908 mutex_exit(&stmf_state.stmf_lock);
5909 5909 return (ret);
5910 5910 }
5911 5911
5912 5912 /* ARGSUSED */
5913 5913 stmf_status_t
5914 5914 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5915 5915 uint32_t *bufsizep)
5916 5916 {
5917 5917 return (STMF_NOT_SUPPORTED);
5918 5918 }
5919 5919
5920 5920 /* ARGSUSED */
5921 5921 stmf_status_t
5922 5922 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5923 5923 uint32_t *bufsizep)
5924 5924 {
5925 5925 uint32_t cl = SI_GET_CLASS(cmd);
5926 5926
5927 5927 if (cl == SI_STMF) {
5928 5928 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5929 5929 }
5930 5930 if (cl == SI_LPORT) {
5931 5931 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5932 5932 arg2, buf, bufsizep));
5933 5933 } else if (cl == SI_LU) {
5934 5934 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5935 5935 bufsizep));
5936 5936 }
5937 5937
5938 5938 return (STMF_NOT_SUPPORTED);
5939 5939 }
5940 5940
5941 5941 /*
5942 5942 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5943 5943 * stmf to register local ports. The ident should have 20 bytes in buffer
5944 5944 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5945 5945 */
5946 5946 void
5947 5947 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5948 5948 uint8_t protocol_id)
5949 5949 {
5950 5950 char wwn_str[20+1];
5951 5951
5952 5952 sdid->protocol_id = protocol_id;
5953 5953 sdid->piv = 1;
5954 5954 sdid->code_set = CODE_SET_ASCII;
5955 5955 sdid->association = ID_IS_TARGET_PORT;
5956 5956 sdid->ident_length = 20;
5957 5957 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5958 5958 (void) snprintf(wwn_str, sizeof (wwn_str),
5959 5959 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5960 5960 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5961 5961 bcopy(wwn_str, (char *)sdid->ident, 20);
5962 5962 }
5963 5963
5964 5964
5965 5965 stmf_xfer_data_t *
5966 5966 stmf_prepare_tpgs_data(uint8_t ilu_alua)
5967 5967 {
5968 5968 stmf_xfer_data_t *xd;
5969 5969 stmf_i_local_port_t *ilport;
5970 5970 uint8_t *p;
5971 5971 uint32_t sz, asz, nports = 0, nports_standby = 0;
5972 5972
5973 5973 mutex_enter(&stmf_state.stmf_lock);
5974 5974 /* check if any ports are standby and create second group */
5975 5975 for (ilport = stmf_state.stmf_ilportlist; ilport;
5976 5976 ilport = ilport->ilport_next) {
5977 5977 if (ilport->ilport_standby == 1) {
5978 5978 nports_standby++;
5979 5979 } else {
5980 5980 nports++;
5981 5981 }
5982 5982 }
5983 5983
5984 5984 /* The spec only allows for 255 ports to be reported per group */
5985 5985 nports = min(nports, 255);
5986 5986 nports_standby = min(nports_standby, 255);
5987 5987 sz = (nports * 4) + 12;
5988 5988 if (nports_standby && ilu_alua) {
5989 5989 sz += (nports_standby * 4) + 8;
5990 5990 }
5991 5991 asz = sz + sizeof (*xd) - 4;
5992 5992 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
5993 5993 if (xd == NULL) {
5994 5994 mutex_exit(&stmf_state.stmf_lock);
5995 5995 return (NULL);
5996 5996 }
5997 5997 xd->alloc_size = asz;
5998 5998 xd->size_left = sz;
5999 5999
6000 6000 p = xd->buf;
6001 6001
6002 6002 *((uint32_t *)p) = BE_32(sz - 4);
6003 6003 p += 4;
6004 6004 p[0] = 0x80; /* PREF */
6005 6005 p[1] = 5; /* AO_SUP, S_SUP */
6006 6006 if (stmf_state.stmf_alua_node == 1) {
6007 6007 p[3] = 1; /* Group 1 */
6008 6008 } else {
6009 6009 p[3] = 0; /* Group 0 */
6010 6010 }
6011 6011 p[7] = nports & 0xff;
6012 6012 p += 8;
6013 6013 for (ilport = stmf_state.stmf_ilportlist; ilport;
6014 6014 ilport = ilport->ilport_next) {
6015 6015 if (ilport->ilport_standby == 1) {
6016 6016 continue;
6017 6017 }
6018 6018 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6019 6019 p += 4;
6020 6020 }
6021 6021 if (nports_standby && ilu_alua) {
6022 6022 p[0] = 0x02; /* Non PREF, Standby */
6023 6023 p[1] = 5; /* AO_SUP, S_SUP */
6024 6024 if (stmf_state.stmf_alua_node == 1) {
6025 6025 p[3] = 0; /* Group 0 */
6026 6026 } else {
6027 6027 p[3] = 1; /* Group 1 */
6028 6028 }
6029 6029 p[7] = nports_standby & 0xff;
6030 6030 p += 8;
6031 6031 for (ilport = stmf_state.stmf_ilportlist; ilport;
6032 6032 ilport = ilport->ilport_next) {
6033 6033 if (ilport->ilport_standby == 0) {
6034 6034 continue;
6035 6035 }
6036 6036 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6037 6037 p += 4;
6038 6038 }
6039 6039 }
6040 6040
6041 6041 mutex_exit(&stmf_state.stmf_lock);
6042 6042
6043 6043 return (xd);
6044 6044 }
6045 6045
6046 6046 struct scsi_devid_desc *
6047 6047 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6048 6048 {
6049 6049 scsi_devid_desc_t *devid = NULL;
6050 6050 stmf_i_local_port_t *ilport;
6051 6051
6052 6052 mutex_enter(&stmf_state.stmf_lock);
6053 6053
6054 6054 for (ilport = stmf_state.stmf_ilportlist; ilport;
6055 6055 ilport = ilport->ilport_next) {
6056 6056 if (ilport->ilport_rtpid == rtpid) {
6057 6057 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6058 6058 uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6059 6059 id->ident_length;
6060 6060 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6061 6061 KM_NOSLEEP);
6062 6062 if (devid != NULL) {
6063 6063 bcopy(id, devid, id_sz);
6064 6064 }
6065 6065 break;
6066 6066 }
6067 6067 }
6068 6068
6069 6069 mutex_exit(&stmf_state.stmf_lock);
6070 6070 return (devid);
6071 6071 }
6072 6072
6073 6073 uint16_t
6074 6074 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6075 6075 {
6076 6076 stmf_i_local_port_t *ilport;
6077 6077 scsi_devid_desc_t *id;
6078 6078 uint16_t rtpid = 0;
6079 6079
6080 6080 mutex_enter(&stmf_state.stmf_lock);
6081 6081 for (ilport = stmf_state.stmf_ilportlist; ilport;
6082 6082 ilport = ilport->ilport_next) {
6083 6083 id = ilport->ilport_lport->lport_id;
6084 6084 if ((devid->ident_length == id->ident_length) &&
6085 6085 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6086 6086 rtpid = ilport->ilport_rtpid;
6087 6087 break;
6088 6088 }
6089 6089 }
6090 6090 mutex_exit(&stmf_state.stmf_lock);
6091 6091 return (rtpid);
6092 6092 }
6093 6093
6094 6094 static uint16_t stmf_lu_id_gen_number = 0;
6095 6095
6096 6096 stmf_status_t
6097 6097 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
6098 6098 {
6099 6099 return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
6100 6100 }
6101 6101
↓ open down ↓ |
6101 lines elided |
↑ open up ↑ |
6102 6102 stmf_status_t
6103 6103 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
6104 6104 scsi_devid_desc_t *lu_id)
6105 6105 {
6106 6106 uint8_t *p;
6107 6107 struct timeval32 timestamp32;
6108 6108 	uint32_t *t = (uint32_t *)&timestamp32;
6109 6109 struct ether_addr mac;
6110 6110 uint8_t *e = (uint8_t *)&mac;
6111 6111 int hid = (int)host_id;
6112 + uint16_t gen_number;
6112 6113
6113 6114 if (company_id == COMPANY_ID_NONE)
6114 6115 company_id = COMPANY_ID_SUN;
6115 6116
6116 6117 if (lu_id->ident_length != 0x10)
6117 6118 return (STMF_INVALID_ARG);
6118 6119
6119 6120 p = (uint8_t *)lu_id;
6120 6121
6121 - atomic_add_16(&stmf_lu_id_gen_number, 1);
6122 + gen_number = atomic_add_16_nv(&stmf_lu_id_gen_number, 1);
6122 6123
6123 6124 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
6124 6125 p[4] = ((company_id >> 20) & 0xf) | 0x60;
6125 6126 p[5] = (company_id >> 12) & 0xff;
6126 6127 p[6] = (company_id >> 4) & 0xff;
6127 6128 p[7] = (company_id << 4) & 0xf0;
6128 6129 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
6129 6130 hid = BE_32((int)zone_get_hostid(NULL));
6130 6131 }
6131 6132 if (hid != 0) {
6132 6133 e[0] = (hid >> 24) & 0xff;
6133 6134 e[1] = (hid >> 16) & 0xff;
6134 6135 e[2] = (hid >> 8) & 0xff;
6135 6136 e[3] = hid & 0xff;
6136 6137 e[4] = e[5] = 0;
6137 6138 }
6138 6139 bcopy(e, p+8, 6);
6139 6140 	uniqtime32(&timestamp32);
6140 6141 *t = BE_32(*t);
6141 6142 bcopy(t, p+14, 4);
6142 - p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
6143 - p[19] = stmf_lu_id_gen_number & 0xff;
6143 + p[18] = (gen_number >> 8) & 0xff;
6144 + p[19] = gen_number & 0xff;
6144 6145
6145 6146 return (STMF_SUCCESS);
6146 6147 }
6147 6148
6148 6149 /*
6149 6150 * saa is sense key, ASC, ASCQ
6150 6151 */
6151 6152 void
6152 6153 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
6153 6154 {
6154 6155 uint8_t sd[18];
6155 6156 task->task_scsi_status = st;
6156 6157 if (st == 2) {
6157 6158 bzero(sd, 18);
6158 6159 sd[0] = 0x70;
6159 6160 sd[2] = (saa >> 16) & 0xf;
6160 6161 sd[7] = 10;
6161 6162 sd[12] = (saa >> 8) & 0xff;
6162 6163 sd[13] = saa & 0xff;
6163 6164 task->task_sense_data = sd;
6164 6165 task->task_sense_length = 18;
6165 6166 } else {
6166 6167 task->task_sense_data = NULL;
6167 6168 task->task_sense_length = 0;
6168 6169 }
6169 6170 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
6170 6171 }
6171 6172
6172 6173 uint32_t
6173 6174 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
6174 6175 uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
6175 6176 {
6176 6177 uint8_t *p = NULL;
6177 6178 uint8_t small_buf[32];
6178 6179 uint32_t sz = 0;
6179 6180 uint32_t n = 4;
6180 6181 uint32_t m = 0;
6181 6182 uint32_t last_bit = 0;
6182 6183
6183 6184 if (page_len < 4)
6184 6185 return (0);
6185 6186 if (page_len > 65535)
6186 6187 page_len = 65535;
6187 6188
6188 6189 page[0] = byte0;
6189 6190 page[1] = 0x83;
6190 6191
6191 6192 /* CONSTCOND */
6192 6193 while (1) {
6193 6194 m += sz;
6194 6195 if (sz && (page_len > n)) {
6195 6196 uint32_t copysz;
6196 6197 copysz = page_len > (n + sz) ? sz : page_len - n;
6197 6198 bcopy(p, page + n, copysz);
6198 6199 n += copysz;
6199 6200 }
6200 6201 vpd_mask &= ~last_bit;
6201 6202 if (vpd_mask == 0)
6202 6203 break;
6203 6204
6204 6205 if (vpd_mask & STMF_VPD_LU_ID) {
6205 6206 last_bit = STMF_VPD_LU_ID;
6206 6207 sz = task->task_lu->lu_id->ident_length + 4;
6207 6208 p = (uint8_t *)task->task_lu->lu_id;
6208 6209 continue;
6209 6210 } else if (vpd_mask & STMF_VPD_TARGET_ID) {
6210 6211 last_bit = STMF_VPD_TARGET_ID;
6211 6212 sz = task->task_lport->lport_id->ident_length + 4;
6212 6213 p = (uint8_t *)task->task_lport->lport_id;
6213 6214 continue;
6214 6215 } else if (vpd_mask & STMF_VPD_TP_GROUP) {
6215 6216 stmf_i_local_port_t *ilport;
6216 6217 last_bit = STMF_VPD_TP_GROUP;
6217 6218 p = small_buf;
6218 6219 bzero(p, 8);
6219 6220 p[0] = 1;
6220 6221 p[1] = 0x15;
6221 6222 p[3] = 4;
6222 6223 ilport = (stmf_i_local_port_t *)
6223 6224 task->task_lport->lport_stmf_private;
6224 6225 /*
6225 6226 * If we're in alua mode, group 1 contains all alua
6226 6227 * participating ports and all standby ports
6227 6228 * > 255. Otherwise, if we're in alua mode, any local
6228 6229 * ports (non standby/pppt) are also in group 1 if the
6229 6230 * alua node is 1. Otherwise the group is 0.
6230 6231 */
6231 6232 if ((stmf_state.stmf_alua_state &&
6232 6233 (ilport->ilport_alua || ilport->ilport_standby) &&
6233 6234 ilport->ilport_rtpid > 255) ||
6234 6235 (stmf_state.stmf_alua_node == 1 &&
6235 6236 ilport->ilport_standby != 1)) {
6236 6237 p[7] = 1; /* Group 1 */
6237 6238 }
6238 6239 sz = 8;
6239 6240 continue;
6240 6241 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
6241 6242 stmf_i_local_port_t *ilport;
6242 6243
6243 6244 last_bit = STMF_VPD_RELATIVE_TP_ID;
6244 6245 p = small_buf;
6245 6246 bzero(p, 8);
6246 6247 p[0] = 1;
6247 6248 p[1] = 0x14;
6248 6249 p[3] = 4;
6249 6250 ilport = (stmf_i_local_port_t *)
6250 6251 task->task_lport->lport_stmf_private;
6251 6252 p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
6252 6253 p[7] = ilport->ilport_rtpid & 0xff;
6253 6254 sz = 8;
6254 6255 continue;
6255 6256 } else {
6256 6257 cmn_err(CE_WARN, "Invalid vpd_mask");
6257 6258 break;
6258 6259 }
6259 6260 }
6260 6261
6261 6262 page[2] = (m >> 8) & 0xff;
6262 6263 page[3] = m & 0xff;
6263 6264
6264 6265 return (n);
6265 6266 }
6266 6267
6267 6268 void
6268 6269 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
6269 6270 {
6270 6271 stmf_i_scsi_task_t *itask =
6271 6272 (stmf_i_scsi_task_t *)task->task_stmf_private;
6272 6273 stmf_i_lu_t *ilu =
6273 6274 (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6274 6275 stmf_xfer_data_t *xd;
6275 6276 uint32_t sz, minsz;
6276 6277
6277 6278 itask->itask_flags |= ITASK_DEFAULT_HANDLING;
6278 6279 task->task_cmd_xfer_length =
6279 6280 ((((uint32_t)task->task_cdb[6]) << 24) |
6280 6281 (((uint32_t)task->task_cdb[7]) << 16) |
6281 6282 (((uint32_t)task->task_cdb[8]) << 8) |
6282 6283 ((uint32_t)task->task_cdb[9]));
6283 6284
6284 6285 if (task->task_additional_flags &
6285 6286 TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6286 6287 task->task_expected_xfer_length =
6287 6288 task->task_cmd_xfer_length;
6288 6289 }
6289 6290
6290 6291 if (task->task_cmd_xfer_length == 0) {
6291 6292 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6292 6293 return;
6293 6294 }
6294 6295 if (task->task_cmd_xfer_length < 4) {
6295 6296 stmf_scsilib_send_status(task, STATUS_CHECK,
6296 6297 STMF_SAA_INVALID_FIELD_IN_CDB);
6297 6298 return;
6298 6299 }
6299 6300
6300 6301 sz = min(task->task_expected_xfer_length,
6301 6302 task->task_cmd_xfer_length);
6302 6303
6303 6304 xd = stmf_prepare_tpgs_data(ilu->ilu_alua);
6304 6305
6305 6306 if (xd == NULL) {
6306 6307 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6307 6308 STMF_ALLOC_FAILURE, NULL);
6308 6309 return;
6309 6310 }
6310 6311
6311 6312 sz = min(sz, xd->size_left);
6312 6313 xd->size_left = sz;
6313 6314 minsz = min(512, sz);
6314 6315
6315 6316 if (dbuf == NULL)
6316 6317 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
6317 6318 if (dbuf == NULL) {
6318 6319 kmem_free(xd, xd->alloc_size);
6319 6320 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6320 6321 STMF_ALLOC_FAILURE, NULL);
6321 6322 return;
6322 6323 }
6323 6324 dbuf->db_lu_private = xd;
6324 6325 stmf_xd_to_dbuf(dbuf, 1);
6325 6326
6326 6327 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
6327 6328 (void) stmf_xfer_data(task, dbuf, 0);
6328 6329
6329 6330 }
6330 6331
6331 6332 void
6332 6333 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6333 6334 {
6334 6335
6335 6336 switch (task->task_mgmt_function) {
6336 6337 /*
6337 6338 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6338 6339 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6339 6340 * in these cases. This needs to be changed to abort only the required
6340 6341 * set.
6341 6342 */
6342 6343 case TM_ABORT_TASK:
6343 6344 case TM_ABORT_TASK_SET:
6344 6345 case TM_CLEAR_TASK_SET:
6345 6346 case TM_LUN_RESET:
6346 6347 stmf_handle_lun_reset(task);
6347 6348 /* issue the reset to the proxy node as well */
6348 6349 if (stmf_state.stmf_alua_state == 1) {
6349 6350 (void) stmf_proxy_scsi_cmd(task, NULL);
6350 6351 }
6351 6352 return;
6352 6353 case TM_TARGET_RESET:
6353 6354 case TM_TARGET_COLD_RESET:
6354 6355 case TM_TARGET_WARM_RESET:
6355 6356 stmf_handle_target_reset(task);
6356 6357 return;
6357 6358 default:
6358 6359 /* We dont support this task mgmt function */
6359 6360 stmf_scsilib_send_status(task, STATUS_CHECK,
6360 6361 STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6361 6362 return;
6362 6363 }
6363 6364 }
6364 6365
/*
 * Handle a LUN-scoped task management function (ABORT_TASK,
 * ABORT_TASK_SET, CLEAR_TASK_SET, LUN_RESET).  If no reset is already
 * active on the LU, mark it with ILU_RESET_ACTIVE, queue aborts for all
 * other commands on the LU, and start polling this task until the aborts
 * drain.  If a reset is already in progress, fail this request with an
 * OPERATION IN PROGRESS check condition instead.
 */
void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiatiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		/* Could not arm the poll; abort this TMF task itself */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6407 6408
/*
 * Handle a target-scoped reset (TARGET_RESET, TARGET_COLD_RESET,
 * TARGET_WARM_RESET).  Under stmf_lock (to serialize against LUN resets)
 * and the session lock held as writer, mark the session with
 * ISS_RESET_ACTIVE, verify that none of the LUs mapped into this session
 * already has a reset active (backing out with OPERATION IN PROGRESS if
 * one does), then set ILU_RESET_ACTIVE on every mapped LU, queue aborts
 * for all commands on each of those LUs, and start polling this task.
 * A session with no mapped LUs completes immediately with GOOD status.
 */
void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
	 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* A LUN reset is already running; back out */
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No luns in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}

	/* Queue aborts for everything outstanding on each mapped LU */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		/* Could not arm the poll; abort this TMF task itself */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6499 6500
/*
 * Handle a new command arriving while the session's LUN inventory has
 * changed.  Returns 0 if normal processing should continue (no change
 * pending, or the command is INQUIRY or REPORT LUNS), 1 if the command
 * was completed here with a REPORT LUNS DATA HAS CHANGED check
 * condition.  Clearing the flags requires the session lock as writer.
 */
int
stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
{
	scsi_task_t *task = itask->itask_task;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	rw_enter(iss->iss_lockp, RW_WRITER);
	if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
	    (task->task_cdb[0] == SCMD_INQUIRY)) {
		rw_exit(iss->iss_lockp);
		return (0);
	}
	atomic_and_32(&iss->iss_flags,
	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	rw_exit(iss->iss_lockp);

	if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
		/* REPORT LUNS itself proceeds; it will return the new map */
		return (0);
	}
	stmf_scsilib_send_status(task, STATUS_CHECK,
	    STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
	return (1);
}
6524 6525
6525 6526 void
6526 6527 stmf_worker_init()
6527 6528 {
6528 6529 uint32_t i;
6529 6530
6530 6531 /* Make local copy of global tunables */
6531 6532 stmf_i_max_nworkers = stmf_max_nworkers;
6532 6533 stmf_i_min_nworkers = stmf_min_nworkers;
6533 6534
6534 6535 ASSERT(stmf_workers == NULL);
6535 6536 if (stmf_i_min_nworkers < 4) {
6536 6537 stmf_i_min_nworkers = 4;
6537 6538 }
6538 6539 if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
6539 6540 stmf_i_max_nworkers = stmf_i_min_nworkers;
6540 6541 }
6541 6542 stmf_workers = (stmf_worker_t *)kmem_zalloc(
6542 6543 sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
6543 6544 for (i = 0; i < stmf_i_max_nworkers; i++) {
6544 6545 stmf_worker_t *w = &stmf_workers[i];
6545 6546 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6546 6547 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6547 6548 }
6548 6549 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6549 6550 stmf_workers_state = STMF_WORKERS_ENABLED;
6550 6551
6551 6552 /* Workers will be started by stmf_worker_mgmt() */
6552 6553
6553 6554 /* Lets wait for atleast one worker to start */
6554 6555 while (stmf_nworkers_cur == 0)
6555 6556 delay(drv_usectohz(20 * 1000));
6556 6557 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
6557 6558 }
6558 6559
/*
 * Tear down the worker thread pool.  Disables the pool, speeds up the
 * management cadence, and waits up to ten seconds for all worker threads
 * to exit.  On timeout the pool is re-enabled and STMF_BUSY is returned;
 * otherwise the per-worker locks/CVs are destroyed, the array is freed,
 * and STMF_SUCCESS is returned.
 */
stmf_status_t
stmf_worker_fini()
{
	int i;
	clock_t sb;

	if (stmf_workers_state == STMF_WORKERS_DISABLED)
		return (STMF_SUCCESS);
	ASSERT(stmf_workers);
	stmf_workers_state = STMF_WORKERS_DISABLED;
	stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
	cv_signal(&stmf_state.stmf_cv);

	/* Deadline: ten seconds from now */
	sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
	/* Wait for all the threads to die */
	while (stmf_nworkers_cur != 0) {
		if (ddi_get_lbolt() > sb) {
			stmf_workers_state = STMF_WORKERS_ENABLED;
			return (STMF_BUSY);
		}
		delay(drv_usectohz(100 * 1000));
	}
	for (i = 0; i < stmf_i_max_nworkers; i++) {
		stmf_worker_t *w = &stmf_workers[i];
		mutex_destroy(&w->worker_lock);
		cv_destroy(&w->worker_cv);
	}
	kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
	stmf_workers = NULL;

	return (STMF_SUCCESS);
}
6591 6592
/*
 * Body of an STMF worker thread.  Runs with worker_lock held except
 * around provider callouts.  Each pass drains the worker's task queue:
 * expired entries on the wait queue are moved back to the task queue,
 * the top command on each task's command stack is decoded (with aborts
 * taking priority via the CAS loop on itask_flags), the task is either
 * re-queued, parked on the wait queue, or removed, and then the
 * corresponding LU / local-port callback is invoked with the lock
 * dropped.  The thread exits when STMF_WORKER_TERMINATE is set and its
 * reference count drains to zero.
 */
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	DTRACE_PROBE1(worker__create, stmf_worker_t, w);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
stmf_worker_loop:;
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/* Asked to terminate and no tasks reference us: exit */
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			/* Wait period over: move waiters to the task queue */
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		DTRACE_PROBE2(worker__active, stmf_worker_t, w,
		    scsi_task_t *, task);
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		/*
		 * CAS loop on itask_flags: an abort in progress overrides
		 * whatever command was on the stack; a new task atomically
		 * picks up ITASK_KNOWN_TO_LU | ITASK_KSTAT_IN_RUNQ.
		 */
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * set ITASK_KSTAT_IN_RUNQ, this flag
				 * will not reset until task completed
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				wait_queue = 1;
			} else {
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			wait_queue = 1;
		}

		if (wait_queue) {
			/* Park the task on the wait queue until the timer */
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			/* More commands stacked: requeue at the tail */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
			itask->itask_waitq_time +=
			    gethrtime() - itask->itask_waitq_enter_timestamp;
		}

		/* We made it here means we are going to call LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_itl_lu_new_task(itask);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef DEBUG
			if (stmf_drop_task_counter > 0) {
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			/* Still referenced: re-check again very soon */
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
6815 6816
/*
 * Periodic worker-pool management.  First completes any in-flight pool
 * resize by accounting for workers that have started or exited; then
 * shuts the pool down if it has been disabled, ramps up to the minimum
 * on start, and otherwise sizes the pool to the per-interval maximum
 * queue depth observed across workers (capped by the max concurrent
 * task count).  Scale-down is deferred by stmf_worker_scale_down_delay
 * seconds, during which the highest load is tracked.  Growing creates
 * threads here; shrinking sets STMF_WORKER_TERMINATE on the top workers
 * and lets them exit on their own.
 */
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		/* A full sample interval elapsed: gather and reset stats */
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we dont reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* Its time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	/* Load is at or above capacity: cancel any pending scale-down */
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that its time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
6955 6956
6956 6957 /*
6957 6958 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6958 6959 * If all the data has been filled out, frees the xd and makes
6959 6960 * db_lu_private NULL.
6960 6961 */
6961 6962 void
6962 6963 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6963 6964 {
6964 6965 stmf_xfer_data_t *xd;
6965 6966 uint8_t *p;
6966 6967 int i;
6967 6968 uint32_t s;
6968 6969
6969 6970 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6970 6971 dbuf->db_data_size = 0;
6971 6972 if (set_rel_off)
6972 6973 dbuf->db_relative_offset = xd->size_done;
6973 6974 for (i = 0; i < dbuf->db_sglist_length; i++) {
6974 6975 s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6975 6976 p = &xd->buf[xd->size_done];
6976 6977 bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6977 6978 xd->size_left -= s;
6978 6979 xd->size_done += s;
6979 6980 dbuf->db_data_size += s;
6980 6981 if (xd->size_left == 0) {
6981 6982 kmem_free(xd, xd->alloc_size);
6982 6983 dbuf->db_lu_private = NULL;
6983 6984 return;
6984 6985 }
6985 6986 }
6986 6987 }
6987 6988
/*
 * dlun0 task constructor: no per-task resources are needed, so this
 * always succeeds.
 */
/* ARGSUSED */
stmf_status_t
stmf_dlun0_task_alloc(scsi_task_t *task)
{
	return (STMF_SUCCESS);
}
6994 6995
/*
 * New-task entry point for the default LU (dlun0).  Task management
 * functions are routed to stmf_scsilib_handle_task_mgmt().  Of the CDBs,
 * only standard INQUIRY and REPORT LUNS are answered here; everything
 * else gets an INVALID OPCODE check condition.
 */
void
stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	stmf_i_scsi_session_t *iss;
	uint32_t sz, minsz;
	uint8_t *p;
	stmf_xfer_data_t *xd;
	uint8_t inq_page_length = 31;

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	switch (cdbp[0]) {
	case SCMD_INQUIRY:
		/*
		 * Basic protocol checks. In addition, only reply to
		 * standard inquiry. Otherwise, the LU provider needs
		 * to respond.
		 */

		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Allocation length from CDB bytes 3-4 */
		task->task_cmd_xfer_length =
		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    min(36, task->task_cmd_xfer_length));
		minsz = 36;

		if (sz == 0) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return;
		}

		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
			/*
			 * Ignore any preallocated dbuf if the size is less
			 * than 36. It will be freed during the task_free.
			 */
			dbuf = NULL;
		}
		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = NULL;

		p = dbuf->db_sglist[0].seg_addr;

		/*
		 * Standard inquiry handling only.
		 */

		bzero(p, inq_page_length + 5);

		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
		p[2] = 5;
		p[3] = 0x12;
		p[4] = inq_page_length;
		p[6] = 0x80;

		(void) strncpy((char *)p+8, "SUN ", 8);
		(void) strncpy((char *)p+16, "COMSTAR ", 16);
		(void) strncpy((char *)p+32, "1.0 ", 4);

		dbuf->db_data_size = sz;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);

		return;

	case SCMD_REPORT_LUNS:
		/* Allocation length from CDB bytes 6-9 */
		task->task_cmd_xfer_length =
		    ((((uint32_t)task->task_cdb[6]) << 24) |
		    (((uint32_t)task->task_cdb[7]) << 16) |
		    (((uint32_t)task->task_cdb[8]) << 8) |
		    ((uint32_t)task->task_cdb[9]));

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    task->task_cmd_xfer_length);

		if (sz < 16) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		iss = (stmf_i_scsi_session_t *)
		    task->task_session->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
		rw_exit(iss->iss_lockp);

		if (xd == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}

		sz = min(sz, xd->size_left);
		xd->size_left = sz;
		minsz = min(512, sz);

		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
		if (dbuf == NULL) {
			kmem_free(xd, xd->alloc_size);
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = xd;
		stmf_xd_to_dbuf(dbuf, 1);

		/* The initiator now has the current map; clear the flags */
		atomic_and_32(&iss->iss_flags,
		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
7141 7142
/*
 * Data-transfer completion for dlun0.  Aborts the task on transfer
 * failure, continues transferring while xd data remains attached to the
 * dbuf, and on completion either notifies pppt (for proxy tasks, which
 * are completed from the proxy side) or sends GOOD status.
 */
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf, 1);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_free_dbuf(task, dbuf);
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7191 7192
/*
 * dlun0 has no work to do when status delivery completes.
 */
/* ARGSUSED */
void
stmf_dlun0_status_done(scsi_task_t *task)
{
}
7197 7198
/*
 * dlun0 allocates no per-task resources, so there is nothing to free.
 */
/* ARGSUSED */
void
stmf_dlun0_task_free(scsi_task_t *task)
{
}
7203 7204
/*
 * Abort handler for dlun0.  For a task management task that initiated a
 * LUN or target reset, roll back the corresponding RESET_ACTIVE state.
 * For ordinary tasks, free any stmf_xfer_data_t still attached to the
 * task's dbufs; itask_allocated_buf_map is a bitmask over
 * itask_dbufs[0..3].
 */
/* ARGSUSED */
stmf_status_t
stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
{
	scsi_task_t *task = (scsi_task_t *)arg;
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int i;
	uint8_t map;

	if ((task->task_mgmt_function) && (itask->itask_flags &
	    (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
		switch (task->task_mgmt_function) {
		case TM_ABORT_TASK:
		case TM_ABORT_TASK_SET:
		case TM_CLEAR_TASK_SET:
		case TM_LUN_RESET:
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
			break;
		case TM_TARGET_RESET:
		case TM_TARGET_COLD_RESET:
		case TM_TARGET_WARM_RESET:
			stmf_abort_target_reset(task);
			break;
		}
		return (STMF_ABORT_SUCCESS);
	}

	/*
	 * OK so its not a task mgmt. Make sure we free any xd sitting
	 * inside any dbuf.
	 */
	if ((map = itask->itask_allocated_buf_map) != 0) {
		for (i = 0; i < 4; i++) {
			if ((map & 1) &&
			    ((itask->itask_dbufs[i])->db_lu_private)) {
				stmf_xfer_data_t *xd;
				stmf_data_buf_t *dbuf;

				dbuf = itask->itask_dbufs[i];
				xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
				dbuf->db_lu_private = NULL;
				kmem_free(xd, xd->alloc_size);
			}
			map >>= 1;
		}
	}
	return (STMF_ABORT_SUCCESS);
}
7254 7255
7255 7256 void
7256 7257 stmf_dlun0_task_poll(struct scsi_task *task)
7257 7258 {
7258 7259 /* Right now we only do this for handling task management functions */
7259 7260 ASSERT(task->task_mgmt_function);
7260 7261
7261 7262 switch (task->task_mgmt_function) {
7262 7263 case TM_ABORT_TASK:
7263 7264 case TM_ABORT_TASK_SET:
7264 7265 case TM_CLEAR_TASK_SET:
7265 7266 case TM_LUN_RESET:
7266 7267 (void) stmf_lun_reset_poll(task->task_lu, task, 0);
7267 7268 return;
7268 7269 case TM_TARGET_RESET:
7269 7270 case TM_TARGET_COLD_RESET:
7270 7271 case TM_TARGET_WARM_RESET:
7271 7272 stmf_target_reset_poll(task);
7272 7273 return;
7273 7274 }
7274 7275 }
7275 7276
/*
 * Control entry point for dlun0; not expected to be invoked, so just
 * log the unexpected call.
 */
/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/* This function will never be called */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}
7283 7284
/*
 * Allocate and wire up dlun0, the internal default LU used to service
 * tasks flagged for default handling (task management, INQUIRY,
 * REPORT LUNS).
 */
void
stmf_dlun_init()
{
	stmf_i_lu_t *ilu;

	dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
	dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
	dlun0->lu_new_task = stmf_dlun0_new_task;
	dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
	dlun0->lu_send_status_done = stmf_dlun0_status_done;
	dlun0->lu_task_free = stmf_dlun0_task_free;
	dlun0->lu_abort = stmf_dlun0_abort;
	dlun0->lu_task_poll = stmf_dlun0_task_poll;
	dlun0->lu_ctl = stmf_dlun0_ctl;

	/* Start task accounting on counter set 1 */
	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
	ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
}
7302 7303
7303 7304 stmf_status_t
7304 7305 stmf_dlun_fini()
7305 7306 {
7306 7307 stmf_i_lu_t *ilu;
7307 7308
7308 7309 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7309 7310
7310 7311 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
7311 7312 if (ilu->ilu_ntasks) {
7312 7313 stmf_i_scsi_task_t *itask, *nitask;
7313 7314
7314 7315 nitask = ilu->ilu_tasks;
7315 7316 do {
7316 7317 itask = nitask;
7317 7318 nitask = itask->itask_lu_next;
7318 7319 dlun0->lu_task_free(itask->itask_task);
7319 7320 stmf_free(itask->itask_task);
7320 7321 } while (nitask != NULL);
7321 7322
7322 7323 }
7323 7324 stmf_free(dlun0);
7324 7325 return (STMF_SUCCESS);
7325 7326 }
7326 7327
7327 7328 void
7328 7329 stmf_abort_target_reset(scsi_task_t *task)
7329 7330 {
7330 7331 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7331 7332 task->task_session->ss_stmf_private;
7332 7333 stmf_lun_map_t *lm;
7333 7334 stmf_lun_map_ent_t *lm_ent;
7334 7335 stmf_i_lu_t *ilu;
7335 7336 int i;
7336 7337
7337 7338 rw_enter(iss->iss_lockp, RW_READER);
7338 7339 lm = iss->iss_sm;
7339 7340 for (i = 0; i < lm->lm_nentries; i++) {
7340 7341 if (lm->lm_plus[i] == NULL)
7341 7342 continue;
7342 7343 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7343 7344 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7344 7345 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7345 7346 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7346 7347 }
7347 7348 }
7348 7349 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7349 7350 rw_exit(iss->iss_lockp);
7350 7351 }
7351 7352
/*
 * The return value is only used by function managing target reset.
 */
stmf_status_t
stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	int ntasks_pending;

	/* Outstanding tasks on this LU that have not yet completed. */
	ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
	/*
	 * This function is also used during Target reset. The idea is that
	 * once all the commands are aborted, call the LU's reset entry
	 * point (abort entry point with a reset flag). But if this Task
	 * mgmt is running on this LU then all the tasks cannot be aborted.
	 * one task (this task) will still be running which is OK.
	 */
	if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
	    (ntasks_pending == 1))) {
		stmf_status_t ret;

		/* Only reset-style TMFs invoke the LU's reset entry point. */
		if ((task->task_mgmt_function == TM_LUN_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
			ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
		} else {
			ret = STMF_SUCCESS;
		}
		if (ret == STMF_SUCCESS) {
			/* Reset finished on this LU; clear its flag. */
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
		if (target_reset) {
			/* Target-reset caller interprets the status itself. */
			return (ret);
		}
		if (ret == STMF_SUCCESS) {
			/* Standalone LUN reset: complete the TMF with GOOD. */
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return (ret);
		}
		if (ret != STMF_BUSY) {
			/* Hard failure: abort the TMF task itself. */
			stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
			return (ret);
		}
	}

	if (target_reset) {
		/* Tell target reset polling code that we are not done */
		return (STMF_BUSY);
	}

	/* Re-arm the poll timer; abort the TMF task if that fails. */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_SUCCESS);
	}

	return (STMF_SUCCESS);
}
7411 7412
/*
 * Polls the progress of an in-flight target reset: every LU mapped into
 * the task's session that still has ILU_RESET_ACTIVE set is polled via
 * stmf_lun_reset_poll().  Once all LUs are done, the TMF completes with
 * STATUS_GOOD; on a hard per-LU failure the TMF task is aborted.
 */
void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;	/* set if any LU is still resetting */

	/* A target reset must already be in progress on this session. */
	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/*
			 * Drop the session lock around the per-LU poll;
			 * stmf_lun_reset_poll() may send status or abort
			 * the task.  Reacquired before continuing the scan.
			 */
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			if (ret != STMF_BUSY) {
				/* Unrecoverable error: abort the TMF task. */
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	if (not_done) {
		/* Some LU is still busy: re-arm the poll and retry later. */
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	/* All LUs finished; clear session reset state and report GOOD. */
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7464 7465
7465 7466 stmf_status_t
7466 7467 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7467 7468 {
7468 7469 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7469 7470
7470 7471 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7471 7472 return (STMF_INVALID_ARG);
7472 7473 }
7473 7474
7474 7475 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7475 7476 return (STMF_SUCCESS);
7476 7477 }
7477 7478
7478 7479 stmf_status_t
7479 7480 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7480 7481 {
7481 7482 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7482 7483
7483 7484 if (eventid == STMF_EVENT_ALL) {
7484 7485 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7485 7486 return (STMF_SUCCESS);
7486 7487 }
7487 7488
7488 7489 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7489 7490 return (STMF_INVALID_ARG);
7490 7491 }
7491 7492
7492 7493 STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7493 7494 return (STMF_SUCCESS);
7494 7495 }
7495 7496
7496 7497 stmf_status_t
7497 7498 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7498 7499 {
7499 7500 stmf_i_local_port_t *ilport =
7500 7501 (stmf_i_local_port_t *)lport->lport_stmf_private;
7501 7502
7502 7503 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7503 7504 return (STMF_INVALID_ARG);
7504 7505 }
7505 7506
7506 7507 STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7507 7508 return (STMF_SUCCESS);
7508 7509 }
7509 7510
7510 7511 stmf_status_t
7511 7512 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7512 7513 {
7513 7514 stmf_i_local_port_t *ilport =
7514 7515 (stmf_i_local_port_t *)lport->lport_stmf_private;
7515 7516
7516 7517 if (eventid == STMF_EVENT_ALL) {
7517 7518 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7518 7519 return (STMF_SUCCESS);
7519 7520 }
7520 7521
7521 7522 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7522 7523 return (STMF_INVALID_ARG);
7523 7524 }
7524 7525
7525 7526 STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7526 7527 return (STMF_SUCCESS);
7527 7528 }
7528 7529
7529 7530 void
7530 7531 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7531 7532 {
7532 7533 if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7533 7534 (ilu->ilu_lu->lu_event_handler != NULL)) {
7534 7535 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7535 7536 }
7536 7537 }
7537 7538
7538 7539 void
7539 7540 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7540 7541 uint32_t flags)
7541 7542 {
7542 7543 if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7543 7544 (ilport->ilport_lport->lport_event_handler != NULL)) {
7544 7545 ilport->ilport_lport->lport_event_handler(
7545 7546 ilport->ilport_lport, eventid, arg, flags);
7546 7547 }
7547 7548 }
7548 7549
7549 7550 /*
7550 7551 * With the possibility of having multiple itl sessions pointing to the
7551 7552 * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
7552 7553 * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
7553 7554 * statistics.
7554 7555 */
7555 7556 void
7556 7557 stmf_itl_task_start(stmf_i_scsi_task_t *itask)
7557 7558 {
7558 7559 stmf_itl_data_t *itl = itask->itask_itl_datap;
7559 7560 scsi_task_t *task = itask->itask_task;
7560 7561 stmf_i_lu_t *ilu;
7561 7562
7562 7563 if (itl == NULL || task->task_lu == dlun0)
7563 7564 return;
7564 7565 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7565 7566 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7566 7567 itask->itask_start_timestamp = gethrtime();
7567 7568 kstat_waitq_enter(KSTAT_IO_PTR(itl->itl_kstat_taskq));
7568 7569 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
7569 7570 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7570 7571
7571 7572 stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
7572 7573 }
7573 7574
7574 7575 void
7575 7576 stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
7576 7577 {
7577 7578 stmf_itl_data_t *itl = itask->itask_itl_datap;
7578 7579 scsi_task_t *task = itask->itask_task;
7579 7580 stmf_i_lu_t *ilu;
7580 7581
7581 7582 if (itl == NULL || task->task_lu == dlun0)
7582 7583 return;
7583 7584 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7584 7585 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7585 7586 kstat_waitq_to_runq(KSTAT_IO_PTR(itl->itl_kstat_taskq));
7586 7587 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
7587 7588 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7588 7589
7589 7590 stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
7590 7591 }
7591 7592
/*
 * Records completion statistics for a finished task into the ITL, LU and
 * lport kstats, then removes the task from its kstat wait/run queue.
 */
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	kstat_io_t *kip;
	hrtime_t elapsed_time;
	stmf_kstat_itl_info_t *itli;
	stmf_i_lu_t *ilu;

	/* No stats are kept for tasks without ITL data or aimed at dlun0. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	mutex_enter(ilu->ilu_kstat_io->ks_lock);
	itli = (stmf_kstat_itl_info_t *)KSTAT_NAMED_PTR(itl->itl_kstat_info);
	kip = KSTAT_IO_PTR(itl->itl_kstat_taskq);

	/* Total time this task spent on the wait queue. */
	itli->i_task_waitq_elapsed.value.ui64 += itask->itask_waitq_time;

	itask->itask_done_timestamp = gethrtime();
	/* Whole-task latency, from stmf_itl_task_start() to now. */
	elapsed_time =
	    itask->itask_done_timestamp - itask->itask_start_timestamp;

	if (task->task_flags & TF_READ_DATA) {
		kip->reads++;
		kip->nread += itask->itask_read_xfer;
		itli->i_task_read_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_read_elapsed.value.ui64 +=
		    itask->itask_lu_read_time;
		itli->i_lport_read_elapsed.value.ui64 +=
		    itask->itask_lport_read_time;
	}

	if (task->task_flags & TF_WRITE_DATA) {
		kip->writes++;
		kip->nwritten += itask->itask_write_xfer;
		itli->i_task_write_elapsed.value.ui64 += elapsed_time;
		itli->i_lu_write_elapsed.value.ui64 +=
		    itask->itask_lu_write_time;
		itli->i_lport_write_elapsed.value.ui64 +=
		    itask->itask_lport_write_time;
	}

	/*
	 * Exit whichever kstat queue the task is currently on.  In both
	 * arms the lport-queue update is deliberately done after dropping
	 * ilu_kstat_io's lock.
	 */
	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		kstat_runq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		kstat_waitq_exit(kip);
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}
7648 7649
7649 7650 void
7650 7651 stmf_lu_xfer_start(scsi_task_t *task)
7651 7652 {
7652 7653 stmf_i_scsi_task_t *itask = task->task_stmf_private;
7653 7654 stmf_itl_data_t *itl = itask->itask_itl_datap;
7654 7655 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7655 7656 kstat_io_t *kip;
7656 7657
7657 7658 if (itl == NULL || task->task_lu == dlun0)
7658 7659 return;
7659 7660
7660 7661 kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7661 7662 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7662 7663 kstat_runq_enter(kip);
7663 7664 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7664 7665 }
7665 7666
7666 7667 void
7667 7668 stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
7668 7669 hrtime_t elapsed_time)
7669 7670 {
7670 7671 stmf_i_scsi_task_t *itask = task->task_stmf_private;
7671 7672 stmf_itl_data_t *itl = itask->itask_itl_datap;
7672 7673 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7673 7674 kstat_io_t *kip;
7674 7675
7675 7676 if (itl == NULL || task->task_lu == dlun0)
7676 7677 return;
7677 7678
7678 7679 if (read) {
7679 7680 atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
7680 7681 elapsed_time);
7681 7682 } else {
7682 7683 atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
7683 7684 elapsed_time);
7684 7685 }
7685 7686
7686 7687 kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7687 7688 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7688 7689 kstat_runq_exit(kip);
7689 7690 if (read) {
7690 7691 kip->reads++;
7691 7692 kip->nread += xfer_bytes;
7692 7693 } else {
7693 7694 kip->writes++;
7694 7695 kip->nwritten += xfer_bytes;
7695 7696 }
7696 7697 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7697 7698 }
7698 7699
7699 7700 static void
7700 7701 stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7701 7702 {
7702 7703 stmf_itl_data_t *itl = itask->itask_itl_datap;
7703 7704
7704 7705 if (itl == NULL)
7705 7706 return;
7706 7707
7707 7708 DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
7708 7709 stmf_data_buf_t *, dbuf);
7709 7710
7710 7711 dbuf->db_xfer_start_timestamp = gethrtime();
7711 7712 }
7712 7713
/*
 * Accounts a completed lport data-buffer transfer: accumulates per-task
 * transfer time and byte counts, then updates the ITL and lport I/O kstats.
 */
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task;
	stmf_i_local_port_t *ilp;
	kstat_io_t *kip;
	hrtime_t elapsed_time;
	uint64_t xfer_size;

	/* Nothing to account without ITL data. */
	if (itl == NULL)
		return;

	task = (scsi_task_t *)itask->itask_task;
	ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
	/* Failed transfers count zero bytes. */
	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	/* Timestamp was set by stmf_lport_xfer_start(). */
	elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	/* Per-lport transfer kstat, updated under the lport kstat lock. */
	kip = KSTAT_IO_PTR(itl->itl_kstat_lport_xfer);
	mutex_enter(ilp->ilport_kstat_io->ks_lock);
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		kip->reads++;
		kip->nread += xfer_size;
	} else {
		kip->writes++;
		kip->nwritten += xfer_size;
	}
	mutex_exit(ilp->ilport_kstat_io->ks_lock);

	/* Clear the stamp: no transfer is in flight on this buffer now. */
	dbuf->db_xfer_start_timestamp = 0;
}
7760 7761
/*
 * Starts the STMF background service thread (stmf_svc) on a dedicated
 * single-threaded taskq.  Idempotent: a no-op when the service is
 * already running (STMF_SVC_STARTED is set by stmf_svc() itself).
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	/* Empty request list: the tail pointer refers to the list head. */
	stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}
7772 7773
7773 7774 stmf_status_t
7774 7775 stmf_svc_fini()
7775 7776 {
7776 7777 uint32_t i;
7777 7778
7778 7779 mutex_enter(&stmf_state.stmf_lock);
7779 7780 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
7780 7781 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
7781 7782 cv_signal(&stmf_state.stmf_cv);
7782 7783 }
7783 7784 mutex_exit(&stmf_state.stmf_lock);
7784 7785
7785 7786 /* Wait for 5 seconds */
7786 7787 for (i = 0; i < 500; i++) {
7787 7788 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7788 7789 delay(drv_usectohz(10000));
7789 7790 else
7790 7791 break;
7791 7792 }
7792 7793 if (i == 500)
7793 7794 return (STMF_BUSY);
7794 7795
7795 7796 ddi_taskq_destroy(stmf_state.stmf_svc_taskq);
7796 7797
7797 7798 return (STMF_SUCCESS);
7798 7799 }
7799 7800
/*
 * Pacing clocks (lbolt ticks) carried across iterations of stmf_svc()'s
 * loop and consumed by stmf_svc_timeout().
 */
struct stmf_svc_clocks {
	/* free-task drain scan: start of current round / next round due */
	clock_t drain_start, drain_next;
	/* ilu timing scan: start of current round / next round due */
	clock_t timing_start, timing_next;
	/* next time stmf_worker_mgmt() should run */
	clock_t worker_delay;
};
7805 7806
/*
 * Body of the STMF background service thread.  Drains the stmf_svc_active
 * request list, dispatching each LU/lport online/offline request, until
 * STMF_SVC_TERMINATE is set (by stmf_svc_fini()).  Whenever the list is
 * empty, stmf_svc_timeout() runs the periodic housekeeping and waits.
 * Runs with stmf_lock held except around the ctl callbacks.
 */
/* ARGSUSED */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	struct stmf_svc_clocks clks = { 0 };

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

	while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) {
		if (stmf_state.stmf_svc_active == NULL) {
			/* No queued work: housekeeping, then wait. */
			stmf_svc_timeout(&clks);
			continue;
		}

		/*
		 * Pop the front request from the active list. After this,
		 * the request will no longer be referenced by global state,
		 * so it should be safe to access it without holding the
		 * stmf state lock.
		 */
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		/* List drained: reset the tail pointer to the list head. */
		if (stmf_state.stmf_svc_active == NULL)
			stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;

		/*
		 * NOTE(review): req does not appear to be freed after it is
		 * processed here; stmf_svc_kill_obj_requests() only frees
		 * requests still queued.  Verify requests are released
		 * elsewhere.
		 */
		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lport = (stmf_local_port_t *)req->svc_obj;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_ONLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lu = (stmf_lu_t *)req->svc_obj;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/* Block until in-flight tasks drain (deadman-timed) */
			stmf_wait_ilu_tasks_finish(ilu);
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		mutex_enter(&stmf_state.stmf_lock);
	}

	stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);
}
7873 7874
/*
 * Housekeeping run by stmf_svc() whenever its request list is empty:
 * paces the ilu timing scan, drains free tasks, runs worker management,
 * delivers "initial LUN mapped" events, and finally waits (up to 20ms)
 * for new work.  Called and returns with stmf_lock held; drops and
 * reacquires it around lport event delivery.
 */
static void
stmf_svc_timeout(struct stmf_svc_clocks *clks)
{
	clock_t td;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	/* Wait quantum used at the bottom of this function: 20ms. */
	td = drv_usectohz(20000);

	/* Do timeouts */
	if (stmf_state.stmf_nlus &&
	    ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) {
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_timing =
			    stmf_state.stmf_ilulist;
			clks->timing_start = ddi_get_lbolt();
		}

		stmf_check_ilu_timing();
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we finished a complete round */
			clks->timing_next =
			    clks->timing_start + drv_usectohz(5*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->timing_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New requests arrived during the scan: service them first. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if there are free tasks to clear */
	if (stmf_state.stmf_nlus &&
	    ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) {
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_draining =
			    stmf_state.stmf_ilulist;
			clks->drain_start = ddi_get_lbolt();
		}

		stmf_check_freetask();
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we finished a complete round */
			clks->drain_next =
			    clks->drain_start + drv_usectohz(10*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->drain_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New requests arrived during the scan: service them first. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if we need to run worker_mgmt */
	if (ddi_get_lbolt() > clks->worker_delay) {
		stmf_worker_mgmt();
		clks->worker_delay = ddi_get_lbolt() +
		    stmf_worker_mgmt_delay;
	}

	/* Check if any active session got its 1st LUN */
	if (stmf_state.stmf_process_initial_luns) {
		int stmf_level = 0;	/* events delivered this pass */
		int port_level;		/* events delivered for this lport */

		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = next_ilport) {
			int ilport_lock_held;
			next_ilport = ilport->ilport_next;

			if ((ilport->ilport_flags &
			    ILPORT_SS_GOT_INITIAL_LUNS) == 0)
				continue;

			port_level = 0;
			rw_enter(&ilport->ilport_lock, RW_READER);
			ilport_lock_held = 1;

			for (iss = ilport->ilport_ss_list; iss;
			    iss = iss->iss_next) {
				if ((iss->iss_flags &
				    ISS_GOT_INITIAL_LUNS) == 0)
					continue;

				port_level++;
				stmf_level++;
				atomic_and_32(&iss->iss_flags,
				    ~ISS_GOT_INITIAL_LUNS);
				atomic_or_32(&iss->iss_flags,
				    ISS_EVENT_ACTIVE);
				/*
				 * Drop both ilport_lock and stmf_lock while
				 * the event handler runs; both are
				 * reacquired below.
				 */
				rw_exit(&ilport->ilport_lock);
				ilport_lock_held = 0;
				mutex_exit(&stmf_state.stmf_lock);
				stmf_generate_lport_event(ilport,
				    LPORT_EVENT_INITIAL_LUN_MAPPED,
				    iss->iss_ss, 0);
				atomic_and_32(&iss->iss_flags,
				    ~ISS_EVENT_ACTIVE);
				mutex_enter(&stmf_state.stmf_lock);
				/*
				 * scan all the ilports again as the
				 * ilport list might have changed.
				 */
				next_ilport = stmf_state.stmf_ilportlist;
				break;
			}

			if (port_level == 0)
				atomic_and_32(&ilport->ilport_flags,
				    ~ILPORT_SS_GOT_INITIAL_LUNS);
			/* drop the lock if we are holding it. */
			if (ilport_lock_held == 1)
				rw_exit(&ilport->ilport_lock);

			/* Max 4 session at a time */
			if (stmf_level >= 4)
				break;
		}

		if (stmf_level == 0)
			stmf_state.stmf_process_initial_luns = 0;
	}

	/* Sleep until signalled (new request) or the 20ms tick elapses. */
	stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
	(void) cv_reltimedwait(&stmf_state.stmf_cv,
	    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
	stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
}
8010 8011
/*
 * Waits for ongoing I/O tasks to finish on an LU in preparation for
 * the LU's offlining. The LU should already be in an Offlining state
 * (otherwise I/O to the LU might never end). There is an additional
 * enforcement of this via a deadman timer check.
 */
static void
stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu)
{
	clock_t start, now, deadline;

	start = now = ddi_get_lbolt();
	/* stmf_io_deadman is in seconds; deadline is absolute lbolt. */
	deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu);
	mutex_enter(&ilu->ilu_task_lock);
	/* Done once every task has been returned to the free list. */
	while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) {
		(void) cv_timedwait(&ilu->ilu_offline_pending_cv,
		    &ilu->ilu_task_lock, deadline);
		now = ddi_get_lbolt();
		if (now > deadline) {
			if (stmf_io_deadman_enabled) {
				/* Deadman expired with tasks outstanding. */
				cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit "
				    "on STMF_CMD_LU_OFFLINE after %d seconds",
				    stmf_io_deadman);
			} else {
				/* keep on spinning */
				deadline = now + drv_usectohz(stmf_io_deadman *
				    1000000llu);
			}
		}
	}
	mutex_exit(&ilu->ilu_task_lock);
	DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start);
}
8044 8045
8045 8046 void
8046 8047 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
8047 8048 {
8048 8049 stmf_svc_req_t *req;
8049 8050 int s;
8050 8051
8051 8052 ASSERT(!mutex_owned(&stmf_state.stmf_lock));
8052 8053 s = sizeof (stmf_svc_req_t);
8053 8054 if (info->st_additional_info) {
8054 8055 s += strlen(info->st_additional_info) + 1;
8055 8056 }
8056 8057 req = kmem_zalloc(s, KM_SLEEP);
8057 8058
8058 8059 req->svc_cmd = cmd;
8059 8060 req->svc_obj = obj;
8060 8061 req->svc_info.st_rflags = info->st_rflags;
8061 8062 if (info->st_additional_info) {
8062 8063 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
8063 8064 sizeof (stmf_svc_req_t)));
8064 8065 (void) strcpy(req->svc_info.st_additional_info,
8065 8066 info->st_additional_info);
8066 8067 }
8067 8068 req->svc_req_alloc_size = s;
8068 8069 req->svc_next = NULL;
8069 8070
8070 8071 mutex_enter(&stmf_state.stmf_lock);
8071 8072 *stmf_state.stmf_svc_tailp = req;
8072 8073 stmf_state.stmf_svc_tailp = &req->svc_next;
8073 8074 if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
8074 8075 cv_signal(&stmf_state.stmf_cv);
8075 8076 }
8076 8077 mutex_exit(&stmf_state.stmf_lock);
8077 8078 }
8078 8079
/*
 * Unlinks and frees every queued service request whose svc_obj matches
 * "obj", maintaining the list's tail pointer as entries are removed.
 * Caller must hold stmf_lock.
 */
static void
stmf_svc_kill_obj_requests(void *obj)
{
	stmf_svc_req_t *prev_req = NULL;
	stmf_svc_req_t *next_req;
	stmf_svc_req_t *req;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	for (req = stmf_state.stmf_svc_active; req != NULL; req = next_req) {
		next_req = req->svc_next;

		if (req->svc_obj == obj) {
			/* Unlink from the singly linked active list. */
			if (prev_req != NULL)
				prev_req->svc_next = next_req;
			else
				stmf_state.stmf_svc_active = next_req;

			/* Removed the last entry: recompute the tail. */
			if (next_req == NULL)
				stmf_state.stmf_svc_tailp = (prev_req != NULL) ?
				    &prev_req->svc_next :
				    &stmf_state.stmf_svc_active;

			kmem_free(req, req->svc_req_alloc_size);
		} else {
			prev_req = req;
		}
	}
}
8108 8109
8109 8110 void
8110 8111 stmf_trace(caddr_t ident, const char *fmt, ...)
8111 8112 {
8112 8113 va_list args;
8113 8114 char tbuf[160];
8114 8115 int len;
8115 8116
8116 8117 if (!stmf_trace_on)
8117 8118 return;
8118 8119 len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
8119 8120 ddi_get_lbolt());
8120 8121 va_start(args, fmt);
8121 8122 len += vsnprintf(tbuf + len, 158 - len, fmt, args);
8122 8123 va_end(args);
8123 8124
8124 8125 if (len > 158) {
8125 8126 len = 158;
8126 8127 }
8127 8128 tbuf[len++] = '\n';
8128 8129 tbuf[len] = 0;
8129 8130
8130 8131 mutex_enter(&trace_buf_lock);
8131 8132 bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
8132 8133 trace_buf_curndx += len;
8133 8134 if (trace_buf_curndx > (trace_buf_size - 320))
8134 8135 trace_buf_curndx = 0;
8135 8136 mutex_exit(&trace_buf_lock);
8136 8137 }
8137 8138
8138 8139 void
8139 8140 stmf_trace_clear()
8140 8141 {
8141 8142 if (!stmf_trace_on)
8142 8143 return;
8143 8144 mutex_enter(&trace_buf_lock);
8144 8145 trace_buf_curndx = 0;
8145 8146 if (trace_buf_size > 0)
8146 8147 stmf_trace_buf[0] = 0;
8147 8148 mutex_exit(&trace_buf_lock);
8148 8149 }
8149 8150
8150 8151 static void
8151 8152 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
8152 8153 {
8153 8154 stmf_state_change_info_t change_info;
8154 8155 void *ctl_private;
8155 8156 uint32_t ctl_cmd;
8156 8157 int msg = 0;
8157 8158
8158 8159 stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
8159 8160 offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
8160 8161 change_info.st_additional_info = info;
8161 8162 if (offline_lu) {
8162 8163 change_info.st_rflags = STMF_RFLAG_RESET |
8163 8164 STMF_RFLAG_LU_ABORT;
8164 8165 ctl_private = task->task_lu;
8165 8166 if (((stmf_i_lu_t *)
8166 8167 task->task_lu->lu_stmf_private)->ilu_state ==
8167 8168 STMF_STATE_ONLINE) {
8168 8169 msg = 1;
8169 8170 }
8170 8171 ctl_cmd = STMF_CMD_LU_OFFLINE;
8171 8172 } else {
8172 8173 change_info.st_rflags = STMF_RFLAG_RESET |
8173 8174 STMF_RFLAG_LPORT_ABORT;
8174 8175 ctl_private = task->task_lport;
8175 8176 if (((stmf_i_local_port_t *)
8176 8177 task->task_lport->lport_stmf_private)->ilport_state ==
8177 8178 STMF_STATE_ONLINE) {
8178 8179 msg = 1;
8179 8180 }
8180 8181 ctl_cmd = STMF_CMD_LPORT_OFFLINE;
8181 8182 }
8182 8183
8183 8184 if (msg) {
8184 8185 stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
8185 8186 offline_lu ? "LU" : "LPORT", info ? info :
8186 8187 "<no additional info>");
8187 8188 }
8188 8189 (void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
8189 8190 }
8190 8191
/* Converts one hex digit ('0'-'9', 'A'-'F', 'a'-'f') to its value, -1 if not a hex digit. */
static char
stmf_ctoi(char c)
{
	if (c >= '0' && c <= '9')
		return (c - '0');
	if (c >= 'A' && c <= 'F')
		return (c - 'A' + 10);
	if (c >= 'a' && c <= 'f')
		return (c - 'a' + 10);
	return (-1);
}
8204 8205
8205 8206 /* Convert from Hex value in ASCII format to the equivalent bytes */
8206 8207 static boolean_t
8207 8208 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8208 8209 {
8209 8210 int ii;
8210 8211
8211 8212 for (ii = 0; ii < dplen; ii++) {
8212 8213 char nibble1, nibble2;
8213 8214 char enc_char = *c++;
8214 8215 nibble1 = stmf_ctoi(enc_char);
8215 8216
8216 8217 enc_char = *c++;
8217 8218 nibble2 = stmf_ctoi(enc_char);
8218 8219 if (nibble1 == -1 || nibble2 == -1)
8219 8220 return (B_FALSE);
8220 8221
8221 8222 dp[ii] = (nibble1 << 4) | nibble2;
8222 8223 }
8223 8224 return (B_TRUE);
8224 8225 }
8225 8226
/*
 * Validate a SCSI TransportID (SPC-3 rev 23, section 7.5.4) contained in a
 * buffer of total_sz bytes.  Returns B_TRUE if the TransportID is well
 * formed for its protocol_id, B_FALSE otherwise.  On success, if tptid_sz
 * is non-NULL it is set to the size in bytes of the validated TransportID
 * (on failure it is left at 0).
 */
boolean_t
stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
    uint16_t *tptid_sz)
{
	/* Default/fixed TransportID size; adjusted per-protocol below. */
	uint16_t tpd_len = SCSI_TPTID_SIZE;

	if (tptid_sz)
		*tptid_sz = 0;
	/* Buffer must at least cover the common TransportID header. */
	if (total_sz < sizeof (scsi_transport_id_t))
		return (B_FALSE);

	switch (tptid->protocol_id) {

	case PROTOCOL_FIBRE_CHANNEL:
		/* FC Transport ID validation checks. SPC3 rev23, Table 284 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_iSCSI:
	{
		iscsi_transport_id_t *iscsiid;
		uint16_t adn_len, name_len;

		/* Check for valid format code, SPC3 rev 23 Table 288 */
		if ((total_sz < tpd_len) ||
		    (tptid->format_code != 0 && tptid->format_code != 1))
			return (B_FALSE);

		iscsiid = (iscsi_transport_id_t *)tptid;
		/* ADDITIONAL LENGTH field: bytes following the 4-byte header */
		adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
		/* -1 because iscsi_name[1] is already counted in the struct */
		tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;
		/*
		 * NOTE(review): tpd_len is recomputed here but total_sz is
		 * not rechecked against it, so a too-small buffer with a
		 * large adn_len passes — presumably the caller bounds the
		 * buffer; verify against callers.
		 */

		/*
		 * iSCSI Transport ID validation checks.
		 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
		 */
		if (adn_len < 20 || (adn_len % 4 != 0))
			return (B_FALSE);

		/*
		 * The iSCSI name must be NUL-terminated strictly inside the
		 * adn_len-byte region (strnlen stops at adn_len, so
		 * name_len == adn_len means no terminator was found).
		 */
		name_len = strnlen(iscsiid->iscsi_name, adn_len);
		if (name_len == 0 || name_len >= adn_len)
			return (B_FALSE);

		/* If the format_code is 1 check for ISID seperator */
		if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
		    SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
			return (B_FALSE);

	}
	break;

	case PROTOCOL_SRP:
		/* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default:
	{
		stmf_dflt_scsi_tptid_t *dflttpd;

		/*
		 * Protocols without a specific layout use the default
		 * STMF format: fixed header plus a 16-bit identifier length
		 * and the identifier bytes themselves.
		 */
		tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
		if (total_sz < tpd_len)
			return (B_FALSE);
		dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
		/* -1 because ident[1] is already counted in the struct */
		tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
		if (total_sz < tpd_len)
			return (B_FALSE);
	}
	break;
	}
	if (tptid_sz)
		*tptid_sz = tpd_len;
	return (B_TRUE);
}
8308 8309
8309 8310 boolean_t
8310 8311 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
8311 8312 scsi_transport_id_t *tpd2)
8312 8313 {
8313 8314 if ((tpd1->protocol_id != tpd2->protocol_id) ||
8314 8315 (tpd1->format_code != tpd2->format_code))
8315 8316 return (B_FALSE);
8316 8317
8317 8318 switch (tpd1->protocol_id) {
8318 8319
8319 8320 case PROTOCOL_iSCSI:
8320 8321 {
8321 8322 iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
8322 8323 uint16_t len;
8323 8324
8324 8325 iscsitpd1 = (iscsi_transport_id_t *)tpd1;
8325 8326 iscsitpd2 = (iscsi_transport_id_t *)tpd2;
8326 8327 len = SCSI_READ16(&iscsitpd1->add_len);
8327 8328 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
8328 8329 (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
8329 8330 != 0))
8330 8331 return (B_FALSE);
8331 8332 }
8332 8333 break;
8333 8334
8334 8335 case PROTOCOL_SRP:
8335 8336 {
8336 8337 scsi_srp_transport_id_t *srptpd1, *srptpd2;
8337 8338
8338 8339 srptpd1 = (scsi_srp_transport_id_t *)tpd1;
8339 8340 srptpd2 = (scsi_srp_transport_id_t *)tpd2;
8340 8341 if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
8341 8342 sizeof (srptpd1->srp_name)) != 0)
8342 8343 return (B_FALSE);
8343 8344 }
8344 8345 break;
8345 8346
8346 8347 case PROTOCOL_FIBRE_CHANNEL:
8347 8348 {
8348 8349 scsi_fc_transport_id_t *fctpd1, *fctpd2;
8349 8350
8350 8351 fctpd1 = (scsi_fc_transport_id_t *)tpd1;
8351 8352 fctpd2 = (scsi_fc_transport_id_t *)tpd2;
8352 8353 if (memcmp(fctpd1->port_name, fctpd2->port_name,
8353 8354 sizeof (fctpd1->port_name)) != 0)
8354 8355 return (B_FALSE);
8355 8356 }
8356 8357 break;
8357 8358
8358 8359 case PROTOCOL_PARALLEL_SCSI:
8359 8360 case PROTOCOL_SSA:
8360 8361 case PROTOCOL_IEEE_1394:
8361 8362 case PROTOCOL_SAS:
8362 8363 case PROTOCOL_ADT:
8363 8364 case PROTOCOL_ATAPI:
8364 8365 default:
8365 8366 {
8366 8367 stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
8367 8368 uint16_t len;
8368 8369
8369 8370 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
8370 8371 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
8371 8372 len = SCSI_READ16(&dflt1->ident_len);
8372 8373 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
8373 8374 (memcmp(dflt1->ident, dflt2->ident, len) != 0))
8374 8375 return (B_FALSE);
8375 8376 }
8376 8377 break;
8377 8378 }
8378 8379 return (B_TRUE);
8379 8380 }
8380 8381
/*
 * Changes devid_desc to corresponding TransportID format
 * Returns :- pointer to stmf_remote_port_t on success, NULL if the
 *            devid identifier string cannot be parsed
 * Note :- Allocates continous memory for stmf_remote_port_t and TransportID,
 *	   This memory need to be freed (via stmf_remote_port_free) when
 *	   this remote_port is no longer used.
 */
stmf_remote_port_t *
stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
{
	struct scsi_fc_transport_id	*fc_tpd;
	struct iscsi_transport_id	*iscsi_tpd;
	struct scsi_srp_transport_id	*srp_tpd;
	struct stmf_dflt_scsi_tptid	*dflt_tpd;
	uint16_t ident_len,	sz = 0;
	stmf_remote_port_t *rpt = NULL;

	ident_len = devid->ident_length;
	ASSERT(ident_len);
	switch (devid->protocol_id) {
	case PROTOCOL_FIBRE_CHANNEL:
		sz = sizeof (scsi_fc_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "wwn."
		 * Length check: 16 hex chars (2 per binary byte) + the
		 * 4-byte "wwn." prefix must fit in ident_len.
		 */
		ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
		if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident + 4,
		    SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_iSCSI:
		/*
		 * iSCSI names are variable-length; the TransportID embeds
		 * the name inline (the -1 accounts for iscsi_name[1] already
		 * being part of the struct).  Round up so the whole
		 * allocation stays 8-byte aligned.
		 */
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
		SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
		(void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
		break;

	case PROTOCOL_SRP:
		sz = sizeof (scsi_srp_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "eui."
		 * Assume 8-byte initiator-extension part of srp_name is NOT
		 * stored in devid and hence will be set as zero
		 *
		 * NOTE(review): the length check below only requires the
		 * 8-byte EUI portion (16 hex chars), yet the conversion call
		 * decodes SCSI_TPTID_SRP_PORT_NAME_LEN bytes (32 hex chars)
		 * from the ident string — looks like it could read past a
		 * minimum-length ident (the NUL would make the decode fail).
		 * Verify intended ident format for SRP before relying on it.
		 */
		ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
		if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident+4,
		    SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default :
		/*
		 * No protocol-specific layout: use the default STMF format
		 * that carries the raw identifier bytes plus a length field.
		 */
		ident_len = devid->ident_length;
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
		SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
		(void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
		break;
	}
	return (rpt);

devid_to_remote_port_fail:
	/* Unified cleanup: free the partially-built remote port. */
	stmf_remote_port_free(rpt);
	return (NULL);

}
8473 8474
8474 8475 stmf_remote_port_t *
8475 8476 stmf_remote_port_alloc(uint16_t tptid_sz) {
8476 8477 stmf_remote_port_t *rpt;
8477 8478 rpt = (stmf_remote_port_t *)kmem_zalloc(
8478 8479 sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8479 8480 rpt->rport_tptid_sz = tptid_sz;
8480 8481 rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8481 8482 return (rpt);
8482 8483 }
8483 8484
8484 8485 void
8485 8486 stmf_remote_port_free(stmf_remote_port_t *rpt)
8486 8487 {
8487 8488 /*
8488 8489 * Note: stmf_scsilib_devid_to_remote_port() function allocates
8489 8490 * remote port structures for all transports in the same way, So
8490 8491 * it is safe to deallocate it in a protocol independent manner.
8491 8492 * If any of the allocation method changes, corresponding changes
8492 8493 * need to be made here too.
8493 8494 */
8494 8495 kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
8495 8496 }
↓ open down ↓ |
2342 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX