                    VDEV_PAD_SIZE, zio_buf_alloc(VDEV_PAD_SIZE),
                    ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
                    ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
        }

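        /*
         * If the caller did not pass in a zio, hand the probe zio back so
         * the caller can wait on it; otherwise issue it asynchronously.
         */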
        if (zio == NULL)
                return (pio);

        zio_nowait(pio);
        return (NULL);
}

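/*
 * Taskq callback used by vdev_open_children(): open a single child vdev,
 * recording the opening thread so that vdev_open()'s locking assertion
 * holds, then fold the child's non-rotational status into its parent's.
 */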
static void
vdev_open_child(void *arg)
{
        vdev_t *vd = arg;

        vd->vdev_open_thread = curthread;
        vd->vdev_open_error = vdev_open(vd);
        vd->vdev_open_thread = NULL;
        vd->vdev_parent->vdev_nonrot &= vd->vdev_nonrot;
}

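/*
 * Returns B_TRUE if this vdev, or any of its descendants, is backed by
 * a zvol, i.e. its device path lives under ZVOL_DIR.
 */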
boolean_t
vdev_uses_zvols(vdev_t *vd)
{
        if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
            strlen(ZVOL_DIR)) == 0)
                return (B_TRUE);
        for (int c = 0; c < vd->vdev_children; c++)
                if (vdev_uses_zvols(vd->vdev_child[c]))
                        return (B_TRUE);
        return (B_FALSE);
}

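/*
 * Open all children of the given vdev, normally in parallel via a
 * temporary taskq, and compute the parent's vdev_nonrot as the logical
 * AND of its children's non-rotational status.
 */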
void
vdev_open_children(vdev_t *vd)
{
        taskq_t *tq;
        int children = vd->vdev_children;

        vd->vdev_nonrot = B_TRUE;

        /*
         * In order to handle pools on top of zvols, do the opens
         * in a single thread so that the same thread holds the
         * spa_namespace_lock.
         */
        if (vdev_uses_zvols(vd)) {
                for (int c = 0; c < children; c++) {
                        vd->vdev_child[c]->vdev_open_error =
                            vdev_open(vd->vdev_child[c]);
                        vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
                }
                return;
        }
        tq = taskq_create("vdev_open", children, minclsyspri,
            children, children, TASKQ_PREPOPULATE);

        for (int c = 0; c < children; c++)
                VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
                    TQ_SLEEP) != NULL);

        taskq_destroy(tq);

        for (int c = 0; c < children; c++)
                vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}

/*
 * Prepare a virtual device for access.
 */
int
vdev_open(vdev_t *vd)
{
        spa_t *spa = vd->vdev_spa;
        int error;
        uint64_t osize = 0;
        uint64_t max_osize = 0;
        uint64_t asize, max_asize, psize;
        uint64_t ashift = 0;

        ASSERT(vd->vdev_open_thread == curthread ||
            spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
        ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
            vd->vdev_state == VDEV_STATE_CANT_OPEN ||
            vd->vdev_state == VDEV_STATE_OFFLINE);