Print this page
7938: Port ZOL #3712 — disable LBA weighting on files and SSDs


1092                     abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1093                     ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1094                     ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1095         }
1096 
1097         if (zio == NULL)
1098                 return (pio);
1099 
1100         zio_nowait(pio);
1101         return (NULL);
1102 }
1103 
1104 static void
1105 vdev_open_child(void *arg)
1106 {
1107         vdev_t *vd = arg;
1108 
1109         vd->vdev_open_thread = curthread;
1110         vd->vdev_open_error = vdev_open(vd);
1111         vd->vdev_open_thread = NULL;

1112 }
1113 
1114 boolean_t
1115 vdev_uses_zvols(vdev_t *vd)
1116 {
1117         if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1118             strlen(ZVOL_DIR)) == 0)
1119                 return (B_TRUE);
1120         for (int c = 0; c < vd->vdev_children; c++)
1121                 if (vdev_uses_zvols(vd->vdev_child[c]))
1122                         return (B_TRUE);
1123         return (B_FALSE);
1124 }
1125 
1126 void
1127 vdev_open_children(vdev_t *vd)
1128 {
1129         taskq_t *tq;
1130         int children = vd->vdev_children;
1131 


1132         /*
1133          * in order to handle pools on top of zvols, do the opens
1134          * in a single thread so that the same thread holds the
1135          * spa_namespace_lock
1136          */
1137         if (vdev_uses_zvols(vd)) {
1138                 for (int c = 0; c < children; c++)
1139                         vd->vdev_child[c]->vdev_open_error =
1140                             vdev_open(vd->vdev_child[c]);


1141                 return;
1142         }
1143         tq = taskq_create("vdev_open", children, minclsyspri,
1144             children, children, TASKQ_PREPOPULATE);
1145 
1146         for (int c = 0; c < children; c++)
1147                 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1148                     TQ_SLEEP) != NULL);
1149 
1150         taskq_destroy(tq);



1151 }
1152 
1153 /*
1154  * Prepare a virtual device for access.
1155  */
1156 int
1157 vdev_open(vdev_t *vd)
1158 {
1159         spa_t *spa = vd->vdev_spa;
1160         int error;
1161         uint64_t osize = 0;
1162         uint64_t max_osize = 0;
1163         uint64_t asize, max_asize, psize;
1164         uint64_t ashift = 0;
1165 
1166         ASSERT(vd->vdev_open_thread == curthread ||
1167             spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1168         ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1169             vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1170             vd->vdev_state == VDEV_STATE_OFFLINE);




1092                     abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
1093                     ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
1094                     ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
1095         }
1096 
1097         if (zio == NULL)
1098                 return (pio);
1099 
1100         zio_nowait(pio);
1101         return (NULL);
1102 }
1103 
1104 static void
1105 vdev_open_child(void *arg)
1106 {
1107         vdev_t *vd = arg;
1108 
1109         vd->vdev_open_thread = curthread;
1110         vd->vdev_open_error = vdev_open(vd);
1111         vd->vdev_open_thread = NULL;
1112         vd->vdev_parent->vdev_nonrot &= vd->vdev_nonrot;
1113 }
1114 
1115 boolean_t
1116 vdev_uses_zvols(vdev_t *vd)
1117 {
1118         if (vd->vdev_path && strncmp(vd->vdev_path, ZVOL_DIR,
1119             strlen(ZVOL_DIR)) == 0)
1120                 return (B_TRUE);
1121         for (int c = 0; c < vd->vdev_children; c++)
1122                 if (vdev_uses_zvols(vd->vdev_child[c]))
1123                         return (B_TRUE);
1124         return (B_FALSE);
1125 }
1126 
1127 void
1128 vdev_open_children(vdev_t *vd)
1129 {
1130         taskq_t *tq;
1131         int children = vd->vdev_children;
1132 
1133         vd->vdev_nonrot = B_TRUE;
1134 
1135         /*
1136          * in order to handle pools on top of zvols, do the opens
1137          * in a single thread so that the same thread holds the
1138          * spa_namespace_lock
1139          */
1140         if (vdev_uses_zvols(vd)) {
1141                 for (int c = 0; c < children; c++) {
1142                         vd->vdev_child[c]->vdev_open_error =
1143                             vdev_open(vd->vdev_child[c]);
1144                         vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
1145                 }
1146                 return;
1147         }
1148         tq = taskq_create("vdev_open", children, minclsyspri,
1149             children, children, TASKQ_PREPOPULATE);
1150 
1151         for (int c = 0; c < children; c++)
1152                 VERIFY(taskq_dispatch(tq, vdev_open_child, vd->vdev_child[c],
1153                     TQ_SLEEP) != NULL);
1154 
1155         taskq_destroy(tq);
1156 
1157         for (int c = 0; c < children; c++)
1158                 vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
1159 }
1160 
1161 /*
1162  * Prepare a virtual device for access.
1163  */
1164 int
1165 vdev_open(vdev_t *vd)
1166 {
1167         spa_t *spa = vd->vdev_spa;
1168         int error;
1169         uint64_t osize = 0;
1170         uint64_t max_osize = 0;
1171         uint64_t asize, max_asize, psize;
1172         uint64_t ashift = 0;
1173 
1174         ASSERT(vd->vdev_open_thread == curthread ||
1175             spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
1176         ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
1177             vd->vdev_state == VDEV_STATE_CANT_OPEN ||
1178             vd->vdev_state == VDEV_STATE_OFFLINE);