@@ -154,40 +154,41 @@ zio_init(void)
 			zio_data_buf_cache[c] = kmem_cache_create(name, size,
 			    align, NULL, NULL, NULL, NULL, data_alloc_arena,
 			    cflags | KMC_NOTOUCH);
 		}
 	}
 
 	while (--c != 0) {
 		ASSERT(zio_buf_cache[c] != NULL);
 		if (zio_buf_cache[c - 1] == NULL)
 			zio_buf_cache[c - 1] = zio_buf_cache[c];
 
 		ASSERT(zio_data_buf_cache[c] != NULL);
 		if (zio_data_buf_cache[c - 1] == NULL)
 			zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
 	}
 
 	/*
 	 * The zio write taskqs have 1 thread per cpu, allow 1/2 of the taskqs
 	 * to fail 3 times per txg or 8 failures, whichever is greater.
 	 */
-	zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
+	if (zfs_mg_alloc_failures == 0)
+		zfs_mg_alloc_failures = MAX((3 * max_ncpus / 2), 8);
 
 	zio_inject_init();
 }
 
 void
 zio_fini(void)
 {
 	size_t c;
 	kmem_cache_t *last_cache = NULL;
 	kmem_cache_t *last_data_cache = NULL;
 
 	for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
 		if (zio_buf_cache[c] != last_cache) {
 			last_cache = zio_buf_cache[c];
 			kmem_cache_destroy(zio_buf_cache[c]);
 		}
 		zio_buf_cache[c] = NULL;
 
 		if (zio_data_buf_cache[c] != last_data_cache) {
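The only functional change in the hunk above is the new zfs_mg_alloc_failures == 0 guard: the default is now computed only when the variable is still zero, so a value the administrator supplies before zio_init() runs (e.g. as a loadable-module tunable) is no longer clobbered. The arithmetic follows the comment: one write-taskq thread per CPU, half of them allowed to fail three times each, gives 3 * max_ncpus / 2, with a floor of 8. A 4-CPU machine therefore gets MAX(6, 8) = 8, a 32-CPU machine gets 48. Below is a minimal sketch of this computed-default pattern; every name in it is invented for illustration, nothing in it comes from zio.c.

#include <stdio.h>

#define	MAX(a, b)	((a) > (b) ? (a) : (b))

/* 0 means "not set by the administrator": derive a default at init. */
static int tunable_alloc_failures = 0;

static void
tunable_init(int ncpus)
{
	/* Preserve an explicit setting; only fill in the default. */
	if (tunable_alloc_failures == 0)
		tunable_alloc_failures = MAX(3 * ncpus / 2, 8);
}

int
main(void)
{
	tunable_init(4);
	printf("%d\n", tunable_alloc_failures);	/* prints 8: the floor wins */
	return (0);
}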
@@ -2348,41 +2349,41 @@ zio_alloc_zil(spa_t *spa, uint64_t txg, blkptr_t *new_bp, blkptr_t *old_bp,
     uint64_t size, boolean_t use_slog)
 {
 	int error = 1;
 
 	ASSERT(txg > spa_syncing_txg(spa));
 
 	/*
 	 * ZIL blocks are always contiguous (i.e. not gang blocks) so we
 	 * set the METASLAB_GANG_AVOID flag so that they don't "fast gang"
 	 * when allocating them.
 	 */
 	if (use_slog) {
 		error = metaslab_alloc(spa, spa_log_class(spa), size,
 		    new_bp, 1, txg, old_bp,
 		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
 	}
 
 	if (error) {
 		error = metaslab_alloc(spa, spa_normal_class(spa), size,
 		    new_bp, 1, txg, old_bp,
-		    METASLAB_HINTBP_AVOID | METASLAB_GANG_AVOID);
+		    METASLAB_HINTBP_AVOID);
 	}
 
 	if (error == 0) {
 		BP_SET_LSIZE(new_bp, size);
 		BP_SET_PSIZE(new_bp, size);
 		BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
 		BP_SET_CHECKSUM(new_bp,
 		    spa_version(spa) >= SPA_VERSION_SLIM_ZIL
 		    ? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
 		BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
 		BP_SET_LEVEL(new_bp, 0);
 		BP_SET_DEDUP(new_bp, 0);
 		BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
 	}
 
 	return (error);
 }
 
 /*
  * Free an intent log block.
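Here, too, only one line changes: the normal-class retry drops METASLAB_GANG_AVOID. Going by the comment inside the function, that flag keeps a ZIL allocation from being treated as a "fast gang" candidate, i.e. from the allocator giving up early on struggling metaslab groups; removing it from the last-resort attempt presumably lets that attempt fail fast on a fragmented pool rather than search exhaustively, which is survivable because the ZIL can fall back to waiting for the transaction group to sync. The slog attempt keeps both flags. Note also why error starts at 1: when use_slog is B_FALSE the first attempt is skipped entirely, and the nonzero value routes the request straight to the normal class. Below is a compact sketch of this try-strict/retry-relaxed shape; the names are stand-ins, not the real metaslab API.

#define	HINTBP_AVOID	0x1
#define	GANG_AVOID	0x2

enum { CLASS_LOG, CLASS_NORMAL };

/* Stand-in for metaslab_alloc(); returns 0 on success. */
extern int class_alloc(int class, unsigned int flags);

static int
log_block_alloc(int use_slog)
{
	int error = 1;	/* a skipped first attempt counts as a failure */

	if (use_slog)
		error = class_alloc(CLASS_LOG, HINTBP_AVOID | GANG_AVOID);

	/* Relaxed retry: GANG_AVOID is intentionally omitted. */
	if (error)
		error = class_alloc(CLASS_NORMAL, HINTBP_AVOID);

	return (error);
}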