Print this page
7364 NVMe driver performance can be improved by caching nvme_dma_t structs for PRPL.
Reviewed by: Hans Rosenfeld <hans.rosenfeld@nexenta.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Garrett D'Amore <garrett@lucera.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/nvme/nvme_var.h
+++ new/usr/src/uts/common/io/nvme/nvme_var.h
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
14 + * Copyright 2016 The MathWorks, Inc. All rights reserved.
14 15 */
15 16
16 17 #ifndef _NVME_VAR_H
17 18 #define _NVME_VAR_H
18 19
19 20 #include <sys/ddi.h>
20 21 #include <sys/sunddi.h>
21 22 #include <sys/blkdev.h>
22 23 #include <sys/taskq_impl.h>
23 24
24 25 /*
25 26 * NVMe driver state
26 27 */
27 28
28 29 #ifdef __cplusplus
29 30 /* extern "C" { */
30 31 #endif
31 32
/*
 * Single-bit values below are presumably progress flags OR-ed into
 * nvme_t`n_progress as attach proceeds — confirm against nvme.c.
 */
32 33 #define NVME_FMA_INIT 0x1
33 34 #define NVME_REGS_MAPPED 0x2
34 35 #define NVME_ADMIN_QUEUE 0x4
35 36 #define NVME_CTRL_LIMITS 0x8
36 37 #define NVME_INTERRUPTS 0x10
37 38
/* Bounds and defaults for queue lengths and the async event request limit. */
38 39 #define NVME_MIN_ADMIN_QUEUE_LEN 16
39 40 #define NVME_MIN_IO_QUEUE_LEN 16
40 41 #define NVME_DEFAULT_ADMIN_QUEUE_LEN 256
41 42 #define NVME_DEFAULT_IO_QUEUE_LEN 1024
42 43 #define NVME_DEFAULT_ASYNC_EVENT_LIMIT 10
43 44 #define NVME_MIN_ASYNC_EVENT_LIMIT 1
44 45
45 46
/* Forward typedefs for the driver state structures defined later in this file. */
46 47 typedef struct nvme nvme_t;
47 48 typedef struct nvme_namespace nvme_namespace_t;
48 49 typedef struct nvme_dma nvme_dma_t;
49 50 typedef struct nvme_cmd nvme_cmd_t;
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
50 51 typedef struct nvme_qpair nvme_qpair_t;
51 52 typedef struct nvme_task_arg nvme_task_arg_t;
52 53
/*
 * A single DMA-able memory allocation: the DDI handles, the (first) cookie
 * describing it, and its kernel mapping.
 */
53 54 struct nvme_dma { /* DMA memory descriptor */
54 55 ddi_dma_handle_t nd_dmah; /* DDI DMA handle */
55 56 ddi_acc_handle_t nd_acch; /* DDI access handle for the mapping */
56 57 ddi_dma_cookie_t nd_cookie; /* DMA cookie for the buffer */
57 58 uint_t nd_ncookie; /* number of cookies in the binding */
58 59 caddr_t nd_memp; /* kernel virtual address of the buffer */
59 60 size_t nd_len; /* buffer length in bytes */
61 + boolean_t nd_cached; /* came from n_prp_cache (PRP list cache, bug 7364); return there, don't free */
60 62 };
61 63
/*
 * A single NVMe command: the submission queue entry sent to the controller
 * and the completion queue entry received back, plus driver bookkeeping.
 */
62 64 struct nvme_cmd {
63 65 nvme_sqe_t nc_sqe; /* submission queue entry */
64 66 nvme_cqe_t nc_cqe; /* completion queue entry */
65 67
66 68 void (*nc_callback)(void *); /* called on completion */
67 69 bd_xfer_t *nc_xfer; /* associated blkdev transfer, if any */
68 70 boolean_t nc_completed; /* command has completed */
69 71 uint16_t nc_sqid; /* id of the queue this was submitted on */
70 72
71 73 nvme_dma_t *nc_dma; /* DMA memory for this command (e.g. PRP list) */
72 74
/* nc_mutex/nc_cv presumably protect/signal nc_completed for sync waiters — confirm in nvme.c */
73 75 kmutex_t nc_mutex;
74 76 kcondvar_t nc_cv;
75 77
76 78 taskq_ent_t nc_tqent; /* taskq entry for completion processing */
77 79 nvme_t *nc_nvme; /* back pointer to the owning controller */
78 80 };
79 81
/*
 * A submission/completion queue pair: DMA memory for both rings, head/tail
 * indices, doorbell offsets, and the table of outstanding commands.
 */
80 82 struct nvme_qpair {
81 83 size_t nq_nentry; /* number of entries in each ring */
82 84
83 85 nvme_dma_t *nq_sqdma; /* DMA memory backing the submission queue */
84 86 nvme_sqe_t *nq_sq; /* submission queue ring */
85 87 uint_t nq_sqhead; /* SQ head index */
86 88 uint_t nq_sqtail; /* SQ tail index */
87 89 uintptr_t nq_sqtdbl; /* SQ tail doorbell (register offset) */
88 90
89 91 nvme_dma_t *nq_cqdma; /* DMA memory backing the completion queue */
90 92 nvme_cqe_t *nq_cq; /* completion queue ring */
91 93 uint_t nq_cqhead; /* CQ head index */
92 94 uint_t nq_cqtail; /* CQ tail index */
93 95 uintptr_t nq_cqhdbl; /* CQ head doorbell (register offset) */
94 96
95 97 nvme_cmd_t **nq_cmd; /* outstanding commands, indexed by command id */
96 98 uint16_t nq_next_cmd; /* hint for next free nq_cmd slot */
97 99 uint_t nq_active_cmds; /* number of commands in flight */
98 100 int nq_phase; /* expected CQE phase tag (per NVMe spec) */
99 101
100 102 kmutex_t nq_mutex; /* protects this queue pair */
↓ open down ↓ |
31 lines elided |
↑ open up ↑ |
101 103 };
102 104
/*
 * Per-controller soft state: DDI handles, caches, interrupt state,
 * configuration read from the controller, queue pairs, namespaces,
 * and error/event counters.
 */
103 105 struct nvme {
104 106 dev_info_t *n_dip; /* devinfo node */
105 107 int n_progress; /* attach progress flags (NVME_* above) */
106 108
107 109 caddr_t n_regs; /* mapped controller register space */
108 110 ddi_acc_handle_t n_regh;
109 111
110 112 kmem_cache_t *n_cmd_cache; /* cache of nvme_cmd_t */
113 + kmem_cache_t *n_prp_cache; /* cache of nvme_dma_t for PRP lists (bug 7364) */
111 114
112 115 size_t n_inth_sz;
113 116 ddi_intr_handle_t *n_inth;
114 117 int n_intr_cnt;
115 118 uint_t n_intr_pri;
116 119 int n_intr_cap;
117 120 int n_intr_type;
118 121 int n_intr_types;
119 122
120 123 char *n_product;
121 124 char *n_vendor;
122 125
123 126 boolean_t n_dead; /* controller considered unusable */
124 127 boolean_t n_strict_version;
125 128 boolean_t n_ignore_unknown_vendor_status;
126 129 uint32_t n_admin_queue_len;
127 130 uint32_t n_io_queue_len;
128 131 uint16_t n_async_event_limit;
129 132 uint16_t n_abort_command_limit;
130 133 uint64_t n_max_data_transfer_size;
131 134 boolean_t n_volatile_write_cache_enabled;
132 135 int n_error_log_len;
133 136
/* capabilities/limits presumably read from controller registers — confirm in nvme.c */
134 137 int n_nssr_supported;
135 138 int n_doorbell_stride;
136 139 int n_timeout;
137 140 int n_arbitration_mechanisms;
138 141 int n_cont_queues_reqd;
139 142 int n_max_queue_entries;
140 143 int n_pageshift;
141 144 int n_pagesize;
142 145
143 146 int n_namespace_count;
144 147 int n_ioq_count;
145 148
146 149 nvme_identify_ctrl_t *n_idctl; /* IDENTIFY CONTROLLER data */
147 150
148 151 nvme_qpair_t *n_adminq; /* admin queue pair */
149 152 nvme_qpair_t **n_ioq; /* I/O queue pairs, n_ioq_count entries */
150 153
151 154 nvme_namespace_t *n_ns; /* namespace array, n_namespace_count entries */
152 155
153 156 ddi_dma_attr_t n_queue_dma_attr;
154 157 ddi_dma_attr_t n_prp_dma_attr;
155 158 ddi_dma_attr_t n_sgl_dma_attr;
156 159 ddi_device_acc_attr_t n_reg_acc_attr;
157 160 ddi_iblock_cookie_t n_fm_ibc;
158 161 int n_fm_cap;
159 162
160 163 ksema_t n_abort_sema; /* presumably limits concurrent aborts to n_abort_command_limit — confirm */
161 164
162 165 ddi_taskq_t *n_cmd_taskq;
163 166
164 167 nvme_error_log_entry_t *n_error_log;
165 168 nvme_health_log_t *n_health_log;
166 169 nvme_fwslot_log_t *n_fwslot_log;
167 170
168 171 /* errors detected by driver */
169 172 uint32_t n_dma_bind_err;
170 173 uint32_t n_abort_failed;
171 174 uint32_t n_cmd_timeout;
172 175 uint32_t n_cmd_aborted;
173 176 uint32_t n_async_resubmit_failed;
174 177 uint32_t n_wrong_logpage;
175 178 uint32_t n_unknown_logpage;
176 179 uint32_t n_too_many_cookies;
177 180 uint32_t n_admin_queue_full;
178 181
179 182 /* errors detected by hardware */
180 183 uint32_t n_data_xfr_err;
181 184 uint32_t n_internal_err;
182 185 uint32_t n_abort_rq_err;
183 186 uint32_t n_abort_sq_del;
184 187 uint32_t n_nvm_cap_exc;
185 188 uint32_t n_nvm_ns_notrdy;
186 189 uint32_t n_inv_cq_err;
187 190 uint32_t n_inv_qid_err;
188 191 uint32_t n_max_qsz_exc;
189 192 uint32_t n_inv_int_vect;
190 193 uint32_t n_inv_log_page;
191 194 uint32_t n_inv_format;
192 195 uint32_t n_inv_q_del;
193 196 uint32_t n_cnfl_attr;
194 197 uint32_t n_inv_prot;
195 198 uint32_t n_readonly;
196 199
197 200 /* errors reported by asynchronous events */
198 201 uint32_t n_diagfail_event;
199 202 uint32_t n_persistent_event;
200 203 uint32_t n_transient_event;
201 204 uint32_t n_fw_load_event;
202 205 uint32_t n_reliability_event;
203 206 uint32_t n_temperature_event;
204 207 uint32_t n_spare_event;
205 208 uint32_t n_vendor_event;
206 209 uint32_t n_unknown_event;
207 210
208 211 };
209 212
/*
 * Per-namespace state: the blkdev handle exposing it, its geometry, and
 * the IDENTIFY NAMESPACE data it was derived from.
 */
210 213 struct nvme_namespace {
211 214 nvme_t *ns_nvme; /* back pointer to the owning controller */
212 215 bd_handle_t ns_bd_hdl; /* blkdev handle for this namespace */
213 216
214 217 uint32_t ns_id; /* namespace id */
215 218 size_t ns_block_count;
216 219 size_t ns_block_size;
217 220 size_t ns_best_block_size;
218 221
219 222 boolean_t ns_ignore; /* namespace not exposed (reason determined elsewhere — see nvme.c) */
220 223
221 224 nvme_identify_nsid_t *ns_idns; /* IDENTIFY NAMESPACE data */
222 225
223 226 /*
224 227 * Section 7.7 of the spec describes how to get a unique ID for
225 228 * the controller: the vendor ID, the model name and the serial
226 229 * number shall be unique when combined.
227 230 *
228 231 * We add the hex namespace ID to get a unique ID for the namespace.
229 232 */
230 233 char ns_devid[4 + 1 + 20 + 1 + 40 + 1 + 8 + 1];
231 234 };
232 235
/* Argument bundle handed to taskq dispatch: the controller and the command. */
233 236 struct nvme_task_arg {
234 237 nvme_t *nt_nvme; /* controller the command belongs to */
235 238 nvme_cmd_t *nt_cmd; /* command to process */
236 239 };
237 240
238 241 #ifdef __cplusplus
239 242 /* } */
240 243 #endif
241 244
242 245 #endif /* _NVME_VAR_H */
↓ open down ↓ |
122 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX