	kmutex_t lock;
	ddi_dma_handle_t dhdl;
	ddi_acc_handle_t ahdl;

	__be64 *desc;		/* KVA of descriptor ring, ptr to addresses */
	uint64_t ba;		/* bus address of descriptor ring */
	struct fl_sdesc *sdesc;	/* KVA of software descriptor ring */
	uint32_t cap;		/* max # of buffers, for convenience */
	uint16_t qsize;		/* size (# of entries) of the queue */
	uint16_t cntxt_id;	/* SGE context id for the freelist */
	uint32_t cidx;		/* consumer idx (buffer idx, NOT hw desc idx) */
	uint32_t pidx;		/* producer idx (buffer idx, NOT hw desc idx) */
	uint32_t needed;	/* # of buffers needed to fill up fl. */
	uint32_t lowat;		/* # of buffers <= this means fl needs help */
	uint32_t pending;	/* # of bufs allocated since last doorbell */
	uint32_t offset;	/* current packet within the larger buffer */
	uint16_t copy_threshold; /* anything this size or less is copied up */

	uint64_t copied_up;	/* # of frames copied into mblk and handed up */
	uint64_t passed_up;	/* # of frames wrapped in mblk and handed up */
	uint64_t allocb_fail;	/* # of mblk allocation failures */

	TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
};
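
/*
 * Illustrative sketch only (not code from this driver): how the
 * copy_threshold, copied_up, passed_up, and allocb_fail fields above are
 * meant to interact when a received frame is handed up as an mblk.  It
 * assumes the usual STREAMS interfaces from <sys/stream.h>; the rx_buf
 * structure and its va/frtn fields are assumptions made for this example.
 */
struct rx_buf {
	caddr_t va;		/* KVA of the DMA rx buffer */
	frtn_t frtn;		/* free routine passed to desballoc() */
};

static mblk_t *
fl_frame_to_mblk(struct sge_fl *fl, struct rx_buf *rxb, uint_t len)
{
	mblk_t *m;

	if (len <= fl->copy_threshold) {
		/* Small frame: copy into a fresh mblk so the buffer recycles. */
		if ((m = allocb(len, BPRI_HI)) == NULL) {
			fl->allocb_fail++;
			return (NULL);
		}
		bcopy(rxb->va + fl->offset, m->b_wptr, len);
		fl->copied_up++;
	} else {
		/* Large frame: wrap the DMA buffer itself in an mblk. */
		m = desballoc((unsigned char *)rxb->va + fl->offset, len,
		    BPRI_HI, &rxb->frtn);
		if (m == NULL) {
			fl->allocb_fail++;
			return (NULL);
		}
		fl->passed_up++;
	}
	m->b_wptr += len;
	return (m);
}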

/* txq: SGE egress queue + miscellaneous items */
struct sge_txq {
	struct sge_eq eq;	/* MUST be first */

	struct port_info *port;	/* the port this txq belongs to */
	struct tx_sdesc *sdesc;	/* KVA of software descriptor ring */
	mac_ring_handle_t ring_handle;

	/* DMA handles used for tx */
	ddi_dma_handle_t *tx_dhdl;
	uint32_t tx_dhdl_total;	/* Total # of handles */
	uint32_t tx_dhdl_pidx;	/* next handle to be used */
	uint32_t tx_dhdl_cidx;	/* reclaimed up to this index */
	uint32_t tx_dhdl_avail;	/* # of available handles */
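
	/*
	 * The tx_dhdl[] array is effectively a circular ring of pre-allocated
	 * DMA handles: tx_dhdl_pidx points at the next handle to use for an
	 * outgoing frame, tx_dhdl_cidx trails it as completed transmits are
	 * reclaimed, and tx_dhdl_avail counts the handles free in between.
	 */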

	/* Copy buffers for tx */