7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l4/lm_l4tx.c
+++ new/usr/src/uts/common/io/bnxe/577xx/drivers/common/lm/l4/lm_l4tx.c
1 1
2 2 #include "lm5710.h"
[ 2 lines elided ]
3 3 #include "bd_chain.h"
4 4 #include "command.h"
5 5 #include "context.h"
6 6 #include "lm_l4fp.h"
7 7 #include "lm_l4sp.h"
8 8 #include "mm_l4if.h"
9 9
10 10
11 11 /* TODO: remove this temporary solution for solaris / linux compilation conflict, linux needs the
12 12 * first option, solaris the latter */
13 -#if defined(__LINUX)
13 +#if defined(__LINUX) || defined(__LITTLE_ENDIAN)
14 14 #define TOE_TX_INIT_ZERO {{0}}
15 15 #else
16 16 #define TOE_TX_INIT_ZERO {0}
17 17 #endif
18 18
19 19 #define TOE_TX_DOORBELL(pdev,cid) do{\
20 20 struct doorbell db = TOE_TX_INIT_ZERO;\
21 21 db.header.data |= (TOE_CONNECTION_TYPE << DOORBELL_HDR_T_CONN_TYPE_SHIFT);\
22 22 DOORBELL((pdev), (cid), *((u32_t *)&db));\
23 23 } while(0)
24 24
25 25 static __inline void _lm_tcp_tx_write_db(
26 26 lm_device_t * pdev,
27 27 lm_tcp_con_t * tx_con,
28 28 u32_t cid,
29 29 u32_t nbytes,
30 30 u16_t nbds,
31 31 u8_t fin)
32 32 {
33 33 volatile struct toe_tx_db_data *db_data = tx_con->db_data.tx;
34 34
35 35 db_data->bds_prod += nbds; /* nbds should be written before nbytes (FW assumption) */
36 36 DbgBreakIf((db_data->bds_prod & 0xff) == 0);
37 37 db_data->bytes_prod_seq += nbytes;
38 38
39 39 if(fin) {
40 40 DbgBreakIf(db_data->flags & (TOE_TX_DB_DATA_FIN << TOE_TX_DB_DATA_FIN_SHIFT));
41 41 db_data->flags |= (TOE_TX_DB_DATA_FIN << TOE_TX_DB_DATA_FIN_SHIFT);
42 42 }
43 43
44 44 if (!(tx_con->flags & TCP_TX_DB_BLOCKED)) {
45 45 DbgMessage(pdev, INFORMl4tx,
46 46 "ringing tx doorbell: cid=%d, (nbytes+=%d, nbds+=%d, fin=%d)\n",
47 47 cid, nbytes, nbds, fin);
48 48 TOE_TX_DOORBELL(pdev, cid);
49 49 }
50 50 }
51 51
52 52 static __inline void lm_tcp_tx_write_db(
53 53 lm_device_t *pdev,
54 54 lm_tcp_state_t *tcp,
55 55 u8_t post_end)
56 56 {
57 57 lm_tcp_con_t *tx_con = tcp->tx_con;
58 58
59 59 /* define a policy for ringing the doorbell */
60 60 #define MAX_BYTES_PER_TX_DB 0xffff
61 61 #define MAX_BDS_PER_TX_DB 64
62 62
63 63 if (post_end ||
64 64 tx_con->db_more_bytes >= MAX_BYTES_PER_TX_DB ||
65 65 tx_con->db_more_bds >= MAX_BDS_PER_TX_DB) {
66 66 _lm_tcp_tx_write_db(pdev, tx_con, tcp->cid, tx_con->db_more_bytes, tx_con->db_more_bds, 0);
67 67
68 68 /* assert if the new addition will make the cyclic counter post_cnt smaller than comp_cnt */
69 69 DbgBreakIf(S64_SUB(tx_con->bytes_post_cnt + tx_con->db_more_bytes, tx_con->bytes_comp_cnt) < 0);
70 70 tx_con->bytes_post_cnt += tx_con->db_more_bytes;
71 71 tx_con->buffer_post_cnt += tx_con->db_more_bufs;
72 72 tx_con->db_more_bytes = tx_con->db_more_bds = tx_con->db_more_bufs = 0;
73 73 tx_con->fp_db_cnt++;
74 74 } else {
75 75 DbgMessage(pdev, INFORMl4tx,
76 76 "skipped doorbell ringing for cid=%d\n", tcp->cid);
77 77 }
78 78 }
79 79
80 80 /** Description:
81 81 * Post a single tcp buffer to the Tx bd chain
82 82 * Assumptions:
83 83 * - caller initialized tcp_buf->flags field with BUFFER_START/BUFFER_END appropriately
84 84 * Returns:
85 85 * - SUCCESS - tcp buf was successfully attached to the bd chain
86 86 * - RESOURCE - not enough available BDs on bd chain for given tcp buf
87 87 * - CONNECTION_CLOSED - whenever connection's flags are marked as 'POST BLOCKED' */
88 88 lm_status_t lm_tcp_tx_post_buf(
89 89 struct _lm_device_t *pdev,
90 90 lm_tcp_state_t *tcp,
91 91 lm_tcp_buffer_t *tcp_buf,
92 92 lm_frag_list_t *frag_list)
93 93 {
94 94 lm_tcp_con_t *tx_con;
95 95 lm_bd_chain_t *tx_chain;
96 96 struct toe_tx_bd *tx_bd = NULL ;
97 97 lm_frag_t *frag;
98 98 u32_t i, dbg_buf_size = 0;
99 99 u32_t dbg_bytes_prod_seq;
100 100 u16_t old_prod, new_prod;
101 101
102 102 DbgMessage(pdev, VERBOSEl4tx, "###lm_tcp_tx_post_buf\n");
103 103 DbgBreakIf(!(pdev && tcp && tcp_buf && frag_list));
104 104 DbgBreakIf(tcp->cid && (tcp != lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, tcp->cid)));
105 105 DbgBreakIf(frag_list->cnt == 0);
106 106 tx_con = tcp->tx_con;
107 107 tx_chain = &tx_con->bd_chain;
108 108 frag = frag_list->frag_arr;
109 109
110 110 DbgBreakIf(tx_con->flags & TCP_FIN_REQ_POSTED);
111 111
112 112 /* check if tx con is already closed */
113 113 if(tx_con->flags & TCP_TX_POST_BLOCKED) {
114 114 DbgMessage(pdev, WARNl4tx, "post tx buf failed, posting is blocked (cid=%d, con->flags=%x)\n",
115 115 tcp->cid, tx_con->flags);
116 116 return LM_STATUS_CONNECTION_CLOSED;
117 117 }
118 118 /* check bd chain availability (including additional bd that should
119 119 * be kept available for future fin request) */
120 120 if(lm_bd_chain_avail_bds(tx_chain) < frag_list->cnt + 1) {
121 121 DbgMessage(pdev, INFORMl4tx, "post tx buf failed, tx chain is full (cid=%d, avail bds=%d, buf nfrags=%d)\n",
122 122 tcp->cid, lm_bd_chain_avail_bds(tx_chain), frag_list->cnt);
123 123
124 124 LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, tx_no_l4_bd);
125 125
126 126 if (tx_con->db_more_bds) {
127 127 /* if doorbell ringing was deferred (e.g. until an end of
128 128 * application buffer), it can no longer be deferred since
129 129 * the place in the bd chain is now required */
130 130 lm_tcp_tx_write_db(pdev, tcp, 1);
131 131 }
132 132 return LM_STATUS_RESOURCE;
133 133 }
134 134
135 135 old_prod = lm_bd_chain_prod_idx(tx_chain);
136 136
137 137 dbg_bytes_prod_seq = tx_con->db_data.tx->bytes_prod_seq + tx_con->db_more_bytes;
138 138 /* "attach" the frags to the bd chain */
139 139 for(i = 0; i < frag_list->cnt; i++, frag++) {
140 140 DbgBreakIf(frag->size > 0xffff || frag->size == 0); /* hw limit: each bd can point to a buffer with max size of 64KB */
141 141 tx_bd = (struct toe_tx_bd *)lm_toe_bd_chain_produce_bd(tx_chain);
142 142 tx_bd->addr_hi = frag->addr.as_u32.high;
143 143 tx_bd->addr_lo = frag->addr.as_u32.low;
144 144 tx_bd->flags = 0;
145 145 tx_bd->size = (u16_t)frag->size;
146 146 dbg_bytes_prod_seq += frag->size;
147 147 tx_bd->nextBdStartSeq = dbg_bytes_prod_seq;
148 148 dbg_buf_size += frag->size;
149 149
150 150 /* Support for FW Nagle Algorithm:
151 151 * This bit is to be set for every bd that is part of a tcp buffer equal to or larger than an mss.
152 152 */
153 153 if ((u32_t)frag_list->size >= tx_con->u.tx.mss) {
154 154 tx_bd->flags |= TOE_TX_BD_LARGE_IO;
155 155 }
156 156
157 157 DbgMessage(pdev, VERBOSEl4tx, "Setting Tx BD, addr_lo=0x%x, addr_hi=0x%x, size=%d\n",
158 158 tx_bd->addr_lo, tx_bd->addr_hi, tx_bd->size);
159 159 }
160 160
161 161 DbgBreakIf(frag_list->cnt > 0xffff);
162 162 tcp_buf->bd_used = frag_list->cnt & 0xffff;
163 163 tcp_buf->size = tcp_buf->more_to_comp = (u32_t)frag_list->size;
164 164 DbgBreakIf(tcp_buf->size != dbg_buf_size);
165 165
166 166 DbgBreakIf(!(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_START ?
167 167 tx_con->app_buf_bytes_acc_post == 0 :
168 168 tx_con->app_buf_bytes_acc_post > 0));
169 169 tx_con->app_buf_bytes_acc_post += tcp_buf->size;
170 170 tx_con->db_more_bytes += tcp_buf->size;
171 171 new_prod = lm_bd_chain_prod_idx(tx_chain);
172 172 DbgBreakIf(S16_SUB(new_prod, old_prod) < tcp_buf->bd_used);
173 173 tx_con->db_more_bds += S16_SUB(new_prod, old_prod);
174 174 tx_con->db_more_bufs++;
175 175
176 176 /* Support for FW Nagle Algorithm:
177 177 * This bit is to be set for every bd that is part of a tcp buffer equal to or larger than an mss.
178 178 */
179 179 if (tcp_buf->size >= tx_con->u.tx.mss) {
180 180 tx_bd->flags |= TOE_TX_BD_LARGE_IO;
181 181 }
182 182
183 183 /* special care in case of last tcp buffer of an application buffer */
184 184 if(tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END) {
185 185 tcp_buf->app_buf_xferred = 0; /* just for safety */
186 186 tcp_buf->app_buf_size = tx_con->app_buf_bytes_acc_post;
187 187 tx_con->app_buf_bytes_acc_post = 0;
188 188
189 189 /* special care for the last bd: */
190 190 tx_bd->flags |= TOE_TX_BD_NOTIFY;
191 191 tx_con->u.tx.bds_without_comp_flag = 0;
192 192 tx_bd->flags |= TOE_TX_BD_PUSH;
193 193
194 194 DbgMessage(pdev, VERBOSEl4tx,
195 195 "Setting Tx BD, last bd of app buf, flags=%d\n", tx_bd->flags);
196 196 } else {
197 197 /* make sure there aren't 'too many' bds without completion flag */
198 198 tx_con->u.tx.bds_without_comp_flag += tcp_buf->bd_used;
199 199 if (tx_con->u.tx.bds_without_comp_flag > (tx_chain->capacity - MAX_FRAG_CNT_PER_TB)) {
200 200 tx_bd->flags |= TOE_TX_BD_NOTIFY;
201 201 tx_con->u.tx.bds_without_comp_flag = 0;
202 202 }
203 203 }
204 204
205 205 s_list_push_tail(&tx_con->active_tb_list, &tcp_buf->link);
206 206 tx_con->rq_nbytes += tcp_buf->size;
207 207 lm_tcp_tx_write_db(pdev, tcp, tcp_buf->flags & TCP_BUF_FLAG_L4_POST_END);
208 208
209 209 /* network reachability (NOT IMPLEMENTED):
210 210 if(lm_neigh_is_cache_entry_staled(tcp->path->neigh))
211 211 lm_neigh_indicate_staled_cache_entry(tcp->path->neigh);
212 212 */
213 213
214 214 DbgMessage(pdev, VERBOSEl4tx, "posted tx buf for cid=%d, buf size=%d, bd used=%d, buf flags=%x, app_buf_size=%d\n",
215 215 tcp->cid, tcp_buf->size, tcp_buf->bd_used, tcp_buf->flags, tcp_buf->app_buf_size);
216 216 DbgMessage(pdev, VERBOSEl4tx, "after posting tx buf, tx_con->active_tb_list=%d\n",
217 217 s_list_entry_cnt(&tx_con->active_tb_list));
218 218
219 219 return LM_STATUS_SUCCESS;
220 220 } /* lm_tcp_tx_post_buf */
221 221
222 222 /** Description
223 223 * indicates graceful disconnect completion to client.
224 224 * Assumptions:
225 225 * tx-lock is taken by caller
226 226 */
227 227 static __inline void lm_tcp_tx_graceful_disconnect_complete(lm_device_t * pdev, lm_tcp_state_t * tcp)
228 228 {
229 229 u8_t ip_version;
230 230 DbgBreakIf(!s_list_is_empty(&tcp->tx_con->active_tb_list));
231 231 DbgBreakIf(tcp->tx_con->flags & TCP_FIN_REQ_COMPLETED);
232 232 tcp->tx_con->flags |= TCP_FIN_REQ_COMPLETED;
233 233 DbgMessage(pdev, INFORMl4tx, "fin request completed (cid=%d)\n", tcp->cid);
234 234 tcp->tcp_state_calc.fin_completed_time = mm_get_current_time(pdev);
235 235 if (!(tcp->tx_con->u.tx.flags & TCP_CON_FIN_REQ_LM_INTERNAL)) {
236 236 ip_version = (tcp->path->path_const.ip_version == IP_VERSION_IPV4)? STATS_IP_4_IDX : STATS_IP_6_IDX;
237 237 LM_COMMON_DRV_STATS_ATOMIC_INC_TOE(pdev, ipv[ip_version].out_fin);
238 238 mm_tcp_graceful_disconnect_done(pdev,tcp, LM_STATUS_SUCCESS);
239 239 }
240 240 }
241 241
242 242 void lm_tcp_tx_cmp_process(
243 243 struct _lm_device_t *pdev,
244 244 lm_tcp_state_t *tcp,
245 245 u32_t completed_bytes
246 246 )
247 247 {
248 248 lm_tcp_con_t *tx_con = tcp->tx_con;
249 249 u32_t actual_completed; /* number of bytes actually completed (could be different than completed in case of fin) */
250 250 MM_INIT_TCP_LOCK_HANDLE();
251 251
252 252 DbgMessage(pdev, VERBOSEl4tx, "##lm_tcp_tx_app_cmp_process, cid=%d, completed_bytes=%d\n",
253 253 tcp->cid, completed_bytes);
254 254
255 255 DbgBreakIf(tx_con->flags & TCP_TX_COMP_BLOCKED);
256 256
257 257 if (!(tx_con->flags & TCP_DEFERRED_PROCESSING)) {
258 258 mm_acquire_tcp_lock(pdev, tx_con);
259 259 }
260 260 tx_con->bytes_comp_cnt += completed_bytes;
261 261 DbgBreakIf(S64_SUB(tx_con->bytes_post_cnt, tx_con->bytes_comp_cnt) < 0);
262 262
263 263 DbgBreakIf(!completed_bytes);
264 264
265 265 actual_completed = lm_tcp_complete_nbytes(pdev, tcp, tcp->tx_con, completed_bytes, FALSE);
266 266
267 267 if (actual_completed != completed_bytes) {
268 268 DbgBreakIf(actual_completed > completed_bytes);
269 269 DbgBreakIf((completed_bytes - actual_completed) != 1);
270 270 DbgBreakIf(!(tx_con->flags & TCP_FIN_REQ_POSTED));
271 271 DbgBreakIf(tx_con->bytes_post_cnt != tx_con->bytes_comp_cnt);
272 272 /* fin completed */
273 273 tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_FIN_CMP;
274 274 tx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_FIN_REQ_COMPLETED */
275 275 }
276 276
277 277 /* network reachability (NOT IMPLEMENTED):
278 278 lm_neigh_update_nic_reachability_time(tcp->path->neigh)
279 279 */
280 280 if (!(tx_con->flags & TCP_DEFERRED_PROCESSING)) {
281 281 mm_release_tcp_lock(pdev, tx_con);
282 282 }
283 283
284 284 } /* lm_tcp_tx_app_cmp_process */
285 285
286 286 u8_t lm_toe_is_tx_completion(lm_device_t *pdev, u8_t drv_toe_rss_id)
287 287 {
288 288 u8_t result = FALSE;
289 289 lm_tcp_scq_t *scq = NULL;
290 290
291 291 DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.scqs) > drv_toe_rss_id));
292 292
293 293 scq = &pdev->toe_info.scqs[drv_toe_rss_id];
294 294
295 295 if ( scq->hw_con_idx_ptr &&
296 296 *scq->hw_con_idx_ptr != lm_bd_chain_cons_idx(&scq->bd_chain) )
297 297 {
298 298 result = TRUE;
299 299 }
300 300 DbgMessage(pdev, INFORMl4int, "lm_toe_is_tx_completion(): result is:%s\n", result? "TRUE" : "FALSE");
301 301
302 302 return result;
303 303 }
304 304
305 305 void lm_tcp_tx_inc_trm_aborted_bytes(
306 306 struct _lm_device_t *pdev,
307 307 lm_tcp_state_t *tcp,
308 308 u32_t aborted_bytes
309 309 )
310 310 {
311 311 lm_tcp_con_t *tx_con = tcp->tx_con;
312 312 MM_INIT_TCP_LOCK_HANDLE();
313 313
314 314 DbgMessage(pdev, VERBOSEl4tx, "##lm_tcp_tx_inc_aborted_count, cid=%d, aborted_bytes=%d\n",
315 315 tcp->cid, aborted_bytes);
316 316
317 317 if (!(tx_con->flags & TCP_DEFERRED_PROCESSING)) {
318 318 mm_acquire_tcp_lock(pdev, tx_con);
319 319 }
320 320
321 321 tx_con->bytes_trm_aborted_cnt += aborted_bytes;
322 322
323 323 if (!(tx_con->flags & TCP_DEFERRED_PROCESSING)) {
324 324 mm_release_tcp_lock(pdev, tx_con);
325 325 }
326 326
327 327 } /* lm_tcp_tx_inc_aborted_count */
328 328
329 329 /** Description
330 330 * completes the fast-path operations for a certain connection
331 331 * Assumption:
332 332 * fp-tx lock is taken
333 333 */
334 334 void lm_tcp_tx_complete_tcp_fp(lm_device_t * pdev, lm_tcp_state_t * tcp, lm_tcp_con_t * con)
335 335 {
336 336 /**** Client completing : may result in lock-release *****/
337 337 /* during lock-release, due to this function being called from service_deferred, more
338 338 * cqes can be processed. We don't want to mix. This function is mutually exclusive, so
339 339 * any processing makes its way to being completed by calling this function.
340 340 * the following define a "fast-path completion"
341 341 * (i) RQ buffers to be completed
342 342 * defined by dpc_completed_tail and are collected during lm_tcp_complete_bufs BEFORE lock
343 343 * is released, so no more buffer processing can make its way into this buffer completion.
344 344 * (ii) Fin to be completed
345 345 * determined by the flags, since dpc_flags CAN be modified during processing we copy
346 346 * them to a snapshot_flags parameter, which is initialized in this function only, so no fin
347 347 * can make its way in while we release the lock.
348 348 * (iv) Remainders for sp
349 349 * all sp operations are logged in dpc_flags. for the same reason as (iii) no sp commands can
350 350 * make their way in during this fp-completion, all sp-processing after will relate to this point in time.
351 351 */
352 352
353 353 con->dpc_info.snapshot_flags = con->dpc_info.dpc_flags;
354 354 con->dpc_info.dpc_flags = 0;
355 355
356 356 /* complete buffers to client */
357 357 if (con->dpc_info.dpc_completed_tail != NULL) {
358 358 lm_tcp_complete_bufs(pdev, tcp, con);
359 359 }
360 360
361 361 /* Graceful Disconnect */
362 362 if (con->dpc_info.snapshot_flags & LM_TCP_DPC_FIN_CMP) {
363 363 con->dpc_info.snapshot_flags &= ~LM_TCP_DPC_FIN_CMP;
364 364 lm_tcp_tx_graceful_disconnect_complete(pdev, con->tcp_state);
365 365 }
366 366
367 367 }
368 368
369 369 void lm_tcp_tx_process_cqe(
370 370 lm_device_t * pdev,
371 371 struct toe_tx_cqe * cqe,
372 372 lm_tcp_state_t * tcp
373 373 )
374 374 {
375 375 enum toe_sq_opcode_type cmd;
376 376
377 377 /* get the cmd from cqe */
378 378 cmd = ((cqe->params & TOE_TX_CQE_COMPLETION_OPCODE) >> TOE_TX_CQE_COMPLETION_OPCODE_SHIFT);
379 379
380 380 DbgMessage(pdev, INFORMl4tx, "###lm_tcp_tx_process_cqe cid=%d cmd=%d\n", tcp->cid, cmd);
381 381 DbgBreakIf( ! (pdev && tcp) );
382 382 /* Check that the cqe len make sense, we could have got here by chance... */
383 383 DbgBreakIfAll(cqe->len & 0xc0000000); /* two upper bits on show a completion larger than 1GB - a bit odd...*/
384 384
385 385 /* Three types of completions: fast-path, reset-recv, ramrod-cmp. All completions may have a
386 386 * fast-path part (nbytes completed) which will be handled in any case that cqe->len > 0 */
387 387
388 388 /* complete data if anything needs to be complete */
389 389 if (cqe->len &&
390 390 ((tcp->tx_con->dpc_info.dpc_flags & LM_TCP_DPC_RESET_RECV /* RST recv on this DPC on a previous CQE */ ) ||
391 391 (tcp->tx_con->flags & TCP_REMOTE_RST_RECEIVED /* RST recv on previous DPC */ )))
392 392 {
393 393 /* 10/28/08 - Since in extreme cases the current FW may not complete all sent+acked bytes
394 394 on an RST recv cqe and do so only later on one of the following ramrod completions,
395 395 we need to ignore these too-late completed bytes, thus we nullify cqe->len */
396 396 DbgBreakIf((cmd != RAMROD_OPCODE_TOE_RESET_SEND) &&
397 397 (cmd != RAMROD_OPCODE_TOE_INVALIDATE) &&
398 398 (cmd != RAMROD_OPCODE_TOE_EMPTY_RAMROD) &&
399 399 (cmd != RAMROD_OPCODE_TOE_TERMINATE));
400 400 lm_tcp_tx_inc_trm_aborted_bytes(pdev, tcp, cqe->len);
401 401 cqe->len = 0;
402 402 }
403 403 if (cqe->len) {
404 404 DbgBreakIf(tcp->tx_con->dpc_info.dpc_comp_blocked);
405 405 lm_tcp_tx_cmp_process(pdev, tcp, cqe->len);
406 406 }
407 407
408 408 switch(cmd) {
409 409 case CMP_OPCODE_TOE_TX_CMP:
410 410 break;
411 411 case CMP_OPCODE_TOE_RST_RCV:
412 412 tcp->tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RESET_RECV;
413 413 tcp->tx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_REMOTE_RST_RECEIVED */
414 414 break;
415 415 case RAMROD_OPCODE_TOE_RESET_SEND:
416 416 DbgBreakIf(! tcp->sp_request);
417 417 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_ABORTIVE_DISCONNECT);
418 418 tcp->tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
419 419 tcp->tx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_RST_REQ_COMPLETED */
420 420 break;
421 421 case RAMROD_OPCODE_TOE_INVALIDATE:
422 422 DbgBreakIf(! tcp->sp_request);
423 423 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_INVALIDATE);
424 424 tcp->tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
425 425 tcp->tx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_INV_REQ_COMPLETED */
426 426 break;
427 427 case RAMROD_OPCODE_TOE_TERMINATE:
428 428 DbgBreakIf(! tcp->sp_request);
429 429 DbgBreakIf(tcp->sp_request->type != SP_REQUEST_TERMINATE1_OFFLOAD);
430 430 tcp->tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
431 431 tcp->tx_con->dpc_info.dpc_comp_blocked = TRUE; /* TCP_TRM_REQ_COMPLETED */
432 432 break;
433 433 case RAMROD_OPCODE_TOE_EMPTY_RAMROD:
434 434 DbgBreakIf(cqe->len);
435 435 DbgBreakIf(! tcp->sp_request );
436 436 DbgBreakIf((tcp->sp_request->type != SP_REQUEST_PENDING_LOCAL_FIN_DISCONNECT) &&
437 437 (tcp->sp_request->type != SP_REQUEST_PENDING_REMOTE_DISCONNECT) &&
438 438 (tcp->sp_request->type != SP_REQUEST_PENDING_TX_RST));
439 439 tcp->tx_con->dpc_info.dpc_flags |= LM_TCP_DPC_RAMROD_CMP;
440 440 break;
441 441 default:
442 442 DbgMessage(pdev, FATAL, "unexpected tx cqe opcode=%d\n", cmd);
443 443 DbgBreakIfAll(TRUE);
444 444 }
445 445 }
446 446
447 447 /** Description
448 448 *
449 449 * Assumptions
450 450 * the connections list is initialized with a dummy head.
451 451 */
452 452 void lm_tcp_tx_process_cqes(lm_device_t *pdev, u8_t drv_toe_rss_id, s_list_t * connections)
453 453 {
454 454 lm_tcp_scq_t *scq;
455 455 struct toe_tx_cqe *cqe, *hist_cqe;
456 456 lm_tcp_state_t *tcp;
457 457 u32_t cid;
458 458 u32_t avg_dpc_cnt;
459 459 u16_t cq_new_idx;
460 460 u16_t cq_old_idx;
461 461 u16_t num_to_reproduce = 0;
462 462 u8_t defer_cqe;
463 463 MM_INIT_TCP_LOCK_HANDLE();
464 464
465 465 DbgMessage(pdev, VERBOSEl4int , "###lm_tcp_tx_process_cqes\n");
466 466
467 467 scq = &pdev->toe_info.scqs[drv_toe_rss_id];
468 468 cq_new_idx = *(scq->hw_con_idx_ptr);
469 469 cq_old_idx = lm_bd_chain_cons_idx(&scq->bd_chain);
470 470 DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
471 471
472 472 /* save statistics */
473 473 scq->num_cqes_last_dpc = S16_SUB(cq_new_idx, cq_old_idx);
474 474 if (scq->num_cqes_last_dpc) { /* Exclude zeroed value from statistics*/
475 475 if(scq->max_cqes_per_dpc < scq->num_cqes_last_dpc) {
476 476 scq->max_cqes_per_dpc = scq->num_cqes_last_dpc;
477 477 }
478 478 /* we don't want to wrap around...*/
479 479 if ((scq->sum_cqes_last_x_dpcs + scq->num_cqes_last_dpc) < scq->sum_cqes_last_x_dpcs) {
480 480 scq->avg_dpc_cnt = 0;
481 481 scq->sum_cqes_last_x_dpcs = 0;
482 482 }
483 483 scq->sum_cqes_last_x_dpcs += scq->num_cqes_last_dpc;
484 484 scq->avg_dpc_cnt++;
485 485 avg_dpc_cnt = scq->avg_dpc_cnt;
486 486 if (avg_dpc_cnt) {
487 487 scq->avg_cqes_per_dpc = scq->sum_cqes_last_x_dpcs / avg_dpc_cnt;
488 488 } else {
489 489 scq->sum_cqes_last_x_dpcs = 0;
490 490 }
491 491 }
492 492
493 493 while(cq_old_idx != cq_new_idx) {
494 494 DbgBreakIf(S16_SUB(cq_new_idx, cq_old_idx) <= 0);
495 495
496 496 /* get next consumed cqe */
497 497 cqe = lm_toe_bd_chain_consume_bd(&scq->bd_chain);
498 498 DbgBreakIf(!cqe);
499 499 num_to_reproduce++;
500 500
501 501 /* get tcp state from cqe */
502 502 cid = SW_CID(((cqe->params & TOE_TX_CQE_CID) >> TOE_TX_CQE_CID_SHIFT));
503 503 tcp = lm_cid_cookie(pdev, TOE_CONNECTION_TYPE, cid);
504 504 DbgBreakIf(!tcp);
505 505 /* save cqe in history_cqes */
506 506 hist_cqe = (struct toe_tx_cqe *)lm_tcp_qe_buffer_next_cqe_override(&tcp->tx_con->history_cqes);
507 507 *hist_cqe = *cqe;
508 508
509 509 defer_cqe = ((tcp->tx_con->flags & TCP_TX_COMP_DEFERRED) == TCP_TX_COMP_DEFERRED);
510 510 if (defer_cqe) {
511 511 /* if we're deferring completions - just store the cqe and continue to the next one */
512 512 /* Return if we are still deferred (may have changed since initial check was w/o a lock) */
513 513 mm_acquire_tcp_lock(pdev, tcp->tx_con);
514 514 /* check again under lock if we're deferred */
515 515 defer_cqe = ((tcp->tx_con->flags & TCP_TX_COMP_DEFERRED) == TCP_TX_COMP_DEFERRED);
516 516 if (defer_cqe) {
517 517 tcp->tx_con->flags |= TCP_DEFERRED_PROCESSING;
518 518 lm_tcp_tx_process_cqe(pdev, cqe, tcp);
519 519 }
520 520 mm_release_tcp_lock(pdev, tcp->tx_con);
521 521 }
522 522 if (!defer_cqe) {
523 523 /* connections will always be initialized to a dummy, so once a tcp connection is added to the
524 524 * list, it's link will be initialized to point to another link other than NULL */
525 525 if (s_list_next_entry(&tcp->tx_con->dpc_info.link) == NULL) {
526 526 s_list_push_head(connections, &tcp->tx_con->dpc_info.link);
527 527 }
528 528 lm_tcp_tx_process_cqe(pdev, cqe, tcp);
529 529 }
530 530 cq_old_idx = lm_bd_chain_cons_idx(&scq->bd_chain);
531 531 /* GilR 5/12/2006 - TODO - decide with Alon if reading the hw_con again is required */
532 532 //cq_new_idx = *(scq->hw_con_idx_ptr);
533 533 }
534 534
535 535 /* The fact that we post the producer here before we've handled any slow-path completions assures that
536 536 * the sp-ring will always be updated AFTER the producer was. */
537 537 if (num_to_reproduce) {
538 538 lm_toe_bd_chain_bds_produced(&scq->bd_chain, num_to_reproduce);
539 539
540 540 /* GilR 5/13/2006 - TBA - save some stats? */
541 541
542 542 /* notify the fw of the prod of the SCQ */
543 543 LM_INTMEM_WRITE16(pdev, CSTORM_TOE_CQ_PROD_OFFSET(LM_TOE_FW_RSS_ID(pdev,drv_toe_rss_id) , PORT_ID(pdev)),
544 544 lm_bd_chain_prod_idx(&scq->bd_chain), BAR_CSTRORM_INTMEM);
545 545 }
546 546 }
547 547
548 548 void lm_toe_service_tx_intr(lm_device_t *pdev, u8_t drv_toe_rss_id)
549 549 {
550 550 s_list_t connections;
551 551 s_list_entry_t dummy;
552 552 lm_tcp_con_t * con;
553 553 lm_tcp_state_t * tcp;
554 554
555 555 MM_INIT_TCP_LOCK_HANDLE();
556 556
557 557 DbgMessage(pdev, VERBOSEl4int , "###lm_toe_service_tx_intr\n");
558 558 DbgBreakIf(!(pdev && ARRSIZE(pdev->toe_info.scqs) > drv_toe_rss_id));
559 559
560 560 s_list_clear(&connections);
561 561 s_list_push_head(&connections, &dummy);
562 562 /* process the cqes and initialize connections with all the connections that appeared
563 563 * in the DPC */
564 564 lm_tcp_tx_process_cqes(pdev,drv_toe_rss_id,&connections);
565 565
566 566 /* complete the fp/sp parts of the connections remember to ignore the last one */
567 567 con = (lm_tcp_con_t *)s_list_peek_head(&connections);
568 568 tcp = con->tcp_state;
569 569 while (s_list_next_entry(&con->dpc_info.link) != NULL) {
570 570 mm_acquire_tcp_lock(pdev, con);
571 571 lm_tcp_tx_complete_tcp_fp(pdev, con->tcp_state, con);
572 572 mm_release_tcp_lock(pdev, con);
573 573 con = (lm_tcp_con_t *)s_list_next_entry(&con->dpc_info.link);
574 574 tcp = con->tcp_state;
575 575 }
576 576
577 577 /* SP : traverse the connections. remember to ignore the last one */
578 578 con = (lm_tcp_con_t *)s_list_pop_head(&connections);
579 579 s_list_next_entry(&con->dpc_info.link) = NULL;
580 580 tcp = con->tcp_state;
581 581 while (s_list_entry_cnt(&connections) > 0) {
582 582 /* we access snapshot and not dpc, since once the dpc_flags were copied
583 583 * to snapshot they were zeroized */
584 584 if (con->dpc_info.snapshot_flags) {
585 585 lm_tcp_tx_complete_tcp_sp(pdev, tcp, con);
586 586 }
587 587 con = (lm_tcp_con_t *)s_list_pop_head(&connections);
588 588 s_list_next_entry(&con->dpc_info.link) = NULL;
589 589 tcp = con->tcp_state;
590 590 }
591 591
592 592 }
593 593
594 594 lm_status_t lm_tcp_graceful_disconnect(
595 595 IN lm_device_t * pdev,
596 596 IN lm_tcp_state_t * tcp_state
597 597 )
598 598 {
599 599 struct toe_tx_bd *tx_bd;
600 600 lm_tcp_con_t *tcp_con = tcp_state->tx_con;
601 601 u16_t old_prod, new_prod;
602 602 u32_t dbg_bytes_prod_seq;
603 603
604 604 DbgMessage(pdev, INFORMl4tx, "###lm_tcp_graceful_disconnect\n");
605 605
606 606 if ( tcp_con->flags & TCP_TX_POST_BLOCKED ) {
607 607 return LM_STATUS_CONNECTION_CLOSED;
608 608 }
609 609
610 610 DbgBreakIf( (tcp_con->app_buf_bytes_acc_post != 0) ||
611 611 (tcp_con->db_more_bytes != 0) ||
612 612 (tcp_con->db_more_bds != 0) ||
613 613 (tcp_con->u.tx.bds_without_comp_flag != 0)
614 614 );
615 615
616 616 old_prod = lm_bd_chain_prod_idx(&(tcp_con->bd_chain));
617 617
618 618 /* Post FIN BD on Tx chain */
619 619 tx_bd = (struct toe_tx_bd *)lm_toe_bd_chain_produce_bd(&(tcp_con->bd_chain));
620 620 tx_bd->flags = TOE_TX_BD_FIN; /* Vladz: Pay attention when u move this
621 621 line - there is an assignment to flags, NOT bitwise OR */
622 622 tx_bd->flags |= TOE_TX_BD_NOTIFY;
623 623 tx_bd->size = 1;
624 624 /* For a safety */
625 625 tx_bd->addr_hi = tx_bd->addr_lo = 0;
626 626
627 627 dbg_bytes_prod_seq = tcp_con->db_data.tx->bytes_prod_seq + tcp_con->db_more_bytes;
628 628 dbg_bytes_prod_seq += tx_bd->size;
629 629 tx_bd->nextBdStartSeq = dbg_bytes_prod_seq;
630 630
631 631 new_prod = lm_bd_chain_prod_idx(&(tcp_con->bd_chain));
632 632 DbgBreakIf(S16_SUB(new_prod, old_prod) >= 3);
633 633 DbgBreakIf(S16_SUB(new_prod, old_prod) <= 0);
634 634
635 635 DbgBreakIf(tcp_con->flags & TCP_FIN_REQ_POSTED);
636 636 tcp_con->flags |= TCP_FIN_REQ_POSTED;
637 637
638 638 /* Update fin request time, if not already set by the caller */
639 639 if (!tcp_state->tcp_state_calc.fin_request_time) {
640 640 tcp_state->tcp_state_calc.fin_request_time = mm_get_current_time(pdev);
641 641 if (tcp_state->tcp_state_calc.fin_request_time == tcp_state->tcp_state_calc.fin_reception_time){
642 642 tcp_state->tcp_state_calc.fin_reception_time -= 1;
643 643 }
644 644 }
645 645
646 646 /* Doorbell FIN */
647 647 _lm_tcp_tx_write_db(pdev, tcp_con, tcp_state->cid, 0, (u16_t)S16_SUB(new_prod, old_prod), 1);
648 648
649 649 /* assert if the new addition will make the cyclic counter post_cnt smaller than comp_cnt */
650 650 DbgBreakIf(S64_SUB(tcp_con->bytes_post_cnt + 1, tcp_con->bytes_comp_cnt) < 0);
651 651 tcp_con->bytes_post_cnt++;
652 652 tcp_con->fp_db_cnt++;
653 653
654 654 return LM_STATUS_SUCCESS;
655 655 }
656 656
657 657
[ 634 lines elided ]
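
For context on the one-line change above: once -Wno-missing-braces is dropped from Makefile.uts, gcc's -Wmissing-braces diagnostic applies to this file, and a single-level {0} initializer for an aggregate whose first member is itself a struct is flagged as not fully bracketed. struct doorbell begins with a header sub-struct (note the db.header.data access in the TOE_TX_DOORBELL macro), so the little-endian/illumos build is switched to the fully braced {{0}} form that Linux already used. A minimal sketch of the warning, using hypothetical stand-in types rather than the real bnxe headers (whether a bare {0} is flagged varies with compiler version):

    /* Hypothetical stand-ins for the real bnxe structures, for illustration only. */
    struct doorbell_hdr {
        unsigned int data;
    };

    struct doorbell {
        struct doorbell_hdr header;     /* first member is itself a struct */
    };

    int
    main(void)
    {
        /*
         * With -Wmissing-braces in effect (i.e. without -Wno-missing-braces),
         * gcc flags this initializer as not fully bracketed: the 0 initializes
         * header.data without an explicit inner brace level.
         */
        struct doorbell warns = { 0 };

        /* Fully braced form: no warning, identical zero-initialization. */
        struct doorbell clean = { { 0 } };

        return ((int)(warns.header.data + clean.header.data));
    }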
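
A side note on the S16_SUB/S64_SUB assertions used throughout the file: producer/consumer indices and byte counters are cyclic, so their distance is taken with wrap-safe signed subtraction rather than a plain compare. The driver's own macro definitions live elsewhere; the sketch below assumes the conventional technique (subtract in the unsigned domain, reinterpret as signed) and is illustrative only:

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed definitions - the driver's actual S16_SUB/S64_SUB come from its own headers. */
    #define S16_SUB(a, b)   ((int16_t)((uint16_t)(a) - (uint16_t)(b)))
    #define S64_SUB(a, b)   ((int64_t)((uint64_t)(a) - (uint64_t)(b)))

    int
    main(void)
    {
        uint16_t old_prod = 0xfffe;     /* producer index just before a 16-bit wrap */
        uint16_t new_prod = 0x0003;     /* producer index after 5 more BDs were produced */

        /* Plain int subtraction yields -65531; S16_SUB recovers the true distance, 5. */
        printf("bds produced = %d\n", S16_SUB(new_prod, old_prod));
        return (0);
    }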