9994 cxgbe t4nex: Handle get_fl_payload() alloc failures
9995 cxgbe t4_devo_attach() should initialize ->sfl
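Summary of the 9994 change shown below: get_fl_payload() can return NULL when allocb()/desballoc() fails, and the old code responded by calling panic(). The new code instead rearms the ingress queue with the longest holdoff timer, returns the already-consumed buffers to the free list, and bails out so the frame can be retried later. A self-contained sketch of that pattern follows; the types and helpers in it are hypothetical stand-ins, not the driver's own.

        /*
         * Hypothetical stand-ins for the driver's queue state and helpers;
         * only the shape of the failure handling mirrors the change below.
         */
        #include <stdlib.h>

        struct rx_queue {
                int bufs_used;          /* free-list buffers consumed this poll */
                int needed;             /* buffers owed back to the free list */
                int slow_timer_armed;   /* stand-in for the A_SGE_PF_GTS rearm */
        };

        static void *
        get_payload(size_t len)
        {
                return (malloc(len));   /* stand-in for get_fl_payload() */
        }

        static int
        poll_queue(struct rx_queue *q, int budget)
        {
                int ndescs;

                for (ndescs = 0; ndescs < budget; ndescs++) {
                        void *m = get_payload(2048);

                        if (m == NULL) {
                                /*
                                 * Old behaviour: panic("%s: line %d.", ...).
                                 * New behaviour: arm a long retry timer, give
                                 * the consumed buffers back, return cleanly.
                                 */
                                q->slow_timer_armed = 1;
                                q->needed += q->bufs_used;
                                q->bufs_used = 0;
                                return (0);
                        }
                        q->bufs_used++;
                        free(m);        /* the real driver hands the mblk up the stack */
                }
                return (ndescs);
        }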


 758                 membar_consumer();
 759 
 760                 m = NULL;
 761                 rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
 762                 lq = be32_to_cpu(ctrl->pldbuflen_qid);
 763                 rss = (const void *)iq->cdesc;
 764 
 765                 switch (rsp_type) {
 766                 case X_RSPD_TYPE_FLBUF:
 767 
 768                         ASSERT(iq->flags & IQ_HAS_FL);
 769 
 770                         if (CPL_RX_PKT == rss->opcode) {
 771                                 cpl = (void *)(rss + 1);
 772                                 pkt_len = be16_to_cpu(cpl->len);
 773 
 774                                 if (iq->polling && ((received_bytes + pkt_len) > budget))
 775                                         goto done;
 776 
 777                                 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 778                                 if (m == NULL) {
 779                                         panic("%s: line %d.", __func__,
 780                                             __LINE__);
 781                                 }
 782 
 783                                 iq->intr_next = iq->intr_params;
 784                                 m->b_rptr += sc->sge.pktshift;
 785                                 if (sc->params.tp.rx_pkt_encap)
 786                                 /* It is enabled only in T6 config file */
 787                                         err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
 788                                 else
 789                                         err_vec = ntohs(cpl->err_vec);
 790 
 791                                 csum_ok = cpl->csum_calc && !err_vec;
 792 
 793                                 /* TODO: what about cpl->ip_frag? */
 794                                 if (csum_ok && !cpl->ip_frag) {
 795                                         mac_hcksum_set(m, 0, 0, 0, 0xffff,
 796                                             HCK_FULLCKSUM_OK | HCK_FULLCKSUM |
 797                                             HCK_IPV4_HDRCKSUM_OK);
 798                                         rxq->rxcsum++;
 799                                 }
 800                                 rxq->rxpkts++;
 801                                 rxq->rxbytes += pkt_len;
 802                                 received_bytes += pkt_len;
 803 
 804                                 *mblk_tail = m;
 805                                 mblk_tail = &m->b_next;
 806 
 807                                 break;
 808                         }
 809 
 810                         m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 811                         if (m == NULL) {
 812                                 panic("%s: line %d.", __func__,
 813                                     __LINE__);
 814                         }
 815                         /* FALLTHROUGH */
 816 
 817                 case X_RSPD_TYPE_CPL:
 818                         ASSERT(rss->opcode < NUM_CPL_CMDS);
 819                         sc->cpl_handler[rss->opcode](iq, rss, m);
 820                         break;
 821 
 822                 default:
 823                         break;
 824                 }
 825                 iq_next(iq);
 826                 ++ndescs;
 827                 if (!iq->polling && (ndescs == budget))
 828                         break;
 829         }
 830 
 831 done:
 832 
 833         t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
 834                      V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) |


 844                         add_fl_to_sfl(sc, fl);
 845         }
 846         return (mblk_head);
 847 }
 848 
 849 /*
 850  * Deals with anything and everything on the given ingress queue.
 851  */
 852 static int
 853 service_iq(struct sge_iq *iq, int budget)
 854 {
 855         struct sge_iq *q;
 856         struct sge_rxq *rxq = iq_to_rxq(iq);    /* Use iff iq is part of rxq */
 857         struct sge_fl *fl = &rxq->fl;            /* Use iff IQ_HAS_FL */
 858         struct adapter *sc = iq->adapter;
 859         struct rsp_ctrl *ctrl;
 860         const struct rss_header *rss;
 861         int ndescs = 0, limit, fl_bufs_used = 0;
 862         int rsp_type;
 863         uint32_t lq;
 864         mblk_t *m;
 865         STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
 866 
 867         limit = budget ? budget : iq->qsize / 8;
 868 
 869         /*
 870          * We always come back and check the descriptor ring for new indirect
 871          * interrupts and other responses after running a single handler.
 872          */
 873         for (;;) {
 874                 while (is_new_response(iq, &ctrl)) {
 875 
 876                         membar_consumer();
 877 
 878                         m = NULL;
 879                         rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
 880                         lq = be32_to_cpu(ctrl->pldbuflen_qid);
 881                         rss = (const void *)iq->cdesc;
 882 
 883                         switch (rsp_type) {
 884                         case X_RSPD_TYPE_FLBUF:
 885 
 886                                 ASSERT(iq->flags & IQ_HAS_FL);
 887 
 888                                 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 889                                 if (m == NULL) {
 890                                         panic("%s: line %d.", __func__,
 891                                             __LINE__);
 892                                 }
 893 
 894                         /* FALLTHRU */
 895                         case X_RSPD_TYPE_CPL:
 896 
 897                                 ASSERT(rss->opcode < NUM_CPL_CMDS);
 898                                 sc->cpl_handler[rss->opcode](iq, rss, m);
 899                                 break;
 900 
 901                         case X_RSPD_TYPE_INTR:
 902 
 903                                 /*
 904                                  * Interrupts should be forwarded only to queues
 905                                  * that are not forwarding their interrupts.
 906                                  * This means service_iq can recurse but only 1
 907                                  * level deep.
 908                                  */
 909                                 ASSERT(budget == 0);
 910 
 911                                 q = sc->sge.iqmap[lq - sc->sge.iq_start];
 912                                 if (atomic_cas_uint(&q->state, IQS_IDLE,


 951 
 952                 if (STAILQ_EMPTY(&iql) != 0)
 953                         break;
 954 
 955                 /*
 956                  * Process the head only, and send it to the back of the list if
 957                  * it's still not done.
 958                  */
 959                 q = STAILQ_FIRST(&iql);
 960                 STAILQ_REMOVE_HEAD(&iql, link);
 961                 if (service_iq(q, q->qsize / 8) == 0)
 962                         (void) atomic_cas_uint(&q->state, IQS_BUSY, IQS_IDLE);
 963                 else
 964                         STAILQ_INSERT_TAIL(&iql, q, link);
 965         }
 966 
 967         t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
 968             V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
 969 
 970         if (iq->flags & IQ_HAS_FL) {
 971                 int starved;
 972 
 973                 FL_LOCK(fl);
 974                 fl->needed += fl_bufs_used;
 975                 starved = refill_fl(sc, fl, fl->cap / 4);
 976                 FL_UNLOCK(fl);
 977                 if (starved != 0)
 978                         add_fl_to_sfl(sc, fl);
 979         }
 980 
 981         return (0);
 982 }
 983 
 984 #ifdef TCP_OFFLOAD_ENABLE
 985 int
 986 t4_mgmt_tx(struct adapter *sc, mblk_t *m)
 987 {
 988         return (t4_wrq_tx(sc, &sc->sge.mgmtq, m));
 989 }
 990 
 991 /*


1236         ASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS);
1237         ASSERT(pktc_idx < SGE_NCOUNTERS);    /* -ve is ok, means don't use */
1238 
1239         iq->flags = 0;
1240         iq->adapter = sc;
1241         iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
1242         iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
1243         if (pktc_idx >= 0) {
1244                 iq->intr_params |= F_QINTR_CNT_EN;
1245                 iq->intr_pktc_idx = pktc_idx;
1246         }
1247         iq->qsize = roundup(qsize, 16);              /* See FW_IQ_CMD/iqsize */
1248         iq->esize = max(esize, 16);          /* See FW_IQ_CMD/iqesize */
1249 }
1250 
1251 static inline void
1252 init_fl(struct sge_fl *fl, uint16_t qsize)
1253 {
1254 
1255         fl->qsize = qsize;
1256 }
1257 
1258 static inline void
1259 init_eq(struct adapter *sc, struct sge_eq *eq, uint16_t eqtype, uint16_t qsize,
1260     uint8_t tx_chan, uint16_t iqid)
1261 {
1262         struct sge *s = &sc->sge;
1263         uint32_t r;
1264 
1265         ASSERT(tx_chan < NCHAN);
1266         ASSERT(eqtype <= EQ_TYPEMASK);
1267 
1268         if (is_t5(sc->params.chip)) {
1269                 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
1270                 r >>= S_QUEUESPERPAGEPF0 +
1271                     (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
1272                 s->s_qpp = r & M_QUEUESPERPAGEPF0;
1273         }
1274 
1275         eq->flags = eqtype & EQ_TYPEMASK;


2314                 sd = &fl->sdesc[i];
2315 
2316                 if (sd->rxb != NULL) {
2317                         rxbuf_free(sd->rxb);
2318                         sd->rxb = NULL;
2319                 }
2320         }
2321 }
2322 
2323 /*
2324  * Note that fl->cidx and fl->offset are left unchanged in case of failure.
2325  */
2326 static mblk_t *
2327 get_fl_payload(struct adapter *sc, struct sge_fl *fl,
2328                uint32_t len_newbuf, int *fl_bufs_used)
2329 {
2330         struct mblk_pair frame = {0};
2331         struct rxbuf *rxb;
2332         mblk_t *m = NULL;
2333         uint_t nbuf = 0, len, copy, n;
2334         uint32_t cidx, offset;
2335 
2336         /*
2337          * The SGE won't pack a new frame into the current buffer if the entire
2338          * payload doesn't fit in the remaining space.  Move on to the next buf
2339          * in that case.
2340          */
2341         if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
2342                 fl->offset = 0;
2343                 if (++fl->cidx == fl->cap)
2344                         fl->cidx = 0;
2345                 nbuf++;
2346         }
2347         cidx = fl->cidx;
2348         offset = fl->offset;
2349 
2350         len = G_RSPD_LEN(len_newbuf);   /* pktshift + payload length */
2351         copy = (len <= fl->copy_threshold);
2352         if (copy != 0) {
2353                 frame.head = m = allocb(len, BPRI_HI);
2354                 if (m == NULL)
2355                         return (NULL);
2356         }
2357 
2358         while (len) {
2359                 rxb = fl->sdesc[cidx].rxb;
2360                 n = min(len, rxb->buf_size - offset);
2361 
2362                 (void) ddi_dma_sync(rxb->dhdl, offset, n,
2363                     DDI_DMA_SYNC_FORKERNEL);
2364 
2365                 if (copy != 0)
2366                         bcopy(rxb->va + offset, m->b_wptr, n);
2367                 else {
2368                         m = desballoc((unsigned char *)rxb->va + offset, n,
2369                             BPRI_HI, &rxb->freefunc);
2370                         if (m == NULL) {
2371                                 freemsg(frame.head);
2372                                 return (NULL);
2373                         }
2374                         atomic_inc_uint(&rxb->ref_cnt);
2375                         if (frame.head != NULL)
2376                                 frame.tail->b_cont = m;
2377                         else
2378                                 frame.head = m;
2379                         frame.tail = m;
2380                 }
2381                 m->b_wptr += n;
2382                 len -= n;
2383                 offset += roundup(n, sc->sge.fl_align);
2384                 ASSERT(offset <= rxb->buf_size);
2385                 if (offset == rxb->buf_size) {
2386                         offset = 0;
2387                         if (++cidx == fl->cap)
2388                                 cidx = 0;
2389                         nbuf++;
2390                 }
2391         }




 758                 membar_consumer();
 759 
 760                 m = NULL;
 761                 rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
 762                 lq = be32_to_cpu(ctrl->pldbuflen_qid);
 763                 rss = (const void *)iq->cdesc;
 764 
 765                 switch (rsp_type) {
 766                 case X_RSPD_TYPE_FLBUF:
 767 
 768                         ASSERT(iq->flags & IQ_HAS_FL);
 769 
 770                         if (CPL_RX_PKT == rss->opcode) {
 771                                 cpl = (void *)(rss + 1);
 772                                 pkt_len = be16_to_cpu(cpl->len);
 773 
 774                                 if (iq->polling && ((received_bytes + pkt_len) > budget))
 775                                         goto done;
 776 
 777                                 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 778                                 if (m == NULL)
 779                                         goto done;
 780 
 781                                 iq->intr_next = iq->intr_params;
 782                                 m->b_rptr += sc->sge.pktshift;
 783                                 if (sc->params.tp.rx_pkt_encap)
 784                                 /* It is enabled only in T6 config file */
 785                                         err_vec = G_T6_COMPR_RXERR_VEC(ntohs(cpl->err_vec));
 786                                 else
 787                                         err_vec = ntohs(cpl->err_vec);
 788 
 789                                 csum_ok = cpl->csum_calc && !err_vec;
 790 
 791                                 /* TODO: what about cpl->ip_frag? */
 792                                 if (csum_ok && !cpl->ip_frag) {
 793                                         mac_hcksum_set(m, 0, 0, 0, 0xffff,
 794                                             HCK_FULLCKSUM_OK | HCK_FULLCKSUM |
 795                                             HCK_IPV4_HDRCKSUM_OK);
 796                                         rxq->rxcsum++;
 797                                 }
 798                                 rxq->rxpkts++;
 799                                 rxq->rxbytes += pkt_len;
 800                                 received_bytes += pkt_len;
 801 
 802                                 *mblk_tail = m;
 803                                 mblk_tail = &m->b_next;
 804 
 805                                 break;
 806                         }
 807 
 808                         m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 809                         if (m == NULL)
 810                                 goto done;
 811                         /* FALLTHROUGH */
 812 
 813                 case X_RSPD_TYPE_CPL:
 814                         ASSERT(rss->opcode < NUM_CPL_CMDS);
 815                         sc->cpl_handler[rss->opcode](iq, rss, m);
 816                         break;
 817 
 818                 default:
 819                         break;
 820                 }
 821                 iq_next(iq);
 822                 ++ndescs;
 823                 if (!iq->polling && (ndescs == budget))
 824                         break;
 825         }
 826 
 827 done:
 828 
 829         t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
 830                      V_CIDXINC(ndescs) | V_INGRESSQID(iq->cntxt_id) |


 840                         add_fl_to_sfl(sc, fl);
 841         }
 842         return (mblk_head);
 843 }
 844 
 845 /*
 846  * Deals with anything and everything on the given ingress queue.
 847  */
 848 static int
 849 service_iq(struct sge_iq *iq, int budget)
 850 {
 851         struct sge_iq *q;
 852         struct sge_rxq *rxq = iq_to_rxq(iq);    /* Use iff iq is part of rxq */
 853         struct sge_fl *fl = &rxq->fl;            /* Use iff IQ_HAS_FL */
 854         struct adapter *sc = iq->adapter;
 855         struct rsp_ctrl *ctrl;
 856         const struct rss_header *rss;
 857         int ndescs = 0, limit, fl_bufs_used = 0;
 858         int rsp_type;
 859         uint32_t lq;
 860         int starved;
 861         mblk_t *m;
 862         STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
 863 
 864         limit = budget ? budget : iq->qsize / 8;
 865 
 866         /*
 867          * We always come back and check the descriptor ring for new indirect
 868          * interrupts and other responses after running a single handler.
 869          */
 870         for (;;) {
 871                 while (is_new_response(iq, &ctrl)) {
 872 
 873                         membar_consumer();
 874 
 875                         m = NULL;
 876                         rsp_type = G_RSPD_TYPE(ctrl->u.type_gen);
 877                         lq = be32_to_cpu(ctrl->pldbuflen_qid);
 878                         rss = (const void *)iq->cdesc;
 879 
 880                         switch (rsp_type) {
 881                         case X_RSPD_TYPE_FLBUF:
 882 
 883                                 ASSERT(iq->flags & IQ_HAS_FL);
 884 
 885                                 m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
 886                                 if (m == NULL) {
 887                                         /*
 888                                          * Rearm the iq with a
 889                                          * longer-than-default timer
 890                                          */
 891                                         t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
 892                                                         V_INGRESSQID((u32)iq->cntxt_id) |
 893                                                         V_SEINTARM(V_QINTR_TIMER_IDX(SGE_NTIMERS-1)));
 894                                         if (fl_bufs_used > 0) {
 895                                                 ASSERT(iq->flags & IQ_HAS_FL);
 896                                                 FL_LOCK(fl);
 897                                                 fl->needed += fl_bufs_used;
 898                                                 starved = refill_fl(sc, fl, fl->cap / 8);
 899                                                 FL_UNLOCK(fl);
 900                                                 if (starved)
 901                                                         add_fl_to_sfl(sc, fl);
 902                                         }
 903                                         return (0);
 904                                 }
 905 
 906                         /* FALLTHRU */
 907                         case X_RSPD_TYPE_CPL:
 908 
 909                                 ASSERT(rss->opcode < NUM_CPL_CMDS);
 910                                 sc->cpl_handler[rss->opcode](iq, rss, m);
 911                                 break;
 912 
 913                         case X_RSPD_TYPE_INTR:
 914 
 915                                 /*
 916                                  * Interrupts should be forwarded only to queues
 917                                  * that are not forwarding their interrupts.
 918                                  * This means service_iq can recurse but only 1
 919                                  * level deep.
 920                                  */
 921                                 ASSERT(budget == 0);
 922 
 923                                 q = sc->sge.iqmap[lq - sc->sge.iq_start];
 924                                 if (atomic_cas_uint(&q->state, IQS_IDLE,


 963 
 964                 if (STAILQ_EMPTY(&iql) != 0)
 965                         break;
 966 
 967                 /*
 968                  * Process the head only, and send it to the back of the list if
 969                  * it's still not done.
 970                  */
 971                 q = STAILQ_FIRST(&iql);
 972                 STAILQ_REMOVE_HEAD(&iql, link);
 973                 if (service_iq(q, q->qsize / 8) == 0)
 974                         (void) atomic_cas_uint(&q->state, IQS_BUSY, IQS_IDLE);
 975                 else
 976                         STAILQ_INSERT_TAIL(&iql, q, link);
 977         }
 978 
 979         t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
 980             V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
 981 
 982         if (iq->flags & IQ_HAS_FL) {
 983 
 984                 FL_LOCK(fl);
 985                 fl->needed += fl_bufs_used;
 986                 starved = refill_fl(sc, fl, fl->cap / 4);
 987                 FL_UNLOCK(fl);
 988                 if (starved != 0)
 989                         add_fl_to_sfl(sc, fl);
 990         }
 991 
 992         return (0);
 993 }
 994 
 995 #ifdef TCP_OFFLOAD_ENABLE
 996 int
 997 t4_mgmt_tx(struct adapter *sc, mblk_t *m)
 998 {
 999         return (t4_wrq_tx(sc, &sc->sge.mgmtq, m));
1000 }
1001 
1002 /*


1247         ASSERT(tmr_idx >= 0 && tmr_idx < SGE_NTIMERS);
1248         ASSERT(pktc_idx < SGE_NCOUNTERS);    /* -ve is ok, means don't use */
1249 
1250         iq->flags = 0;
1251         iq->adapter = sc;
1252         iq->intr_params = V_QINTR_TIMER_IDX(tmr_idx);
1253         iq->intr_pktc_idx = SGE_NCOUNTERS - 1;
1254         if (pktc_idx >= 0) {
1255                 iq->intr_params |= F_QINTR_CNT_EN;
1256                 iq->intr_pktc_idx = pktc_idx;
1257         }
1258         iq->qsize = roundup(qsize, 16);              /* See FW_IQ_CMD/iqsize */
1259         iq->esize = max(esize, 16);          /* See FW_IQ_CMD/iqesize */
1260 }
1261 
1262 static inline void
1263 init_fl(struct sge_fl *fl, uint16_t qsize)
1264 {
1265 
1266         fl->qsize = qsize;
1267         fl->allocb_fail = 0;
1268 }
1269 
1270 static inline void
1271 init_eq(struct adapter *sc, struct sge_eq *eq, uint16_t eqtype, uint16_t qsize,
1272     uint8_t tx_chan, uint16_t iqid)
1273 {
1274         struct sge *s = &sc->sge;
1275         uint32_t r;
1276 
1277         ASSERT(tx_chan < NCHAN);
1278         ASSERT(eqtype <= EQ_TYPEMASK);
1279 
1280         if (is_t5(sc->params.chip)) {
1281                 r = t4_read_reg(sc, A_SGE_EGRESS_QUEUES_PER_PAGE_PF);
1282                 r >>= S_QUEUESPERPAGEPF0 +
1283                     (S_QUEUESPERPAGEPF1 - S_QUEUESPERPAGEPF0) * sc->pf;
1284                 s->s_qpp = r & M_QUEUESPERPAGEPF0;
1285         }
1286 
1287         eq->flags = eqtype & EQ_TYPEMASK;


2326                 sd = &fl->sdesc[i];
2327 
2328                 if (sd->rxb != NULL) {
2329                         rxbuf_free(sd->rxb);
2330                         sd->rxb = NULL;
2331                 }
2332         }
2333 }
2334 
2335 /*
2336  * Note that fl->cidx and fl->offset are left unchanged in case of failure.
2337  */
2338 static mblk_t *
2339 get_fl_payload(struct adapter *sc, struct sge_fl *fl,
2340                uint32_t len_newbuf, int *fl_bufs_used)
2341 {
2342         struct mblk_pair frame = {0};
2343         struct rxbuf *rxb;
2344         mblk_t *m = NULL;
2345         uint_t nbuf = 0, len, copy, n;
2346         uint32_t cidx, offset, rcidx, roffset;
2347 
2348         /*
2349          * The SGE won't pack a new frame into the current buffer if the entire
2350          * payload doesn't fit in the remaining space.  Move on to the next buf
2351          * in that case.
2352          */
2353         rcidx = fl->cidx;
2354         roffset = fl->offset;
2355         if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
2356                 fl->offset = 0;
2357                 if (++fl->cidx == fl->cap)
2358                         fl->cidx = 0;
2359                 nbuf++;
2360         }
2361         cidx = fl->cidx;
2362         offset = fl->offset;
2363 
2364         len = G_RSPD_LEN(len_newbuf);   /* pktshift + payload length */
2365         copy = (len <= fl->copy_threshold);
2366         if (copy != 0) {
2367                 frame.head = m = allocb(len, BPRI_HI);
2368                 if (m == NULL) {
2369                         fl->allocb_fail++;
2370                         cmn_err(CE_WARN,"%s: mbuf allocation failure "
2371                                         "count = %llu", __func__,
2372                                         (unsigned long long)fl->allocb_fail);
2373                         fl->cidx = rcidx;
2374                         fl->offset = roffset;
2375                         return (NULL);
2376                 }
2377         }
2378 
2379         while (len) {
2380                 rxb = fl->sdesc[cidx].rxb;
2381                 n = min(len, rxb->buf_size - offset);
2382 
2383                 (void) ddi_dma_sync(rxb->dhdl, offset, n,
2384                     DDI_DMA_SYNC_FORKERNEL);
2385 
2386                 if (copy != 0)
2387                         bcopy(rxb->va + offset, m->b_wptr, n);
2388                 else {
2389                         m = desballoc((unsigned char *)rxb->va + offset, n,
2390                             BPRI_HI, &rxb->freefunc);
2391                         if (m == NULL) {
2392                                 fl->allocb_fail++;
2393                                 cmn_err(CE_WARN,
2394                                         "%s: mbuf allocation failure "
2395                                         "count = %llu", __func__,
2396                                         (unsigned long long)fl->allocb_fail);
2397                                 if (frame.head)
2398                                         freemsgchain(frame.head);
2399                                 fl->cidx = rcidx;
2400                                 fl->offset = roffset;
2401                                 return (NULL);
2402                         }
2403                         atomic_inc_uint(&rxb->ref_cnt);
2404                         if (frame.head != NULL)
2405                                 frame.tail->b_cont = m;
2406                         else
2407                                 frame.head = m;
2408                         frame.tail = m;
2409                 }
2410                 m->b_wptr += n;
2411                 len -= n;
2412                 offset += roundup(n, sc->sge.fl_align);
2413                 ASSERT(offset <= rxb->buf_size);
2414                 if (offset == rxb->buf_size) {
2415                         offset = 0;
2416                         if (++cidx == fl->cap)
2417                                 cidx = 0;
2418                         nbuf++;
2419                 }
2420         }
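The get_fl_payload() side of 9994 makes the function honor the comment above it: fl->cidx and fl->offset are snapshotted on entry (rcidx/roffset) and restored before returning NULL, and the new fl->allocb_fail counter records how often that happens. A condensed, self-contained sketch of that snapshot-and-rollback idea, with hypothetical types in place of struct sge_fl:

        #include <stdint.h>
        #include <stdlib.h>

        struct free_list {
                uint32_t cidx;          /* consumer index */
                uint32_t offset;        /* offset into the current buffer */
                uint32_t cap;           /* ring capacity */
                uint64_t allocb_fail;   /* allocation-failure counter */
        };

        static void *
        consume_frame(struct free_list *fl, size_t len)
        {
                uint32_t rcidx = fl->cidx;      /* snapshot for rollback */
                uint32_t roffset = fl->offset;
                void *m;

                /* Advance the consumer state while assembling the frame. */
                fl->offset += (uint32_t)len;
                if (fl->offset >= 4096) {       /* assumed buffer size for the sketch */
                        fl->offset = 0;
                        if (++fl->cidx == fl->cap)
                                fl->cidx = 0;
                }

                m = malloc(len);                /* stand-in for allocb()/desballoc() */
                if (m == NULL) {
                        fl->allocb_fail++;      /* make the failure visible */
                        fl->cidx = rcidx;       /* leave cidx and offset unchanged */
                        fl->offset = roffset;   /* exactly as the comment promises */
                        return (NULL);
                }
                return (m);
        }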