DPDK 25.03.0
rte_mbuf.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation.
 * Copyright 2014 6WIND S.A.
 */

#ifndef _RTE_MBUF_H_
#define _RTE_MBUF_H_

#include <stdint.h>

#include <rte_common.h>
#include <rte_config.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_branch_prediction.h>
#include <rte_mbuf_ptype.h>
#include <rte_mbuf_core.h>

#ifdef __cplusplus
extern "C" {
#endif

const char *rte_get_rx_ol_flag_name(uint64_t mask);

int rte_get_rx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

const char *rte_get_tx_ol_flag_name(uint64_t mask);

int rte_get_tx_ol_flag_list(uint64_t mask, char *buf, size_t buflen);

static inline void
rte_mbuf_prefetch_part1(struct rte_mbuf *m)
{
	rte_prefetch0(m);
}

static inline void
rte_mbuf_prefetch_part2(struct rte_mbuf *m)
{
#if RTE_CACHE_LINE_SIZE == 64
	rte_prefetch0(&m->cacheline1);
#else
	RTE_SET_USED(m);
#endif
}

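/*
 * Example (illustrative sketch, not part of this header): prefetch the mbuf
 * one iteration ahead of a packet-processing loop, pulling in the second
 * cache line as well when rearm/Tx fields will be touched. "burst" and
 * "nb_rx" are hypothetical names.
 */
static inline void
example_prefetch_burst(struct rte_mbuf **burst, uint16_t nb_rx)
{
	for (uint16_t i = 0; i < nb_rx; i++) {
		if (i + 1 < nb_rx) {
			rte_mbuf_prefetch_part1(burst[i + 1]);
			rte_mbuf_prefetch_part2(burst[i + 1]);
		}
		/* ... process burst[i] here ... */
	}
}
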

static inline uint16_t rte_pktmbuf_priv_size(struct rte_mempool *mp);

static inline rte_iova_t
rte_mbuf_iova_get(const struct rte_mbuf *m)
{
#if RTE_IOVA_IN_MBUF
	return m->buf_iova;
#else
	return (rte_iova_t)m->buf_addr;
#endif
}

static inline void
rte_mbuf_iova_set(struct rte_mbuf *m, rte_iova_t iova)
{
#if RTE_IOVA_IN_MBUF
	m->buf_iova = iova;
#else
	RTE_SET_USED(m);
	RTE_SET_USED(iova);
#endif
}

static inline rte_iova_t
rte_mbuf_data_iova(const struct rte_mbuf *mb)
{
	return rte_mbuf_iova_get(mb) + mb->data_off;
}

static inline rte_iova_t
rte_mbuf_data_iova_default(const struct rte_mbuf *mb)
{
	return rte_mbuf_iova_get(mb) + RTE_PKTMBUF_HEADROOM;
}

static inline struct rte_mbuf *
rte_mbuf_from_indirect(struct rte_mbuf *mi)
{
	return (struct rte_mbuf *)RTE_PTR_SUB(mi->buf_addr, sizeof(*mi) + mi->priv_size);
}

static inline char *
rte_mbuf_buf_addr(struct rte_mbuf *mb, struct rte_mempool *mp)
{
	return (char *)mb + sizeof(*mb) + rte_pktmbuf_priv_size(mp);
}

static inline char *
rte_mbuf_data_addr_default(struct rte_mbuf *mb)
{
	return rte_mbuf_buf_addr(mb, mb->pool) + RTE_PKTMBUF_HEADROOM;
}

static inline char *
rte_mbuf_to_baddr(struct rte_mbuf *md)
{
	return rte_mbuf_buf_addr(md, md->pool);
}

static inline void *
rte_mbuf_to_priv(struct rte_mbuf *m)
{
	return RTE_PTR_ADD(m, sizeof(struct rte_mbuf));
}

struct rte_pktmbuf_pool_private {
	uint16_t mbuf_data_room_size;
	uint16_t mbuf_priv_size;
	uint32_t flags;
};

static inline uint32_t
rte_pktmbuf_priv_flags(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->flags;
}

#define RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF (1 << 0)

#define RTE_MBUF_HAS_PINNED_EXTBUF(mb) \
	(rte_pktmbuf_priv_flags(mb->pool) & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF)

#ifdef RTE_LIBRTE_MBUF_DEBUG

#define __rte_mbuf_sanity_check(m, is_h) rte_mbuf_sanity_check(m, is_h)

#else /* RTE_LIBRTE_MBUF_DEBUG */

#define __rte_mbuf_sanity_check(m, is_h) do { } while (0)

#endif /* RTE_LIBRTE_MBUF_DEBUG */

#ifdef RTE_MBUF_REFCNT_ATOMIC

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return rte_atomic_load_explicit(&m->refcnt, rte_memory_order_relaxed);
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	rte_atomic_store_explicit(&m->refcnt, new_value, rte_memory_order_relaxed);
}

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return rte_atomic_fetch_add_explicit(&m->refcnt, value,
		rte_memory_order_acq_rel) + value;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	/*
	 * The atomic_add is an expensive operation, so we don't want to
	 * call it in the case where we know we are the unique holder of
	 * this mbuf (i.e. ref_cnt == 1). Otherwise, an atomic
	 * operation has to be used because concurrent accesses on the
	 * reference counter can occur.
	 */
	if (likely(rte_mbuf_refcnt_read(m) == 1)) {
		++value;
		rte_mbuf_refcnt_set(m, (uint16_t)value);
		return (uint16_t)value;
	}

	return __rte_mbuf_refcnt_update(m, value);
}

#else /* ! RTE_MBUF_REFCNT_ATOMIC */

/* internal */
static inline uint16_t
__rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	m->refcnt = (uint16_t)(m->refcnt + value);
	return m->refcnt;
}

static inline uint16_t
rte_mbuf_refcnt_update(struct rte_mbuf *m, int16_t value)
{
	return __rte_mbuf_refcnt_update(m, value);
}

static inline uint16_t
rte_mbuf_refcnt_read(const struct rte_mbuf *m)
{
	return m->refcnt;
}

static inline void
rte_mbuf_refcnt_set(struct rte_mbuf *m, uint16_t new_value)
{
	m->refcnt = new_value;
}

#endif /* RTE_MBUF_REFCNT_ATOMIC */

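/*
 * Example (illustrative sketch, not part of this header): take an extra
 * reference on an mbuf so two independent consumers can each release it once;
 * the segment only returns to its pool on the final release (for instance via
 * rte_pktmbuf_free()).
 */
static inline void
example_take_extra_ref(struct rte_mbuf *m)
{
	rte_mbuf_refcnt_update(m, 1);	/* refcnt goes from 1 to 2 */
}
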
static inline uint16_t
rte_mbuf_ext_refcnt_read(const struct rte_mbuf_ext_shared_info *shinfo)
{
	return rte_atomic_load_explicit(&shinfo->refcnt, rte_memory_order_relaxed);
}

static inline void
rte_mbuf_ext_refcnt_set(struct rte_mbuf_ext_shared_info *shinfo,
	uint16_t new_value)
{
	rte_atomic_store_explicit(&shinfo->refcnt, new_value, rte_memory_order_relaxed);
}

static inline uint16_t
rte_mbuf_ext_refcnt_update(struct rte_mbuf_ext_shared_info *shinfo,
	int16_t value)
{
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1)) {
		++value;
		rte_mbuf_ext_refcnt_set(shinfo, (uint16_t)value);
		return (uint16_t)value;
	}

	return rte_atomic_fetch_add_explicit(&shinfo->refcnt, value,
		rte_memory_order_acq_rel) + value;
}

#define RTE_MBUF_PREFETCH_TO_FREE(m) do {	\
	if ((m) != NULL)			\
		rte_prefetch0(m);		\
} while (0)


void
rte_mbuf_sanity_check(const struct rte_mbuf *m, int is_header);

int rte_mbuf_check(const struct rte_mbuf *m, int is_header,
	const char **reason);

static __rte_always_inline void
__rte_mbuf_raw_sanity_check(__rte_unused const struct rte_mbuf *m)
{
	RTE_ASSERT(rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(m->next == NULL);
	RTE_ASSERT(m->nb_segs == 1);
	RTE_ASSERT(!RTE_MBUF_CLONED(m));
	RTE_ASSERT(!RTE_MBUF_HAS_EXTBUF(m) ||
		   (RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
		    rte_mbuf_ext_refcnt_read(m->shinfo) == 1));
	__rte_mbuf_sanity_check(m, 0);
}

#define MBUF_RAW_ALLOC_CHECK(m) __rte_mbuf_raw_sanity_check(m)

static inline struct rte_mbuf *rte_mbuf_raw_alloc(struct rte_mempool *mp)
{
	union {
		void *ptr;
		struct rte_mbuf *m;
	} ret;

	if (rte_mempool_get(mp, &ret.ptr) < 0)
		return NULL;
	__rte_mbuf_raw_sanity_check(ret.m);
	return ret.m;
}

__rte_experimental
static __rte_always_inline int
rte_mbuf_raw_alloc_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
{
	int rc = rte_mempool_get_bulk(mp, (void **)mbufs, count);
	if (likely(rc == 0))
		for (unsigned int idx = 0; idx < count; idx++)
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
	return rc;
}

static __rte_always_inline void
rte_mbuf_raw_free(struct rte_mbuf *m)
{
	__rte_mbuf_raw_sanity_check(m);
	rte_mempool_put(m->pool, m);
}

__rte_experimental
static __rte_always_inline void
rte_mbuf_raw_free_bulk(struct rte_mempool *mp, struct rte_mbuf **mbufs, unsigned int count)
{
	for (unsigned int idx = 0; idx < count; idx++) {
		const struct rte_mbuf *m = mbufs[idx];
		RTE_ASSERT(m != NULL);
		RTE_ASSERT(m->pool == mp);
		__rte_mbuf_raw_sanity_check(m);
	}

	rte_mempool_put_bulk(mp, (void **)mbufs, count);
}

void rte_pktmbuf_init(struct rte_mempool *mp, void *opaque_arg,
	void *m, unsigned i);

void rte_pktmbuf_pool_init(struct rte_mempool *mp, void *opaque_arg);

struct rte_mempool *
rte_pktmbuf_pool_create(const char *name, unsigned n,
	unsigned cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id);

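/*
 * Example (illustrative sketch, not part of this header): create a pool of
 * 8191 mbufs with the default buffer size, usable from any NUMA socket. The
 * pool name and sizes are hypothetical; SOCKET_ID_ANY comes from the EAL
 * headers pulled in via rte_mempool.h.
 */
static inline struct rte_mempool *
example_create_pktmbuf_pool(void)
{
	return rte_pktmbuf_pool_create("example_pool", 8191, 256, 0,
		RTE_MBUF_DEFAULT_BUF_SIZE, SOCKET_ID_ANY);
}
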
struct rte_mempool *
rte_pktmbuf_pool_create_by_ops(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size, uint16_t data_room_size,
	int socket_id, const char *ops_name);

struct rte_pktmbuf_extmem {
	void *buf_ptr;
	rte_iova_t buf_iova;
	size_t buf_len;
	uint16_t elt_size;
};

struct rte_mempool *
rte_pktmbuf_pool_create_extbuf(const char *name, unsigned int n,
	unsigned int cache_size, uint16_t priv_size,
	uint16_t data_room_size, int socket_id,
	const struct rte_pktmbuf_extmem *ext_mem,
	unsigned int ext_num);

static inline uint16_t
rte_pktmbuf_data_room_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_data_room_size;
}

static inline uint16_t
rte_pktmbuf_priv_size(struct rte_mempool *mp)
{
	struct rte_pktmbuf_pool_private *mbp_priv;

	mbp_priv = (struct rte_pktmbuf_pool_private *)rte_mempool_get_priv(mp);
	return mbp_priv->mbuf_priv_size;
}

static inline void rte_pktmbuf_reset_headroom(struct rte_mbuf *m)
{
	m->data_off = (uint16_t)RTE_MIN((uint16_t)RTE_PKTMBUF_HEADROOM,
		(uint16_t)m->buf_len);
}

static inline void rte_pktmbuf_reset(struct rte_mbuf *m)
{
	m->next = NULL;
	m->pkt_len = 0;
	m->tx_offload = 0;
	m->vlan_tci = 0;
	m->vlan_tci_outer = 0;
	m->nb_segs = 1;
	m->port = RTE_MBUF_PORT_INVALID;

	m->ol_flags &= RTE_MBUF_F_EXTERNAL;
	m->packet_type = 0;
	rte_pktmbuf_reset_headroom(m);

	m->data_len = 0;
	__rte_mbuf_sanity_check(m, 1);
}

static inline struct rte_mbuf *rte_pktmbuf_alloc(struct rte_mempool *mp)
{
	struct rte_mbuf *m;
	if ((m = rte_mbuf_raw_alloc(mp)) != NULL)
		rte_pktmbuf_reset(m);
	return m;
}

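/*
 * Example (illustrative sketch, not part of this header): unlike
 * rte_mbuf_raw_alloc(), the mbuf returned by rte_pktmbuf_alloc() arrives
 * already reset: empty, a single segment, with the default headroom.
 */
static inline struct rte_mbuf *
example_alloc_packet(struct rte_mempool *mp)
{
	struct rte_mbuf *m = rte_pktmbuf_alloc(mp);

	if (m != NULL) {
		/* freshly reset: no data yet, one segment */
		RTE_ASSERT(m->data_len == 0 && m->nb_segs == 1);
	}
	return m;	/* NULL means the pool (and its caches) are exhausted */
}
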
static inline int rte_pktmbuf_alloc_bulk(struct rte_mempool *pool,
	struct rte_mbuf **mbufs, unsigned count)
{
	unsigned idx = 0;
	int rc;

	rc = rte_mempool_get_bulk(pool, (void **)mbufs, count);
	if (unlikely(rc))
		return rc;

	/* To understand duff's device on loop unwinding optimization, see
	 * https://en.wikipedia.org/wiki/Duff's_device.
	 * Here a while () loop is used rather than do {} while () to avoid an
	 * extra check if count is zero.
	 */
	switch (count % 4) {
	case 0:
		while (idx != count) {
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 3:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 2:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
	case 1:
			__rte_mbuf_raw_sanity_check(mbufs[idx]);
			rte_pktmbuf_reset(mbufs[idx]);
			idx++;
			/* fall-through */
		}
	}
	return 0;
}

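/*
 * Example (illustrative sketch, not part of this header): bulk allocation is
 * all-or-nothing, so a failure needs no partial cleanup. The burst size of 32
 * is a hypothetical choice.
 */
static inline int
example_alloc_burst(struct rte_mempool *mp, struct rte_mbuf *pkts[32])
{
	if (rte_pktmbuf_alloc_bulk(mp, pkts, 32) != 0)
		return -1;	/* nothing was allocated */
	return 0;	/* all 32 mbufs are valid and reset */
}
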
static inline struct rte_mbuf_ext_shared_info *
rte_pktmbuf_ext_shinfo_init_helper(void *buf_addr, uint16_t *buf_len,
	rte_mbuf_extbuf_free_callback_t free_cb, void *fcb_opaque)
{
	struct rte_mbuf_ext_shared_info *shinfo;
	void *buf_end = RTE_PTR_ADD(buf_addr, *buf_len);
	void *addr;

	addr = RTE_PTR_ALIGN_FLOOR(RTE_PTR_SUB(buf_end, sizeof(*shinfo)),
		sizeof(uintptr_t));
	if (addr <= buf_addr)
		return NULL;

	shinfo = (struct rte_mbuf_ext_shared_info *)addr;
	shinfo->free_cb = free_cb;
	shinfo->fcb_opaque = fcb_opaque;
	rte_mbuf_ext_refcnt_set(shinfo, 1);

	*buf_len = (uint16_t)RTE_PTR_DIFF(shinfo, buf_addr);
	return shinfo;
}

static inline void
rte_pktmbuf_attach_extbuf(struct rte_mbuf *m, void *buf_addr,
	rte_iova_t buf_iova, uint16_t buf_len,
	struct rte_mbuf_ext_shared_info *shinfo)
{
	/* mbuf should not be read-only */
	RTE_ASSERT(RTE_MBUF_DIRECT(m) && rte_mbuf_refcnt_read(m) == 1);
	RTE_ASSERT(shinfo->free_cb != NULL);

	m->buf_addr = buf_addr;
	rte_mbuf_iova_set(m, buf_iova);
	m->buf_len = buf_len;

	m->data_len = 0;
	m->data_off = 0;

	m->ol_flags |= RTE_MBUF_F_EXTERNAL;
	m->shinfo = shinfo;
}

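/*
 * Example (illustrative sketch, not part of this header): attach an external,
 * caller-owned buffer to an mbuf. The shared info is carved from the tail of
 * the buffer itself, shrinking buf_len accordingly; free_cb runs when the
 * last reference is dropped. All parameter names here are hypothetical.
 */
static inline int
example_attach_external(struct rte_mbuf *m, void *buf, uint16_t buf_len,
	rte_iova_t buf_iova, rte_mbuf_extbuf_free_callback_t free_cb)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	shinfo = rte_pktmbuf_ext_shinfo_init_helper(buf, &buf_len,
		free_cb, NULL);
	if (shinfo == NULL)
		return -1;	/* buffer too small for the shared info */

	rte_pktmbuf_attach_extbuf(m, buf, buf_iova, buf_len, shinfo);
	return 0;
}
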
#define rte_pktmbuf_detach_extbuf(m) rte_pktmbuf_detach(m)

static inline void
rte_mbuf_dynfield_copy(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
#if !RTE_IOVA_IN_MBUF
	mdst->dynfield2 = msrc->dynfield2;
#endif
	memcpy(&mdst->dynfield1, msrc->dynfield1, sizeof(mdst->dynfield1));
}

/* internal */
static inline void
__rte_pktmbuf_copy_hdr(struct rte_mbuf *mdst, const struct rte_mbuf *msrc)
{
	mdst->port = msrc->port;
	mdst->vlan_tci = msrc->vlan_tci;
	mdst->vlan_tci_outer = msrc->vlan_tci_outer;
	mdst->tx_offload = msrc->tx_offload;
	mdst->hash = msrc->hash;
	mdst->packet_type = msrc->packet_type;
	rte_mbuf_dynfield_copy(mdst, msrc);
}

static inline void rte_pktmbuf_attach(struct rte_mbuf *mi, struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_DIRECT(mi) &&
		rte_mbuf_refcnt_read(mi) == 1);

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		rte_mbuf_ext_refcnt_update(m->shinfo, 1);
		mi->ol_flags = m->ol_flags;
		mi->shinfo = m->shinfo;
	} else {
		/* if m is not direct, get the mbuf that embeds the data */
		rte_mbuf_refcnt_update(rte_mbuf_from_indirect(m), 1);
		mi->priv_size = m->priv_size;
		mi->ol_flags = m->ol_flags | RTE_MBUF_F_INDIRECT;
	}

	__rte_pktmbuf_copy_hdr(mi, m);

	mi->data_off = m->data_off;
	mi->data_len = m->data_len;
	rte_mbuf_iova_set(mi, rte_mbuf_iova_get(m));
	mi->buf_addr = m->buf_addr;
	mi->buf_len = m->buf_len;

	mi->next = NULL;
	mi->pkt_len = mi->data_len;
	mi->nb_segs = 1;

	__rte_mbuf_sanity_check(mi, 1);
	__rte_mbuf_sanity_check(m, 0);
}

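/*
 * Example (illustrative sketch, not part of this header): make an indirect
 * "clone" of a single segment by attaching a fresh mbuf to it; both mbufs
 * then reference the same data buffer. (rte_pktmbuf_clone() does this for a
 * whole packet chain.)
 */
static inline struct rte_mbuf *
example_clone_seg(struct rte_mbuf *m, struct rte_mempool *mp)
{
	struct rte_mbuf *mi = rte_pktmbuf_alloc(mp);

	if (mi != NULL)
		rte_pktmbuf_attach(mi, m);	/* bumps the data refcount */
	return mi;
}
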
static inline void
__rte_pktmbuf_free_extbuf(struct rte_mbuf *m)
{
	RTE_ASSERT(RTE_MBUF_HAS_EXTBUF(m));
	RTE_ASSERT(m->shinfo != NULL);

	if (rte_mbuf_ext_refcnt_update(m->shinfo, -1) == 0)
		m->shinfo->free_cb(m->buf_addr, m->shinfo->fcb_opaque);
}

static inline void
__rte_pktmbuf_free_direct(struct rte_mbuf *m)
{
	struct rte_mbuf *md;

	RTE_ASSERT(RTE_MBUF_CLONED(m));

	md = rte_mbuf_from_indirect(m);

	if (rte_mbuf_refcnt_update(md, -1) == 0) {
		md->next = NULL;
		md->nb_segs = 1;
		rte_mbuf_refcnt_set(md, 1);
		rte_mbuf_raw_free(md);
	}
}

static inline void rte_pktmbuf_detach(struct rte_mbuf *m)
{
	struct rte_mempool *mp = m->pool;
	uint32_t mbuf_size, buf_len;
	uint16_t priv_size;

	if (RTE_MBUF_HAS_EXTBUF(m)) {
		/*
		 * The mbuf has the external attached buffer,
		 * we should check the type of the memory pool where
		 * the mbuf was allocated from to detect the pinned
		 * external buffer.
		 */
		uint32_t flags = rte_pktmbuf_priv_flags(mp);

		if (flags & RTE_PKTMBUF_POOL_F_PINNED_EXT_BUF) {
			/*
			 * The pinned external buffer should not be
			 * detached from its backing mbuf, just exit.
			 */
			return;
		}
		__rte_pktmbuf_free_extbuf(m);
	} else {
		__rte_pktmbuf_free_direct(m);
	}
	priv_size = rte_pktmbuf_priv_size(mp);
	mbuf_size = (uint32_t)(sizeof(struct rte_mbuf) + priv_size);
	buf_len = rte_pktmbuf_data_room_size(mp);

	m->priv_size = priv_size;
	m->buf_addr = (char *)m + mbuf_size;
	rte_mbuf_iova_set(m, rte_mempool_virt2iova(m) + mbuf_size);
	m->buf_len = (uint16_t)buf_len;
	rte_pktmbuf_reset_headroom(m);
	m->data_len = 0;
	m->ol_flags = 0;
}

static inline int __rte_pktmbuf_pinned_extbuf_decref(struct rte_mbuf *m)
{
	struct rte_mbuf_ext_shared_info *shinfo;

	/* Clear flags, mbuf is being freed. */
	m->ol_flags = RTE_MBUF_F_EXTERNAL;
	shinfo = m->shinfo;

	/* Optimize for performance - do not dec/reinit */
	if (likely(rte_mbuf_ext_refcnt_read(shinfo) == 1))
		return 0;

	/*
	 * Direct usage of add primitive to avoid
	 * duplication of comparing with one.
	 */
	if (likely(rte_atomic_fetch_add_explicit(&shinfo->refcnt, -1,
		rte_memory_order_acq_rel) - 1))
		return 1;

	/* Reinitialize counter before mbuf freeing. */
	rte_mbuf_ext_refcnt_set(shinfo, 1);
	return 0;
}

static __rte_always_inline struct rte_mbuf *
rte_pktmbuf_prefree_seg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);

	if (likely(rte_mbuf_refcnt_read(m) == 1)) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL)
			m->next = NULL;
		if (m->nb_segs != 1)
			m->nb_segs = 1;

		return m;

	} else if (__rte_mbuf_refcnt_update(m, -1) == 0) {

		if (!RTE_MBUF_DIRECT(m)) {
			rte_pktmbuf_detach(m);
			if (RTE_MBUF_HAS_EXTBUF(m) &&
			    RTE_MBUF_HAS_PINNED_EXTBUF(m) &&
			    __rte_pktmbuf_pinned_extbuf_decref(m))
				return NULL;
		}

		if (m->next != NULL)
			m->next = NULL;
		if (m->nb_segs != 1)
			m->nb_segs = 1;
		rte_mbuf_refcnt_set(m, 1);

		return m;
	}
	return NULL;
}

static __rte_always_inline void
rte_pktmbuf_free_seg(struct rte_mbuf *m)
{
	m = rte_pktmbuf_prefree_seg(m);
	if (likely(m != NULL))
		rte_mbuf_raw_free(m);
}

static inline void rte_pktmbuf_free(struct rte_mbuf *m)
{
	struct rte_mbuf *m_next;

	if (m != NULL)
		__rte_mbuf_sanity_check(m, 1);

	while (m != NULL) {
		m_next = m->next;
		rte_pktmbuf_free_seg(m);
		m = m_next;
	}
}

void rte_pktmbuf_free_bulk(struct rte_mbuf **mbufs, unsigned int count);

struct rte_mbuf *
rte_pktmbuf_clone(struct rte_mbuf *md, struct rte_mempool *mp);

struct rte_mbuf *
rte_pktmbuf_copy(const struct rte_mbuf *m, struct rte_mempool *mp,
	uint32_t offset, uint32_t length);

static inline void rte_pktmbuf_refcnt_update(struct rte_mbuf *m, int16_t v)
{
	__rte_mbuf_sanity_check(m, 1);

	do {
		rte_mbuf_refcnt_update(m, v);
	} while ((m = m->next) != NULL);
}

static inline uint16_t rte_pktmbuf_headroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return m->data_off;
}

static inline uint16_t rte_pktmbuf_tailroom(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 0);
	return (uint16_t)(m->buf_len - rte_pktmbuf_headroom(m) -
		m->data_len);
}

static inline struct rte_mbuf *rte_pktmbuf_lastseg(struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	while (m->next != NULL)
		m = m->next;
	return m;
}

#define rte_pktmbuf_pkt_len(m) ((m)->pkt_len)

#define rte_pktmbuf_data_len(m) ((m)->data_len)

static inline char *rte_pktmbuf_prepend(struct rte_mbuf *m,
	uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > rte_pktmbuf_headroom(m)))
		return NULL;

	/* NB: elaborating the subtraction like this instead of using
	 * -= allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_off = (uint16_t)(m->data_off - len);
	m->data_len = (uint16_t)(m->data_len + len);
	m->pkt_len = (m->pkt_len + len);

	return (char *)m->buf_addr + m->data_off;
}

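/*
 * Example (illustrative sketch, not part of this header): claim 14 bytes of
 * headroom for an Ethernet header; returns NULL when the headroom is too
 * small, leaving the mbuf untouched.
 */
static inline void *
example_push_l2_header(struct rte_mbuf *m)
{
	return rte_pktmbuf_prepend(m, 14);	/* points at the new header */
}
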
static inline char *rte_pktmbuf_append(struct rte_mbuf *m, uint16_t len)
{
	void *tail;
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > rte_pktmbuf_tailroom(m_last)))
		return NULL;

	tail = (char *)m_last->buf_addr + m_last->data_off + m_last->data_len;
	m_last->data_len = (uint16_t)(m_last->data_len + len);
	m->pkt_len = (m->pkt_len + len);
	return (char *)tail;
}

static inline char *rte_pktmbuf_adj(struct rte_mbuf *m, uint16_t len)
{
	__rte_mbuf_sanity_check(m, 1);

	if (unlikely(len > m->data_len))
		return NULL;

	/* NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	m->data_len = (uint16_t)(m->data_len - len);
	m->data_off = (uint16_t)(m->data_off + len);
	m->pkt_len = (m->pkt_len - len);
	return (char *)m->buf_addr + m->data_off;
}

static inline int rte_pktmbuf_trim(struct rte_mbuf *m, uint16_t len)
{
	struct rte_mbuf *m_last;

	__rte_mbuf_sanity_check(m, 1);

	m_last = rte_pktmbuf_lastseg(m);
	if (unlikely(len > m_last->data_len))
		return -1;

	m_last->data_len = (uint16_t)(m_last->data_len - len);
	m->pkt_len = (m->pkt_len - len);
	return 0;
}

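/*
 * Example (illustrative sketch, not part of this header): strip a 14-byte
 * Ethernet header from the front and a 4-byte CRC from the tail.
 */
static inline int
example_strip_l2_and_crc(struct rte_mbuf *m)
{
	if (rte_pktmbuf_adj(m, 14) == NULL)
		return -1;	/* first segment shorter than 14 bytes */
	return rte_pktmbuf_trim(m, 4);	/* -1 if last segment < 4 bytes */
}
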
static inline int rte_pktmbuf_is_contiguous(const struct rte_mbuf *m)
{
	__rte_mbuf_sanity_check(m, 1);
	return m->nb_segs == 1;
}

const void *__rte_pktmbuf_read(const struct rte_mbuf *m, uint32_t off,
	uint32_t len, void *buf);

static inline const void *rte_pktmbuf_read(const struct rte_mbuf *m,
	uint32_t off, uint32_t len, void *buf)
{
	if (likely(off + len <= rte_pktmbuf_data_len(m)))
		return rte_pktmbuf_mtod_offset(m, char *, off);
	else
		return __rte_pktmbuf_read(m, off, len, buf);
}

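/*
 * Example (illustrative sketch, not part of this header): read 4 bytes at an
 * arbitrary offset; the data is copied into the caller's buffer only when the
 * requested span crosses a segment boundary, otherwise a direct pointer into
 * the first segment is returned.
 */
static inline const void *
example_read_4_bytes(const struct rte_mbuf *m, uint32_t off, uint8_t buf[4])
{
	return rte_pktmbuf_read(m, off, 4, buf);
}
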
static inline int rte_pktmbuf_chain(struct rte_mbuf *head, struct rte_mbuf *tail)
{
	struct rte_mbuf *cur_tail;

	/* Check for number-of-segments-overflow */
	if (head->nb_segs + tail->nb_segs > RTE_MBUF_MAX_NB_SEGS)
		return -EOVERFLOW;

	/* Chain 'tail' onto the old tail */
	cur_tail = rte_pktmbuf_lastseg(head);
	cur_tail->next = tail;

	/* accumulate number of segments and total length.
	 * NB: elaborating the addition like this instead of using
	 * += allows us to ensure the result type is uint16_t
	 * avoiding compiler warnings on gcc 8.1 at least */
	head->nb_segs = (uint16_t)(head->nb_segs + tail->nb_segs);
	head->pkt_len += tail->pkt_len;

	/* pkt_len is only set in the head */
	tail->pkt_len = tail->data_len;

	return 0;
}

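/*
 * Example (illustrative sketch, not part of this header): append packet "b"
 * to packet "a". On success "a" owns the whole chain; on failure both packets
 * are left unmodified and still owned by the caller.
 */
static inline int
example_join_packets(struct rte_mbuf *a, struct rte_mbuf *b)
{
	int rc = rte_pktmbuf_chain(a, b);

	if (rc != 0)
		return rc;	/* -EOVERFLOW: too many segments */
	return 0;
}
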
static __rte_always_inline uint64_t
rte_mbuf_tx_offload(uint64_t il2, uint64_t il3, uint64_t il4, uint64_t tso,
	uint64_t ol3, uint64_t ol2, uint64_t unused)
{
	return il2 << RTE_MBUF_L2_LEN_OFS |
		il3 << RTE_MBUF_L3_LEN_OFS |
		il4 << RTE_MBUF_L4_LEN_OFS |
		tso << RTE_MBUF_TSO_SEGSZ_OFS |
		ol3 << RTE_MBUF_OUTL3_LEN_OFS |
		ol2 << RTE_MBUF_OUTL2_LEN_OFS |
		unused << RTE_MBUF_TXOFLD_UNUSED_OFS;
}

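/*
 * Example (illustrative sketch, not part of this header): request IPv4 and
 * TCP checksum offload for a plain Ethernet/IPv4/TCP frame. The header
 * lengths (14/20/20) are hypothetical; no TSO and no outer headers are used.
 */
static inline void
example_request_tx_cksum(struct rte_mbuf *m)
{
	m->ol_flags |= RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IP_CKSUM |
		RTE_MBUF_F_TX_TCP_CKSUM;
	m->tx_offload = rte_mbuf_tx_offload(14, 20, 20, 0, 0, 0, 0);
}
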
static inline int
rte_validate_tx_offload(const struct rte_mbuf *m)
{
	uint64_t ol_flags = m->ol_flags;

	/* Does packet set any of available offloads? */
	if (!(ol_flags & RTE_MBUF_F_TX_OFFLOAD_MASK))
		return 0;

	/* IP checksum can be counted only for IPv4 packet */
	if ((ol_flags & RTE_MBUF_F_TX_IP_CKSUM) && (ol_flags & RTE_MBUF_F_TX_IPV6))
		return -EINVAL;

	/* IP type not set when required */
	if (ol_flags & (RTE_MBUF_F_TX_L4_MASK | RTE_MBUF_F_TX_TCP_SEG))
		if (!(ol_flags & (RTE_MBUF_F_TX_IPV4 | RTE_MBUF_F_TX_IPV6)))
			return -EINVAL;

	/* Check requirements for TSO packet */
	if (ol_flags & RTE_MBUF_F_TX_TCP_SEG)
		if ((m->tso_segsz == 0) ||
			((ol_flags & RTE_MBUF_F_TX_IPV4) &&
			!(ol_flags & RTE_MBUF_F_TX_IP_CKSUM)))
			return -EINVAL;

	/* RTE_MBUF_F_TX_OUTER_IP_CKSUM set for non outer IPv4 packet. */
	if ((ol_flags & RTE_MBUF_F_TX_OUTER_IP_CKSUM) &&
		!(ol_flags & RTE_MBUF_F_TX_OUTER_IPV4))
		return -EINVAL;

	return 0;
}

int __rte_pktmbuf_linearize(struct rte_mbuf *mbuf);

static inline int
rte_pktmbuf_linearize(struct rte_mbuf *mbuf)
{
	if (rte_pktmbuf_is_contiguous(mbuf))
		return 0;
	return __rte_pktmbuf_linearize(mbuf);
}

void rte_pktmbuf_dump(FILE *f, const struct rte_mbuf *m, unsigned dump_len);

static inline uint32_t
rte_mbuf_sched_queue_get(const struct rte_mbuf *m)
{
	return m->hash.sched.queue_id;
}

static inline uint8_t
rte_mbuf_sched_traffic_class_get(const struct rte_mbuf *m)
{
	return m->hash.sched.traffic_class;
}

static inline uint8_t
rte_mbuf_sched_color_get(const struct rte_mbuf *m)
{
	return m->hash.sched.color;
}

static inline void
rte_mbuf_sched_get(const struct rte_mbuf *m, uint32_t *queue_id,
	uint8_t *traffic_class,
	uint8_t *color)
{
	struct rte_mbuf_sched sched = m->hash.sched;

	*queue_id = sched.queue_id;
	*traffic_class = sched.traffic_class;
	*color = sched.color;
}

static inline void
rte_mbuf_sched_queue_set(struct rte_mbuf *m, uint32_t queue_id)
{
	m->hash.sched.queue_id = queue_id;
}

static inline void
rte_mbuf_sched_traffic_class_set(struct rte_mbuf *m, uint8_t traffic_class)
{
	m->hash.sched.traffic_class = traffic_class;
}

static inline void
rte_mbuf_sched_color_set(struct rte_mbuf *m, uint8_t color)
{
	m->hash.sched.color = color;
}

static inline void
rte_mbuf_sched_set(struct rte_mbuf *m, uint32_t queue_id,
	uint8_t traffic_class,
	uint8_t color)
{
	m->hash.sched = (struct rte_mbuf_sched){
		.queue_id = queue_id,
		.traffic_class = traffic_class,
		.color = color,
		.reserved = 0,
	};
}

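/*
 * Example (illustrative sketch, not part of this header): tag an mbuf for a
 * hierarchical scheduler with a hypothetical queue, traffic class and color.
 * The single-store setter avoids three separate read-modify-writes.
 */
static inline void
example_tag_for_scheduler(struct rte_mbuf *m)
{
	rte_mbuf_sched_set(m, 0 /* queue_id */, 1 /* traffic_class */,
		0 /* color: green */);
}
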
#ifdef __cplusplus
}
#endif

#endif /* _RTE_MBUF_H_ */