DPDK  25.03.0
rte_eventdev.h
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016 Cavium, Inc.
 * Copyright(c) 2016-2018 Intel Corporation.
 * Copyright 2016 NXP
 * All rights reserved.
 */

#ifndef _RTE_EVENTDEV_H_
#define _RTE_EVENTDEV_H_

#include <rte_compat.h>
#include <rte_common.h>
#include <rte_errno.h>
#include <rte_mbuf_pool_ops.h>
#include <rte_mempool.h>

#include "rte_eventdev_trace_fp.h"

struct rte_mbuf; /* we just use mbuf pointers; no need to include rte_mbuf.h */
struct rte_event;

/* Event device capability bitmap flags */
#define RTE_EVENT_DEV_CAP_QUEUE_QOS (1ULL << 0)

#define RTE_EVENT_DEV_CAP_EVENT_QOS (1ULL << 1)

#define RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED (1ULL << 2)

#define RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES (1ULL << 3)

#define RTE_EVENT_DEV_CAP_BURST_MODE (1ULL << 4)

#define RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE (1ULL << 5)

#define RTE_EVENT_DEV_CAP_NONSEQ_MODE (1ULL << 6)

#define RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK (1ULL << 7)

#define RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT (1ULL << 8)

#define RTE_EVENT_DEV_CAP_CARRY_FLOW_ID (1ULL << 9)

#define RTE_EVENT_DEV_CAP_MAINTENANCE_FREE (1ULL << 10)

#define RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR (1ULL << 11)

#define RTE_EVENT_DEV_CAP_PROFILE_LINK (1ULL << 12)

#define RTE_EVENT_DEV_CAP_ATOMIC (1ULL << 13)

#define RTE_EVENT_DEV_CAP_ORDERED (1ULL << 14)

#define RTE_EVENT_DEV_CAP_PARALLEL (1ULL << 15)

#define RTE_EVENT_DEV_CAP_INDEPENDENT_ENQ (1ULL << 16)

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE (1ULL << 17)

#define RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE (1ULL << 18)

#define RTE_EVENT_DEV_CAP_PER_PORT_PRESCHEDULE (1ULL << 19)

#define RTE_EVENT_DEV_CAP_PRESCHEDULE_EXPLICIT (1ULL << 20)

/* Event device priority levels */
#define RTE_EVENT_DEV_PRIORITY_HIGHEST 0

#define RTE_EVENT_DEV_PRIORITY_NORMAL 128

#define RTE_EVENT_DEV_PRIORITY_LOWEST 255

/* Event queue scheduling weights */
#define RTE_EVENT_QUEUE_WEIGHT_HIGHEST 255

#define RTE_EVENT_QUEUE_WEIGHT_LOWEST 0

/* Event queue scheduling affinity */
#define RTE_EVENT_QUEUE_AFFINITY_HIGHEST 255

#define RTE_EVENT_QUEUE_AFFINITY_LOWEST 0

uint8_t
rte_event_dev_count(void);

int
rte_event_dev_get_dev_id(const char *name);

int
rte_event_dev_socket_id(uint8_t dev_id);

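/*
 * Example (illustrative sketch, not part of the upstream header): pick the
 * first event device instantiated on a given NUMA socket. The helper name
 * is illustrative.
 */
static inline int
app_pick_eventdev_on_socket(int socket_id)
{
        uint8_t dev_id, count = rte_event_dev_count();

        for (dev_id = 0; dev_id < count; dev_id++)
                if (rte_event_dev_socket_id(dev_id) == socket_id)
                        return dev_id;
        return -1; /* no event device on this socket */
}
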
struct rte_event_dev_info {
        const char *driver_name;
        struct rte_device *dev;
        uint32_t min_dequeue_timeout_ns;
        uint32_t max_dequeue_timeout_ns;
        uint32_t dequeue_timeout_ns;
        uint8_t max_event_queues;
        uint32_t max_event_queue_flows;
        uint8_t max_event_queue_priority_levels;
        uint8_t max_event_priority_levels;
        uint8_t max_event_ports;
        uint8_t max_event_port_dequeue_depth;
        uint32_t max_event_port_enqueue_depth;
        uint8_t max_event_port_links;
        int32_t max_num_events;
        uint32_t event_dev_cap;
        uint8_t max_single_link_event_port_queue_pairs;
        uint8_t max_profiles_per_port;
};

int
rte_event_dev_info_get(uint8_t dev_id, struct rte_event_dev_info *dev_info);

#define RTE_EVENT_DEV_ATTR_PORT_COUNT 0

#define RTE_EVENT_DEV_ATTR_QUEUE_COUNT 1

#define RTE_EVENT_DEV_ATTR_STARTED 2

int
rte_event_dev_attr_get(uint8_t dev_id, uint32_t attr_id,
                uint32_t *attr_value);

/* Event device configuration bitmap flags */
#define RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT (1ULL << 0)

/* Event device pre-schedule types */
enum rte_event_dev_preschedule_type {
        RTE_EVENT_PRESCHEDULE_NONE,
        RTE_EVENT_PRESCHEDULE,
        RTE_EVENT_PRESCHEDULE_ADAPTIVE,
};

struct rte_event_dev_config {
        uint32_t dequeue_timeout_ns;
        int32_t nb_events_limit;
        uint8_t nb_event_queues;
        uint8_t nb_event_ports;
        uint32_t nb_event_queue_flows;
        uint32_t nb_event_port_dequeue_depth;
        uint32_t nb_event_port_enqueue_depth;
        uint32_t event_dev_cfg;
        uint8_t nb_single_link_event_port_queues;
        enum rte_event_dev_preschedule_type preschedule_type;
};

int
rte_event_dev_configure(uint8_t dev_id,
                const struct rte_event_dev_config *dev_conf);

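/*
 * Example (illustrative sketch, not part of the upstream header): configure
 * a device within its advertised limits. The helper name and the requested
 * counts are illustrative; fields left at zero keep their defaults.
 */
static inline int
app_eventdev_configure(uint8_t dev_id, uint8_t nb_queues, uint8_t nb_ports)
{
        struct rte_event_dev_info info;
        struct rte_event_dev_config cfg = {0};
        int ret;

        ret = rte_event_dev_info_get(dev_id, &info);
        if (ret < 0)
                return ret;

        cfg.nb_event_queues = RTE_MIN(nb_queues, info.max_event_queues);
        cfg.nb_event_ports = RTE_MIN(nb_ports, info.max_event_ports);
        cfg.nb_events_limit = info.max_num_events;
        cfg.nb_event_queue_flows = info.max_event_queue_flows;
        cfg.nb_event_port_dequeue_depth = info.max_event_port_dequeue_depth;
        cfg.nb_event_port_enqueue_depth = info.max_event_port_enqueue_depth;
        cfg.dequeue_timeout_ns = info.dequeue_timeout_ns;

        return rte_event_dev_configure(dev_id, &cfg);
}
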
/* Event queue specific APIs */

/* Event queue configuration bitmap flags */
#define RTE_EVENT_QUEUE_CFG_ALL_TYPES (1ULL << 0)

#define RTE_EVENT_QUEUE_CFG_SINGLE_LINK (1ULL << 1)

struct rte_event_queue_conf {
        uint32_t nb_atomic_flows;
        uint32_t nb_atomic_order_sequences;
        uint32_t event_queue_cfg;
        uint8_t schedule_type;
        uint8_t priority;
        uint8_t weight;
        uint8_t affinity;
};

int
rte_event_queue_default_conf_get(uint8_t dev_id, uint8_t queue_id,
                struct rte_event_queue_conf *queue_conf);

int
rte_event_queue_setup(uint8_t dev_id, uint8_t queue_id,
                const struct rte_event_queue_conf *queue_conf);

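/*
 * Example (illustrative sketch, not part of the upstream header): set up a
 * queue starting from the driver defaults and raising only its priority.
 * The helper name is illustrative.
 */
static inline int
app_setup_high_prio_queue(uint8_t dev_id, uint8_t queue_id)
{
        struct rte_event_queue_conf qconf;
        int ret;

        ret = rte_event_queue_default_conf_get(dev_id, queue_id, &qconf);
        if (ret < 0)
                return ret;

        /* Honored only when the device reports RTE_EVENT_DEV_CAP_QUEUE_QOS. */
        qconf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;

        return rte_event_queue_setup(dev_id, queue_id, &qconf);
}
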
#define RTE_EVENT_QUEUE_ATTR_PRIORITY 0

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_FLOWS 1

#define RTE_EVENT_QUEUE_ATTR_NB_ATOMIC_ORDER_SEQUENCES 2

#define RTE_EVENT_QUEUE_ATTR_EVENT_QUEUE_CFG 3

#define RTE_EVENT_QUEUE_ATTR_SCHEDULE_TYPE 4

#define RTE_EVENT_QUEUE_ATTR_WEIGHT 5

#define RTE_EVENT_QUEUE_ATTR_AFFINITY 6

int
rte_event_queue_attr_get(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
                uint32_t *attr_value);

int
rte_event_queue_attr_set(uint8_t dev_id, uint8_t queue_id, uint32_t attr_id,
                uint64_t attr_value);

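/*
 * Example (illustrative sketch, not part of the upstream header): change a
 * queue attribute on a running device and read it back. Runtime changes
 * require the RTE_EVENT_DEV_CAP_RUNTIME_QUEUE_ATTR capability.
 */
static inline int
app_queue_set_priority(uint8_t dev_id, uint8_t queue_id, uint8_t prio)
{
        uint32_t readback;
        int ret;

        ret = rte_event_queue_attr_set(dev_id, queue_id,
                        RTE_EVENT_QUEUE_ATTR_PRIORITY, prio);
        if (ret < 0)
                return ret; /* e.g. unsupported without the capability */

        return rte_event_queue_attr_get(dev_id, queue_id,
                        RTE_EVENT_QUEUE_ATTR_PRIORITY, &readback);
}
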
/* Event port specific APIs */

/* Event port configuration bitmap flags */
#define RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL (1ULL << 0)

#define RTE_EVENT_PORT_CFG_SINGLE_LINK (1ULL << 1)

#define RTE_EVENT_PORT_CFG_HINT_PRODUCER (1ULL << 2)

#define RTE_EVENT_PORT_CFG_HINT_CONSUMER (1ULL << 3)

#define RTE_EVENT_PORT_CFG_HINT_WORKER (1ULL << 4)

#define RTE_EVENT_PORT_CFG_INDEPENDENT_ENQ (1ULL << 5)

struct rte_event_port_conf {
        int32_t new_event_threshold;
        uint16_t dequeue_depth;
        uint16_t enqueue_depth;
        uint32_t event_port_cfg;
};

int
rte_event_port_default_conf_get(uint8_t dev_id, uint8_t port_id,
                struct rte_event_port_conf *port_conf);

int
rte_event_port_setup(uint8_t dev_id, uint8_t port_id,
                const struct rte_event_port_conf *port_conf);

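/*
 * Example (illustrative sketch, not part of the upstream header): set up a
 * worker port from the driver defaults, hinting the scheduler that this
 * port will both dequeue and enqueue. The helper name is illustrative.
 */
static inline int
app_setup_worker_port(uint8_t dev_id, uint8_t port_id)
{
        struct rte_event_port_conf pconf;
        int ret;

        ret = rte_event_port_default_conf_get(dev_id, port_id, &pconf);
        if (ret < 0)
                return ret;

        pconf.event_port_cfg |= RTE_EVENT_PORT_CFG_HINT_WORKER;

        return rte_event_port_setup(dev_id, port_id, &pconf);
}
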
typedef void (*rte_eventdev_port_flush_t)(uint8_t dev_id,
                struct rte_event event, void *arg);

void
rte_event_port_quiesce(uint8_t dev_id, uint8_t port_id,
                rte_eventdev_port_flush_t release_cb, void *args);

#define RTE_EVENT_PORT_ATTR_ENQ_DEPTH 0

#define RTE_EVENT_PORT_ATTR_DEQ_DEPTH 1

#define RTE_EVENT_PORT_ATTR_NEW_EVENT_THRESHOLD 2

#define RTE_EVENT_PORT_ATTR_IMPLICIT_RELEASE_DISABLE 3

#define RTE_EVENT_PORT_ATTR_INDEPENDENT_ENQ 4

int
rte_event_port_attr_get(uint8_t dev_id, uint8_t port_id, uint32_t attr_id,
                uint32_t *attr_value);

int
rte_event_dev_start(uint8_t dev_id);

void
rte_event_dev_stop(uint8_t dev_id);

typedef void (*rte_eventdev_stop_flush_t)(uint8_t dev_id,
                struct rte_event event, void *arg);

int
rte_event_dev_stop_flush_callback_register(uint8_t dev_id,
                rte_eventdev_stop_flush_t callback, void *userdata);

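/*
 * Example (illustrative sketch, not part of the upstream header): count the
 * events flushed from the device by rte_event_dev_stop(). A real callback
 * would also release whatever the events carry (mbufs, crypto ops, ...).
 */
static inline void
app_stop_flush_cb(uint8_t dev_id, struct rte_event event, void *arg)
{
        uint64_t *flushed = arg; /* registered as the userdata pointer */

        RTE_SET_USED(dev_id);
        RTE_SET_USED(event);
        (*flushed)++;
}

/* Registration, once per device before the first stop:
 *      static uint64_t flushed;
 *      rte_event_dev_stop_flush_callback_register(dev_id,
 *                      app_stop_flush_cb, &flushed);
 */
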
int
rte_event_dev_close(uint8_t dev_id);

struct __rte_aligned(16) rte_event_vector {
        uint16_t nb_elem;
        uint16_t elem_offset : 12;
        uint16_t rsvd : 3;
        uint16_t attr_valid : 1;
        union {
                /* Used by Rx/Tx adapter.
                 * Indicates that all the elements in this vector belong to
                 * the same port and queue pair when originating from the Rx
                 * adapter; valid only when the event type is ETHDEV_VECTOR
                 * or ETH_RX_ADAPTER_VECTOR.
                 * Can also be used to indicate to the Tx adapter the
                 * destination port and queue of the mbufs in the vector.
                 */
                struct {
                        uint16_t port;
                        uint16_t queue;
                };
        };
        uint64_t impl_opaque;

/* Empty structures do not have zero size in C++, leading to compilation
 * errors with clang about the structure having different sizes in C and C++.
 * Since these are all zero-sized arrays, we can omit the "union" wrapper for
 * C++ builds, removing the warning.
 */
#ifndef __cplusplus
        union __rte_aligned(16) {
#endif
                struct rte_mbuf *mbufs[0];
                void *ptrs[0];
                uint64_t u64s[0];
#ifndef __cplusplus
        };
#endif
};

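/*
 * Example (illustrative sketch, not part of the upstream header): visit the
 * mbufs carried by an event vector, assuming the usual semantics that
 * nb_elem valid elements start at elem_offset. The callback type and helper
 * name are illustrative.
 */
static inline void
app_event_vector_foreach(struct rte_event_vector *vec,
                void (*fn)(struct rte_mbuf *m, void *ctx), void *ctx)
{
        uint16_t i;

        for (i = 0; i < vec->nb_elem; i++)
                fn(vec->mbufs[vec->elem_offset + i], ctx);
}
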
/* Scheduler type definitions */
#define RTE_SCHED_TYPE_ORDERED 0

#define RTE_SCHED_TYPE_ATOMIC 1

#define RTE_SCHED_TYPE_PARALLEL 2

/* Event types to classify the event source */
#define RTE_EVENT_TYPE_ETHDEV 0x0

#define RTE_EVENT_TYPE_CRYPTODEV 0x1

#define RTE_EVENT_TYPE_TIMER 0x2

#define RTE_EVENT_TYPE_CPU 0x3

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER 0x4

#define RTE_EVENT_TYPE_DMADEV 0x5

#define RTE_EVENT_TYPE_VECTOR 0x8

#define RTE_EVENT_TYPE_ETHDEV_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETHDEV)

#define RTE_EVENT_TYPE_CPU_VECTOR (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CPU)

#define RTE_EVENT_TYPE_ETH_RX_ADAPTER_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_ETH_RX_ADAPTER)

#define RTE_EVENT_TYPE_CRYPTODEV_VECTOR \
        (RTE_EVENT_TYPE_VECTOR | RTE_EVENT_TYPE_CRYPTODEV)

#define RTE_EVENT_TYPE_MAX 0x10

/* Event enqueue operations */
#define RTE_EVENT_OP_NEW 0

#define RTE_EVENT_OP_FORWARD 1

#define RTE_EVENT_OP_RELEASE 2

struct rte_event {
        /* WORD0 */
        union {
                uint64_t event;
                struct {
                        uint32_t flow_id:20;
                        uint32_t sub_event_type:8;
                        uint32_t event_type:4;
                        uint8_t op:2;
                        uint8_t rsvd:4;
                        uint8_t sched_type:2;
                        uint8_t queue_id;
                        uint8_t priority;
                        uint8_t impl_opaque;
                };
        };
        /* WORD1 */
        union {
                uint64_t u64;
                void *event_ptr;
                struct rte_mbuf *mbuf;
                struct rte_event_vector *vec;
        };
};

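/*
 * Example (illustrative sketch, not part of the upstream header):
 * initialize a new event carrying an mbuf, destined for queue 0 with atomic
 * scheduling. The helper name is illustrative.
 */
static inline void
app_make_new_event(struct rte_event *ev, struct rte_mbuf *m, uint32_t flow)
{
        ev->event = 0; /* clear all of WORD0, including impl_opaque */
        ev->flow_id = flow;
        ev->op = RTE_EVENT_OP_NEW;
        ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
        ev->queue_id = 0;
        ev->event_type = RTE_EVENT_TYPE_CPU;
        ev->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
        ev->mbuf = m;
}
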
/* Ethdev Rx adapter capability bitmap flags */
#define RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_MULTI_EVENTQ 0x2

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_OVERRIDE_FLOW_ID 0x4

#define RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR 0x8

int
rte_event_eth_rx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
                uint32_t *caps);

/* Timer adapter capability bitmap flags */
#define RTE_EVENT_TIMER_ADAPTER_CAP_INTERNAL_PORT (1ULL << 0)

#define RTE_EVENT_TIMER_ADAPTER_CAP_PERIODIC (1ULL << 1)

int
rte_event_timer_adapter_caps_get(uint8_t dev_id, uint32_t *caps);

/* Crypto adapter capability bitmap flags */
#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_QP_EV_BIND 0x4

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA 0x8

#define RTE_EVENT_CRYPTO_ADAPTER_CAP_EVENT_VECTOR 0x10

int
rte_event_crypto_adapter_caps_get(uint8_t dev_id, uint8_t cdev_id,
                uint32_t *caps);

/* DMA adapter capability bitmap flags */
#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_NEW 0x1

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_OP_FWD 0x2

#define RTE_EVENT_DMA_ADAPTER_CAP_INTERNAL_PORT_VCHAN_EV_BIND 0x4

__rte_experimental
int
rte_event_dma_adapter_caps_get(uint8_t dev_id, uint8_t dmadev_id, uint32_t *caps);

/* Ethdev Tx adapter capability bitmap flags */
#define RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT 0x1

#define RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR 0x2

int
rte_event_eth_tx_adapter_caps_get(uint8_t dev_id, uint16_t eth_port_id,
                uint32_t *caps);

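/*
 * Example (illustrative sketch, not part of the upstream header): decide at
 * init time whether an Rx adapter between this event device and an ethdev
 * needs a service core, i.e. whether the combination lacks an internal
 * port. The same pattern applies to the timer, crypto, DMA and Tx adapter
 * caps queries above.
 */
static inline int
app_rx_adapter_needs_service(uint8_t dev_id, uint16_t eth_port_id)
{
        uint32_t caps = 0;

        if (rte_event_eth_rx_adapter_caps_get(dev_id, eth_port_id, &caps) < 0)
                return 1; /* be conservative on error */
        return (caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT) == 0;
}
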
int
rte_event_dequeue_timeout_ticks(uint8_t dev_id, uint64_t ns,
                uint64_t *timeout_ticks);

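/*
 * Example (illustrative sketch, not part of the upstream header): convert a
 * 100 microsecond timeout into the device-specific ticks expected by
 * rte_event_dequeue_burst(), falling back to non-blocking dequeue when the
 * device cannot express the timeout.
 */
static inline uint64_t
app_dequeue_ticks_100us(uint8_t dev_id)
{
        uint64_t ticks;

        if (rte_event_dequeue_timeout_ticks(dev_id, 100 * 1000, &ticks) < 0)
                return 0;
        return ticks;
}
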
int
rte_event_port_link(uint8_t dev_id, uint8_t port_id,
                const uint8_t queues[], const uint8_t priorities[],
                uint16_t nb_links);

int
rte_event_port_unlink(uint8_t dev_id, uint8_t port_id,
                uint8_t queues[], uint16_t nb_unlinks);

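/*
 * Example (illustrative sketch, not part of the upstream header): link a
 * single queue to a port at normal service priority. The helper name is
 * illustrative.
 */
static inline int
app_link_one_queue(uint8_t dev_id, uint8_t port_id, uint8_t queue_id)
{
        const uint8_t queues[] = { queue_id };
        const uint8_t priorities[] = { RTE_EVENT_DEV_PRIORITY_NORMAL };

        /* Returns the number of links actually established; on a short
         * count, rte_errno reports the reason. */
        if (rte_event_port_link(dev_id, port_id, queues, priorities, 1) != 1)
                return -rte_errno;
        return 0;
}
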
__rte_experimental
int
rte_event_port_profile_links_set(uint8_t dev_id, uint8_t port_id, const uint8_t queues[],
                const uint8_t priorities[], uint16_t nb_links, uint8_t profile_id);

__rte_experimental
int
rte_event_port_profile_unlink(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
                uint16_t nb_unlinks, uint8_t profile_id);

int
rte_event_port_unlinks_in_progress(uint8_t dev_id, uint8_t port_id);

int
rte_event_port_links_get(uint8_t dev_id, uint8_t port_id,
                uint8_t queues[], uint8_t priorities[]);

__rte_experimental
int
rte_event_port_profile_links_get(uint8_t dev_id, uint8_t port_id, uint8_t queues[],
                uint8_t priorities[], uint8_t profile_id);

int
rte_event_dev_service_id_get(uint8_t dev_id, uint32_t *service_id);

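/*
 * Example (illustrative sketch, not part of the upstream header): software
 * schedulers expose a service that some lcore must run. The rte_service_*
 * calls belong to rte_service.h, which the application must include itself;
 * they are shown here only in a comment.
 */
static inline int
app_map_scheduler_service(uint8_t dev_id, uint32_t lcore_id)
{
        uint32_t service_id;

        RTE_SET_USED(lcore_id);

        if (rte_event_dev_service_id_get(dev_id, &service_id) != 0)
                return 0; /* no service: the device schedules on its own */

        /* Map and start the service, e.g.:
         *      rte_service_lcore_add(lcore_id);
         *      rte_service_map_lcore_set(service_id, lcore_id, 1);
         *      rte_service_runstate_set(service_id, 1);
         *      rte_service_lcore_start(lcore_id);
         */
        return 0;
}
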
int
rte_event_dev_dump(uint8_t dev_id, FILE *f);

#define RTE_EVENT_DEV_XSTATS_NAME_SIZE 64

enum rte_event_dev_xstats_mode {
        RTE_EVENT_DEV_XSTATS_DEVICE,
        RTE_EVENT_DEV_XSTATS_PORT,
        RTE_EVENT_DEV_XSTATS_QUEUE,
};

struct rte_event_dev_xstats_name {
        char name[RTE_EVENT_DEV_XSTATS_NAME_SIZE];
};

int
rte_event_dev_xstats_names_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id,
                struct rte_event_dev_xstats_name *xstats_names,
                uint64_t *ids,
                unsigned int size);

int
rte_event_dev_xstats_get(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                uint8_t queue_port_id,
                const uint64_t ids[],
                uint64_t values[], unsigned int n);

uint64_t
rte_event_dev_xstats_by_name_get(uint8_t dev_id, const char *name,
                uint64_t *id);

int
rte_event_dev_xstats_reset(uint8_t dev_id,
                enum rte_event_dev_xstats_mode mode,
                int16_t queue_port_id,
                const uint64_t ids[],
                uint32_t nb_ids);

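/*
 * Example (illustrative sketch, not part of the upstream header): snapshot
 * up to 64 device-scope xstats. The array size is arbitrary; applications
 * that need every counter should size their arrays from a first call and
 * retry with larger buffers.
 */
static inline int
app_snapshot_device_xstats(uint8_t dev_id)
{
        struct rte_event_dev_xstats_name names[64];
        uint64_t ids[64], values[64];
        int n;

        n = rte_event_dev_xstats_names_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                        0, names, ids, RTE_DIM(names));
        if (n <= 0)
                return n;
        if (n > (int)RTE_DIM(names))
                n = RTE_DIM(names); /* more stats exist than were captured */

        /* values[i] corresponds to names[i] / ids[i] */
        return rte_event_dev_xstats_get(dev_id, RTE_EVENT_DEV_XSTATS_DEVICE,
                        0, ids, values, n);
}
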
int rte_event_dev_selftest(uint8_t dev_id);

struct rte_mempool *
rte_event_vector_pool_create(const char *name, unsigned int n,
                unsigned int cache_size, uint16_t nb_elem,
                int socket_id);

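/*
 * Example (illustrative sketch, not part of the upstream header): create a
 * mempool of event vectors, each able to carry up to 32 pointers, e.g. for
 * Rx adapter event vectorization. The pool name and sizes are arbitrary.
 */
static inline struct rte_mempool *
app_create_vector_pool(int socket_id)
{
        return rte_event_vector_pool_create("app_evt_vec_pool", 16384,
                        128, 32, socket_id);
}
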
#include <rte_eventdev_core.h>

#ifdef __cplusplus
extern "C" {
#endif

static __rte_always_inline uint16_t
__rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                const struct rte_event ev[], uint16_t nb_events,
                const event_enqueue_burst_t fn)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS ||
                        port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
                rte_errno = EINVAL;
                return 0;
        }

        if (port == NULL) {
                rte_errno = EINVAL;
                return 0;
        }
#endif
        rte_eventdev_trace_enq_burst(dev_id, port_id, ev, nb_events, (void *)fn);

        return fn(port, ev, nb_events);
}

static inline uint16_t
rte_event_enqueue_burst(uint8_t dev_id, uint8_t port_id,
                const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_event_fp_ops *fp_ops;

        fp_ops = &rte_event_fp_ops[dev_id];
        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        fp_ops->enqueue_burst);
}

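/*
 * Example (illustrative sketch, not part of the upstream header): drain a
 * burst through a back-pressured port. The helper name is illustrative;
 * production code should inspect rte_errno when the enqueue repeatedly
 * returns a short count.
 */
static inline void
app_enqueue_all(uint8_t dev_id, uint8_t port_id,
                const struct rte_event *ev, uint16_t n)
{
        uint16_t sent = 0;

        /* A short return is flow control, not necessarily an error. */
        while (sent < n)
                sent += rte_event_enqueue_burst(dev_id, port_id,
                                ev + sent, n - sent);
}
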
static inline uint16_t
rte_event_enqueue_new_burst(uint8_t dev_id, uint8_t port_id,
                const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_event_fp_ops *fp_ops;

        fp_ops = &rte_event_fp_ops[dev_id];
        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        fp_ops->enqueue_new_burst);
}

static inline uint16_t
rte_event_enqueue_forward_burst(uint8_t dev_id, uint8_t port_id,
                const struct rte_event ev[], uint16_t nb_events)
{
        const struct rte_event_fp_ops *fp_ops;

        fp_ops = &rte_event_fp_ops[dev_id];
        return __rte_event_enqueue_burst(dev_id, port_id, ev, nb_events,
                        fp_ops->enqueue_forward_burst);
}

static inline uint16_t
rte_event_dequeue_burst(uint8_t dev_id, uint8_t port_id, struct rte_event ev[],
                uint16_t nb_events, uint64_t timeout_ticks)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS ||
                        port_id >= RTE_EVENT_MAX_PORTS_PER_DEV) {
                rte_errno = EINVAL;
                return 0;
        }

        if (port == NULL) {
                rte_errno = EINVAL;
                return 0;
        }
#endif
        rte_eventdev_trace_deq_burst(dev_id, port_id, ev, nb_events);

        return (fp_ops->dequeue_burst)(port, ev, nb_events, timeout_ticks);
}

#define RTE_EVENT_DEV_MAINT_OP_FLUSH (1 << 0)

static inline int
rte_event_maintain(uint8_t dev_id, uint8_t port_id, int op)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];
#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS ||
                        port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
                return -EINVAL;

        if (port == NULL)
                return -EINVAL;

        if (op & (~RTE_EVENT_DEV_MAINT_OP_FLUSH))
                return -EINVAL;
#endif
        rte_eventdev_trace_maintain(dev_id, port_id, op);

        if (fp_ops->maintain != NULL)
                fp_ops->maintain(port, op);

        return 0;
}

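/*
 * Example (illustrative sketch, not part of the upstream header): a typical
 * worker loop combining dequeue, forward and maintenance. The helper name,
 * the burst size of 32 and app_enqueue_all (from the sketch above) are
 * illustrative; the processing step is left to the application.
 */
static inline void
app_worker_loop(uint8_t dev_id, uint8_t port_id, uint8_t next_queue,
                volatile int *stop)
{
        struct rte_event ev[32];
        uint16_t i, n;

        while (!*stop) {
                n = rte_event_dequeue_burst(dev_id, port_id, ev,
                                RTE_DIM(ev), 0 /* do not block */);
                for (i = 0; i < n; i++) {
                        /* ... process ev[i], e.g. via ev[i].mbuf ... */
                        ev[i].op = RTE_EVENT_OP_FORWARD;
                        ev[i].queue_id = next_queue;
                }
                if (n > 0)
                        app_enqueue_all(dev_id, port_id, ev, n);
                /* No-op on devices with RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
                 * required periodically otherwise, even when idle. */
                rte_event_maintain(dev_id, port_id, 0);
        }
}
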
static inline uint8_t
rte_event_port_profile_switch(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS ||
                        port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
                return -EINVAL;

        if (port == NULL)
                return -EINVAL;

        if (profile_id >= RTE_EVENT_MAX_PROFILES_PER_PORT)
                return -EINVAL;
#endif
        rte_eventdev_trace_port_profile_switch(dev_id, port_id, profile_id);

        return fp_ops->profile_switch(port, profile_id);
}

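/*
 * Example (illustrative sketch, not part of the upstream header): hop
 * between pre-arranged link profiles on a port. Requires the
 * RTE_EVENT_DEV_CAP_PROFILE_LINK capability; the profile APIs used here are
 * experimental. The setup calls are shown only in a comment.
 */
static inline int
app_use_profile(uint8_t dev_id, uint8_t port_id, uint8_t profile_id)
{
        /* Profiles are populated once at init time, e.g.:
         *      rte_event_port_profile_links_set(dev_id, port_id,
         *                      queues_a, NULL, nb_a, 0);
         *      rte_event_port_profile_links_set(dev_id, port_id,
         *                      queues_b, NULL, nb_b, 1);
         */
        return rte_event_port_profile_switch(dev_id, port_id, profile_id);
}
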
__rte_experimental
static inline int
rte_event_port_preschedule_modify(uint8_t dev_id, uint8_t port_id,
                enum rte_event_dev_preschedule_type type)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
                return -EINVAL;

        if (port == NULL)
                return -EINVAL;
#endif
        rte_eventdev_trace_port_preschedule_modify(dev_id, port_id, type);

        return fp_ops->preschedule_modify(port, type);
}

__rte_experimental
static inline void
rte_event_port_preschedule(uint8_t dev_id, uint8_t port_id,
                enum rte_event_dev_preschedule_type type)
{
        const struct rte_event_fp_ops *fp_ops;
        void *port;

        fp_ops = &rte_event_fp_ops[dev_id];
        port = fp_ops->data[port_id];

#ifdef RTE_LIBRTE_EVENTDEV_DEBUG
        if (dev_id >= RTE_EVENT_MAX_DEVS || port_id >= RTE_EVENT_MAX_PORTS_PER_DEV)
                return;
        if (port == NULL)
                return;
#endif
        rte_eventdev_trace_port_preschedule(dev_id, port_id, type);

        fp_ops->preschedule(port, type);
}

#ifdef __cplusplus
}
#endif

#endif /* _RTE_EVENTDEV_H_ */