DPDK  25.03.0
rte_cryptodev.h
Go to the documentation of this file.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2015-2020 Intel Corporation.
3  */
4 
5 #ifndef _RTE_CRYPTODEV_H_
6 #define _RTE_CRYPTODEV_H_
7 
17 #include <rte_compat.h>
18 #include "rte_kvargs.h"
19 #include "rte_crypto.h"
20 #include <rte_common.h>
21 #include <rte_rcu_qsbr.h>
22 
23 #include "rte_cryptodev_trace_fp.h"
24 
25 #ifdef __cplusplus
26 extern "C" {
27 #endif
28 
32 extern int rte_cryptodev_logtype;
33 #define RTE_LOGTYPE_CRYPTODEV rte_cryptodev_logtype
34 
35 /* Logging Macros */
36 #define CDEV_LOG_ERR(...) \
37  RTE_LOG_LINE_PREFIX(ERR, CRYPTODEV, \
38  "%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
39 
40 #define CDEV_LOG_INFO(...) \
41  RTE_LOG_LINE(INFO, CRYPTODEV, "" __VA_ARGS__)
42 
43 #define CDEV_LOG_DEBUG(...) \
44  RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
45  "%s() line %u: ", __func__ RTE_LOG_COMMA __LINE__, __VA_ARGS__)
46 
47 #define CDEV_PMD_TRACE(...) \
48  RTE_LOG_LINE_PREFIX(DEBUG, CRYPTODEV, \
49  "[%s] %s: ", dev RTE_LOG_COMMA __func__, __VA_ARGS__)
50 
64 #define rte_crypto_op_ctod_offset(c, t, o) \
65  ((t)((char *)(c) + (o)))
66 
78 #define rte_crypto_op_ctophys_offset(c, o) \
79  (rte_iova_t)((c)->phys_addr + (o))
80 
85  uint16_t min;
86  uint16_t max;
87  uint16_t increment;
93 };
94 
100 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_512_BYTES RTE_BIT32(0)
101 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_4096_BYTES RTE_BIT32(1)
102 #define RTE_CRYPTO_CIPHER_DATA_UNIT_LEN_1_MEGABYTES RTE_BIT32(2)
103 
110  union {
111  struct {
114  uint16_t block_size;
124  } auth;
126  struct {
129  uint16_t block_size;
135  uint32_t dataunit_set;
141  } cipher;
143  struct {
146  uint16_t block_size;
156  } aead;
157  };
158 };
159 
167  uint32_t op_types;
176  __extension__
177  union {
183  uint8_t internal_rng;
189  uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END];
191  };
192 
193  uint64_t hash_algos;
195 };
196 
202 };
203 
204 
210  union {
215  };
216 };
217 
220  enum rte_crypto_sym_xform_type type;
221  union {
222  enum rte_crypto_cipher_algorithm cipher;
223  enum rte_crypto_auth_algorithm auth;
224  enum rte_crypto_aead_algorithm aead;
225  } algo;
226 };
227 
235 };
236 
248 rte_cryptodev_sym_capability_get(uint8_t dev_id,
249  const struct rte_cryptodev_sym_capability_idx *idx);
250 
262 rte_cryptodev_asym_capability_get(uint8_t dev_id,
263  const struct rte_cryptodev_asym_capability_idx *idx);
264 
277 int
279  const struct rte_cryptodev_symmetric_capability *capability,
280  uint16_t key_size, uint16_t iv_size);
281 
295 int
297  const struct rte_cryptodev_symmetric_capability *capability,
298  uint16_t key_size, uint16_t digest_size, uint16_t iv_size);
299 
314 int
316  const struct rte_cryptodev_symmetric_capability *capability,
317  uint16_t key_size, uint16_t digest_size, uint16_t aad_size,
318  uint16_t iv_size);
319 
330 int
332  const struct rte_cryptodev_asymmetric_xform_capability *capability,
333  enum rte_crypto_asym_op_type op_type);
334 
345 int
347  const struct rte_cryptodev_asymmetric_xform_capability *capability,
348  uint16_t modlen);
349 
360 bool
362  const struct rte_cryptodev_asymmetric_xform_capability *capability,
363  enum rte_crypto_auth_algorithm hash);
364 
379 __rte_experimental
380 int
382  const struct rte_cryptodev_asymmetric_xform_capability *capability,
383  enum rte_crypto_asym_op_type op_type, uint8_t cap);
384 
396 int
398  const char *algo_string);
399 
411 int
413  const char *algo_string);
414 
426 int
428  const char *algo_string);
429 
441 int
443  const char *xform_string);
444 
454 __rte_experimental
455 const char *
457 
467 __rte_experimental
468 const char *
470 
480 __rte_experimental
481 const char *
483 
493 __rte_experimental
494 const char *
496 
497 
499 #define RTE_CRYPTODEV_END_OF_CAPABILITIES_LIST() \
500  { RTE_CRYPTO_OP_TYPE_UNDEFINED }
501 
502 
511 #define RTE_CRYPTODEV_FF_SYMMETRIC_CRYPTO (1ULL << 0)
512 
513 #define RTE_CRYPTODEV_FF_ASYMMETRIC_CRYPTO (1ULL << 1)
514 
515 #define RTE_CRYPTODEV_FF_SYM_OPERATION_CHAINING (1ULL << 2)
516 
517 #define RTE_CRYPTODEV_FF_CPU_SSE (1ULL << 3)
518 
519 #define RTE_CRYPTODEV_FF_CPU_AVX (1ULL << 4)
520 
521 #define RTE_CRYPTODEV_FF_CPU_AVX2 (1ULL << 5)
522 
523 #define RTE_CRYPTODEV_FF_CPU_AESNI (1ULL << 6)
524 
525 #define RTE_CRYPTODEV_FF_HW_ACCELERATED (1ULL << 7)
526 
529 #define RTE_CRYPTODEV_FF_CPU_AVX512 (1ULL << 8)
530 
531 #define RTE_CRYPTODEV_FF_IN_PLACE_SGL (1ULL << 9)
532 
535 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_SGL_OUT (1ULL << 10)
536 
539 #define RTE_CRYPTODEV_FF_OOP_SGL_IN_LB_OUT (1ULL << 11)
540 
544 #define RTE_CRYPTODEV_FF_OOP_LB_IN_SGL_OUT (1ULL << 12)
545 
548 #define RTE_CRYPTODEV_FF_OOP_LB_IN_LB_OUT (1ULL << 13)
549 
550 #define RTE_CRYPTODEV_FF_CPU_NEON (1ULL << 14)
551 
552 #define RTE_CRYPTODEV_FF_CPU_ARM_CE (1ULL << 15)
553 
554 #define RTE_CRYPTODEV_FF_SECURITY (1ULL << 16)
555 
556 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_EXP (1ULL << 17)
557 
558 #define RTE_CRYPTODEV_FF_RSA_PRIV_OP_KEY_QT (1ULL << 18)
559 
560 #define RTE_CRYPTODEV_FF_DIGEST_ENCRYPTED (1ULL << 19)
561 
562 #define RTE_CRYPTODEV_FF_ASYM_SESSIONLESS (1ULL << 20)
563 
564 #define RTE_CRYPTODEV_FF_SYM_CPU_CRYPTO (1ULL << 21)
565 
566 #define RTE_CRYPTODEV_FF_SYM_SESSIONLESS (1ULL << 22)
567 
568 #define RTE_CRYPTODEV_FF_NON_BYTE_ALIGNED_DATA (1ULL << 23)
569 
570 #define RTE_CRYPTODEV_FF_SYM_RAW_DP (1ULL << 24)
571 
572 #define RTE_CRYPTODEV_FF_CIPHER_MULTIPLE_DATA_UNITS (1ULL << 25)
573 
574 #define RTE_CRYPTODEV_FF_CIPHER_WRAPPED_KEY (1ULL << 26)
575 
576 #define RTE_CRYPTODEV_FF_SECURITY_INNER_CSUM (1ULL << 27)
577 
578 #define RTE_CRYPTODEV_FF_SECURITY_RX_INJECT (1ULL << 28)
579 
589 const char *
590 rte_cryptodev_get_feature_name(uint64_t flag);
591 
593 /* Structure rte_cryptodev_info 8< */
595  const char *driver_name;
596  uint8_t driver_id;
597  struct rte_device *device;
599  uint64_t feature_flags;
614  struct {
615  unsigned max_nb_sessions;
620  } sym;
621 };
622 /* >8 End of structure rte_cryptodev_info. */
623 
624 #define RTE_CRYPTODEV_DETACHED (0)
625 #define RTE_CRYPTODEV_ATTACHED (1)
626 
632 };
633 
634 /* Crypto queue pair priority levels */
635 #define RTE_CRYPTODEV_QP_PRIORITY_HIGHEST 0
636 
639 #define RTE_CRYPTODEV_QP_PRIORITY_NORMAL 128
640 
643 #define RTE_CRYPTODEV_QP_PRIORITY_LOWEST 255
644 
649 /* Structure rte_cryptodev_qp_conf 8<*/
651  uint32_t nb_descriptors;
654  uint8_t priority;
662 };
663 /* >8 End of structure rte_cryptodev_qp_conf. */
664 
686 typedef uint16_t (*rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id,
687  struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param);
688 
698 typedef void (*rte_cryptodev_cb_fn)(uint8_t dev_id,
699  enum rte_cryptodev_event_type event, void *cb_arg);
700 
701 
704  uint64_t enqueued_count;
706  uint64_t dequeued_count;
713 };
714 
715 #define RTE_CRYPTODEV_NAME_MAX_LEN (64)
716 
727 int
728 rte_cryptodev_get_dev_id(const char *name);
729 
740 const char *
741 rte_cryptodev_name_get(uint8_t dev_id);
742 
750 uint8_t
751 rte_cryptodev_count(void);
752 
761 uint8_t
762 rte_cryptodev_device_count_by_driver(uint8_t driver_id);
763 
775 uint8_t
776 rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices,
777  uint8_t nb_devices);
778 /*
779  * Return the NUMA socket to which a device is connected
780  *
781  * @param dev_id
782  * The identifier of the device
783  * @return
784  * The NUMA socket id to which the device is connected or
785  * a default of zero if the socket could not be determined.
786  * -1 if the dev_id value is out of range.
787  */
788 int
789 rte_cryptodev_socket_id(uint8_t dev_id);
790 
792 /* Structure rte_cryptodev_config 8< */
794  int socket_id;
795  uint16_t nb_queue_pairs;
797  uint64_t ff_disable;
804 };
805 /* >8 End of structure rte_cryptodev_config. */
806 
821 int
822 rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config);
823 
839 int
840 rte_cryptodev_start(uint8_t dev_id);
841 
848 void
849 rte_cryptodev_stop(uint8_t dev_id);
850 
860 int
861 rte_cryptodev_close(uint8_t dev_id);
862 
884 int
885 rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id,
886  const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
887 
912 __rte_experimental
913 int
914 rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id,
915  const struct rte_cryptodev_qp_conf *qp_conf, int socket_id);
916 
930 int
931 rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id);
932 
940 uint16_t
941 rte_cryptodev_queue_pair_count(uint8_t dev_id);
942 
943 
955 int
956 rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats);
957 
963 void
964 rte_cryptodev_stats_reset(uint8_t dev_id);
965 
979 void
980 rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info);
981 
982 
996 int
997 rte_cryptodev_callback_register(uint8_t dev_id,
998  enum rte_cryptodev_event_type event,
999  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
1000 
1014 int
1015 rte_cryptodev_callback_unregister(uint8_t dev_id,
1016  enum rte_cryptodev_event_type event,
1017  rte_cryptodev_cb_fn cb_fn, void *cb_arg);
1018 
1034 __rte_experimental
1035 int
1036 rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id);
1037 
1038 struct rte_cryptodev_callback;
1039 
1041 RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback);
1042 
1048  RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1052  void *arg;
1054 };
1055 
1060 struct rte_cryptodev_cb_rcu {
1061  RTE_ATOMIC(struct rte_cryptodev_cb *) next;
1063  struct rte_rcu_qsbr *qsbr;
1065 };
1066 
1076 void *
1077 rte_cryptodev_get_sec_ctx(uint8_t dev_id);
1078 
1108 struct rte_mempool *
1109 rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts,
1110  uint32_t elt_size, uint32_t cache_size, uint16_t priv_size,
1111  int socket_id);
1112 
1113 
1134 struct rte_mempool *
1135 rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts,
1136  uint32_t cache_size, uint16_t user_data_size, int socket_id);
1137 
1154 void *
1155 rte_cryptodev_sym_session_create(uint8_t dev_id,
1156  struct rte_crypto_sym_xform *xforms,
1157  struct rte_mempool *mp);
1175 int
1176 rte_cryptodev_asym_session_create(uint8_t dev_id,
1177  struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp,
1178  void **session);
1179 
1192 int
1193 rte_cryptodev_sym_session_free(uint8_t dev_id,
1194  void *sess);
1195 
1207 int
1208 rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess);
1209 
1216 unsigned int
1218 
1230 unsigned int
1232 
1243 unsigned int
1245 
1254 unsigned int
1255 rte_cryptodev_is_valid_dev(uint8_t dev_id);
1256 
1265 int rte_cryptodev_driver_id_get(const char *name);
1266 
1275 const char *rte_cryptodev_driver_name_get(uint8_t driver_id);
1276 
1289 int
1291  void *data,
1292  uint16_t size);
1293 
/* Offset, in 64-bit words, of the opaque-data slot at the start of a session. */
#define CRYPTO_SESS_OPAQUE_DATA_OFF 0

/**
 * Get the 64-bit opaque application data stored in a symmetric session.
 *
 * The opaque word lives in the first 64-bit slot of the session object.
 *
 * @param sess
 *   Pointer to a symmetric crypto session. Must not be NULL (dereferenced).
 * @return
 *   The value previously stored with
 *   rte_cryptodev_sym_session_opaque_data_set().
 */
static inline uint64_t
rte_cryptodev_sym_session_opaque_data_get(void *sess)
{
	return *((uint64_t *)sess + CRYPTO_SESS_OPAQUE_DATA_OFF);
}

/**
 * Store a 64-bit opaque application value in a symmetric session.
 *
 * @param sess
 *   Pointer to a symmetric crypto session. Must not be NULL (dereferenced).
 * @param opaque
 *   Value to store in the session's opaque-data slot.
 */
static inline void
rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
{
	uint64_t *data;

	data = ((uint64_t *)sess) + CRYPTO_SESS_OPAQUE_DATA_OFF;
	*data = opaque;
}
1314 
1325 void *
1327 
1341 int
1342 rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size);
1343 
1354 void *
1356 
1369 uint32_t
1371  void *sess, union rte_crypto_sym_ofs ofs,
1372  struct rte_crypto_sym_vec *vec);
1373 
1383 int
1384 rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id);
1385 
1401 int
1402 rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess,
1403  enum rte_crypto_op_type op_type,
1404  enum rte_crypto_op_sess_type sess_type,
1405  void *ev_mdata, uint16_t size);
1406 
1411 union rte_cryptodev_session_ctx {void *crypto_sess;
1412  struct rte_crypto_sym_xform *xform;
1413  struct rte_security_session *sec_sess;
1414 };
1415 
1442  void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec,
1443  union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status);
1444 
1467  void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec,
1468  uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs,
1469  struct rte_crypto_va_iova_ptr *iv,
1470  struct rte_crypto_va_iova_ptr *digest,
1471  struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1472  void *user_data);
1473 
1485 typedef int (*cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx,
1486  uint32_t n);
1487 
1497 typedef uint32_t (*rte_cryptodev_raw_get_dequeue_count_t)(void *user_data);
1498 
1507 typedef void (*rte_cryptodev_raw_post_dequeue_t)(void *user_data,
1508  uint32_t index, uint8_t is_op_success);
1509 
1551 typedef uint32_t (*cryptodev_sym_raw_dequeue_burst_t)(void *qp,
1552  uint8_t *drv_ctx,
1553  rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1554  uint32_t max_nb_to_dequeue,
1555  rte_cryptodev_raw_post_dequeue_t post_dequeue,
1556  void **out_user_data, uint8_t is_user_data_array,
1557  uint32_t *n_success, int *dequeue_status);
1558 
1582 typedef void * (*cryptodev_sym_raw_dequeue_t)(
1583  void *qp, uint8_t *drv_ctx, int *dequeue_status,
1584  enum rte_crypto_op_status *op_status);
1585 
1592  void *qp_data;
1593 
1595  cryptodev_sym_raw_enqueue_burst_t enqueue_burst;
1598  cryptodev_sym_raw_dequeue_burst_t dequeue_burst;
1600 
1601  /* Driver specific context data */
1602  uint8_t drv_ctx_data[];
1603 };
1604 
1626 int
1627 rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id,
1628  struct rte_crypto_raw_dp_ctx *ctx,
1629  enum rte_crypto_op_sess_type sess_type,
1630  union rte_cryptodev_session_ctx session_ctx,
1631  uint8_t is_update);
1632 
1657 uint32_t
1659  struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs,
1660  void **user_data, int *enqueue_status);
1661 
1682 __rte_experimental
1683 static __rte_always_inline int
1685  struct rte_crypto_vec *data_vec, uint16_t n_data_vecs,
1686  union rte_crypto_sym_ofs ofs,
1687  struct rte_crypto_va_iova_ptr *iv,
1688  struct rte_crypto_va_iova_ptr *digest,
1689  struct rte_crypto_va_iova_ptr *aad_or_auth_iv,
1690  void *user_data)
1691 {
1692  return ctx->enqueue(ctx->qp_data, ctx->drv_ctx_data, data_vec,
1693  n_data_vecs, ofs, iv, digest, aad_or_auth_iv, user_data);
1694 }
1695 
1706 int
1708  uint32_t n);
1709 
1751 uint32_t
1753  rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count,
1754  uint32_t max_nb_to_dequeue,
1755  rte_cryptodev_raw_post_dequeue_t post_dequeue,
1756  void **out_user_data, uint8_t is_user_data_array,
1757  uint32_t *n_success, int *dequeue_status);
1758 
1782 __rte_experimental
1783 static __rte_always_inline void *
1785  int *dequeue_status, enum rte_crypto_op_status *op_status)
1786 {
1787  return ctx->dequeue(ctx->qp_data, ctx->drv_ctx_data, dequeue_status, op_status);
1788 }
1789 
1799 int
1801  uint32_t n);
1802 
1838 struct rte_cryptodev_cb *
1839 rte_cryptodev_add_enq_callback(uint8_t dev_id,
1840  uint16_t qp_id,
1842  void *cb_arg);
1843 
1865 int rte_cryptodev_remove_enq_callback(uint8_t dev_id,
1866  uint16_t qp_id,
1867  struct rte_cryptodev_cb *cb);
1868 
1903 struct rte_cryptodev_cb *
1904 rte_cryptodev_add_deq_callback(uint8_t dev_id,
1905  uint16_t qp_id,
1907  void *cb_arg);
1908 
1930 int rte_cryptodev_remove_deq_callback(uint8_t dev_id,
1931  uint16_t qp_id,
1932  struct rte_cryptodev_cb *cb);
1933 
1934 #ifdef __cplusplus
1935 }
1936 #endif
1937 
1938 #include "rte_cryptodev_core.h"
1939 
1940 #ifdef __cplusplus
1941 extern "C" {
1942 #endif
1943 
1980 static inline uint16_t
1981 rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id,
1982  struct rte_crypto_op **ops, uint16_t nb_ops)
1983 {
1984  const struct rte_crypto_fp_ops *fp_ops;
1985  void *qp;
1986 
1987  rte_cryptodev_trace_dequeue_burst(dev_id, qp_id, (void **)ops, nb_ops);
1988 
1989  fp_ops = &rte_crypto_fp_ops[dev_id];
1990  qp = fp_ops->qp.data[qp_id];
1991 
1992  nb_ops = fp_ops->dequeue_burst(qp, ops, nb_ops);
1993 
1994 #ifdef RTE_CRYPTO_CALLBACKS
1995  if (unlikely(fp_ops->qp.deq_cb[qp_id].next != NULL)) {
1996  struct rte_cryptodev_cb_rcu *list;
1997  struct rte_cryptodev_cb *cb;
1998 
1999  /* rte_memory_order_release memory order was used when the
2000  * call back was inserted into the list.
2001  * Since there is a clear dependency between loading
2002  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2003  * not required.
2004  */
2005  list = &fp_ops->qp.deq_cb[qp_id];
2006  rte_rcu_qsbr_thread_online(list->qsbr, 0);
2007  cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2008 
2009  while (cb != NULL) {
2010  nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2011  cb->arg);
2012  cb = cb->next;
2013  };
2014 
2015  rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2016  }
2017 #endif
2018  return nb_ops;
2019 }
2020 
2052 static inline uint16_t
2053 rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id,
2054  struct rte_crypto_op **ops, uint16_t nb_ops)
2055 {
2056  const struct rte_crypto_fp_ops *fp_ops;
2057  void *qp;
2058 
2059  fp_ops = &rte_crypto_fp_ops[dev_id];
2060  qp = fp_ops->qp.data[qp_id];
2061 #ifdef RTE_CRYPTO_CALLBACKS
2062  if (unlikely(fp_ops->qp.enq_cb[qp_id].next != NULL)) {
2063  struct rte_cryptodev_cb_rcu *list;
2064  struct rte_cryptodev_cb *cb;
2065 
2066  /* rte_memory_order_release memory order was used when the
2067  * call back was inserted into the list.
2068  * Since there is a clear dependency between loading
2069  * cb and cb->fn/cb->next, rte_memory_order_acquire memory order is
2070  * not required.
2071  */
2072  list = &fp_ops->qp.enq_cb[qp_id];
2073  rte_rcu_qsbr_thread_online(list->qsbr, 0);
2074  cb = rte_atomic_load_explicit(&list->next, rte_memory_order_relaxed);
2075 
2076  while (cb != NULL) {
2077  nb_ops = cb->fn(dev_id, qp_id, ops, nb_ops,
2078  cb->arg);
2079  cb = cb->next;
2080  };
2081 
2082  rte_rcu_qsbr_thread_offline(list->qsbr, 0);
2083  }
2084 #endif
2085 
2086  rte_cryptodev_trace_enqueue_burst(dev_id, qp_id, (void **)ops, nb_ops);
2087  return fp_ops->enqueue_burst(qp, ops, nb_ops);
2088 }
2089 
2114 __rte_experimental
2115 static inline int
2116 rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
2117 {
2118  const struct rte_crypto_fp_ops *fp_ops;
2119  void *qp;
2120  int rc;
2121 
2122  fp_ops = &rte_crypto_fp_ops[dev_id];
2123  qp = fp_ops->qp.data[qp_id];
2124 
2125  if (fp_ops->qp_depth_used == NULL) {
2126  rc = -ENOTSUP;
2127  goto out;
2128  }
2129 
2130  rc = fp_ops->qp_depth_used(qp);
2131 out:
2132  rte_cryptodev_trace_qp_depth_used(dev_id, qp_id);
2133  return rc;
2134 }
2135 
2136 #ifdef __cplusplus
2137 }
2138 #endif
2139 
2140 #endif /* _RTE_CRYPTODEV_H_ */
int rte_cryptodev_get_cipher_algo_enum(enum rte_crypto_cipher_algorithm *algo_enum, const char *algo_string)
int rte_cryptodev_get_auth_algo_enum(enum rte_crypto_auth_algorithm *algo_enum, const char *algo_string)
struct rte_cryptodev_cb * rte_cryptodev_add_enq_callback(uint8_t dev_id, uint16_t qp_id, rte_cryptodev_callback_fn cb_fn, void *cb_arg)
#define __rte_always_inline
Definition: rte_common.h:456
struct rte_cryptodev_cb * next
struct rte_mempool * mp_session
unsigned int rte_cryptodev_asym_get_header_session_size(void)
void rte_cryptodev_info_get(uint8_t dev_id, struct rte_cryptodev_info *dev_info)
struct rte_cryptodev_symmetric_capability sym
static __rte_experimental __rte_always_inline int rte_cryptodev_raw_enqueue(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_vec *data_vec, uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
struct rte_mempool * rte_cryptodev_sym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t elt_size, uint32_t cache_size, uint16_t priv_size, int socket_id)
const char * rte_cryptodev_get_feature_name(uint64_t flag)
int rte_cryptodev_remove_enq_callback(uint8_t dev_id, uint16_t qp_id, struct rte_cryptodev_cb *cb)
enum rte_crypto_auth_algorithm algo
int rte_cryptodev_configure_raw_dp_ctx(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_raw_dp_ctx *ctx, enum rte_crypto_op_sess_type sess_type, union rte_cryptodev_session_ctx session_ctx, uint8_t is_update)
int rte_cryptodev_queue_pair_setup(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
bool rte_cryptodev_asym_xform_capability_check_hash(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_auth_algorithm hash)
const struct rte_cryptodev_asymmetric_xform_capability * rte_cryptodev_asym_capability_get(uint8_t dev_id, const struct rte_cryptodev_asym_capability_idx *idx)
int rte_cryptodev_get_raw_dp_ctx_size(uint8_t dev_id)
uint8_t rte_cryptodev_devices_get(const char *driver_name, uint8_t *devices, uint8_t nb_devices)
rte_crypto_asym_xform_type
enum rte_crypto_asym_xform_type xform_type
static uint16_t rte_cryptodev_dequeue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
struct rte_crypto_param_range digest_size
int rte_cryptodev_raw_enqueue_done(struct rte_crypto_raw_dp_ctx *ctx, uint32_t n)
enum rte_crypto_op_type op
int rte_cryptodev_driver_id_get(const char *name)
__rte_experimental const char * rte_cryptodev_get_aead_algo_string(enum rte_crypto_aead_algorithm algo_enum)
uint32_t rte_cryptodev_sym_cpu_crypto_process(uint8_t dev_id, void *sess, union rte_crypto_sym_ofs ofs, struct rte_crypto_sym_vec *vec)
int rte_cryptodev_asym_session_free(uint8_t dev_id, void *sess)
int rte_cryptodev_asym_xform_capability_check_optype(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type)
int rte_cryptodev_session_event_mdata_set(uint8_t dev_id, void *sess, enum rte_crypto_op_type op_type, enum rte_crypto_op_sess_type sess_type, void *ev_mdata, uint16_t size)
unsigned int rte_cryptodev_asym_get_private_session_size(uint8_t dev_id)
uint64_t dequeue_err_count
int rte_cryptodev_get_aead_algo_enum(enum rte_crypto_aead_algorithm *algo_enum, const char *algo_string)
uint32_t cache_size
Definition: rte_mempool.h:241
static __rte_always_inline void rte_rcu_qsbr_thread_offline(struct rte_rcu_qsbr *v, unsigned int thread_id)
Definition: rte_rcu_qsbr.h:349
char name[RTE_MEMPOOL_NAMESIZE]
Definition: rte_mempool.h:231
const struct rte_cryptodev_symmetric_capability * rte_cryptodev_sym_capability_get(uint8_t dev_id, const struct rte_cryptodev_sym_capability_idx *idx)
struct rte_cryptodev_symmetric_capability::@124::@126 auth
int rte_cryptodev_configure(uint8_t dev_id, struct rte_cryptodev_config *config)
int rte_cryptodev_callback_unregister(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
const struct rte_cryptodev_capabilities * capabilities
int rte_cryptodev_sym_session_free(uint8_t dev_id, void *sess)
uint32_t(* rte_cryptodev_raw_get_dequeue_count_t)(void *user_data)
rte_crypto_asym_op_type
uint32_t size
Definition: rte_mempool.h:240
void rte_cryptodev_stop(uint8_t dev_id)
const char * driver_name
int rte_cryptodev_close(uint8_t dev_id)
void * rte_cryptodev_asym_session_get_user_data(void *sess)
enum rte_crypto_asym_xform_type type
int(* cryptodev_sym_raw_operation_done_t)(void *qp, uint8_t *drv_ctx, uint32_t n)
int rte_cryptodev_asym_session_create(uint8_t dev_id, struct rte_crypto_asym_xform *xforms, struct rte_mempool *mp, void **session)
void(* rte_cryptodev_cb_fn)(uint8_t dev_id, enum rte_cryptodev_event_type event, void *cb_arg)
int rte_cryptodev_sym_capability_check_aead(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t aad_size, uint16_t iv_size)
#define unlikely(x)
int rte_cryptodev_asym_xform_capability_check_modlen(const struct rte_cryptodev_asymmetric_xform_capability *capability, uint16_t modlen)
const char * rte_cryptodev_driver_name_get(uint8_t driver_id)
int rte_cryptodev_callback_register(uint8_t dev_id, enum rte_cryptodev_event_type event, rte_cryptodev_cb_fn cb_fn, void *cb_arg)
uint16_t min_mbuf_tailroom_req
const char * rte_cryptodev_name_get(uint8_t dev_id)
__rte_experimental int rte_cryptodev_queue_pair_reset(uint8_t dev_id, uint16_t queue_pair_id, const struct rte_cryptodev_qp_conf *qp_conf, int socket_id)
rte_crypto_op_type
Definition: rte_crypto.h:28
__rte_experimental const char * rte_cryptodev_asym_get_xform_string(enum rte_crypto_asym_xform_type xform_enum)
rte_cryptodev_callback_fn fn
static uint16_t rte_cryptodev_enqueue_burst(uint8_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops)
int rte_cryptodev_sym_capability_check_cipher(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t iv_size)
uint16_t(* rte_cryptodev_callback_fn)(uint16_t dev_id, uint16_t qp_id, struct rte_crypto_op **ops, uint16_t nb_ops, void *user_param)
int rte_cryptodev_stats_get(uint8_t dev_id, struct rte_cryptodev_stats *stats)
static __rte_experimental int rte_cryptodev_qp_depth_used(uint8_t dev_id, uint16_t qp_id)
int rte_cryptodev_get_qp_status(uint8_t dev_id, uint16_t queue_pair_id)
__rte_experimental int rte_cryptodev_asym_xform_capability_check_opcap(const struct rte_cryptodev_asymmetric_xform_capability *capability, enum rte_crypto_asym_op_type op_type, uint8_t cap)
struct rte_device * device
int rte_cryptodev_asym_get_xform_enum(enum rte_crypto_asym_xform_type *xform_enum, const char *xform_string)
uint32_t elt_size
Definition: rte_mempool.h:244
int rte_cryptodev_remove_deq_callback(uint8_t dev_id, uint16_t qp_id, struct rte_cryptodev_cb *cb)
struct rte_cryptodev_symmetric_capability::@124::@127 cipher
uint8_t rte_cryptodev_count(void)
void *(* cryptodev_sym_raw_dequeue_t)(void *qp, uint8_t *drv_ctx, int *dequeue_status, enum rte_crypto_op_status *op_status)
int rte_cryptodev_sym_capability_check_auth(const struct rte_cryptodev_symmetric_capability *capability, uint16_t key_size, uint16_t digest_size, uint16_t iv_size)
int rte_cryptodev_asym_session_set_user_data(void *sess, void *data, uint16_t size)
uint16_t min_mbuf_headroom_req
uint32_t rte_cryptodev_raw_enqueue_burst(struct rte_crypto_raw_dp_ctx *ctx, struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, void **user_data, int *enqueue_status)
uint32_t rte_cryptodev_raw_dequeue_burst(struct rte_crypto_raw_dp_ctx *ctx, rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, uint32_t max_nb_to_dequeue, rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data, uint8_t is_user_data_array, uint32_t *n_success, int *dequeue_status)
unsigned int rte_cryptodev_sym_get_private_session_size(uint8_t dev_id)
static __rte_always_inline void rte_rcu_qsbr_thread_online(struct rte_rcu_qsbr *v, unsigned int thread_id)
Definition: rte_rcu_qsbr.h:296
uint32_t(* cryptodev_sym_raw_enqueue_burst_t)(void *qp, uint8_t *drv_ctx, struct rte_crypto_sym_vec *vec, union rte_crypto_sym_ofs ofs, void *user_data[], int *enqueue_status)
rte_crypto_auth_algorithm
__rte_experimental const char * rte_cryptodev_get_cipher_algo_string(enum rte_crypto_cipher_algorithm algo_enum)
rte_crypto_sym_xform_type
int rte_cryptodev_sym_session_set_user_data(void *sess, void *data, uint16_t size)
RTE_TAILQ_HEAD(rte_cryptodev_cb_list, rte_cryptodev_callback)
int rte_cryptodev_start(uint8_t dev_id)
uint64_t enqueue_err_count
uint32_t(* cryptodev_sym_raw_dequeue_burst_t)(void *qp, uint8_t *drv_ctx, rte_cryptodev_raw_get_dequeue_count_t get_dequeue_count, uint32_t max_nb_to_dequeue, rte_cryptodev_raw_post_dequeue_t post_dequeue, void **out_user_data, uint8_t is_user_data_array, uint32_t *n_success, int *dequeue_status)
rte_crypto_op_sess_type
Definition: rte_crypto.h:61
struct rte_crypto_param_range modlen
__rte_experimental int rte_cryptodev_queue_pair_event_error_query(uint8_t dev_id, uint16_t qp_id)
uint16_t rte_cryptodev_queue_pair_count(uint8_t dev_id)
struct rte_mempool * rte_cryptodev_asym_session_pool_create(const char *name, uint32_t nb_elts, uint32_t cache_size, uint16_t user_data_size, int socket_id)
struct rte_crypto_param_range key_size
int(* cryptodev_sym_raw_enqueue_t)(void *qp, uint8_t *drv_ctx, struct rte_crypto_vec *data_vec, uint16_t n_data_vecs, union rte_crypto_sym_ofs ofs, struct rte_crypto_va_iova_ptr *iv, struct rte_crypto_va_iova_ptr *digest, struct rte_crypto_va_iova_ptr *aad_or_auth_iv, void *user_data)
void * rte_cryptodev_sym_session_get_user_data(void *sess)
struct rte_cryptodev_cb * rte_cryptodev_add_deq_callback(uint8_t dev_id, uint16_t qp_id, rte_cryptodev_callback_fn cb_fn, void *cb_arg)
unsigned max_nb_sessions
rte_cryptodev_event_type
__rte_experimental const char * rte_cryptodev_get_auth_algo_string(enum rte_crypto_auth_algorithm algo_enum)
struct rte_crypto_param_range aad_size
uint32_t op_capa[RTE_CRYPTO_ASYM_OP_LIST_END]
static uint64_t rte_cryptodev_sym_session_opaque_data_get(void *sess)
enum rte_crypto_sym_xform_type xform_type
static __rte_experimental __rte_always_inline void * rte_cryptodev_raw_dequeue(struct rte_crypto_raw_dp_ctx *ctx, int *dequeue_status, enum rte_crypto_op_status *op_status)
void rte_cryptodev_stats_reset(uint8_t dev_id)
int rte_cryptodev_get_dev_id(const char *name)
void * rte_cryptodev_get_sec_ctx(uint8_t dev_id)
void * rte_cryptodev_sym_session_create(uint8_t dev_id, struct rte_crypto_sym_xform *xforms, struct rte_mempool *mp)
unsigned max_nb_queue_pairs
uint8_t rte_cryptodev_device_count_by_driver(uint8_t driver_id)
static void rte_cryptodev_sym_session_opaque_data_set(void *sess, uint64_t opaque)
unsigned int rte_cryptodev_is_valid_dev(uint8_t dev_id)
void(* rte_cryptodev_raw_post_dequeue_t)(void *user_data, uint32_t index, uint8_t is_op_success)
struct rte_crypto_param_range iv_size
rte_crypto_op_status
Definition: rte_crypto.h:38
int rte_cryptodev_raw_dequeue_done(struct rte_crypto_raw_dp_ctx *ctx, uint32_t n)
struct rte_cryptodev_asymmetric_capability asym
rte_crypto_aead_algorithm
rte_crypto_cipher_algorithm