DPDK 25.03.0 — Doxygen-generated source listing of rte_dmadev.h (DMA device library API).
Note: the numbers embedded at the start of each line below are the original header's
line numbers from the Doxygen listing, not part of the code itself.
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2021 HiSilicon Limited
3  * Copyright(c) 2021 Intel Corporation
4  * Copyright(c) 2021 Marvell International Ltd
5  * Copyright(c) 2021 SmartShare Systems
6  */
7 
8 #ifndef RTE_DMADEV_H
9 #define RTE_DMADEV_H
10 
147 #include <stdint.h>
148 
149 #include <rte_bitops.h>
150 #include <rte_common.h>
151 
152 #ifdef __cplusplus
153 extern "C" {
154 #endif
155 
/** Default upper bound on the number of DMA devices (see rte_dma_dev_max()). */
157 #define RTE_DMADEV_DEFAULT_MAX 64
158 
/**
 * Set the maximum number of DMA devices.
 * NOTE(review): presumably must be called before any device is probed;
 * returns 0 on success, negative on error — confirm against library docs.
 */
171 int rte_dma_dev_max(size_t dev_max);
172 
/**
 * Look up the device id for the DMA device with the given name.
 * NOTE(review): int return — a negative value presumably signals "not found";
 * confirm exact error convention.
 */
183 int rte_dma_get_dev_id_by_name(const char *name);
184 
/** Return true iff @p dev_id refers to a valid DMA device. */
194 bool rte_dma_is_valid(int16_t dev_id);
195 
/** Return the number of DMA devices currently available. */
203 uint16_t rte_dma_count_avail(void);
204 
/**
 * Return the id of the next valid DMA device at or after @p start_dev_id,
 * or -1 when there is none (used by RTE_DMA_FOREACH_DEV).
 */
213 int16_t rte_dma_next_dev(int16_t start_dev_id);
214 
/**
 * Iterate @p p over all valid DMA device ids: starts from
 * rte_dma_next_dev(0) and advances with rte_dma_next_dev(p + 1)
 * until it returns -1 (no more devices).
 */
216 #define RTE_DMA_FOREACH_DEV(p) \
217  for (p = rte_dma_next_dev(0); \
218  p != -1; \
219  p = rte_dma_next_dev(p + 1))
220 
221 
/* Device capability flags, reported in the dev_capa bitmask of
 * struct rte_dma_info. Bits 0-8 describe transfer directions and device
 * features; bits 32+ describe which operations the device supports.
 */
/** Support memory-to-memory transfers. */
226 #define RTE_DMA_CAPA_MEM_TO_MEM RTE_BIT64(0)
227 
/** Support memory-to-device transfers. */
228 #define RTE_DMA_CAPA_MEM_TO_DEV RTE_BIT64(1)
229 
/** Support device-to-memory transfers. */
230 #define RTE_DMA_CAPA_DEV_TO_MEM RTE_BIT64(2)
231 
/** Support device-to-device transfers. */
232 #define RTE_DMA_CAPA_DEV_TO_DEV RTE_BIT64(3)
233 
/** Support shared virtual addressing (SVA) — NOTE(review): presumably the
 * device can use process virtual addresses directly; confirm in full docs. */
239 #define RTE_DMA_CAPA_SVA RTE_BIT64(4)
240 
/** Support silent mode — NOTE(review): presumably completions are not
 * reported to software; confirm against the enable_silent config field. */
245 #define RTE_DMA_CAPA_SILENT RTE_BIT64(5)
246 
/** Device handles transfer errors itself (see rte_dma_vchan_status()). */
253 #define RTE_DMA_CAPA_HANDLES_ERRORS RTE_BIT64(6)
254 
/** Support auto-free of the source buffer for mem-to-dev transfers
 * (see RTE_DMA_OP_FLAG_AUTO_FREE and struct rte_dma_auto_free_param). */
260 #define RTE_DMA_CAPA_M2D_AUTO_FREE RTE_BIT64(7)
261 
/** Support strict-priority scheduling (see rte_dma_conf.priority and
 * rte_dma_info.nb_priorities). */
267 #define RTE_DMA_CAPA_PRI_POLICY_SP RTE_BIT64(8)
268 
/** Support the copy operation (rte_dma_copy()). */
273 #define RTE_DMA_CAPA_OPS_COPY RTE_BIT64(32)
274 
/** Support the scatter-gather copy operation (rte_dma_copy_sg()). */
275 #define RTE_DMA_CAPA_OPS_COPY_SG RTE_BIT64(33)
276 
/** Support the fill operation (rte_dma_fill()). */
277 #define RTE_DMA_CAPA_OPS_FILL RTE_BIT64(34)
/**
 * DMA device information, retrieved with rte_dma_info_get().
 * (Doxygen elided the original per-field comment lines — restored below.)
 */
285 struct rte_dma_info {
286  const char *dev_name; /**< Device name. */
288  uint64_t dev_capa; /**< Bitmask of RTE_DMA_CAPA_* capability flags. */
290  uint16_t max_vchans; /**< Maximum number of virtual DMA channels. */
292  uint16_t max_desc; /**< Maximum number of descriptors per vchan. */
294  uint16_t min_desc; /**< Minimum number of descriptors per vchan. */
302  uint16_t max_sges; /**< Max scatter-gather entries per rte_dma_copy_sg() op. */
304  int16_t numa_node; /**< NUMA node — NOTE(review): presumably negative when unknown; confirm. */
306  uint16_t nb_vchans; /**< Number of virtual DMA channels configured. */
310  uint16_t nb_priorities; /**< Number of priority levels (see rte_dma_conf.priority). */
311 };
312 
325 int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info);
326 
/** DMA device configuration, passed to rte_dma_configure(). */
332 struct rte_dma_conf {
337  uint16_t nb_vchans; /**< Number of virtual DMA channels to set up. */
346  /** The priority of the DMA device.
347  * This value should be lower than the field 'nb_priorities' of struct
348  * rte_dma_info, obtained from rte_dma_info_get(). If the DMA device
349  * does not support priority scheduling, this value should be zero.
350  *
351  * Lowest value indicates higher priority and vice-versa.
352  */
353  uint16_t priority;
354 };
355 
/**
 * Configure a DMA device with @p dev_conf.
 * NOTE(review): prototype only visible here — returns 0 on success,
 * negative otherwise per the surrounding API's convention; confirm.
 */
372 int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf);
373 
/** Start a DMA device — NOTE(review): presumably after rte_dma_configure(); confirm ordering. */
386 int rte_dma_start(int16_t dev_id);
387 
/** Stop a started DMA device. */
399 int rte_dma_stop(int16_t dev_id);
400 
/** Close (release) a DMA device. */
412 int rte_dma_close(int16_t dev_id);
452 };
453 
460  RTE_DMA_PORT_NONE,
462 };
463 
476  union {
525  __extension__
526  union {
527  struct {
528  uint64_t coreid : 4;
529  uint64_t pfid : 8;
530  uint64_t vfen : 1;
531  uint64_t vfid : 16;
533  uint64_t pasid : 20;
535  uint64_t attr : 3;
537  uint64_t ph : 2;
539  uint64_t st : 16;
540  };
541  uint64_t val;
542  } pcie;
543  };
544  uint64_t reserved[2];
545 };
546 
551  union {
552  struct {
560  struct rte_mempool *pool;
561  } m2d;
562  };
564  uint64_t reserved[2];
565 };
566 
579  uint16_t nb_desc;
604 };
605 
/**
 * Configure virtual DMA channel @p vchan of device @p dev_id with @p conf.
 * NOTE(review): prototype only — confirm it must be called between
 * configure and start, per the library docs.
 */
621 int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan,
622  const struct rte_dma_vchan_conf *conf);
623 
631  uint64_t submitted;
635  uint64_t completed;
637  uint64_t errors;
638 };
639 
/** Special vchan value: address all virtual DMA channels of a device. */
646 #define RTE_DMA_ALL_VCHAN 0xFFFFu
647 
/** Retrieve stats for @p vchan (or RTE_DMA_ALL_VCHAN) into @p stats. */
663 int rte_dma_stats_get(int16_t dev_id, uint16_t vchan,
664  struct rte_dma_stats *stats);
665 
/** Reset stats for @p vchan (or RTE_DMA_ALL_VCHAN). */
678 int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan);
679 
690 };
691 
/** Query the running state of virtual channel @p vchan into @p status. */
707 int
708 rte_dma_vchan_status(int16_t dev_id, uint16_t vchan, enum rte_dma_vchan_status *status);
709 
/** Dump internal device state to stream @p f (debug aid). */
721 int rte_dma_dump(int16_t dev_id, FILE *f);
722 
785 };
786 
/**
 * Scatter-gather list entry used by rte_dma_copy_sg().
 * NOTE(review): this Doxygen listing elided original line 793 — the
 * 'rte_iova_t addr' member (see the cross-reference index below) — so the
 * struct is incomplete here; consult the full header.
 */
792 struct rte_dma_sge {
794  uint32_t length; /**< Length of the segment in bytes. */
795 };
796 
797 #ifdef __cplusplus
798 }
799 #endif
800 
801 #include "rte_dmadev_core.h"
802 #include "rte_dmadev_trace_fp.h"
803 
804 #ifdef __cplusplus
805 extern "C" {
806 #endif
807 
/* Per-operation flags accepted by the enqueue functions below
 * (rte_dma_copy(), rte_dma_copy_sg(), rte_dma_fill()). */
/** Fence: NOTE(review): presumably previously enqueued ops must complete
 * before this one starts; confirm exact ordering semantics. */
819 #define RTE_DMA_OP_FLAG_FENCE RTE_BIT64(0)
820 
/** Submit: trigger hardware for this and all previously enqueued ops,
 * equivalent to following the enqueue with rte_dma_submit(). */
824 #define RTE_DMA_OP_FLAG_SUBMIT RTE_BIT64(1)
825 
/** Hint to write completion data into the last-level cache. */
829 #define RTE_DMA_OP_FLAG_LLC RTE_BIT64(2)
830 
/** Auto-free the source buffer on completion
 * (requires RTE_DMA_CAPA_M2D_AUTO_FREE). */
836 #define RTE_DMA_OP_FLAG_AUTO_FREE RTE_BIT64(3)
837 
/**
 * Enqueue a copy of @p length bytes from IOVA @p src to IOVA @p dst on
 * virtual channel @p vchan of device @p dev_id, dispatching through the
 * device's fast-path object.
 *
 * Parameter validation is compiled only under RTE_DMADEV_DEBUG:
 * -EINVAL for an invalid device id or zero length, -ENOTSUP when the
 * driver has no copy callback. The driver's return value is passed
 * through unchanged after emitting a tracepoint.
 */
865 static inline int
866 rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst,
867  uint32_t length, uint64_t flags)
868 {
869  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
870  int ret;
871 
872 #ifdef RTE_DMADEV_DEBUG
873  if (!rte_dma_is_valid(dev_id) || length == 0)
874  return -EINVAL;
875  if (obj->copy == NULL)
876  return -ENOTSUP;
877 #endif
878 
/* Fast path: call straight into the driver; no validation in release builds. */
879  ret = obj->copy(obj->dev_private, vchan, src, dst, length, flags);
880  rte_dma_trace_copy(dev_id, vchan, src, dst, length, flags, ret);
881 
882  return ret;
883 }
884 
/**
 * Enqueue a scatter-gather copy: @p nb_src entries of @p src are copied
 * to @p nb_dst entries of @p dst on virtual channel @p vchan of device
 * @p dev_id, via the driver's copy_sg fast-path callback.
 *
 * Validation only under RTE_DMADEV_DEBUG: -EINVAL for an invalid device,
 * NULL lists or zero entry counts; -ENOTSUP when the driver lacks the
 * callback. The driver's return value is passed through after tracing.
 */
915 static inline int
916 rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src,
917  struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst,
918  uint64_t flags)
919 {
920  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
921  int ret;
922 
923 #ifdef RTE_DMADEV_DEBUG
924  if (!rte_dma_is_valid(dev_id) || src == NULL || dst == NULL ||
925  nb_src == 0 || nb_dst == 0)
926  return -EINVAL;
927  if (obj->copy_sg == NULL)
928  return -ENOTSUP;
929 #endif
930 
931  ret = obj->copy_sg(obj->dev_private, vchan, src, dst, nb_src, nb_dst, flags);
932  rte_dma_trace_copy_sg(dev_id, vchan, src, dst, nb_src, nb_dst, flags,
933  ret);
934 
935  return ret;
936 }
937 
/**
 * Enqueue a fill operation: write @p pattern across @p length bytes at
 * IOVA @p dst on virtual channel @p vchan of device @p dev_id.
 *
 * Validation only under RTE_DMADEV_DEBUG: -EINVAL for an invalid device
 * or zero length, -ENOTSUP when the driver has no fill callback. The
 * driver's return value is passed through after tracing.
 */
964 static inline int
965 rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern,
966  rte_iova_t dst, uint32_t length, uint64_t flags)
967 {
968  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
969  int ret;
970 
971 #ifdef RTE_DMADEV_DEBUG
972  if (!rte_dma_is_valid(dev_id) || length == 0)
973  return -EINVAL;
974  if (obj->fill == NULL)
975  return -ENOTSUP;
976 #endif
977 
978  ret = obj->fill(obj->dev_private, vchan, pattern, dst, length, flags);
979  rte_dma_trace_fill(dev_id, vchan, pattern, dst, length, flags, ret);
980 
981  return ret;
982 }
983 
/**
 * Trigger hardware to begin all operations enqueued on @p vchan of device
 * @p dev_id that were not yet submitted (i.e. those enqueued without
 * RTE_DMA_OP_FLAG_SUBMIT).
 *
 * Validation only under RTE_DMADEV_DEBUG: -EINVAL for an invalid device,
 * -ENOTSUP when the driver has no submit callback.
 */
998 static inline int
999 rte_dma_submit(int16_t dev_id, uint16_t vchan)
1000 {
1001  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1002  int ret;
1003 
1004 #ifdef RTE_DMADEV_DEBUG
1005  if (!rte_dma_is_valid(dev_id))
1006  return -EINVAL;
1007  if (obj->submit == NULL)
1008  return -ENOTSUP;
1009 #endif
1010 
1011  ret = obj->submit(obj->dev_private, vchan);
1012  rte_dma_trace_submit(dev_id, vchan, ret);
1013 
1014  return ret;
1015 }
1016 
/**
 * Return the number of operations (at most @p nb_cpls) that have completed
 * on @p vchan of device @p dev_id. On return, *last_idx holds the index of
 * the last completed op and *has_error is set by the driver; it is cleared
 * to false here before the call. Both out-pointers may be NULL — local
 * storage is substituted so drivers never see NULL.
 *
 * Under RTE_DMADEV_DEBUG, 0 is returned for an invalid device, zero
 * nb_cpls, or a missing driver callback.
 */
1039 static inline uint16_t
1040 rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls,
1041  uint16_t *last_idx, bool *has_error)
1042 {
1043  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1044  uint16_t idx, ret;
1045  bool err;
1046 
1047 #ifdef RTE_DMADEV_DEBUG
1048  if (!rte_dma_is_valid(dev_id) || nb_cpls == 0)
1049  return 0;
1050  if (obj->completed == NULL)
1051  return 0;
1052 #endif
1053 
1054  /* Ensure the pointer values are non-null to simplify drivers.
1055  * In most cases these should be compile time evaluated, since this is
1056  * an inline function.
1057  * - If NULL is explicitly passed as parameter, then compiler knows the
1058  * value is NULL
1059  * - If address of local variable is passed as parameter, then compiler
1060  * can know it's non-NULL.
1061  */
1062  if (last_idx == NULL)
1063  last_idx = &idx;
1064  if (has_error == NULL)
1065  has_error = &err;
1066 
/* Clear the error flag up front; the driver only needs to set it on error. */
1067  *has_error = false;
1068  ret = obj->completed(obj->dev_private, vchan, nb_cpls, last_idx, has_error);
1069  rte_dma_trace_completed(dev_id, vchan, nb_cpls, last_idx, has_error,
1070  ret);
1071 
1072  return ret;
1073 }
1074 
/**
 * Like rte_dma_completed(), but additionally reports a per-operation
 * status code in the caller-supplied @p status array. @p status must be
 * non-NULL (enforced only under RTE_DMADEV_DEBUG); @p last_idx may be
 * NULL, in which case local storage is substituted.
 *
 * Under RTE_DMADEV_DEBUG, 0 is returned for an invalid device, zero
 * nb_cpls, NULL status, or a missing driver callback.
 */
1101 static inline uint16_t
1102 rte_dma_completed_status(int16_t dev_id, uint16_t vchan,
1103  const uint16_t nb_cpls, uint16_t *last_idx,
1104  enum rte_dma_status_code *status)
1105 {
1106  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1107  uint16_t idx, ret;
1108 
1109 #ifdef RTE_DMADEV_DEBUG
1110  if (!rte_dma_is_valid(dev_id) || nb_cpls == 0 || status == NULL)
1111  return 0;
1112  if (obj->completed_status == NULL)
1113  return 0;
1114 #endif
1115 
/* Substitute local storage so drivers never see a NULL last_idx. */
1116  if (last_idx == NULL)
1117  last_idx = &idx;
1118 
1119  ret = obj->completed_status(obj->dev_private, vchan, nb_cpls, last_idx, status);
1120  rte_dma_trace_completed_status(dev_id, vchan, nb_cpls, last_idx, status,
1121  ret);
1122 
1123  return ret;
1124 }
1125 
/**
 * Return the remaining capacity (in operations) that can be enqueued on
 * @p vchan of device @p dev_id, as reported by the driver's
 * burst_capacity callback.
 *
 * Under RTE_DMADEV_DEBUG, 0 is returned for an invalid device or a
 * missing callback; note 0 is therefore ambiguous between "no capacity"
 * and "invalid/unsupported".
 */
1138 static inline uint16_t
1139 rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
1140 {
1141  struct rte_dma_fp_object *obj = &rte_dma_fp_objs[dev_id];
1142  uint16_t ret;
1143 
1144 #ifdef RTE_DMADEV_DEBUG
1145  if (!rte_dma_is_valid(dev_id))
1146  return 0;
1147  if (obj->burst_capacity == NULL)
1148  return 0;
1149 #endif
1150  ret = obj->burst_capacity(obj->dev_private, vchan);
1151  rte_dma_trace_burst_capacity(dev_id, vchan, ret);
1152 
1153  return ret;
1154 }
1155 
1156 #ifdef __cplusplus
1157 }
1158 #endif
1159 
1160 #endif /* RTE_DMADEV_H */
__extension__ union rte_dma_port_param::@135::@137 pcie
uint16_t max_desc
Definition: rte_dmadev.h:292
int rte_dma_configure(int16_t dev_id, const struct rte_dma_conf *dev_conf)
uint16_t nb_vchans
Definition: rte_dmadev.h:337
uint16_t rte_dma_count_avail(void)
int rte_dma_stats_get(int16_t dev_id, uint16_t vchan, struct rte_dma_stats *stats)
int rte_dma_start(int16_t dev_id)
int rte_dma_stop(int16_t dev_id)
static int rte_dma_copy_sg(int16_t dev_id, uint16_t vchan, struct rte_dma_sge *src, struct rte_dma_sge *dst, uint16_t nb_src, uint16_t nb_dst, uint64_t flags)
Definition: rte_dmadev.h:916
uint64_t rte_iova_t
Definition: rte_common.h:736
struct rte_dma_auto_free_param auto_free
Definition: rte_dmadev.h:603
rte_iova_t addr
Definition: rte_dmadev.h:793
struct rte_dma_port_param dst_port
Definition: rte_dmadev.h:595
int rte_dma_stats_reset(int16_t dev_id, uint16_t vchan)
uint64_t dev_capa
Definition: rte_dmadev.h:288
static int rte_dma_copy(int16_t dev_id, uint16_t vchan, rte_iova_t src, rte_iova_t dst, uint32_t length, uint64_t flags)
Definition: rte_dmadev.h:866
bool rte_dma_is_valid(int16_t dev_id)
int rte_dma_get_dev_id_by_name(const char *name)
int rte_dma_close(int16_t dev_id)
int rte_dma_dev_max(size_t dev_max)
int16_t rte_dma_next_dev(int16_t start_dev_id)
rte_dma_direction
Definition: rte_dmadev.h:419
int rte_dma_dump(int16_t dev_id, FILE *f)
bool enable_silent
Definition: rte_dmadev.h:345
rte_dma_port_type
Definition: rte_dmadev.h:459
struct rte_dma_port_param src_port
Definition: rte_dmadev.h:587
uint16_t max_sges
Definition: rte_dmadev.h:302
const char * dev_name
Definition: rte_dmadev.h:286
uint64_t reserved[2]
Definition: rte_dmadev.h:544
static int rte_dma_submit(int16_t dev_id, uint16_t vchan)
Definition: rte_dmadev.h:999
uint64_t errors
Definition: rte_dmadev.h:637
int rte_dma_info_get(int16_t dev_id, struct rte_dma_info *dev_info)
int16_t numa_node
Definition: rte_dmadev.h:304
enum rte_dma_port_type port_type
Definition: rte_dmadev.h:475
rte_dma_status_code
Definition: rte_dmadev.h:728
int rte_dma_vchan_setup(int16_t dev_id, uint16_t vchan, const struct rte_dma_vchan_conf *conf)
uint16_t min_desc
Definition: rte_dmadev.h:294
static uint16_t rte_dma_completed_status(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls, uint16_t *last_idx, enum rte_dma_status_code *status)
Definition: rte_dmadev.h:1102
uint16_t max_vchans
Definition: rte_dmadev.h:290
enum rte_dma_direction direction
Definition: rte_dmadev.h:577
static uint16_t rte_dma_burst_capacity(int16_t dev_id, uint16_t vchan)
Definition: rte_dmadev.h:1139
rte_dma_vchan_status
Definition: rte_dmadev.h:686
uint64_t completed
Definition: rte_dmadev.h:635
uint64_t submitted
Definition: rte_dmadev.h:631
static int rte_dma_fill(int16_t dev_id, uint16_t vchan, uint64_t pattern, rte_iova_t dst, uint32_t length, uint64_t flags)
Definition: rte_dmadev.h:965
static uint16_t rte_dma_completed(int16_t dev_id, uint16_t vchan, const uint16_t nb_cpls, uint16_t *last_idx, bool *has_error)
Definition: rte_dmadev.h:1040
uint16_t nb_vchans
Definition: rte_dmadev.h:306
uint32_t length
Definition: rte_dmadev.h:794
struct rte_mempool * pool
Definition: rte_dmadev.h:560
uint16_t nb_priorities
Definition: rte_dmadev.h:310