diff --git a/doc/trex_appendix_mellanox.asciidoc b/doc/trex_appendix_mellanox.asciidoc index 4749c54ccc..ce0d57dc97 100755 --- a/doc/trex_appendix_mellanox.asciidoc +++ b/doc/trex_appendix_mellanox.asciidoc @@ -269,10 +269,19 @@ TRex v2.32 and lower works with OFED 4.1 and it can't work with OFED 4.2/newer | 2.33-2.56 | *only* 4.3 | CentOS 7.4 | 2.57 and v2.86 | *only* 4.6 | CentOS 7.6 | 2.89 and above | *only* GA 5.3-1 | CentOS 7.9 +| v3.01 and above | *only* 5.7-1 | CentOS 7.9 |================= WARNING: In our case an upgrade from CentOS 7.3 to CentOS 7.4 using `yum update` didn't work and we needed to *reinstall* everything from scratch see link:https://trex-tgn.cisco.com/youtrack/issue/trex-504[trex-504] +[NOTE] +===================================================================== +In v3.01 we changed the default value of the device argument `max_dump_files_num`. + +We now use a default value of 0 instead of 128. + +If this argument is important to you, you can set its value in the TRex config file by adding the line: + +dpdk_devargs: ['max_dump_files_num=your_num'] +===================================================================== + == TRex specific implementation details TRex uses flow director filter to steer specific packets to specific queues. diff --git a/external_libs/ibverbs/x86_64/include/infiniband/ib_user_ioctl_verbs.h b/external_libs/ibverbs/x86_64/include/infiniband/ib_user_ioctl_verbs.h index cfea82acfe..3072e5d6b6 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/ib_user_ioctl_verbs.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/ib_user_ioctl_verbs.h @@ -208,6 +208,7 @@ enum ib_uverbs_read_counters_flags { enum ib_uverbs_advise_mr_advice { IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH, IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE, + IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT, }; enum ib_uverbs_advise_mr_flag { @@ -239,6 +240,7 @@ enum rdma_driver_id { RDMA_DRIVER_OCRDMA, RDMA_DRIVER_NES, RDMA_DRIVER_I40IW, + RDMA_DRIVER_IRDMA = RDMA_DRIVER_I40IW, RDMA_DRIVER_VMW_PVRDMA, RDMA_DRIVER_QEDR, RDMA_DRIVER_HNS, diff --git a/external_libs/ibverbs/x86_64/include/infiniband/mad.h b/external_libs/ibverbs/x86_64/include/infiniband/mad.h index 0945c03364..7f271d339f 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/mad.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/mad.h @@ -76,7 +76,7 @@ extern "C" { #define IB_BM_BKEY_AND_DATA_SZ (IB_MAD_SIZE - IB_BM_BKEY_OFFS) #define IB_CC_DATA_OFFS 64 #define IB_CC_DATA_SZ (IB_MAD_SIZE - IB_CC_DATA_OFFS) -#define IB_CC_LOG_DATA_OFFS 32 +#define IB_CC_LOG_DATA_OFFS 32 #define IB_CC_LOG_DATA_SZ (IB_MAD_SIZE - IB_CC_LOG_DATA_OFFS) enum MAD_CLASSES { @@ -1319,12 +1319,19 @@ enum MAD_FIELDS { IB_PC_QP1_DROP_F, /* - * More PortInfoExtended fields + * More PortInfoExtended fields (HDR) */ IB_PORT_EXT_HDR_FEC_MODE_SUPPORTED_F, IB_PORT_EXT_HDR_FEC_MODE_ENABLED_F, IB_PORT_EXT_HDR_FEC_MODE_LAST_F, + /* + * More PortInfoExtended fields (NDR) + */ + IB_PORT_EXT_NDR_FEC_MODE_SUPPORTED_F, + IB_PORT_EXT_NDR_FEC_MODE_ENABLED_F, + IB_PORT_EXT_NDR_FEC_MODE_LAST_F, + IB_FIELD_LAST_ /* must be last */ }; diff --git a/external_libs/ibverbs/x86_64/include/infiniband/mlx5_api.h b/external_libs/ibverbs/x86_64/include/infiniband/mlx5_api.h index 7895591a7a..7fc606949e 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/mlx5_api.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/mlx5_api.h @@ -55,17 +55,26 @@ #define MLX5DV_DM_TYPE_STEERING_SW_ICM MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM #define MLX5DV_DM_TYPE_HEADER_MODIFY_SW_ICM
MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM #define MLX5DV_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM +#define MLX5DV_DM_TYPE_ENCAP_SW_ICM MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM #define mlx5dv_devx_create_event_channel_flags mlx5_ib_uapi_devx_create_event_channel_flags #define MLX5DV_DEVX_CREATE_EVENT_CHANNEL_FLAGS_OMIT_EV_DATA MLX5_IB_UAPI_DEVX_CR_EV_CH_FLAGS_OMIT_DATA -#define MLX5DV_DEVX_PORT_VPORT MLX5_IB_UAPI_QUERY_PORT_VPORT -#define MLX5DV_DEVX_PORT_VPORT_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID -#define MLX5DV_DEVX_PORT_ESW_OWNER_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID -#define MLX5DV_DEVX_PORT_VPORT_ICM_RX MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_RX -#define MLX5DV_DEVX_PORT_VPORT_ICM_TX MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_TX -#define MLX5DV_DEVX_PORT_MATCH_REG_C_0 MLX5_IB_UAPI_QUERY_PORT_MATCH_REG_C_0 +#define MLX5DV_DEVX_PORT_VPORT MLX5_IB_UAPI_QUERY_PORT_VPORT_OLD +#define MLX5DV_DEVX_PORT_VPORT_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID_OLD +#define MLX5DV_DEVX_PORT_ESW_OWNER_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID_OLD +#define MLX5DV_DEVX_PORT_VPORT_ICM_RX MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_RX_OLD +#define MLX5DV_DEVX_PORT_VPORT_ICM_TX MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_TX_OLD +#define MLX5DV_DEVX_PORT_MATCH_REG_C_0 MLX5_IB_UAPI_QUERY_PORT_MATCH_REG_C_0_OLD #define mlx5dv_devx_reg_32 mlx5_ib_uapi_devx_reg_32 #define MLX5DV_PP_ALLOC_FLAGS_DEDICATED_INDEX MLX5_IB_UAPI_PP_ALLOC_FLAGS_DEDICATED_INDEX #define MLX5DV_UAR_ALLOC_TYPE_BF MLX5_IB_UAPI_UAR_ALLOC_TYPE_BF #define MLX5DV_UAR_ALLOC_TYPE_NC MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC +#define MLX5DV_QUERY_PORT_VPORT MLX5_IB_UAPI_QUERY_PORT_VPORT +#define MLX5DV_QUERY_PORT_VPORT_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID +#define MLX5DV_QUERY_PORT_VPORT_STEERING_ICM_RX MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX +#define MLX5DV_QUERY_PORT_VPORT_STEERING_ICM_TX MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX +#define MLX5DV_QUERY_PORT_VPORT_REG_C0 MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0 +#define MLX5DV_QUERY_PORT_ESW_OWNER_VHCA_ID MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID +#define mlx5dv_port mlx5_ib_uapi_query_port +#define mlx5dv_reg mlx5_ib_uapi_reg #endif diff --git a/external_libs/ibverbs/x86_64/include/infiniband/mlx5_user_ioctl_verbs.h b/external_libs/ibverbs/x86_64/include/infiniband/mlx5_user_ioctl_verbs.h index a1aa36abad..2651fccec7 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/mlx5_user_ioctl_verbs.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/mlx5_user_ioctl_verbs.h @@ -60,12 +60,12 @@ struct mlx5_ib_uapi_devx_async_cmd_hdr { }; enum mlx5_ib_uapi_devx_query_port_comp_mask { - MLX5_IB_UAPI_QUERY_PORT_VPORT = 1 << 0, - MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID = 1 << 1, - MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID = 1 << 2, - MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_RX = 1 << 3, - MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_TX = 1 << 4, - MLX5_IB_UAPI_QUERY_PORT_MATCH_REG_C_0 = 1 << 5, + MLX5_IB_UAPI_QUERY_PORT_VPORT_OLD = 1 << 0, + MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID_OLD = 1 << 1, + MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID_OLD = 1 << 2, + MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_RX_OLD = 1 << 3, + MLX5_IB_UAPI_QUERY_PORT_VPORT_ICM_TX_OLD = 1 << 4, + MLX5_IB_UAPI_QUERY_PORT_MATCH_REG_C_0_OLD = 1 << 5, }; struct mlx5_ib_uapi_devx_reg_32 { @@ -78,6 +78,7 @@ enum mlx5_ib_uapi_dm_type { MLX5_IB_UAPI_DM_TYPE_STEERING_SW_ICM, MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_SW_ICM, MLX5_IB_UAPI_DM_TYPE_HEADER_MODIFY_PATTERN_SW_ICM, + MLX5_IB_UAPI_DM_TYPE_ENCAP_SW_ICM, }; enum 
mlx5_ib_uapi_devx_create_event_channel_flags { @@ -98,5 +99,30 @@ enum mlx5_ib_uapi_uar_alloc_type { MLX5_IB_UAPI_UAR_ALLOC_TYPE_NC = 0x1, }; +enum mlx5_ib_uapi_query_port_flags { + MLX5_IB_UAPI_QUERY_PORT_VPORT = 1 << 0, + MLX5_IB_UAPI_QUERY_PORT_VPORT_VHCA_ID = 1 << 1, + MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_RX = 1 << 2, + MLX5_IB_UAPI_QUERY_PORT_VPORT_STEERING_ICM_TX = 1 << 3, + MLX5_IB_UAPI_QUERY_PORT_VPORT_REG_C0 = 1 << 4, + MLX5_IB_UAPI_QUERY_PORT_ESW_OWNER_VHCA_ID = 1 << 5, +}; + +struct mlx5_ib_uapi_reg { + __u32 value; + __u32 mask; +}; + +struct mlx5_ib_uapi_query_port { + __aligned_u64 flags; + __u16 vport; + __u16 vport_vhca_id; + __u16 esw_owner_vhca_id; + __u16 rsvd0; + __aligned_u64 vport_steering_icm_rx; + __aligned_u64 vport_steering_icm_tx; + struct mlx5_ib_uapi_reg reg_c0; +}; + #endif diff --git a/external_libs/ibverbs/x86_64/include/infiniband/mlx5dv.h b/external_libs/ibverbs/x86_64/include/infiniband/mlx5dv.h index 9f2d2f94cb..e669ac5bbd 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/mlx5dv.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/mlx5dv.h @@ -82,6 +82,10 @@ enum mlx5dv_context_comp_mask { MLX5DV_CONTEXT_MASK_DC_ODP_CAPS = 1 << 7, MLX5DV_CONTEXT_MASK_HCA_CORE_CLOCK = 1 << 8, MLX5DV_CONTEXT_MASK_NUM_LAG_PORTS = 1 << 9, + MLX5DV_CONTEXT_MASK_SIGNATURE_OFFLOAD = 1 << 10, + MLX5DV_CONTEXT_MASK_DCI_STREAMS = 1 << 11, + MLX5DV_CONTEXT_MASK_WR_MEMCPY_LENGTH = 1 << 12, + MLX5DV_CONTEXT_MASK_CRYPTO_OFFLOAD = 1 << 13, }; struct mlx5dv_cqe_comp_caps { @@ -102,6 +106,11 @@ struct mlx5dv_striding_rq_caps { uint32_t supported_qpts; }; +struct mlx5dv_dci_streams_caps { + uint8_t max_log_num_concurent; + uint8_t max_log_num_errored; +}; + enum mlx5dv_tunnel_offloads { MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_VXLAN = 1 << 0, MLX5DV_RAW_PACKET_CAP_TUNNELED_OFFLOAD_GRE = 1 << 1, @@ -118,6 +127,89 @@ enum mlx5dv_flow_action_cap_flags { MLX5DV_FLOW_ACTION_FLAGS_ESP_AES_GCM_TX_IV_IS_ESN = 1 << 4, }; +enum mlx5dv_sig_type { + MLX5DV_SIG_TYPE_T10DIF, + MLX5DV_SIG_TYPE_CRC, +}; + +enum mlx5dv_sig_prot_caps { + MLX5DV_SIG_PROT_CAP_T10DIF = 1 << MLX5DV_SIG_TYPE_T10DIF, + MLX5DV_SIG_PROT_CAP_CRC = 1 << MLX5DV_SIG_TYPE_CRC, +}; + +enum mlx5dv_sig_t10dif_bg_type { + MLX5DV_SIG_T10DIF_CRC, + MLX5DV_SIG_T10DIF_CSUM, +}; + +enum mlx5dv_sig_t10dif_bg_caps { + MLX5DV_SIG_T10DIF_BG_CAP_CRC = 1 << MLX5DV_SIG_T10DIF_CRC, + MLX5DV_SIG_T10DIF_BG_CAP_CSUM = 1 << MLX5DV_SIG_T10DIF_CSUM, +}; + +enum mlx5dv_sig_crc_type { + MLX5DV_SIG_CRC_TYPE_CRC32, + MLX5DV_SIG_CRC_TYPE_CRC32C, + MLX5DV_SIG_CRC_TYPE_CRC64_XP10, +}; + +enum mlx5dv_sig_crc_type_caps { + MLX5DV_SIG_CRC_TYPE_CAP_CRC32 = 1 << MLX5DV_SIG_CRC_TYPE_CRC32, + MLX5DV_SIG_CRC_TYPE_CAP_CRC32C = 1 << MLX5DV_SIG_CRC_TYPE_CRC32C, + MLX5DV_SIG_CRC_TYPE_CAP_CRC64_XP10 = 1 << MLX5DV_SIG_CRC_TYPE_CRC64_XP10, +}; + +enum mlx5dv_block_size { + MLX5DV_BLOCK_SIZE_512, + MLX5DV_BLOCK_SIZE_520, + MLX5DV_BLOCK_SIZE_4048, + MLX5DV_BLOCK_SIZE_4096, + MLX5DV_BLOCK_SIZE_4160, +}; + +enum mlx5dv_block_size_caps { + MLX5DV_BLOCK_SIZE_CAP_512 = 1 << MLX5DV_BLOCK_SIZE_512, + MLX5DV_BLOCK_SIZE_CAP_520 = 1 << MLX5DV_BLOCK_SIZE_520, + MLX5DV_BLOCK_SIZE_CAP_4048 = 1 << MLX5DV_BLOCK_SIZE_4048, + MLX5DV_BLOCK_SIZE_CAP_4096 = 1 << MLX5DV_BLOCK_SIZE_4096, + MLX5DV_BLOCK_SIZE_CAP_4160 = 1 << MLX5DV_BLOCK_SIZE_4160, +}; + +struct mlx5dv_sig_caps { + uint64_t block_size; /* use enum mlx5dv_block_size_caps */ + uint32_t block_prot; /* use enum mlx5dv_sig_prot_caps */ + uint16_t t10dif_bg; /* use enum mlx5dv_sig_t10dif_bg_caps */ + uint16_t crc_type; /* use 
enum mlx5dv_sig_crc_type_caps */ +}; + +enum mlx5dv_crypto_engines_caps { + MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS = 1 << 0, + MLX5DV_CRYPTO_ENGINES_CAP_AES_XTS_SINGLE_BLOCK = 1 << 1, +}; + +enum mlx5dv_crypto_wrapped_import_method_caps { + MLX5DV_CRYPTO_WRAPPED_IMPORT_METHOD_CAP_AES_XTS = 1 << 0, +}; + +enum mlx5dv_crypto_caps_flags { + MLX5DV_CRYPTO_CAPS_CRYPTO = 1 << 0, + MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_OPERATIONAL = 1 << 1, + MLX5DV_CRYPTO_CAPS_WRAPPED_CRYPTO_GOING_TO_COMMISSIONING = 1 << 2, +}; + +struct mlx5dv_crypto_caps { + /* + * if failed_selftests != 0 it means there are some self tests errors + * that may render specific crypto engines unusable. Exact code meaning + * should be consulted with NVIDIA. + */ + uint16_t failed_selftests; + uint8_t crypto_engines; /* use enum mlx5dv_crypto_engines_caps */ + uint8_t wrapped_import_method; /* use enum mlx5dv_crypto_wrapped_import_method_caps */ + uint8_t log_max_num_deks; + uint32_t flags; /* use enum mlx5dv_crypto_caps_flags */ +}; + struct mlx5dv_devx_port { uint64_t comp_mask; uint16_t vport_num; @@ -125,7 +217,7 @@ struct mlx5dv_devx_port { uint16_t esw_owner_vhca_id; uint64_t icm_addr_rx; uint64_t icm_addr_tx; - struct mlx5dv_devx_reg_32 reg_c_0; + struct mlx5dv_reg reg_c_0; }; /* @@ -145,6 +237,10 @@ struct mlx5dv_context { uint32_t dc_odp_caps; /* use enum ibv_odp_transport_cap_bits */ void *hca_core_clock; uint8_t num_lag_ports; + struct mlx5dv_sig_caps sig_caps; + struct mlx5dv_dci_streams_caps dci_streams_caps; + size_t max_wr_memcpy_length; + struct mlx5dv_crypto_caps crypto_caps; }; enum mlx5dv_context_flags { @@ -158,6 +254,7 @@ enum mlx5dv_context_flags { MLX5DV_CONTEXT_FLAGS_CQE_128B_COMP = (1 << 4), /* Support CQE 128B compression */ MLX5DV_CONTEXT_FLAGS_CQE_128B_PAD = (1 << 5), /* Support CQE 128B padding */ MLX5DV_CONTEXT_FLAGS_PACKET_BASED_CREDIT_MODE = (1 << 6), + MLX5DV_CONTEXT_FLAGS_REAL_TIME_TS = (1 << 7), }; enum mlx5dv_cq_init_attr_mask { @@ -189,10 +286,13 @@ enum mlx5dv_qp_create_flags { MLX5DV_QP_CREATE_DISABLE_SCATTER_TO_CQE = 1 << 3, MLX5DV_QP_CREATE_ALLOW_SCATTER_TO_CQE = 1 << 4, MLX5DV_QP_CREATE_PACKET_BASED_CREDIT_MODE = 1 << 5, + MLX5DV_QP_CREATE_SIG_PIPELINING = 1 << 6, }; enum mlx5dv_mkey_init_attr_flags { MLX5DV_MKEY_INIT_ATTR_FLAGS_INDIRECT = 1 << 0, + MLX5DV_MKEY_INIT_ATTR_FLAGS_BLOCK_SIGNATURE = 1 << 1, + MLX5DV_MKEY_INIT_ATTR_FLAGS_CRYPTO = 1 << 2, }; struct mlx5dv_mkey_init_attr { @@ -213,6 +313,7 @@ enum mlx5dv_qp_init_attr_mask { MLX5DV_QP_INIT_ATTR_MASK_QP_CREATE_FLAGS = 1 << 0, MLX5DV_QP_INIT_ATTR_MASK_DC = 1 << 1, MLX5DV_QP_INIT_ATTR_MASK_SEND_OPS_FLAGS = 1 << 2, + MLX5DV_QP_INIT_ATTR_MASK_DCI_STREAMS = 1 << 3, }; enum mlx5dv_dc_type { @@ -220,14 +321,25 @@ enum mlx5dv_dc_type { MLX5DV_DCTYPE_DCI, }; +struct mlx5dv_dci_streams { + uint8_t log_num_concurent; + uint8_t log_num_errored; +}; + struct mlx5dv_dc_init_attr { enum mlx5dv_dc_type dc_type; - uint64_t dct_access_key; + union { + uint64_t dct_access_key; + struct mlx5dv_dci_streams dci_streams; + }; }; enum mlx5dv_qp_create_send_ops_flags { MLX5DV_QP_EX_WITH_MR_INTERLEAVED = 1 << 0, MLX5DV_QP_EX_WITH_MR_LIST = 1 << 1, + MLX5DV_QP_EX_WITH_MKEY_CONFIGURE = 1 << 2, + MLX5DV_QP_EX_WITH_RAW_WQE = 1 << 3, + MLX5DV_QP_EX_WITH_MEMCPY = 1 << 4, }; struct mlx5dv_qp_init_attr { @@ -248,8 +360,90 @@ struct mlx5dv_mr_interleaved { uint32_t lkey; }; +enum mlx5dv_sig_t10dif_flags { + MLX5DV_SIG_T10DIF_FLAG_REF_REMAP = 1 << 0, + MLX5DV_SIG_T10DIF_FLAG_APP_ESCAPE = 1 << 1, + MLX5DV_SIG_T10DIF_FLAG_APP_REF_ESCAPE = 1 << 2, +}; + +struct mlx5dv_sig_t10dif 
{ + enum mlx5dv_sig_t10dif_bg_type bg_type; + uint16_t bg; + uint16_t app_tag; + uint32_t ref_tag; + uint16_t flags; /* Use enum mlx5dv_sig_t10dif_flags */ +}; + +struct mlx5dv_sig_crc { + enum mlx5dv_sig_crc_type type; + uint64_t seed; +}; + +struct mlx5dv_sig_block_domain { + enum mlx5dv_sig_type sig_type; + union { + const struct mlx5dv_sig_t10dif *dif; + const struct mlx5dv_sig_crc *crc; + } sig; + enum mlx5dv_block_size block_size; + uint64_t comp_mask; +}; + +enum mlx5dv_sig_mask { + MLX5DV_SIG_MASK_T10DIF_GUARD = 0xc0, + MLX5DV_SIG_MASK_T10DIF_APPTAG = 0x30, + MLX5DV_SIG_MASK_T10DIF_REFTAG = 0x0f, + MLX5DV_SIG_MASK_CRC32 = 0xf0, + MLX5DV_SIG_MASK_CRC32C = MLX5DV_SIG_MASK_CRC32, + MLX5DV_SIG_MASK_CRC64_XP10 = 0xff, +}; + +enum mlx5dv_sig_block_attr_flags { + MLX5DV_SIG_BLOCK_ATTR_FLAG_COPY_MASK = 1 << 0, +}; + +struct mlx5dv_sig_block_attr { + const struct mlx5dv_sig_block_domain *mem; + const struct mlx5dv_sig_block_domain *wire; + uint32_t flags; /* Use enum mlx5dv_sig_block_attr_flags */ + uint8_t check_mask; + uint8_t copy_mask; + uint64_t comp_mask; +}; + +enum mlx5dv_crypto_standard { + MLX5DV_CRYPTO_STANDARD_AES_XTS, +}; + +enum mlx5dv_signature_crypto_order { + MLX5DV_SIGNATURE_CRYPTO_ORDER_SIGNATURE_AFTER_CRYPTO_ON_TX, + MLX5DV_SIGNATURE_CRYPTO_ORDER_SIGNATURE_BEFORE_CRYPTO_ON_TX, +}; + +struct mlx5dv_crypto_attr { + enum mlx5dv_crypto_standard crypto_standard; + bool encrypt_on_tx; + enum mlx5dv_signature_crypto_order signature_crypto_order; + enum mlx5dv_block_size data_unit_size; + char initial_tweak[16]; + struct mlx5dv_dek *dek; + char keytag[8]; + uint64_t comp_mask; +}; + +enum mlx5dv_mkey_conf_flags { + MLX5DV_MKEY_CONF_FLAG_RESET_SIG_ATTR = 1 << 0, +}; + +struct mlx5dv_mkey_conf_attr { + uint32_t conf_flags; /* Use enum mlx5dv_mkey_conf_flags */ + uint64_t comp_mask; +}; + enum mlx5dv_wc_opcode { MLX5DV_WC_UMR = IBV_WC_DRIVER1, + MLX5DV_WC_RAW_WQE = IBV_WC_DRIVER2, + MLX5DV_WC_MEMCPY = IBV_WC_DRIVER3, }; struct mlx5dv_qp_ex { @@ -271,14 +465,38 @@ struct mlx5dv_qp_ex { uint32_t access_flags, /* use enum ibv_access_flags */ uint16_t num_sges, struct ibv_sge *sge); + void (*wr_mkey_configure)(struct mlx5dv_qp_ex *mqp, + struct mlx5dv_mkey *mkey, + uint8_t num_setters, + struct mlx5dv_mkey_conf_attr *attr); + void (*wr_set_mkey_access_flags)(struct mlx5dv_qp_ex *mqp, + uint32_t access_flags); + void (*wr_set_mkey_layout_list)(struct mlx5dv_qp_ex *mqp, + uint16_t num_sges, + const struct ibv_sge *sge); + void (*wr_set_mkey_layout_interleaved)( + struct mlx5dv_qp_ex *mqp, + uint32_t repeat_count, + uint16_t num_interleaved, + const struct mlx5dv_mr_interleaved *data); + void (*wr_set_mkey_sig_block)(struct mlx5dv_qp_ex *mqp, + const struct mlx5dv_sig_block_attr *attr); + void (*wr_raw_wqe)(struct mlx5dv_qp_ex *mqp, const void *wqe); + void (*wr_set_dc_addr_stream)(struct mlx5dv_qp_ex *mqp, + struct ibv_ah *ah, + uint32_t remote_dctn, + uint64_t remote_dc_key, + uint16_t stream_id); + void (*wr_memcpy)(struct mlx5dv_qp_ex *mqp, + uint32_t dest_lkey, uint64_t dest_addr, + uint32_t src_lkey, uint64_t src_addr, + size_t length); + void (*wr_set_mkey_crypto)(struct mlx5dv_qp_ex *mqp, + const struct mlx5dv_crypto_attr *attr); }; struct mlx5dv_qp_ex *mlx5dv_qp_ex_from_ibv_qp_ex(struct ibv_qp_ex *qp); -int mlx5dv_query_devx_port(struct ibv_context *ctx, - uint32_t port_num, - struct mlx5dv_devx_port *mlx5_devx_port); - static inline void mlx5dv_wr_set_dc_addr(struct mlx5dv_qp_ex *mqp, struct ibv_ah *ah, uint32_t remote_dctn, @@ -287,6 +505,16 @@ static inline void 
mlx5dv_wr_set_dc_addr(struct mlx5dv_qp_ex *mqp, mqp->wr_set_dc_addr(mqp, ah, remote_dctn, remote_dc_key); } +static inline void mlx5dv_wr_set_dc_addr_stream(struct mlx5dv_qp_ex *mqp, + struct ibv_ah *ah, + uint32_t remote_dctn, + uint64_t remote_dc_key, + uint16_t stream_id) +{ + mqp->wr_set_dc_addr_stream(mqp, ah, remote_dctn, + remote_dc_key, stream_id); +} + static inline void mlx5dv_wr_mr_interleaved(struct mlx5dv_qp_ex *mqp, struct mlx5dv_mkey *mkey, uint32_t access_flags, @@ -307,6 +535,182 @@ static inline void mlx5dv_wr_mr_list(struct mlx5dv_qp_ex *mqp, mqp->wr_mr_list(mqp, mkey, access_flags, num_sges, sge); } +static inline void mlx5dv_wr_mkey_configure(struct mlx5dv_qp_ex *mqp, + struct mlx5dv_mkey *mkey, + uint8_t num_setters, + struct mlx5dv_mkey_conf_attr *attr) +{ + mqp->wr_mkey_configure(mqp, mkey, num_setters, attr); +} + +static inline void mlx5dv_wr_set_mkey_access_flags(struct mlx5dv_qp_ex *mqp, + uint32_t access_flags) +{ + mqp->wr_set_mkey_access_flags(mqp, access_flags); +} + +static inline void mlx5dv_wr_set_mkey_layout_list(struct mlx5dv_qp_ex *mqp, + uint16_t num_sges, + const struct ibv_sge *sge) +{ + mqp->wr_set_mkey_layout_list(mqp, num_sges, sge); +} + +static inline void mlx5dv_wr_set_mkey_layout_interleaved(struct mlx5dv_qp_ex *mqp, + uint32_t repeat_count, + uint16_t num_interleaved, + const struct mlx5dv_mr_interleaved *data) +{ + mqp->wr_set_mkey_layout_interleaved(mqp, repeat_count, + num_interleaved, data); +} + +static inline void mlx5dv_wr_set_mkey_sig_block(struct mlx5dv_qp_ex *mqp, + const struct mlx5dv_sig_block_attr *attr) +{ + mqp->wr_set_mkey_sig_block(mqp, attr); +} + +static inline void +mlx5dv_wr_set_mkey_crypto(struct mlx5dv_qp_ex *mqp, + const struct mlx5dv_crypto_attr *attr) +{ + mqp->wr_set_mkey_crypto(mqp, attr); +} + +static inline void mlx5dv_wr_memcpy(struct mlx5dv_qp_ex *mqp, + uint32_t dest_lkey, uint64_t dest_addr, + uint32_t src_lkey, uint64_t src_addr, + size_t length) +{ + mqp->wr_memcpy(mqp, dest_lkey, dest_addr, src_lkey, src_addr, length); +} + +enum mlx5dv_mkey_err_type { + MLX5DV_MKEY_NO_ERR, + MLX5DV_MKEY_SIG_BLOCK_BAD_GUARD, + MLX5DV_MKEY_SIG_BLOCK_BAD_REFTAG, + MLX5DV_MKEY_SIG_BLOCK_BAD_APPTAG, +}; + +struct mlx5dv_sig_err { + uint64_t actual_value; + uint64_t expected_value; + uint64_t offset; +}; + +struct mlx5dv_mkey_err { + enum mlx5dv_mkey_err_type err_type; + union { + struct mlx5dv_sig_err sig; + } err; +}; + +int _mlx5dv_mkey_check(struct mlx5dv_mkey *mkey, + struct mlx5dv_mkey_err *err_info, + size_t err_info_size); + +static inline int mlx5dv_mkey_check(struct mlx5dv_mkey *mkey, + struct mlx5dv_mkey_err *err_info) +{ + return _mlx5dv_mkey_check(mkey, err_info, sizeof(*err_info)); +} + +int mlx5dv_qp_cancel_posted_send_wrs(struct mlx5dv_qp_ex *mqp, uint64_t wr_id); + +static inline void mlx5dv_wr_raw_wqe(struct mlx5dv_qp_ex *mqp, const void *wqe) +{ + mqp->wr_raw_wqe(mqp, wqe); +} + +struct mlx5dv_crypto_login_obj; + +struct mlx5dv_crypto_login_attr { + uint32_t credential_id; + uint32_t import_kek_id; + char credential[48]; + uint64_t comp_mask; +}; + +struct mlx5dv_crypto_login_attr_ex { + uint32_t credential_id; + uint32_t import_kek_id; + const void *credential; + size_t credential_len; + uint64_t comp_mask; +}; +enum mlx5dv_crypto_login_state { + MLX5DV_CRYPTO_LOGIN_STATE_VALID, + MLX5DV_CRYPTO_LOGIN_STATE_NO_LOGIN, + MLX5DV_CRYPTO_LOGIN_STATE_INVALID, +}; + +struct mlx5dv_crypto_login_query_attr { + enum mlx5dv_crypto_login_state state; + uint64_t comp_mask; +}; + +int mlx5dv_crypto_login(struct 
ibv_context *context, + struct mlx5dv_crypto_login_attr *login_attr); + +int mlx5dv_crypto_login_query_state(struct ibv_context *context, + enum mlx5dv_crypto_login_state *state); + +int mlx5dv_crypto_logout(struct ibv_context *context); + +struct mlx5dv_crypto_login_obj * +mlx5dv_crypto_login_create(struct ibv_context *context, + struct mlx5dv_crypto_login_attr_ex *login_attr); + +int mlx5dv_crypto_login_query(struct mlx5dv_crypto_login_obj *crypto_login, + struct mlx5dv_crypto_login_query_attr *query_attr); + +int mlx5dv_crypto_login_destroy(struct mlx5dv_crypto_login_obj *crypto_login); + +enum mlx5dv_crypto_key_size { + MLX5DV_CRYPTO_KEY_SIZE_128, + MLX5DV_CRYPTO_KEY_SIZE_256, +}; + +enum mlx5dv_crypto_key_purpose { + MLX5DV_CRYPTO_KEY_PURPOSE_AES_XTS, +}; + +enum mlx5dv_dek_state { + MLX5DV_DEK_STATE_READY, + MLX5DV_DEK_STATE_ERROR, +}; + +enum mlx5dv_dek_init_attr_mask { + MLX5DV_DEK_INIT_ATTR_CRYPTO_LOGIN = 1 << 0, +}; + +struct mlx5dv_dek_init_attr { + enum mlx5dv_crypto_key_size key_size; + bool has_keytag; + enum mlx5dv_crypto_key_purpose key_purpose; + struct ibv_pd *pd; + char opaque[8]; + char key[128]; + uint64_t comp_mask; + struct mlx5dv_crypto_login_obj *crypto_login; +}; + +struct mlx5dv_dek_attr { + enum mlx5dv_dek_state state; + char opaque[8]; + uint64_t comp_mask; +}; + +struct mlx5dv_dek; + +struct mlx5dv_dek *mlx5dv_dek_create(struct ibv_context *context, + struct mlx5dv_dek_init_attr *init_attr); + +int mlx5dv_dek_query(struct mlx5dv_dek *dek, struct mlx5dv_dek_attr *attr); + +int mlx5dv_dek_destroy(struct mlx5dv_dek *dek); + enum mlx5dv_flow_action_esp_mask { MLX5DV_FLOW_ACTION_ESP_MASK_FLAGS = 1 << 0, }; @@ -343,6 +747,21 @@ mlx5dv_create_flow_matcher(struct ibv_context *context, int mlx5dv_destroy_flow_matcher(struct mlx5dv_flow_matcher *matcher); +struct mlx5dv_steering_anchor_attr { + enum mlx5dv_flow_table_type ft_type; + uint16_t priority; + uint64_t comp_mask; +}; + +struct mlx5dv_steering_anchor { + uint32_t id; +}; + +struct mlx5dv_steering_anchor * +mlx5dv_create_steering_anchor(struct ibv_context *context, + struct mlx5dv_steering_anchor_attr *attr); +int mlx5dv_destroy_steering_anchor(struct mlx5dv_steering_anchor *sa); + enum mlx5dv_flow_action_type { MLX5DV_FLOW_ACTION_DEST_IBV_QP, MLX5DV_FLOW_ACTION_DROP, @@ -415,6 +834,8 @@ mlx5dv_create_flow_action_packet_reformat(struct ibv_context *ctx, int mlx5dv_query_device(struct ibv_context *ctx_in, struct mlx5dv_context *attrs_out); +int mlx5dv_map_ah_to_qp(struct ibv_ah *ah, uint32_t qp_num); + enum mlx5dv_qp_comp_mask { MLX5DV_QP_MASK_UAR_MMAP_OFFSET = 1 << 0, MLX5DV_QP_MASK_RAW_QP_HANDLES = 1 << 1, @@ -498,6 +919,8 @@ struct ibv_dm *mlx5dv_alloc_dm(struct ibv_context *context, struct ibv_alloc_dm_attr *dm_attr, struct mlx5dv_alloc_dm_attr *mlx5_dm_attr); +void *mlx5dv_dm_map_op_addr(struct ibv_dm *dm, uint8_t op); + struct mlx5_wqe_av; struct mlx5dv_ah { @@ -618,9 +1041,11 @@ enum { MLX5_OPCODE_FMR = 0x19, MLX5_OPCODE_LOCAL_INVAL = 0x1b, MLX5_OPCODE_CONFIG_CMD = 0x1f, + MLX5_OPCODE_SET_PSV = 0x20, MLX5_OPCODE_UMR = 0x25, MLX5_OPCODE_TAG_MATCHING = 0x28, - MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c, + MLX5_OPCODE_FLOW_TBL_ACCESS = 0x2c, + MLX5_OPCODE_MMO = 0x2F, }; /* @@ -673,6 +1098,7 @@ enum { MLX5_CQE_RESP_SEND_INV = 4, MLX5_CQE_RESIZE_CQ = 5, MLX5_CQE_NO_PACKET = 6, + MLX5_CQE_SIG_ERR = 12, MLX5_CQE_REQ_ERR = 13, MLX5_CQE_RESP_ERR = 14, MLX5_CQE_INVALID = 15, @@ -832,19 +1258,19 @@ struct mlx5_wqe_ctrl_seg { __be32 opmod_idx_opcode; __be32 qpn_ds; uint8_t signature; - uint8_t rsvd[2]; + __be16 
dci_stream_channel_id; uint8_t fm_ce_se; __be32 imm; -}; +} __attribute__((__packed__)) __attribute__((__aligned__(4))); struct mlx5_wqe_flow_update_ctrl_seg { - __be32 flow_idx_update; - __be32 dest_handle; - uint8_t reserved0[40]; + __be32 flow_idx_update; + __be32 dest_handle; + uint8_t reserved0[40]; }; struct mlx5_wqe_header_modify_argument_update_seg { - uint8_t argument_list[64]; + uint8_t argument_list[64]; }; struct mlx5_mprq_wqe { @@ -923,6 +1349,8 @@ enum { enum { MLX5_WQE_UMR_CTRL_MKEY_MASK_LEN = 1 << 0, MLX5_WQE_UMR_CTRL_MKEY_MASK_START_ADDR = 1 << 6, + MLX5_WQE_UMR_CTRL_MKEY_MASK_SIG_ERR = 1 << 9, + MLX5_WQE_UMR_CTRL_MKEY_MASK_BSF_ENABLE = 1 << 12, MLX5_WQE_UMR_CTRL_MKEY_MASK_MKEY = 1 << 13, MLX5_WQE_UMR_CTRL_MKEY_MASK_QPN = 1 << 14, MLX5_WQE_UMR_CTRL_MKEY_MASK_ACCESS_LOCAL_WRITE = 1 << 18, @@ -936,7 +1364,10 @@ struct mlx5_wqe_umr_ctrl_seg { uint8_t flags; uint8_t rsvd0[3]; __be16 klm_octowords; - __be16 translation_offset; + union { + __be16 translation_offset; + __be16 bsf_octowords; + }; __be64 mkey_mask; uint8_t rsvd1[32]; }; @@ -1275,6 +1706,27 @@ struct mlx5dv_context_attr { bool mlx5dv_is_supported(struct ibv_device *device); +enum mlx5dv_vfio_context_attr_flags { + MLX5DV_VFIO_CTX_FLAGS_INIT_LINK_DOWN = 1 << 0, +}; + +struct mlx5dv_vfio_context_attr { + const char *pci_name; + uint32_t flags; /* Use enum mlx5dv_vfio_context_attr_flags */ + uint64_t comp_mask; +}; + +struct ibv_device ** +mlx5dv_get_vfio_device_list(struct mlx5dv_vfio_context_attr *attr); + +int mlx5dv_vfio_get_events_fd(struct ibv_context *ibctx); + +/* This API should run from application thread and maintain device events. + * The application is responsible to get the events FD by calling mlx5dv_vfio_get_events_fd + * and once the FD is pollable call the API to let driver process the ready events. 
+ */ +int mlx5dv_vfio_process_events(struct ibv_context *context); + struct ibv_context * mlx5dv_open_device(struct ibv_device *device, struct mlx5dv_context_attr *attr); @@ -1291,12 +1743,36 @@ int mlx5dv_devx_obj_destroy(struct mlx5dv_devx_obj *obj); int mlx5dv_devx_general_cmd(struct ibv_context *context, const void *in, size_t inlen, void *out, size_t outlen); +int _mlx5dv_query_port(struct ibv_context *context, + uint32_t port_num, + struct mlx5dv_port *info, + size_t info_len); + +static inline int mlx5dv_query_port(struct ibv_context *context, + uint32_t port_num, + struct mlx5dv_port *info) +{ + return _mlx5dv_query_port(context, port_num, info, sizeof(*info)); +} + struct mlx5dv_devx_umem { uint32_t umem_id; }; struct mlx5dv_devx_umem * mlx5dv_devx_umem_reg(struct ibv_context *ctx, void *addr, size_t size, uint32_t access); + +struct mlx5dv_devx_umem_in { + void *addr; + size_t size; + uint32_t access; + uint64_t pgsz_bitmap; + uint64_t comp_mask; +}; + +struct mlx5dv_devx_umem * +mlx5dv_devx_umem_reg_ex(struct ibv_context *ctx, struct mlx5dv_devx_umem_in *umem_in); + int mlx5dv_devx_umem_dereg(struct mlx5dv_devx_umem *umem); struct mlx5dv_devx_uar { @@ -1451,6 +1927,10 @@ static inline uint64_t _devx_get64(const void *p, size_t bit_off) #define DEVX_GET64(typ, p, fld) _devx_get64(p, __devx_bit_off(typ, fld)) +#define DEVX_ARRAY_SET64(typ, p, fld, idx, v) do { \ + DEVX_SET64(typ, p, fld[idx], v); \ +} while (0) + struct mlx5dv_dr_domain; struct mlx5dv_dr_table; struct mlx5dv_dr_matcher; @@ -1496,6 +1976,9 @@ int mlx5dv_dr_domain_sync(struct mlx5dv_dr_domain *domain, uint32_t flags); void mlx5dv_dr_domain_set_reclaim_device_memory(struct mlx5dv_dr_domain *dmn, bool enable); +void mlx5dv_dr_domain_allow_duplicate_rules(struct mlx5dv_dr_domain *domain, + bool allow); + struct mlx5dv_dr_table * mlx5dv_dr_table_create(struct mlx5dv_dr_domain *domain, uint32_t level); @@ -1509,6 +1992,19 @@ mlx5dv_dr_matcher_create(struct mlx5dv_dr_table *table, int mlx5dv_dr_matcher_destroy(struct mlx5dv_dr_matcher *matcher); +enum mlx5dv_dr_matcher_layout_flags { + MLX5DV_DR_MATCHER_LAYOUT_RESIZABLE = 1 << 0, + MLX5DV_DR_MATCHER_LAYOUT_NUM_RULE = 1 << 1, +}; + +struct mlx5dv_dr_matcher_layout { + uint32_t flags; /* use enum mlx5dv_dr_matcher_layout_flags */ + uint32_t log_num_of_rules_hint; +}; + +int mlx5dv_dr_matcher_set_layout(struct mlx5dv_dr_matcher *matcher, + struct mlx5dv_dr_matcher_layout *layout); + struct mlx5dv_dr_rule * mlx5dv_dr_rule_create(struct mlx5dv_dr_matcher *matcher, struct mlx5dv_flow_match_parameters *value, @@ -1531,13 +2027,13 @@ struct mlx5dv_dr_action * mlx5dv_dr_action_create_dest_vport(struct mlx5dv_dr_domain *domain, uint32_t vport); -struct mlx5dv_dr_action * -mlx5dv_dr_action_create_dest_devx_tir(struct mlx5dv_devx_obj *devx_obj); - struct mlx5dv_dr_action * mlx5dv_dr_action_create_dest_ib_port(struct mlx5dv_dr_domain *domain, uint32_t ib_port); +struct mlx5dv_dr_action * +mlx5dv_dr_action_create_dest_devx_tir(struct mlx5dv_devx_obj *devx_obj); + enum mlx5dv_dr_action_dest_type { MLX5DV_DR_ACTION_DEST, MLX5DV_DR_ACTION_DEST_REFORMAT, @@ -1582,6 +2078,11 @@ enum mlx5dv_dr_action_aso_flow_meter_flags { MLX5DV_DR_ACTION_FLAGS_ASO_FLOW_METER_UNDEFINED = 1 << 3, }; +enum mlx5dv_dr_action_aso_ct_flags { + MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_INITIATOR = 1 << 0, + MLX5DV_DR_ACTION_FLAGS_ASO_CT_DIRECTION_RESPONDER = 1 << 1, +}; + struct mlx5dv_dr_action * mlx5dv_dr_action_create_aso(struct mlx5dv_dr_domain *domain, struct mlx5dv_devx_obj *devx_obj, @@ -1616,11 +2117,16 @@ 
int mlx5dv_dr_action_modify_flow_meter(struct mlx5dv_dr_action *action, struct mlx5dv_dr_action * mlx5dv_dr_action_create_flow_sampler(struct mlx5dv_dr_flow_sampler_attr *attr); -struct mlx5dv_dr_action *mlx5dv_dr_action_create_pop_vlan(void); +struct mlx5dv_dr_action * +mlx5dv_dr_action_create_pop_vlan(void); + +struct mlx5dv_dr_action * +mlx5dv_dr_action_create_push_vlan(struct mlx5dv_dr_domain *domain, + __be32 vlan_hdr); -struct mlx5dv_dr_action -*mlx5dv_dr_action_create_push_vlan(struct mlx5dv_dr_domain *domain, - __be32 vlan_hdr); +struct mlx5dv_dr_action * +mlx5dv_dr_action_create_dest_root_table(struct mlx5dv_dr_table *table, + uint16_t priority); int mlx5dv_dr_action_destroy(struct mlx5dv_dr_action *action); @@ -1648,6 +2154,8 @@ int mlx5dv_modify_qp_lag_port(struct ibv_qp *qp, uint8_t port_num); int mlx5dv_modify_qp_udp_sport(struct ibv_qp *qp, uint16_t udp_sport); +int mlx5dv_dci_stream_id_reset(struct ibv_qp *qp, uint16_t stream_id); + enum mlx5dv_sched_elem_attr_flags { MLX5DV_SCHED_ELEM_ATTR_FLAGS_BW_SHARE = 1 << 0, MLX5DV_SCHED_ELEM_ATTR_FLAGS_MAX_AVG_BW = 1 << 1, @@ -1684,6 +2192,38 @@ int mlx5dv_sched_leaf_destroy(struct mlx5dv_sched_leaf *leaf); int mlx5dv_modify_qp_sched_elem(struct ibv_qp *qp, const struct mlx5dv_sched_leaf *requestor, const struct mlx5dv_sched_leaf *responder); + +int mlx5dv_reserved_qpn_alloc(struct ibv_context *ctx, uint32_t *qpn); +int mlx5dv_reserved_qpn_dealloc(struct ibv_context *ctx, uint32_t qpn); + +int mlx5dv_dr_aso_other_domain_link(struct mlx5dv_devx_obj *devx_obj, + struct mlx5dv_dr_domain *peer_dmn, + struct mlx5dv_dr_domain *dmn, + uint32_t flags, + uint8_t return_reg_c); +int mlx5dv_dr_aso_other_domain_unlink(struct mlx5dv_devx_obj *devx_obj, + struct mlx5dv_dr_domain *dmn); + +struct mlx5dv_devx_msi_vector { + int vector; + int fd; +}; + +struct mlx5dv_devx_msi_vector * +mlx5dv_devx_alloc_msi_vector(struct ibv_context *ibctx); + +int mlx5dv_devx_free_msi_vector(struct mlx5dv_devx_msi_vector *msi); + +struct mlx5dv_devx_eq { + void *vaddr; +}; + +struct mlx5dv_devx_eq * +mlx5dv_devx_create_eq(struct ibv_context *ibctx, const void *in, size_t inlen, + void *out, size_t outlen); + +int mlx5dv_devx_destroy_eq(struct mlx5dv_devx_eq *eq); + #ifdef __cplusplus } #endif diff --git a/external_libs/ibverbs/x86_64/include/infiniband/verbs.h b/external_libs/ibverbs/x86_64/include/infiniband/verbs.h index caf626cc78..713cce6ce5 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/verbs.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/verbs.h @@ -3,6 +3,7 @@ * Copyright (c) 2004, 2011-2012 Intel Corporation. All rights reserved. * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved. * Copyright (c) 2005 PathScale, Inc. All rights reserved. + * Copyright (c) 2020 Intel Corporation. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU @@ -138,6 +139,12 @@ enum ibv_device_cap_flags { IBV_DEVICE_MANAGED_FLOW_STEERING = 1 << 29 }; +enum ibv_fork_status { + IBV_FORK_DISABLED, + IBV_FORK_ENABLED, + IBV_FORK_UNNEEDED, +}; + /* * Can't extended above ibv_device_cap_flags enum as in some systems/compilers * enum range is limited to 4 bytes. 
@@ -157,6 +164,10 @@ struct ibv_alloc_dm_attr { uint32_t comp_mask; }; +enum ibv_dm_mask { + IBV_DM_MASK_HANDLE = 1 << 0, +}; + struct ibv_dm { struct ibv_context *context; int (*memcpy_to_dm)(struct ibv_dm *dm, uint64_t dm_offset, @@ -164,6 +175,8 @@ struct ibv_dm { int (*memcpy_from_dm)(void *host_addr, struct ibv_dm *dm, uint64_t dm_offset, size_t length); uint32_t comp_mask; + + uint32_t handle; }; struct ibv_device_attr { @@ -345,6 +358,7 @@ struct ibv_device_attr_ex { uint64_t max_dm_size; struct ibv_pci_atomic_caps pci_atomic_caps; uint32_t xrc_odp_caps; + uint32_t phys_port_cnt_ex; }; enum ibv_mtu { @@ -404,6 +418,7 @@ enum ibv_port_cap_flags2 { IBV_PORT_SWITCH_PORT_STATE_TABLE_SUP = 1 << 3, IBV_PORT_LINK_WIDTH_2X_SUP = 1 << 4, IBV_PORT_LINK_SPEED_HDR_SUP = 1 << 5, + IBV_PORT_LINK_SPEED_NDR_SUP = 1 << 10, }; struct ibv_port_attr { @@ -515,6 +530,8 @@ enum ibv_wc_opcode { IBV_WC_TM_RECV, IBV_WC_TM_NO_TAG, IBV_WC_DRIVER1, + IBV_WC_DRIVER2, + IBV_WC_DRIVER3, }; enum { @@ -639,8 +656,7 @@ enum ibv_rereg_mr_flags { IBV_REREG_MR_CHANGE_TRANSLATION = (1 << 0), IBV_REREG_MR_CHANGE_PD = (1 << 1), IBV_REREG_MR_CHANGE_ACCESS = (1 << 2), - IBV_REREG_MR_KEEP_VALID = (1 << 3), - IBV_REREG_MR_FLAGS_SUPPORTED = ((IBV_REREG_MR_KEEP_VALID << 1) - 1) + IBV_REREG_MR_FLAGS_SUPPORTED = ((IBV_REREG_MR_CHANGE_ACCESS << 1) - 1) }; struct ibv_mr { @@ -706,6 +722,8 @@ enum ibv_rate { IBV_RATE_50_GBPS = 20, IBV_RATE_400_GBPS = 21, IBV_RATE_600_GBPS = 22, + IBV_RATE_800_GBPS = 23, + IBV_RATE_1200_GBPS = 24, }; /** @@ -1923,7 +1941,8 @@ struct ibv_device { struct _compat_ibv_port_attr; struct ibv_context_ops { - void *(*_compat_query_device)(void); + int (*_compat_query_device)(struct ibv_context *context, + struct ibv_device_attr *device_attr); int (*_compat_query_port)(struct ibv_context *context, uint8_t port_num, struct _compat_ibv_port_attr *port_attr); @@ -2018,8 +2037,8 @@ enum ibv_parent_domain_init_attr_mask { #define IBV_ALLOCATOR_USE_DEFAULT ((void *)-1) struct ibv_parent_domain_init_attr { - struct ibv_pd *pd; /* referance to a protection domain object, can't be NULL */ - struct ibv_td *td; /* referance to a thread domain object, or NULL */ + struct ibv_pd *pd; /* reference to a protection domain object, can't be NULL */ + struct ibv_td *td; /* reference to a thread domain object, or NULL */ uint32_t comp_mask; void *(*alloc)(struct ibv_pd *pd, void *pd_context, size_t size, size_t alignment, uint64_t resource_type); @@ -2194,8 +2213,8 @@ extern const struct verbs_device_ops verbs_provider_cxgb4; extern const struct verbs_device_ops verbs_provider_efa; extern const struct verbs_device_ops verbs_provider_hfi1verbs; extern const struct verbs_device_ops verbs_provider_hns; -extern const struct verbs_device_ops verbs_provider_i40iw; extern const struct verbs_device_ops verbs_provider_ipathverbs; +extern const struct verbs_device_ops verbs_provider_irdma; extern const struct verbs_device_ops verbs_provider_mlx4; extern const struct verbs_device_ops verbs_provider_mlx5; extern const struct verbs_device_ops verbs_provider_mthca; @@ -2282,6 +2301,16 @@ struct ibv_mr *ibv_import_mr(struct ibv_pd *pd, uint32_t mr_handle); */ void ibv_unimport_mr(struct ibv_mr *mr); +/** + * ibv_import_dm - Import a device memory + */ +struct ibv_dm *ibv_import_dm(struct ibv_context *context, uint32_t dm_handle); + +/** + * ibv_unimport_dm - Unimport a device memory + */ +void ibv_unimport_dm(struct ibv_dm *dm); + /** * ibv_get_async_event - Get next async event * @event: Pointer to use to return async event @@ -2536,6 
+2565,12 @@ __ibv_reg_mr_iova(struct ibv_pd *pd, void *addr, size_t length, uint64_t iova, __builtin_constant_p( \ ((access) & IBV_ACCESS_OPTIONAL_RANGE) == 0)) +/** + * ibv_reg_dmabuf_mr - Register a dmabuf-based memory region + */ +struct ibv_mr *ibv_reg_dmabuf_mr(struct ibv_pd *pd, uint64_t offset, size_t length, + uint64_t iova, int fd, int access); + enum ibv_rereg_mr_err_code { /* Old MR is valid, invalid input */ IBV_REREG_MR_ERR_INPUT = -1, @@ -2602,9 +2637,17 @@ static inline uint32_t ibv_inc_rkey(uint32_t rkey) static inline int ibv_bind_mw(struct ibv_qp *qp, struct ibv_mw *mw, struct ibv_mw_bind *mw_bind) { + struct ibv_mw_bind_info *bind_info = &mw_bind->bind_info; + if (mw->type != IBV_MW_TYPE_1) return EINVAL; + if (!bind_info->mr && (bind_info->addr || bind_info->length)) + return EINVAL; + + if (bind_info->mr && (mw->pd != bind_info->mr->pd)) + return EPERM; + return mw->context->ops.bind_mw(qp, mw, mw_bind); } @@ -3117,6 +3160,20 @@ ibv_modify_qp_rate_limit(struct ibv_qp *qp, return vctx->modify_qp_rate_limit(qp, attr); } +/** + * ibv_query_qp_data_in_order - Checks whether the data is guaranteed to be + * written in-order. + * @qp: The QP to query. + * @op: Operation type. + * @flags: Extra field for future input. For now must be 0. + * + * Return Value + * ibv_query_qp_data_in_order() returns 1 if the data is guaranteed to be + * written in-order, 0 otherwise. + */ +int ibv_query_qp_data_in_order(struct ibv_qp *qp, enum ibv_wr_opcode op, + uint32_t flags); + /** * ibv_query_qp - Returns the attribute list and current values for the * specified QP. @@ -3348,6 +3405,12 @@ int ibv_detach_mcast(struct ibv_qp *qp, const union ibv_gid *gid, uint16_t lid); */ int ibv_fork_init(void); +/** + * ibv_is_fork_initialized - Check if fork support + * (ibv_fork_init) was enabled.
+ */ +enum ibv_fork_status ibv_is_fork_initialized(void); + /** * ibv_node_type_str - Return string describing node_type enum value */ diff --git a/external_libs/ibverbs/x86_64/include/infiniband/verbs_api.h b/external_libs/ibverbs/x86_64/include/infiniband/verbs_api.h index ded6fa401a..309f6fbafd 100644 --- a/external_libs/ibverbs/x86_64/include/infiniband/verbs_api.h +++ b/external_libs/ibverbs/x86_64/include/infiniband/verbs_api.h @@ -88,6 +88,7 @@ #define ibv_advise_mr_advice ib_uverbs_advise_mr_advice #define IBV_ADVISE_MR_ADVICE_PREFETCH IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH #define IBV_ADVISE_MR_ADVICE_PREFETCH_WRITE IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_WRITE +#define IBV_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT IB_UVERBS_ADVISE_MR_ADVICE_PREFETCH_NO_FAULT #define IBV_ADVISE_MR_FLAG_FLUSH IB_UVERBS_ADVISE_MR_FLAG_FLUSH diff --git a/external_libs/ibverbs/x86_64/libibverbs.so b/external_libs/ibverbs/x86_64/libibverbs.so index f67f919796..c2d7b12409 100644 Binary files a/external_libs/ibverbs/x86_64/libibverbs.so and b/external_libs/ibverbs/x86_64/libibverbs.so differ diff --git a/external_libs/ibverbs/x86_64/libmlx5.so b/external_libs/ibverbs/x86_64/libmlx5.so index 923bbc6bb2..1b1c336bb1 100644 Binary files a/external_libs/ibverbs/x86_64/libmlx5.so and b/external_libs/ibverbs/x86_64/libmlx5.so differ diff --git a/linux_dpdk/ws_main.py b/linux_dpdk/ws_main.py index 4bd679ce70..1b770d96ec 100644 --- a/linux_dpdk/ws_main.py +++ b/linux_dpdk/ws_main.py @@ -212,6 +212,7 @@ def configure_dummy_mlx5 (ctx): autoconf_file = 'src/dpdk/drivers/common/mlx5/mlx5_autoconf.h' autoconf_path = os.path.join(top, autoconf_file) os.system('rm -rf %s' % autoconf_path) + # a copy, with some modifications, of the autoconf file generated by the function configure_mlx5 dummy_file_data = ''' #ifndef HAVE_IBV_MLX5_MOD_SWP #define HAVE_IBV_MLX5_MOD_SWP 1 @@ -265,9 +266,7 @@ def configure_dummy_mlx5 (ctx): /* HAVE_IBV_WQ_FLAG_RX_END_PADDING is not defined. */ -#ifndef HAVE_MLX5DV_DR_DEVX_PORT -#define HAVE_MLX5DV_DR_DEVX_PORT 1 -#endif /* HAVE_MLX5DV_DR_DEVX_PORT */ +/* HAVE_MLX5DV_DR_DEVX_PORT is not defined. */ #ifndef HAVE_IBV_DEVX_OBJ #define HAVE_IBV_DEVX_OBJ 1 @@ -466,7 +465,7 @@ def configure_dummy_mlx5 (ctx): #define ETHTOOL_LINK_MODE_100000baseLR4_ER4_Full_BIT 39 - ''' +''' f = open(autoconf_path, "w") f.write(dummy_file_data) diff --git a/scripts/automation/regression/stateful_tests/trex_rx_test.py b/scripts/automation/regression/stateful_tests/trex_rx_test.py index f9ab2bed24..7d0084cd91 100755 --- a/scripts/automation/regression/stateful_tests/trex_rx_test.py +++ b/scripts/automation/regression/stateful_tests/trex_rx_test.py @@ -153,6 +153,8 @@ def test_rx_check_http(self): def test_rx_check_sfr_ipv6(self): + if CTRexScenario.setup_name in ["trex19"]: + self.skip("skipping Mellanox NICs") if not self.is_loopback and not CTRexScenario.router_cfg['no_dut_config']: self.router.configure_basic_interfaces() self.router.config_pbr(mode = 'config') diff --git a/src/dpdk/drivers/net/mlx5/mlx5.c b/src/dpdk/drivers/net/mlx5/mlx5.c index 72b1e35673..c672c4e27a 100644 --- a/src/dpdk/drivers/net/mlx5/mlx5.c +++ b/src/dpdk/drivers/net/mlx5/mlx5.c @@ -2363,8 +2363,11 @@ mlx5_port_args_config(struct mlx5_priv *priv, struct mlx5_kvargs_ctrl *mkvlist, DRV_LOG(WARNING, "Multi-Packet RQ isn't supported."); config->mprq.enabled = 0; } - if (config->max_dump_files_num == 0) + #ifndef TREX_PATCH + if (config->max_dump_files_num == 0) { config->max_dump_files_num = 128; + } + #endif /* Detect minimal data bytes to inline.
*/ mlx5_set_min_inline(priv); DRV_LOG(DEBUG, "VLAN insertion in WQE is %ssupported.", diff --git a/src/dpdk/drivers/net/mlx5/mlx5_rx.c b/src/dpdk/drivers/net/mlx5/mlx5_rx.c index e5eea0ad94..02e6611f12 100644 --- a/src/dpdk/drivers/net/mlx5/mlx5_rx.c +++ b/src/dpdk/drivers/net/mlx5/mlx5_rx.c @@ -390,6 +390,11 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) *rxq->rq_db = rte_cpu_to_be_32(rxq->rq_ci); } +/* Must be negative. */ +#define MLX5_ERROR_CQE_RET (-1) +/* Must not be negative. */ +#define MLX5_RECOVERY_ERROR_RET 0 + /** * Handle a Rx error. * The function inserts the RQ state to reset when the first error CQE is @@ -404,7 +409,7 @@ mlx5_rxq_initialize(struct mlx5_rxq_data *rxq) * 0 when called from non-vectorized Rx burst. * * @return - * -1 in case of recovery error, otherwise the CQE status. + * MLX5_RECOVERY_ERROR_RET in case of recovery error, otherwise the CQE status. */ int mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) @@ -433,7 +438,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) sm.queue_id = rxq->idx; sm.state = IBV_WQS_RESET; if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) - return -1; + return MLX5_RECOVERY_ERROR_RET; if (rxq_ctrl->dump_file_n < RXQ_PORT(rxq_ctrl)->config.max_dump_files_num) { MKSTR(err_str, "Unexpected CQE error syndrome " @@ -473,7 +478,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) sm.queue_id = rxq->idx; sm.state = IBV_WQS_RDY; if (mlx5_queue_state_modify(RXQ_DEV(rxq_ctrl), &sm)) - return -1; + return MLX5_RECOVERY_ERROR_RET; if (vec) { const uint32_t elts_n = mlx5_rxq_mprq_enabled(rxq) ? @@ -501,7 +506,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) rte_pktmbuf_free_seg (*elt); } - return -1; + return MLX5_RECOVERY_ERROR_RET; } } for (i = 0; i < (int)elts_n; ++i) { @@ -520,7 +525,7 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) } return ret; default: - return -1; + return MLX5_RECOVERY_ERROR_RET; } } @@ -538,7 +543,9 @@ mlx5_rx_err_handle(struct mlx5_rxq_data *rxq, uint8_t vec) * written. * * @return - * 0 in case of empty CQE, otherwise the packet size in bytes. + * 0 in case of empty CQE, MLX5_ERROR_CQE_RET in case of error CQE, + * otherwise the packet size in regular RxQ, and striding byte + * count format in mprq case. */ static inline int mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, @@ -605,8 +612,8 @@ mlx5_rx_poll_len(struct mlx5_rxq_data *rxq, volatile struct mlx5_cqe *cqe, rxq->err_state)) { ret = mlx5_rx_err_handle(rxq, 0); if (ret == MLX5_CQE_STATUS_HW_OWN || - ret == -1) - return 0; + ret == MLX5_RECOVERY_ERROR_RET) + return MLX5_ERROR_CQE_RET; } else { return 0; } @@ -851,8 +858,10 @@ mlx5_rx_burst(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) if (!pkt) { cqe = &(*rxq->cqes)[rxq->cq_ci & cqe_cnt]; len = mlx5_rx_poll_len(rxq, cqe, cqe_cnt, &mcqe); - if (!len) { + if (len <= 0) { rte_mbuf_raw_free(rep); + if (unlikely(len == MLX5_ERROR_CQE_RET)) + rq_ci = rxq->rq_ci << sges_n; break; } pkt = seg; @@ -1075,8 +1084,13 @@ mlx5_rx_burst_mprq(void *dpdk_rxq, struct rte_mbuf **pkts, uint16_t pkts_n) } cqe = &(*rxq->cqes)[rxq->cq_ci & cq_mask]; ret = mlx5_rx_poll_len(rxq, cqe, cq_mask, &mcqe); - if (!ret) + if (ret == 0) + break; + if (unlikely(ret == MLX5_ERROR_CQE_RET)) { + rq_ci = rxq->rq_ci; + consumed_strd = rxq->consumed_strd; break; + } byte_cnt = ret; len = (byte_cnt & MLX5_MPRQ_LEN_MASK) >> MLX5_MPRQ_LEN_SHIFT; MLX5_ASSERT((int)len >= (rxq->crc_present << 2));
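The NOTE added to trex_appendix_mellanox.asciidoc at the top of this patch tells users how to restore the old max_dump_files_num behavior through the TRex config file. A minimal sketch of where that line could sit, assuming the usual /etc/trex_cfg.yaml platform layout; the PCI addresses and port_limit are hypothetical, and the exact placement of dpdk_devargs is an assumption, not something this patch confirms:

    - version: 2
      interfaces: ['03:00.0', '03:00.1']        # hypothetical PCI addresses
      port_limit: 2
      dpdk_devargs: ['max_dump_files_num=128']  # restore the pre-v3.01 default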
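The comment above mlx5dv_vfio_process_events() in the mlx5dv.h hunk describes the intended flow: the application fetches an events fd via mlx5dv_vfio_get_events_fd() and calls the process function once the fd becomes pollable. A minimal sketch of that loop, assuming ibctx was opened from a device returned by mlx5dv_get_vfio_device_list(); the wrapper name vfio_event_loop is ours:

    #include <poll.h>
    #include <infiniband/mlx5dv.h>

    static void vfio_event_loop(struct ibv_context *ibctx)
    {
        struct pollfd pfd = {
            .fd = mlx5dv_vfio_get_events_fd(ibctx), /* fd exported by the driver */
            .events = POLLIN,
        };

        for (;;) {
            if (poll(&pfd, 1, -1) < 0)
                break;                             /* interrupted or failed */
            if (pfd.revents & POLLIN)
                mlx5dv_vfio_process_events(ibctx); /* let the driver handle ready events */
        }
    }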
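ibv_query_qp_data_in_order(), declared in the verbs.h hunk, is documented to return 1 when data for the given opcode is guaranteed to be written in order. A sketch of one plausible use, not taken from this patch: deciding whether a receiver may poll the last byte of an RDMA WRITE target buffer to detect arrival of the whole message. The helper name can_poll_last_byte is ours:

    #include <infiniband/verbs.h>

    /* Returns nonzero if in-order placement makes polling the last byte of an
     * RDMA WRITE target buffer a safe completion check on this QP.
     * flags must currently be 0, per the documentation in the header. */
    static int can_poll_last_byte(struct ibv_qp *qp)
    {
        return ibv_query_qp_data_in_order(qp, IBV_WR_RDMA_WRITE, 0) == 1;
    }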