
Sample Action with VXLAN and NVGRE Encapsulation

This feature enables the user to perform VXLAN and NVGRE encapsulation as part of the sample actions, providing better tunnel encapsulation capabilities for mirrored traffic.

  • E-Switch Mirroring rule: the matched ingress packets are sent to port ID 2, and a mirrored copy is encapsulated with a VXLAN header and sent to port ID 0:

    testpmd> set vxlan ip-version ipv4 vni 4 udp-src 4 udp-dst 4 ip-src 127.0.0.1 ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
    testpmd> set sample_actions 0 vxlan_encap / port_id id 0 / end
    testpmd> flow create 0 ingress transfer pattern eth / end actions sample ratio 1 index 0 / port_id id 2 / end

  • E-Switch Mirroring rule: the matched ingress packets are sent to port ID 2, and a mirrored copy is encapsulated with an NVGRE header and sent to port ID 0:

    testpmd> set nvgre ip-version ipv4 tni 4 ip-src 127.0.0.1 ip-dst 128.0.0.1 eth-src 11:11:11:11:11:11 eth-dst 22:22:22:22:22:22
    testpmd> set sample_actions 0 nvgre_encap / port_id id 0 / end
    testpmd> flow create 0 ingress transfer pattern eth / end actions sample ratio 1 index 0 / port_id id 2 / end

The following snippet shows how an application can build the same kind of VXLAN-encapsulating mirror (sample) action programmatically with the rte_flow API:

#include <stddef.h>
#include <rte_flow.h>
#include <rte_ip.h>

/* Maximum number of items in vxlan encap/decap.
 * ETH / IPv4(6) / UDP / VXLAN / END
 */
#define TUNNEL_ITEMS_NUM 5

/* Maximum number of actions in port mirror.
 * RAW_ENCAP(VXLAN_ENCAP) / PORT_ID / END
 */
#define MIRROR_ACTIONS_NUM 3

/* 'struct flow_actions', 'struct eth_header', 'struct udp_header',
 * xzalloc(), BUILD_ASSERT_DECL() and add_flow_action() are
 * application-provided (OVS-style) helpers, not part of the DPDK API.
 * 'header' points to the pre-built encapsulation headers laid out as
 * ETH / IPv4 / UDP / VXLAN.
 */
static int
add_mirror_action(struct flow_actions *actions, const void *header)
{
    struct sample_conf {
        struct rte_flow_action_sample sample;
        struct rte_flow_action_port_id port_id;
        struct rte_flow_action_vxlan_encap vxlan_encap;
        struct rte_flow_item vxlan_items[TUNNEL_ITEMS_NUM];
        struct rte_flow_action sample_actions[MIRROR_ACTIONS_NUM];
    } *sample_conf;

    /* The sample configuration must stay at offset 0 so that a pointer to
     * the whole structure can be used as the SAMPLE action configuration. */
    BUILD_ASSERT_DECL(offsetof(struct sample_conf, sample) == 0);
    struct rte_flow_action *sample_itr;
    const struct eth_header *eth;
    const struct udp_header *udp;
    const void *vxlan;
    const void *l3;
    int field = 0;
    int port_id = 2;

    sample_conf = xzalloc(sizeof *sample_conf);
    sample_itr = sample_conf->sample_actions;
    eth = header;

    /* Fill vxlan_items: ETH / IPv4 / UDP / VXLAN / END. */
    sample_conf->vxlan_items[field].type = RTE_FLOW_ITEM_TYPE_ETH;
    sample_conf->vxlan_items[field].spec = eth;
    sample_conf->vxlan_items[field].mask = &rte_flow_item_eth_mask;
    field++;

    l3 = eth + 1;
    sample_conf->vxlan_items[field].type = RTE_FLOW_ITEM_TYPE_IPV4;
    sample_conf->vxlan_items[field].spec = l3;
    sample_conf->vxlan_items[field].mask = &rte_flow_item_ipv4_mask;
    field++;

    /* Skip the IPv4 header (no options assumed) to reach the UDP header. */
    udp = (const struct udp_header *)
          ((const char *) l3 + sizeof(struct rte_ipv4_hdr));
    sample_conf->vxlan_items[field].type = RTE_FLOW_ITEM_TYPE_UDP;
    sample_conf->vxlan_items[field].spec = udp;
    sample_conf->vxlan_items[field].mask = &rte_flow_item_udp_mask;
    field++;

    vxlan = (udp + 1);
    sample_conf->vxlan_items[field].type = RTE_FLOW_ITEM_TYPE_VXLAN;
    sample_conf->vxlan_items[field].spec = vxlan;
    sample_conf->vxlan_items[field].mask = &rte_flow_item_vxlan_mask;
    field++;

    sample_conf->vxlan_items[field].type = RTE_FLOW_ITEM_TYPE_END;

    /* Initialize sample struct */
    sample_conf->sample.ratio = 1;
    sample_conf->sample.actions = sample_conf->sample_actions;
    sample_conf->port_id.id = port_id;
    sample_conf->vxlan_encap.definition = sample_conf->vxlan_items;
    sample_itr->conf = &sample_conf->vxlan_encap;
    sample_itr->type = RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP;
    sample_itr++;
    sample_itr->conf = &sample_conf->port_id;
    sample_itr->type = RTE_FLOW_ACTION_TYPE_PORT_ID;
    sample_itr++;
    sample_itr->type = RTE_FLOW_ACTION_TYPE_END;

    add_flow_action(actions, RTE_FLOW_ACTION_TYPE_SAMPLE, sample_conf);

    return 0;
}
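The sketch below is a minimal illustration of how the sample configuration built by add_mirror_action() could be attached to an E-Switch transfer rule that matches all Ethernet packets and forwards them to port 2, mirroring the testpmd flow create commands above. The helper name create_mirror_flow() and its parameters are assumptions made for illustration; attribute setup and error handling should follow the application's own conventions.

#include <rte_flow.h>

/* Illustrative helper (not part of DPDK): creates a transfer rule on
 * 'proxy_port' that samples every packet (ratio 1) through 'sample_conf'
 * and forwards the original packet to 'dst_port'.  Equivalent in spirit to:
 *   flow create 0 ingress transfer pattern eth / end
 *        actions sample ratio 1 index 0 / port_id id 2 / end
 */
static struct rte_flow *
create_mirror_flow(uint16_t proxy_port, uint16_t dst_port,
                   const struct rte_flow_action_sample *sample_conf,
                   struct rte_flow_error *error)
{
    const struct rte_flow_attr attr = {
        .ingress = 1,
        .transfer = 1,                       /* E-Switch (transfer) rule. */
    };
    const struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },  /* Match any Ethernet frame. */
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    const struct rte_flow_action_port_id fate_port = { .id = dst_port };
    const struct rte_flow_action flow_actions[] = {
        { .type = RTE_FLOW_ACTION_TYPE_SAMPLE,  .conf = sample_conf },
        { .type = RTE_FLOW_ACTION_TYPE_PORT_ID, .conf = &fate_port },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    return rte_flow_create(proxy_port, &attr, pattern, flow_actions, error);
}

With this layout, the mirrored copy carries the VXLAN encapsulation and leaves through the port referenced inside the sample actions, while the original packet follows the outer PORT_ID fate action. For the NVGRE examples, the encapsulation action inside the sample actions would be RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP with an ETH / IPv4(6) / NVGRE / END item definition instead of the VXLAN one.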
