NVIDIA DPDK Documentation MLNX_DPDK_22.11_2310.5.1 LTS

Meter Hierarchy
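The following testpmd commands build a two-level meter hierarchy. Profile 25 is a packet-mode (pps) srTCM profile and profile 24 is a byte-mode (bps) srTCM profile. Policy 1 sends green packets to queue 1 and drops red packets; policy 2 passes green packets on to meter 1 and drops red packets. Meter 1 (pps) uses profile 25 with policy 1, and meter 2 (bps) uses profile 24 with policy 2. The final flow rule meters all Ethernet traffic on group 1 with meter 2, which in turn chains its green traffic into meter 1.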


testpmd> add port meter profile srtcm_rfc2697 0 25 1000 1024 0 1
testpmd> add port meter profile srtcm_rfc2697 0 24 65536 1024 0 0
testpmd> add port meter policy 0 1 g_actions queue index 1 end / end y_actions end r_actions drop / end
testpmd> create port meter 0 1 25 1 yes 0xffff 1 0
testpmd> add port meter policy 0 2 g_actions meter mtr_id 1 / end y_actions end r_actions drop / end
testpmd> create port meter 0 2 24 2 yes 0xffff 1 0
testpmd> flow create 0 priority 3 group 1 ingress pattern eth / end actions meter mtr_id 2 / end
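The same hierarchy can be created programmatically through the rte_mtr and rte_flow APIs. The example below defines helpers to add an srTCM meter profile, to add a meter policy, and to create a meter, then creates the pps and bps meters and installs a flow rule that steers IPv4 traffic from source address 13.10.10.10 into the bps meter.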


#include <stdio.h>
#include <string.h>

#include <rte_byteorder.h>
#include <rte_flow.h>
#include <rte_mtr.h>

int
add_port_meter_profile_srtcm(uint16_t port_id, uint32_t profile_id,
                             uint64_t cir, uint64_t cbs, uint64_t ebs,
                             int packet_mode, struct rte_mtr_error *error)
{
    struct rte_mtr_meter_profile mp;

    /* srTCM (RFC 2697) meter profile parameters. */
    memset(&mp, 0, sizeof(struct rte_mtr_meter_profile));
    mp.alg = RTE_MTR_SRTCM_RFC2697;
    mp.srtcm_rfc2697.cir = cir;
    mp.srtcm_rfc2697.cbs = cbs;
    mp.srtcm_rfc2697.ebs = ebs;
    mp.packet_mode = packet_mode;

    return rte_mtr_meter_profile_add(port_id, profile_id, &mp, error);
}

int
port_meter_policy_add(uint16_t port_id, uint32_t policy_id,
                      const struct rte_flow_action *actions,
                      struct rte_mtr_error *error)
{
    const struct rte_flow_action *act = actions;
    const struct rte_flow_action *start;
    struct rte_mtr_meter_policy_params policy;
    uint32_t i, act_n;

    /*
     * Split the flat action list into one END-terminated list per
     * color: green, yellow and red, in that order.
     */
    for (i = 0; i < RTE_COLORS; i++) {
        for (act_n = 0, start = act;
             act->type != RTE_FLOW_ACTION_TYPE_END; act++)
            act_n++;
        if (act_n && act->type == RTE_FLOW_ACTION_TYPE_END)
            policy.actions[i] = start;
        else
            policy.actions[i] = NULL;
        act++;
    }
    return rte_mtr_meter_policy_add(port_id, policy_id, &policy, error);
}

static int
create_port_meter(uint16_t port_id, uint32_t mtr_id, uint32_t shared,
                  uint32_t profile_id, int use_prev_mtr_color,
                  int meter_enable, uint64_t stats_mask,
                  uint32_t policy_id, struct rte_mtr_error *error)
{
    struct rte_mtr_params params;

    /* Meter parameters. */
    memset(&params, 0, sizeof(struct rte_mtr_params));
    params.meter_profile_id = profile_id;
    params.use_prev_mtr_color = use_prev_mtr_color;
    params.meter_enable = meter_enable;
    params.stats_mask = stats_mask;
    params.meter_policy_id = policy_id;
    return rte_mtr_create(port_id, mtr_id, &params, shared, error);
}

int
create_flow_with_meter_hierarchy(uint16_t port_id)
{
    int ret;
    struct rte_mtr_error mtr_error;
    uint32_t pps_profile_id = 1;
    uint32_t pps_policy_id = 1;
    uint32_t pps_mtr_id = 1;
    uint32_t bps_profile_id = 2;
    uint32_t bps_policy_id = 2;
    uint32_t bps_mtr_id = 2;
    int shared = 1; /* The meters are shared. */
    struct rte_flow_action_queue queue = { .index = 1 };
    struct rte_flow_action pps_actions[] = {
        /* Green: send to queue 1. */
        { .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
        { .type = RTE_FLOW_ACTION_TYPE_END },
        /* Yellow: no action. */
        { .type = RTE_FLOW_ACTION_TYPE_END },
        /* Red: drop. */
        { .type = RTE_FLOW_ACTION_TYPE_DROP },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };
    struct rte_flow_action_meter pps_meter = { .mtr_id = pps_mtr_id };
    struct rte_flow_action bps_actions[] = {
        /* Green: pass on to the pps meter (next hierarchy level). */
        { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &pps_meter },
        { .type = RTE_FLOW_ACTION_TYPE_END },
        /* Yellow: no action. */
        { .type = RTE_FLOW_ACTION_TYPE_END },
        /* Red: drop. */
        { .type = RTE_FLOW_ACTION_TYPE_DROP },
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    /* Create the pps meter. */
    ret = add_port_meter_profile_srtcm(port_id, pps_profile_id,
                                       1000, 300, 0, 1, &mtr_error);
    if (ret) {
        printf("cannot add meter pps profile, error: %s\n",
               mtr_error.message);
        return ret;
    }
    ret = port_meter_policy_add(port_id, pps_policy_id, pps_actions,
                                &mtr_error);
    if (ret) {
        printf("cannot add meter pps policy, error: %s\n",
               mtr_error.message);
        return ret;
    }
    ret = create_port_meter(port_id, pps_mtr_id, shared, pps_profile_id,
                            0, 1, 0xffff, pps_policy_id, &mtr_error);
    if (ret) {
        printf("cannot create pps meter: %u with profile: %u, error: %s\n",
               pps_mtr_id, pps_profile_id, mtr_error.message);
        return ret;
    }

    /* Create the bps meter. */
    ret = add_port_meter_profile_srtcm(port_id, bps_profile_id,
                                       1 * 1024 * 1024, 64 * 1024, 0, 0,
                                       &mtr_error);
    if (ret) {
        printf("cannot add meter bps profile, error: %s\n",
               mtr_error.message);
        return ret;
    }
    ret = port_meter_policy_add(port_id, bps_policy_id, bps_actions,
                                &mtr_error);
    if (ret) {
        printf("cannot add meter bps policy, error: %s\n",
               mtr_error.message);
        return ret;
    }
    ret = create_port_meter(port_id, bps_mtr_id, shared, bps_profile_id,
                            0, 1, 0xffff, bps_policy_id, &mtr_error);
    if (ret) {
        printf("cannot create bps meter: %u with profile: %u, error: %s\n",
               bps_mtr_id, bps_profile_id, mtr_error.message);
        return ret;
    }

    struct rte_flow *flow;
    struct rte_flow_error error;
    struct rte_flow_attr attr = { /* Holds the flow attributes. */
        .group = 1,   /* Set the rule on group 1. */
        .ingress = 1, /* Rx flow. */
        .transfer = 0,
        .priority = 1, /* Rule priority within the group. */
    };
    struct rte_flow_item_ipv4 ipv4_outer = {
        .hdr = {
            /* Match on 13.10.10.10 source address. */
            .src_addr = rte_cpu_to_be_32(0x0D0A0A0A),
        },
    };
    struct rte_flow_item_ipv4 ipv4_mask = {
        .hdr = {
            .src_addr = RTE_BE32(0xffffffff),
        },
    };
    struct rte_flow_item pattern[] = {
        { .type = RTE_FLOW_ITEM_TYPE_ETH },
        { .type = RTE_FLOW_ITEM_TYPE_IPV4,
          .spec = &ipv4_outer,
          .mask = &ipv4_mask },
        { .type = RTE_FLOW_ITEM_TYPE_END },
    };
    struct rte_flow_action_meter bps_meter = { .mtr_id = bps_mtr_id };
    struct rte_flow_action actions[] = {
        /* Meter the flow with the bps meter (first hierarchy level). */
        { .type = RTE_FLOW_ACTION_TYPE_METER, .conf = &bps_meter },
        /* The end action must be the last action. */
        { .type = RTE_FLOW_ACTION_TYPE_END },
    };

    flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
    if (!flow) {
        printf("cannot create flow with mtr id: %u on port: %u, error: %s\n",
               bps_mtr_id, port_id, error.message);
        return -1;
    }
    return 0;
}
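After traffic runs, the counters enabled by the 0xffff stats mask can be read from each shared meter to see where packets were passed or dropped in the hierarchy. The helper below is a minimal sketch built on the rte_mtr_stats_read() API; the print_meter_stats() name and the particular fields printed are illustrative and not part of the example above.

/* A minimal sketch: read and print one meter's counters, assuming the
 * meter was created with stats_mask 0xffff as in the example above.
 */
#include <inttypes.h>
#include <stdio.h>

#include <rte_mtr.h>

static int
print_meter_stats(uint16_t port_id, uint32_t mtr_id)
{
    struct rte_mtr_stats stats;
    uint64_t stats_mask = 0;
    struct rte_mtr_error mtr_error;
    int ret;

    /* clear = 0 reads the counters without resetting them. */
    ret = rte_mtr_stats_read(port_id, mtr_id, &stats, &stats_mask, 0,
                             &mtr_error);
    if (ret) {
        printf("cannot read stats of meter: %u, error: %s\n",
               mtr_id, mtr_error.message);
        return ret;
    }
    if (stats_mask & RTE_MTR_STATS_N_PKTS_GREEN)
        printf("meter %u green packets: %" PRIu64 "\n",
               mtr_id, stats.n_pkts[RTE_COLOR_GREEN]);
    if (stats_mask & RTE_MTR_STATS_N_PKTS_DROPPED)
        printf("meter %u dropped packets: %" PRIu64 "\n",
               mtr_id, stats.n_pkts_dropped);
    return 0;
}

Calling print_meter_stats(port_id, 1) and print_meter_stats(port_id, 2) after sending traffic shows how each hierarchy level classified the packets; in testpmd, the equivalent is the show port meter stats (port_id) (mtr_id) (clear) command.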
