 app/src/dp_trace.c         |  2 +-
 app/src/mrb.c              |  2 +-
 include/external/marsio.h  |  2 +-
 infra/include/dp_trace.h   |  6 ++++--
 infra/src/dp_trace.c       | 16 +++++++++++-----
 5 files changed, 18 insertions(+), 10 deletions(-)
diff --git a/app/src/dp_trace.c b/app/src/dp_trace.c
index 59a7347..39972af 100644
--- a/app/src/dp_trace.c
+++ b/app/src/dp_trace.c
@@ -65,7 +65,7 @@ int marsio_dp_trace_measurements_can_emit(__rte_unused struct mr_instance * inst
uint8_t measurement_type)
{
struct mrb_metadata * mrb_meta = rte_mbuf_to_priv((struct rte_mbuf *)mbuf);
- if (unlikely(mrb_meta->measurement_type == DP_TRACE_MEASUREMENT_TYPE_UNKNOW))
+ if (unlikely(mrb_meta->measurement_type == DP_TRACE_MEASUREMENT_TYPE_UNKNOWN))
{
dp_trace_filter_exec(instance->trace, (struct rte_mbuf *)mbuf, 0, marsio_thread_id_get());
}
diff --git a/app/src/mrb.c b/app/src/mrb.c
index 2d2ddb4..d156f34 100644
--- a/app/src/mrb.c
+++ b/app/src/mrb.c
@@ -338,7 +338,7 @@ static void __buff_clone_ctrlzone(marsio_buff_t * mc, marsio_buff_t * md)
/* Assert: the MBUF private area must be at least as large as the control zone */
assert(rte_pktmbuf_priv_size(__mc->pool) >= sizeof(struct mrb_priv_zone));
rte_memcpy(__mc_priv_zone, __mi_priv_zone, rte_pktmbuf_priv_size(__mc->pool));
- ((struct mrb_metadata *)__mc_priv_zone)->measurement_type = DP_TRACE_MEASUREMENT_TYPE_UNKNOW;
+ ((struct mrb_metadata *)__mc_priv_zone)->measurement_type = DP_TRACE_MEASUREMENT_TYPE_UNKNOWN;
((struct mrb_metadata *)__mc_priv_zone)->dp_trace_buffer = NULL;
/* next linked-list entry */
diff --git a/include/external/marsio.h b/include/external/marsio.h
index b5e1065..b74c792 100644
--- a/include/external/marsio.h
+++ b/include/external/marsio.h
@@ -368,7 +368,7 @@ struct dp_trace_job_desc
uint8_t measurement_type;
int8_t rule_index;
char bpf_expr[MR_BPF_EXPRESSION_MAX];
- unsigned int pkt_cnt_max;
+ unsigned int pkt_cnt_max; // Maximum number of packets to capture (0 = unlimited)
unsigned int sampling;
unsigned int snaplen;
uint8_t traffic_link_id_cnt;
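The new comment on pkt_cnt_max ties into the limit check added further down in infra/src/dp_trace.c: the per-job counter counts every BPF hit, but only every sampling-th hit is tagged, so the cap is enforced as pkt_cnt_max * sampling on the raw counter. A stand-alone sketch with illustrative values (not part of the patch) that walks through that arithmetic:

/* Illustrative only: shows why capping the raw hit counter at
 * pkt_cnt_max * sampling yields exactly pkt_cnt_max captured packets. */
#include <stdio.h>

int main(void)
{
    const unsigned int sampling = 4, pkt_cnt_max = 100;
    unsigned long pkt_cnt = 0, captured = 0;

    while (pkt_cnt < (unsigned long)pkt_cnt_max * sampling)
    {
        if (pkt_cnt % sampling == 0)
            captured++;          /* this hit gets the trace tag */
        pkt_cnt++;               /* every BPF hit is counted */
    }
    printf("captured = %lu\n", captured);  /* prints 100 */
    return 0;
}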
diff --git a/infra/include/dp_trace.h b/infra/include/dp_trace.h
index 38a2f28..a8f7c7c 100644
--- a/infra/include/dp_trace.h
+++ b/infra/include/dp_trace.h
@@ -32,7 +32,7 @@
#define DP_TRACE_ERROR_ILLEGAL_BPF_EXPR 4
#define DP_TRACE_ERROR_MAX 5
-#define DP_TRACE_MEASUREMENT_TYPE_UNKNOW (0)
+#define DP_TRACE_MEASUREMENT_TYPE_UNKNOWN (0)
#define DP_TRACE_MEASUREMENT_TYPE_UNMATCH (1 << 3)
#define DP_TRACE_MEASUREMENT_TYPE_BUF_ALLOC_FAILED (1 << 4)
#define DP_TRACE_MEASUREMENT_TYPE_MATCHED (DP_TRACE_MEASUREMENT_TYPE_TRACE | DP_TRACE_MEASUREMENT_TYPE_TELEMETRY)
@@ -67,7 +67,9 @@ struct dp_trace_job_ctx
volatile uint8_t used;
struct bpf_program fp;
job_bitmap_t job_id;
-};
+ RTE_MARKER cacheline1 __rte_cache_min_aligned;
+ rte_atomic64_t pkt_cnt; // Number of packets that matched this job's BPF filter
+} __rte_cache_aligned;
struct dp_trace_stat
{
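The two added fields also change the memory layout of dp_trace_job_ctx: the RTE_MARKER starts a new cache line so that per-packet increments of the atomic counter do not invalidate the line holding the read-mostly fields. A reduced sketch of that pattern, with illustrative field names rather than the full structure:

#include <stdint.h>
#include <rte_common.h>
#include <rte_atomic.h>

/* Hypothetical reduced layout: read-mostly members stay on the first cache
 * line; the write-hot counter sits behind the marker on its own line. */
struct job_ctx_sketch
{
    volatile uint8_t used;      /* read-mostly: slot in use */
    uint64_t         job_id;    /* read-mostly: job bitmap bit */

    RTE_MARKER cacheline1 __rte_cache_min_aligned;  /* second cache line */
    rte_atomic64_t   pkt_cnt;   /* updated on every matching packet */
} __rte_cache_aligned;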
diff --git a/infra/src/dp_trace.c b/infra/src/dp_trace.c
index ef6a0c7..e28dd6d 100644
--- a/infra/src/dp_trace.c
+++ b/infra/src/dp_trace.c
@@ -34,7 +34,9 @@ static struct dp_trace_instance * dp_trace_instance_create()
for (unsigned int i = 0; i < DP_TRACE_JOB_NUM_MAX; i++)
{
- instance->job_ctx[i].job_id = 1 << (i);
+ struct dp_trace_job_ctx * ctx = &instance->job_ctx[i];
+ ctx->job_id = 1 << (i);
+ rte_atomic64_init(&(ctx->pkt_cnt));
}
instance->nr_ring = DP_TRACE_RING_NUM;
@@ -142,6 +144,7 @@ void dp_trace_job_clean(struct dp_trace_job_ctx * ctx)
if (ctx == NULL)
return;
ctx->used = 0;
+ rte_atomic64_set(&ctx->pkt_cnt, 0);
// sleep must be called here; it effectively avoids race conditions.
// note: take care to prevent CPU out-of-order execution here
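The two comments above describe the teardown ordering: stop new matches, reset the counter, then wait so that data-path lcores which already observed used == 1 can finish. A hedged sketch of that sequence, reusing job_ctx_sketch from the earlier layout sketch; the explicit store barrier and the one-second grace period are assumptions, not taken from this hunk:

#include <unistd.h>
#include <rte_atomic.h>

/* Assumed clean-up sequence for a job slot (barrier and wait length are
 * illustrative, chosen to match the intent of the comments above). */
void job_slot_quiesce(struct job_ctx_sketch *ctx)
{
    ctx->used = 0;                      /* stop new lookups from matching */
    rte_atomic64_set(&ctx->pkt_cnt, 0); /* reset the capture counter */

    rte_smp_wmb();                      /* publish the stores first */
    sleep(1);                           /* grace period for lcores that
                                           already saw used == 1 */
}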
@@ -207,6 +210,7 @@ int dp_trace_job_add(struct dp_trace_process * trace, const struct dp_trace_job_
ctx->desc.traffic_link_ids[i] = desc->traffic_link_ids[i];
}
+ rte_atomic64_set(&ctx->pkt_cnt, 0);
ctx->used = 1;
instance->nr_job_ctx++;
MR_INFO("[add job:%u] bpf_expr: %s type: %u", rule_index, ctx->desc.bpf_expr, ctx->desc.measurement_type);
@@ -283,12 +287,13 @@ uint16_t dp_trace_filter_exec_jobs_get(struct dp_trace_process * trace, struct r
// Once the maximum number of recorded packets is reached, the trace tag is no
// longer added to the packet. Unlimited when ctx->desc.pkt_cnt_max == 0.
target_packet = true;
- if (ctx->desc.pkt_cnt_max != 0 && statistics->filter_exec_hit[i] >= ctx->desc.pkt_cnt_max)
+ uint64_t cur_pkt_cnt = rte_atomic64_read(&ctx->pkt_cnt);
+ if (ctx->desc.pkt_cnt_max != 0 && cur_pkt_cnt >= (ctx->desc.pkt_cnt_max * ctx->desc.sampling))
{
statistics->reach_pkt_cnt_limit++;
continue;
}
- if (statistics->filter_exec_hit[i] % ctx->desc.sampling == 0)
+ if (cur_pkt_cnt % ctx->desc.sampling == 0)
{
// match every sampling-th packet
match_jobs = match_jobs | ctx->job_id;
@@ -299,6 +304,7 @@ uint16_t dp_trace_filter_exec_jobs_get(struct dp_trace_process * trace, struct r
}
mrb_meta->measurement_type |= ctx->desc.measurement_type;
}
+ rte_atomic64_add(&ctx->pkt_cnt, 1);
statistics->filter_exec_hit[i]++;
}
}
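Switching from the per-lcore statistics->filter_exec_hit[i] to the shared atomic ctx->pkt_cnt makes the capture budget and the sampling stride global across all worker cores. A condensed, hypothetical rendering of the per-job decision implied by the two hunks above:

#include <stdbool.h>
#include <stdint.h>
#include <rte_atomic.h>

/* Hypothetical condensation of the per-job check in
 * dp_trace_filter_exec_jobs_get(): returns true when the current packet
 * should be tagged for this job. pkt_cnt is shared by all lcores, so both
 * the cap and the sampling stride apply globally, not per core. */
bool job_should_tag(rte_atomic64_t *pkt_cnt,
                    unsigned int pkt_cnt_max,
                    unsigned int sampling)
{
    uint64_t cur = rte_atomic64_read(pkt_cnt);

    /* pkt_cnt_max == 0 means unlimited capture */
    if (pkt_cnt_max != 0 && cur >= (uint64_t)pkt_cnt_max * sampling)
        return false;                 /* budget exhausted, skip tagging */

    rte_atomic64_add(pkt_cnt, 1);     /* count every BPF hit under the cap */
    return (cur % sampling == 0);     /* tag every sampling-th hit */
}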
@@ -336,7 +342,7 @@ void dp_trace_filter_exec(struct dp_trace_process * trace, struct rte_mbuf * mbu
struct dp_trace_stat * statistics = &trace->statistics[lcore_id];
struct mrb_metadata * mrb_meta = rte_mbuf_to_priv(mbuf);
- if (mrb_meta->measurement_type != DP_TRACE_MEASUREMENT_TYPE_UNKNOW)
+ if (mrb_meta->measurement_type != DP_TRACE_MEASUREMENT_TYPE_UNKNOWN)
{
return;
}
@@ -356,7 +362,7 @@ void dp_trace_filter_exec(struct dp_trace_process * trace, struct rte_mbuf * mbu
struct pkt_parser_result * pkt_parser_result = &mrb_meta->pkt_parser_result;
job_bitmap_t match_jobs = 0;
- mrb_meta->measurement_type = DP_TRACE_MEASUREMENT_TYPE_UNKNOW;
+ mrb_meta->measurement_type = DP_TRACE_MEASUREMENT_TYPE_UNKNOWN;
if (unlikely(pkt_parser_result->nr_layers == 0))
{