diff options
| author | Qiuwen Lu <[email protected]> | 2016-09-22 17:02:13 +0800 |
|---|---|---|
| committer | Qiuwen Lu <[email protected]> | 2016-09-22 17:02:13 +0800 |
| commit | f60d01b6cbbf31167cb44b923c6f4976ad0d7e6b (patch) | |
| tree | 899fd7bb824732dbff0e1f843958414da0cf99bf | |
| parent | 511445909125982a8f4fbf9cde2ca3819914f0c6 (diff) | |
删除调试使用的部分语句,优化了队列添加/删除元素过程。
| -rw-r--r-- | core/src/vnode.c | 11 | ||||
| -rw-r--r-- | pag/libpag.c | 2 | ||||
| -rw-r--r-- | service/src/rxtx.c | 15 |
3 files changed, 15 insertions, 13 deletions
diff --git a/core/src/vnode.c b/core/src/vnode.c index 100cde9..2a5f1cb 100644 --- a/core/src/vnode.c +++ b/core/src/vnode.c @@ -119,11 +119,14 @@ static inline void tunnel_enqueue(struct tunnel_desc * desc, struct vnode_ops * void * obj) { // disable sz_buffer, enqueue object directly. + // TODO: 优化无缓冲队列 +#if 0 if(desc->en_buffer == NULL) { tunnel_enqueue_without_buffer(desc, ops, obj); return; } +#endif // append the object at the tail of enqueue buffer. unsigned int pos; @@ -140,7 +143,7 @@ static inline void tunnel_enqueue(struct tunnel_desc * desc, struct vnode_ops * // enqueue buffer is full, burst these objects into tunnel. assert(pos == desc->sz_en_buffer); int n_to_send = desc->sz_en_buffer; - int n_send = rte_ring_enqueue_burst(desc->tunnel_object, + int n_send = rte_ring_sp_enqueue_burst(desc->tunnel_object, desc->en_buffer, n_to_send); UPDATE_VNODE_STAT(tunnel_enqueue, n_send); @@ -163,7 +166,7 @@ static inline void tunnel_enqueue(struct tunnel_desc * desc, struct vnode_ops * static inline int tunnel_dequeue(struct tunnel_desc * desc, struct vnode_ops * ops, void * obj, int nr_max_obj) { - return rte_ring_dequeue_burst(desc->tunnel_object, obj, nr_max_obj); + return rte_ring_sc_dequeue_burst(desc->tunnel_object, obj, nr_max_obj); } /* A 2d array of tunnels, trans data from a prod to a cons */ @@ -271,7 +274,7 @@ err: // only use by prod. // TODO: rewrite in SSE/SSE2/AVX/AVX2 intrinsics -void tunnel_block_enqueue_with_hash(struct tunnel_block * block, struct vnode_ops * ops, +static inline void tunnel_block_enqueue_with_hash(struct tunnel_block * block, struct vnode_ops * ops, int prodq, void * obj[], uint32_t hash[], int nr_obj) { unsigned int consq; @@ -290,7 +293,7 @@ void tunnel_block_enqueue_with_hash(struct tunnel_block * block, struct vnode_op // Tunnel Block Dequeue, dequeue from block, only used by cons. 
// TODO: rewrite in SSE/SSE2/AVX/AVX2 intrinsics -int tunnel_block_dequeue(struct tunnel_block * block, struct vnode_ops * ops, +static inline int tunnel_block_dequeue(struct tunnel_block * block, struct vnode_ops * ops, int consq, void * obj[], int nr_max_obj) { unsigned int nr_obj = 0, nr_obj_recv = 0; diff --git a/pag/libpag.c b/pag/libpag.c index d014f9d..c6c90cc 100644 --- a/pag/libpag.c +++ b/pag/libpag.c @@ -239,8 +239,6 @@ void * pag_get_frame(int sid __rte_unused) return NULL;
tinstance->rxmbuf_ctx = tinstance->rxmbuf[tinstance->rxmbuf_cur++];
- rte_mbuf_sanity_check(tinstance->rxmbuf_ctx, 1);
-
PERF_END(pag_get_frame);
return rte_pktmbuf_mtod(tinstance->rxmbuf_ctx, void *);
}
diff --git a/service/src/rxtx.c b/service/src/rxtx.c index e8b8c68..3cb1baa 100644 --- a/service/src/rxtx.c +++ b/service/src/rxtx.c @@ -21,18 +21,19 @@ void sc_thread_rx_loop(struct sc_instance * instance, { struct rte_mbuf * rx_bufs[MR_BURST_MAX]; hash_t hash_result[MR_BURST_MAX]; - int nr_rx_burst = instance->sz_rx_burst; PERF_BEGIN(sc_rx_loop_all); + int nr_rx_burst = instance->sz_rx_burst; + for(unsigned int i =0; i < tinstance->nr_devices; i++) { mr_devq_t * devq; devq = tinstance->rt_dev[i].devq; - PERF_BEGIN(sc_rx_loop_eth_rx_burst); + //PERF_BEGIN(sc_rx_loop_eth_rx_burst); uint16_t nr_rx_bufs = rte_eth_rx_burst(devq->port_id, devq->rx_queue_id, rx_bufs, nr_rx_burst); - PERF_END(sc_rx_loop_eth_rx_burst); + //PERF_END(sc_rx_loop_eth_rx_burst); // 没有数据包直接跳过 if (unlikely(nr_rx_bufs == 0)) continue; @@ -41,18 +42,18 @@ UPDATE_DEV_STAT(dev_rx_bit, devq->port_id, pktslen(rx_bufs, nr_rx_bufs)); // 计算数据包负载分担使用的Hash值 - PERF_BEGIN(sc_rx_loop_hash_calc); + //PERF_BEGIN(sc_rx_loop_hash_calc); distributer_hash_caculate(instance->distmode, rx_bufs, nr_rx_bufs, hash_result); - PERF_END(sc_rx_loop_hash_calc); + //PERF_END(sc_rx_loop_hash_calc); // 放入VNode中，分发给对应的队列 struct vnodeman_attach_desc * attach_desc; attach_desc = tinstance->rt_dev[i].rxvnode_desc; - PERF_BEGIN(sc_rx_loop_dispatch); + //PERF_BEGIN(sc_rx_loop_dispatch); vnode_enqueue_burst_with_hash(attach_desc->prod, &vnodeman_ops, attach_desc->queue_id, (void **)rx_bufs, hash_result, nr_rx_bufs); - PERF_END(sc_rx_loop_dispatch); + //PERF_END(sc_rx_loop_dispatch); } PERF_END(sc_rx_loop_all); |
