summaryrefslogtreecommitdiff
path: root/lib
diff options
context:
space:
mode:
authorMattias Rönnblom <[email protected]>2024-10-25 10:41:46 +0200
committerThomas Monjalon <[email protected]>2024-11-07 23:40:04 +0100
commit29c39cd3d54d8330ee578dd3ea27cfda1c562079 (patch)
tree3be80c51da537446810e6fe73a28d0cdc6351f8b /lib
parent776d4753893335d43011f97b08d422b84a54b16c (diff)
random: keep PRNG state in lcore variable
Replace keeping PRNG state in a RTE_MAX_LCORE-sized static array of cache-aligned and RTE_CACHE_GUARDed struct instances with keeping the same state in a more cache-friendly lcore variable. Signed-off-by: Mattias Rönnblom <[email protected]> Acked-by: Morten Brørup <[email protected]> Acked-by: Konstantin Ananyev <[email protected]> Acked-by: Chengwen Feng <[email protected]> Acked-by: Stephen Hemminger <[email protected]>
Diffstat (limited to 'lib')
-rw-r--r--lib/eal/common/rte_random.c28
1 file changed, 17 insertions, 11 deletions
diff --git a/lib/eal/common/rte_random.c b/lib/eal/common/rte_random.c
index 90e91b3c4f..cf0756f26a 100644
--- a/lib/eal/common/rte_random.c
+++ b/lib/eal/common/rte_random.c
@@ -11,6 +11,7 @@
#include <rte_branch_prediction.h>
#include <rte_cycles.h>
#include <rte_lcore.h>
+#include <rte_lcore_var.h>
#include <rte_random.h>
struct __rte_cache_aligned rte_rand_state {
@@ -19,14 +20,12 @@ struct __rte_cache_aligned rte_rand_state {
uint64_t z3;
uint64_t z4;
uint64_t z5;
- RTE_CACHE_GUARD;
};
-/* One instance each for every lcore id-equipped thread, and one
- * additional instance to be shared by all others threads (i.e., all
- * unregistered non-EAL threads).
- */
-static struct rte_rand_state rand_states[RTE_MAX_LCORE + 1];
+RTE_LCORE_VAR_HANDLE(struct rte_rand_state, rand_state);
+
+/* instance to be shared by all unregistered non-EAL threads */
+static struct rte_rand_state unregistered_rand_state;
static uint32_t
__rte_rand_lcg32(uint32_t *seed)
@@ -85,8 +84,14 @@ rte_srand(uint64_t seed)
unsigned int lcore_id;
/* add lcore_id to seed to avoid having the same sequence */
- for (lcore_id = 0; lcore_id < RTE_DIM(rand_states); lcore_id++)
- __rte_srand_lfsr258(seed + lcore_id, &rand_states[lcore_id]);
+ for (lcore_id = 0; lcore_id < RTE_MAX_LCORE; lcore_id++) {
+ struct rte_rand_state *lcore_state =
+ RTE_LCORE_VAR_LCORE(lcore_id, rand_state);
+
+ __rte_srand_lfsr258(seed + lcore_id, lcore_state);
+ }
+
+ __rte_srand_lfsr258(seed + lcore_id, &unregistered_rand_state);
}
static __rte_always_inline uint64_t
@@ -124,11 +129,10 @@ struct rte_rand_state *__rte_rand_get_state(void)
idx = rte_lcore_id();
- /* last instance reserved for unregistered non-EAL threads */
if (unlikely(idx == LCORE_ID_ANY))
- idx = RTE_MAX_LCORE;
+ return &unregistered_rand_state;
- return &rand_states[idx];
+ return RTE_LCORE_VAR(rand_state);
}
uint64_t
@@ -228,6 +232,8 @@ RTE_INIT(rte_rand_init)
{
uint64_t seed;
+ RTE_LCORE_VAR_ALLOC(rand_state);
+
seed = __rte_random_initial_seed();
rte_srand(seed);