| author | luwenpeng <[email protected]> | 2024-07-17 15:47:07 +0800 |
|---|---|---|
| committer | luwenpeng <[email protected]> | 2024-07-17 15:47:48 +0800 |
| commit | cc5a5379403ff9fb22d1ee5bdd9da3f636e2a199 | |
| tree | 2cb558562dfe896f1f45964efaef2af94083ffa2 | |
| parent | b59b736e4dd53a3033e4935cdf4dadd5121b46e8 | |
feature: adding Kafka infrastructure
Diffstat (limited to 'common')
| -rw-r--r-- | common/CMakeLists.txt  |   4 |
| -rw-r--r-- | common/include/kafka.h |  28 |
| -rw-r--r-- | common/include/utils.h |   1 |
| -rw-r--r-- | common/src/kafka.cpp   | 245 |
4 files changed, 276 insertions, 2 deletions
diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt
index 5f6ff2e..247786b 100644
--- a/common/CMakeLists.txt
+++ b/common/CMakeLists.txt
@@ -1,6 +1,6 @@
-add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp)
+add_library(common src/session_table.cpp src/packet.cpp src/control_packet.cpp src/bfd.cpp src/utils.cpp src/vxlan.cpp src/log.cpp src/timestamp.cpp src/mpack.cpp src/kafka.cpp)
 
 target_link_libraries(common PUBLIC cjson)
-target_link_libraries(common PUBLIC MESA_handle_logger)
+target_link_libraries(common PUBLIC MESA_handle_logger rdkafka)
 
 target_include_directories(common PUBLIC ${CMAKE_CURRENT_LIST_DIR}/include)
diff --git a/common/include/kafka.h b/common/include/kafka.h
new file mode 100644
index 0000000..ffb9911
--- /dev/null
+++ b/common/include/kafka.h
@@ -0,0 +1,28 @@
+#ifndef _KAFKA_H
+#define _KAFKA_H
+
+#ifdef __cplusplus
+extern "C"
+{
+#endif
+
+enum topic_idx
+{
+    TOPIC_RULE_HITS,
+
+    // add more topics here
+
+    MAX_TOPIC_NUM,
+};
+
+struct kafka *kafka_create(const char *profile);
+void kafka_destroy(struct kafka *handle);
+// return 0 on success
+// return -1 on failure
+int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/common/include/utils.h b/common/include/utils.h
index 713c254..8319ed1 100644
--- a/common/include/utils.h
+++ b/common/include/utils.h
@@ -18,6 +18,7 @@ extern "C"
 #define LOG_TAG_UTILS "UTILS"
 #define LOG_TAG_HEALTH_CHECK "HEALTH_CHECK"
 #define LOG_TAG_TIMESTAMP "TIMESTAMP"
+#define LOG_TAG_KAFKA "KAFKA"
 
 #define ATOMIC_INC(x) __atomic_fetch_add(x, 1, __ATOMIC_RELAXED)
 #define ATOMIC_DEC(x) __atomic_fetch_sub(x, 1, __ATOMIC_RELAXED)
diff --git a/common/src/kafka.cpp b/common/src/kafka.cpp
new file mode 100644
index 0000000..2a06c92
--- /dev/null
+++ b/common/src/kafka.cpp
@@ -0,0 +1,245 @@
+#include <stdlib.h>
+#include <string.h>
+
+#include "log.h"
+#include "utils.h"
+#include "kafka.h"
+#include <MESA/MESA_prof_load.h>
+#include <librdkafka/rdkafka.h>
+
+#define MAX_SYMBOL_LEN 128
+
+#define KAFKA_LOG_ERROR(fmt, ...) LOG_ERROR("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
+#define KAFKA_LOG_DEBUG(fmt, ...) LOG_DEBUG("%s: " fmt, LOG_TAG_KAFKA, ##__VA_ARGS__)
+
+struct config
+{
+    char brokerlist[MAX_SYMBOL_LEN];
+    char sasl_username[MAX_SYMBOL_LEN];
+    char sasl_passwd[MAX_SYMBOL_LEN];
+    char topic_name[MAX_TOPIC_NUM][MAX_SYMBOL_LEN];
+};
+
+struct per_producer_per_topic
+{
+    rd_kafka_t *producer;
+    rd_kafka_topic_t *topic;
+};
+
+struct kafka
+{
+    struct config cfg;
+    struct per_producer_per_topic *pppt[MAX_TOPIC_NUM];
+};
+
+/******************************************************************************
+ * Private API
+ ******************************************************************************/
+
+static void per_producer_per_topic_free(struct per_producer_per_topic *pppt)
+{
+    if (pppt)
+    {
+        if (pppt->topic)
+        {
+            rd_kafka_topic_destroy(pppt->topic);
+            pppt->topic = NULL;
+        }
+
+        if (pppt->producer)
+        {
+            rd_kafka_destroy(pppt->producer);
+            pppt->producer = NULL;
+        }
+
+        free(pppt);
+        pppt = NULL;
+    }
+}
+
+static struct per_producer_per_topic *per_producer_per_topic_new(const char *brokerlist, const char *sasl_username, const char *sasl_passwd, const char *topic_name)
+{
+    char err_str[1024] = {0};
+    struct per_producer_per_topic *pppt = (struct per_producer_per_topic *)calloc(1, sizeof(struct per_producer_per_topic));
+    if (!pppt)
+    {
+        return NULL;
+    }
+
+    rd_kafka_conf_t *conf = rd_kafka_conf_new();
+    if (!conf)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka conf");
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka queue.buffering.max.messages, %s", err_str);
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka topic.metadata.refresh.interval.ms, %s", err_str);
+        goto error_out;
+    }
+    if (rd_kafka_conf_set(conf, "client.id", topic_name, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+    {
+        KAFKA_LOG_ERROR("failed to set kafka client.id, %s", err_str);
+        goto error_out;
+    }
+    if (strlen(sasl_username) > 0 && strlen(sasl_passwd) > 0)
+    {
+        if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.mechanisms, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.username", sasl_username, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.username, %s", err_str);
+            goto error_out;
+        }
+        if (rd_kafka_conf_set(conf, "sasl.password", sasl_passwd, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka sasl.password, %s", err_str);
+            goto error_out;
+        }
+    }
+    else
+    {
+        if (rd_kafka_conf_set(conf, "security.protocol", "plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+        {
+            KAFKA_LOG_ERROR("failed to set kafka security.protocol, %s", err_str);
+            goto error_out;
+        }
+    }
+
+    // The conf object is freed by this function and must not be used or destroyed by the application subsequently.
+    pppt->producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, err_str, sizeof(err_str));
+    conf = NULL;
+    if (pppt->producer == NULL)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka producer, %s", err_str);
+        goto error_out;
+    }
+
+    if (rd_kafka_brokers_add(pppt->producer, brokerlist) == 0)
+    {
+        KAFKA_LOG_ERROR("failed to add kafka brokers");
+        goto error_out;
+    }
+
+    pppt->topic = rd_kafka_topic_new(pppt->producer, topic_name, NULL);
+    if (pppt->topic == NULL)
+    {
+        KAFKA_LOG_ERROR("failed to create kafka topic: %s", topic_name);
+        goto error_out;
+    }
+
+    return pppt;
+
+error_out:
+    if (conf)
+    {
+        rd_kafka_conf_destroy(conf);
+    }
+
+    per_producer_per_topic_free(pppt);
+    return NULL;
+}
+
+/******************************************************************************
+ * Public API -- Kafka
+ ******************************************************************************/
+
+// client.id is set to the topic name, so each topic needs its own producer
+struct kafka *kafka_create(const char *profile)
+{
+    struct kafka *handle = (struct kafka *)calloc(1, sizeof(struct kafka));
+    if (!handle)
+    {
+        return NULL;
+    }
+
+    MESA_load_profile_string_def(profile, "kafka", "brokerlist", handle->cfg.brokerlist, sizeof(handle->cfg.brokerlist), "");
+    MESA_load_profile_string_def(profile, "kafka", "sasl_username", handle->cfg.sasl_username, sizeof(handle->cfg.sasl_username), "");
+    MESA_load_profile_string_def(profile, "kafka", "sasl_passwd", handle->cfg.sasl_passwd, sizeof(handle->cfg.sasl_passwd), "");
+    MESA_load_profile_string_def(profile, "kafka", "topic_name", handle->cfg.topic_name[TOPIC_RULE_HITS], sizeof(handle->cfg.topic_name[TOPIC_RULE_HITS]), "");
+
+    if (strlen(handle->cfg.brokerlist) == 0)
+    {
+        KAFKA_LOG_ERROR("brokerlist is empty");
+        goto error_out;
+    }
+
+    for (int i = 0; i < MAX_TOPIC_NUM; i++)
+    {
+        if (strlen(handle->cfg.topic_name[i]) == 0)
+        {
+            KAFKA_LOG_ERROR("topic_name[%d] is empty", i);
+            goto error_out;
+        }
+    }
+
+    for (int i = 0; i < MAX_TOPIC_NUM; i++)
+    {
+        handle->pppt[i] = per_producer_per_topic_new(handle->cfg.brokerlist, handle->cfg.sasl_username, handle->cfg.sasl_passwd, handle->cfg.topic_name[i]);
+        if (!handle->pppt[i])
+        {
+            goto error_out;
+        }
+    }
+
+    return handle;
+
+error_out:
+    kafka_destroy(handle);
+    return NULL;
+}
+
+void kafka_destroy(struct kafka *handle)
+{
+    if (handle)
+    {
+        for (int i = 0; i < MAX_TOPIC_NUM; i++)
+        {
+            per_producer_per_topic_free(handle->pppt[i]);
+            handle->pppt[i] = NULL;
+        }
+
+        free(handle);
+        handle = NULL;
+    }
+}
+
+int kafka_send(struct kafka *handle, enum topic_idx idx, const void *data, int len)
+{
+    if (idx < 0 || idx >= MAX_TOPIC_NUM)
+    {
+        KAFKA_LOG_ERROR("invalid topic index: %d", idx);
+        return -1;
+    }
+
+    if (handle->pppt[idx])
+    {
+        if (rd_kafka_produce(handle->pppt[idx]->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, (void *)data, len, NULL, 0, NULL) == -1)
+        {
+            KAFKA_LOG_ERROR("failed to produce message with topic [%d], %s", idx, rd_kafka_err2str(rd_kafka_last_error()));
+            return -1;
+        }
+        else
+        {
+            return 0;
+        }
+    }
+    else
+    {
+        KAFKA_LOG_ERROR("topic %d not initialized", idx);
+        return -1;
+    }
+}
\ No newline at end of file
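
For reference, kafka_create() pulls its settings from a `[kafka]` section of the profile passed to it. The sketch below shows an assumed layout of that section: the key names come from the MESA_load_profile_string_def() calls in this commit, but the surrounding file syntax and all values are illustrative only, not part of the change.

```ini
# Assumed profile layout; broker addresses and topic name are placeholders.
# Leaving sasl_username/sasl_passwd empty selects plaintext instead of SASL.
[kafka]
brokerlist    = 10.0.0.1:9092,10.0.0.2:9092
sasl_username =
sasl_passwd   =
topic_name    = rule_hits
```

A minimal caller might look like the following sketch. It uses only the API declared in common/include/kafka.h; the profile path and the payload are hypothetical.

```cpp
// Hypothetical usage sketch of the kafka wrapper added in this commit.
#include <stdio.h>
#include <string.h>

#include "kafka.h"

int main(void)
{
    // Path is an assumption; pass whatever profile the application already loads.
    struct kafka *handle = kafka_create("/etc/myapp/profile.ini");
    if (!handle)
    {
        fprintf(stderr, "kafka_create failed\n");
        return 1;
    }

    // Example payload; kafka_send() copies the buffer (RD_KAFKA_MSG_F_COPY).
    const char *msg = "{\"rule_id\":42,\"action\":\"hit\"}";
    if (kafka_send(handle, TOPIC_RULE_HITS, msg, (int)strlen(msg)) != 0)
    {
        fprintf(stderr, "kafka_send failed\n");
    }

    kafka_destroy(handle);
    return 0;
}
```

Note that rd_kafka_produce() only enqueues the message locally; delivery to the broker happens asynchronously on librdkafka's internal threads, and since no delivery-report callback is registered here, failures after enqueueing are not surfaced through kafka_send()'s return value.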
