Diffstat (limited to 'common/src/kafka.cpp')
-rw-r--r--  common/src/kafka.cpp  251
1 file changed, 251 insertions, 0 deletions
diff --git a/common/src/kafka.cpp b/common/src/kafka.cpp
new file mode 100644
index 0000000..a82c641
--- /dev/null
+++ b/common/src/kafka.cpp
@@ -0,0 +1,251 @@
+#include <stdlib.h>
+#include <string.h>
+
+#include "kafka.h"
+#include "tfe_utils.h"
+#include <MESA/MESA_prof_load.h>
+#include <librdkafka/rdkafka.h>
+
+#define MAX_SYMBOL_LEN 128
+
+struct config
+{
+ char brokerlist[MAX_SYMBOL_LEN];
+ char sasl_username[MAX_SYMBOL_LEN];
+ char sasl_passwd[MAX_SYMBOL_LEN];
+ char topic_name[MAX_TOPIC_NUM][MAX_SYMBOL_LEN];
+};
+
+struct per_producer_per_topic
+{
+ rd_kafka_t *producer;
+ rd_kafka_topic_t *topic;
+};
+
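+// Top-level handle: the parsed [kafka] profile section plus one
+// producer/topic pair per topic index (see the note above kafka_create()).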
+struct kafka
+{
+ struct config cfg;
+ struct per_producer_per_topic *pppt[MAX_TOPIC_NUM];
+};
+
+/******************************************************************************
+ * Private API
+ ******************************************************************************/
+
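+// Destroy the topic handle first, then the producer, then the wrapper itself.
+// A NULL pointer is accepted and ignored.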
+static void per_producer_per_topic_free(struct per_producer_per_topic *pppt)
+{
+ if (pppt)
+ {
+ if (pppt->topic)
+ {
+ rd_kafka_topic_destroy(pppt->topic);
+ pppt->topic = NULL;
+ }
+
+ if (pppt->producer)
+ {
+ rd_kafka_destroy(pppt->producer);
+ pppt->producer = NULL;
+ }
+
+ free(pppt);
+ pppt = NULL;
+ }
+}
+
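+// Build one producer bound to a single topic. client.id is set to the topic
+// name; SASL/PLAIN over plaintext is configured only when both credentials are
+// non-empty, otherwise unauthenticated plaintext is used.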
+static struct per_producer_per_topic *per_producer_per_topic_new(const char *brokerlist, const char *sasl_username, const char *sasl_passwd, const char *topic_name)
+{
+ char err_str[1024] = {0};
+ struct per_producer_per_topic *pppt = (struct per_producer_per_topic *)calloc(1, sizeof(struct per_producer_per_topic));
+ if (!pppt)
+ {
+ return NULL;
+ }
+
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ if (!conf)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to create kafka conf");
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "queue.buffering.max.messages", "1000000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka queue.buffering.max.messages, %s", err_str);
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "topic.metadata.refresh.interval.ms", "600000", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka topic.metadata.refresh.interval.ms, %s", err_str);
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "client.id", topic_name, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka client.id, %s", err_str);
+ goto error_out;
+ }
+ if (strlen(sasl_username) > 0 && strlen(sasl_passwd) > 0)
+ {
+ if (rd_kafka_conf_set(conf, "security.protocol", "sasl_plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka security.protocol, %s", err_str);
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "sasl.mechanisms", "PLAIN", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka sasl.mechanisms, %s", err_str);
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "sasl.username", sasl_username, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka sasl.username, %s", err_str);
+ goto error_out;
+ }
+ if (rd_kafka_conf_set(conf, "sasl.password", sasl_passwd, err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka sasl.password, %s", err_str);
+ goto error_out;
+ }
+ }
+ else
+ {
+ if (rd_kafka_conf_set(conf, "security.protocol", "plaintext", err_str, sizeof(err_str)) != RD_KAFKA_CONF_OK)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to set kafka security.protocol, %s", err_str);
+ goto error_out;
+ }
+ }
+
+ // The conf object is freed by rd_kafka_new() and must not be used or destroyed by the application subsequently.
+ pppt->producer = rd_kafka_new(RD_KAFKA_PRODUCER, conf, err_str, sizeof(err_str));
+ conf = NULL;
+ if (pppt->producer == NULL)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to create kafka producer, %s", err_str);
+ goto error_out;
+ }
+
+ if (rd_kafka_brokers_add(pppt->producer, brokerlist) == 0)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to add kafka brokers");
+ goto error_out;
+ }
+
+ pppt->topic = rd_kafka_topic_new(pppt->producer, topic_name, NULL);
+ if (pppt->topic == NULL)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to create kafka topic: %s", topic_name);
+ goto error_out;
+ }
+
+ return pppt;
+
+error_out:
+ if (conf)
+ {
+ rd_kafka_conf_destroy(conf);
+ }
+
+ per_producer_per_topic_free(pppt);
+ return NULL;
+}
+
+/******************************************************************************
+ * Public API -- Kafka
+ ******************************************************************************/
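+// Usage sketch (illustrative only): create a handle from a profile, publish a
+// payload on one of the configured topics, then tear the handle down. The
+// profile path "tfe.conf" and the JSON payload are assumptions made for this
+// example, not values required by the module.
+//
+//     struct kafka *k = kafka_create("tfe.conf");
+//     if (k)
+//     {
+//         const char *msg = "{\"event\":\"example\"}";
+//         kafka_send(k, TOPIC_RULE_HITS, msg, (int)strlen(msg));
+//         kafka_destroy(k);
+//     }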
+
+// client.id is set to the topic name, so each topic needs its own producer instance (one producer per topic).
+struct kafka *kafka_create(const char *profile)
+{
+ struct kafka *handle = (struct kafka *)calloc(1, sizeof(struct kafka));
+ if (!handle)
+ {
+ return NULL;
+ }
+
+ MESA_load_profile_string_def(profile, "kafka", "brokerlist", handle->cfg.brokerlist, sizeof(handle->cfg.brokerlist), "");
+ MESA_load_profile_string_def(profile, "kafka", "sasl_username", handle->cfg.sasl_username, sizeof(handle->cfg.sasl_username), "");
+ MESA_load_profile_string_def(profile, "kafka", "sasl_passwd", handle->cfg.sasl_passwd, sizeof(handle->cfg.sasl_passwd), "");
+ MESA_load_profile_string_def(profile, "kafka", "rule_hits_topic", handle->cfg.topic_name[TOPIC_RULE_HITS], sizeof(handle->cfg.topic_name[TOPIC_RULE_HITS]), "");
+ MESA_load_profile_string_def(profile, "kafka", "proxy_event_topic", handle->cfg.topic_name[TOPIC_PROXY_EVENT], sizeof(handle->cfg.topic_name[TOPIC_PROXY_EVENT]), "");
+ MESA_load_profile_string_def(profile, "kafka", "file_stream_topic", handle->cfg.topic_name[TOPIC_FILE_STREAM], sizeof(handle->cfg.topic_name[TOPIC_FILE_STREAM]), "");
+ MESA_load_profile_string_def(profile, "kafka", "exch_cert_topic", handle->cfg.topic_name[TOPIC_EXCH_CERT], sizeof(handle->cfg.topic_name[TOPIC_EXCH_CERT]), "");
+
+ if (strlen(handle->cfg.brokerlist) == 0)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: brokerlist is empty");
+ goto error_out;
+ }
+
+ for (int i = 0; i < MAX_TOPIC_NUM; i++)
+ {
+ if (strlen(handle->cfg.topic_name[i]) == 0)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: topic_name[%d] is empty", i);
+ goto error_out;
+ }
+ }
+
+ for (int i = 0; i < MAX_TOPIC_NUM; i++)
+ {
+ handle->pppt[i] = per_producer_per_topic_new(handle->cfg.brokerlist, handle->cfg.sasl_username, handle->cfg.sasl_passwd, handle->cfg.topic_name[i]);
+ if (!handle->pppt[i])
+ {
+ goto error_out;
+ }
+ }
+
+ return handle;
+
+error_out:
+ kafka_destroy(handle);
+ return NULL;
+}
+
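+// Release every producer/topic pair and then the handle itself. A NULL handle
+// is accepted and ignored.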
+void kafka_destroy(struct kafka *handle)
+{
+ if (handle)
+ {
+ for (int i = 0; i < MAX_TOPIC_NUM; i++)
+ {
+ per_producer_per_topic_free(handle->pppt[i]);
+ handle->pppt[i] = NULL;
+ }
+
+ free(handle);
+ handle = NULL;
+ }
+}
+
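+// Enqueue one message on the topic selected by idx. Returns 0 on success and
+// -1 on error. RD_KAFKA_MSG_F_COPY makes librdkafka copy the payload, so the
+// caller keeps ownership of data. No delivery-report callback is registered,
+// so a 0 return only means the message was queued locally, not delivered.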
+int kafka_send(struct kafka *handle, enum topic_idx idx, const char *data, int len)
+{
+ if (!handle)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: handle is NULL");
+ return -1;
+ }
+
+ if (idx < 0 || idx >= MAX_TOPIC_NUM)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: invalid topic index: %d", idx);
+ return -1;
+ }
+
+ if (handle->pppt[idx])
+ {
+ if (rd_kafka_produce(handle->pppt[idx]->topic, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY, (void *)data, len, NULL, 0, NULL) == -1)
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: failed to produce message with topic [%d], %s", idx, rd_kafka_err2str(rd_kafka_last_error()));
+ return -1;
+ }
+ else
+ {
+ TFE_LOG_DEBUG(g_default_logger, "KAFKA: produced message to topic [%d]: %s", idx, data);
+ return 0;
+ }
+ }
+ else
+ {
+ TFE_LOG_ERROR(g_default_logger, "KAFKA: topic %d not initialized", idx);
+ return -1;
+ }
+}
\ No newline at end of file