author     chenzizhan <[email protected]>   2024-07-12 18:37:40 +0800
committer  chenzizhan <[email protected]>   2024-07-12 18:37:40 +0800
commit     6b3dcefab5b4049a3f40be9faab6a05c79a8bb5b (patch)
tree       97dadc0663c837671776729aa7a75ca0001d8752 /src
parent     dcc5329f090d4d3e1f2b1ea6c09393c0397fc111 (diff)
renames: src/tags/ -> src/cells/ (tag_map -> hash_table), TAG_* field-type enums -> FIELD_VALUE_*, SAMPLING_MODE_SPREADSKETCH -> SAMPLING_MODE_TOP_CARDINALITY, fieldstat_hist_* -> fieldstat_histogram_*, and the python_api histogram helpers gain an _api suffix
Diffstat (limited to 'src')
-rw-r--r--  src/cells/hash_table.c (renamed from src/tags/tag_map.c)          2
-rw-r--r--  src/cells/hash_table.h (renamed from src/tags/tag_map.h)          0
-rw-r--r--  src/cells/heavy_keeper.c (renamed from src/tags/heavy_keeper.c)   2
-rw-r--r--  src/cells/heavy_keeper.h (renamed from src/tags/heavy_keeper.h)   0
-rw-r--r--  src/cells/spread_sketch.c (renamed from src/tags/spread_sketch.c) 1
-rw-r--r--  src/cells/spread_sketch.h (renamed from src/tags/spread_sketch.h) 0
-rw-r--r--  src/cube.c                                                        75
-rw-r--r--  src/exporter/cjson_exporter.c                                     46
-rw-r--r--  src/exporter/fieldstat_exporter.py                                16
-rw-r--r--  src/fieldstat.c                                                   10
-rw-r--r--  src/fieldstat_easy.c                                              10
-rw-r--r--  src/metrics/python_api.c                                           4
12 files changed, 83 insertions, 83 deletions
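
The hunks below are mechanical renames (see the commit message above). As a minimal caller-side sketch of the field-type enum rename only, not part of this commit: it assumes the struct field members and enumerators shown in the cube.c hunks, and print_field itself is a hypothetical helper.

#include <assert.h>
#include <stdio.h>
/* Hypothetical helper, for illustration only: switching on the renamed
 * field-type enumerators. struct field and its members (key, type,
 * value_longlong, value_double, value_str) come from the library headers. */
static void print_field(const struct field *field)
{
    switch (field->type) {
    case FIELD_VALUE_INTEGER:   /* formerly TAG_INTEGER */
        printf("%s=%lld\n", field->key, field->value_longlong);
        break;
    case FIELD_VALUE_DOUBLE:    /* formerly TAG_DOUBLE */
        printf("%s=%f\n", field->key, field->value_double);
        break;
    case FIELD_VALUE_CSTRING:   /* formerly TAG_CSTRING */
        printf("%s=%s\n", field->key, field->value_str);
        break;
    default:
        assert(0);
    }
}
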
diff --git a/src/tags/tag_map.c b/src/cells/hash_table.c
index 6dad5e1..8394565 100644
--- a/src/tags/tag_map.c
+++ b/src/cells/hash_table.c
@@ -1,4 +1,4 @@
-#include "tag_map.h"
+#include "hash_table.h"
#include <stdio.h>
#include <assert.h>
diff --git a/src/tags/tag_map.h b/src/cells/hash_table.h
index 59fc8e8..59fc8e8 100644
--- a/src/tags/tag_map.h
+++ b/src/cells/hash_table.h
diff --git a/src/tags/heavy_keeper.c b/src/cells/heavy_keeper.c
index c257e1d..b606a67 100644
--- a/src/tags/heavy_keeper.c
+++ b/src/cells/heavy_keeper.c
@@ -583,7 +583,7 @@ int heavy_keeper_add(struct heavy_keeper *heavy_keeper, const char *key, size_t
struct sorted_set *summary = heavy_keeper->top_K_heap;
- long long old_cnt = sorted_set_get_score(summary, key, key_len); // todo: rename to score
+ long long old_cnt = sorted_set_get_score(summary, key, key_len);
bool not_in_sorted_set = (old_cnt == NOT_FIND);
long long maxv = 0;
uint64_t fp = cal_hash_val_with_seed(key, key_len, FP_HASH_KEY);
diff --git a/src/tags/heavy_keeper.h b/src/cells/heavy_keeper.h
index 3b09598..3b09598 100644
--- a/src/tags/heavy_keeper.h
+++ b/src/cells/heavy_keeper.h
diff --git a/src/tags/spread_sketch.c b/src/cells/spread_sketch.c
index 0d39d16..e79815b 100644
--- a/src/tags/spread_sketch.c
+++ b/src/cells/spread_sketch.c
@@ -52,6 +52,7 @@ struct spread_sketch {
uint32_t *min_level_per_row; // TODO: check performance first and write this later. Used to record the minimum level of each row so that rows can be skipped. For 64-bit levels, maintain a count, using an extra 64 r of space; when the count for the current minimum level drops to 0, update the minimum level.
// TODO: compared with heavy keeper, it is not only a matter of skipping rows: heavykeeper never goes through the sketch update at all when the input is 0, regardless of the situation.
+ // Or simply recording the number of buckets used would also be fine.
};
static void *default_new_fn(void *arg) {
diff --git a/src/tags/spread_sketch.h b/src/cells/spread_sketch.h
index 9717238..9717238 100644
--- a/src/tags/spread_sketch.h
+++ b/src/cells/spread_sketch.h
diff --git a/src/cube.c b/src/cube.c
index 6d620c2..5bdbea7 100644
--- a/src/cube.c
+++ b/src/cube.c
@@ -13,7 +13,7 @@
#include "metric_manifest.h"
#include "metric.h"
#include "heavy_keeper.h"
-#include "tag_map.h"
+#include "hash_table.h"
#include "spread_sketch.h"
#define DEFAULT_N_METRIC 32
@@ -42,8 +42,8 @@ struct cell {
struct cube {
enum sampling_mode sampling_mode;
union {
- struct heavy_keeper *heavykeeper; // todo: these two were changed
- struct hash_table *table; // todo:
+ struct heavy_keeper *heavykeeper;
+ struct hash_table *table;
struct spread_sketch *spread_sketch;
};
size_t max_n_cell;
@@ -54,7 +54,7 @@ struct cube {
int primary_metric_id;
char *serialized_dimensions; // the cube's key is its serialized dimensions
- size_t serialized_dimensions_len; // todo: rename
+ size_t serialized_dimensions_len;
int id;
UT_hash_handle hh;
};
@@ -68,13 +68,13 @@ static struct field *field_array_duplicate(const struct field *fields_src, size_
ret[i].type = fields_src[i].type;
switch (fields_src[i].type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
ret[i].value_longlong = fields_src[i].value_longlong;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
ret[i].value_str = strdup(fields_src[i].value_str);
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
ret[i].value_double = fields_src[i].value_double;
break;
default:
@@ -90,7 +90,7 @@ static void fieldstat_free_tag_array(struct field *fields, size_t n_tags)
for (size_t i = 0; i < n_tags; i++) {
struct field *field = &fields[i];
free((char *)field->key);
- if (field->type == TAG_CSTRING) {
+ if (field->type == FIELD_VALUE_CSTRING) {
free((char *)field->value_str);
}
}
@@ -164,16 +164,16 @@ static int field_array_to_key_safe(const struct field fields[], size_t n_tags, c
key_len = strlen(field->key);
switch(field->type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
val_len = sizeof(long long);
val_position = (void *)&field->value_longlong;
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
val_len = sizeof(double);
val_position = (void *)&field->value_double;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
val_len = strlen(field->value_str);
val_position = (void *)field->value_str;
break;
@@ -221,16 +221,16 @@ static void field_array_to_key_endeavor(const struct field fields[], size_t n_ta
key_len = strlen(field->key);
switch(field->type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
val_len = sizeof(long long);
val_position = (void *)&field->value_longlong;
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
val_len = sizeof(double);
val_position = (void *)&field->value_double;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
val_len = strlen(field->value_str);
val_position = (void *)field->value_str;
break;
@@ -484,7 +484,7 @@ void cell_free(struct cell *pthis) {
free(pthis->slots);
for (size_t i = 0; i < pthis->cell_dimensions.n_field; i++) {
free((char *)pthis->cell_dimensions.field[i].key);
- if (pthis->cell_dimensions.field[i].type == TAG_CSTRING) {
+ if (pthis->cell_dimensions.field[i].type == FIELD_VALUE_CSTRING) {
free((char *)pthis->cell_dimensions.field[i].value_str);
}
}
@@ -592,7 +592,7 @@ struct cube *cube_new(const struct field *dimensions, size_t n_dimensions, enum
cube->table = hash_table_new(max_n_cell);
hash_table_set_exdata_schema(cube->table, exdata_new_i, exdata_free_i, exdata_merge_i, exdata_reset_i, exdata_copy_i);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
cube->spread_sketch = spread_sketch_new(max_n_cell);
spread_sketch_set_exdata_schema(cube->spread_sketch, exdata_new_i, exdata_free_i, exdata_merge_i, exdata_reset_i, exdata_copy_i);
break;
@@ -613,7 +613,7 @@ void cube_free(struct cube *cube) {
case SAMPLING_MODE_COMPREHENSIVE:
hash_table_free(cube->table);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
spread_sketch_free(cube->spread_sketch);
break;
default:
@@ -642,7 +642,7 @@ void cube_reset(struct cube *cube) {
case SAMPLING_MODE_COMPREHENSIVE:
hash_table_reset(cube->table);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
spread_sketch_reset(cube->spread_sketch);
break;
default:
@@ -658,7 +658,7 @@ int cube_set_primary_metric(struct cube *cube, int metric_id) {
}
if (cube->sampling_mode == SAMPLING_MODE_COMPREHENSIVE ||
(cube->sampling_mode == SAMPLING_MODE_TOPK && manifest->type != METRIC_TYPE_COUNTER) ||
- (cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH && manifest->type != METRIC_TYPE_HLL)) {
+ (cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY && manifest->type != METRIC_TYPE_HLL)) {
return FS_ERR_INVALID_PARAM;
}
cube->primary_metric_id = metric_id;
@@ -761,9 +761,8 @@ struct cell *get_cell_in_spread_sketch_cube(struct cube *cube, const struct fiel
args.n_dimensions = n_dimension;
struct cell *cell_data = NULL;
- assert(cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH);
+ assert(cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY);
- // todo: spread sketch currently supports dummies by letting them go through the sketch as well; a "full row" check could reduce this computation, but low-level entries still go through the same flow, unlike heavy keeper, which just does a simple hash table lookup.
if (cube->primary_metric_id != metric_id) {
cell_data = spread_sketch_get0_exdata(cube->spread_sketch, key, key_len);
if (cell_data == NULL) {
@@ -894,7 +893,7 @@ int cube_histogram_record(struct cube *cube, int metric_id, const struct field *
case SAMPLING_MODE_TOPK: {
cell_data = get_cell_in_topk_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
- case SAMPLING_MODE_SPREADSKETCH: {
+ case SAMPLING_MODE_TOP_CARDINALITY: {
cell_data = get_cell_in_spread_sketch_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
default:
@@ -923,7 +922,7 @@ int cube_hll_add(struct cube *cube, int metric_id, const struct field *dimension
}
uint64_t hash = 0; // just any value; the hash is only used when updating the primary metric of a spread sketch cube
- if (cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH && cube->primary_metric_id == metric_id) {
+ if (cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY && cube->primary_metric_id == metric_id) {
hash = XXH3_64bits(key, key_len);
}
struct cell *cell_data = NULL;
@@ -934,7 +933,7 @@ int cube_hll_add(struct cube *cube, int metric_id, const struct field *dimension
case SAMPLING_MODE_TOPK: {
cell_data = get_cell_in_topk_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
- case SAMPLING_MODE_SPREADSKETCH: {
+ case SAMPLING_MODE_TOP_CARDINALITY: {
cell_data = get_cell_in_spread_sketch_cube(cube, dimensions, n_dimensions, hash, metric_id);
break;}
default:
@@ -956,7 +955,7 @@ uint64_t field_array_to_hash(const struct field *field, size_t n_dimensions) {
for (int i = 0; i < n_dimensions; i++) {
XXH3_64bits_update(&state, field[i].key, strlen(field[i].key));
- if (field[i].type != TAG_CSTRING) {
+ if (field[i].type != FIELD_VALUE_CSTRING) {
XXH3_64bits_update(&state, &field[i].value_longlong, sizeof(long long));
} else {
XXH3_64bits_update(&state, field[i].value_str, strlen(field[i].value_str));
@@ -975,7 +974,7 @@ int cube_hll_add_field(struct cube *cube, int metric_id, const struct field *dim
}
uint64_t hash = 0; // just any value; the hash is only used when updating the primary metric of a spread sketch cube
- if (cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH && cube->primary_metric_id == metric_id) {
+ if (cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY && cube->primary_metric_id == metric_id) {
hash = field_array_to_hash(tags_key, n_tag_key);
}
struct cell *cell_data = NULL;
@@ -986,7 +985,7 @@ int cube_hll_add_field(struct cube *cube, int metric_id, const struct field *dim
case SAMPLING_MODE_TOPK: {
cell_data = get_cell_in_topk_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
- case SAMPLING_MODE_SPREADSKETCH: {
+ case SAMPLING_MODE_TOP_CARDINALITY: {
cell_data = get_cell_in_spread_sketch_cube(cube, dimensions, n_dimensions, hash, metric_id);
break;}
default:
@@ -1008,7 +1007,7 @@ int cube_hll_add_field(struct cube *cube, int metric_id, const struct field *dim
int cube_counter_incrby(struct cube *cube, int metric_id, const struct field *dimensions, size_t n_dimensions, long long increment) {
assert(cube->sampling_mode == SAMPLING_MODE_COMPREHENSIVE ||
(cube->sampling_mode == SAMPLING_MODE_TOPK && (cube->primary_metric_id != metric_id || increment >= 0)) ||
- (cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH && cube->primary_metric_id != metric_id)
+ (cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY && cube->primary_metric_id != metric_id)
);
const struct metric_manifest *manifest = metric_manifest_manager_get_by_id(cube->manifest_manager, metric_id);
@@ -1024,7 +1023,7 @@ int cube_counter_incrby(struct cube *cube, int metric_id, const struct field *di
case SAMPLING_MODE_TOPK: {
cell_data = get_cell_in_topk_cube(cube, dimensions, n_dimensions, increment, metric_id);
break;}
- case SAMPLING_MODE_SPREADSKETCH: {
+ case SAMPLING_MODE_TOP_CARDINALITY: {
cell_data = get_cell_in_spread_sketch_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
default:
@@ -1058,7 +1057,7 @@ int cube_counter_set(struct cube *cube, int metric_id, const struct field *dimen
case SAMPLING_MODE_TOPK: {
cell_data = get_cell_in_topk_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
- case SAMPLING_MODE_SPREADSKETCH: {
+ case SAMPLING_MODE_TOP_CARDINALITY: {
cell_data = get_cell_in_spread_sketch_cube(cube, dimensions, n_dimensions, 0, metric_id);
break;}
default:
@@ -1087,7 +1086,7 @@ struct cube *cube_copy(const struct cube *cube)
case SAMPLING_MODE_COMPREHENSIVE:
cube_dup->table = hash_table_copy(cube->table);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
cube_dup->spread_sketch = spread_sketch_copy(cube->spread_sketch);
break;
default:
@@ -1131,7 +1130,7 @@ int cube_merge(struct cube *dest, const struct cube *src)
case SAMPLING_MODE_COMPREHENSIVE:
hash_table_merge(dest->table, src->table);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
spread_sketch_merge(dest->spread_sketch, src->spread_sketch);
break;
default:
@@ -1156,7 +1155,7 @@ struct cube *cube_fork(const struct cube *cube) {
ret->table = hash_table_new(cube->max_n_cell);
hash_table_set_exdata_schema(ret->table, exdata_new_i, exdata_free_i, exdata_merge_i, exdata_reset_i, exdata_copy_i);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
ret->spread_sketch = spread_sketch_new(cube->max_n_cell);
spread_sketch_set_exdata_schema(ret->spread_sketch, exdata_new_i, exdata_free_i, exdata_merge_i, exdata_reset_i, exdata_copy_i);
break;
@@ -1196,7 +1195,7 @@ void cube_get_cells(const struct cube *cube, struct field_list **cell_dimensions
case SAMPLING_MODE_TOPK:
n_cell_tmp = heavy_keeper_get_count(cube->heavykeeper);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
n_cell_tmp = spread_sketch_get_count(cube->spread_sketch);
break;
default:
@@ -1217,7 +1216,7 @@ void cube_get_cells(const struct cube *cube, struct field_list **cell_dimensions
case SAMPLING_MODE_TOPK:
heavy_keeper_list(cube->heavykeeper, (void **)cell_datas, n_cell_tmp);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
spread_sketch_list(cube->spread_sketch, (void **)cell_datas, n_cell_tmp);
break;
default:
@@ -1225,7 +1224,7 @@ void cube_get_cells(const struct cube *cube, struct field_list **cell_dimensions
}
// spread sketch often stores more than max_n_cell. So sort out the top max_n_cell cells.
- if (cube->sampling_mode == SAMPLING_MODE_SPREADSKETCH && n_cell_tmp > cube->max_n_cell) {
+ if (cube->sampling_mode == SAMPLING_MODE_TOP_CARDINALITY && n_cell_tmp > cube->max_n_cell) {
struct tmp_sorted_data_spread_sketch_cell *tmp_sorted_data = (struct tmp_sorted_data_spread_sketch_cell *)malloc(sizeof(struct tmp_sorted_data_spread_sketch_cell) * n_cell_tmp);
for (int i = 0; i < n_cell_tmp; i++) {
tmp_sorted_data[i].data = cell_datas[i];
@@ -1274,7 +1273,7 @@ const struct cell *get_cell_by_tag_list(const struct cube *cube, const struct fi
case SAMPLING_MODE_COMPREHENSIVE:
ret = hash_table_get0_exdata(cube->table, tag_in_string, tag_len);
break;
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
ret = spread_sketch_get0_exdata(cube->spread_sketch, tag_in_string, tag_len);
break;
default:
@@ -1383,7 +1382,7 @@ int cube_get_cell_count(const struct cube *cube) {
return hash_table_get_count(cube->table);
case SAMPLING_MODE_TOPK:
return heavy_keeper_get_count(cube->heavykeeper);
- case SAMPLING_MODE_SPREADSKETCH:
+ case SAMPLING_MODE_TOP_CARDINALITY:
return spread_sketch_get_count(cube->spread_sketch);
default:
assert(0);
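
For reference, the check in cube_set_primary_metric above ties the primary metric type to the renamed sampling mode: comprehensive cubes accept no primary metric, SAMPLING_MODE_TOPK requires a METRIC_TYPE_COUNTER, and SAMPLING_MODE_TOP_CARDINALITY requires a METRIC_TYPE_HLL. A hypothetical helper, not present in the source, that restates the same rule:

#include <stdbool.h>
/* Hypothetical restatement of the condition in cube_set_primary_metric;
 * enum sampling_mode and the METRIC_TYPE_* values come from the library headers. */
static bool primary_metric_allowed(enum sampling_mode mode, int metric_type)
{
    switch (mode) {
    case SAMPLING_MODE_COMPREHENSIVE:
        return false;                              /* no primary metric in comprehensive mode */
    case SAMPLING_MODE_TOPK:
        return metric_type == METRIC_TYPE_COUNTER; /* top-K ranks cells by a counter */
    case SAMPLING_MODE_TOP_CARDINALITY:
        return metric_type == METRIC_TYPE_HLL;     /* formerly SAMPLING_MODE_SPREADSKETCH; ranks by an HLL */
    default:
        return false;
    }
}
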
diff --git a/src/exporter/cjson_exporter.c b/src/exporter/cjson_exporter.c
index 3fecf40..bbe6fd3 100644
--- a/src/exporter/cjson_exporter.c
+++ b/src/exporter/cjson_exporter.c
@@ -116,7 +116,7 @@ struct couple_export_table {
void kv_pair_free(struct export_kv_pair *pair) {
- if (pair->type == TAG_CSTRING) {
+ if (pair->type == FIELD_VALUE_CSTRING) {
free((char *)pair->value_str);
}
free((char *)pair->key);
@@ -128,13 +128,13 @@ void kv_pair_fill_with_tags(struct export_kv_pair *dest, const struct field *src
dest->key = strdup(src->key);
dest->type = src->type;
switch (src->type) {
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
dest->value_longlong = src->value_longlong;
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
dest->value_double = src->value_double;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
dest->value_str = strdup(src->value_str);
break;
default:
@@ -229,17 +229,17 @@ bool fieldstat_tag_list_cmp(const struct field_list *a, const struct field_list
}
switch (a->field[i].type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
if (a->field[i].value_longlong != b->field[i].value_longlong) {
return false;
}
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
if (a->field[i].value_double != b->field[i].value_double) {
return false;
}
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
if (strcmp(a->field[i].value_str, b->field[i].value_str) != 0) {
return false;
}
@@ -262,13 +262,13 @@ struct field_list *my_copy_fs_tag_list(const struct field_list *src)
dest->field[i].type = src->field[i].type;
switch (src->field[i].type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
dest->field[i].value_longlong = src->field[i].value_longlong;
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
dest->field[i].value_double = src->field[i].value_double;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
dest->field[i].value_str = strdup(src->field[i].value_str);
break;
default:
@@ -342,7 +342,7 @@ void write_delta_to_json(struct fieldstat_json_exporter *exporter, struct cellwi
tag_json = "\a\t\a"; // just a dummy string
}
for (int j = 0; j < tag_field_pair->n_metric; j++) {
- if (tag_field_pair->metric_pairs[j]->type != TAG_INTEGER) { // only counter-type metrics need to write delta
+ if (tag_field_pair->metric_pairs[j]->type != FIELD_VALUE_INTEGER) { // only counter-type metrics need to write delta
continue;
}
const char *metric_name = tag_field_pair->metric_pairs[j]->key;
@@ -535,7 +535,7 @@ struct export_kv_pair *cell_query_with_iter(const struct cell_iter *iter, int me
}
ret = malloc(sizeof(struct export_kv_pair));
ret->key = strdup(fieldstat_get_metric_name(iter->instance, cube_id, metric_id));
- ret->type = TAG_INTEGER;
+ ret->type = FIELD_VALUE_INTEGER;
ret->value_longlong = value;
return ret;
}
@@ -548,7 +548,7 @@ struct export_kv_pair *cell_query_with_iter(const struct cell_iter *iter, int me
}
ret = malloc(sizeof(struct export_kv_pair));
ret->key = strdup(fieldstat_get_metric_name(iter->instance, cube_id, metric_id));
- ret->type = TAG_CSTRING;
+ ret->type = FIELD_VALUE_CSTRING;
ret->value_str = value;
return ret;
}
@@ -560,13 +560,13 @@ void kv_pair_write_to_json(const struct export_kv_pair *pairs, struct json_write
{
switch (pairs->type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
json_writer_longlong_field(writer, pairs->key, pairs->value_longlong);
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
json_writer_double_field(writer, pairs->key, pairs->value_double);
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
json_writer_str_field(writer, pairs->key, pairs->value_str, strlen(pairs->value_str));
break;
default:
@@ -588,13 +588,13 @@ void tag_list_append_to_tag_object(const struct field_list *tag_list, struct jso
pairs.type = tag_list->field[i].type;
switch (pairs.type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
pairs.value_longlong = tag_list->field[i].value_longlong;
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
pairs.value_double = tag_list->field[i].value_double;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
pairs.value_str = (char *)tag_list->field[i].value_str;
break;
default:
@@ -627,7 +627,7 @@ void kv_pair_free_list(struct export_kv_pair *pairs, size_t len)
{
for (int i = 0; i < len; i++) {
struct export_kv_pair *pair = &pairs[i];
- if (pair->type == TAG_CSTRING) {
+ if (pair->type == FIELD_VALUE_CSTRING) {
free(pair->value_str);
}
free(pair->key);
@@ -992,13 +992,13 @@ void fieldstat_json_exporter_set_global_tag(struct fieldstat_json_exporter *expo
field->type = tag_list[i].type;
switch (field->type)
{
- case TAG_INTEGER:
+ case FIELD_VALUE_INTEGER:
field->value_longlong = tag_list[i].value_longlong;
break;
- case TAG_CSTRING:
+ case FIELD_VALUE_CSTRING:
field->value_str = strdup(tag_list[i].value_str);
break;
- case TAG_DOUBLE:
+ case FIELD_VALUE_DOUBLE:
field->value_double = tag_list[i].value_double;
break;
diff --git a/src/exporter/fieldstat_exporter.py b/src/exporter/fieldstat_exporter.py
index c9eb523..aad7969 100644
--- a/src/exporter/fieldstat_exporter.py
+++ b/src/exporter/fieldstat_exporter.py
@@ -25,11 +25,11 @@ class FieldstatAPI:
libfieldstat.fieldstat_histogram_free.argtypes = [ctypes.c_void_p]
- libfieldstat.fieldstat_histogram_value_at_percentile.argtypes = [ctypes.c_void_p, ctypes.c_double]
- libfieldstat.fieldstat_histogram_value_at_percentile.restype = ctypes.c_longlong
+ libfieldstat.fieldstat_histogram_value_at_percentile_api.argtypes = [ctypes.c_void_p, ctypes.c_double]
+ libfieldstat.fieldstat_histogram_value_at_percentile_api.restype = ctypes.c_longlong
- libfieldstat.fieldstat_histogram_count_le_value.argtypes = [ctypes.c_void_p, ctypes.c_longlong]
- libfieldstat.fieldstat_histogram_count_le_value.restype = ctypes.c_longlong
+ libfieldstat.fieldstat_histogram_count_le_value_api.argtypes = [ctypes.c_void_p, ctypes.c_longlong]
+ libfieldstat.fieldstat_histogram_count_le_value_api.restype = ctypes.c_longlong
libfieldstat.fieldstat_histogram_value_total_count.argtypes = [ctypes.c_void_p]
libfieldstat.fieldstat_histogram_value_total_count.restype = ctypes.c_longlong
@@ -134,7 +134,7 @@ class PrometheusExporter:
metrics = ""
for i in self.hist_bins:
- value = FieldstatAPI.libfieldstat.fieldstat_histogram_count_le_value(c_hist, int(i))
+ value = FieldstatAPI.libfieldstat.fieldstat_histogram_count_le_value_api(c_hist, int(i))
metric = name + "_bucket" + "{" + tags + ",le=\"{:.2f}\"".format(i) + "}" + ' ' + str(value) + '\n'
metrics += metric
self.n_lines += 1
@@ -144,7 +144,7 @@ class PrometheusExporter:
def __build_summary_format(self, name, tags, c_hist):
metrics = ""
for i in self.hist_bins:
- value = FieldstatAPI.libfieldstat.fieldstat_histogram_value_at_percentile(c_hist, float(i * 100))
+ value = FieldstatAPI.libfieldstat.fieldstat_histogram_value_at_percentile_api(c_hist, float(i * 100))
metric = name + "{" + tags + ",quantile=\"{:.2f}%\"".format(i * 100) + "}" + ' ' + str(value) + '\n'
metrics += metric
self.n_lines += 1
@@ -365,10 +365,10 @@ class HistogramTable:
row_values = []
for i in self.bins:
if self.format == "summary":
- value = FieldstatAPI.libfieldstat.fieldstat_histogram_value_at_percentile(c_hist, float(i * 100))
+ value = FieldstatAPI.libfieldstat.fieldstat_histogram_value_at_percentile_api(c_hist, float(i * 100))
row_values.append(str(value))
if self.format == "histogram":
- value = FieldstatAPI.libfieldstat.fieldstat_histogram_count_le_value(c_hist, int(i))
+ value = FieldstatAPI.libfieldstat.fieldstat_histogram_count_le_value_api(c_hist, int(i))
row_values.append(str(value))
shared_values = self.__get_row_shared_values(c_hist)
row_values += shared_values
diff --git a/src/fieldstat.c b/src/fieldstat.c
index 0e2fb81..12c482b 100644
--- a/src/fieldstat.c
+++ b/src/fieldstat.c
@@ -69,7 +69,7 @@ void fieldstat_free_tag_array(struct field *fields, size_t n_tags)
for (size_t i = 0; i < n_tags; i++) {
struct field *field = &fields[i];
free((char *)field->key);
- if (field->type == TAG_CSTRING) {
+ if (field->type == FIELD_VALUE_CSTRING) {
free((char *)field->value_str);
}
}
@@ -142,7 +142,7 @@ int fieldstat_register_hll(struct fieldstat *instance, int cube_id, const char *
}
// cppcheck-suppress [constParameterPointer, unmatchedSuppression]
-int fieldstat_register_hist(struct fieldstat *instance, int cube_id, const char *metric_name, long long lowest_trackable_value, long long highest_trackable_value, int significant_figures)
+int fieldstat_register_histogram(struct fieldstat *instance, int cube_id, const char *metric_name, long long lowest_trackable_value, long long highest_trackable_value, int significant_figures)
{
struct cube *cube = cube_manager_get_cube_by_id(instance->cube_manager, cube_id);
if (cube == NULL) {
@@ -200,7 +200,7 @@ int fieldstat_hll_add_field(struct fieldstat *instance, int cube_id, int metric_
}
// cppcheck-suppress [constParameterPointer, unmatchedSuppression]
-int fieldstat_hist_record(struct fieldstat *instance, int cube_id, int metric_id, const struct field *cell_dimensions, size_t n_dimensions, long long value)
+int fieldstat_histogram_record(struct fieldstat *instance, int cube_id, int metric_id, const struct field *cell_dimensions, size_t n_dimensions, long long value)
{
struct cube *cube = cube_manager_get_cube_by_id(instance->cube_manager, cube_id);
if (cube == NULL) {
@@ -277,7 +277,7 @@ int fieldstat_hll_get(const struct fieldstat *instance, int cube_id, const struc
return ret;
}
-long long fieldstat_hist_value_at_percentile(const struct fieldstat *instance, int cube_id, const struct field_list *cell_dimensions, int metric_id, double percentile)
+long long fieldstat_histogram_value_at_percentile(const struct fieldstat *instance, int cube_id, const struct field_list *cell_dimensions, int metric_id, double percentile)
{
const struct cube *cube = cube_manager_get_cube_by_id(instance->cube_manager, cube_id);
if (cube == NULL) {
@@ -292,7 +292,7 @@ long long fieldstat_hist_value_at_percentile(const struct fieldstat *instance, i
return value;
}
-long long fieldstat_hist_count_le_value(const struct fieldstat *instance, int cube_id, const struct field_list *cell_dimensions, int metric_id, long long value)
+long long fieldstat_histogram_count_le_value(const struct fieldstat *instance, int cube_id, const struct field_list *cell_dimensions, int metric_id, long long value)
{
const struct cube *cube = cube_manager_get_cube_by_id(instance->cube_manager, cube_id);
if (cube == NULL) {
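
Callers of the histogram API move from the fieldstat_hist_* names to fieldstat_histogram_*. A minimal usage sketch, not part of this commit: the signatures are taken from the hunks above, and treating the return value of fieldstat_register_histogram as the metric id is an assumption based on how fieldstat_easy.c uses it.

#include <stddef.h>
/* Hypothetical callers, for illustration only; struct fieldstat and struct field
 * come from the library headers. */
static int setup_latency_histogram(struct fieldstat *instance, int cube_id)
{
    /* formerly fieldstat_register_hist(); assumed to return the metric id, or < 0 on error */
    return fieldstat_register_histogram(instance, cube_id, "latency_us",
                                        1, 60LL * 1000 * 1000, 3);
}

static void on_request_done(struct fieldstat *instance, int cube_id, int metric_id,
                            const struct field *dims, size_t n_dims, long long latency_us)
{
    /* formerly fieldstat_hist_record() */
    fieldstat_histogram_record(instance, cube_id, metric_id, dims, n_dims, latency_us);
}
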
diff --git a/src/fieldstat_easy.c b/src/fieldstat_easy.c
index ef1a921..00a569a 100644
--- a/src/fieldstat_easy.c
+++ b/src/fieldstat_easy.c
@@ -218,17 +218,17 @@ int fieldstat_easy_register_histogram(struct fieldstat_easy *fse, const char *na
pthread_spin_lock(&fse->fsu[i].lock);
}
- int ret = fieldstat_register_hist(fse->fsu[0].active, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures); // try to register
+ int ret = fieldstat_register_histogram(fse->fsu[0].active, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures); // try to register
if (ret < 0) {
for (int i = 0; i < fse->max_thread_num; i++) {
pthread_spin_unlock(&fse->fsu[i].lock);
}
return ret;
}
- fieldstat_register_hist(fse->fsu[0].read_only, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
+ fieldstat_register_histogram(fse->fsu[0].read_only, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
for (int i = 1; i < fse->max_thread_num; i++) {
- fieldstat_register_hist(fse->fsu[i].active, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
- fieldstat_register_hist(fse->fsu[i].read_only, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
+ fieldstat_register_histogram(fse->fsu[i].active, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
+ fieldstat_register_histogram(fse->fsu[i].read_only, 0, name, lowest_trackable_value, highest_trackable_value, significant_figures);
}
for (int i = 0; i < fse->max_thread_num; i++) {
@@ -354,7 +354,7 @@ int fieldstat_easy_histogram_record(struct fieldstat_easy *fse, int thread_id, i
}
pthread_spin_lock(&fse->fsu[thread_id].lock);
- int ret = fieldstat_hist_record(fse->fsu[thread_id].active, 0, metric_id, dimensions, n_dimensions, value);
+ int ret = fieldstat_histogram_record(fse->fsu[thread_id].active, 0, metric_id, dimensions, n_dimensions, value);
pthread_spin_unlock(&fse->fsu[thread_id].lock);
return ret;
diff --git a/src/metrics/python_api.c b/src/metrics/python_api.c
index 8388a1c..6f25fdf 100644
--- a/src/metrics/python_api.c
+++ b/src/metrics/python_api.c
@@ -16,12 +16,12 @@ void *fieldstat_histogram_base64_decode(char *buf)
return hdr;
}
-long long fieldstat_histogram_value_at_percentile(void* h, double percentile)
+long long fieldstat_histogram_value_at_percentile_api(void* h, double percentile)
{
return hdr_value_at_percentile((const struct hdr_histogram *)h, percentile);
}
-long long fieldstat_histogram_count_le_value(void* h, long long value)
+long long fieldstat_histogram_count_le_value_api(void* h, long long value)
{
return hdr_count_le_value((const struct hdr_histogram *)h, value);
}