Use NameStr() macro for NameData struct usage
fabriziomello committed Dec 9, 2023
1 parent 6f30822 commit 48c9edd
Showing 15 changed files with 69 additions and 59 deletions.
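For context: PostgreSQL stores identifiers in NameData, a struct wrapping a fixed-size char array, and its headers provide the NameStr() accessor macro for reading that array. This commit replaces direct `.data` member access with the macro throughout. Below is a minimal, self-contained C sketch of the idiom; the NameData/NameStr definitions mirror PostgreSQL's c.h, and the role name is invented for illustration:

#include <stdio.h>
#include <string.h>

#define NAMEDATALEN 64 /* PostgreSQL's default name length */

typedef struct nameData
{
	char data[NAMEDATALEN];
} NameData;

/* Canonical accessor macro, as defined in PostgreSQL's c.h */
#define NameStr(name) ((name).data)

int
main(void)
{
	NameData rolname;

	/* Fill the buffer, NUL-padded and bounded by NAMEDATALEN */
	memset(NameStr(rolname), 0, NAMEDATALEN);
	snprintf(NameStr(rolname), NAMEDATALEN, "%s", "alice");

	/* NameStr(rolname) and rolname.data are the same lvalue; the macro
	 * simply keeps call sites independent of the struct's layout. */
	printf("role: %s\n", NameStr(rolname));
	return 0;
}

Since NameStr() expands to the member itself, the change is behavior-neutral; it buys a single, greppable spelling for name access.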
2 changes: 1 addition & 1 deletion src/bgw/job.c
@@ -1041,7 +1041,7 @@ ts_bgw_job_validate_job_owner(Oid owner)
ereport(ERROR,
(errcode(ERRCODE_INVALID_AUTHORIZATION_SPECIFICATION),
errmsg("permission denied to start background process as role \"%s\"",
- rform->rolname.data),
+ NameStr(rform->rolname)),
errhint("Hypertable owner must have LOGIN permission to run background tasks.")));
}
ReleaseSysCache(role_tup);
14 changes: 9 additions & 5 deletions src/chunk.c
@@ -988,7 +988,11 @@ chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_na
if (NULL == prefix)
prefix = NameStr(ht->fd.associated_table_prefix);

- len = snprintf(chunk->fd.table_name.data, NAMEDATALEN, "%s_%d_chunk", prefix, chunk->fd.id);
+ len = snprintf(NameStr(chunk->fd.table_name),
+                NAMEDATALEN,
+                "%s_%d_chunk",
+                prefix,
+                chunk->fd.id);

if (len >= NAMEDATALEN)
elog(ERROR, "chunk table name too long");
@@ -3930,8 +3934,8 @@ ts_chunk_drop_internal(const Chunk *chunk, DropBehavior behavior, int32 log_leve
if (log_level >= 0)
elog(log_level,
"dropping chunk %s.%s",
- chunk->fd.schema_name.data,
- chunk->fd.table_name.data);
+ NameStr(chunk->fd.schema_name),
+ NameStr(chunk->fd.table_name));

/* Remove the chunk from the chunk table */
ts_chunk_delete_by_relid(chunk->table_id, behavior, preserve_catalog_row);
@@ -4138,8 +4142,8 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
}

/* store chunk name for output */
- schema_name = quote_identifier(chunks[i].fd.schema_name.data);
- table_name = quote_identifier(chunks[i].fd.table_name.data);
+ schema_name = quote_identifier(NameStr(chunks[i].fd.schema_name));
+ table_name = quote_identifier(NameStr(chunks[i].fd.table_name));
chunk_name = psprintf("%s.%s", schema_name, table_name);
dropped_chunk_names = lappend(dropped_chunk_names, chunk_name);

2 changes: 1 addition & 1 deletion src/chunk_adaptive.c
@@ -439,7 +439,7 @@ ts_calculate_chunk_interval(PG_FUNCTION_ARGS)
if (acl_result != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for table %s", ht->fd.table_name.data)));
errmsg("permission denied for table %s", NameStr(ht->fd.table_name))));

if (hypertable_is_distributed(ht))
ereport(ERROR,
8 changes: 4 additions & 4 deletions src/compression_with_clause.c
@@ -90,8 +90,8 @@ parse_segment_collist(char *inpstr, Hypertable *hypertable)
/* parse the segment by list exactly how you would a group by */
appendStringInfo(&buf,
"SELECT FROM %s.%s GROUP BY %s",
- quote_identifier(hypertable->fd.schema_name.data),
- quote_identifier(hypertable->fd.table_name.data),
+ quote_identifier(NameStr(hypertable->fd.schema_name)),
+ quote_identifier(NameStr(hypertable->fd.table_name)),
inpstr);

PG_TRY();
@@ -175,8 +175,8 @@ parse_order_collist(char *inpstr, Hypertable *hypertable)
/* parse the segment by list exactly how you would a order by by */
appendStringInfo(&buf,
"SELECT FROM %s.%s ORDER BY %s",
- quote_identifier(hypertable->fd.schema_name.data),
- quote_identifier(hypertable->fd.table_name.data),
+ quote_identifier(NameStr(hypertable->fd.schema_name)),
+ quote_identifier(NameStr(hypertable->fd.table_name)),
inpstr);

PG_TRY();
3 changes: 2 additions & 1 deletion src/hypertable.c
@@ -723,7 +723,8 @@ ts_hypertable_drop(Hypertable *hypertable, DropBehavior behavior)
performDeletion(&hypertable_addr, behavior, 0);
}
/* Clean up catalog */
- ts_hypertable_delete_by_name(hypertable->fd.schema_name.data, hypertable->fd.table_name.data);
+ ts_hypertable_delete_by_name(NameStr(hypertable->fd.schema_name),
+                              NameStr(hypertable->fd.table_name));
}

static ScanTupleResult
10 changes: 6 additions & 4 deletions src/hypertable_cache.c
@@ -118,10 +118,12 @@ hypertable_cache_create_entry(Cache *cache, CacheQuery *query)
cache_entry->hypertable = NULL;
break;
case 1:
- Assert(strncmp(cache_entry->hypertable->fd.schema_name.data, hq->schema, NAMEDATALEN) ==
-        0);
- Assert(strncmp(cache_entry->hypertable->fd.table_name.data, hq->table, NAMEDATALEN) ==
-        0);
+ Assert(strncmp(NameStr(cache_entry->hypertable->fd.schema_name),
+                hq->schema,
+                NAMEDATALEN) == 0);
+ Assert(strncmp(NameStr(cache_entry->hypertable->fd.table_name),
+                hq->table,
+                NAMEDATALEN) == 0);
break;
default:
elog(ERROR, "got an unexpected number of records: %d", number_found);
17 changes: 10 additions & 7 deletions tsl/src/bgw_policy/job.c
@@ -232,8 +232,8 @@ policy_reorder_execute(int32 job_id, Jsonb *config)
{
elog(NOTICE,
"no chunks need reordering for hypertable %s.%s",
- policy.hypertable->fd.schema_name.data,
- policy.hypertable->fd.table_name.data);
+ NameStr(policy.hypertable->fd.schema_name),
+ NameStr(policy.hypertable->fd.table_name));
return true;
}

@@ -243,12 +243,15 @@ policy_reorder_execute(int32 job_id, Jsonb *config)
* chunk.
*/
chunk = ts_chunk_get_by_id(chunk_id, false);
elog(DEBUG1, "reordering chunk %s.%s", chunk->fd.schema_name.data, chunk->fd.table_name.data);
elog(DEBUG1,
"reordering chunk %s.%s",
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name));
reorder_chunk(chunk->table_id, policy.index_relid, false, InvalidOid, InvalidOid, InvalidOid);
elog(DEBUG1,
"completed reordering chunk %s.%s",
- chunk->fd.schema_name.data,
- chunk->fd.table_name.data);
+ NameStr(chunk->fd.schema_name),
+ NameStr(chunk->fd.table_name));

/* Now update chunk_stats table */
ts_bgw_policy_chunk_stats_record_job_run(job_id, chunk_id, ts_timer_get_current_timestamp());
@@ -548,8 +551,8 @@ policy_recompression_execute(int32 job_id, Jsonb *config)
{
elog(NOTICE,
"no chunks for hypertable \"%s.%s\" that satisfy recompress chunk policy",
- policy_data.hypertable->fd.schema_name.data,
- policy_data.hypertable->fd.table_name.data);
+ NameStr(policy_data.hypertable->fd.schema_name),
+ NameStr(policy_data.hypertable->fd.table_name));
ts_cache_release(policy_data.hcache);
if (!used_portalcxt)
MemoryContextDelete(multitxn_cxt);
12 changes: 6 additions & 6 deletions tsl/src/chunk_api.c
@@ -660,12 +660,12 @@ convert_type_oid_to_strings(Oid type_id, Datum *result_strings)
type_tuple = SearchSysCache1(TYPEOID, type_id);
Assert(HeapTupleIsValid(type_tuple));
type = (Form_pg_type) GETSTRUCT(type_tuple);
- result_strings[ENCODED_TYPE_NAME] = PointerGetDatum(pstrdup(type->typname.data));
+ result_strings[ENCODED_TYPE_NAME] = PointerGetDatum(pstrdup(NameStr(type->typname)));

namespace_tuple = SearchSysCache1(NAMESPACEOID, type->typnamespace);
Assert(HeapTupleIsValid(namespace_tuple));
namespace = (Form_pg_namespace) GETSTRUCT(namespace_tuple);
- result_strings[ENCODED_TYPE_NAMESPACE] = PointerGetDatum(pstrdup(namespace->nspname.data));
+ result_strings[ENCODED_TYPE_NAMESPACE] = PointerGetDatum(pstrdup(NameStr(namespace->nspname)));
ReleaseSysCache(namespace_tuple);
ReleaseSysCache(type_tuple);
}
@@ -681,12 +681,12 @@ convert_op_oid_to_strings(Oid op_id, Datum *result_strings)
operator_tuple = SearchSysCache1(OPEROID, op_id);
Assert(HeapTupleIsValid(operator_tuple));
operator=(Form_pg_operator) GETSTRUCT(operator_tuple);
- result_strings[ENCODED_OP_NAME] = PointerGetDatum(pstrdup(operator->oprname.data));
+ result_strings[ENCODED_OP_NAME] = PointerGetDatum(pstrdup(NameStr(operator->oprname)));

namespace_tuple = SearchSysCache1(NAMESPACEOID, operator->oprnamespace);
Assert(HeapTupleIsValid(namespace_tuple));
namespace = (Form_pg_namespace) GETSTRUCT(namespace_tuple);
- result_strings[ENCODED_OP_NAMESPACE] = PointerGetDatum(pstrdup(namespace->nspname.data));
+ result_strings[ENCODED_OP_NAMESPACE] = PointerGetDatum(pstrdup(NameStr(namespace->nspname)));
ReleaseSysCache(namespace_tuple);

convert_type_oid_to_strings(operator->oprleft, LargSubarrayForOpArray(result_strings));
@@ -1811,8 +1811,8 @@ chunk_api_call_chunk_drop_replica(const Chunk *chunk, const char *node_name, Oid
*/

drop_cmd = psprintf("DROP TABLE %s.%s",
- quote_identifier(chunk->fd.schema_name.data),
- quote_identifier(chunk->fd.table_name.data));
+ quote_identifier(NameStr(chunk->fd.schema_name)),
+ quote_identifier(NameStr(chunk->fd.table_name)));
data_nodes = list_make1((char *) node_name);
ts_dist_cmd_run_on_data_nodes(drop_cmd, data_nodes, true);

34 changes: 17 additions & 17 deletions tsl/src/chunk_copy.c
@@ -209,13 +209,13 @@ chunk_copy_operation_update(ChunkCopy *cc)
{
NameData application_name;

- snprintf(application_name.data,
-          sizeof(application_name.data),
+ snprintf(NameStr(application_name),
+          sizeof(NameStr(application_name)),
"%s:%s",
- cc->fd.operation_id.data,
+ NameStr(cc->fd.operation_id),
cc->stage->name);

- pgstat_report_appname(application_name.data);
+ pgstat_report_appname(NameStr(application_name));

chunk_copy_operation_scan_update_by_id(NameStr(cc->fd.operation_id),
chunk_copy_operation_tuple_update,
@@ -351,7 +351,7 @@ chunk_copy_setup(ChunkCopy *cc, Oid chunk_relid, const char *src_node, const cha
cc->fd.chunk_id = cc->chunk->fd.id;
namestrcpy(&cc->fd.source_node_name, src_node);
namestrcpy(&cc->fd.dest_node_name, dst_node);
- memset(cc->fd.compressed_chunk_name.data, 0, NAMEDATALEN);
+ memset(NameStr(cc->fd.compressed_chunk_name), 0, NAMEDATALEN);
cc->fd.delete_on_src_node = delete_on_src_node;

ts_cache_release(hcache);
@@ -380,15 +380,15 @@ chunk_copy_stage_init(ChunkCopy *cc)
int32 id;

/* check if the user has specified the operation id, if not generate one */
- if (cc->fd.operation_id.data[0] == '\0')
+ if (NameStr(cc->fd.operation_id)[0] == '\0')
{
/*
* Get the operation id for this chunk move/copy activity. The naming
* convention is "ts_copy_seq-id_chunk-id".
*/
id = ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_COPY_OPERATION);
- snprintf(cc->fd.operation_id.data,
-          sizeof(cc->fd.operation_id.data),
+ snprintf(NameStr(cc->fd.operation_id),
+          sizeof(NameStr(cc->fd.operation_id)),
"ts_copy_%d_%d",
id,
cc->chunk->fd.id);
@@ -503,8 +503,8 @@ chunk_copy_get_source_compressed_chunk_name(ChunkCopy *cc)
errmsg("failed to get corresponding compressed chunk name from the source data "
"node")));

- snprintf(cc->fd.compressed_chunk_name.data,
-          sizeof(cc->fd.compressed_chunk_name.data),
+ snprintf(NameStr(cc->fd.compressed_chunk_name),
+          sizeof(NameStr(cc->fd.compressed_chunk_name)),
"%s",
PQgetvalue(res, 0, 0));

@@ -626,7 +626,7 @@ chunk_copy_stage_create_empty_compressed_chunk_cleanup(ChunkCopy *cc)
INTERNAL_SCHEMA_NAME,
NameStr(cc->fd.compressed_chunk_name));
ts_dist_cmd_run_on_data_nodes(cmd, list_make1(NameStr(cc->fd.dest_node_name)), true);
- cc->fd.compressed_chunk_name.data[0] = 0;
+ NameStr(cc->fd.compressed_chunk_name)[0] = 0;
}

static void
@@ -946,8 +946,8 @@ chunk_copy_stage_attach_chunk(ChunkCopy *cc)
chunk_data_node->foreign_server_oid = cc->dst_server->serverid;

remote_chunk_name = psprintf("%s.%s",
- quote_identifier(chunk->fd.schema_name.data),
- quote_identifier(chunk->fd.table_name.data));
+ quote_identifier(NameStr(chunk->fd.schema_name)),
+ quote_identifier(NameStr(chunk->fd.table_name)));

chunk_api_create_on_data_nodes(chunk, ht, remote_chunk_name, list_make1(chunk_data_node));

@@ -977,8 +977,8 @@ chunk_copy_stage_attach_compressed_chunk(ChunkCopy *cc)
chunk_copy_alter_chunk_owner(cc, NameStr(cc->fd.dest_node_name), true, true);

chunk_name = psprintf("%s.%s",
- quote_identifier(cc->chunk->fd.schema_name.data),
- quote_identifier(cc->chunk->fd.table_name.data));
+ quote_identifier(NameStr(cc->chunk->fd.schema_name)),
+ quote_identifier(NameStr(cc->chunk->fd.table_name)));

compressed_chunk_name = psprintf("%s.%s",
quote_identifier(INTERNAL_SCHEMA_NAME),
@@ -1173,11 +1173,11 @@ chunk_copy(Oid chunk_relid, const char *src_node, const char *dst_node, const ch
errhint("operation_id names may only contain lower case letters, numbers, and "
"the underscore character.")));

- snprintf(cc.fd.operation_id.data, sizeof(cc.fd.operation_id.data), "%s", op_id);
+ snprintf(NameStr(cc.fd.operation_id), sizeof(NameStr(cc.fd.operation_id)), "%s", op_id);
}
else
{
- cc.fd.operation_id.data[0] = '\0';
+ NameStr(cc.fd.operation_id)[0] = '\0';
}

chunk_copy_setup(&cc, chunk_relid, src_node, dst_node, delete_on_src_node);
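One subtlety in the chunk_copy.c hunks above: sizeof(NameStr(application_name)) still evaluates to NAMEDATALEN, because NameStr() expands to the struct's char[NAMEDATALEN] member and sizeof applied to an array lvalue does not decay it to a pointer. A self-contained check, again with the definitions mirroring PostgreSQL's c.h:

#include <assert.h>
#include <stdio.h>

#define NAMEDATALEN 64

typedef struct nameData
{
	char data[NAMEDATALEN];
} NameData;

#define NameStr(name) ((name).data)

int
main(void)
{
	NameData application_name;

	/* (application_name).data has type char[NAMEDATALEN], so sizeof
	 * sees the whole array rather than a decayed pointer. */
	assert(sizeof(NameStr(application_name)) == NAMEDATALEN);
	printf("sizeof(NameStr(...)) = %zu\n", sizeof(NameStr(application_name)));
	return 0;
}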
8 changes: 4 additions & 4 deletions tsl/src/compression/api.c
@@ -1280,8 +1280,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
int elevel = if_not_compressed ? NOTICE : ERROR;
elog(elevel,
"nothing to recompress in chunk %s.%s",
- uncompressed_chunk->fd.schema_name.data,
- uncompressed_chunk->fd.table_name.data);
+ NameStr(uncompressed_chunk->fd.schema_name),
+ NameStr(uncompressed_chunk->fd.table_name));
}

/*
@@ -1296,8 +1296,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
elog(ERROR,
"unexpected chunk status %d in chunk %s.%s",
status,
- uncompressed_chunk->fd.schema_name.data,
- uncompressed_chunk->fd.table_name.data);
+ NameStr(uncompressed_chunk->fd.schema_name),
+ NameStr(uncompressed_chunk->fd.table_name));

int i = 0, htcols_listlen;
ListCell *lc;
10 changes: 5 additions & 5 deletions tsl/src/compression/compression.c
@@ -931,7 +931,7 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
if (compressed_column_attr->atttypid != compressed_data_type_oid)
elog(ERROR,
"expected column '%s' to be a compressed data type",
- compression_info->attname.data);
+ NameStr(compression_info->attname));

if (compression_info->orderby_column_index > 0)
{
@@ -965,7 +965,7 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
if (column_attr->atttypid != compressed_column_attr->atttypid)
elog(ERROR,
"expected segment by column \"%s\" to be same type as uncompressed column",
- compression_info->attname.data);
+ NameStr(compression_info->attname));
*column = (PerColumn){
.segment_info = segment_info_new(column_attr),
.segmentby_column_index = compression_info->segmentby_column_index,
@@ -2750,7 +2750,7 @@ fix_and_reorder_index_filters(Relation comp_chunk_rel, Relation index_rel,
index_rel,
(Var *) newvar,
idx_filters,
- sf->column_name.data,
+ NameStr(sf->column_name),
(Node *) newclause,
opno);
}
@@ -2768,7 +2768,7 @@ fix_and_reorder_index_filters(Relation comp_chunk_rel, Relation index_rel,
index_rel,
(Var *) newvar,
idx_filters,
- sf->column_name.data,
+ NameStr(sf->column_name),
(Node *) newclause,
0);
}
@@ -2837,7 +2837,7 @@ find_matching_index(Relation comp_chunk_rel, List *index_filters)
char *attname = get_attname(RelationGetRelid(comp_chunk_rel), attnum, false);
BatchFilter *sf = lfirst(li);
/* ensure column exists in index relation */
- if (!strcmp(attname, sf->column_name.data))
+ if (!strcmp(attname, NameStr(sf->column_name)))
{
match_count++;
break;
2 changes: 1 addition & 1 deletion tsl/src/deparse.c
@@ -649,7 +649,7 @@ deparse_get_distributed_hypertable_create_command(Hypertable *ht)
", time_column_name => %s",
quote_literal_cstr(NameStr(time_dim->fd.column_name)));

- if (time_dim->fd.partitioning_func.data[0] != '\0')
+ if (NameStr(time_dim->fd.partitioning_func)[0] != '\0')
appendStringInfo(hypertable_cmd,
", time_partitioning_func => %s",
quote_literal_cstr(
2 changes: 1 addition & 1 deletion tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -1273,7 +1273,7 @@ chunk_joininfo_mutator(Node *node, CompressionInfo *context)
get_column_compressioninfo(context->hypertable_compression_info, column_name);

compressed_attno =
-     get_attnum(context->compressed_rte->relid, compressioninfo->attname.data);
+     get_attnum(context->compressed_rte->relid, NameStr(compressioninfo->attname));
compress_var->varno = context->compressed_rel->relid;
compress_var->varattno = compressed_attno;

2 changes: 1 addition & 1 deletion tsl/src/nodes/decompress_chunk/qual_pushdown.c
@@ -387,7 +387,7 @@ modify_expression(Node *node, QualPushdownContext *context)

var = copyObject(var);
compressed_attno =
-     get_attnum(context->compressed_rte->relid, compressioninfo->attname.data);
+     get_attnum(context->compressed_rte->relid, NameStr(compressioninfo->attname));
var->varno = context->compressed_rel->relid;
var->varattno = compressed_attno;

2 changes: 1 addition & 1 deletion tsl/src/remote/connection_cache.c
@@ -417,7 +417,7 @@ create_tuple_from_conn_entry(const ConnectionCacheEntry *entry, const TupleDesc
namestrcpy(&conn_node_name, remote_connection_node_name(entry->conn));

if (NULL == username)
- pg_snprintf(conn_user_name.data, NAMEDATALEN, "%u", entry->id.user_id);
+ pg_snprintf(NameStr(conn_user_name), NAMEDATALEN, "%u", entry->id.user_id);
else
namestrcpy(&conn_user_name, username);

