Skip to content

Commit

Permalink
[fix](index compaction) Fix construct index compaction columns core when reader close error (#46508)
Browse files Browse the repository at this point in the history

fix #46507
  • Loading branch information
qidaye authored Jan 8, 2025
1 parent f360ee2 commit e904800
Show file tree
Hide file tree
Showing 3 changed files with 199 additions and 61 deletions.
136 changes: 75 additions & 61 deletions be/src/olap/compaction.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -858,72 +858,86 @@ Status Compaction::construct_output_rowset_writer(RowsetWriterContext& ctx, bool
return false;
}
for (auto i = 0; i < rowset->num_segments(); i++) {
auto segment_file = rowset->segment_file_path(i);
io::Path segment_path(segment_file);
auto inverted_index_file_reader =
std::make_unique<InvertedIndexFileReader>(
fs, segment_path.parent_path(),
segment_path.filename(),
_cur_tablet_schema
->get_inverted_index_storage_format());
bool open_idx_file_cache = false;
auto st = inverted_index_file_reader->init(
config::inverted_index_read_buffer_size,
open_idx_file_cache);
if (!st.ok()) {
LOG(WARNING) << "init index "
<< inverted_index_file_reader->get_index_file_path(
index_meta)
<< " error:" << st;
return false;
}

bool exists = false;
if (!inverted_index_file_reader
->index_file_exist(index_meta, &exists)
.ok()) {
LOG(ERROR) << inverted_index_file_reader->get_index_file_path(
index_meta)
<< " fs->exists error";
return false;
}

if (!exists) {
std::string index_file_path;
try {
auto segment_file = rowset->segment_file_path(i);
io::Path segment_path(segment_file);
auto inverted_index_file_reader =
std::make_unique<InvertedIndexFileReader>(
fs, segment_path.parent_path(),
segment_path.filename(),
_cur_tablet_schema
->get_inverted_index_storage_format());
bool open_idx_file_cache = false;
auto st = inverted_index_file_reader->init(
config::inverted_index_read_buffer_size,
open_idx_file_cache);
index_file_path =
inverted_index_file_reader->get_index_file_path(
index_meta);
if (!st.ok()) {
LOG(WARNING) << "init index " << index_file_path
<< " error:" << st;
return false;
}

bool exists = false;
if (!inverted_index_file_reader
->index_file_exist(index_meta, &exists)
.ok()) {
LOG(ERROR) << index_file_path << " fs->exists error";
return false;
}

if (!exists) {
LOG(WARNING)
<< "tablet[" << _tablet->tablet_id()
<< "] column_unique_id[" << col_unique_id << "],"
<< index_file_path
<< " is not exists, will skip index compaction";
return false;
}

// check index meta
auto result = inverted_index_file_reader->open(index_meta);
if (!result.has_value()) {
LOG(WARNING) << "open index " << index_file_path
<< " error:" << result.error();
return false;
}
auto reader = std::move(result.value());
std::vector<std::string> files;
reader->list(&files);
reader->close();

DBUG_EXECUTE_IF(
"Compaction::construct_skip_inverted_index_index_"
"reader_"
"close_error",
{
_CLTHROWA(CL_ERR_IO,
"debug point: reader close error");
})

// why is 3?
// slice type index file at least has 3 files: null_bitmap, segments_N, segments.gen
if (files.size() < 3) {
LOG(WARNING) << "tablet[" << _tablet->tablet_id()
<< "] column_unique_id[" << col_unique_id
<< "]," << index_file_path
<< " is corrupted, will skip index compaction";
return false;
}
} catch (CLuceneError& err) {
LOG(WARNING) << "tablet[" << _tablet->tablet_id()
<< "] column_unique_id[" << col_unique_id << "],"
<< inverted_index_file_reader->get_index_file_path(
index_meta)
<< " is not exists, will skip index compaction";
return false;
}

// check index meta
auto result = inverted_index_file_reader->open(index_meta);
if (!result.has_value()) {
LOG(WARNING) << "open index "
<< inverted_index_file_reader->get_index_file_path(
index_meta)
<< " error:" << result.error();
return false;
}
auto reader = std::move(result.value());
std::vector<std::string> files;
reader->list(&files);
reader->close();

// why is 3?
// slice type index file at least has 3 files: null_bitmap, segments_N, segments.gen
if (files.size() < 3) {
LOG(WARNING) << "tablet[" << _tablet->tablet_id()
<< "] column_unique_id[" << col_unique_id << "],"
<< inverted_index_file_reader->get_index_file_path(
index_meta)
<< " is corrupted, will skip index compaction";
<< "] column_unique_id[" << col_unique_id
<< "] open index[" << index_file_path
<< "], will skip index compaction, error:"
<< err.what();
return false;
}
}
return true;
return true;
});
if (all_have_inverted_index) {
ctx.columns_to_do_index_compaction.insert(col_unique_id);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,106 @@
3 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
1 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
2 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100
3 andy andy love apple 100

-- !sql --
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
1 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
2 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 bason bason hate pear 99
2 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --

-- !sql --
1 bason bason hate pear 99
2 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 bason bason hate pear 99
2 bason bason hate pear 99
3 bason bason hate pear 99

-- !sql --
1 bason bason hate pear 99
2 bason bason hate pear 99
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -223,6 +223,30 @@ suite("test_index_compaction_failure_injection", "nonConcurrent") {
assert (rowsetCount == 1 * replicaNum)

run_sql.call()

// insert more data, trigger full compaction again
insert_data.call()

// insert 6 rows, so there are 7 rowsets.
rowsetCount = get_rowset_count.call(tablets);
assert (rowsetCount == 7 * replicaNum)

// trigger full compaction for all tablets with fault injection
try {
GetDebugPoint().enableDebugPointForAllBEs("Compaction::construct_skip_inverted_index_index_reader_close_error")
logger.info("trigger_full_compaction_on_tablets with fault injection: Compaction::construct_skip_inverted_index_index_reader_close_error")
trigger_full_compaction_on_tablets.call(tablets)
wait_full_compaction_done.call(tablets)
} finally {
GetDebugPoint().disableDebugPointForAllBEs("Compaction::construct_skip_inverted_index_index_reader_close_error")
}

// the debug point will cause index skip index compaction, so the compaction process will be successful.
// after full compaction, there is only 1 rowset.
rowsetCount = get_rowset_count.call(tablets);
assert (rowsetCount == 1 * replicaNum)

run_sql.call()
}

boolean invertedIndexCompactionEnable = false
Expand Down

0 comments on commit e904800

Please sign in to comment.