Improve for clang-tidy
kenneth-jia committed Nov 8, 2022
1 parent 0ec1511 commit c3de6e4
Showing 44 changed files with 399 additions and 301 deletions.
4 changes: 3 additions & 1 deletion .clang-tidy
@@ -20,6 +20,7 @@ Checks: "*,\
-hicpp-no-array-decay,\
-hicpp-deprecated-headers,\
-hicpp-braces-around-statements,\
-cert-err60-cpp,\
-cppcoreguidelines-special-member-functions,\
-cppcoreguidelines-macro-usage,\
-cppcoreguidelines-avoid-magic-numbers,\
@@ -41,6 +42,7 @@ Checks: "*,\
-readability-function-cognitive-complexity,\
-bugprone-unused-return-value,\
-bugprone-easily-swappable-parameters,\
-bugprone-exception-escape,\
-cert-err58-cpp,\
-cert-err60-cpp"
-clang-analyzer-optin.performance.Padding"
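Two of the exception-related exclusions touched by this change, cert-err60-cpp and bugprone-exception-escape, target code like the following minimal sketch (hypothetical code, not from this repository):

// suppressed_checks_demo.cc -- hypothetical example of code that trips the
// two exception-related checks disabled in this config.
#include <string>

// cert-err60-cpp requires thrown objects to be nothrow copy constructible.
// An exception type carrying a std::string member (whose copy constructor
// may allocate and throw) is flagged at every `throw` site.
struct ConfigError
{
    std::string detail;
};

// bugprone-exception-escape warns when an exception can escape a noexcept
// function (or a destructor), which would call std::terminate at runtime.
void refreshMetadata() noexcept
{
    throw ConfigError{"bootstrap.servers missing"};   // would escape
}

int main()
{
    try {
        throw ConfigError{"bad config"};   // cert-err60-cpp warns here
    } catch (const ConfigError& e) {
        return e.detail.empty() ? 0 : 1;
    }
}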

1 change: 0 additions & 1 deletion .github/workflows/kafka_api_bazel_build.yml
@@ -48,7 +48,6 @@ jobs:
if [[ ${OS_VERSION} == 'ubuntu'* ]]; then
sudo apt install -y libboost-all-dev
elif [[ ${OS_VERSION} == 'macos'* ]]; then
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew install boost
fi
23 changes: 17 additions & 6 deletions .github/workflows/kafka_api_ci_tests.yml
@@ -30,7 +30,7 @@ jobs:
strategy:
matrix:
include:
- os: macos-11
- os: macos-12
build-cxx: clang++
test-labels: unit|integration
enable-ut-stubs: true
@@ -39,6 +39,10 @@
build-cxx: clang++
test-labels: robustness

- os: macos-12
build-cxx: clang++
check-option: clang-tidy

- os: ubuntu-22.04
build-cxx: g++
build-type: Debug
@@ -93,11 +97,15 @@
steps:
- uses: actions/checkout@v3

- name: Update the List for Available Packages
if: contains(matrix.os, 'ubuntu')
- name: Preparation
if: ${{!contains(matrix.os, 'windows')}}
run: |
sed -e 's/azure.archive.ubuntu.com/us.archive.ubuntu.com/g' -e t -e d /etc/apt/sources.list | sudo tee /etc/apt/sources.list.d/nonazure.list
sudo apt-get update
if [[ ${OS_VERSION} == 'ubuntu'* ]]; then
sed -e 's/azure.archive.ubuntu.com/us.archive.ubuntu.com/g' -e t -e d /etc/apt/sources.list | sudo tee /etc/apt/sources.list.d/nonazure.list
sudo apt-get update
elif [[ ${OS_VERSION} == 'macos'* ]]; then
echo "CPU_CORE_NUM=3" >> $GITHUB_ENV
fi
- name: Install Dependencies
run: |
@@ -110,6 +118,7 @@
sudo apt install -y clang-tidy
elif [[ ${OS_VERSION} == 'macos'* ]]; then
brew install llvm
export PATH=/usr/local/opt/llvm/bin/:$PATH
fi
fi
@@ -125,7 +134,6 @@
if [[ ${OS_VERSION} == 'ubuntu'* ]]; then
sudo apt install -y libboost-all-dev
elif [[ ${OS_VERSION} == 'macos'* ]]; then
/bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)"
brew install boost
fi
@@ -184,6 +192,9 @@ jobs:
run: |
cd ${BUILD_SUB_DIR}
# Restore the PATH from environment file
export PATH=${{ env.PATH }}
if [ ${CXX_STANDARD} ]; then
export CMAKE_CXX_STANDARD="-DCMAKE_CXX_STANDARD=${CXX_STANDARD}"
fi
15 changes: 14 additions & 1 deletion .github/workflows/kafka_api_demo_conan_build.yml
@@ -20,10 +20,23 @@ jobs:

runs-on: ${{ matrix.os }}

env:
OS_VERSION: ${{ matrix.os }}

steps:
- uses: actions/checkout@v2

- name: Prepare
- name: Prepare (non-windows)
if: ${{!contains(matrix.os, 'windows')}}
run: |
if [[ ${OS_VERSION} == 'macos'* ]]; then
brew install conan
else
pip3 install conan
fi
- name: Prepare (windows)
if: ${{contains(matrix.os, 'windows')}}
run: |
pip3 install conan
6 changes: 3 additions & 3 deletions examples/kafka_async_producer_copy_payload.cc
@@ -13,13 +13,13 @@ int main(int argc, char **argv)
exit(argc == 1 ? 0 : 1); // NOLINT
}

std::string brokers = argv[1];
kafka::Topic topic = argv[2];
const std::string brokers = argv[1];
const kafka::Topic topic = argv[2];

try {

// Create configuration object
kafka::Properties props ({
const kafka::Properties props ({
{"bootstrap.servers", brokers},
{"enable.idempotence", "true"},
});
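The edits to the example programs in this commit are all the same mechanical change: locals that are never reassigned become const, satisfying clang-tidy's const-correctness style checks. A standalone sketch of the pattern (hypothetical file, not from the repository):

// const_locals_demo.cc -- hypothetical sketch of the const-qualification
// pattern applied throughout the examples.
#include <iostream>
#include <string>

int main(int argc, char** argv)
{
    if (argc != 3) {
        std::cerr << "Usage: " << argv[0] << " <brokers> <topic>\n";
        return 1;
    }

    // Before: std::string brokers = argv[1];
    // After: const-qualified, since the values are never mutated later.
    const std::string brokers = argv[1];
    const std::string topic   = argv[2];

    std::cout << "brokers=" << brokers << ", topic=" << topic << "\n";
    return 0;
}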
6 changes: 3 additions & 3 deletions examples/kafka_async_producer_not_copy_payload.cc
@@ -13,13 +13,13 @@ int main(int argc, char **argv)
exit(argc == 1 ? 0 : 1); // NOLINT
}

std::string brokers = argv[1];
kafka::Topic topic = argv[2];
const std::string brokers = argv[1];
const kafka::Topic topic = argv[2];

try {

// Create configuration object
kafka::Properties props ({
const kafka::Properties props ({
{"bootstrap.servers", brokers},
{"enable.idempotence", "true"},
});
6 changes: 3 additions & 3 deletions examples/kafka_auto_commit_consumer.cc
@@ -10,13 +10,13 @@ int main(int argc, char **argv)
exit(argc == 1 ? 0 : 1); // NOLINT
}

std::string brokers = argv[1];
kafka::Topic topic = argv[2];
const std::string brokers = argv[1];
const kafka::Topic topic = argv[2];

try {

// Create configuration object
kafka::Properties props ({
const kafka::Properties props ({
{"bootstrap.servers", brokers},
{"enable.auto.commit", "true"}
});
6 changes: 3 additions & 3 deletions examples/kafka_manual_commit_consumer.cc
@@ -10,13 +10,13 @@ int main(int argc, char **argv)
exit(argc == 1 ? 0 : 1); // NOLINT
}

std::string brokers = argv[1];
kafka::Topic topic = argv[2];
const std::string brokers = argv[1];
const kafka::Topic topic = argv[2];

try {

// Create configuration object
kafka::Properties props ({
const kafka::Properties props ({
{"bootstrap.servers", brokers},
});

8 changes: 4 additions & 4 deletions examples/kafka_sync_producer.cc
@@ -12,13 +12,13 @@ int main(int argc, char **argv)
exit(argc == 1 ? 0 : 1); // NOLINT
}

std::string brokers = argv[1];
kafka::Topic topic = argv[2];
const std::string brokers = argv[1];
const kafka::Topic topic = argv[2];

try {

// Create configuration object
kafka::Properties props({
const kafka::Properties props({
{"bootstrap.servers", brokers},
{"enable.idempotence", "true"},
});
@@ -37,7 +37,7 @@ int main(int argc, char **argv)

// Send the message.
try {
producer::RecordMetadata metadata = producer.syncSend(record);
const producer::RecordMetadata metadata = producer.syncSend(record);
std::cout << "% Message delivered: " << metadata.toString() << std::endl;
} catch (const kafka::KafkaException& e) {
std::cerr << "% Message delivery failed: " << e.error().message() << std::endl;
22 changes: 11 additions & 11 deletions include/kafka/AdminClient.h
@@ -83,9 +83,9 @@ AdminClient::getPerTopicResults(const rd_kafka_topic_result_t** topicResults, st
for (std::size_t i = 0; i < topicCount; ++i)
{
const rd_kafka_topic_result_t* topicResult = topicResults[i];
if (rd_kafka_resp_err_t topicError = rd_kafka_topic_result_error(topicResult))
if (const rd_kafka_resp_err_t topicError = rd_kafka_topic_result_error(topicResult))
{
std::string detailedMsg = "topic[" + std::string(rd_kafka_topic_result_name(topicResult)) + "] with error[" + rd_kafka_topic_result_error_string(topicResult) + "]";
const std::string detailedMsg = "topic[" + std::string(rd_kafka_topic_result_name(topicResult)) + "] with error[" + rd_kafka_topic_result_error_string(topicResult) + "]";
errors.emplace_back(topicError, detailedMsg);
}
}
@@ -99,9 +99,9 @@ AdminClient::getPerTopicPartitionResults(const rd_kafka_topic_partition_list_t*

for (int i = 0; i < (partitionResults ? partitionResults->cnt : 0); ++i)
{
if (rd_kafka_resp_err_t partitionError = partitionResults->elems[i].err)
if (const rd_kafka_resp_err_t partitionError = partitionResults->elems[i].err)
{
std::string detailedMsg = "topic-partition[" + std::string(partitionResults->elems[i].topic) + "-" + std::to_string(partitionResults->elems[i].partition) + "] with error[" + rd_kafka_err2str(partitionError) + "]";
const std::string detailedMsg = "topic-partition[" + std::string(partitionResults->elems[i].topic) + "-" + std::to_string(partitionResults->elems[i].partition) + "] with error[" + rd_kafka_err2str(partitionError) + "]";
errors.emplace_back(partitionError, detailedMsg);
}
}
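The two hunks above apply one idiom repeatedly: the error code is declared const directly inside the if condition, so the branch runs only for a non-zero code and the name never leaks into the enclosing scope. A self-contained sketch, with ErrorCode and doRequest as hypothetical stand-ins for the librdkafka types:

// if_condition_declaration_demo.cc -- hypothetical sketch of the idiom.
#include <cstdio>

using ErrorCode = int;               // stand-in for rd_kafka_resp_err_t; 0 means OK

ErrorCode doRequest() { return 42; } // always fails, to exercise the branch

int main()
{
    // Declare and test in one step: `err` is const and scoped to the `if`;
    // the body executes only when the value converts to true (non-zero).
    if (const ErrorCode err = doRequest())
    {
        std::printf("request failed with error[%d]\n", err);
        return 1;
    }
    return 0;
}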
@@ -148,10 +148,10 @@ AdminClient::createTopics(const Topics& topics,

for (const auto& conf: topicConfig.map())
{
rd_kafka_resp_err_t err = rd_kafka_NewTopic_set_config(rkNewTopics.back().get(), conf.first.c_str(), conf.second.c_str());
const rd_kafka_resp_err_t err = rd_kafka_NewTopic_set_config(rkNewTopics.back().get(), conf.first.c_str(), conf.second.c_str());
if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
{
std::string errMsg = "Invalid config[" + conf.first + "=" + conf.second + "]";
const std::string errMsg = "Invalid config[" + conf.first + "=" + conf.second + "]";
KAFKA_API_DO_LOG(Log::Level::Err, errMsg.c_str());
return admin::CreateTopicsResult(Error{RD_KAFKA_RESP_ERR__INVALID_ARG, errMsg});
}
@@ -189,7 +189,7 @@

std::list<Error> errors;

if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
if (const rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
{
errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get()));
}
@@ -262,7 +262,7 @@ AdminClient::deleteTopics(const Topics& topics, std::chrono::milliseconds timeou

std::list<Error> errors;

if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
if (const rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
{
errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get()));
}
@@ -281,7 +281,7 @@ inline admin::ListTopicsResult
AdminClient::listTopics(std::chrono::milliseconds timeout)
{
const rd_kafka_metadata_t* rk_metadata = nullptr;
rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(), true, nullptr, &rk_metadata, convertMsDurationToInt(timeout));
const rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(), true, nullptr, &rk_metadata, convertMsDurationToInt(timeout));
auto guard = rd_kafka_metadata_unique_ptr(rk_metadata);

if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
@@ -303,7 +303,7 @@ AdminClient::deleteRecords(const TopicPartitionOffsets& topicPartitionOffsets,
{
auto rk_queue = rd_kafka_queue_unique_ptr(rd_kafka_queue_new(getClientHandle()));

rd_kafka_DeleteRecords_unique_ptr rkDeleteRecords(rd_kafka_DeleteRecords_new(createRkTopicPartitionList(topicPartitionOffsets)));
const rd_kafka_DeleteRecords_unique_ptr rkDeleteRecords(rd_kafka_DeleteRecords_new(createRkTopicPartitionList(topicPartitionOffsets)));
std::array<rd_kafka_DeleteRecords_t*, 1> rk_del_records{rkDeleteRecords.get()};

rd_kafka_DeleteRecords(getClientHandle(), rk_del_records.data(), rk_del_records.size(), nullptr, rk_queue.get());
@@ -331,7 +331,7 @@ AdminClient::deleteRecords(const TopicPartitionOffsets& topicPartitionOffsets,

std::list<Error> errors;

if (rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
if (const rd_kafka_resp_err_t respErr = rd_kafka_event_error(rk_ev.get()))
{
errors.emplace_back(respErr, rd_kafka_event_error_string(rk_ev.get()));
}
1 change: 1 addition & 0 deletions include/kafka/BrokerMetadata.h
@@ -127,6 +127,7 @@ inline std::vector<std::shared_ptr<BrokerMetadata::Node>>
BrokerMetadata::nodes() const
{
std::vector<std::shared_ptr<BrokerMetadata::Node>> ret;
ret.reserve(_nodes.size());
for (const auto& nodeInfo: _nodes)
{
ret.emplace_back(nodeInfo.second);
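The added reserve() addresses clang-tidy's performance-inefficient-vector-operation warning: growing a vector inside a loop can reallocate repeatedly even though the final size is known up front. A minimal standalone sketch of the fix (hypothetical types standing in for the metadata nodes):

// reserve_before_loop_demo.cc -- minimal sketch of the reserve() fix.
#include <map>
#include <memory>
#include <string>
#include <vector>

int main()
{
    const std::map<int, std::shared_ptr<std::string>> nodes{
        {1, std::make_shared<std::string>("broker-1")},
        {2, std::make_shared<std::string>("broker-2")},
    };

    std::vector<std::shared_ptr<std::string>> ret;
    ret.reserve(nodes.size());       // allocate once; the loop never reallocates
    for (const auto& nodeInfo: nodes)
    {
        ret.emplace_back(nodeInfo.second);
    }
    return ret.size() == nodes.size() ? 0 : 1;
}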
2 changes: 1 addition & 1 deletion include/kafka/ConsumerRecord.h
@@ -55,7 +55,7 @@ class ConsumerRecord
Timestamp timestamp() const
{
rd_kafka_timestamp_type_t tstype{};
Timestamp::Value tsValue = rd_kafka_message_timestamp(_rk_msg.get(), &tstype);
const Timestamp::Value tsValue = rd_kafka_message_timestamp(_rk_msg.get(), &tstype);
return {tsValue, tstype};
}

2 changes: 1 addition & 1 deletion include/kafka/Header.h
@@ -27,7 +27,7 @@ struct Header
*/
std::string toString() const
{
return key + ":" + value.toString();
return (key.empty() ? "[null]" : key) + ":" + value.toString();
}

Key key;
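This Header.h change is behavioural, not just stylistic: an empty header key previously serialized as a string beginning with a bare ':'. A simplified stand-in (the free function here is hypothetical; the real code is the kafka::Header::toString() member above) showing the new output:

// header_to_string_demo.cc -- simplified stand-in for kafka::Header::toString().
#include <iostream>
#include <string>

std::string headerToString(const std::string& key, const std::string& value)
{
    // Empty keys now render as "[null]" instead of producing ":value".
    return (key.empty() ? "[null]" : key) + ":" + value;
}

int main()
{
    std::cout << headerToString("", "v1")   << "\n";   // prints: [null]:v1
    std::cout << headerToString("k1", "v1") << "\n";   // prints: k1:v1
    return 0;
}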
24 changes: 16 additions & 8 deletions include/kafka/KafkaClient.h
@@ -405,7 +405,11 @@ KafkaClient::KafkaClient(ClientType clientType,
continue;
}

rd_kafka_conf_res_t result = rd_kafka_conf_set(rk_conf.get(), prop.first.c_str(), prop.second.c_str(), errInfo.str(), errInfo.capacity());
const rd_kafka_conf_res_t result = rd_kafka_conf_set(rk_conf.get(),
prop.first.c_str(),
prop.second.c_str(),
errInfo.str(),
errInfo.capacity());
if (result == RD_KAFKA_CONF_OK)
{
_properties.put(prop.first, prop.second);
@@ -434,7 +438,7 @@ KafkaClient::KafkaClient(ClientType clientType,
// Interceptor
if (!_interceptors.empty())
{
Error result{ rd_kafka_conf_interceptor_add_on_new(rk_conf.get(), "on_new", KafkaClient::configInterceptorOnNew, nullptr) };
const Error result{ rd_kafka_conf_interceptor_add_on_new(rk_conf.get(), "on_new", KafkaClient::configInterceptorOnNew, nullptr) };
KAFKA_THROW_IF_WITH_ERROR(result);
}

@@ -447,10 +451,10 @@

// Add brokers
auto brokers = properties.getProperty(BOOTSTRAP_SERVERS);
if (rd_kafka_brokers_add(getClientHandle(), brokers->c_str()) == 0)
if (!brokers || rd_kafka_brokers_add(getClientHandle(), brokers->c_str()) == 0)
{
KAFKA_THROW_ERROR(Error(RD_KAFKA_RESP_ERR__INVALID_ARG,\
"No broker could be added successfully, BOOTSTRAP_SERVERS=[" + *brokers + "]"));
"No broker could be added successfully, BOOTSTRAP_SERVERS=[" + (brokers ? *brokers : "NA") + "]"));
}

_opened = true;
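Besides the const additions, the constructor hunk above fixes a potential null dereference: getProperty(BOOTSTRAP_SERVERS) can return an empty optional, yet the old code dereferenced *brokers unconditionally when building the error message. A self-contained sketch of the guard, with std::optional standing in for the library's own optional type:

// optional_guard_demo.cc -- sketch of the added null-guard, using std::optional
// in place of the library's optional property lookup.
#include <iostream>
#include <map>
#include <optional>
#include <string>

std::optional<std::string> getProperty(const std::map<std::string, std::string>& props,
                                       const std::string& name)
{
    const auto it = props.find(name);
    return (it == props.end()) ? std::nullopt
                               : std::optional<std::string>(it->second);
}

int main()
{
    const std::map<std::string, std::string> props;   // bootstrap.servers unset

    const auto brokers = getProperty(props, "bootstrap.servers");
    // Pre-fix behaviour would dereference *brokers here even when empty.
    std::cout << "BOOTSTRAP_SERVERS=[" << (brokers ? *brokers : "NA") << "]\n";
    return brokers ? 0 : 1;
}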
@@ -503,7 +507,7 @@ KafkaClient::getProperty(const std::string& name) const
if (valueSize > valueBuf.size())
{
valueBuf.resize(valueSize);
[[maybe_unused]] rd_kafka_conf_res_t result = rd_kafka_conf_get(conf, name.c_str(), valueBuf.data(), &valueSize);
[[maybe_unused]] const rd_kafka_conf_res_t result = rd_kafka_conf_get(conf, name.c_str(), valueBuf.data(), &valueSize);
assert(result == RD_KAFKA_CONF_OK);
}

@@ -538,7 +542,7 @@ KafkaClient::onStats(const std::string& jsonString)
inline int
KafkaClient::statsCallback(rd_kafka_t* rk, char* jsonStrBuf, size_t jsonStrLen, void* /*opaque*/)
{
std::string stats(jsonStrBuf, jsonStrBuf+jsonStrLen);
const std::string stats(jsonStrBuf, jsonStrBuf+jsonStrLen);
kafkaClient(rk).onStats(stats);
return 0;
}
@@ -618,7 +622,7 @@ KafkaClient::fetchBrokerMetadata(const std::string& topic, std::chrono::millisec
{
const rd_kafka_metadata_t* rk_metadata = nullptr;
// Here the input parameter for `all_topics` is `true`, since we want the `cgrp_update`
rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(), true, nullptr, &rk_metadata, convertMsDurationToInt(timeout));
const rd_kafka_resp_err_t err = rd_kafka_metadata(getClientHandle(),
true,
nullptr,
&rk_metadata,
convertMsDurationToInt(timeout));

auto guard = rd_kafka_metadata_unique_ptr(rk_metadata);

@@ -670,7 +678,7 @@ KafkaClient::fetchBrokerMetadata(const std::string& topic, std::chrono::millisec
{
const rd_kafka_metadata_partition& metadata_partition = metadata_topic->partitions[i];

Partition partition = metadata_partition.id;
const Partition partition = metadata_partition.id;

if (metadata_partition.err != 0)
{