
Commit 311fb42

fix GitHub action for cpplint style check. (k2-fsa#146)

1 parent ae84d49 commit 311fb42

12 files changed: +81 -113 lines

.github/workflows/style_check.yml (+2 -20)

@@ -33,7 +33,6 @@ jobs:
       run: |
         python3 -m pip install --upgrade pip
         python3 -m pip install --upgrade flake8
-        pip install cpplint==1.4.5

     - name: Run flake8
       shell: bash
@@ -44,6 +43,7 @@ jobs:
         # exit-zero treats all errors as warnings.
         flake8 . --count --exit-zero --max-complexity=10 --max-line-length=79 --statistics

+    # TODO(fangjun): build a docker for style check
     # - name: Install cppcheck
     #   run: |
     #     cd /tmp
@@ -55,30 +55,12 @@ jobs:
     #     make -j
     #     sudo make install

-    - name: Create Build Directory
-      run: cmake -E make_directory ${{runner.workspace}}/build
-
-    - name: Configure CMake
-      shell: bash
-      working-directory: ${{runner.workspace}}/build
-      run: cmake -DCMAKE_CXX_CPPLINT=cpplint $GITHUB_WORKSPACE
-
     - name: Check style with cpplint
       shell: bash
       working-directory: ${{github.workspace}}
-      run: ./scripts/check_style_cpplint.sh ${{runner.workspace}}/build 1
+      run: ./scripts/check_style_cpplint.sh

     # - name: Run cppcheck
     #   shell: bash
     #   working-directory: ${{github.workspace}}
     #   run: ./scripts/run_cppcheck.sh ${{runner.workspace}}/build
-
-    - name: Install clang-tidy
-      run: |
-        sudo apt-get install clang-tidy-9
-
-    - name: Run clang-tidy
-      shell: bash
-      working-directory: ${{github.workspace}}
-      run: |
-        clang-tidy-9 --quiet -p ${{runner.workspace}}/build k2/csrc/*.cc k2/python/csrc/*.cc
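Note on the change: cpplint is no longer wired into the build through CMake's CMAKE_CXX_CPPLINT hook, and is no longer pip-installed in the workflow; scripts/check_style_cpplint.sh is now invoked from the repository root with no arguments, so the script itself presumably takes care of obtaining cpplint. The clang-tidy steps are dropped along with the build-directory steps, since clang-tidy-9 was pointed at the compile database in ${{runner.workspace}}/build that the deleted "Configure CMake" step produced.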

CMakeLists.txt (-1)

@@ -93,7 +93,6 @@ endif()
 enable_testing()

 list(APPEND CMAKE_MODULE_PATH ${CMAKE_SOURCE_DIR}/cmake)
-include(cpplint)
 include(glog)
 include(googletest)
 include(pybind11)

CPPLINT.cfg (+12)

@@ -1,2 +1,14 @@
 # Stop searching for additional config files.
 set noparent
+
+# There are lots of functions taking arguments like `Foo& foo`
+# in k2, but cpplint complains about this style.
+filter=-runtime/references
+
+# Suppress warnings for `// TODO` without a user name.
+# cpplint recommends `// TODO(username): `.
+filter=-readability/todo
+
+# cpplint does not support lambdas decorated with __host__ __device__
+# and issues a warning: You don't need a ; after a }
+filter=-readability/braces
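For reference, a minimal hypothetical C++ snippet (names invented for illustration) showing the kind of code each disabled filter is protecting:

    #include <cstdint>

    struct Array { float *data; int32_t dim; };

    // runtime/references: cpplint flags mutable reference parameters such as
    // this one and suggests a pointer; k2 uses `Foo &foo` arguments widely.
    void Scale(Array &array, float factor);

    // readability/todo: without the filter, cpplint demands an owner name,
    // i.e. "// TODO(username): ..." rather than the bare form below.
    // TODO: handle dim == 0

    // readability/braces: for a decorated lambda such as
    //   auto set_i = [=] __host__ __device__ (int32_t i) { array.data[i] = i; };
    // cpplint misparses the closing `};` and warns
    // "You don't need a ; after a }".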

cmake/cpplint.cmake (-34)

This file was deleted.

k2/csrc/array.h (+6 -1)

@@ -16,6 +16,8 @@

 #include <algorithm>
 #include <iostream>
+#include <utility>
+#include <vector>

 #include "k2/csrc/context.h"
 #include "k2/csrc/dtype.h"
@@ -400,6 +402,9 @@ class Array2 {
     Convert to possibly-different context, may require CPU/GPU transfer.
     The returned value may share the same underlying `data` memory as *this.
     This should work even for tensors with dim == 0.
+
+    Note that the returned array is contiguous in case the required context
+    is not compatible with the current context.
   */
   Array2<T> To(ContextPtr ctx) const {
     if (ctx->IsCompatible(*Context())) return *this;
@@ -459,7 +464,7 @@ class Array2 {
     with error.

   */
-  Array2(Tensor &t, bool copy_for_strides = true) {
+  explicit Array2(Tensor &t, bool copy_for_strides = true) {
     auto type = t.GetDtype();
     K2_CHECK_EQ(type, DtypeOf<T>::dtype);
     auto shape = t.GetShape();
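The only behavioral edit here is `explicit` on the Tensor constructor, which removes the implicit Tensor-to-Array2 conversion that cpplint's runtime/explicit check warns about. A self-contained miniature (not k2's real types) of what the keyword changes:

    struct Tensor { int dim; };

    struct Array2 {
      explicit Array2(const Tensor &t) : dim(t.dim) {}  // as in this commit
      int dim;
    };

    void Process(const Array2 &array);

    void Demo(const Tensor &t) {
      // Process(t);       // no longer compiles: the conversion is explicit
      Process(Array2(t));  // OK: the conversion is spelled out at the call site
    }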

k2/csrc/context.h (+4 -3)

@@ -19,6 +19,7 @@
 #include <memory>
 #include <ostream>
 #include <type_traits>
+#include <vector>

 #include "k2/csrc/log.h"

@@ -215,7 +216,7 @@ class BackgroundRunner {
   void Wait();

  private:
-  // TODO. My current thinking on this is for Background() to create threads
+  // TODO: My current thinking on this is for Background() to create threads
   // that Wait() can wait on, and to have the number of threads limited by a
   // global semaphore.
 };
@@ -451,7 +452,7 @@ static thread_local CudaStreamOverride g_stream_override;

 class With {
  public:
-  With(cudaStream_t stream) : stream_(stream) {
+  explicit With(cudaStream_t stream) : stream_(stream) {
     g_stream_override.Push(stream_);
   }
   ~With() { g_stream_override.Pop(stream_); }
@@ -472,7 +473,7 @@ class With {
  */
 class ParallelRunner {
  public:
-  ParallelRunner(ContextPtr c) : c_(c) {}
+  explicit ParallelRunner(ContextPtr c) : c_(c) {}

   // create a new stream, that first syncs with stream of c_ via an event. The
   // destructor will cause the stream of c_ to wait on this stream in the
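As the surrounding context lines show, `With` is an RAII guard: its constructor pushes a stream override and its destructor pops it, so marking the constructor `explicit` changes only how the guard is constructed, not the pattern itself. A standalone sketch of the idiom with simplified stand-in types (not k2's actual implementation):

    #include <stack>

    using StreamHandle = int;  // stand-in for cudaStream_t in this sketch

    thread_local std::stack<StreamHandle> g_stream_override;

    class With {
     public:
      explicit With(StreamHandle stream) : stream_(stream) {
        g_stream_override.push(stream_);  // override takes effect on entry
      }
      ~With() { g_stream_override.pop(); }  // previous stream restored on exit

     private:
      StreamHandle stream_;
    };

    // Usage: the override lasts exactly as long as the guard's scope.
    // { With w(my_stream); /* work here sees my_stream */ }  // restored here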

k2/csrc/math.cu (+1 -1)

@@ -32,7 +32,7 @@ int32_t HighestBitSet(int32_t i) {
 // returns random int from [min..max]
 int32_t RandInt(int32_t min, int32_t max) {
   CHECK_GE(max, min);
-  return (min + (rand() % (max + 1 - min)));
+  return (min + (rand() % (max + 1 - min)));  // NOLINT
 }

 // Returns random ints from a distribution that gives more weight to lower
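The trailing `// NOLINT` tells cpplint to skip its checks for that one line; here it silences the complaint that `rand()` is not thread-safe (cpplint's runtime/threadsafe_fn check suggests rand_r() instead). A minimal illustration with a hypothetical helper:

    #include <cstdint>
    #include <cstdlib>

    // Without the marker, cpplint flags rand() under runtime/threadsafe_fn;
    // the marker suppresses every check on this single line only.
    int32_t RandomInRange(int32_t min, int32_t max) {  // hypothetical helper
      return min + (rand() % (max + 1 - min));  // NOLINT
    }

cpplint also accepts `// NOLINT(category)` to suppress just one named check while keeping the rest of the line linted.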

k2/csrc/ops.h (+2 -4)

@@ -25,13 +25,11 @@
 // Note, I'm not sure about the name of this file, they are not ops like in
 // TensorFlow, but procedures..

-namespace {
 // TODO(haowen): manage/load block config with some classes? then we can get
 // different configuration depending on num_elements and data type.
 // block size for matrix transpose.
-constexpr int kTransTileDim = 32;
-constexpr int kTransBlockRows = 8;
-}  // namespace
+static constexpr int kTransTileDim = 32;
+static constexpr int kTransBlockRows = 8;

 namespace k2 {
 // TODO(haowen): move the implementations to file `op_inl.h` or
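This change is driven by cpplint's build/namespaces check, which forbids unnamed namespaces in header files; `static constexpr` is the replacement it accepts, and it gives the constants the same internal linkage, so each translation unit including ops.h gets its own copy. A sketch of the two forms side by side:

    // Before: internal linkage via an unnamed namespace; cpplint flags this
    // in a header ([build/namespaces]).
    //   namespace {
    //   constexpr int kTransTileDim = 32;
    //   }  // namespace

    // After: internal linkage via static, which cpplint accepts in headers.
    static constexpr int kTransTileDim = 32;
    static constexpr int kTransBlockRows = 8;

Strictly speaking, namespace-scope constexpr variables already have internal linkage in C++, so the `static` is redundant but harmless; the point of the edit is to satisfy the linter.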

k2/csrc/ops_inl.h (+13 -9)

@@ -13,16 +13,17 @@
  * See LICENSE for clarification regarding multiple authors
  */

+#ifndef K2_CSRC_OPS_INL_H_
+#define K2_CSRC_OPS_INL_H_
+
 #ifndef IS_IN_K2_CSRC_OPS_H_
 #error "this file is supposed to be included only by ops.h"
 #endif

-// No header guard for this file since it will only be included
-// in ops.h
-
 #include <cassert>
-#include <cub/cub.cuh>
+#include <cub/cub.cuh>  // NOLINT
 #include <type_traits>
+#include <vector>

 namespace k2 {

@@ -53,8 +54,8 @@ Array1<T> Append(int32_t num_arrays, const Array1<T> **src) {
     for (int32_t i = 0; i < num_arrays; i++) {
       int32_t offset = row_splits_vec[i], this_dim = src[i]->Dim();
       const int32_t *this_src_data = src[i]->Data();
-      memcpy((void *)ans_data, (const void *)this_src_data,
-             sizeof(T) * this_dim);
+      memcpy(static_cast<void *>(ans_data),
+             static_cast<const void *>(this_src_data), sizeof(T) * this_dim);
       ans_data += this_dim;
     }
   } else {
@@ -134,7 +135,7 @@ void ExclusiveSumDeref(Array1<T *> &src, Array1<T> *dest) {
   struct PtrPtr {
     T **data;
     __host__ __device__ T operator[](int32_t i) { return *(data[i]); }
-    PtrPtr(T **data) : data(data) {}
+    explicit PtrPtr(T **data) : data(data) {}
     PtrPtr(const PtrPtr &src) = default;
   };

@@ -212,10 +213,11 @@ Array1<T> RandUniformArray1(ContextPtr &c, int32_t dim, T min_value,
   } else if (std::is_floating_point<T>::value ||
              std::abs(min_value) > RAND_MAX || std::abs(max_value) > RAND_MAX) {
     for (int32_t i = 0; i < dim; i++)
-      data[i] = min_value + (rand() * (max_value - min_value) / RAND_MAX);
+      data[i] =
+          min_value + (rand() * (max_value - min_value) / RAND_MAX);  // NOLINT
   } else {
     for (int32_t i = 0; i < dim; i++)
-      data[i] = min_value + (rand() % (max_value + 1 - min_value));
+      data[i] = min_value + (rand() % (max_value + 1 - min_value));  // NOLINT
   }
   return temp.To(c);
 }
@@ -254,3 +256,5 @@ Array2<T> ToContiguous(const Array2<T> &src) {
 }

 }  // namespace k2
+
+#endif  // K2_CSRC_OPS_INL_H_
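ops_inl.h now carries a conventional include guard (satisfying cpplint's build/header_guard check) alongside the pre-existing `#error` gate that restricts it to being included from ops.h. A sketch of how the two presumably fit together; the structure of ops.h is assumed from the #error message, not shown in this diff:

    // ops.h (assumed structure)
    #ifndef K2_CSRC_OPS_H_
    #define K2_CSRC_OPS_H_

    // ... declarations ...

    #define IS_IN_K2_CSRC_OPS_H_  // unlocks the gate in ops_inl.h
    #include "k2/csrc/ops_inl.h"  // template definitions live there
    #undef IS_IN_K2_CSRC_OPS_H_   // (this sketch's choice; ops.h may keep it)

    #endif  // K2_CSRC_OPS_H_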

k2/csrc/ragged.h (+4 -2)

@@ -12,6 +12,8 @@
 #ifndef K2_CSRC_RAGGED_H_
 #define K2_CSRC_RAGGED_H_

+#include <vector>
+
 #include "k2/csrc/algorithms.h"
 #include "k2/csrc/array.h"
 #include "k2/csrc/log.h"
@@ -125,7 +127,7 @@ class RaggedShape {

   RaggedShapeIndexIterator Iterator();

-  RaggedShape(std::vector<RaggedShapeDim> &axes, bool check = true)
+  explicit RaggedShape(std::vector<RaggedShapeDim> &axes, bool check = true)
       : axes_(axes) {
     if (check) Check();
   }
@@ -540,7 +542,7 @@ Ragged<T> RandomRagged(T min_value = static_cast<T>(0),

 }  // namespace k2

-// TODO(dan), include guard maybe.
+// TODO(dan): include guard maybe.
 #include "k2/csrc/ragged_inl.h"

 #endif  // K2_CSRC_RAGGED_H_

k2/csrc/utils.h (+4 -3)

@@ -13,6 +13,7 @@
 #define K2_CSRC_UTILS_H_

 #include <algorithm>
+#include <vector>

 #include "k2/csrc/context.h"

@@ -270,9 +271,9 @@ RowIdFromRowSplits(int32_t num_rows, const int32_t *row_splits, int32_t index,
   // auto i =
   //     std::lower_bound(row_splits + 1, row_splits + num_rows + 1, index) - 1;
   // K2_DCHECK(static_cast<uint32_t>(i) < static_cast<uint32_t>(num_rows));
-  // TODO! Implement std::lower_bound ourselves.
+  // TODO: Implement std::lower_bound ourselves.
   // return *i;
-  return 0;  // TODO! Does not work.
+  return 0;  // TODO: Does not work.
 }

 /*
@@ -413,7 +414,7 @@ void EvalWithRedirect(cudaStream_t stream, int32_t num_jobs,
                       TaskRedirect *redirect, int32_t min_threads_per_job,
                       int32_t tot_work, int32_t target_num_loops,
                       LambdaT &lambda) {
-  // TODO..
+  // TODO:
 }

 __host__ __device__ __forceinline__ int32_t FloatAsInt(float f) {
