
Commit 25ae95a

minor spelling tweaks

1 parent b0a1563

11 files changed (+22 −22 lines)


examples/DistributedTraining.cpp (+1 −1)

@@ -37,7 +37,7 @@ int main() {
 
   const int inputIdx = 0, targetIdx = 1;
 
-  // Model defintion - 2-layer Perceptron with ReLU activation
+  // Model definition - 2-layer Perceptron with ReLU activation
   auto model = std::make_shared<Sequential>();
   model->add(Linear(nFeat, 100));
   model->add(ReLU());
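For reference, the "2-layer Perceptron" named in the corrected comment is the stack this hunk begins; a minimal sketch of how the model plausibly continues (`nClasses` is a hypothetical output size, not taken from the example):

  auto model = std::make_shared<Sequential>();
  model->add(Linear(nFeat, 100));     // layer 1: nFeat -> 100 hidden units
  model->add(ReLU());                 // the activation the comment refers to
  model->add(Linear(100, nClasses));  // layer 2: hidden -> outputs (hypothetical size)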

examples/Perceptron.cpp (+1 −1)

@@ -24,7 +24,7 @@ int main() {
   TensorDataset data({X, Y});
   const int inputIdx = 0, targetIdx = 1;
 
-  // Model defintion - 2-layer Perceptron with ReLU activation
+  // Model definition - 2-layer Perceptron with ReLU activation
   Sequential model;
   model.add(Linear(nFeat, 100));
   model.add(ReLU());

examples/README.md (+1 −1)

@@ -7,7 +7,7 @@ In the `make install` step, examples are placed in `<prefix>/share/flashlight/ex
 ## Building In-Source
 Building in-source is simple; examples are built by default. Binaries are placed in `build/examples`.
 
-To disable building examples in source, simply make sure `FL_BUILD_EXAMPLES` is `OFF`. Even though examples won't be built, the install step will still place source files in `examples/` in the isntall target directory mentioned above.
+To disable building examples in source, simply make sure `FL_BUILD_EXAMPLES` is `OFF`. Even though examples won't be built, the install step will still place source files in `examples/` in the install target directory mentioned above.
 
 ## Building as a Standalone Project
 Examples can also be built as standalone projects outside of the `build` directory. After the installation step, simply copy the example source path to a suitable directory, and build:
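For instance, a typical way to set that flag at configure time (the `FL_BUILD_EXAMPLES` name is from the text above; the invocation itself is a generic CMake sketch):

  cmake .. -DFL_BUILD_EXAMPLES=OFF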

flashlight/autograd/Functions.h (+7 −7)

@@ -181,7 +181,7 @@ Variable operator<=(const Variable& lhs, const double& rhs);
 Variable operator&&(const Variable& lhs, const Variable& rhs);
 
 /**
- * [Non-differentiable] Element-wise lodical not of a Variable.
+ * [Non-differentiable] Element-wise logical not of a Variable.
  * \f[ out_i = !var_i \f]
  */
 Variable operator!(const Variable& input);

@@ -296,28 +296,28 @@ Variable transpose(const Variable& input);
 /**
  * Repeats the tensor `input` along certain dimensions so as to match the shape
  * of `reference`. The dimensions to be repeated along are automatically
- * infered.
+ * inferred.
  */
 Variable tileAs(const Variable& input, const Variable& reference);
 
 /**
  * Repeats the tensor `input` along certain dimensions so as to match the shape
  * in the descriptor `rdims`. The dimensions to be repeated along are
- * automatically infered.
+ * automatically inferred.
  */
 Variable tileAs(const Variable& input, const af::dim4& rdims);
 
 /**
  * Sums up the tensor `input` along certain dimensions so as to match the shape
- * of `reference`. The dimensions to be summed along are automatically infered.
+ * of `reference`. The dimensions to be summed along are automatically inferred.
  * Note that after summation, the shape of those dimensions will be 1.
  */
 Variable sumAs(const Variable& input, const Variable& reference);
 
 /**
  * Sums up the tensor `input` along certain dimensions so as to match the shape
  * in the descriptor `rdims`. The dimensions to be summed along are
- * automatically infered. Note that after summation, the shape of those
+ * automatically inferred. Note that after summation, the shape of those
  * dimensions will be 1.
  */
 Variable sumAs(const Variable& input, const af::dim4& rdims);

@@ -549,15 +549,15 @@ Variable pool2d(
     PoolingMode mode = PoolingMode::MAX);
 
 /**
- * Applies a softmax function on Variable `input` along dimention `dim`, so that
+ * Applies a softmax function on Variable `input` along dimension `dim`, so that
  * the elements of the dimensional `dim` in output lie in the range (0,1) and
  * sum to 1.
  * \f[ out(x_{i}) = \frac{exp(x_i)}{\sum_j exp(x_j)} \f]
  */
 Variable softmax(const Variable& input, const int dim);
 
 /**
- * Applies a log(softmax(x)) function on Variable `input` along dimention `dim`
+ * Applies a log(softmax(x)) function on Variable `input` along dimension `dim`
  * \f[
  * out(x_{i}) = log \Big( \frac{exp(x_i)}{\sum_j exp(x_j)} \Big)
  * \f]
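To make the inferred-dimension behavior concrete, a sketch against the declarations above (shapes invented for illustration):

  auto bias = Variable(af::randu(5, 1), true);  // 5 x 1
  auto acts = Variable(af::randu(5, 4), true);  // 5 x 4
  auto tiled  = tileAs(bias, acts);  // repeats along dim 1 (inferred): 5 x 4
  auto summed = sumAs(acts, bias);   // sums along dim 1 (inferred): 5 x 1
  auto probs  = softmax(acts, 0);    // each column of probs now sums to 1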

flashlight/autograd/Variable.h (+4 −4)

@@ -91,7 +91,7 @@ class Variable {
  * Creates a Variable which wraps the array and inputs specified
  * @param[in] data array to the stored in the Variable
  * @param[in] inputs a vector specifying inputs for this Variable
- * @param[in] gradFunc function specifyign how to calculate gradient of the
+ * @param[in] gradFunc function specifying how to calculate gradient of the
  * input Variables
  */
 Variable(af::array data, std::vector<Variable> inputs, GradFunc gradFunc);

@@ -131,7 +131,7 @@ class Variable {
 bool isGradAvailable() const;
 
 /**
- * Returns the dimesion of the array wrapped by the Variable
+ * Returns the dimension of the array wrapped by the Variable
  */
 af::dim4 dims() const;
 
@@ -222,7 +222,7 @@ class Variable {
 
 /**
  * Run backward pass on the Variable. Gradient of all the inputs
- * in the computation graph leading upto the Variable on which the function is
+ * in the computation graph leading up to the Variable on which the function is
  * computed.
  * @param[in] grad gradient w.r.t to the Variable
 * @param[in] retainGraph If False, clears the input Variables stored

@@ -232,7 +232,7 @@ class Variable {
 
 /**
  * Run backward pass on the Variable. Gradient of all the inputs
- * in the computation graph leading upto the Variable on which the function is
+ * in the computation graph leading up to the Variable on which the function is
  * computed. Gradient w.r.t the all the elements in the variable is set to 1.0
 * @param[in] retainGraph If False, clears the input Variables stored
 * by the Variable
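Putting the corrected constructor and backward docs together, a sketch of a custom differentiable node (assuming `GradFunc` receives the input Variables and the incoming gradient, and that `addGrad`/`array`/`grad` behave as their names suggest):

  auto a = Variable(af::randu(3, 3), /*calcGrad=*/true);
  auto gradFunc = [](std::vector<Variable>& inputs, const Variable& gradOutput) {
    // out = 2 * a, so d(out)/d(a) = 2: scale the incoming gradient by 2.
    inputs[0].addGrad(Variable(2.0 * gradOutput.array(), false));
  };
  auto out = Variable(2.0 * a.array(), {a}, gradFunc);
  out.backward();      // gradient w.r.t. all elements of out is seeded with 1.0
  auto da = a.grad();  // every element is 2.0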

flashlight/common/DevicePtr.h (+3 −3)

@@ -13,11 +13,11 @@ namespace fl {
 
 /**
  * DevicePtr provides a wrapper for accessing device pointer of an Arrayfire
- * array in a safer way. After calliing `device()` on arrayfire array to get
+ * array in a safer way. After calling `device()` on arrayfire array to get
  * device pointer, the memory is not free until `unlock()` is called -
  * 'http://arrayfire.org/docs/group__device__func__device.htm'.
  * DevicePtr provides a std::lock_guard style API which calls the `unlock()`
- * function in its desctructor after getting device pointer.
+ * function in its destructor after getting device pointer.
 * Example Usage :
 * \code{.cpp}
 * auto A = af::array(10, 10);

@@ -63,7 +63,7 @@ class DevicePtr {
 DevicePtr& operator=(DevicePtr&& other) = delete;
 
 /**
- `.unlock()` is called on the underlying array in desctructor
+ `.unlock()` is called on the underlying array in destructor
 */
 ~DevicePtr();
 
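The std::lock_guard analogy in practice, as a sketch (the `get()` accessor is an assumption about this class's API):

  auto A = af::array(10, 10);
  {
    fl::DevicePtr devPtr(A);   // calls device() and pins A's device memory
    void* raw = devPtr.get();  // raw pointer for a custom kernel, memcpy, etc.
  }                            // destructor calls unlock(); A is usable again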

flashlight/dataset/PrefetchDataset.h (+1 −1)

@@ -17,7 +17,7 @@ namespace fl {
 * A view into a dataset, where a given number of samples are prefetched in
 * advance in a ThreadPool. PrefetchDataset should be used when there is a
 * sequential access to the underlying dataset. Otherwise, there will a lot of
- * cache misses leading to a degarded performance.
+ * cache misses leading to a degraded performance.
 *
 * Example:
 \code{.cpp}
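A sketch of the sequential access pattern the comment recommends (the constructor arguments here are assumptions, with `base` a shared_ptr to an existing dataset):

  auto prefetched =
      std::make_shared<fl::PrefetchDataset>(base, /*numThreads=*/2, /*prefetchSize=*/4);
  for (int64_t i = 0; i < prefetched->size(); ++i) {
    auto sample = prefetched->get(i);  // in-order reads keep the prefetched samples useful
  }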

flashlight/distributed/DistributedApi.h (+1 −1)

@@ -19,7 +19,7 @@ namespace fl {
 * Initialize the distributed environment. Note that `worldSize`, `worldRank`
 * are ignored if DistributedInit::MPI is used.
 *
- * @param initMethod Intialization method used for setting up the rendezvous
+ * @param initMethod Initialization method used for setting up the rendezvous
 * @param worldSize Total number of processes in the communication group
 * @param worldRank 0-indexed rank of the current process
 * @param params Additional parameters (if any) needed for initialization
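For the MPI path described above, initialization plausibly looks like the following (the `params` key is an assumption; `worldRank`/`worldSize` are ignored for MPI per the note above):

  fl::distributedInit(
      fl::DistributedInit::MPI,
      -1, // worldRank - ignored for MPI
      -1, // worldSize - ignored for MPI
      {{fl::DistributedConstants::kMaxDevicePerNode, "8"}});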

flashlight/nn/modules/AdaptiveSoftMaxLoss.h (+1 −1)

@@ -65,7 +65,7 @@ class AdaptiveSoftMaxLoss : public BinaryModule {
 * first tail bucket will contain `50 - 5 = 45` targets (subtracting the size
 * of the head bucket), the second tail bucket will contain `100 - 50 = 50`
 * targets (subtracting the size of the first tail bucket). Cutoffs must be
- * specified to accomadate all targets: any remaining targets are not assigned
+ * specified to accommodate all targets: any remaining targets are not assigned
 * to an 'overflow' bucket.
 * @param div_value determines the number of hidden units in the intermediate
 * layer for each tail bucket:
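The bucket arithmetic in this doc corresponds to cutoffs like these (only the cutoff semantics come from the text; the rest is illustration):

  // 100 targets total: head = {0..4}, tail 1 = {5..49}, tail 2 = {50..99},
  // i.e. 5, 50 - 5 = 45, and 100 - 50 = 50 targets per bucket.
  std::vector<int> cutoffs = {5, 50, 100};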

flashlight/nn/modules/Loss.h (+1 −1)

@@ -86,7 +86,7 @@ class BinaryCrossEntropy : public BinaryModule {
 * Perform forward loss computation with an additional weight tensor.
 *
 * @param inputs a tensor with the predicted values
- * @param targets a tensor with the taret values
+ * @param targets a tensor with the target values
 * @param weights a rescaling weight given to the loss of each element.
 */
 Variable forward(
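A sketch of calling the weighted overload documented here (shapes and values invented for illustration):

  fl::BinaryCrossEntropy bce;
  auto preds   = fl::Variable(af::randu(10), true);           // predicted values
  auto targets = fl::Variable(af::constant(1.0, 10), false);  // target values
  auto weights = fl::Variable(af::constant(0.5, 10), false);  // per-element rescaling
  auto loss = bce.forward(preds, targets, weights);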

flashlight/nn/modules/Module.h (+1 −1)

@@ -77,7 +77,7 @@ class Module {
 virtual void train();
 
 /**
- * Switches the module to evalulation mode. Changes all parameters so that
+ * Switches the module to evaluation mode. Changes all parameters so that
 * gradient calculation will be disabled for any calls to `forward`.
 */
 virtual void eval();
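The usual pattern around the two mode switches, as a sketch:

  model.train();  // training mode: gradients tracked in subsequent forward() calls
  // ... optimization loop ...
  model.eval();   // evaluation mode: gradient calculation disabled
  auto output = model.forward(testInput);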
