Skip to content

Commit 803bd8a

Browse files
fix: correct typos in comments and docstrings (#2816)
Co-authored-by: thecaptain789 <thecaptain789@users.noreply.github.com>
1 parent 8301618 commit 803bd8a

File tree

9 files changed

+10
-10
lines changed

9 files changed

+10
-10
lines changed

thunder/core/jit_ext.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1888,7 +1888,7 @@ def from_provenance(provenance, *, new_output=False):
18881888
try:
18891889
from_provenance(p.history)
18901890
except Exception as e:
1891-
raise NotImplementedError(f"Exception occured unpacking object from {p.history}") from e
1891+
raise NotImplementedError(f"Exception occurred unpacking object from {p.history}") from e
18921892

18931893
already_unpacked[id(p)] = p
18941894

thunder/core/prims.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3912,7 +3912,7 @@ def _reduction_meta(a: TensorProxy, /, dims: Sequence[int]) -> TensorProxy:
39123912
sum = make_prim(PrimIDs.SUM, "sum", meta=_reduction_meta, tags=(OpTags.REDUCTION_OP,))
39133913

39143914

3915-
# Note: We have seperate meta function for `argmin/argmax` instead of
3915+
# Note: We have separate meta function for `argmin/argmax` instead of
39163916
# reusing `_reduction_meta` as these operations expect Optional[int] for `dim`
39173917
# and return output with integer dtype.
39183918
#

thunder/core/utils.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1141,7 +1141,7 @@ def find_producer_symbols(trace: TraceCtx, proxies: Sequence[Proxy], stop_proxie
11411141
if arg_name not in map(lambda x: x.name, stop_proxies) and arg_name not in seen:
11421142
queue.append(arg)
11431143
seen.add(arg_name)
1144-
# original_order maps from bound_symbol to the index/order of its occurence in the trace. The order is
1144+
# original_order maps from bound_symbol to the index/order of its occurrence in the trace. The order is
11451145
# used to sort producer bound symbols to preserve the correctness of data dependency.
11461146
original_order = dict()
11471147
for i, bsym in enumerate(trace.bound_symbols):

thunder/distributed/transforms/ddp_v2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ def transform_module(self, model: ThunderModule):
5656

5757
# NOTE: Shared Parameters in Trace
5858
# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
59-
# For shared parameter, we replace all occurence shared parameter with it's corresponding `base` parameter.
59+
# For shared parameter, we replace all occurrences shared parameter with it's corresponding `base` parameter.
6060
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
6161
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
6262
# in `shared_params_name` dictionary.

thunder/distributed/transforms/fsdp_v2.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -144,7 +144,7 @@ def transform_module(
144144

145145
# NOTE: Shared Parameters in Trace
146146
# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
147-
# For shared parameter, we replace all occurence shared parameter with it's corresponding `base` parameter.
147+
# For shared parameter, we replace all occurrences shared parameter with it's corresponding `base` parameter.
148148
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
149149
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
150150
# in `shared_params_name` dictionary.

thunder/tests/distributed/test_dtensor.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,7 +31,7 @@
3131
from thunder.dynamo import thunderfx
3232

3333

34-
# NOTE: We run all these similar functions seperately
34+
# NOTE: We run all these similar functions separately
3535
# as we want to avoid nvfuser issue (https://github.com/NVIDIA/Fuser/issues/4507)
3636
# where trying to create FusionDefinition with same math operation can fail.
3737
functions_to_test = {

thunder/tests/opinfos.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5964,8 +5964,8 @@ def make_t(shape):
59645964
if dtype is not torch.bool: # argmax is not supported on `bool`
59655965
# overload: torch_max(a: TensorLike, /, dim: int | tuple[int], keepdim: bool = False) -> TensorLike, TensorLike
59665966
# This overload corresponds to taking the max along the specified dimension `dim`.
5967-
# It returns first occurence of the maximum value along the dimension and it's corresponding index.
5968-
# NOTE: When same values are present, the first occurence of the `value` and corresponding index is returned
5967+
# It returns first occurrence of the maximum value along the dimension and it's corresponding index.
5968+
# NOTE: When same values are present, the first occurrence of the `value` and corresponding index is returned
59695969
yield SampleInput(make_t(shape), dim)
59705970
yield SampleInput(make_t(shape), dim, keepdim)
59715971

thunder/torch/__init__.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3197,7 +3197,7 @@ def torch_max(
31973197

31983198
# overload - torch_max(a: TensorLike, /, dim: int | tuple[int], keepdim: bool = False) -> TensorLike, TensorLike
31993199
# This overload corresponds to taking the max along the specified dimension `dim`.
3200-
# NOTE: It returns first occurence of the maximum value along the dimension and it's corresponding index.
3200+
# NOTE: It returns first occurrence of the maximum value along the dimension and it's corresponding index.
32013201
utils.check_type(dim, NumberLike)
32023202
max_vals = amax(a, dim, keepdim)
32033203
argmax_vals = argmax(a, dim, keepdim)

thunder/transforms/materialization.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,7 +33,7 @@ def should_skip_materialization(n):
3333
processed_names = set()
3434

3535
# Shared parameters in PyTorch eager are parameters of module which have different name but share the underlying tensor.
36-
# For shared parameter, we replace all occurence shared parameter with its corresponding `base` parameter.
36+
# For shared parameter, we replace all occurrences shared parameter with its corresponding `base` parameter.
3737
# In our implementation `base` parameter is the parameter and corresponding name which we see the first time while
3838
# iterating our parameters (see below). We track subsequent parameter which share the underlying Tensor with this `base` parameter
3939
# in `shared_params_name` dictionary.

0 commit comments

Comments (0)