
Commit 9089242

kiszk authored and pytorchmergebot committed
Fix typo under test directory (pytorch#112346)
This PR fixes typos in comments and messages under the `test` directory. It also fixes related typos in messages under the `torch` directory.

Pull Request resolved: pytorch#112346
Approved by: https://github.com/kit1980, https://github.com/ezyang
1 parent 94ebf52 commit 9089242


43 files changed (+78 / -78 lines)

test/distributed/_shard/sharded_optim/test_sharded_optim.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -104,7 +104,7 @@ def test_sharded_optim(self):
 local_model = MyShardedModel().cuda()
 sharded_model = MyShardedModel(spec=rowwise_spec).cuda()

-# copy the parameteres from local model
+# copy the parameters from local model
 sharded_model.sharded_param.local_shards()[0].tensor = \
     local_model.sharded_param.detach().clone().requires_grad_()

test/distributed/_shard/sharded_tensor/test_sharded_tensor.py

Lines changed: 2 additions & 2 deletions
Lines changed: 2 additions & 2 deletions

@@ -1594,7 +1594,7 @@ def test_sharded_tensor_to_cpu(self):
 # test ability to move st to CPU
 spec_before_move = st.sharding_spec()
 new_st = st.cpu(process_group=gloo_pg)
-# return a copy of orginal st
+# return a copy of original st
 self.assertFalse(st is new_st)
 # check the spec is still ChunkShardingSpec
 spec_after_move = new_st.sharding_spec()
@@ -1626,7 +1626,7 @@ def test_sharded_tensor_to_cpu(self):

 st = sharded_tensor.zeros(mixed_spec, h, w, process_group=gloo_pg)
 new_st = st.cpu()
-# return a copy of orginal st
+# return a copy of original st
 self.assertFalse(st is new_st)
 # check the spec is still ChunkShardingSpec
 spec_after_move = new_st.sharding_spec()

test/distributed/_spmd/test_tracing.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -669,7 +669,7 @@ def train_step(mod, opt, inp):
 train_step(mod, opt, inp)
 for node in train_step._compiled_obj.gm.graph.nodes:
     if node.target == torch.ops.aten.expand.default:
-        # backward grad expandion op should match local batch size
+        # backward grad expansion op should match local batch size
         # instead of global batch size.
         self.assertEqual(node.args[1], [2, 10])

test/distributed/_tensor/test_dtensor_ops.py

Lines changed: 2 additions & 2 deletions
Lines changed: 2 additions & 2 deletions

@@ -495,7 +495,7 @@ def wrapped(fn):
 xfail("zeros"),
 # ops inside this might even fail without dtensor
 # tests, as we rescale op db common test size factor (i.e. L, M, S)
-# which triggered the orignal function run failures with input
+# which triggered the original function run failures with input
 # generation becomes wrong, we skip them for now but should enable later.
 # TODO: need to clean this list and remove all cases
 skip("argwhere"),
@@ -643,7 +643,7 @@ def to_replicate(e: object) -> object:
 # errors
 dtensor_rs = func(*dtensor_args, **dtensor_kwargs)

-# we need to skip tests containing tensors of zero elmeents for now.
+# we need to skip tests containing tensors of zero elements for now.
 # see issue: https://github.com/pytorch/tau/issues/470
 # TODO remove this once issue above fixed.
 flat_args = pytree.tree_leaves(dtensor_rs)

test/distributed/checkpoint/e2e/test_fine_tuning.py

Lines changed: 2 additions & 2 deletions
Lines changed: 2 additions & 2 deletions

@@ -93,7 +93,7 @@ def pretrain(self, pretrain_dir: str) -> None:
 model = FSDP(model, device_mesh=device_mesh)
 optim = torch.optim.Adam(model.parameters(), lr=1e-3)

-# Trainining
+# Training
 for i in range(3):
     batch = torch.rand(32, DIM, device="cuda")
     loss = model(batch).sum()
@@ -158,7 +158,7 @@ def finetune(self, pretrain_dir: str, finetune_dir: str) -> None:
 # If this is the restart of the fine tuning, then checkpoint should exit.
 self.assertEqual(i, 0)

-# Trainining
+# Training
 for j in range(3):
     batch = torch.rand(32, DIM, device="cuda")
     loss = model(batch).sum()

test/distributed/checkpoint/test_dtensor_resharding.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -87,7 +87,7 @@ def test_1d_to_1d_reshard_placement_change(self) -> None:
 )
 self.assertEqual(global_tensor, state_dict_to_load["dtensor"].to_local())

-# redistribute the tensor back to its original placment for comparison.
+# redistribute the tensor back to its original placement for comparison.
 state_dict_to_load["dtensor"] = state_dict_to_load["dtensor"].redistribute(
     device_mesh,
     placements=original_placement,

test/distributed/elastic/agent/server/test/local_elastic_agent_test.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -336,7 +336,7 @@ def run_agent(
 ) -> Optional[RunResult]:
     """
     Runs a single agent. This method can be called either on a separate process
-    or the main test process. When calling this method on a sparate process make
+    or the main test process. When calling this method on a separate process make
     sure to pass the ``agent_results`` multiprocessing Queue so that the agent's
     run results can be returned. If ``agent_results`` is omitted, then the
     run result is returned from the method.

test/distributed/elastic/timer/local_timer_example.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ def _stuck_function(rank, mp_queue):
 time.sleep(5)


-# timer is not supported on macos or windowns
+# timer is not supported on macos or windows
 if not (IS_WINDOWS or IS_MACOS):
     class LocalTimerExample(TestCase):
         """

test/distributed/fsdp/test_fsdp_core.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -327,7 +327,7 @@ def test_pre_backward_hook_registration_after_state_dict(self):
 def _test_pre_backward_hook_registration(self, model):
     optim = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
     optim.zero_grad()
-    # Inputs always cuda, as computation happes on CUDA device only
+    # Inputs always cuda, as computation happens on CUDA device only
     input = model.module.get_input(torch.device("cuda"))
     output = model(*input)
     # this is pre-bwd hook

test/distributed/fsdp/test_fsdp_fine_tune.py

Lines changed: 1 addition & 1 deletion
Lines changed: 1 addition & 1 deletion

@@ -181,7 +181,7 @@ def forward(self, x):
 def test_hooks_multi_traversal(self):
     """
     Tests that the hooks do reshard / unshard correctly in the case of same
-    parameters being used mutliple times during forward pass.
+    parameters being used multiple times during forward pass.
     """
     self.run_subtests(
         {
