Commit 29d12de

cyyever authored and facebook-github-bot committed
Update to use Python 3.9 syntax (pytorch#4909)
Summary:
Pull Request resolved: pytorch#4909
X-link: facebookresearch/FBGEMM#1933
Pull Request resolved: pytorch#4905
Reviewed By: cthi
Differential Revision: D82979037
Pulled By: q10
1 parent be84b43 commit 29d12de

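The change is mechanical but repo-wide: PEP 585 (Python 3.9) made the built-in container types subscriptable, so `typing.List`, `typing.Tuple`, and `typing.Dict` in annotations become `list`, `tuple`, and `dict`, and most `from typing import ...` lines shrink or disappear. A minimal before/after sketch of the pattern (the function below is illustrative, not taken from the diff):

# Before (Python 3.8-compatible spelling):
#     from typing import Dict, List, Tuple
#     def histogram(xs: List[float]) -> Tuple[Dict[int, int], List[int]]: ...

# After (Python 3.9 syntax, as applied throughout this commit):
def histogram(xs: list[float]) -> tuple[dict[int, int], list[int]]:
    counts: dict[int, int] = {}
    for x in xs:
        counts[int(x)] = counts.get(int(x), 0) + 1
    return counts, sorted(counts)

print(histogram([0.4, 1.2, 1.9]))  # ({0: 1, 1: 2}, [0, 1])
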
File tree: 144 files changed (+928, -972 lines)

fbgemm_gpu/bench/batched_unary_embeddings_benchmark.py

Lines changed: 3 additions & 4 deletions
@@ -8,7 +8,6 @@
 
 import functools
 from math import sqrt
-from typing import List, Tuple
 
 import click
 import fbgemm_gpu
@@ -33,7 +32,7 @@ def generate_unary_feature(
     num_embeddings: int,
     # pyre-fixme[24]: Generic type `list` expects 1 type parameter, use
     # `typing.List[<element type>]` to avoid runtime subscripting errors.
-) -> Tuple[List, List, List]:
+) -> tuple[list, list, list]:
     lengths = []
     offsets = []
     indices = []
@@ -53,7 +52,7 @@ def generate_unary_feature(
 
 
 class MyModule(torch.nn.Module):
-    def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
+    def __init__(self, num_tasks: int, hash_sizes: list[int]) -> None:
         super().__init__()
         self.num_tasks = num_tasks
         self.hash_sizes = hash_sizes
@@ -73,7 +72,7 @@ def __init__(self, num_tasks: int, hash_sizes: List[int]) -> None:
         self.emb_modules.append(emb)
 
     def forward(
-        self, offsets: List[torch.Tensor], indices: List[torch.Tensor]
+        self, offsets: list[torch.Tensor], indices: list[torch.Tensor]
     ) -> torch.Tensor:
         tt_list = []
         for n in range(self.num_tasks):

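Note that the pyre-fixme[24] in the first hunk survives the rewrite: bare `list` and `tuple` still carry no element types. Fully parameterizing the signature would let the fixme be dropped; the sketch below is ours, with the parameter list abbreviated and the element types assumed from the `lengths`/`offsets`/`indices` lists the function builds:

# Hypothetical, fully parameterized variant (not part of this commit):
def generate_unary_feature(
    batch_size: int, num_embeddings: int
) -> tuple[list[int], list[int], list[int]]:
    lengths: list[int] = []
    offsets: list[int] = []
    indices: list[int] = []
    ...
    return lengths, offsets, indices
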
fbgemm_gpu/bench/bench_utils.py

Lines changed: 2 additions & 3 deletions
@@ -10,7 +10,6 @@
 import logging
 import threading
 import time
-from typing import List, Tuple
 
 import torch
 
@@ -32,7 +31,7 @@ def benchmark_torch_function(  # noqa: C901
     name: str = "",
     num_threads: int = 1,
     copy_f_for_multi_thread_test: bool = False,
-) -> Tuple[float, torch.Tensor]:
+) -> tuple[float, torch.Tensor]:
     logging.debug(f"Start to benchmark {name}...")
     if device != "cpu" and device != "" and device != "cuda":
         torch.cuda.set_device(device)
@@ -68,7 +67,7 @@ def benchmark_torch_function(  # noqa: C901
         dtype=torch.float,
         device=device,
     )
-    duration_ms_list: List[float] = []
+    duration_ms_list: list[float] = []
 
     f_list = [f]
     # make deepcopy of f if necessary

fbgemm_gpu/bench/histogram_binning_calibration_benchmark.py

Lines changed: 9 additions & 9 deletions
@@ -8,7 +8,7 @@
 
 import logging
 import time
-from typing import Callable, Tuple
+from typing import Callable
 
 import click
 import torch
@@ -25,9 +25,9 @@
 
 
 def benchmark_hbc_function(
-    func: Callable[[Tensor], Tuple[Tensor, Tensor]],
+    func: Callable[[Tensor], tuple[Tensor, Tensor]],
     input: Tensor,
-) -> Tuple[float, Tensor]:
+) -> tuple[float, Tensor]:
     if input.is_cuda:
         torch.cuda.synchronize()
         start_event = torch.cuda.Event(enable_timing=True)
@@ -118,7 +118,7 @@ def cli(
         [num_bins * (num_segments + 1)], dtype=torch.float64
     ).fill_(0.0)
 
-    def fbgemm_hbc_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
+    def fbgemm_hbc_cpu(input: Tensor) -> tuple[Tensor, Tensor]:
         return torch.ops.fbgemm.histogram_binning_calibration(
             input,
             bin_num_examples,
@@ -130,7 +130,7 @@ def fbgemm_hbc_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
             0.9995,
         )
 
-    def fbgemm_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
+    def fbgemm_hbc_by_feature_cpu(input: Tensor) -> tuple[Tensor, Tensor]:
         return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
             input,
             segment_values,
@@ -146,7 +146,7 @@ def fbgemm_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
             0.9995,
         )
 
-    def fbgemm_generic_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
+    def fbgemm_generic_hbc_by_feature_cpu(input: Tensor) -> tuple[Tensor, Tensor]:
        return torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
            input,
            segment_values,
@@ -186,7 +186,7 @@ def fbgemm_generic_hbc_by_feature_cpu(input: Tensor) -> Tuple[Tensor, Tensor]:
         bin_num_examples_gpu: Tensor = bin_num_examples.cuda()
         bin_num_positives_gpu: Tensor = bin_num_positives.cuda()
 
-        def fbgemm_hbc_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
+        def fbgemm_hbc_gpu(input: Tensor) -> tuple[Tensor, Tensor]:
             return torch.ops.fbgemm.histogram_binning_calibration(
                 input,
                 bin_num_examples_gpu,
@@ -206,7 +206,7 @@ def fbgemm_hbc_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
             by_feature_bin_num_positives.cuda()
         )
 
-        def fbgemm_hbc_by_feature_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
+        def fbgemm_hbc_by_feature_gpu(input: Tensor) -> tuple[Tensor, Tensor]:
             return torch.ops.fbgemm.histogram_binning_calibration_by_feature(
                 input,
                 segment_values_gpu,
@@ -226,7 +226,7 @@ def fbgemm_hbc_by_feature_gpu(input: Tensor) -> Tuple[Tensor, Tensor]:
 
         def fbgemm_generic_hbc_by_feature_gpu(
             input: Tensor,
-        ) -> Tuple[Tensor, Tensor]:
+        ) -> tuple[Tensor, Tensor]:
             return (
                 torch.ops.fbgemm.generic_histogram_binning_calibration_by_feature(
                     input,

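The visible body of benchmark_hbc_function times GPU calls with CUDA events rather than wall-clock time, since kernel launches are asynchronous. A self-contained sketch of that pattern (assuming a CUDA device is available; the helper name is ours, not the benchmark's):

from typing import Callable

import torch

def time_gpu_ms(func: Callable[[torch.Tensor], object], input: torch.Tensor) -> float:
    # Events measure device-side time; synchronize first so previously
    # queued work does not pollute the measurement, and again at the end
    # so the end event has actually fired before reading it.
    torch.cuda.synchronize()
    start_event = torch.cuda.Event(enable_timing=True)
    end_event = torch.cuda.Event(enable_timing=True)
    start_event.record()
    func(input)
    end_event.record()
    torch.cuda.synchronize()
    return start_event.elapsed_time(end_event)  # milliseconds
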
fbgemm_gpu/bench/jagged_tensor_benchmark.py

Lines changed: 5 additions & 6 deletions
@@ -12,7 +12,6 @@
 import logging
 import random
 from dataclasses import dataclass
-from typing import List, Tuple
 
 import click
 import fbgemm_gpu
@@ -505,7 +504,7 @@ def masked_select_jagged_1d(
 
     def ref(
         values: torch.Tensor, lengths: torch.Tensor, mask: torch.Tensor
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         masked_values_ref = values[mask]
         cum_count = torch.cumsum(mask, 0)
         cum_count = torch.cat((cum_count, torch.tensor([0])))
@@ -653,9 +652,9 @@ def keyed_jagged_index_select_dim1(
         ref_inputs.append((key_values, key_lengths, indices, key_weights))
 
     def keyed_jagged_index_select_dim1_ref(
-        inputs: List[torch.Tensor],
+        inputs: list[torch.Tensor],
         has_weights: bool,
-    ) -> Tuple[torch.Tensor, torch.Tensor]:
+    ) -> tuple[torch.Tensor, torch.Tensor]:
         outputs = []
         output_weights = []
         for key_values, key_lengths, indices, _ in inputs:
@@ -758,11 +757,11 @@ def jagged_slice_ref(
     offsets: torch.Tensor,
     start: torch.Tensor,
     max_L: int,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
     end_offsets_ = max_L + start + offsets[:-1]
     end_offsets = torch.where(end_offsets_ > offsets[1:], offsets[1:], end_offsets_)
     start_offsets = start + offsets[:-1]
-    indices_to_select: List[torch.Tensor] = []
+    indices_to_select: list[torch.Tensor] = []
     for i in range(end_offsets.size(0)):
         indices_to_select.append(
             torch.arange(start_offsets[i].item(), end_offsets[i].item())
torch.arange(start_offsets[i].item(), end_offsets[i].item())
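The ref closure above checks masked selection on a jagged 1-D tensor: keep the masked values, then recover per-row counts by reading the mask's cumulative sum at row boundaries. A compact sketch of those semantics (toy data, ours):

import torch

values = torch.tensor([1, 2, 3, 4, 5])
lengths = torch.tensor([2, 3])  # two jagged rows: [1, 2] and [3, 4, 5]
mask = torch.tensor([True, False, True, True, False])

masked_values = values[mask]              # tensor([1, 3, 4])
cum_count = torch.cumsum(mask, 0)         # elements kept so far: [1, 1, 2, 3, 3]
row_ends = torch.cumsum(lengths, 0) - 1   # last index of each row: [1, 4]
masked_lengths = torch.diff(cum_count[row_ends], prepend=torch.tensor([0]))
print(masked_values, masked_lengths)      # tensor([1, 3, 4]) tensor([1, 2])
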

fbgemm_gpu/bench/merge_embeddings_benchmark.py

Lines changed: 2 additions & 3 deletions
@@ -10,7 +10,6 @@
 
 import logging
 import signal
-from typing import List, Tuple
 
 import click
 import fbgemm_gpu
@@ -59,7 +58,7 @@ def get_table_batched_offsets_from_dense(
     merged_indices: torch.Tensor,
     # pyre-fixme[2]: Parameter must be annotated.
     gpu_num,
-) -> Tuple[torch.Tensor, torch.Tensor]:
+) -> tuple[torch.Tensor, torch.Tensor]:
     (T, B, L) = merged_indices.size()
     lengths = np.ones((T, B)) * L
     flat_lengths = lengths.flatten()
@@ -80,7 +79,7 @@ def generate_requests(
     E: int,
     # inter-batch indices reuse rate
     reuse: float = 0.0,
-) -> List[Tuple[torch.IntTensor, torch.IntTensor, None]]:
+) -> list[tuple[torch.IntTensor, torch.IntTensor, None]]:
     rs = []
     for gpu_num in range(num_gpus):
         all_indices = torch.randint(

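get_table_batched_offsets_from_dense flattens a dense (T, B, L) indices tensor into the lengths/offsets form that batched embedding ops consume; the offsets are an exclusive cumulative sum of the lengths. A hedged sketch of that step (shapes and values are toy data, ours):

import torch

T, B, L = 2, 3, 4
lengths = torch.full((T * B,), L, dtype=torch.long)  # every bag holds L indices
offsets = torch.cat([torch.zeros(1, dtype=torch.long), lengths.cumsum(0)])
print(offsets)  # tensor([ 0,  4,  8, 12, 16, 20, 24])
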
fbgemm_gpu/bench/quantize_ops_benchmark.py

Lines changed: 2 additions & 1 deletion
@@ -9,8 +9,9 @@
 import functools
 import logging
 import random
+from collections.abc import Iterable
 from contextlib import nullcontext
-from typing import Iterable, Optional, Union
+from typing import Optional, Union
 
 import click
 import fbgemm_gpu

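This file's hunk differs from the pure List-to-list rewrites: PEP 585 also deprecates the typing aliases for container ABCs, so Iterable now comes from collections.abc, which is subscriptable from Python 3.9 on. A small sketch of the resulting import style (the function is illustrative, not from the benchmark):

from collections.abc import Iterable
from typing import Optional

def total(xs: Iterable[float], scale: Optional[float] = None) -> float:
    # Accept any iterable of floats; scale is optional.
    s = sum(xs)
    return s * scale if scale is not None else s

print(total([1.0, 2.0, 3.0], scale=2.0))  # 12.0
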
fbgemm_gpu/bench/sparse_ops_benchmark.py

Lines changed: 10 additions & 11 deletions
@@ -11,7 +11,6 @@
 import logging
 import math
 import random
-from typing import List
 
 import click
 import fbgemm_gpu
@@ -681,42 +680,42 @@ def index_select_bench(
     optim_group: torch.optim.Optimizer = torch.optim.SGD(gis_inputs, lr=0.1)
 
     def index_select_fwd_ref(
-        inputs: List[torch.Tensor], indices: List[torch.Tensor]
-    ) -> List[torch.Tensor]:
+        inputs: list[torch.Tensor], indices: list[torch.Tensor]
+    ) -> list[torch.Tensor]:
         outputs = []
         for input, index in zip(inputs, indices):
             optim_index.zero_grad()
             outputs.append(torch.index_select(input, 0, index))
         return outputs
 
     def index_select_bwd_ref(
-        outputs: List[torch.Tensor], grads: List[torch.Tensor]
+        outputs: list[torch.Tensor], grads: list[torch.Tensor]
     ) -> None:
         for output, grad in zip(outputs, grads):
             optim_index.zero_grad()
             output.backward(grad, retain_graph=True)
 
     def batch_index_select_fwd(
-        concat_inputs: List[torch.Tensor],
-        concat_indices: List[int],
-        input_num_indices: List[int],
-        input_rows: List[int],
-        input_columns: List[int],
+        concat_inputs: list[torch.Tensor],
+        concat_indices: list[int],
+        input_num_indices: list[int],
+        input_rows: list[int],
+        input_columns: list[int],
     ) -> torch.autograd.Variable:
         optim_batch.zero_grad()
         return torch.ops.fbgemm.batch_index_select_dim0(
             concat_inputs, concat_indices, input_num_indices, input_rows, input_columns
         )
 
     def group_index_select_fwd(
-        gis_inputs: List[torch.Tensor], indices: List[int]
+        gis_inputs: list[torch.Tensor], indices: list[int]
     ) -> torch.autograd.Variable:
         optim_group.zero_grad()
         return torch.ops.fbgemm.group_index_select_dim0(gis_inputs, indices)
 
     def batch_group_index_select_bwd(
         output: torch.autograd.Variable,
-        grads: List[torch.Tensor],
+        grads: list[torch.Tensor],
         optim: torch.optim.Optimizer,
     ) -> torch.autograd.Variable:
         optim.zero_grad()

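The reference path in this hunk applies one torch.index_select per (input, index) pair along dim 0, which is exactly the per-tensor work that batch_index_select_dim0 fuses into a single op. A minimal sketch of the reference semantics (toy tensors, ours):

import torch

inputs = [torch.arange(12.0).reshape(4, 3), torch.arange(10.0).reshape(5, 2)]
indices = [torch.tensor([0, 2]), torch.tensor([1, 1, 4])]

# One row-selection per pair, as in index_select_fwd_ref.
outputs = [torch.index_select(inp, 0, idx) for inp, idx in zip(inputs, indices)]
print([tuple(o.shape) for o in outputs])  # [(2, 3), (3, 2)]
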
fbgemm_gpu/bench/tbe/split_table_batched_embeddings_benchmark.py

Lines changed: 3 additions & 3 deletions
@@ -12,7 +12,7 @@
 import os
 import tempfile
 from contextlib import nullcontext
-from typing import Any, Callable, Dict, Optional
+from typing import Any, Callable, Optional
 
 import click
 import numpy as np
@@ -231,7 +231,7 @@ def device(  # noqa C901
         pooling_mode = PoolingMode.NONE
         do_pooling = False
 
-    common_split_args: Dict[str, Any] = {
+    common_split_args: dict[str, Any] = {
         "weights_precision": weights_precision,
         "stochastic_rounding": stoc,
         "output_dtype": output_dtype,
@@ -1384,7 +1384,7 @@ def vbe(
         else EmbeddingLocation.HOST
     )
 
-    common_split_args: Dict[str, Any] = {
+    common_split_args: dict[str, Any] = {
         "weights_precision": embconfig.weights_dtype,
         "stochastic_rounding": embconfig.stochastic_rounding,
         "output_dtype": embconfig.output_dtype,

fbgemm_gpu/bench/tbe/tbe_cache_benchmark.py

Lines changed: 2 additions & 3 deletions
@@ -8,7 +8,6 @@
 
 import logging
 import random
-from typing import List, Tuple
 
 import click
 import numpy as np
@@ -104,7 +103,7 @@ def create_embedding_specs(
     cached_tables_ratio: float,
     num_embeddings: int,
     embedding_dims: int,
-) -> List[Tuple[str, int, int, SparseType, EmbeddingLocation]]:
+) -> list[tuple[str, int, int, SparseType, EmbeddingLocation]]:
     """
     Returns embedding specs to be used with IntNBitTableBatchedEmbeddingBagsCodegen.
     """
@@ -157,7 +156,7 @@ def create_embedding_specs(
 
 def create_request(
     num_tables: int, num_embeddings: int, batch: int, avg_pooling_factor: int
-) -> Tuple[Tensor, Tensor]:
+) -> tuple[Tensor, Tensor]:
     """
     Returns [indices, offsets], which are inputs of embedding bags.
     """

fbgemm_gpu/bench/tbe/tbe_inference_benchmark.py

Lines changed: 5 additions & 5 deletions
@@ -16,7 +16,7 @@
 import statistics
 from contextlib import nullcontext
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional
+from typing import Any, Callable, Optional
 
 import click
 import numpy as np
@@ -1398,14 +1398,14 @@ def nbit_uvm_compare_direct_mapped(
     )
 
     if mixed:
-        Ds: List[int] = [
+        Ds: list[int] = [
             round_up(np.random.randint(low=int(0.5 * D), high=int(1.5 * D)), 4)
             for _ in range(T)
         ]
         # pyre-fixme[9]: D has type `int`; used as `floating[typing.Any]`.
         D = np.average(Ds)
     else:
-        Ds: List[int] = [D] * T
+        Ds: list[int] = [D] * T
 
     _requests_uvm = generate_requests(
         iters,
@@ -1417,7 +1417,7 @@ def nbit_uvm_compare_direct_mapped(
         alpha=alpha,
         weighted=weighted,
     )
-    requests_uvm: List[TBERequest] = [
+    requests_uvm: list[TBERequest] = [
         TBERequest(req.indices.int(), req.offsets.int(), req.per_sample_weights)
         for req in _requests_uvm
     ]
@@ -1429,7 +1429,7 @@ def nbit_uvm_compare_direct_mapped(
         + param_size_multiplier * B * sum(Ds[:T]) * L
     )
 
-    stats: Dict[str, Any] = {
+    stats: dict[str, Any] = {
         "B": B,
         "T": T,
         "E": E,

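In the mixed branch above, per-table embedding dims are drawn around D and each rounded up to a multiple of 4. A self-contained sketch of that logic; round_up here is a local stand-in for the fbgemm helper the benchmark imports:

import numpy as np

def round_up(x: int, m: int) -> int:
    # Stand-in: smallest multiple of m that is >= x.
    return ((x + m - 1) // m) * m

D, T = 128, 4
Ds: list[int] = [
    round_up(int(np.random.randint(low=int(0.5 * D), high=int(1.5 * D))), 4)
    for _ in range(T)
]
print(Ds)  # e.g. [92, 148, 120, 76]; every entry is a multiple of 4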