Skip to content

Commit 92b69bc

Browse files
committed
PyTorch v2.0 ๋ฐ˜์˜, pytorch/tutorials@9efe789b (#626)
1 parent e5a6705 commit 92b69bc

File tree

190 files changed

+8384
-8589
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

190 files changed

+8384
-8589
lines changed

โ€Ž.build/get_files_to_run.py

+107
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,107 @@
1+
from typing import Any, Dict, List, Optional, Tuple
2+
import json
3+
import os
4+
from pathlib import Path
5+
# from remove_runnable_code import remove_runnable_code
6+
7+
8+
# Calculate repo base dir
9+
REPO_BASE_DIR = Path(__file__).absolute().parent.parent
10+
11+
12+
def get_all_files() -> List[str]:
    """Collect every tutorial ``.py`` file under the ``*_source`` directories.

    Returns paths as strings relative to the repository root; files living in
    any ``data`` directory are excluded (they are assets, not tutorials).
    """
    return [
        str(path.relative_to(REPO_BASE_DIR))
        for path in REPO_BASE_DIR.glob("*_source/**/*.py")
        if "data" not in path.parts
    ]
15+
16+
17+
def read_metadata() -> Dict[str, Any]:
    """Load ``.jenkins/metadata.json``: per-tutorial durations, machine needs,
    and extra files, keyed by tutorial path."""
    metadata_path = REPO_BASE_DIR / ".jenkins" / "metadata.json"
    return json.loads(metadata_path.read_text())
20+
21+
22+
def calculate_shards(all_files: List[str], num_shards: int = 20) -> List[List[str]]:
    """Greedily pack tutorial files into ``num_shards`` buckets balanced by duration.

    Files that require a special machine are pinned to a fixed shard (0 or 1);
    all remaining files are assigned longest-first to the currently lightest
    shard (LPT greedy bin packing). Returns one list of file names per shard.
    """
    # Each entry is (accumulated duration in seconds, file names in the shard).
    sharded_files: List[Tuple[float, List[str]]] = [(0.0, []) for _ in range(num_shards)]
    metadata = read_metadata()

    def get_duration(file: str) -> int:
        # tutorials not listed in the metadata.json file usually take
        # <3min to run, so we'll default to 1min if it's not listed
        return metadata.get(file, {}).get("duration", 60)

    def get_needs_machine(file: str) -> Optional[str]:
        return metadata.get(file, {}).get("needs", None)

    def add_to_shard(i: int, filename: str) -> None:
        # Append filename to shard i and bump its accumulated duration.
        shard_time, shard_jobs = sharded_files[i]
        shard_jobs.append(filename)
        sharded_files[i] = (shard_time + get_duration(filename), shard_jobs)

    all_other_files = all_files.copy()
    needs_gpu_nvidia_small_multi = [
        x for x in all_files if get_needs_machine(x) == "gpu.nvidia.small.multi"
    ]
    # Local renamed from "medium": the filter matches "gpu.nvidia.large".
    needs_gpu_nvidia_large = [
        x for x in all_files if get_needs_machine(x) == "gpu.nvidia.large"
    ]
    for filename in needs_gpu_nvidia_small_multi:
        # currently, the only job that uses gpu.nvidia.small.multi is the 0th worker,
        # so we'll add all the jobs that need this machine to the 0th worker
        add_to_shard(0, filename)
        all_other_files.remove(filename)
    for filename in needs_gpu_nvidia_large:
        # currently, the only job that uses gpu.nvidia.large is the 1st worker,
        # so we'll add all the jobs that need this machine to the 1st worker
        add_to_shard(1, filename)
        all_other_files.remove(filename)

    # Longest jobs first, each into whichever shard is currently lightest.
    sorted_files = sorted(all_other_files, key=get_duration, reverse=True)
    for filename in sorted_files:
        min_shard_index = min(range(num_shards), key=lambda i: sharded_files[i][0])
        add_to_shard(min_shard_index, filename)
    return [x[1] for x in sharded_files]
68+
69+
70+
def compute_files_to_keep(files_to_run: List[str]) -> List[str]:
    """Return ``files_to_run`` plus every "extra_files" dependency each of them
    declares in metadata.json (e.g. helper scripts a tutorial imports)."""
    metadata = read_metadata()
    files_to_keep = list(files_to_run)
    for file in files_to_run:
        files_to_keep.extend(metadata.get(file, {}).get("extra_files", []))
    return files_to_keep
77+
78+
79+
def remove_other_files(all_files: List[str], files_to_keep: List[str]) -> None:
    """Strip runnable code from every tutorial file not selected for this shard.

    NOTE(review): ``remove_runnable_code`` is referenced here but its import is
    commented out at the top of this file, so the non-dry-run path would raise
    ``NameError`` — confirm the import should be restored before relying on it.
    """
    for file in all_files:
        if file not in files_to_keep:
            remove_runnable_code(file, file)
84+
85+
86+
def parse_args() -> Any:
    """Parse command-line options.

    ``--num-shards`` / ``--shard-num`` default to the ``NUM_WORKERS`` /
    ``WORKER_ID`` environment variables (falling back to 20 / 0), so CI
    workers can run without explicit flags.
    """
    from argparse import ArgumentParser

    default_num_shards = int(os.environ.get("NUM_WORKERS", 20))
    default_shard_num = int(os.environ.get("WORKER_ID", 0))

    parser = ArgumentParser("Select files to run")
    parser.add_argument("--dry-run", action="store_true")
    parser.add_argument("--num-shards", type=int, default=default_num_shards)
    parser.add_argument("--shard-num", type=int, default=default_shard_num)
    return parser.parse_args()
93+
94+
95+
def main() -> None:
    """Entry point: compute this worker's shard of tutorials and print it.

    Prints the space-separated stems of the selected files; unless
    ``--dry-run`` is given, also strips runnable code from every file this
    shard will not execute.
    """
    args = parse_args()

    all_files = get_all_files()
    shards = calculate_shards(all_files, num_shards=args.num_shards)
    files_to_run = shards[args.shard_num]
    if not args.dry_run:
        remove_other_files(all_files, compute_files_to_keep(files_to_run))
    print(" ".join(Path(name).stem for name in files_to_run))
104+
105+
106+
# Run only when executed as a script (this module is also imported by
# get_sphinx_filenames.py, which must not trigger main()).
if __name__ == "__main__":
    main()

โ€Ž.build/get_sphinx_filenames.py

+13
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
from pathlib import Path
2+
from typing import List
3+
4+
from get_files_to_run import get_all_files
5+
from validate_tutorials_built import NOT_RUN
6+
7+
8+
def get_files_for_sphinx() -> List[str]:
    """Return the tutorial files Sphinx should execute.

    A file is kept only when no entry of ``NOT_RUN`` occurs as a substring of
    its path (NOT_RUN entries are path fragments, not exact names).
    """
    return [
        path
        for path in get_all_files()
        if not any(skipped in path for skipped in NOT_RUN)
    ]
11+
12+
13+
# "|"-joined list of runnable tutorial paths; the name suggests it is consumed
# as a file-pattern alternation by the Sphinx build config — verify at the consumer.
SPHINX_SHOULD_RUN = "|".join(get_files_for_sphinx())

โ€Ž.build/validate_tutorials_built.py

+43-46
Original file line numberDiff line numberDiff line change
@@ -9,51 +9,49 @@
99
# the file name to explain why, like intro.html), or fix the tutorial and remove it from this list).
1010

1111
# Tutorials that must NOT be executed during the docs build, listed as
# "<dir>_source/<path>" fragments (matched as substrings elsewhere).
# Reconstructed post-commit state: every entry now carries its *_source prefix.
NOT_RUN = [
    "beginner_source/basics/intro",  # no code
    "beginner_source/translation_transformer",
    "beginner_source/profiler",
    "beginner_source/saving_loading_models",
    "beginner_source/introyt/captumyt",
    "beginner_source/examples_nn/polynomial_module",
    "beginner_source/examples_nn/dynamic_net",
    "beginner_source/examples_nn/polynomial_optim",
    "beginner_source/former_torchies/autograd_tutorial_old",
    "beginner_source/former_torchies/tensor_tutorial_old",
    "beginner_source/examples_autograd/polynomial_autograd",
    "beginner_source/examples_autograd/polynomial_custom_function",
    "intermediate_source/parametrizations",
    "intermediate_source/mnist_train_nas",  # used by ax_multiobjective_nas_tutorial.py
    "intermediate_source/fx_conv_bn_fuser",
    "advanced_source/super_resolution_with_onnxruntime",
    "advanced_source/ddp_pipeline",  # requires 4 gpus
    "prototype_source/fx_graph_mode_ptq_dynamic",
    "prototype_source/vmap_recipe",
    "prototype_source/torchscript_freezing",
    "prototype_source/nestedtensor",
    "recipes_source/recipes/saving_and_loading_models_for_inference",
    "recipes_source/recipes/saving_multiple_models_in_one_file",
    "recipes_source/recipes/loading_data_recipe",
    "recipes_source/recipes/tensorboard_with_pytorch",
    "recipes_source/recipes/what_is_state_dict",
    "recipes_source/recipes/profiler_recipe",
    "recipes_source/recipes/save_load_across_devices",
    "recipes_source/recipes/warmstarting_model_using_parameters_from_a_different_model",
    "recipes_source/recipes/dynamic_quantization",
    "recipes_source/recipes/saving_and_loading_a_general_checkpoint",
    "recipes_source/recipes/benchmark",
    "recipes_source/recipes/tuning_guide",
    "recipes_source/recipes/zeroing_out_gradients",
    "recipes_source/recipes/defining_a_neural_network",
    "recipes_source/recipes/timer_quick_start",
    "recipes_source/recipes/amp_recipe",
    "recipes_source/recipes/Captum_Recipe",
    "intermediate_source/flask_rest_api_tutorial",
    "intermediate_source/text_to_speech_with_torchaudio",
    "intermediate_source/tensorboard_profiler_tutorial",  # reenable after 2.0 release.
]
5554

56-
5755
def tutorial_source_dirs() -> List[Path]:
5856
return [
5957
p.relative_to(REPO_ROOT).with_name(p.stem[:-7])
@@ -68,6 +66,7 @@ def main() -> None:
6866
glob_path = f"{tutorial_source_dir}/**/*.html"
6967
html_file_paths += docs_dir.glob(glob_path)
7068

69+
should_not_run = [f'{x.replace("_source", "")}.html' for x in NOT_RUN]
7170
did_not_run = []
7271
for html_file_path in html_file_paths:
7372
with open(html_file_path, "r", encoding="utf-8") as html_file:
@@ -78,9 +77,7 @@ def main() -> None:
7877
if (
7978
"Total running time of the script: ( 0 minutes 0.000 seconds)"
8079
in elem.text
81-
and not any(
82-
html_file_path.match(file) for file in NOT_RUN
83-
)
80+
and not any(html_file_path.match(file) for file in should_not_run)
8481
):
8582
did_not_run.append(html_file_path.as_posix())
8683

โ€Ž.github/ISSUE_TEMPLATE/1_TRANSLATE_REQUEST.md

+1-1
Original file line numberDiff line numberDiff line change
@@ -15,4 +15,4 @@ _(๋ฐ˜๋“œ์‹œ ์ง€ํ‚ค์…”์•ผ ํ•˜๋Š” ์ผ์ •์ด ์•„๋‹™๋‹ˆ๋‹ค - ์ผ์ •์ด ๋„ˆ๋ฌด ๋Šฆ์–ด
1515
## ๊ด€๋ จ ์ด์Šˆ
1616
_ํ˜„์žฌ ๋ฒˆ์—ญ ์š”์ฒญ / ์ง„ํ–‰ ๋‚ด์—ญ์„ ๋ณด๊ธฐ ์œ„ํ•ด ๊ฐ ๋ฒ„์ „์˜ ๋ฉ”์ธ ์ด์Šˆ๋ฅผ ์ฐธ์กฐํ•ฉ๋‹ˆ๋‹ค._ <br />
1717
_(ํŠน๋ณ„ํ•œ ์ผ์ด ์—†๋‹ค๋ฉด ๋ณ€๊ฒฝํ•˜์ง€ ์•Š์œผ์…”๋„ ๋ฉ๋‹ˆ๋‹ค.)_
18-
* ๊ด€๋ จ ์ด์Šˆ: #615 (v1.13)
18+
* ๊ด€๋ จ ์ด์Šˆ: #660 (v2.0)

โ€ŽLICENSE

+1-1
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
BSD 3-Clause License
22

3-
Copyright (c) 2017, Pytorch contributors
3+
Copyright (c) 2017, PyTorch contributors
44
All rights reserved.
55

66
Redistribution and use in source and binary forms, with or without

โ€ŽREADME.md

+4-4
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,10 @@
1-
# PyTorch ํ•œ๊ตญ์–ด ํŠœํ† ๋ฆฌ์–ผ
1+
# ํŒŒ์ดํ† ์น˜ ํ•œ๊ตญ์–ด ํŠœํ† ๋ฆฌ์–ผ (PyTorch tutorials in Korean)
22

33
## ์†Œ๊ฐœ
44

55
PyTorch์—์„œ ์ œ๊ณตํ•˜๋Š” ํŠœํ† ๋ฆฌ์–ผ์˜ ํ•œ๊ตญ์–ด ๋ฒˆ์—ญ์„ ์œ„ํ•œ ์ €์žฅ์†Œ์ž…๋‹ˆ๋‹ค.\
66
๋ฒˆ์—ญ์˜ ๊ฒฐ๊ณผ๋ฌผ์€ [https://tutorials.pytorch.kr](https://tutorials.pytorch.kr)์—์„œ ํ™•์ธํ•˜์‹ค ์ˆ˜ ์žˆ์Šต๋‹ˆ๋‹ค. (๋ฒˆ์—ญ์„ ์ง„ํ–‰ํ•˜๋ฉฐ **๋น„์ •๊ธฐ์ ์œผ๋กœ** ๋ฐ˜์˜ํ•ฉ๋‹ˆ๋‹ค.)\
7-
ํ˜„์žฌ ๋ฒ„์ „์˜ ๋ฒˆ์—ญ / ๋ณ€๊ฒฝ ๊ด€๋ จ ์ด์Šˆ๋Š” [#615 ์ด์Šˆ](https://github.com/PyTorchKorea/tutorials-kr/issues/615)๋ฅผ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”.
7+
ํ˜„์žฌ ๋ฒ„์ „์˜ ๋ฒˆ์—ญ / ๋ณ€๊ฒฝ ๊ด€๋ จ ์ด์Šˆ๋Š” [#660 ์ด์Šˆ](https://github.com/PyTorchKorea/tutorials-kr/issues/660)๋ฅผ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”.
88

99
## ๊ธฐ์—ฌํ•˜๊ธฐ
1010

@@ -22,7 +22,7 @@ PyTorch์—์„œ ์ œ๊ณตํ•˜๋Š” ํŠœํ† ๋ฆฌ์–ผ์˜ ํ•œ๊ตญ์–ด ๋ฒˆ์—ญ์„ ์œ„ํ•œ ์ €์žฅ์†Œ
2222

2323
## ์›๋ฌธ
2424

25-
ํ˜„์žฌ PyTorch v1.13 ํŠœํ† ๋ฆฌ์–ผ([pytorch/tutorials@db34a77](https://github.com/pytorch/tutorials/commit/db34a779242f1a71346db4a9e5d6ac962a8d9b77) ๊ธฐ์ค€) ๋ฒˆ์—ญ์ด ์ง„ํ–‰ ์ค‘์ž…๋‹ˆ๋‹ค.
25+
ํ˜„์žฌ PyTorch v2.0 ํŠœํ† ๋ฆฌ์–ผ([pytorch/tutorials@9efe789b](https://github.com/pytorch/tutorials/commit/9efe789bfc3763ec359b60f12b5e6dda4e6d5db0) ๊ธฐ์ค€) ๋ฒˆ์—ญ์ด ์ง„ํ–‰ ์ค‘์ž…๋‹ˆ๋‹ค.
2626

2727
์ตœ์‹  ๋ฒ„์ „์˜ ํŠœํ† ๋ฆฌ์–ผ(๊ณต์‹, ์˜์–ด)์€ [PyTorch tutorials ์‚ฌ์ดํŠธ](https://pytorch.org/tutorials) ๋ฐ [PyTorch tutorials ์ €์žฅ์†Œ](https://github.com/pytorch/tutorials)๋ฅผ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”.
2828

@@ -46,5 +46,5 @@ v1.0 ์ดํ›„ ๋ฒˆ์—ญ์€ ๋ณ„๋„ ์ €์žฅ์†Œ๋กœ ๊ด€๋ฆฌํ•˜์ง€ ์•Š์Šต๋‹ˆ๋‹ค. [์ด ์ €์žฅ
4646
๋นŒ๋“œ ๋ฐฉ๋ฒ•์€ [๊ธฐ์—ฌํ•˜๊ธฐ ๋ฌธ์„œ์˜ `2-5. (๋‚ด ์ปดํ“จํ„ฐ์—์„œ) ๊ฒฐ๊ณผ ํ™•์ธํ•˜๊ธฐ`](https://github.com/PyTorchKorea/tutorials-kr/blob/master/CONTRIBUTING.md#2-5-๋‚ด-์ปดํ“จํ„ฐ์—์„œ-๊ฒฐ๊ณผ-ํ™•์ธํ•˜๊ธฐ) ๋ถ€๋ถ„์„ ์ฐธ๊ณ ํ•ด์ฃผ์„ธ์š”.
4747

4848
---
49-
This is a project to translate [pytorch/tutorials@db34a77](https://github.com/pytorch/tutorials/commit/db34a779242f1a71346db4a9e5d6ac962a8d9b77) into Korean.
49+
This is a project to translate [pytorch/tutorials@9efe789b](https://github.com/pytorch/tutorials/commit/9efe789bfc3763ec359b60f12b5e6dda4e6d5db0) into Korean.
5050
For the latest version, please visit to the [official PyTorch tutorials repo](https://github.com/pytorch/tutorials).

โ€Ž_static/img/invpendulum.gif

29.6 KB
Loading
Original file line numberDiff line numberDiff line change
@@ -0,0 +1 @@
1+
<mxfile host="app.diagrams.net" modified="2022-10-01T16:00:40.980Z" agent="5.0 (X11)" etag="_qbqVrrm3wUvm_i0-Q9T" version="20.4.0" type="device"><diagram id="aSXDm0BvLjt-Za0vl2Tv" name="Page-1">5Vpbc+MmFP41nmkfmpGEpMiPjTftzrTZZtbbbbYvHSxhiRQJFeHb/vqChG4gx95ElqfTeCaGwwEO37lwDskMLNL9zwzmyQONEJk5VrSfgXczx7GtuSW+JOVQUXzXrggxw5FiaglL/BXVMxV1gyNU9Bg5pYTjvE8MaZahkPdokDG667OtKenvmsMYGYRlCIlJ/QNHPKmogWe19PcIx0m9s22pkRTWzIpQJDCiuw4J3M/AglHKq1a6XyAiwatxqeb9dGS0EYyhjJ8z4flQfCient3PwfKXVfTn40P6/vMPapUtJBt14I8oJ/AgaA8opeygZOeHGhBGN1mE5JrWDNztEszRMoehHN0JExC0hKdE9GzRXGNCFpRQVs4Fa09+BD1mMMJC7s7YqvyIsYIz+jfqjPjljxhRsiLG0f4oCHYDrbBJRFPE5SEsNcGt1aPMMVDdXatbu1ZY0tGrr2hQmVPcrNwiLhoK9G9QgGMo4Lec41T6gWN9535v4C/WFLaPTmM/AlrebR8t2z0TruBScAEDLgaziKaCtoI8TAy4wg3bltYqIUFZ9KOMCaIbElgUOOxD1rftai0UGQFCg0/sRzcsRKfdjEMWI37KGkx1dOD2BtCuaQwRyPG2L+6QCtQOjxSLgxz1DRdoWqyOqWZ1I42+kNdfCOjmUOFgLCR0U0aemi2XDMVxgQ3ztK0X5fJtjR/0+EWjkqA1z0YHr7dYz7DYR0pwKM/5AfFRw2sEUbAOh8PrLYI+sgbDaxig1foy4dWxrh1fAzOACodeqi5lPKExzSC5b6laGGh5fqU0V8g/I84PKluBG06HQu8okcN/W+Q4OyS8CWTfsPFPlbz/Cxu/eg5hm0nEmPcg2mP+JNs3nup96Yy823c7h/HvTu8/cXfqV9H8lXen5xxJuUa+O91A2yd4+e709LvTP8Hvvsh/mbvWNtPDUR0hE+I9NVNF50vrFrLb+kLZq52hcaCO+9hTuY9zpvt413Qf3Vqc29e6z1xbyJkm9TSKb51fS4mdKVJP2zXc4fc8grwsLb3rlpaODog3cItaU96ijUdNmirWscHuXq03jjdVeLDnZ8aHI+qcJsGspewFdp8Iee8ivJU7Ehxn5YD/z0a+qN0RtOZtT7Ri9Q1Tac3ZqsjLvvWJQZzhLBbNUmtqXSFnuXQzb5zd7Bvxa5FQWkgvbB4vJDxCgXVlCEOOaXZhURwpylJQiRQFZdsL7wfkfh9RSFkkGql6XrQ2KiRddG9X7t2+rF10L6/ElUpu5VZ/ZWUt1D/piuk76/K8pWyq5S+lHiVi23oGaA9E7PlAxG4Yxw/ZZr4X1q5Vu9AE6V8wP5UAyt4jYlgcG7HrlUhVGL1WkgeO5EDf/r5oDdcuo9dIeqUPXk7ygK/xn3iPNACxJkgKHTMpHNVJBmod6+Z2snzmqmWMrlCgVx/nWjjQLc+7jIUDvYw5ZeFA43emsFjzCf0iYd2ava6q7z2LTVbX18XdyaDvX9UjNIMBevl2tkdo71VATyrG8ghd4LcV6qLb/oW/Ym//TwLc/ws=</diagram></mxfile>
Loading

โ€Ž_templates/layout.html

+2-2
Original file line numberDiff line numberDiff line change
@@ -46,7 +46,7 @@
4646
url="https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/8-inference?WT.mc_id=aiml-7486-cxa";
4747
}
4848

49-
$(".pytorch-call-to-action-links").children().first().before("<a href="+url+' data-behavior="call-to-action-event" data-response="Run in Microsoft Learn" target="_blank"><div id="microsoft-learn-link" style="padding-bottom: 0.625rem;border-bottom: 1px solid #f3f4f7;padding-right: 2.5rem;display: -webkit-box; display: -ms-flexbox; isplay: flex; -webkit-box-align: center;-ms-flex-align: center;align-items: center;"><img class="call-to-action-img" src="../../_static/images/microsoft-logo.svg"/><div class="call-to-action-desktop-view">Run in Microsoft Learn</div><div class="call-to-action-mobile-view">Learn</div></div></a>')
49+
$(".pytorch-call-to-action-links").children().first().before("<a href="+url+' data-behavior="call-to-action-event" data-response="Run in Microsoft Learn" target="_blank"><div id="microsoft-learn-link" style="padding-bottom: 0.625rem;border-bottom: 1px solid #f3f4f7;padding-right: 2.5rem;display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-align: center;-ms-flex-align: center;align-items: center;"><img class="call-to-action-img" src="../../_static/images/microsoft-logo.svg"/><div class="call-to-action-desktop-view">Run in Microsoft Learn</div><div class="call-to-action-mobile-view">Learn</div></div></a>')
5050
}
5151
</script>
5252

@@ -91,7 +91,7 @@
9191
</script>
9292

9393
<script type="text/javascript">
94-
var collapsedSections = ['ํŒŒ์ดํ† ์น˜(PyTorch) ๋ ˆ์‹œํ”ผ', 'ํŒŒ์ดํ† ์น˜(PyTorch) ๋ฐฐ์šฐ๊ธฐ', '์ด๋ฏธ์ง€/๋น„๋””์˜ค', '์˜ค๋””์˜ค', 'ํ…์ŠคํŠธ', '๊ฐ•ํ™”ํ•™์Šต', 'PyTorch ๋ชจ๋ธ์„ ํ”„๋กœ๋•์…˜ ํ™˜๊ฒฝ์— ๋ฐฐํฌํ•˜๊ธฐ', 'Code Transforms with FX', 'ํ”„๋ก ํŠธ์—”๋“œ API', 'PyTorch ํ™•์žฅํ•˜๊ธฐ', '๋ชจ๋ธ ์ตœ์ ํ™”', '๋ณ‘๋ ฌ ๋ฐ ๋ถ„์‚ฐ ํ•™์Šต', 'Mobile', 'Introduction to PyTorch on YouTube', 'Recommendation Systems'];
94+
var collapsedSections = ['ํŒŒ์ดํ† ์น˜(PyTorch) ๋ ˆ์‹œํ”ผ', 'ํŒŒ์ดํ† ์น˜(PyTorch) ๋ฐฐ์šฐ๊ธฐ', '์ด๋ฏธ์ง€/๋น„๋””์˜ค', '์˜ค๋””์˜ค', 'ํ…์ŠคํŠธ', '๊ฐ•ํ™”ํ•™์Šต', 'PyTorch ๋ชจ๋ธ์„ ํ”„๋กœ๋•์…˜ ํ™˜๊ฒฝ์— ๋ฐฐํฌํ•˜๊ธฐ', 'Code Transforms with FX', 'ํ”„๋ก ํŠธ์—”๋“œ API', 'PyTorch ํ™•์žฅํ•˜๊ธฐ', '๋ชจ๋ธ ์ตœ์ ํ™”', '๋ณ‘๋ ฌ ๋ฐ ๋ถ„์‚ฐ ํ•™์Šต', '๋ชจ๋ฐ”์ผ', 'Introduction to PyTorch on YouTube', '์ถ”์ฒœ ์‹œ์Šคํ…œ', 'Multimodality'];
9595
</script>
9696

9797
{% endblock %}

0 commit comments

Comments
ย (0)