-
Notifications
You must be signed in to change notification settings - Fork 257
/
Copy pathdispatcher.html
1077 lines (896 loc) · 79.6 KB
/
dispatcher.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>Registering a Dispatched Operator in C++ — PyTorch Tutorials 1.10.2+cu102 documentation</title>
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<!-- <link rel="stylesheet" href="../_static/pygments.css" type="text/css" /> -->
<link rel="stylesheet" href="../_static/copybutton.css" type="text/css" />
<link rel="stylesheet" href="../_static/gallery.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.10.0-beta/dist/katex.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.css" type="text/css" />
<link rel="stylesheet" href="../_static/katex-math.css" type="text/css" />
<link rel="index" title="Index" href="../genindex.html" />
<link rel="search" title="Search" href="../search.html" />
<link rel="next" title="Extending dispatcher for a new backend in C++" href="extend_dispatcher.html" />
<link rel="prev" title="커스텀 C++ 클래스로 TorchScript 확장하기" href="torch_script_custom_classes.html" />
<script src="../_static/js/modernizr.min.js"></script>
<!-- Preload the theme fonts -->
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<!-- Preload the katex fonts -->
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/katex@0.10.0/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.2/css/all.css" integrity="sha384-vSIIfh2YWi9wW0r9iZe7RJPrKwp6bG+s9QZMoITbCckVJqGCCRhc+ccxNcdpHuYu" crossorigin="anonymous">
</head>
<div class="container-fluid header-holder tutorials-header" id="header-holder">
<div class="container">
<div class="header-container">
<a class="header-logo" href="https://pytorch.kr/" aria-label="PyTorch"></a>
<div class="main-menu">
<ul>
<li>
<a href="https://pytorch.kr/get-started">시작하기</a>
</li>
<li class="active">
<a href="https://tutorials.pytorch.kr">튜토리얼</a>
</li>
<li>
<a href="https://pytorch.kr/hub">허브</a>
</li>
<li>
<a href="https://discuss.pytorch.kr">커뮤니티</a>
</li>
</ul>
</div>
<a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
</div>
</div>
</div>
<body class="pytorch-body">
<div class="table-of-contents-link-wrapper">
<span>Table of Contents</span>
<a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
</div>
<nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
<div class="pytorch-side-scroll">
<div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<div class="pytorch-left-menu-search">
<div class="version">
1.10.2+cu102
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
<input type="text" name="q" placeholder="Search Tutorials" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<p class="caption"><span class="caption-text">ํ์ดํ ์น(PyTorch) ๋ ์ํผ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../recipes/recipes_index.html">๋ชจ๋ ๋ ์ํผ ๋ณด๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../prototype/prototype_index.html">๋ชจ๋ ํ๋กํ ํ์
๋ ์ํผ ๋ณด๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">ํ์ดํ ์น(PyTorch) ์์ํ๊ธฐ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/intro.html">ํ์ดํ ์น(PyTorch) ๊ธฐ๋ณธ ์ตํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/quickstart_tutorial.html">๋น ๋ฅธ ์์(Quickstart)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/tensorqs_tutorial.html">ํ
์(Tensor)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/data_tutorial.html">Dataset๊ณผ DataLoader</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/transforms_tutorial.html">๋ณํ(Transform)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/buildmodel_tutorial.html">์ ๊ฒฝ๋ง ๋ชจ๋ธ ๊ตฌ์ฑํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/autogradqs_tutorial.html"><code class="docutils literal notranslate"><span class="pre">torch.autograd</span></code>๋ฅผ ์ฌ์ฉํ ์๋ ๋ฏธ๋ถ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/optimization_tutorial.html">๋ชจ๋ธ ๋งค๊ฐ๋ณ์ ์ต์ ํํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/basics/saveloadrun_tutorial.html">๋ชจ๋ธ ์ ์ฅํ๊ณ ๋ถ๋ฌ์ค๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">Introduction to PyTorch on YouTube</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt.html">Introduction to PyTorch - YouTube Series</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/introyt1_tutorial.html">Introduction to PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/tensors_deeper_tutorial.html">Introduction to PyTorch Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/autogradyt_tutorial.html">The Fundamentals of Autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/modelsyt_tutorial.html">Building Models with PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/tensorboardyt_tutorial.html">PyTorch TensorBoard Support</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/trainingyt.html">Training with PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/introyt/captumyt.html">Model Understanding with Captum</a></li>
</ul>
<p class="caption"><span class="caption-text">ํ์ดํ ์น(PyTorch) ๋ฐฐ์ฐ๊ธฐ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/deep_learning_60min_blitz.html">PyTorch๋ก ๋ฅ๋ฌ๋ํ๊ธฐ: 60๋ถ๋ง์ ๋์ฅ๋ด๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/pytorch_with_examples.html">์์ ๋ก ๋ฐฐ์ฐ๋ ํ์ดํ ์น(PyTorch)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/nn_tutorial.html"><cite>torch.nn</cite> ์ด <em>์ค์ ๋ก</em> ๋ฌด์์ธ๊ฐ์?</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/tensorboard_tutorial.html">TensorBoard๋ก ๋ชจ๋ธ, ๋ฐ์ดํฐ, ํ์ต ์๊ฐํํ๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">์ด๋ฏธ์ง/๋น๋์ค</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/torchvision_tutorial.html">TorchVision ๊ฐ์ฒด ๊ฒ์ถ ๋ฏธ์ธ์กฐ์ (Finetuning) ํํ ๋ฆฌ์ผ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/transfer_learning_tutorial.html">์ปดํจํฐ ๋น์ (Vision)์ ์ํ ์ ์ดํ์ต(Transfer Learning)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/fgsm_tutorial.html">์ ๋์ ์์ ์์ฑ(Adversarial Example Generation)</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/dcgan_faces_tutorial.html">DCGAN ํํ ๋ฆฌ์ผ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/vt_tutorial.html">๋ฐฐํฌ๋ฅผ ์ํ ๋น์ ํธ๋์คํฌ๋จธ(Vision Transformer) ๋ชจ๋ธ ์ต์ ํํ๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">์ค๋์ค</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_io_tutorial.html">Audio I/O</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_resampling_tutorial.html">Audio Resampling</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_data_augmentation_tutorial.html">Audio Data Augmentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_feature_extractions_tutorial.html">Audio Feature Extractions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_feature_augmentation_tutorial.html">Audio Feature Augmentation</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/audio_datasets_tutorial.html">Audio Datasets</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/speech_recognition_pipeline_tutorial.html">Speech Recognition with Wav2Vec2</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/speech_command_classification_with_torchaudio_tutorial.html">Speech Command Classification with torchaudio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/text_to_speech_with_torchaudio.html">Text-to-speech with torchaudio</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/forced_alignment_with_torchaudio_tutorial.html">Forced Alignment with Wav2Vec2</a></li>
</ul>
<p class="caption"><span class="caption-text">ํ
์คํธ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/transformer_tutorial.html">nn.Transformer ์ TorchText ๋ก ์ํ์ค-ํฌ-์ํ์ค(Sequence-to-Sequence) ๋ชจ๋ธ๋งํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/char_rnn_classification_tutorial.html">๊ธฐ์ด๋ถํฐ ์์ํ๋ NLP: ๋ฌธ์-๋จ์ RNN์ผ๋ก ์ด๋ฆ ๋ถ๋ฅํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/char_rnn_generation_tutorial.html">๊ธฐ์ด๋ถํฐ ์์ํ๋ NLP: ๋ฌธ์-๋จ์ RNN์ผ๋ก ์ด๋ฆ ์์ฑํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/seq2seq_translation_tutorial.html">๊ธฐ์ด๋ถํฐ ์์ํ๋ NLP: Sequence to Sequence ๋คํธ์ํฌ์ Attention์ ์ด์ฉํ ๋ฒ์ญ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/text_sentiment_ngrams_tutorial.html">torchtext ๋ผ์ด๋ธ๋ฌ๋ฆฌ๋ก ํ
์คํธ ๋ถ๋ฅํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/translation_transformer.html">nn.Transformer์ torchtext๋ก ์ธ์ด ๋ฒ์ญํ๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">๊ฐํํ์ต</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/reinforcement_q_learning.html">๊ฐํ ํ์ต (DQN) ํํ ๋ฆฌ์ผ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/mario_rl_tutorial.html">Train a Mario-playing RL Agent</a></li>
</ul>
<p class="caption"><span class="caption-text">PyTorch ๋ชจ๋ธ์ ํ๋ก๋์
ํ๊ฒฝ์ ๋ฐฐํฌํ๊ธฐ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/flask_rest_api_tutorial.html">Flask๋ฅผ ์ฌ์ฉํ์ฌ Python์์ PyTorch๋ฅผ REST API๋ก ๋ฐฐํฌํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/Intro_to_TorchScript_tutorial.html">TorchScript ์๊ฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpp_export.html">C++์์ TorchScript ๋ชจ๋ธ ๋ก๋ฉํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="super_resolution_with_onnxruntime.html">(์ ํ) PyTorch ๋ชจ๋ธ์ ONNX์ผ๋ก ๋ณํํ๊ณ ONNX ๋ฐํ์์์ ์คํํ๊ธฐ</a></li>
</ul>
<p class="caption"><span class="caption-text">Code Transforms with FX</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/fx_conv_bn_fuser.html">(๋ฒ ํ) FX์์ ํฉ์ฑ๊ณฑ/๋ฐฐ์น ์ ๊ทํ(Convolution/Batch Norm) ๊ฒฐํฉ๊ธฐ(Fuser) ๋ง๋ค๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/fx_profiling_tutorial.html">(beta) Building a Simple CPU Performance Profiler with FX</a></li>
</ul>
<p class="caption"><span class="caption-text">ํ๋ก ํธ์๋ API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/memory_format_tutorial.html">(๋ฒ ํ) PyTorch๋ฅผ ์ฌ์ฉํ Channels Last ๋ฉ๋ชจ๋ฆฌ ํ์</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/forward_ad_usage.html">Forward-mode Automatic Differentiation (Beta)</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpp_frontend.html">PyTorch C++ ํ๋ก ํธ์๋ ์ฌ์ฉํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="torch-script-parallelism.html">TorchScript์ ๋์ ๋ณ๋ ฌ ์ฒ๋ฆฌ(Dynamic Parallelism)</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpp_autograd.html">C++ ํ๋ก ํธ์๋์ ์๋ ๋ฏธ๋ถ (autograd)</a></li>
</ul>
<p class="caption"><span class="caption-text">PyTorch ํ์ฅํ๊ธฐ</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="../intermediate/custom_function_double_backward_tutorial.html">Double Backward with Custom Functions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/custom_function_conv_bn_tutorial.html">Fusing Convolution and Batch Norm using Custom Function</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpp_extension.html">Custom C++ and CUDA Extensions</a></li>
<li class="toctree-l1"><a class="reference internal" href="torch_script_custom_ops.html">Extending TorchScript with Custom C++ Operators</a></li>
<li class="toctree-l1"><a class="reference internal" href="torch_script_custom_classes.html">์ปค์คํ
C++ ํด๋์ค๋ก TorchScript ํ์ฅํ๊ธฐ</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">Registering a Dispatched Operator in C++</a></li>
<li class="toctree-l1"><a class="reference internal" href="extend_dispatcher.html">Extending dispatcher for a new backend in C++</a></li>
</ul>
<p class="caption"><span class="caption-text">๋ชจ๋ธ ์ต์ ํ</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/profiler.html">PyTorch ๋ชจ๋ ํ๋กํ์ผ๋ง ํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/tensorboard_profiler_tutorial.html">PyTorch Profiler With TensorBoard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/hyperparameter_tuning_tutorial.html">Hyperparameter tuning with Ray Tune</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/vt_tutorial.html">๋ฐฐํฌ๋ฅผ ์ํ ๋น์ ํธ๋์คํฌ๋จธ(Vision Transformer) ๋ชจ๋ธ ์ต์ ํํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/parametrizations.html">Parametrizations Tutorial</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/pruning_tutorial.html">๊ฐ์ง์น๊ธฐ ๊ธฐ๋ฒ(Pruning) ํํ ๋ฆฌ์ผ</a></li>
<li class="toctree-l1"><a class="reference internal" href="dynamic_quantization_tutorial.html">(๋ฒ ํ) LSTM ๊ธฐ๋ฐ ๋จ์ด ๋จ์ ์ธ์ด ๋ชจ๋ธ์ ๋์ ์์ํ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/dynamic_quantization_bert_tutorial.html">(๋ฒ ํ) BERT ๋ชจ๋ธ ๋์ ์์ํํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/quantized_transfer_learning_tutorial.html">(๋ฒ ํ) ์ปดํจํฐ ๋น์ ํํ ๋ฆฌ์ผ์ ์ํ ์์ํ๋ ์ ์ดํ์ต(Quantized Transfer Learning)</a></li>
<li class="toctree-l1"><a class="reference internal" href="static_quantization_tutorial.html">(beta) Static Quantization with Eager Mode in PyTorch</a></li>
</ul>
<p class="caption"><span class="caption-text">๋ณ๋ ฌ ๋ฐ ๋ถ์ฐ ํ์ต</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/dist_overview.html">PyTorch Distributed Overview</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/model_parallel_tutorial.html">๋จ์ผ ๋จธ์ ์ ์ฌ์ฉํ ๋ชจ๋ธ ๋ณ๋ ฌํ ๋ชจ๋ฒ ์ฌ๋ก</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/ddp_tutorial.html">๋ถ์ฐ ๋ฐ์ดํฐ ๋ณ๋ ฌ ์ฒ๋ฆฌ ์์ํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/dist_tuto.html">PyTorch๋ก ๋ถ์ฐ ์ดํ๋ฆฌ์ผ์ด์
๊ฐ๋ฐํ๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/rpc_tutorial.html">Getting Started with Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/rpc_param_server_tutorial.html">Implementing a Parameter Server Using Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/dist_pipeline_parallel_tutorial.html">Distributed Pipeline Parallelism Using RPC</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/rpc_async_execution.html">Implementing Batch RPC Processing Using Asynchronous Executions</a></li>
<li class="toctree-l1"><a class="reference internal" href="rpc_ddp_tutorial.html">๋ถ์ฐ ๋ฐ์ดํฐ ๋ณ๋ ฌ(DDP)๊ณผ ๋ถ์ฐ RPC ํ๋ ์์ํฌ ๊ฒฐํฉ</a></li>
<li class="toctree-l1"><a class="reference internal" href="../intermediate/pipeline_tutorial.html">ํ์ดํ๋ผ์ธ ๋ณ๋ ฌํ๋ก ํธ๋์คํฌ๋จธ ๋ชจ๋ธ ํ์ต์ํค๊ธฐ</a></li>
<li class="toctree-l1"><a class="reference internal" href="ddp_pipeline.html">๋ถ์ฐ ๋ฐ์ดํฐ ๋ณ๋ ฌ ์ฒ๋ฆฌ์ ๋ณ๋ ฌ ์ฒ๋ฆฌ ํ์ดํ๋ผ์ธ์ ์ฌ์ฉํ ํธ๋์คํฌ๋จธ ๋ชจ๋ธ ํ์ต</a></li>
<li class="toctree-l1"><a class="reference internal" href="generic_join.html">Distributed Training with Uneven Inputs Using the Join Context Manager</a></li>
</ul>
<p class="caption"><span class="caption-text">Mobile</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../beginner/deeplabv3_on_ios.html">iOS์์์ ์ด๋ฏธ์ง ๋ถํ DeepLapV3</a></li>
<li class="toctree-l1"><a class="reference internal" href="../beginner/deeplabv3_on_android.html">์๋๋ก์ด๋์์์ ์ด๋ฏธ์ง ๋ถํ DeepLapV3</a></li>
</ul>
</div>
</div>
</nav>
<div class="pytorch-container">
<div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
<div class="pytorch-breadcrumbs-wrapper">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="pytorch-breadcrumbs">
<li>
<a href="../index.html">
Tutorials
</a> >
</li>
<li>Registering a Dispatched Operator in C++</li>
<li class="pytorch-breadcrumbs-aside">
<a href="../_sources/advanced/dispatcher.rst.txt" rel="nofollow"><img src="../_static/images/view-page-source-icon.svg"></a>
</li>
</ul>
</div>
</div>
<div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
Shortcuts
</div>
</div>
<section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
<div class="pytorch-content-left">
<div class="pytorch-call-to-action-links">
<div id="tutorial-type">advanced/dispatcher</div>
<div id="google-colab-link">
<img class="call-to-action-img" src="../_static/images/pytorch-colab.svg"/>
<div class="call-to-action-desktop-view">Run in Google Colab</div>
<div class="call-to-action-mobile-view">Colab</div>
</div>
<div id="download-notebook-link">
<img class="call-to-action-notebook-img" src="../_static/images/pytorch-download.svg"/>
<div class="call-to-action-desktop-view">Download Notebook</div>
<div class="call-to-action-mobile-view">Notebook</div>
</div>
<div id="github-view-link">
<img class="call-to-action-img" src="../_static/images/pytorch-github.svg"/>
<div class="call-to-action-desktop-view">View on GitHub</div>
<div class="call-to-action-mobile-view">GitHub</div>
</div>
</div>
<div class="rst-content">
<div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
<article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
<div class="section" id="registering-a-dispatched-operator-in-c">
<h1>Registering a Dispatched Operator in C++<a class="headerlink" href="#registering-a-dispatched-operator-in-c" title="Permalink to this headline">¶</a></h1>
<p>The dispatcher is an internal component of PyTorch which is responsible for
figuring out what code should actually get run when you call a function like
<code class="docutils literal notranslate"><span class="pre">torch::add</span></code>. This can be nontrivial, because PyTorch operations need
to handle a lot of cross-cutting concerns that are “layered” on top of one
another. Here is a sampling of some of the things it handles:</p>
<ul class="simple">
<li>Switching between the CPU and CUDA implementations of an operator, depending
on the devices of the input tensors.</li>
<li>Switching between the autograd and backend implementations of an operator,
depending on whether or not autograd handling is necessary.</li>
<li>Applying autocasting when necessary for automatic mixed precision.</li>
<li>Applying batching rules when an operator is run under a <code class="docutils literal notranslate"><span class="pre">vmap</span></code> call.</li>
<li>Tracing execution of operations, if you are tracing a model for export.</li>
</ul>
<p>If in your <a class="reference external" href="torch_script_custom_ops">custom operator code</a> you find yourself
manually writing if statements to handle these cases, the dispatcher APIs can
help organize your code. (Conversely, if your custom operator is very simple
and is only for CPU inference, you probably don’t need to use the dispatcher,
just use the basic API.)</p>
<p>In this tutorial, we will describe how to structure a custom operator
registration to use the dispatcher to organize various components. We’ll
assume that you are familiar with how to
<a class="reference external" href="torch_script_custom_ops">register an operator</a> and how to write
a <a class="reference external" href="cpp_autograd">custom autograd function</a>.</p>
<div class="section" id="defining-schema-and-backend-implementations">
<h2>Defining schema and backend implementations<a class="headerlink" href="#defining-schema-and-backend-implementations" title="Permalink to this headline">¶</a></h2>
<p>The general principle behind the dispatcher is that it divides the
implementation of an operator into multiple kernels, each of which implements
functionality for a specific <em>dispatch key</em>, e.g. CPU, CUDA. The dispatcher
determines what the highest priority dispatch key is at the time
you call an operator (this is done by looking at both the tensor arguments as
well as some thread local state), and transfers control to the kernel for that
dispatch key. The end effect is that when you call an operator, we first
execute the Autograd kernel, and then we redispatch to the backend kernel
depending on the device types of the passed in tensors.</p>
<p>Let’s take a look at the various parts involved in making this
happen. First, we must define the schema for the operator in question.
Unlike simple pybind11-style operator registration, we don’t actually
provide an implementation of our operator at this point; we just
provide a schema string specifying the type signature of the operator
that all of our other kernels will abide by:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">def</span><span class="p">(</span><span class="s">"myadd(Tensor self, Tensor other) -> Tensor"</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p>Next, we need to actually provide some implementations of this operator.
For concreteness, here is a really simple implementation of addition on CPU:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">Tensor</span> <span class="nf">myadd_cpu</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">self_</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">other_</span><span class="p">)</span> <span class="p">{</span>
<span class="n">TORCH_CHECK</span><span class="p">(</span><span class="n">self_</span><span class="p">.</span><span class="n">sizes</span><span class="p">()</span> <span class="o">==</span> <span class="n">other_</span><span class="p">.</span><span class="n">sizes</span><span class="p">());</span>
<span class="n">TORCH_INTERNAL_ASSERT</span><span class="p">(</span><span class="n">self_</span><span class="p">.</span><span class="n">device</span><span class="p">().</span><span class="n">type</span><span class="p">()</span> <span class="o">==</span> <span class="n">DeviceType</span><span class="o">::</span><span class="n">CPU</span><span class="p">);</span>
<span class="n">TORCH_INTERNAL_ASSERT</span><span class="p">(</span><span class="n">other_</span><span class="p">.</span><span class="n">device</span><span class="p">().</span><span class="n">type</span><span class="p">()</span> <span class="o">==</span> <span class="n">DeviceType</span><span class="o">::</span><span class="n">CPU</span><span class="p">);</span>
<span class="n">Tensor</span> <span class="n">self</span> <span class="o">=</span> <span class="n">self_</span><span class="p">.</span><span class="n">contiguous</span><span class="p">();</span>
<span class="n">Tensor</span> <span class="n">other</span> <span class="o">=</span> <span class="n">other_</span><span class="p">.</span><span class="n">contiguous</span><span class="p">();</span>
<span class="n">Tensor</span> <span class="n">result</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">empty</span><span class="p">(</span><span class="n">self</span><span class="p">.</span><span class="n">sizes</span><span class="p">(),</span> <span class="n">self</span><span class="p">.</span><span class="n">options</span><span class="p">());</span>
<span class="k">const</span> <span class="kt">float</span><span class="o">*</span> <span class="n">self_ptr</span> <span class="o">=</span> <span class="n">self</span><span class="p">.</span><span class="n">data_ptr</span><span class="o"><</span><span class="kt">float</span><span class="o">></span><span class="p">();</span>
<span class="k">const</span> <span class="kt">float</span><span class="o">*</span> <span class="n">other_ptr</span> <span class="o">=</span> <span class="n">other</span><span class="p">.</span><span class="n">data_ptr</span><span class="o"><</span><span class="kt">float</span><span class="o">></span><span class="p">();</span>
<span class="kt">float</span><span class="o">*</span> <span class="n">result_ptr</span> <span class="o">=</span> <span class="n">result</span><span class="p">.</span><span class="n">data_ptr</span><span class="o"><</span><span class="kt">float</span><span class="o">></span><span class="p">();</span>
<span class="k">for</span> <span class="p">(</span><span class="kt">int64_t</span> <span class="n">i</span> <span class="o">=</span> <span class="mi">0</span><span class="p">;</span> <span class="n">i</span> <span class="o"><</span> <span class="n">result</span><span class="p">.</span><span class="n">numel</span><span class="p">();</span> <span class="n">i</span><span class="o">++</span><span class="p">)</span> <span class="p">{</span>
<span class="n">result_ptr</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">=</span> <span class="n">self_ptr</span><span class="p">[</span><span class="n">i</span><span class="p">]</span> <span class="o">+</span> <span class="n">other_ptr</span><span class="p">[</span><span class="n">i</span><span class="p">];</span>
<span class="p">}</span>
<span class="k">return</span> <span class="n">result</span><span class="p">;</span>
<span class="p">}</span>
</pre></div>
</div>
<p>Weโd like to register this function as an implementation of <code class="docutils literal notranslate"><span class="pre">myops::myadd</span></code>.
However, the simple way of registering it (<code class="docutils literal notranslate"><span class="pre">def("myadd",</span> <span class="pre">myadd_cpu)</span></code>) would
register the kernel to run in all cases, even if the tensor is not a CPU
tensor! (Internally, we refer to these as โcatch-allโ kernels, since they
catch all cases.) To ensure that <code class="docutils literal notranslate"><span class="pre">myadd_cpu</span></code> is only run for
CPU tensors, we can use the <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> macro:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">CPU</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="s">"myadd"</span><span class="p">,</span> <span class="n">myadd_cpu</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p>The <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> lets us register implementations for operators on
a specific dispatch key (in this case, CPU). Each call to <code class="docutils literal notranslate"><span class="pre">impl</span></code>
associates a CPU kernel with the corresponding operator (which we previously
defined in the <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY</span></code> block). If we also have a CUDA implementation <code class="docutils literal notranslate"><span class="pre">myadd_cuda</span></code>,
we can register it in a separate <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> block:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">CUDA</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="s">"myadd"</span><span class="p">,</span> <span class="n">myadd_cuda</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p>These registrations can be split across files or even across library boundaries; so
for example, you could have these two <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> blocks compiled
into a separate <code class="docutils literal notranslate"><span class="pre">myops_cpu</span></code> and <code class="docutils literal notranslate"><span class="pre">myops_cuda</span></code> dynamic libraries. Generally
speaking, the structure of your registrations will look like this:</p>
<ol class="arabic simple">
<li>A single <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY</span></code> that lists every custom operator in your namespace
in a centralized place.</li>
<li>A <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> per dispatch key that registers implementations for
that key (e.g., CPU or CUDA). If you like, you can further subdivide
<code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> blocks into a block per operator. This is convenient
if you have a separate file per operator implementation, but donโt want to
expose the operators in a header; you can just put the registration in the
cpp file that defines your operator.</li>
</ol>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p class="last">Did you know that you can also write <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code> blocks for existing
core operators in PyTorch? This is how XLA support for PyTorch is
implemented: the <code class="docutils literal notranslate"><span class="pre">torch_xla</span></code> library contains a <code class="docutils literal notranslate"><span class="pre">TORCH_LIBRARY_IMPL</span></code>
that provides implementations for all basic operators on the XLA dispatch
key.</p>
</div>
</div>
<div class="section" id="for-operators-that-do-not-need-autograd">
<h2>For operators that do not need autograd<a class="headerlink" href="#for-operators-that-do-not-need-autograd" title="Permalink to this headline">ยถ</a></h2>
<p>Note: This section only applies to versions of PyTorch <code class="docutils literal notranslate"><span class="pre">>=</span> <span class="pre">1.10</span></code>.</p>
<p>In the next section, we will discuss how to add autograd support to an operator.
But for the ops that do not need autograd support, the following kernel should be
registered to improve usability and make your op behave like PyTorchโs built-in
operators.</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">Autograd</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="n">op</span><span class="p">,</span> <span class="n">autogradNotImplementedFallback</span><span class="p">());</span>
<span class="p">}</span>
</pre></div>
</div>
<p>The above lines register an <code class="docutils literal notranslate"><span class="pre">Autograd</span></code> kernel that appends a dummy
<code class="docutils literal notranslate"><span class="pre">NotImplemented</span></code> node on forward (preserving the <code class="docutils literal notranslate"><span class="pre">requires_grad</span></code>-ness of the inputs).
On backward, the <code class="docutils literal notranslate"><span class="pre">NotImplemented</span></code> node raises an error. This can be helpful
for debugging in larger models where previously it can be hard to pin-point
exactly where the <code class="docutils literal notranslate"><span class="pre">requires_grad</span></code>-ness is lost during the forward pass.</p>
<div class="section" id="in-place-or-view-ops">
<h3>In-place or view ops<a class="headerlink" href="#in-place-or-view-ops" title="Permalink to this headline">ยถ</a></h3>
<p>To ensure correctness and best possible performance, if your op mutates an input
in-place or returns a tensor that aliases with one of the inputs, two additional
steps should be taken:</p>
<ol class="arabic simple">
<li>Register an <code class="docutils literal notranslate"><span class="pre">ADInplaceOrView</span></code> kernel in addition to the <code class="docutils literal notranslate"><span class="pre">Autograd</span></code> kernel
above. This kernel handles the necessary bookkeeping to ensure the correctness
of in-place or view operations. It is important to note that this ADInplaceOrView
kernel should only be used with <code class="docutils literal notranslate"><span class="pre">autogradNotImplementedFallback</span></code>.</li>
</ol>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">Autograd</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="n">op</span><span class="p">,</span> <span class="n">autogradNotImplementedFallback</span><span class="p">());</span>
<span class="p">}</span>
<span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">ADInplaceOrView</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="n">op</span><span class="p">,</span> <span class="n">autogradNotImplementedInplaceOrViewFallback</span><span class="p">());</span>
<span class="p">}</span>
</pre></div>
</div>
<ol class="arabic simple" start="2">
<li>The <code class="docutils literal notranslate"><span class="pre">Autograd</span></code> or <code class="docutils literal notranslate"><span class="pre">ADInplaceOrView</span></code> boxed kernels registered above
rely on operator schema information in their logic. If your op mutates an input
in-place or returns a tensor that aliases with one of the inputs it is important to
ensure that your schema properly reflects this. See
<a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/aten/src/ATen/native/README.md">here</a>
for more information on how to annotate the schema.</li>
</ol>
</div>
</div>
<div class="section" id="adding-autograd-support">
<span id="autograd-support"></span><h2>Adding autograd support<a class="headerlink" href="#adding-autograd-support" title="Permalink to this headline">ยถ</a></h2>
<p>At this point, we have an operator with both CPU and CUDA implementations. How
can we add autograd support to it? As you might guess, we will register an
autograd kernel (similar to whatโs described in the <a class="reference external" href="cpp_autograd">custom autograd function</a> tutorial)!
However, there is a twist: unlike the CPU and CUDA kernels, the autograd kernel
needs to <em>redispatch</em>: it needs to call back into the dispatcher to get to
the inference kernels, e.g. CPU or CUDA implementations.</p>
<p>Thus, before we write the autograd kernel, letโs write a <em>dispatching function</em>
which calls into the dispatcher to find the right kernel for your operator.
This function constitutes the public C++ API for your operatorsโin fact, all of
the tensor functions in PyTorchโs C++ API call the dispatcher in the same
way under the hood. Hereโs what the dispatching function looks like:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">Tensor</span> <span class="nf">myadd</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">self</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="k">static</span> <span class="k">auto</span> <span class="n">op</span> <span class="o">=</span> <span class="n">torch</span><span class="o">::</span><span class="n">Dispatcher</span><span class="o">::</span><span class="n">singleton</span><span class="p">()</span>
<span class="p">.</span><span class="n">findSchemaOrThrow</span><span class="p">(</span><span class="s">"myops::myadd"</span><span class="p">,</span> <span class="s">""</span><span class="p">)</span>
<span class="p">.</span><span class="n">typed</span><span class="o"><</span><span class="k">decltype</span><span class="p">(</span><span class="n">myadd</span><span class="p">)</span><span class="o">></span><span class="p">();</span>
<span class="k">return</span> <span class="n">op</span><span class="p">.</span><span class="n">call</span><span class="p">(</span><span class="n">self</span><span class="p">,</span> <span class="n">other</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p>Letโs break it down:</p>
<ul>
<li><p class="first">In the first line, we look up a typed operator handle from the dispatcher
corresponding to the operator that we are going to dispatch to.
<code class="docutils literal notranslate"><span class="pre">findSchemaOrThrow</span></code> takes two arguments: the (namespace qualified) name
of the operator, and the overload name of the operator (typically just
the empty string). <code class="docutils literal notranslate"><span class="pre">typed</span></code> casts the dynamically typed handle into
a statically typed handle (doing a runtime test to make sure youโve given
the correct C++ type), so that we can do a normal C++ call on it. We
pass it <code class="docutils literal notranslate"><span class="pre">decltype(myadd)</span></code> since the type of the dispatching function is
the same as the type of the underlying kernels registered to the dispatcher.</p>
<p>For performance, this computation is done in a static variable, so that
we only need to do the (slow) lookup once. If you typoed the name of the
operator you want to call, this lookup will error the first time you call this
function.</p>
</li>
<li><p class="first">In the second line, we simply <code class="docutils literal notranslate"><span class="pre">call</span></code> the operator handle with all of the
arguments passed into the dispatching function. This will actually invoke
the dispatcher and in the end control will be transferred to whatever kernel
is appropriate for this call.</p>
</li>
</ul>
<p>With the dispatch function in hand, we can now write the autograd kernel:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MyAddFunction</span> <span class="o">:</span> <span class="k">public</span> <span class="n">torch</span><span class="o">::</span><span class="n">autograd</span><span class="o">::</span><span class="n">Function</span><span class="o"><</span><span class="n">MyAddFunction</span><span class="o">></span> <span class="p">{</span>
<span class="k">public</span><span class="o">:</span>
<span class="k">static</span> <span class="n">Tensor</span> <span class="n">forward</span><span class="p">(</span>
<span class="n">AutogradContext</span> <span class="o">*</span><span class="n">ctx</span><span class="p">,</span> <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">self</span><span class="p">,</span> <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="n">at</span><span class="o">::</span><span class="n">AutoNonVariableTypeMode</span> <span class="n">g</span><span class="p">;</span>
<span class="k">return</span> <span class="nf">myadd</span><span class="p">(</span><span class="n">self</span><span class="p">,</span> <span class="n">other</span><span class="p">);</span>
<span class="p">}</span>
<span class="k">static</span> <span class="n">tensor_list</span> <span class="n">backward</span><span class="p">(</span><span class="n">AutogradContext</span> <span class="o">*</span><span class="n">ctx</span><span class="p">,</span> <span class="n">tensor_list</span> <span class="n">grad_outputs</span><span class="p">)</span> <span class="p">{</span>
<span class="k">auto</span> <span class="n">grad_output</span> <span class="o">=</span> <span class="n">grad_outputs</span><span class="p">[</span><span class="mi">0</span><span class="p">];</span>
<span class="k">return</span> <span class="p">{</span><span class="n">grad_output</span><span class="p">,</span> <span class="n">grad_output</span><span class="p">};</span>
<span class="p">}</span>
<span class="p">};</span>
<span class="n">Tensor</span> <span class="nf">myadd_autograd</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">self</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="k">return</span> <span class="n">MyAddFunction</span><span class="o">::</span><span class="n">apply</span><span class="p">(</span><span class="n">self</span><span class="p">,</span> <span class="n">other</span><span class="p">)[</span><span class="mi">0</span><span class="p">];</span>
<span class="p">}</span>
</pre></div>
</div>
<p>The autograd function is written as normal using <code class="docutils literal notranslate"><span class="pre">torch::autograd::Function</span></code>,
except that instead of directly writing the implementation in <code class="docutils literal notranslate"><span class="pre">forward()</span></code>,
we:</p>
<ol class="arabic simple">
<li>Turn off autograd handling with the <code class="docutils literal notranslate"><span class="pre">at::AutoNonVariableTypeMode</span></code> RAII
guard, and then</li>
<li>Call the dispatch function <code class="docutils literal notranslate"><span class="pre">myadd</span></code> to call back into the dispatcher.</li>
</ol>
<p>Without (1), your calls will infinite loop (and stack overflow), because
<code class="docutils literal notranslate"><span class="pre">myadd</span></code> will send you back to this function (as the highest priority dispatch
key would still be autograd.) With (1),
autograd is excluded from the set of dispatch keys under consideration, and
we will go to the next handlers, which will be either CPU or CUDA.</p>
<p>We can now register this function in the same way we registered the CPU/CUDA
functions:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">Autograd</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="s">"myadd"</span><span class="p">,</span> <span class="n">myadd_autograd</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<div class="admonition note">
<p class="first admonition-title">Note</p>
<p class="last">In this example we register the kernel to <code class="docutils literal notranslate"><span class="pre">Autograd</span></code>, which installs it as the
autograd kernel for all backends. You can also register optimized kernels for specific
backends by using the corresponding backend-specific dispatch key - for example,
<code class="docutils literal notranslate"><span class="pre">AutogradCPU</span></code> or <code class="docutils literal notranslate"><span class="pre">AutogradCUDA</span></code>. To explore these and other dispatch key
options in more detail, check out the <code class="docutils literal notranslate"><span class="pre">PythonDispatcher</span></code> tool provided in
<a class="reference external" href="https://github.com/pytorch/pytorch/blob/master/torch/_python_dispatcher.py">torch/_python_dispatcher.py</a>.</p>
</div>
</div>
<div class="section" id="going-beyond-autograd">
<h2>Going beyond autograd<a class="headerlink" href="#going-beyond-autograd" title="Permalink to this headline">ยถ</a></h2>
<p>In some sense, the dispatcher isnโt doing all that much: all it does is
implement a glorified if-statement, along the lines of this:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="k">class</span> <span class="nc">MyAddFunction</span> <span class="o">:</span> <span class="p">...</span> <span class="p">{</span>
<span class="k">public</span><span class="o">:</span>
<span class="k">static</span> <span class="n">Tensor</span> <span class="n">forward</span><span class="p">(</span>
<span class="n">AutogradContext</span> <span class="o">*</span><span class="n">ctx</span><span class="p">,</span> <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">self</span><span class="p">,</span> <span class="n">torch</span><span class="o">::</span><span class="n">Tensor</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="k">if</span> <span class="p">(</span><span class="n">self</span><span class="p">.</span><span class="n">device</span><span class="p">().</span><span class="n">type</span><span class="p">()</span> <span class="o">==</span> <span class="n">DeviceType</span><span class="o">::</span><span class="n">CPU</span><span class="p">)</span> <span class="p">{</span>
<span class="k">return</span> <span class="nf">add_cpu</span><span class="p">(</span><span class="n">self</span><span class="p">,</span> <span class="n">other</span><span class="p">);</span>
<span class="p">}</span> <span class="k">else</span> <span class="k">if</span> <span class="p">(</span><span class="n">self</span><span class="p">.</span><span class="n">device</span><span class="p">().</span><span class="n">type</span><span class="p">()</span> <span class="o">==</span> <span class="n">DeviceType</span><span class="o">::</span><span class="n">CUDA</span><span class="p">)</span> <span class="p">{</span>
<span class="k">return</span> <span class="nf">add_cuda</span><span class="p">(</span><span class="n">self</span><span class="p">,</span> <span class="n">other</span><span class="p">);</span>
<span class="p">}</span> <span class="k">else</span> <span class="p">{</span>
<span class="n">TORCH_CHECK</span><span class="p">(</span><span class="mi">0</span><span class="p">,</span> <span class="s">"Unsupported device "</span><span class="p">,</span> <span class="n">self</span><span class="p">.</span><span class="n">device</span><span class="p">().</span><span class="n">type</span><span class="p">());</span>
<span class="p">}</span>
<span class="p">}</span>
<span class="p">...</span>
<span class="p">}</span>
</pre></div>
</div>
<p>So why use the dispatcher? There are a few reasons:</p>
<ol class="arabic simple">
<li>It is decentralized. You can assemble all of the pieces of an operator
(CPU, CUDA, Autograd) without having to write a single, centralized
if statement that refers to all of them. Importantly, third parties can
register extra implementations for other aspects without having to patch the
original definition of an operator. Weโll talk more about extending the
dispatcher in <a class="reference external" href="extend_dispatcher">extending dispatcher for a new backend</a>.</li>
<li>It supports more dispatch keys than CPU, CUDA and Autograd. You can
see a full list of dispatch keys that are currently implemented
in PyTorch in <code class="docutils literal notranslate"><span class="pre">c10/core/DispatchKey.h</span></code>. These dispatch keys
implement a variety of optional functionality for operators, and if you
decide you want your custom operator to support this functionality,
all you have to do is register a kernel for the appropriate key.</li>
<li>The dispatcher implements support for boxed fallback functions, which
are functions that can be implemented once and apply to all operators
in the system. Boxed fallbacks can be used to provide default behavior
for a dispatch key; if you use the dispatcher to implement your operator,
you also opt into the fallbacks for all of these operations.</li>
</ol>
<p>Here are some particular dispatch keys which you may need to define an operator
for.</p>
<div class="section" id="autocast">
<h3>Autocast<a class="headerlink" href="#autocast" title="Permalink to this headline">ยถ</a></h3>
<p>The Autocast dispatch key implements support for
<a class="reference external" href="https://pytorch.org/docs/stable/amp.html">automatic mixed precision (AMP)</a>.
An autocast wrapper kernel typically casts incoming <code class="docutils literal notranslate"><span class="pre">float16</span></code> or <code class="docutils literal notranslate"><span class="pre">float32</span></code> CUDA tensors
to some preferred precision before running the op.
For example, matmuls and convolutions on floating-point CUDA tensors usually run faster
and use less memory in <code class="docutils literal notranslate"><span class="pre">float16</span></code> without impairing convergence.
Autocast wrappers only have an effect in
<a class="reference external" href="https://pytorch.org/docs/stable/amp.html#torch.cuda.amp.autocast">autocast-enabled contexts</a>.</p>
<p>Hereโs an autocast wrapper for a hypothetical custom matmul, along with its registration:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="c1">// Autocast-specific helper functions</span>
<span class="cp">#include</span> <span class="cpf"><ATen/autocast_mode.h></span><span class="cp"></span>
<span class="n">Tensor</span> <span class="nf">mymatmul_autocast</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">self</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="n">c10</span><span class="o">::</span><span class="n">impl</span><span class="o">::</span><span class="n">ExcludeDispatchKeyGuard</span> <span class="n">no_autocast</span><span class="p">(</span><span class="n">c10</span><span class="o">::</span><span class="n">DispatchKey</span><span class="o">::</span><span class="n">Autocast</span><span class="p">);</span>
<span class="k">return</span> <span class="n">mymatmul</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">kHalf</span><span class="p">,</span> <span class="n">self</span><span class="p">),</span>
<span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">kHalf</span><span class="p">,</span> <span class="n">other</span><span class="p">));</span>
<span class="p">}</span>
<span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">Autocast</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="s">"mymatmul"</span><span class="p">,</span> <span class="n">mymatmul_autocast</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p><code class="docutils literal notranslate"><span class="pre">cached_cast(kHalf,</span> <span class="pre">tensor)</span></code> casts <code class="docutils literal notranslate"><span class="pre">tensor</span></code> to <code class="docutils literal notranslate"><span class="pre">float16</span></code> if <code class="docutils literal notranslate"><span class="pre">tensor</span></code> is CUDA and <code class="docutils literal notranslate"><span class="pre">float32</span></code>;
otherwise, it leaves <code class="docutils literal notranslate"><span class="pre">tensor</span></code> unchanged (c.f. the
<a class="reference external" href="https://pytorch.org/docs/stable/amp.html#op-eligibility">eligibility policy</a> for natively autocasted ops).
This ensures if the network calls <code class="docutils literal notranslate"><span class="pre">mymatmul</span></code> on any mixture of <code class="docutils literal notranslate"><span class="pre">float16</span></code> and <code class="docutils literal notranslate"><span class="pre">float32</span></code> CUDA tensors,
<code class="docutils literal notranslate"><span class="pre">mymatmul</span></code> runs in <code class="docutils literal notranslate"><span class="pre">float16</span></code>. Meanwhile, calls to <code class="docutils literal notranslate"><span class="pre">mymatmul</span></code> with non-CUDA, integer-type, or <code class="docutils literal notranslate"><span class="pre">float64</span></code>
inputs are unaffected. Using <code class="docutils literal notranslate"><span class="pre">cached_cast</span></code> to follow the native eligibility policy in your own autocast wrapper
is recommended, but not required. For example, if you wanted to force <code class="docutils literal notranslate"><span class="pre">float16</span></code> execution for all input types,
you could <code class="docutils literal notranslate"><span class="pre">return</span> <span class="pre">mymatmul(self.half(),</span> <span class="pre">other.half());</span></code> instead of using <code class="docutils literal notranslate"><span class="pre">cached_cast</span></code>.</p>
<p>Notice that, like our autograd kernels, we exclude the <code class="docutils literal notranslate"><span class="pre">Autocast</span></code> key from
dispatch before redispatching.</p>
<p>By default, if no autocast wrapper is provided,
we fallthrough directly to the regular operator implementation (no
autocasting occurs). (We didnโt use <code class="docutils literal notranslate"><span class="pre">myadd</span></code> for this example, since pointwise
addition doesnโt need autocasting and should just fall through.)</p>
<p>When should an autocast wrapper be registered? Unfortunately, there arenโt
cut-and-dried rules for an opโs preferred precision. You can
get a sense for some native opsโ preferred precisions by looking at the
<a class="reference external" href="https://pytorch.org/docs/master/amp.html#op-specific-behavior">cast lists</a>.
General guidance:</p>
<ul class="simple">
<li>Ops that do reductions should probably execute in <code class="docutils literal notranslate"><span class="pre">float32</span></code>,</li>
<li>Any op that does a convolution or gemm under the hood should
probably execute in <code class="docutils literal notranslate"><span class="pre">float16</span></code>, and</li>
<li>Other ops with multiple floating-point tensor inputs should standardize
them to a common precision (unless the implementation supports inputs with different precisions).</li>
</ul>
<p>If your custom op falls into the third category, the <code class="docutils literal notranslate"><span class="pre">promote_type</span></code> template
helps figure out the widest floating-point type present among input tensors, which is
the safest choice for the execution type:</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span> <span class="cpf"><ATen/autocast_mode.h></span><span class="cp"></span>
<span class="n">Tensor</span> <span class="nf">my_multiple_input_op_autocast</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">t0</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">t1</span><span class="p">)</span> <span class="p">{</span>
<span class="n">c10</span><span class="o">::</span><span class="n">impl</span><span class="o">::</span><span class="n">ExcludeDispatchKeyGuard</span> <span class="n">no_autocast</span><span class="p">(</span><span class="n">c10</span><span class="o">::</span><span class="n">DispatchKey</span><span class="o">::</span><span class="n">Autocast</span><span class="p">);</span>
<span class="c1">// The required at::kHalf argument is an optimistic initial guess.</span>
<span class="k">auto</span> <span class="n">exec_type</span> <span class="o">=</span> <span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">promote_type</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">kHalf</span><span class="p">,</span> <span class="n">t0</span><span class="p">,</span> <span class="n">t1</span><span class="p">);</span>
<span class="k">return</span> <span class="n">my_multiple_input_op</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="n">exec_type</span><span class="p">,</span> <span class="n">t0</span><span class="p">),</span>
<span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="n">exec_type</span><span class="p">,</span> <span class="n">t1</span><span class="p">));</span>
<span class="p">}</span>
</pre></div>
</div>
<p>If your custom op is <a class="reference internal" href="#autograd-support"><span class="std std-ref">autograd-enabled</span></a>, you only need to write and register
an autocast wrapper for the same name onto which the autograd wrapper is registered.
For example, if you wanted an autocast wrapper for the <code class="docutils literal notranslate"><span class="pre">myadd</span></code> function shown
in the autograd section, all you'd need is</p>
<div class="highlight-cpp notranslate"><div class="highlight"><pre><span></span><span class="n">Tensor</span> <span class="nf">myadd_autocast</span><span class="p">(</span><span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">self</span><span class="p">,</span> <span class="k">const</span> <span class="n">Tensor</span><span class="o">&</span> <span class="n">other</span><span class="p">)</span> <span class="p">{</span>
<span class="n">c10</span><span class="o">::</span><span class="n">impl</span><span class="o">::</span><span class="n">ExcludeDispatchKeyGuard</span> <span class="n">no_autocast</span><span class="p">(</span><span class="n">c10</span><span class="o">::</span><span class="n">DispatchKey</span><span class="o">::</span><span class="n">Autocast</span><span class="p">);</span>
<span class="k">return</span> <span class="n">myadd</span><span class="p">(</span><span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="o"><</span><span class="n">desired</span> <span class="n">dtype</span><span class="o">></span><span class="p">,</span> <span class="n">self</span><span class="p">),</span>
<span class="n">at</span><span class="o">::</span><span class="n">autocast</span><span class="o">::</span><span class="n">cached_cast</span><span class="p">(</span><span class="o"><</span><span class="n">desired</span> <span class="n">dtype</span><span class="o">></span><span class="p">,</span> <span class="n">other</span><span class="p">));</span>
<span class="p">}</span>
<span class="n">TORCH_LIBRARY_IMPL</span><span class="p">(</span><span class="n">myops</span><span class="p">,</span> <span class="n">Autocast</span><span class="p">,</span> <span class="n">m</span><span class="p">)</span> <span class="p">{</span>
<span class="n">m</span><span class="p">.</span><span class="n">impl</span><span class="p">(</span><span class="s">"myadd"</span><span class="p">,</span> <span class="n">myadd_autocast</span><span class="p">);</span>
<span class="p">}</span>
</pre></div>
</div>
<p>There are no separate gymnastics to make the backward method autocast compatible.
However, the backward method defined in your custom autograd function will run in the same
dtype as autocast sets for the forward method, so you should choose a <code class="docutils literal notranslate"><span class="pre"><desired</span> <span class="pre">dtype></span></code>
suitable for both your forward and backward methods.</p>
</div>
<div class="section" id="batched">
<h3>Batched<a class="headerlink" href="#batched" title="Permalink to this headline">ยถ</a></h3>
<p>Batched tensors allow you to write your code in a per-example manner, and then
have them be automatically batched when run under a <code class="docutils literal notranslate"><span class="pre">vmap</span></code> invocation. The
API for writing batching rules is currently under development, but once it is
stabilized, you can add support for <code class="docutils literal notranslate"><span class="pre">vmap</span></code> for your operators by registering
a kernel at the Batched dispatch key.</p>
</div>
<div class="section" id="tracer">
<h3>Tracer<a class="headerlink" href="#tracer" title="Permalink to this headline">ยถ</a></h3>
<p>The Tracer dispatch key implements support for recording invocations of operators
into a trace when you run <code class="docutils literal notranslate"><span class="pre">torch.jit.trace</span></code>. We intend to provide a
boxed fallback that will implement tracing for arbitrary operations,
see <a class="reference external" href="https://github.com/pytorch/pytorch/issues/41478">issue #41478</a> to track
progress.</p>
</div>
</div>
</div>
</article>
</div>
<footer>
<div class="rst-footer-buttons" role="navigation" aria-label="footer navigation">
<a href="extend_dispatcher.html" class="btn btn-neutral float-right" title="Extending dispatcher for a new backend in C++" accesskey="n" rel="next">Next <img src="../_static/images/chevron-right-orange.svg" class="next-page"></a>
<a href="torch_script_custom_classes.html" class="btn btn-neutral" title="์ปค์คํ
C++ ํด๋์ค๋ก TorchScript ํ์ฅํ๊ธฐ" accesskey="p" rel="prev"><img src="../_static/images/chevron-right-orange.svg" class="previous-page"> Previous</a>
</div>
<hr class="rating-hr hr-top">
<div class="rating-container">
<div class="rating-prompt">Rate this Tutorial</div>
<div class="stars-outer">
<i class="far fa-star" title="1 Star" data-behavior="tutorial-rating" data-count="1"></i>
<i class="far fa-star" title="2 Stars" data-behavior="tutorial-rating" data-count="2"></i>
<i class="far fa-star" title="3 Stars" data-behavior="tutorial-rating" data-count="3"></i>
<i class="far fa-star" title="4 Stars" data-behavior="tutorial-rating" data-count="4"></i>
<i class="far fa-star" title="5 Stars" data-behavior="tutorial-rating" data-count="5"></i>
</div>
</div>
<hr class="rating-hr hr-bottom"/>
<div role="contentinfo">
<p>
© Copyright 2021, PyTorch & PyTorch Korea Community.
</p>
</div>
<div>
Built with <a href="http://sphinx-doc.org/">Sphinx</a> using a <a href="https://github.com/rtfd/sphinx_rtd_theme">theme</a> provided by <a href="https://readthedocs.org">Read the Docs</a>.
</div>
</footer>
</div>
</div>
<div class="pytorch-content-right" id="pytorch-content-right">
<div class="pytorch-right-menu" id="pytorch-right-menu">
<div class="pytorch-side-scroll" id="pytorch-side-scroll-right">
<ul>
<li><a class="reference internal" href="#">Registering a Dispatched Operator in C++</a><ul>
<li><a class="reference internal" href="#defining-schema-and-backend-implementations">Defining schema and backend implementations</a></li>
<li><a class="reference internal" href="#for-operators-that-do-not-need-autograd">For operators that do not need autograd</a><ul>
<li><a class="reference internal" href="#in-place-or-view-ops">In-place or view ops</a></li>
</ul>
</li>
<li><a class="reference internal" href="#adding-autograd-support">Adding autograd support</a></li>
<li><a class="reference internal" href="#going-beyond-autograd">Going beyond autograd</a><ul>
<li><a class="reference internal" href="#autocast">Autocast</a></li>
<li><a class="reference internal" href="#batched">Batched</a></li>
<li><a class="reference internal" href="#tracer">Tracer</a></li>
</ul>
</li>
</ul>
</li>
</ul>
</div>
</div>
</div>
</section>
</div>
<script type="text/javascript" id="documentation_options" data-url_root="../" src="../_static/documentation_options.js"></script>
<script type="text/javascript" src="../_static/jquery.js"></script>
<script type="text/javascript" src="../_static/underscore.js"></script>
<script type="text/javascript" src="../_static/doctools.js"></script>
<script type="text/javascript" src="../_static/clipboard.min.js"></script>
<script type="text/javascript" src="../_static/copybutton.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/katex.min.js"></script>
<script type="text/javascript" src="https://cdn.jsdelivr.net/npm/katex@0.13.11/dist/contrib/auto-render.min.js"></script>
<script type="text/javascript" src="../_static/katex_autorenderer.js"></script>
<script type="text/javascript" src="../_static/js/vendor/popper.min.js"></script>
<script type="text/javascript" src="../_static/js/vendor/bootstrap.min.js"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/list.js/1.5.0/list.min.js"></script>
<script type="text/javascript" src="../_static/js/theme.js"></script>
<script type="text/javascript">
jQuery(function () {
SphinxRtdTheme.Navigation.enable(true);
});
</script>
<script>
//add microsoft link
// On "PyTorch basics" tutorial pages, prepend a "Run in Microsoft Learn"
// call-to-action linking to the matching Microsoft Learn module.
if (window.location.href.indexOf("/beginner/basics/") != -1)
{
  // Default target: the Learn path landing page, used when the current
  // page has no specific module mapping below.
  var url = "https://docs.microsoft.com/learn/paths/pytorch-fundamentals/?wt.mc_id=aiml-7486-cxa";
  // Map the page basename (without ".html") to its Learn module URL.
  switch (window.location.pathname.split("/").pop().replace('.html', ''))
  {
    case "quickstart_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/9-quickstart?WT.mc_id=aiml-7486-cxa";
      break;
    case "tensorqs_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/2-tensors?WT.mc_id=aiml-7486-cxa";
      break;
    case "data_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/3-data?WT.mc_id=aiml-7486-cxa";
      break;
    case "transforms_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/4-transforms?WT.mc_id=aiml-7486-cxa";
      break;
    case "buildmodel_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/5-model?WT.mc_id=aiml-7486-cxa";
      break;
    case "autogradqs_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/6-autograd?WT.mc_id=aiml-7486-cxa";
      break;
    case "optimization_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/7-optimization?WT.mc_id=aiml-7486-cxa";
      break;
    case "saveloadrun_tutorial":
      url = "https://docs.microsoft.com/learn/modules/intro-machine-learning-pytorch/8-inference?WT.mc_id=aiml-7486-cxa";
  }
  // Fixes vs. the original markup string: the href attribute value is now
  // quoted, and the broken "isplay: flex" declaration is corrected to
  // "display: flex" so the standard flexbox fallback actually applies.
  $(".pytorch-call-to-action-links").children().first().before('<a href="' + url + '" data-behavior="call-to-action-event" data-response="Run in Microsoft Learn" target="_blank"><div id="microsoft-learn-link" style="padding-bottom: 0.625rem;border-bottom: 1px solid #f3f4f7;padding-right: 2.5rem;display: -webkit-box; display: -ms-flexbox; display: flex; -webkit-box-align: center;-ms-flex-align: center;align-items: center;"><img class="call-to-action-img" src="../../_static/images/microsoft-logo.svg"/><div class="call-to-action-desktop-view">Run in Microsoft Learn</div><div class="call-to-action-mobile-view">Learn</div></div></a>')
}
</script>
<script async src="https://www.googletagmanager.com/gtag/js?id=UA-71919972-3"></script>
<script>
// Standard gtag.js bootstrap: commands are queued on window.dataLayer
// until the async gtag script (loaded above) drains them.
window.dataLayer = window.dataLayer || [];
// NOTE: gtag must push the `arguments` object itself (not a copied
// array) — gtag.js distinguishes command calls by that instance type.
function gtag(){dataLayer.push(arguments);}
gtag('js', new Date());
// Configure the Universal Analytics property for this site.
gtag('config', 'UA-71919972-3');
</script>
<script>
// Report call-to-action clicks to both legacy Google Analytics
// (analytics.js `ga`, when available) and gtag.js.
$("[data-behavior='call-to-action-event']").on('click', function(){
  // Guard: only gtag.js is loaded by this page's visible script tags.
  // Without the guard, a missing `ga` throws a ReferenceError and the
  // gtag('event', ...) call below never runs.
  if (typeof ga === 'function') {
    ga('send', {
      hitType: 'event',
      eventCategory: $(this).attr("data-response"),
      eventAction: 'click',
      eventLabel: window.location.href
    });
  }
  gtag('event', 'click', {
    'event_category': $(this).attr("data-response"),
    'event_label': $("h1").first().text(),
    'tutorial_link': window.location.href
  });
});
// Report star-rating clicks; the star's data-count (1-5) is the value.
$("[data-behavior='tutorial-rating']").on('click', function(){
  gtag('event', 'click', {
    'event_category': 'Tutorial Rating',
    'event_label': $("h1").first().text(),
    'value': $(this).attr("data-count")
  });
});
// Hide the rating widget on the site landing page.
if (location.pathname == "/") {
  $(".rating-container").hide();
  $(".hr-bottom").hide();
}
</script>
<script type="text/javascript">
// Sidebar section titles (Korean) that the theme should render collapsed
// by default. NOTE(review): these strings must match the rendered left-nav
// section titles exactly — do not normalize or re-encode them.
var collapsedSections = ['ํ์ดํ ์น(PyTorch) ๋ ์ํผ', 'ํ์ดํ ์น(PyTorch) ๋ฐฐ์ฐ๊ธฐ', '์ด๋ฏธ์ง/๋น๋์ค', '์ค๋์ค', 'ํ
์คํธ', '๊ฐํํ์ต', 'PyTorch ๋ชจ๋ธ์ ํ๋ก๋์
ํ๊ฒฝ์ ๋ฐฐํฌํ๊ธฐ', 'Code Transforms with FX', 'ํ๋ก ํธ์๋ API', 'PyTorch ํ์ฅํ๊ธฐ', '๋ชจ๋ธ ์ต์ ํ', '๋ณ๋ ฌ ๋ฐ ๋ถ์ฐ ํ์ต', 'Mobile'];
</script>
<!-- Begin Footer -->
<div class="container-fluid docs-tutorials-resources" id="docs-tutorials-resources">
<div class="container">
<div class="row">
<div class="col-md-4 text-center">
<h2>๊ณต์ ๋ฌธ์ (์์ด)</h2>
<p>PyTorch ๊ณต์ ๋ฌธ์์
๋๋ค.</p>
<a id="orgTutorialLink" class="with-right-arrow" href="https://pytorch.org/docs/stable/index.html" target="_blank">๊ณต์ ๋ฌธ์๋ก ์ด๋</a>
</div>
<div class="col-md-4 text-center">
<h2>ํ๊ตญ์ด ํํ ๋ฆฌ์ผ</h2>
<p>ํ๊ตญ์ด๋ก ๋ฒ์ญ ์ค์ธ PyTorch ํํ ๋ฆฌ์ผ์
๋๋ค.</p>
<a class="with-right-arrow" href="https://tutorials.pytorch.kr">ํํ ๋ฆฌ์ผ๋ก ์ด๋</a>
</div>
<div class="col-md-4 text-center">
<h2>์ปค๋ฎค๋ํฐ</h2>
<p>๋ค๋ฅธ ์ฌ์ฉ์๋ค๊ณผ ์๊ฒฌ์ ๋๋ ๋ณด์ธ์!</p>
<a class="with-right-arrow" href="https://discuss.pytorch.kr">์ปค๋ฎค๋ํฐ๋ก ์ด๋</a>
</div>
</div>
</div>
</div>
<footer class="site-footer">
<div class="container footer-container">
<div class="footer-logo-wrapper">
<a href="https://pytorch.kr/" class="footer-logo"></a>
</div>
<div class="footer-links-wrapper">
<div class="footer-links-col">
<ul>
<li class="list-title"><a href="https://pytorch.org">PyTorch ํํ์ด์ง (๊ณต์)</a></li>
<li><a href="https://pytorch.org" target="_blank">๊ณต์ ํํ์ด์ง</a></li>
<li><a href="https://pytorch.org/tutorials" target="_blank">๊ณต์ ํํ ๋ฆฌ์ผ</a></li>
<li><a href="https://pytorch.org/docs" target="_blank">๊ณต์ ๋ฌธ์</a></li>
</ul>
</div>
<div class="footer-links-col">
<ul>