-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathcuda.html
2047 lines (1809 loc) · 196 KB
/
cuda.html
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
<!DOCTYPE html>
<!--[if IE 8]><html class="no-js lt-ie9" lang="en" > <![endif]-->
<!--[if gt IE 8]><!--> <html class="no-js" lang="en" > <!--<![endif]-->
<head>
<meta name="robots" content="noindex">
<meta charset="utf-8">
<meta content="A guide to torch.cuda, a PyTorch module to run CUDA operations" name="description" />
<meta content="memory management, PYTORCH_CUDA_ALLOC_CONF, optimize PyTorch, CUDA" name="keywords" />
<meta name="viewport" content="width=device-width, initial-scale=1.0">
<title>CUDA semantics — PyTorch 2.5 documentation</title>
<link rel="canonical" href="https://pytorch.org/docs/stable/notes/cuda.html"/>
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<!-- <link rel="stylesheet" href="../_static/pygments.css" type="text/css" /> -->
<link rel="stylesheet" href="../_static/pygments.css" type="text/css" />
<link rel="stylesheet" href="../_static/css/theme.css" type="text/css" />
<link rel="stylesheet" href="../_static/copybutton.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" type="text/css" />
<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/katex.min.css" type="text/css" />
<link rel="stylesheet" href="../_static/katex-math.css" type="text/css" />
<link rel="stylesheet" href="../_static/sphinx-dropdown.css" type="text/css" />
<link rel="stylesheet" href="../_static/panels-bootstrap.min.css" type="text/css" />
<link rel="stylesheet" href="../_static/css/jit.css" type="text/css" />
<link rel="index" title="Index" href="../genindex.html" />
<link rel="search" title="Search" href="../search.html" />
<link rel="next" title="PyTorch Custom Operators Landing Page" href="custom_operators.html" />
<link rel="prev" title="CPU threading and TorchScript inference" href="cpu_threading_torchscript_inference.html" />
<!-- Google Tag Manager -->
<script>/* Standard Google Tag Manager loader snippet: records a 'gtm.start'
   timestamp on the dataLayer, then injects an async <script> tag loading
   gtm.js for container GTM-T8XT4PS before the first existing script tag.
   Minified third-party boilerplate — do not edit by hand. */(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start':
new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0],
j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f);
})(window,document,'script','dataLayer','GTM-T8XT4PS');</script>
<!-- End Google Tag Manager -->
<script src="../_static/js/modernizr.min.js"></script>
<!-- Preload the theme fonts -->
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-book.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-Medium.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/FreightSans/freight-sans-medium-italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="../_static/fonts/IBMPlexMono/IBMPlexMono-SemiBold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<!-- Preload the katex fonts -->
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Math-Italic.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Main-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Main-Bold.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Size1-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Size4-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Size2-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Size3-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="preload" href="https://cdn.jsdelivr.net/npm/[email protected]/dist/fonts/KaTeX_Caligraphic-Regular.woff2" as="font" type="font/woff2" crossorigin="anonymous">
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.15.2/css/all.css" integrity="sha384-vSIIfh2YWi9wW0r9iZe7RJPrKwp6bG+s9QZMoITbCckVJqGCCRhc+ccxNcdpHuYu" crossorigin="anonymous">
</head>
<div class="container-fluid header-holder tutorials-header" id="header-holder">
<div class="container">
<div class="header-container">
<a class="header-logo" href="https://pytorch.org/" aria-label="PyTorch"></a>
<div class="main-menu">
<ul>
<li class="main-menu-item">
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
Learn
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/get-started">
<span class="dropdown-title">Get Started</span>
<p>Run PyTorch locally or get started quickly with one of the supported cloud platforms</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/tutorials">
<span class="dropdown-title">Tutorials</span>
<p>What's new in PyTorch tutorials</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/tutorials/beginner/basics/intro.html">
<span class="dropdown-title">Learn the Basics</span>
<p>Familiarize yourself with PyTorch concepts and modules</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/tutorials/recipes/recipes_index.html">
<span class="dropdown-title">PyTorch Recipes</span>
<p>Bite-size, ready-to-deploy PyTorch code examples</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/tutorials/beginner/introyt.html">
<span class="dropdown-title">Intro to PyTorch - YouTube Series</span>
<p>Master PyTorch basics with our engaging YouTube tutorial series</p>
</a>
</div>
</div>
</li>
<li>
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
Ecosystem
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/ecosystem">
<span class="dropdown-title">Tools</span>
<p>Learn about the tools and frameworks in the PyTorch Ecosystem</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/#community-module">
<span class="dropdown-title">Community</span>
<p>Join the PyTorch developer community to contribute, learn, and get your questions answered</p>
</a>
<a class="nav-dropdown-item" href="https://discuss.pytorch.org/" target="_blank">
<span class="dropdown-title">Forums</span>
<p>A place to discuss PyTorch code, issues, install, research</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/resources">
<span class="dropdown-title">Developer Resources</span>
<p>Find resources and get questions answered</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/ecosystem/contributor-awards-2023">
<span class="dropdown-title">Contributor Awards - 2023</span>
<p>Award winners announced at this year's PyTorch Conference</p>
</a>
</div>
</div>
</li>
<li>
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
Edge
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/edge">
<span class="dropdown-title">About PyTorch Edge</span>
<p>Build innovative and privacy-aware AI experiences for edge devices</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/executorch-overview">
<span class="dropdown-title">ExecuTorch</span>
<p>End-to-end solution for enabling on-device inference capabilities across mobile and edge devices</p>
</a>
</div>
</div>
</li>
<li class="main-menu-item">
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
Docs
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/docs/stable/index.html">
<span class="dropdown-title">PyTorch</span>
<p>Explore the documentation for comprehensive guidance on how to use PyTorch</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/pytorch-domains">
<span class="dropdown-title">PyTorch Domains</span>
<p>Read the PyTorch Domains documentation to learn more about domain-specific libraries</p>
</a>
</div>
</div>
</li>
<li>
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
Blogs & News
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/blog/">
<span class="dropdown-title">PyTorch Blog</span>
<p>Catch up on the latest technical news and happenings</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/community-blog">
<span class="dropdown-title">Community Blog</span>
<p>Stories from the PyTorch ecosystem</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/videos">
<span class="dropdown-title">Videos</span>
<p>Learn about the latest PyTorch tutorials, news, and more</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/community-stories">
<span class="dropdown-title">Community Stories</span>
<p>Learn how our community solves real, everyday machine learning problems with PyTorch</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/events">
<span class="dropdown-title">Events</span>
<p>Find events, webinars, and podcasts</p>
</a>
</div>
</li>
<li>
<div id="resourcesDropdownButton" data-toggle="resources-dropdown" class="resources-dropdown">
<a class="with-down-arrow">
About
</a>
<div class="resources-dropdown-menu">
<a class="nav-dropdown-item" href="https://pytorch.org/foundation">
<span class="dropdown-title">PyTorch Foundation</span>
<p>Learn more about the PyTorch Foundation</p>
</a>
<a class="nav-dropdown-item" href="https://pytorch.org/governing-board">
<span class="dropdown-title">Governing Board</span>
<p></p>
</a>
</div>
</div>
</li>
<li class="main-menu-item">
<div class="no-dropdown">
<a href="https://pytorch.org/join" data-cta="join">
Become a Member
</a>
</div>
</li>
<li>
<div class="main-menu-item">
<a href="https://github.com/pytorch/pytorch" class="github-icon">
</a>
</div>
</li>
<!--- TODO: This block adds the search icon to the nav bar. We will enable it later.
<li>
<div class="main-menu-item">
<a href="https://github.com/pytorch/pytorch" class="search-icon">
</a>
</div>
</li>
-->
</ul>
</div>
<a class="main-menu-open-button" href="#" data-behavior="open-mobile-menu"></a>
</div>
</div>
</div>
<body class="pytorch-body">
<div class="table-of-contents-link-wrapper">
<span>Table of Contents</span>
<a href="#" class="toggle-table-of-contents" data-behavior="toggle-table-of-contents"></a>
</div>
<nav data-toggle="wy-nav-shift" class="pytorch-left-menu" id="pytorch-left-menu">
<div class="pytorch-side-scroll">
<div class="pytorch-menu pytorch-menu-vertical" data-spy="affix" role="navigation" aria-label="main navigation">
<div class="pytorch-left-menu-search">
<div class="version">
<a href='https://pytorch.org/docs/versions.html'>2.5 ▼</a>
</div>
<div role="search">
<form id="rtd-search-form" class="wy-form" action="../search.html" method="get">
<input type="text" name="q" placeholder="Search Docs" />
<input type="hidden" name="check_keywords" value="yes" />
<input type="hidden" name="area" value="default" />
</form>
</div>
</div>
<p class="caption" role="heading"><span class="caption-text">Community</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../community/build_ci_governance.html">PyTorch Governance | Build + CI</a></li>
<li class="toctree-l1"><a class="reference internal" href="../community/contribution_guide.html">PyTorch Contribution Guide</a></li>
<li class="toctree-l1"><a class="reference internal" href="../community/design.html">PyTorch Design Philosophy</a></li>
<li class="toctree-l1"><a class="reference internal" href="../community/governance.html">PyTorch Governance | Mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="../community/persons_of_interest.html">PyTorch Governance | Maintainers</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Developer Notes</span></p>
<ul class="current">
<li class="toctree-l1"><a class="reference internal" href="amp_examples.html">Automatic Mixed Precision examples</a></li>
<li class="toctree-l1"><a class="reference internal" href="autograd.html">Autograd mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="broadcasting.html">Broadcasting semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="cpu_threading_torchscript_inference.html">CPU threading and TorchScript inference</a></li>
<li class="toctree-l1 current"><a class="current reference internal" href="#">CUDA semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="custom_operators.html">PyTorch Custom Operators Landing Page</a></li>
<li class="toctree-l1"><a class="reference internal" href="ddp.html">Distributed Data Parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="extending.html">Extending PyTorch</a></li>
<li class="toctree-l1"><a class="reference internal" href="extending.func.html">Extending torch.func with autograd.Function</a></li>
<li class="toctree-l1"><a class="reference internal" href="faq.html">Frequently Asked Questions</a></li>
<li class="toctree-l1"><a class="reference internal" href="fsdp.html">FSDP Notes</a></li>
<li class="toctree-l1"><a class="reference internal" href="get_start_xpu.html">Getting Started on Intel GPU</a></li>
<li class="toctree-l1"><a class="reference internal" href="gradcheck.html">Gradcheck mechanics</a></li>
<li class="toctree-l1"><a class="reference internal" href="hip.html">HIP (ROCm) semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="large_scale_deployments.html">Features for large-scale deployments</a></li>
<li class="toctree-l1"><a class="reference internal" href="modules.html">Modules</a></li>
<li class="toctree-l1"><a class="reference internal" href="mps.html">MPS backend</a></li>
<li class="toctree-l1"><a class="reference internal" href="multiprocessing.html">Multiprocessing best practices</a></li>
<li class="toctree-l1"><a class="reference internal" href="numerical_accuracy.html">Numerical accuracy</a></li>
<li class="toctree-l1"><a class="reference internal" href="randomness.html">Reproducibility</a></li>
<li class="toctree-l1"><a class="reference internal" href="serialization.html">Serialization semantics</a></li>
<li class="toctree-l1"><a class="reference internal" href="windows.html">Windows FAQ</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Language Bindings</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../cpp_index.html">C++</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/javadoc/">Javadoc</a></li>
<li class="toctree-l1"><a class="reference internal" href="../deploy.html">torch::deploy</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Python API</span></p>
<ul>
<li class="toctree-l1"><a class="reference internal" href="../torch.html">torch</a></li>
<li class="toctree-l1"><a class="reference internal" href="../nn.html">torch.nn</a></li>
<li class="toctree-l1"><a class="reference internal" href="../nn.functional.html">torch.nn.functional</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tensors.html">torch.Tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tensor_attributes.html">Tensor Attributes</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tensor_view.html">Tensor Views</a></li>
<li class="toctree-l1"><a class="reference internal" href="../amp.html">torch.amp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../autograd.html">torch.autograd</a></li>
<li class="toctree-l1"><a class="reference internal" href="../library.html">torch.library</a></li>
<li class="toctree-l1"><a class="reference internal" href="../cpu.html">torch.cpu</a></li>
<li class="toctree-l1"><a class="reference internal" href="../cuda.html">torch.cuda</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch_cuda_memory.html">Understanding CUDA Memory Usage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch_cuda_memory.html#generating-a-snapshot">Generating a Snapshot</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch_cuda_memory.html#using-the-visualizer">Using the visualizer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch_cuda_memory.html#snapshot-api-reference">Snapshot API Reference</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mps.html">torch.mps</a></li>
<li class="toctree-l1"><a class="reference internal" href="../xpu.html">torch.xpu</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mtia.html">torch.mtia</a></li>
<li class="toctree-l1"><a class="reference internal" href="../meta.html">Meta device</a></li>
<li class="toctree-l1"><a class="reference internal" href="../backends.html">torch.backends</a></li>
<li class="toctree-l1"><a class="reference internal" href="../export.html">torch.export</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.html">torch.distributed</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.tensor.html">torch.distributed.tensor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.algorithms.join.html">torch.distributed.algorithms.join</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.elastic.html">torch.distributed.elastic</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fsdp.html">torch.distributed.fsdp</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.tensor.parallel.html">torch.distributed.tensor.parallel</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.optim.html">torch.distributed.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.pipelining.html">torch.distributed.pipelining</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributed.checkpoint.html">torch.distributed.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../distributions.html">torch.distributions</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch.compiler.html">torch.compiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fft.html">torch.fft</a></li>
<li class="toctree-l1"><a class="reference internal" href="../func.html">torch.func</a></li>
<li class="toctree-l1"><a class="reference internal" href="../futures.html">torch.futures</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fx.html">torch.fx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../fx.experimental.html">torch.fx.experimental</a></li>
<li class="toctree-l1"><a class="reference internal" href="../hub.html">torch.hub</a></li>
<li class="toctree-l1"><a class="reference internal" href="../jit.html">torch.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../linalg.html">torch.linalg</a></li>
<li class="toctree-l1"><a class="reference internal" href="../monitor.html">torch.monitor</a></li>
<li class="toctree-l1"><a class="reference internal" href="../signal.html">torch.signal</a></li>
<li class="toctree-l1"><a class="reference internal" href="../special.html">torch.special</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch.overrides.html">torch.overrides</a></li>
<li class="toctree-l1"><a class="reference internal" href="../package.html">torch.package</a></li>
<li class="toctree-l1"><a class="reference internal" href="../profiler.html">torch.profiler</a></li>
<li class="toctree-l1"><a class="reference internal" href="../nn.init.html">torch.nn.init</a></li>
<li class="toctree-l1"><a class="reference internal" href="../nn.attention.html">torch.nn.attention</a></li>
<li class="toctree-l1"><a class="reference internal" href="../onnx.html">torch.onnx</a></li>
<li class="toctree-l1"><a class="reference internal" href="../optim.html">torch.optim</a></li>
<li class="toctree-l1"><a class="reference internal" href="../complex_numbers.html">Complex Numbers</a></li>
<li class="toctree-l1"><a class="reference internal" href="../ddp_comm_hooks.html">DDP Communication Hooks</a></li>
<li class="toctree-l1"><a class="reference internal" href="../quantization.html">Quantization</a></li>
<li class="toctree-l1"><a class="reference internal" href="../rpc.html">Distributed RPC Framework</a></li>
<li class="toctree-l1"><a class="reference internal" href="../random.html">torch.random</a></li>
<li class="toctree-l1"><a class="reference internal" href="../masked.html">torch.masked</a></li>
<li class="toctree-l1"><a class="reference internal" href="../nested.html">torch.nested</a></li>
<li class="toctree-l1"><a class="reference internal" href="../size.html">torch.Size</a></li>
<li class="toctree-l1"><a class="reference internal" href="../sparse.html">torch.sparse</a></li>
<li class="toctree-l1"><a class="reference internal" href="../storage.html">torch.Storage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../testing.html">torch.testing</a></li>
<li class="toctree-l1"><a class="reference internal" href="../utils.html">torch.utils</a></li>
<li class="toctree-l1"><a class="reference internal" href="../benchmark_utils.html">torch.utils.benchmark</a></li>
<li class="toctree-l1"><a class="reference internal" href="../bottleneck.html">torch.utils.bottleneck</a></li>
<li class="toctree-l1"><a class="reference internal" href="../checkpoint.html">torch.utils.checkpoint</a></li>
<li class="toctree-l1"><a class="reference internal" href="../cpp_extension.html">torch.utils.cpp_extension</a></li>
<li class="toctree-l1"><a class="reference internal" href="../data.html">torch.utils.data</a></li>
<li class="toctree-l1"><a class="reference internal" href="../deterministic.html">torch.utils.deterministic</a></li>
<li class="toctree-l1"><a class="reference internal" href="../jit_utils.html">torch.utils.jit</a></li>
<li class="toctree-l1"><a class="reference internal" href="../dlpack.html">torch.utils.dlpack</a></li>
<li class="toctree-l1"><a class="reference internal" href="../mobile_optimizer.html">torch.utils.mobile_optimizer</a></li>
<li class="toctree-l1"><a class="reference internal" href="../model_zoo.html">torch.utils.model_zoo</a></li>
<li class="toctree-l1"><a class="reference internal" href="../tensorboard.html">torch.utils.tensorboard</a></li>
<li class="toctree-l1"><a class="reference internal" href="../module_tracker.html">torch.utils.module_tracker</a></li>
<li class="toctree-l1"><a class="reference internal" href="../type_info.html">Type Info</a></li>
<li class="toctree-l1"><a class="reference internal" href="../named_tensor.html">Named Tensors</a></li>
<li class="toctree-l1"><a class="reference internal" href="../name_inference.html">Named Tensors operator coverage</a></li>
<li class="toctree-l1"><a class="reference internal" href="../config_mod.html">torch.__config__</a></li>
<li class="toctree-l1"><a class="reference internal" href="../future_mod.html">torch.__future__</a></li>
<li class="toctree-l1"><a class="reference internal" href="../logging.html">torch._logging</a></li>
<li class="toctree-l1"><a class="reference internal" href="../torch_environment_variables.html">Torch Environment Variables</a></li>
</ul>
<p class="caption" role="heading"><span class="caption-text">Libraries</span></p>
<ul>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/audio/stable">torchaudio</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/data">TorchData</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/torchrec">TorchRec</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/serve">TorchServe</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/text/stable">torchtext</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/vision/stable">torchvision</a></li>
<li class="toctree-l1"><a class="reference external" href="https://pytorch.org/xla/">PyTorch on XLA Devices</a></li>
</ul>
</div>
</div>
</nav>
<div class="pytorch-container">
<div class="pytorch-page-level-bar" id="pytorch-page-level-bar">
<div class="pytorch-breadcrumbs-wrapper">
<div role="navigation" aria-label="breadcrumbs navigation">
<ul class="pytorch-breadcrumbs">
<li>
<a href="../index.html">
Docs
</a> >
</li>
<li>CUDA semantics</li>
<li class="pytorch-breadcrumbs-aside">
<a href="../_sources/notes/cuda.rst.txt" rel="nofollow"><img src="../_static/images/view-page-source-icon.svg"></a>
</li>
</ul>
</div>
</div>
<div class="pytorch-shortcuts-wrapper" id="pytorch-shortcuts-wrapper">
Shortcuts
</div>
</div>
<section data-toggle="wy-nav-shift" id="pytorch-content-wrap" class="pytorch-content-wrap">
<div class="pytorch-content-left">
<!-- Google Tag Manager (noscript) -->
<noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-T8XT4PS"
height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript>
<!-- End Google Tag Manager (noscript) -->
<div class="rst-content">
<div role="main" class="main-content" itemscope="itemscope" itemtype="http://schema.org/Article">
<article itemprop="articleBody" id="pytorch-article" class="pytorch-article">
<div class="section" id="cuda-semantics">
<span id="id1"></span><h1>CUDA semantics<a class="headerlink" href="#cuda-semantics" title="Permalink to this heading">¶</a></h1>
<p><a class="reference internal" href="../cuda.html#module-torch.cuda" title="torch.cuda"><code class="xref py py-mod docutils literal notranslate"><span class="pre">torch.cuda</span></code></a> is used to set up and run CUDA operations. It keeps track of
the currently selected GPU, and all CUDA tensors you allocate will by default be
created on that device. The selected device can be changed with a
<a class="reference internal" href="../generated/torch.cuda.device.html#torch.cuda.device" title="torch.cuda.device"><code class="xref any py py-class docutils literal notranslate"><span class="pre">torch.cuda.device</span></code></a> context manager.</p>
<p>However, once a tensor is allocated, you can do operations on it irrespective
of the selected device, and the results will be always placed on the same
device as the tensor.</p>
<p>Cross-GPU operations are not allowed by default, with the exception of
<a class="reference internal" href="../generated/torch.Tensor.copy_.html#torch.Tensor.copy_" title="torch.Tensor.copy_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">copy_()</span></code></a> and other methods with copy-like functionality
such as <a class="reference internal" href="../generated/torch.Tensor.to.html#torch.Tensor.to" title="torch.Tensor.to"><code class="xref py py-meth docutils literal notranslate"><span class="pre">to()</span></code></a> and <a class="reference internal" href="../generated/torch.Tensor.cuda.html#torch.Tensor.cuda" title="torch.Tensor.cuda"><code class="xref py py-meth docutils literal notranslate"><span class="pre">cuda()</span></code></a>.
Unless you enable peer-to-peer memory access, any attempts to launch ops on
tensors spread across different devices will raise an error.</p>
<p>Below you can find a small example showcasing this:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">cuda</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda'</span><span class="p">)</span> <span class="c1"># Default CUDA device</span>
<span class="n">cuda0</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda:0'</span><span class="p">)</span>
<span class="n">cuda2</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda:2'</span><span class="p">)</span> <span class="c1"># GPU 2 (these are 0-indexed)</span>
<span class="n">x</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda0</span><span class="p">)</span>
<span class="c1"># x.device is device(type='cuda', index=0)</span>
<span class="n">y</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">])</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span>
<span class="c1"># y.device is device(type='cuda', index=0)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="mi">1</span><span class="p">):</span>
<span class="c1"># allocates a tensor on GPU 1</span>
<span class="n">a</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">],</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda</span><span class="p">)</span>
<span class="c1"># transfers a tensor from CPU to GPU 1</span>
<span class="n">b</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">])</span><span class="o">.</span><span class="n">cuda</span><span class="p">()</span>
<span class="c1"># a.device and b.device are device(type='cuda', index=1)</span>
<span class="c1"># You can also use ``Tensor.to`` to transfer a tensor:</span>
<span class="n">b2</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">tensor</span><span class="p">([</span><span class="mf">1.</span><span class="p">,</span> <span class="mf">2.</span><span class="p">])</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">device</span><span class="o">=</span><span class="n">cuda</span><span class="p">)</span>
<span class="c1"># b.device and b2.device are device(type='cuda', index=1)</span>
<span class="n">c</span> <span class="o">=</span> <span class="n">a</span> <span class="o">+</span> <span class="n">b</span>
<span class="c1"># c.device is device(type='cuda', index=1)</span>
<span class="n">z</span> <span class="o">=</span> <span class="n">x</span> <span class="o">+</span> <span class="n">y</span>
<span class="c1"># z.device is device(type='cuda', index=0)</span>
<span class="c1"># even within a context, you can specify the device</span>
<span class="c1"># (or give a GPU index to the .cuda call)</span>
<span class="n">d</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda2</span><span class="p">)</span>
<span class="n">e</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">to</span><span class="p">(</span><span class="n">cuda2</span><span class="p">)</span>
<span class="n">f</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">2</span><span class="p">)</span><span class="o">.</span><span class="n">cuda</span><span class="p">(</span><span class="n">cuda2</span><span class="p">)</span>
<span class="c1"># d.device, e.device, and f.device are all device(type='cuda', index=2)</span>
</pre></div>
</div>
<div class="section" id="tensorfloat-32-tf32-on-ampere-and-later-devices">
<span id="tf32-on-ampere"></span><h2>TensorFloat-32 (TF32) on Ampere (and later) devices<a class="headerlink" href="#tensorfloat-32-tf32-on-ampere-and-later-devices" title="Permalink to this heading">¶</a></h2>
<p>Starting in PyTorch 1.7, there is a new flag called <cite>allow_tf32</cite>. This flag
defaults to True in PyTorch 1.7 to PyTorch 1.11, and False in PyTorch 1.12 and later.
This flag controls whether PyTorch is allowed to use the TensorFloat32 (TF32) tensor cores,
available on NVIDIA GPUs since Ampere, internally to compute matmul (matrix multiplies
and batched matrix multiplies) and convolutions.</p>
<p>TF32 tensor cores are designed to achieve better performance on matmul and convolutions on
<cite>torch.float32</cite> tensors by rounding input data to have 10 bits of mantissa, and accumulating
results with FP32 precision, maintaining FP32 dynamic range.</p>
<p>matmuls and convolutions are controlled separately, and their corresponding flags can be accessed at:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="c1"># The flag below controls whether to allow TF32 on matmul. This flag defaults to False</span>
<span class="c1"># in PyTorch 1.12 and later.</span>
<span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">True</span>
<span class="c1"># The flag below controls whether to allow TF32 on cuDNN. This flag defaults to True.</span>
<span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cudnn</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">True</span>
</pre></div>
</div>
<p>The precision of matmuls can also be set more broadly (limited not just to CUDA) via <code class="xref py py-meth docutils literal notranslate"><span class="pre">set_float32_matmul_precision()</span></code>.
Note that besides matmuls and convolutions themselves, functions and nn modules that internally use
matmuls or convolutions are also affected. These include <cite>nn.Linear</cite>, <cite>nn.Conv*</cite>, cdist, tensordot,
affine grid and grid sample, adaptive log softmax, GRU and LSTM.</p>
<p>To get an idea of the precision and speed, see the example code and benchmark data (on A100) below:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">a_full</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">10240</span><span class="p">,</span> <span class="mi">10240</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="s1">'cuda'</span><span class="p">)</span>
<span class="n">b_full</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">randn</span><span class="p">(</span><span class="mi">10240</span><span class="p">,</span> <span class="mi">10240</span><span class="p">,</span> <span class="n">dtype</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">double</span><span class="p">,</span> <span class="n">device</span><span class="o">=</span><span class="s1">'cuda'</span><span class="p">)</span>
<span class="n">ab_full</span> <span class="o">=</span> <span class="n">a_full</span> <span class="o">@</span> <span class="n">b_full</span>
<span class="n">mean</span> <span class="o">=</span> <span class="n">ab_full</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">mean</span><span class="p">()</span> <span class="c1"># 80.7277</span>
<span class="n">a</span> <span class="o">=</span> <span class="n">a_full</span><span class="o">.</span><span class="n">float</span><span class="p">()</span>
<span class="n">b</span> <span class="o">=</span> <span class="n">b_full</span><span class="o">.</span><span class="n">float</span><span class="p">()</span>
<span class="c1"># Do matmul at TF32 mode.</span>
<span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">True</span>
<span class="n">ab_tf32</span> <span class="o">=</span> <span class="n">a</span> <span class="o">@</span> <span class="n">b</span> <span class="c1"># takes 0.016s on GA100</span>
<span class="n">error</span> <span class="o">=</span> <span class="p">(</span><span class="n">ab_tf32</span> <span class="o">-</span> <span class="n">ab_full</span><span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">max</span><span class="p">()</span> <span class="c1"># 0.1747</span>
<span class="n">relative_error</span> <span class="o">=</span> <span class="n">error</span> <span class="o">/</span> <span class="n">mean</span> <span class="c1"># 0.0022</span>
<span class="c1"># Do matmul with TF32 disabled.</span>
<span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">False</span>
<span class="n">ab_fp32</span> <span class="o">=</span> <span class="n">a</span> <span class="o">@</span> <span class="n">b</span> <span class="c1"># takes 0.11s on GA100</span>
<span class="n">error</span> <span class="o">=</span> <span class="p">(</span><span class="n">ab_fp32</span> <span class="o">-</span> <span class="n">ab_full</span><span class="p">)</span><span class="o">.</span><span class="n">abs</span><span class="p">()</span><span class="o">.</span><span class="n">max</span><span class="p">()</span> <span class="c1"># 0.0031</span>
<span class="n">relative_error</span> <span class="o">=</span> <span class="n">error</span> <span class="o">/</span> <span class="n">mean</span> <span class="c1"># 0.000039</span>
</pre></div>
</div>
<p>From the above example, we can see that with TF32 enabled, the speed is ~7x faster on A100, and that
relative error compared to double precision is approximately 2 orders of magnitude larger. Note that
the exact ratio of TF32 to single precision speed depends on the hardware generation, as properties
such as the ratio of memory bandwidth to compute as well as the ratio of TF32 to FP32 matmul throughput
may vary from generation to generation or model to model.
If full FP32 precision is needed, users can disable TF32 by:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">False</span>
<span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cudnn</span><span class="o">.</span><span class="n">allow_tf32</span> <span class="o">=</span> <span class="kc">False</span>
</pre></div>
</div>
<p>To toggle the TF32 flags off in C++, you can do</p>
<div class="highlight-C++ notranslate"><div class="highlight"><pre><span></span><span class="n">at</span><span class="o">::</span><span class="n">globalContext</span><span class="p">().</span><span class="n">setAllowTF32CuBLAS</span><span class="p">(</span><span class="nb">false</span><span class="p">);</span>
<span class="n">at</span><span class="o">::</span><span class="n">globalContext</span><span class="p">().</span><span class="n">setAllowTF32CuDNN</span><span class="p">(</span><span class="nb">false</span><span class="p">);</span>
</pre></div>
</div>
<p>For more information about TF32, see:</p>
<ul class="simple">
<li><p><a class="reference external" href="https://blogs.nvidia.com/blog/2020/05/14/tensorfloat-32-precision-format/">TensorFloat-32</a></p></li>
<li><p><a class="reference external" href="https://devblogs.nvidia.com/cuda-11-features-revealed/">CUDA 11</a></p></li>
<li><p><a class="reference external" href="https://devblogs.nvidia.com/nvidia-ampere-architecture-in-depth/">Ampere architecture</a></p></li>
</ul>
</div>
<div class="section" id="reduced-precision-reduction-in-fp16-gemms">
<span id="fp16reducedprecision"></span><h2>Reduced Precision Reduction in FP16 GEMMs<a class="headerlink" href="#reduced-precision-reduction-in-fp16-gemms" title="Permalink to this heading">¶</a></h2>
<p>fp16 GEMMs are potentially done with some intermediate reduced precision reductions (e.g., in fp16 rather than fp32). These selective reductions in precision can allow for higher performance on certain workloads (particularly those with a large <cite>k</cite> dimension) and GPU architectures at the cost of numerical precision and potential for overflow.</p>
<p>Some example benchmark data on V100:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="p">[</span><span class="o">---------------------------</span> <span class="n">bench_gemm_transformer</span> <span class="o">--------------------------</span><span class="p">]</span>
<span class="p">[</span> <span class="n">m</span> <span class="p">,</span> <span class="n">k</span> <span class="p">,</span> <span class="n">n</span> <span class="p">]</span> <span class="o">|</span> <span class="n">allow_fp16_reduc</span><span class="o">=</span><span class="kc">True</span> <span class="o">|</span> <span class="n">allow_fp16_reduc</span><span class="o">=</span><span class="kc">False</span>
<span class="mi">1</span> <span class="n">threads</span><span class="p">:</span> <span class="o">--------------------------------------------------------------------</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4048</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1634.6</span> <span class="o">|</span> <span class="mf">1639.8</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4056</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1670.8</span> <span class="o">|</span> <span class="mf">1661.9</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4080</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1664.2</span> <span class="o">|</span> <span class="mf">1658.3</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4096</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1639.4</span> <span class="o">|</span> <span class="mf">1651.0</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4104</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1677.4</span> <span class="o">|</span> <span class="mf">1674.9</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4128</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1655.7</span> <span class="o">|</span> <span class="mf">1646.0</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">4144</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">1796.8</span> <span class="o">|</span> <span class="mf">2519.6</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">5096</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">2094.6</span> <span class="o">|</span> <span class="mf">3190.0</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">5104</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">2144.0</span> <span class="o">|</span> <span class="mf">2663.5</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">5112</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">2149.1</span> <span class="o">|</span> <span class="mf">2766.9</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">5120</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">2142.8</span> <span class="o">|</span> <span class="mf">2631.0</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">9728</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">3875.1</span> <span class="o">|</span> <span class="mf">5779.8</span>
<span class="p">[</span><span class="mi">4096</span><span class="p">,</span> <span class="mi">16384</span><span class="p">,</span> <span class="mi">4096</span><span class="p">]</span> <span class="o">|</span> <span class="mf">6182.9</span> <span class="o">|</span> <span class="mf">9656.5</span>
<span class="p">(</span><span class="n">times</span> <span class="ow">in</span> <span class="n">microseconds</span><span class="p">)</span><span class="o">.</span>
</pre></div>
</div>
<p>If full precision reductions are needed, users can disable reduced precision reductions in fp16 GEMMs with:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_fp16_reduced_precision_reduction</span> <span class="o">=</span> <span class="kc">False</span>
</pre></div>
</div>
<p>To toggle the reduced precision reduction flags in C++, one can do</p>
<div class="highlight-C++ notranslate"><div class="highlight"><pre><span></span><span class="n">at</span><span class="o">::</span><span class="n">globalContext</span><span class="p">().</span><span class="n">setAllowFP16ReductionCuBLAS</span><span class="p">(</span><span class="nb">false</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="section" id="reduced-precision-reduction-in-bf16-gemms">
<span id="bf16reducedprecision"></span><h2>Reduced Precision Reduction in BF16 GEMMs<a class="headerlink" href="#reduced-precision-reduction-in-bf16-gemms" title="Permalink to this heading">¶</a></h2>
<p>A similar flag (as above) exists for BFloat16 GEMMs.
Note that this switch is set to <cite>True</cite> by default for BF16. If you observe
numerical instability in your workload, you may wish to set it to <cite>False</cite>.</p>
<p>If reduced precision reductions are not desired, users can disable reduced
precision reductions in bf16 GEMMs with:</p>
<div class="highlight-python notranslate"><div class="highlight"><pre><span></span><span class="n">torch</span><span class="o">.</span><span class="n">backends</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">matmul</span><span class="o">.</span><span class="n">allow_bf16_reduced_precision_reduction</span> <span class="o">=</span> <span class="kc">False</span>
</pre></div>
</div>
<p>To toggle the reduced precision reduction flags in C++, one can do</p>
<div class="highlight-C++ notranslate"><div class="highlight"><pre><span></span><span class="n">at</span><span class="o">::</span><span class="n">globalContext</span><span class="p">().</span><span class="n">setAllowBF16ReductionCuBLAS</span><span class="p">(</span><span class="nb">true</span><span class="p">);</span>
</pre></div>
</div>
</div>
<div class="section" id="asynchronous-execution">
<h2>Asynchronous execution<a class="headerlink" href="#asynchronous-execution" title="Permalink to this heading">¶</a></h2>
<p>By default, GPU operations are asynchronous. When you call a function that
uses the GPU, the operations are <em>enqueued</em> to the particular device, but not
necessarily executed until later. This allows us to execute more computations
in parallel, including operations on CPU or other GPUs.</p>
<p>In general, the effect of asynchronous computation is invisible to the caller,
because (1) each device executes operations in the order they are queued, and
(2) PyTorch automatically performs necessary synchronization when copying data
between CPU and GPU or between two GPUs. Hence, computation will proceed as if
every operation was executed synchronously.</p>
<p>You can force synchronous computation by setting environment variable
<code class="docutils literal notranslate"><span class="pre">CUDA_LAUNCH_BLOCKING=1</span></code>. This can be handy when an error occurs on the GPU.
(With asynchronous execution, such an error isn’t reported until after the
operation is actually executed, so the stack trace does not show where it was
requested.)</p>
<p>A consequence of the asynchronous computation is that time measurements without
synchronizations are not accurate. To get precise measurements, one should either
call <a class="reference internal" href="../generated/torch.cuda.synchronize.html#torch.cuda.synchronize" title="torch.cuda.synchronize"><code class="xref py py-func docutils literal notranslate"><span class="pre">torch.cuda.synchronize()</span></code></a> before measuring, or use <a class="reference internal" href="../generated/torch.cuda.Event.html#torch.cuda.Event" title="torch.cuda.Event"><code class="xref py py-class docutils literal notranslate"><span class="pre">torch.cuda.Event</span></code></a>
to record times as following:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">start_event</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Event</span><span class="p">(</span><span class="n">enable_timing</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">end_event</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Event</span><span class="p">(</span><span class="n">enable_timing</span><span class="o">=</span><span class="kc">True</span><span class="p">)</span>
<span class="n">start_event</span><span class="o">.</span><span class="n">record</span><span class="p">()</span>
<span class="c1"># Run some things here</span>
<span class="n">end_event</span><span class="o">.</span><span class="n">record</span><span class="p">()</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">synchronize</span><span class="p">()</span> <span class="c1"># Wait for the events to be recorded!</span>
<span class="n">elapsed_time_ms</span> <span class="o">=</span> <span class="n">start_event</span><span class="o">.</span><span class="n">elapsed_time</span><span class="p">(</span><span class="n">end_event</span><span class="p">)</span>
</pre></div>
</div>
<p>As an exception, several functions such as <a class="reference internal" href="../generated/torch.Tensor.to.html#torch.Tensor.to" title="torch.Tensor.to"><code class="xref py py-meth docutils literal notranslate"><span class="pre">to()</span></code></a> and
<a class="reference internal" href="../generated/torch.Tensor.copy_.html#torch.Tensor.copy_" title="torch.Tensor.copy_"><code class="xref py py-meth docutils literal notranslate"><span class="pre">copy_()</span></code></a> admit an explicit <code class="xref py py-attr docutils literal notranslate"><span class="pre">non_blocking</span></code> argument,
which lets the caller bypass synchronization when it is unnecessary.
Another exception is CUDA streams, explained below.</p>
<div class="section" id="cuda-streams">
<h3>CUDA streams<a class="headerlink" href="#cuda-streams" title="Permalink to this heading">¶</a></h3>
<p>A <a class="reference external" href="https://docs.nvidia.com/cuda/cuda-c-programming-guide/index.html#streams">CUDA stream</a> is a linear sequence of execution that belongs to a specific
device. You normally do not need to create one explicitly: by default, each
device uses its own “default” stream.</p>
<p>Operations inside each stream are serialized in the order they are created,
but operations from different streams can execute concurrently in any
relative order, unless explicit synchronization functions (such as
<a class="reference internal" href="../generated/torch.cuda.synchronize.html#torch.cuda.synchronize" title="torch.cuda.synchronize"><code class="xref py py-meth docutils literal notranslate"><span class="pre">synchronize()</span></code></a> or <a class="reference internal" href="../generated/torch.cuda.Stream.html#torch.cuda.Stream.wait_stream" title="torch.cuda.Stream.wait_stream"><code class="xref py py-meth docutils literal notranslate"><span class="pre">wait_stream()</span></code></a>) are
used. For example, the following code is incorrect:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">cuda</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda'</span><span class="p">)</span>
<span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Stream</span><span class="p">()</span> <span class="c1"># Create a new stream.</span>
<span class="n">A</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">((</span><span class="mi">100</span><span class="p">,</span> <span class="mi">100</span><span class="p">),</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda</span><span class="p">)</span><span class="o">.</span><span class="n">normal_</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="c1"># sum() may start execution before normal_() finishes!</span>
<span class="n">B</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">A</span><span class="p">)</span>
</pre></div>
</div>
<p>When the “current stream” is the default stream, PyTorch automatically performs
necessary synchronization when data is moved around, as explained above.
However, when using non-default streams, it is the user’s responsibility to
ensure proper synchronization. The fixed version of this example is:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">cuda</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda'</span><span class="p">)</span>
<span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Stream</span><span class="p">()</span> <span class="c1"># Create a new stream.</span>
<span class="n">A</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">((</span><span class="mi">100</span><span class="p">,</span> <span class="mi">100</span><span class="p">),</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda</span><span class="p">)</span><span class="o">.</span><span class="n">normal_</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="n">s</span><span class="o">.</span><span class="n">wait_stream</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">default_stream</span><span class="p">(</span><span class="n">cuda</span><span class="p">))</span> <span class="c1"># NEW!</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">B</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">sum</span><span class="p">(</span><span class="n">A</span><span class="p">)</span>
<span class="n">A</span><span class="o">.</span><span class="n">record_stream</span><span class="p">(</span><span class="n">s</span><span class="p">)</span> <span class="c1"># NEW!</span>
</pre></div>
</div>
<p>There are two new additions. The <a class="reference internal" href="../generated/torch.cuda.Stream.html#torch.cuda.Stream.wait_stream" title="torch.cuda.Stream.wait_stream"><code class="xref py py-meth docutils literal notranslate"><span class="pre">torch.cuda.Stream.wait_stream()</span></code></a> call
ensures that the <code class="docutils literal notranslate"><span class="pre">normal_()</span></code> execution has finished before we start running
<code class="docutils literal notranslate"><span class="pre">sum(A)</span></code> on a side stream. The <a class="reference internal" href="../generated/torch.Tensor.record_stream.html#torch.Tensor.record_stream" title="torch.Tensor.record_stream"><code class="xref py py-meth docutils literal notranslate"><span class="pre">torch.Tensor.record_stream()</span></code></a> (see for
more details) ensures that we do not deallocate A before <code class="docutils literal notranslate"><span class="pre">sum(A)</span></code> has
completed. You can also manually wait on the stream at some later point in
time with <code class="docutils literal notranslate"><span class="pre">torch.cuda.default_stream(cuda).wait_stream(s)</span></code> (note that it
is pointless to wait immediately, since that will prevent the stream execution
from running in parallel with other work on the default stream.) See the
documentation for <a class="reference internal" href="../generated/torch.Tensor.record_stream.html#torch.Tensor.record_stream" title="torch.Tensor.record_stream"><code class="xref py py-meth docutils literal notranslate"><span class="pre">torch.Tensor.record_stream()</span></code></a> on more details on when
to use one or another.</p>
<p>Note that this synchronization is necessary even when there is no
read dependency, e.g., as seen in this example:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">cuda</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">device</span><span class="p">(</span><span class="s1">'cuda'</span><span class="p">)</span>
<span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Stream</span><span class="p">()</span> <span class="c1"># Create a new stream.</span>
<span class="n">A</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">empty</span><span class="p">((</span><span class="mi">100</span><span class="p">,</span> <span class="mi">100</span><span class="p">),</span> <span class="n">device</span><span class="o">=</span><span class="n">cuda</span><span class="p">)</span>
<span class="n">s</span><span class="o">.</span><span class="n">wait_stream</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">default_stream</span><span class="p">(</span><span class="n">cuda</span><span class="p">))</span> <span class="c1"># STILL REQUIRED!</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">A</span><span class="o">.</span><span class="n">normal_</span><span class="p">(</span><span class="mf">0.0</span><span class="p">,</span> <span class="mf">1.0</span><span class="p">)</span>
<span class="n">A</span><span class="o">.</span><span class="n">record_stream</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
</pre></div>
</div>
<p>Despite the computation on <code class="docutils literal notranslate"><span class="pre">s</span></code> not reading the contents of <code class="docutils literal notranslate"><span class="pre">A</span></code> and no
other uses of <code class="docutils literal notranslate"><span class="pre">A</span></code>, it is still necessary to synchronize, because <code class="docutils literal notranslate"><span class="pre">A</span></code>
may correspond to memory reallocated by the CUDA caching allocator, with
pending operations from the old (deallocated) memory.</p>
</div>
<div class="section" id="stream-semantics-of-backward-passes">
<span id="bwd-cuda-stream-semantics"></span><h3>Stream semantics of backward passes<a class="headerlink" href="#stream-semantics-of-backward-passes" title="Permalink to this heading">¶</a></h3>
<p>Each backward CUDA op runs on the same stream that was used for its corresponding forward op.
If your forward pass runs independent ops in parallel on different streams,
this helps the backward pass exploit that same parallelism.</p>
<p>The stream semantics of a backward call with respect to surrounding ops are the same
as for any other call. The backward pass inserts internal syncs to ensure this even when
backward ops run on multiple streams as described in the previous paragraph.
More concretely, when calling
<a class="reference internal" href="../generated/torch.autograd.backward.html#torch.autograd.backward" title="torch.autograd.backward"><code class="xref py py-func docutils literal notranslate"><span class="pre">autograd.backward</span></code></a>,
<a class="reference internal" href="../generated/torch.autograd.grad.html#torch.autograd.grad" title="torch.autograd.grad"><code class="xref py py-func docutils literal notranslate"><span class="pre">autograd.grad</span></code></a>, or
<a class="reference internal" href="../generated/torch.Tensor.backward.html#torch.Tensor.backward" title="torch.Tensor.backward"><code class="xref py py-meth docutils literal notranslate"><span class="pre">tensor.backward</span></code></a>,
and optionally supplying CUDA tensor(s) as the initial gradient(s) (e.g.,
<a class="reference internal" href="../generated/torch.autograd.backward.html#torch.autograd.backward" title="torch.autograd.backward"><code class="xref py py-func docutils literal notranslate"><span class="pre">autograd.backward(...,</span> <span class="pre">grad_tensors=initial_grads)</span></code></a>,
<a class="reference internal" href="../generated/torch.autograd.grad.html#torch.autograd.grad" title="torch.autograd.grad"><code class="xref py py-func docutils literal notranslate"><span class="pre">autograd.grad(...,</span> <span class="pre">grad_outputs=initial_grads)</span></code></a>, or
<a class="reference internal" href="../generated/torch.Tensor.backward.html#torch.Tensor.backward" title="torch.Tensor.backward"><code class="xref py py-meth docutils literal notranslate"><span class="pre">tensor.backward(...,</span> <span class="pre">gradient=initial_grad)</span></code></a>),
the acts of</p>
<ol class="arabic simple">
<li><p>optionally populating initial gradient(s),</p></li>
<li><p>invoking the backward pass, and</p></li>
<li><p>using the gradients</p></li>
</ol>
<p>have the same stream-semantics relationship as any group of ops:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="n">s</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">Stream</span><span class="p">()</span>
<span class="c1"># Safe, grads are used in the same stream context as backward()</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">use</span> <span class="n">grads</span>
<span class="c1"># Unsafe</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">use</span> <span class="n">grads</span>
<span class="c1"># Safe, with synchronization</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">current_stream</span><span class="p">()</span><span class="o">.</span><span class="n">wait_stream</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
<span class="n">use</span> <span class="n">grads</span>
<span class="c1"># Safe, populating initial grad and invoking backward are in the same stream context</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">gradient</span><span class="o">=</span><span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">loss</span><span class="p">))</span>
<span class="c1"># Unsafe, populating initial_grad and invoking backward are in different stream contexts,</span>
<span class="c1"># without synchronization</span>
<span class="n">initial_grad</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">loss</span><span class="p">)</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">gradient</span><span class="o">=</span><span class="n">initial_grad</span><span class="p">)</span>
<span class="c1"># Safe, with synchronization</span>
<span class="n">initial_grad</span> <span class="o">=</span> <span class="n">torch</span><span class="o">.</span><span class="n">ones_like</span><span class="p">(</span><span class="n">loss</span><span class="p">)</span>
<span class="n">s</span><span class="o">.</span><span class="n">wait_stream</span><span class="p">(</span><span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">current_stream</span><span class="p">())</span>
<span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">initial_grad</span><span class="o">.</span><span class="n">record_stream</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">(</span><span class="n">gradient</span><span class="o">=</span><span class="n">initial_grad</span><span class="p">)</span>
</pre></div>
</div>
<div class="section" id="bc-note-using-grads-on-the-default-stream">
<h4>BC note: Using grads on the default stream<a class="headerlink" href="#bc-note-using-grads-on-the-default-stream" title="Permalink to this heading">¶</a></h4>
<p>In prior versions of PyTorch (1.9 and earlier), the autograd engine always synced
the default stream with all backward ops, so the following pattern:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">use</span> <span class="n">grads</span>
</pre></div>
</div>
<p>was safe as long as <code class="docutils literal notranslate"><span class="pre">use</span> <span class="pre">grads</span></code> happened on the default stream.
In present PyTorch, that pattern is no longer safe. If <code class="docutils literal notranslate"><span class="pre">backward()</span></code>
and <code class="docutils literal notranslate"><span class="pre">use</span> <span class="pre">grads</span></code> are in different stream contexts, you must sync the streams:</p>
<div class="highlight-default notranslate"><div class="highlight"><pre><span></span><span class="k">with</span> <span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">stream</span><span class="p">(</span><span class="n">s</span><span class="p">):</span>
<span class="n">loss</span><span class="o">.</span><span class="n">backward</span><span class="p">()</span>
<span class="n">torch</span><span class="o">.</span><span class="n">cuda</span><span class="o">.</span><span class="n">current_stream</span><span class="p">()</span><span class="o">.</span><span class="n">wait_stream</span><span class="p">(</span><span class="n">s</span><span class="p">)</span>
<span class="n">use</span> <span class="n">grads</span>
</pre></div>
</div>
<p>even if <code class="docutils literal notranslate"><span class="pre">use</span> <span class="pre">grads</span></code> is on the default stream.</p>
</div>
</div>
</div>
<div class="section" id="memory-management">
<span id="cuda-memory-management"></span><h2>Memory management<a class="headerlink" href="#memory-management" title="Permalink to this heading">¶</a></h2>
<p>PyTorch uses a caching memory allocator to speed up memory allocations. This
allows fast memory deallocation without device synchronizations. However, the
unused memory managed by the allocator will still show as if used in
<code class="docutils literal notranslate"><span class="pre">nvidia-smi</span></code>. You can use <a class="reference internal" href="../generated/torch.cuda.memory_allocated.html#torch.cuda.memory_allocated" title="torch.cuda.memory_allocated"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_allocated()</span></code></a> and
<a class="reference internal" href="../generated/torch.cuda.max_memory_allocated.html#torch.cuda.max_memory_allocated" title="torch.cuda.max_memory_allocated"><code class="xref py py-meth docutils literal notranslate"><span class="pre">max_memory_allocated()</span></code></a> to monitor memory occupied by
tensors, and use <a class="reference internal" href="../generated/torch.cuda.memory_reserved.html#torch.cuda.memory_reserved" title="torch.cuda.memory_reserved"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_reserved()</span></code></a> and
<a class="reference internal" href="../generated/torch.cuda.max_memory_reserved.html#torch.cuda.max_memory_reserved" title="torch.cuda.max_memory_reserved"><code class="xref py py-meth docutils literal notranslate"><span class="pre">max_memory_reserved()</span></code></a> to monitor the total amount of memory
managed by the caching allocator. Calling <a class="reference internal" href="../generated/torch.cuda.empty_cache.html#torch.cuda.empty_cache" title="torch.cuda.empty_cache"><code class="xref py py-meth docutils literal notranslate"><span class="pre">empty_cache()</span></code></a>
releases all <strong>unused</strong> cached memory from PyTorch so that those can be used
by other GPU applications. However, the GPU memory occupied by tensors will not
be freed so it can not increase the amount of GPU memory available for PyTorch.</p>
<p>To better understand how CUDA memory is being used over time,
<a class="reference internal" href="../torch_cuda_memory.html#torch-cuda-memory"><span class="std std-ref">Understanding CUDA Memory Usage</span></a> describes tools for capturing and visualizing traces of memory use.</p>
<p>For more advanced users, we offer more comprehensive memory benchmarking via
<a class="reference internal" href="../generated/torch.cuda.memory_stats.html#torch.cuda.memory_stats" title="torch.cuda.memory_stats"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_stats()</span></code></a>. We also offer the capability to capture a
complete snapshot of the memory allocator state via
<a class="reference internal" href="../generated/torch.cuda.memory_snapshot.html#torch.cuda.memory_snapshot" title="torch.cuda.memory_snapshot"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_snapshot()</span></code></a>, which can help you understand the
underlying allocation patterns produced by your code.</p>
<div class="section" id="optimizing-memory-usage-with-pytorch-cuda-alloc-conf">
<span id="cuda-memory-envvars"></span><h3>Optimizing memory usage with <code class="docutils literal notranslate"><span class="pre">PYTORCH_CUDA_ALLOC_CONF</span></code><a class="headerlink" href="#optimizing-memory-usage-with-pytorch-cuda-alloc-conf" title="Permalink to this heading">¶</a></h3>
<p>Use of a caching allocator can interfere with memory checking tools such as
<code class="docutils literal notranslate"><span class="pre">cuda-memcheck</span></code>. To debug memory errors using <code class="docutils literal notranslate"><span class="pre">cuda-memcheck</span></code>, set
<code class="docutils literal notranslate"><span class="pre">PYTORCH_NO_CUDA_MEMORY_CACHING=1</span></code> in your environment to disable caching.</p>
<p>The behavior of the caching allocator can be controlled via the environment variable
<code class="docutils literal notranslate"><span class="pre">PYTORCH_CUDA_ALLOC_CONF</span></code>.
The format is <code class="docutils literal notranslate"><span class="pre">PYTORCH_CUDA_ALLOC_CONF=<option>:<value>,<option2>:<value2>...</span></code>
Available options:</p>
<ul>
<li><p><code class="docutils literal notranslate"><span class="pre">backend</span></code> allows selecting the underlying allocator implementation.
Currently, valid options are <code class="docutils literal notranslate"><span class="pre">native</span></code>, which uses PyTorch’s native
implementation, and <code class="docutils literal notranslate"><span class="pre">cudaMallocAsync</span></code>, which uses
<a class="reference external" href="https://developer.nvidia.com/blog/using-cuda-stream-ordered-memory-allocator-part-1/">CUDA’s built-in asynchronous allocator</a>.
<code class="docutils literal notranslate"><span class="pre">cudaMallocAsync</span></code> requires CUDA 11.4 or newer. The default is <code class="docutils literal notranslate"><span class="pre">native</span></code>.
<code class="docutils literal notranslate"><span class="pre">backend</span></code> applies to all devices used by the process, and can’t be
specified on a per-device basis.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">max_split_size_mb</span></code> prevents the native allocator
from splitting blocks larger than this size (in MB). This can reduce
fragmentation and may allow some borderline workloads to complete without
running out of memory. Performance cost can range from ‘zero’ to ‘substantial’
depending on allocation patterns. Default value is unlimited, i.e. all blocks
can be split. The
<a class="reference internal" href="../generated/torch.cuda.memory_stats.html#torch.cuda.memory_stats" title="torch.cuda.memory_stats"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_stats()</span></code></a> and
<a class="reference internal" href="../generated/torch.cuda.memory_summary.html#torch.cuda.memory_summary" title="torch.cuda.memory_summary"><code class="xref py py-meth docutils literal notranslate"><span class="pre">memory_summary()</span></code></a> methods are useful for tuning. This
option should be used as a last resort for a workload that is aborting
due to ‘out of memory’ and showing a large amount of inactive split blocks.
<code class="docutils literal notranslate"><span class="pre">max_split_size_mb</span></code> is only meaningful with <code class="docutils literal notranslate"><span class="pre">backend:native</span></code>.
With <code class="docutils literal notranslate"><span class="pre">backend:cudaMallocAsync</span></code>, <code class="docutils literal notranslate"><span class="pre">max_split_size_mb</span></code> is ignored.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">roundup_power2_divisions</span></code> helps with rounding the requested allocation
size to nearest power-2 division and making better use of the blocks. In
the native CUDACachingAllocator, the sizes are rounded up in multiples
of the block size of 512, so this works fine for smaller sizes. However, this
can be inefficient for large nearby allocations as each will go to a different
size of block, and re-use of those blocks is minimized. This might create
lots of unused blocks and will waste GPU memory capacity. This option enables
the rounding of allocation size to nearest power-2 division. For example, if
we need to round-up size of 1200 and if number of divisions is 4,
the size 1200 lies between 1024 and 2048 and if we do 4 divisions between
them, the values are 1024, 1280, 1536, and 1792. So, allocation size of 1200
will be rounded to 1280 as the nearest ceiling of power-2 division.
Specify a single value to apply for all allocation sizes or specify an
array of key value pairs to set power-2 division individually for each
power of two interval. For example to set 1 division for all allocations
under 256MB, 2 division for allocations between 256MB and 512MB, 4 divisions
for allocations between 512MB and 1GB and 8 divisions for any larger allocations,
set the knob value to: [256:1,512:2,1024:4,>:8].
<code class="docutils literal notranslate"><span class="pre">roundup_power2_divisions</span></code> is only meaningful with <code class="docutils literal notranslate"><span class="pre">backend:native</span></code>.
With <code class="docutils literal notranslate"><span class="pre">backend:cudaMallocAsync</span></code>, <code class="docutils literal notranslate"><span class="pre">roundup_power2_divisions</span></code> is ignored.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">garbage_collection_threshold</span></code> helps actively reclaiming unused GPU memory to
avoid triggering expensive sync-and-reclaim-all operation (release_cached_blocks),
which can be unfavorable to latency-critical GPU applications (e.g., servers).
Upon setting this threshold (e.g., 0.8), the allocator will start reclaiming
GPU memory blocks if the GPU memory capacity usage exceeds the threshold (i.e.,
80% of the total memory allocated to the GPU application). The algorithm prefers
to free old & unused blocks first to avoid freeing blocks that are actively being
reused. The threshold value should be greater than 0.0 and less than 1.0.
<code class="docutils literal notranslate"><span class="pre">garbage_collection_threshold</span></code> is only meaningful with <code class="docutils literal notranslate"><span class="pre">backend:native</span></code>.
With <code class="docutils literal notranslate"><span class="pre">backend:cudaMallocAsync</span></code>, <code class="docutils literal notranslate"><span class="pre">garbage_collection_threshold</span></code> is ignored.</p></li>
<li><p><code class="docutils literal notranslate"><span class="pre">expandable_segments</span></code> (experimental, default: <cite>False</cite>) If set to <cite>True</cite>, this setting instructs
the allocator to create CUDA allocations that can later be expanded to better handle cases
where a job changes allocation sizes frequently, such as having a changing batch size.
Normally for large (>2MB) allocations, the allocator calls cudaMalloc to get allocations
that are the same size as what the user requests. In the future, parts of these
allocations can be reused for other requests if they are free. This works well
when the program makes many requests of exactly the same size or of sizes that
are even multiples of that size. Many deep learning models follow this behavior.
However, one common exception is when the batch size changes slightly from one
iteration to the next, e.g. in batched inference. When the program runs
initially with batch size <cite>N</cite>, it will make allocations appropriate for that size.
If in the future, it runs at size <cite>N - 1</cite>, the existing allocations will still be
big enough. However, if it runs at size <cite>N + 1</cite>, then it will have to make new
allocations that are slightly larger. Not all the tensors are the same size.
Some might be <cite>(N + 1)*A</cite> and others <cite>(N + 1)*A*B</cite> where <cite>A</cite> and <cite>B</cite> are some non-batch
dimensions in the model. Because the allocator reuses existing allocations when
they are big enough, some number of <cite>(N + 1)*A</cite> allocations will actually fit in
the already existing <cite>N*B*A</cite> segments, though not perfectly. As the model runs it
will partially fill up all of these segments leaving unusable free slices of
memory at the end of these segments. The allocator at some point will need to
<cite>cudaMalloc</cite> a new <cite>(N + 1)*A*B</cite> segment. If there is not enough memory, there is
now no way to recover the slices of memory that are free at the end of existing
segments. With models 50+ layers deep, this pattern might repeat 50+ times
creating many slivers.</p>
<p><cite>expandable_segments</cite> allows the allocator to create a segment initially and then
expand its size later when more memory is needed. Instead of making one segment
per allocation, it tries to make one segment (per stream) that grows as
necessary. Now when the <cite>N + 1</cite> case runs, the allocations will tile nicely into
the one large segment until it fills up. Then more memory is requested and
appended to the end of the segment. This process does not create as many slivers
of unusable memory, so it is more likely to succeed at finding this memory.</p>
<p><cite>pinned_use_cuda_host_register</cite> option is a boolean flag that determines whether to
use the CUDA API’s cudaHostRegister function for allocating pinned memory instead
of the default cudaHostAlloc. When set to True, the memory is allocated using regular
malloc and then pages are mapped to the memory before calling cudaHostRegister.
This pre-mapping of pages helps reduce the lock time during the execution
of cudaHostRegister.</p>
<p><cite>pinned_num_register_threads</cite> option is only valid when pinned_use_cuda_host_register
is set to True. By default, one thread is used to map the pages. This option allows
using more threads to parallelize the page mapping operations to reduce the overall
allocation time of pinned memory. A good value for this option is 8 based on
benchmarking results.</p>
</li>
</ul>
<div class="admonition note">
<p class="admonition-title">Note</p>
<p>Some stats reported by the
<a class="reference internal" href="../cuda.html#cuda-memory-management-api"><span class="std std-ref">CUDA memory management API</span></a>
are specific to <code class="docutils literal notranslate"><span class="pre">backend:native</span></code>, and are not meaningful with
<code class="docutils literal notranslate"><span class="pre">backend:cudaMallocAsync</span></code>.
See each function’s docstring for details.</p>
</div>
</div>
</div>
<div class="section" id="using-custom-memory-allocators-for-cuda">
<span id="cuda-memory-custom-allocator"></span><h2>Using custom memory allocators for CUDA<a class="headerlink" href="#using-custom-memory-allocators-for-cuda" title="Permalink to this heading">¶</a></h2>
<p>It is possible to define allocators as simple functions in C/C++ and compile
them as a shared library, the code below shows a basic allocator that just
traces all the memory operations.</p>
<div class="highlight-C++ notranslate"><div class="highlight"><pre><span></span><span class="cp">#include</span><span class="w"> </span><span class="cpf"><sys/types.h></span>
<span class="cp">#include</span><span class="w"> </span><span class="cpf"><cuda_runtime_api.h></span>
<span class="cp">#include</span><span class="w"> </span><span class="cpf"><iostream></span>
<span class="c1">// Compile with g++ alloc.cc -o alloc.so -I/usr/local/cuda/include -shared -fPIC</span>
<span class="k">extern</span><span class="w"> </span><span class="s">"C"</span><span class="w"> </span><span class="p">{</span>
<span class="kt">void</span><span class="o">*</span><span class="w"> </span><span class="nf">my_malloc</span><span class="p">(</span><span class="kt">ssize_t</span><span class="w"> </span><span class="n">size</span><span class="p">,</span><span class="w"> </span><span class="kt">int</span><span class="w"> </span><span class="n">device</span><span class="p">,</span><span class="w"> </span><span class="n">cudaStream_t</span><span class="w"> </span><span class="n">stream</span><span class="p">)</span><span class="w"> </span><span class="p">{</span>
<span class="w"> </span><span class="kt">void</span><span class="w"> </span><span class="o">*</span><span class="n">ptr</span><span class="p">;</span>
<span class="w"> </span><span class="n">cudaMalloc</span><span class="p">(</span><span class="o">&</span><span class="n">ptr</span><span class="p">,</span><span class="w"> </span><span class="n">size</span><span class="p">);</span>
<span class="w"> </span><span class="n">std</span><span class="o">::</span><span class="n">cout</span><span class="o"><<</span><span class="s">"alloc "</span><span class="o"><<</span><span class="n">ptr</span><span class="o"><<</span><span class="n">size</span><span class="o"><<</span><span class="n">std</span><span class="o">::</span><span class="n">endl</span><span class="p">;</span>