[chibi@alma8 ~]$ ls
 Geekbench-4.4.2-Linux             ダウンロード   ドキュメント   画像
 NVIDIA_CUDA-11.3_Samples          テンプレート   ビデオ         公開
'y-cruncher v0.7.8.9507-static'    デスクトップ   音楽
[chibi@alma8 ~]$ cd NVIDIA_CUDA-11.3_Samples
[chibi@alma8 NVIDIA_CUDA-11.3_Samples]$ ls
0_Simple     2_Graphics  4_Finance      6_Advanced       EULA.txt  Makefile
1_Utilities  3_Imaging   5_Simulations  7_CUDALibraries  LICENSE   common
[chibi@alma8 NVIDIA_CUDA-11.3_Samples]$ cd 0_Simple
[chibi@alma8 0_Simple]$ ls
UnifiedMemoryStreams          simpleCubemapTexture
asyncAPI                      simpleCudaGraphs
bf16TensorCoreGemm            simpleDrvRuntime
binaryPartitionCG             simpleIPC
cdpSimplePrint                simpleLayeredTexture
cdpSimpleQuicksort            simpleMPI
clock                         simpleMultiCopy
clock_nvrtc                   simpleMultiGPU
cppIntegration                simpleOccupancy
cppOverload                   simpleP2P
cudaNvSci                     simplePitchLinearTexture
cudaOpenMP                    simplePrintf
cudaTensorCoreGemm            simpleSeparateCompilation
dmmaTensorCoreGemm            simpleStreams
fp16ScalarProduct             simpleSurfaceWrite
globalToShmemAsyncCopy        simpleTemplates
immaTensorCoreGemm            simpleTemplates_nvrtc
inlinePTX                     simpleTexture
inlinePTX_nvrtc               simpleTextureDrv
matrixMul                     simpleVoteIntrinsics
matrixMulCUBLAS               simpleVoteIntrinsics_nvrtc
matrixMulDrv                  simpleZeroCopy
matrixMul_nvrtc               streamOrderedAllocation
memMapIPCDrv                  streamOrderedAllocationIPC
simpleAWBarrier               streamOrderedAllocationP2P
simpleAssert                  systemWideAtomics
simpleAssert_nvrtc            template
simpleAtomicIntrinsics        tf32TensorCoreGemm
simpleAtomicIntrinsics_nvrtc  vectorAdd
simpleAttributes              vectorAddDrv
simpleCallback                vectorAddMMAP
simpleCooperativeGroups       vectorAdd_nvrtc
[chibi@alma8 0_Simple]$ cd simpleP2P
[chibi@alma8 simpleP2P]$ make
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -I../../common/inc -m64 --threads 0 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o simpleP2P.o -c simpleP2P.cu
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -m64 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o simpleP2P simpleP2P.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
mkdir -p ../../bin/x86_64/linux/release
cp simpleP2P ../../bin/x86_64/linux/release
[chibi@alma8 simpleP2P]$ ./simpleP2P
[./simpleP2P] - Starting...
Checking for multiple GPUs...
CUDA-capable device count: 2

Checking GPU(s) for support of peer to peer memory access...
> Peer access from NVIDIA TITAN RTX (GPU0) -> NVIDIA TITAN RTX (GPU1) : Yes
> Peer access from NVIDIA TITAN RTX (GPU1) -> NVIDIA TITAN RTX (GPU0) : Yes
Enabling peer access between GPU0 and GPU1...
Allocating buffers (64MB on GPU0, GPU1 and CPU Host)...
Creating event handles...
cudaMemcpyPeer / cudaMemcpy between GPU0 and GPU1: 43.53GB/s
Preparing host buffer and memcpy to GPU0...
Run kernel on GPU1, taking source data from GPU0 and writing to GPU1...
Run kernel on GPU0, taking source data from GPU1 and writing to GPU0...
Copy data back to host from GPU0 and verify results...
Disabling peer access...
Shutting down...
Test passed
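For reference, the peer-access pattern that simpleP2P exercises boils down to a handful of runtime calls. A minimal sketch (not the sample's actual source; two devices assumed, error handling reduced to one macro):

    // p2p_copy.cu - minimal sketch of the peer-to-peer pattern simpleP2P uses.
    // Build: nvcc -o p2p_copy p2p_copy.cu
    #include <cstdio>
    #include <cuda_runtime.h>

    #define CHECK(call)                                               \
      do {                                                            \
        cudaError_t err = (call);                                     \
        if (err != cudaSuccess) {                                     \
          fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,          \
                  cudaGetErrorString(err));                           \
          return 1;                                                   \
        }                                                             \
      } while (0)

    int main() {
      int can01 = 0, can10 = 0;
      CHECK(cudaDeviceCanAccessPeer(&can01, 0, 1));
      CHECK(cudaDeviceCanAccessPeer(&can10, 1, 0));
      if (!can01 || !can10) {
        printf("P2P not supported between devices 0 and 1\n");
        return 0;
      }

      const size_t bytes = 64 * 1024 * 1024;  // 64MB, as in the sample
      float *d0 = nullptr, *d1 = nullptr;

      // Allocate one buffer per GPU, then enable peer access both ways.
      CHECK(cudaSetDevice(0));
      CHECK(cudaMalloc((void **)&d0, bytes));
      CHECK(cudaSetDevice(1));
      CHECK(cudaMalloc((void **)&d1, bytes));
      CHECK(cudaSetDevice(0));
      CHECK(cudaDeviceEnablePeerAccess(1, 0));  // flags must be 0
      CHECK(cudaSetDevice(1));
      CHECK(cudaDeviceEnablePeerAccess(0, 0));

      // Copy GPU0 -> GPU1 over the peer mapping (NVLink here, PCIe otherwise).
      CHECK(cudaMemcpyPeer(d1, 1, d0, 0, bytes));
      CHECK(cudaDeviceSynchronize());

      CHECK(cudaFree(d1));
      CHECK(cudaSetDevice(0));
      CHECK(cudaFree(d0));
      printf("peer copy done\n");
      return 0;
    }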
[chibi@alma8 simpleP2P]$ cd ~/NVIDIA_CUDA-11.3_Samples/1_Utilities
[chibi@alma8 1_Utilities]$ ls
UnifiedMemoryPerf  deviceQuery     p2pBandwidthLatencyTest
bandwidthTest      deviceQueryDrv  topologyQuery
[chibi@alma8 1_Utilities]$ cd p2pBandwidthLatencyTest
[chibi@alma8 p2pBandwidthLatencyTest]$ make
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -I../../common/inc -m64 --threads 0 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o p2pBandwidthLatencyTest.o -c p2pBandwidthLatencyTest.cu
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -m64 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o p2pBandwidthLatencyTest p2pBandwidthLatencyTest.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
mkdir -p ../../bin/x86_64/linux/release
cp p2pBandwidthLatencyTest ../../bin/x86_64/linux/release
[chibi@alma8 p2pBandwidthLatencyTest]$ ./p2pBandwidthLatencyTest
[P2P (Peer-to-Peer) GPU Bandwidth Latency Test]
Device: 0, NVIDIA TITAN RTX, pciBusID: 1, pciDeviceID: 0, pciDomainID:0
Device: 1, NVIDIA TITAN RTX, pciBusID: 41, pciDeviceID: 0, pciDomainID:0
Device=0 CAN Access Peer Device=1
Device=1 CAN Access Peer Device=0

***NOTE: In case a device doesn't have P2P access to other one, it falls back to normal memcopy procedure.
So you can see lesser Bandwidth (GB/s) and unstable Latency (us) in those cases.

P2P Connectivity Matrix
     D\D     0     1
     0       1     1
     1       1     1
Unidirectional P2P=Disabled Bandwidth Matrix (GB/s)
   D\D     0      1
     0 564.63   6.01
     1   6.03 562.86
Unidirectional P2P=Enabled Bandwidth (P2P Writes) Matrix (GB/s)
   D\D     0      1
     0 541.78  47.10
     1  47.10 562.56
Bidirectional P2P=Disabled Bandwidth Matrix (GB/s)
   D\D     0      1
     0 549.79   8.91
     1   8.87 552.93
Bidirectional P2P=Enabled Bandwidth Matrix (GB/s)
   D\D     0      1
     0 552.95  94.13
     1  94.11 551.51
P2P=Disabled Latency Matrix (us)
   GPU     0      1
     0   1.38  24.39
     1  14.22   1.37

   CPU     0      1
     0   3.26  10.13
     1   9.99   3.15
P2P=Enabled Latency (P2P Writes) Matrix (us)
   GPU     0      1
     0   1.30   1.79
     1   1.79   1.36

   CPU     0      1
     0   3.20   2.62
     1   2.67   3.28

NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.
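The roughly 47 GB/s unidirectional P2P figure above is the kind of number you get by timing repeated peer copies with CUDA events. A stripped-down sketch of that measurement loop (assumes devices 0 and 1, omits error checks for brevity; not the sample's actual source):

    // p2p_bw.cu - sketch of timing a peer copy with CUDA events, the same
    // idea p2pBandwidthLatencyTest is built on.
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      const size_t bytes = 64 * 1024 * 1024;
      const int reps = 100;
      float *d0, *d1;

      cudaSetDevice(0);
      cudaMalloc((void **)&d0, bytes);
      cudaSetDevice(1);
      cudaMalloc((void **)&d1, bytes);
      cudaSetDevice(0);
      cudaDeviceEnablePeerAccess(1, 0);
      cudaSetDevice(1);
      cudaDeviceEnablePeerAccess(0, 0);

      // Record events around a batch of async peer copies on device 0's
      // default stream, then divide bytes moved by elapsed time.
      cudaSetDevice(0);
      cudaEvent_t start, stop;
      cudaEventCreate(&start);
      cudaEventCreate(&stop);
      cudaEventRecord(start, 0);
      for (int i = 0; i < reps; ++i)
        cudaMemcpyPeerAsync(d1, 1, d0, 0, bytes, 0);
      cudaEventRecord(stop, 0);
      cudaEventSynchronize(stop);

      float ms = 0.0f;
      cudaEventElapsedTime(&ms, start, stop);
      double gbps = (double)bytes * reps / (ms / 1e3) / 1e9;
      printf("GPU0 -> GPU1: %.2f GB/s\n", gbps);
      return 0;
    }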
[chibi@alma8 p2pBandwidthLatencyTest]$ cd ~/NVIDIA_CUDA-11.3_Samples/1_Utilities/bandwidthTest
[chibi@alma8 bandwidthTest]$ make
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -I../../common/inc -m64 --threads 0 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o bandwidthTest.o -c bandwidthTest.cu
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -m64 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o bandwidthTest bandwidthTest.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
mkdir -p ../../bin/x86_64/linux/release
cp bandwidthTest ../../bin/x86_64/linux/release
[chibi@alma8 bandwidthTest]$ ./bandwidthTest
[CUDA Bandwidth Test] - Starting...
Running on...

 Device 0: NVIDIA TITAN RTX
 Quick Mode

 Host to Device Bandwidth, 1 Device(s)
 PINNED Memory Transfers
   Transfer Size (Bytes)        Bandwidth(GB/s)
   32000000                     13.1

 Device to Host Bandwidth, 1 Device(s)
 PINNED Memory Transfers
   Transfer Size (Bytes)        Bandwidth(GB/s)
   32000000                     13.2

 Device to Device Bandwidth, 1 Device(s)
 PINNED Memory Transfers
   Transfer Size (Bytes)        Bandwidth(GB/s)
   32000000                     540.2

Result = PASS

NOTE: The CUDA Samples are not meant for performance measurements. Results may vary when GPU Boost is enabled.
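The 13.1/13.2 GB/s host transfer numbers are consistent with a PCIe 3.0 x16 link; the PINNED mode reaches them because page-locked host memory can be DMA'd directly, without a staging copy. A minimal sketch of one such pinned host-to-device transfer, timed the same way as above (error checks again omitted):

    // h2d_bw.cu - sketch of a pinned host-to-device copy like bandwidthTest's
    // PINNED mode, using the same 32000000-byte transfer size it reports.
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      const size_t bytes = 32000000;
      float *h = nullptr, *d = nullptr;

      cudaMallocHost((void **)&h, bytes);  // pinned (page-locked) host buffer
      cudaMalloc((void **)&d, bytes);

      cudaEvent_t start, stop;
      cudaEventCreate(&start);
      cudaEventCreate(&stop);
      cudaEventRecord(start, 0);
      cudaMemcpyAsync(d, h, bytes, cudaMemcpyHostToDevice, 0);
      cudaEventRecord(stop, 0);
      cudaEventSynchronize(stop);

      float ms = 0.0f;
      cudaEventElapsedTime(&ms, start, stop);
      printf("Host to Device: %.1f GB/s\n", bytes / (ms / 1e3) / 1e9);

      cudaFreeHost(h);
      cudaFree(d);
      return 0;
    }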
[chibi@alma8 bandwidthTest]$ cd ~/NVIDIA_CUDA-11.3_Samples/1_Utilities/deviceQuery
[chibi@alma8 deviceQuery]$ make
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -I../../common/inc -m64 --threads 0 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o deviceQuery.o -c deviceQuery.cpp
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
/usr/local/cuda-11.3/bin/nvcc -ccbin g++ -m64 -gencode arch=compute_35,code=sm_35 -gencode arch=compute_37,code=sm_37 -gencode arch=compute_50,code=sm_50 -gencode arch=compute_52,code=sm_52 -gencode arch=compute_60,code=sm_60 -gencode arch=compute_61,code=sm_61 -gencode arch=compute_70,code=sm_70 -gencode arch=compute_75,code=sm_75 -gencode arch=compute_80,code=sm_80 -gencode arch=compute_86,code=sm_86 -gencode arch=compute_86,code=compute_86 -o deviceQuery deviceQuery.o
nvcc warning : The 'compute_35', 'compute_37', 'compute_50', 'sm_35', 'sm_37' and 'sm_50' architectures are deprecated, and may be removed in a future release (Use -Wno-deprecated-gpu-targets to suppress warning).
mkdir -p ../../bin/x86_64/linux/release
cp deviceQuery ../../bin/x86_64/linux/release
[chibi@alma8 deviceQuery]$ ./deviceQuery
./deviceQuery Starting...

 CUDA Device Query (Runtime API) version (CUDART static linking)

Detected 2 CUDA Capable device(s)

Device 0: "NVIDIA TITAN RTX"
  CUDA Driver Version / Runtime Version          11.3 / 11.3
  CUDA Capability Major/Minor version number:    7.5
  Total amount of global memory:                 24220 MBytes (25396838400 bytes)
  (072) Multiprocessors, (064) CUDA Cores/MP:    4608 CUDA Cores
  GPU Max Clock rate:                            1770 MHz (1.77 GHz)
  Memory Clock rate:                             7001 Mhz
  Memory Bus Width:                              384-bit
  L2 Cache Size:                                 6291456 bytes
  Maximum Texture Dimension Size (x,y,z)         1D=(131072), 2D=(131072, 65536), 3D=(16384, 16384, 16384)
  Maximum Layered 1D Texture Size, (num) layers  1D=(32768), 2048 layers
  Maximum Layered 2D Texture Size, (num) layers  2D=(32768, 32768), 2048 layers
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total shared memory per multiprocessor:        65536 bytes
  Total number of registers available per block: 65536
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  1024
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z):  (1024, 1024, 64)
  Max dimension size of a grid size (x,y,z):     (2147483647, 65535, 65535)
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and kernel execution:          Yes with 3 copy engine(s)
  Run time limit on kernels:                     Yes
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support:                        Disabled
  Device supports Unified Addressing (UVA):      Yes
  Device supports Managed Memory:                Yes
  Device supports Compute Preemption:            Yes
  Supports Cooperative Kernel Launch:            Yes
  Supports MultiDevice Co-op Kernel Launch:      Yes
  Device PCI Domain ID / Bus ID / location ID:   0 / 1 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

Device 1: "NVIDIA TITAN RTX"
  CUDA Driver Version / Runtime Version          11.3 / 11.3
  CUDA Capability Major/Minor version number:    7.5
  Total amount of global memory:                 24218 MBytes (25394348032 bytes)
  (072) Multiprocessors, (064) CUDA Cores/MP:    4608 CUDA Cores
  GPU Max Clock rate:                            1770 MHz (1.77 GHz)
  Memory Clock rate:                             7001 Mhz
  Memory Bus Width:                              384-bit
  L2 Cache Size:                                 6291456 bytes
  Maximum Texture Dimension Size (x,y,z)         1D=(131072), 2D=(131072, 65536), 3D=(16384, 16384, 16384)
  Maximum Layered 1D Texture Size, (num) layers  1D=(32768), 2048 layers
  Maximum Layered 2D Texture Size, (num) layers  2D=(32768, 32768), 2048 layers
  Total amount of constant memory:               65536 bytes
  Total amount of shared memory per block:       49152 bytes
  Total shared memory per multiprocessor:        65536 bytes
  Total number of registers available per block: 65536
  Warp size:                                     32
  Maximum number of threads per multiprocessor:  1024
  Maximum number of threads per block:           1024
  Max dimension size of a thread block (x,y,z):  (1024, 1024, 64)
  Max dimension size of a grid size (x,y,z):     (2147483647, 65535, 65535)
  Maximum memory pitch:                          2147483647 bytes
  Texture alignment:                             512 bytes
  Concurrent copy and kernel execution:          Yes with 3 copy engine(s)
  Run time limit on kernels:                     No
  Integrated GPU sharing Host Memory:            No
  Support host page-locked memory mapping:       Yes
  Alignment requirement for Surfaces:            Yes
  Device has ECC support:                        Disabled
  Device supports Unified Addressing (UVA):      Yes
  Device supports Managed Memory:                Yes
  Device supports Compute Preemption:            Yes
  Supports Cooperative Kernel Launch:            Yes
  Supports MultiDevice Co-op Kernel Launch:      Yes
  Device PCI Domain ID / Bus ID / location ID:   0 / 65 / 0
  Compute Mode:
     < Default (multiple host threads can use ::cudaSetDevice() with device simultaneously) >

> Peer access from NVIDIA TITAN RTX (GPU0) -> NVIDIA TITAN RTX (GPU1) : Yes
> Peer access from NVIDIA TITAN RTX (GPU1) -> NVIDIA TITAN RTX (GPU0) : Yes

deviceQuery, CUDA Driver = CUDART, CUDA Driver Version = 11.3, CUDA Runtime Version = 11.3, NumDevs = 2
Result = PASS
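deviceQuery is essentially a pretty-printer for cudaGetDeviceProperties. A minimal sketch that reads a few of the fields shown above (error checks omitted):

    // props.cu - sketch of querying the properties deviceQuery prints.
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      int n = 0;
      cudaGetDeviceCount(&n);
      for (int dev = 0; dev < n; ++dev) {
        cudaDeviceProp p;
        cudaGetDeviceProperties(&p, dev);
        printf("Device %d: \"%s\"\n", dev, p.name);
        printf("  Compute capability: %d.%d\n", p.major, p.minor);
        printf("  Global memory:      %zu bytes\n", p.totalGlobalMem);
        printf("  Multiprocessors:    %d\n", p.multiProcessorCount);
        printf("  Max clock:          %d kHz\n", p.clockRate);
      }
      return 0;
    }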
[chibi@alma8 deviceQuery]$ nvidia-smi nvlink -c
GPU 0: NVIDIA TITAN RTX (UUID: GPU-5a71d61e-f130-637a-b33d-4df555b0ed88)
         Link 0, P2P is supported: true
         Link 0, Access to system memory supported: true
         Link 0, P2P atomics supported: true
         Link 0, System memory atomics supported: true
         Link 0, SLI is supported: true
         Link 0, Link is supported: false
         Link 1, P2P is supported: true
         Link 1, Access to system memory supported: true
         Link 1, P2P atomics supported: true
         Link 1, System memory atomics supported: true
         Link 1, SLI is supported: true
         Link 1, Link is supported: false
GPU 1: NVIDIA TITAN RTX (UUID: GPU-7fb51c1d-c1e7-35cc-aad7-66971f05ddb7)
         Link 0, P2P is supported: true
         Link 0, Access to system memory supported: true
         Link 0, P2P atomics supported: true
         Link 0, System memory atomics supported: true
         Link 0, SLI is supported: true
         Link 0, Link is supported: false
         Link 1, P2P is supported: true
         Link 1, Access to system memory supported: true
         Link 1, P2P atomics supported: true
         Link 1, System memory atomics supported: true
         Link 1, SLI is supported: true
         Link 1, Link is supported: false
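The same per-pair P2P capabilities that nvidia-smi reports per NVLink link can also be queried from the CUDA runtime. A sketch using cudaDeviceGetP2PAttribute, assuming the device pair 0 -> 1 (error checks omitted):

    // p2p_attr.cu - sketch of querying P2P attributes between two devices,
    // roughly the information nvidia-smi nvlink -c reports.
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      int perf = 0, access = 0, atomics = 0;
      // Attributes for the ordered pair (device 0 -> device 1).
      cudaDeviceGetP2PAttribute(&perf,    cudaDevP2PAttrPerformanceRank,       0, 1);
      cudaDeviceGetP2PAttribute(&access,  cudaDevP2PAttrAccessSupported,       0, 1);
      cudaDeviceGetP2PAttribute(&atomics, cudaDevP2PAttrNativeAtomicSupported, 0, 1);
      printf("GPU0 -> GPU1: access=%d, native atomics=%d, perf rank=%d\n",
             access, atomics, perf);
      return 0;
    }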
[chibi@alma8 deviceQuery]$ nvidia-smi
Fri Jun 18 12:54:18 2021
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 465.19.01    Driver Version: 465.19.01    CUDA Version: 11.3     |
|-------------------------------+----------------------+----------------------+
| GPU  Name        Persistence-M| Bus-Id        Disp.A | Volatile Uncorr. ECC |
| Fan  Temp  Perf  Pwr:Usage/Cap|         Memory-Usage | GPU-Util  Compute M. |
|                               |                      |               MIG M. |
|===============================+======================+======================|
|   0  NVIDIA TITAN RTX    Off  | 00000000:01:00.0 Off |                  N/A |
| 41%   42C    P8    33W / 280W |     15MiB / 24220MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+
|   1  NVIDIA TITAN RTX    Off  | 00000000:41:00.0  On |                  N/A |
| 41%   48C    P8    22W / 280W |      1MiB / 24217MiB |      0%      Default |
|                               |                      |                  N/A |
+-------------------------------+----------------------+----------------------+

+-----------------------------------------------------------------------------+
| Processes:                                                                  |
|  GPU   GI   CI        PID   Type   Process name                  GPU Memory |
|        ID   ID                                                   Usage      |
|=============================================================================|
|    0   N/A  N/A      5158      G   /usr/libexec/Xorg                   9MiB |
|    0   N/A  N/A      5715      G   /usr/bin/gnome-shell                4MiB |
+-----------------------------------------------------------------------------+
[chibi@alma8 deviceQuery]$ cat /etc/redhat-release
AlmaLinux release 8.4 (Electric Cheetah)
[chibi@alma8 deviceQuery]$ nvcc -V
nvcc: NVIDIA (R) Cuda compiler driver
Copyright (c) 2005-2021 NVIDIA Corporation
Built on Mon_May__3_19:15:13_PDT_2021
Cuda compilation tools, release 11.3, V11.3.109
Build cuda_11.3.r11.3/compiler.29920130_0
[chibi@alma8 deviceQuery]$
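The matching 11.3 / 11.3 driver/runtime pair that deviceQuery and nvcc -V report can also be checked programmatically. A minimal sketch (versions are encoded as 1000*major + 10*minor, e.g. 11030 for CUDA 11.3):

    // versions.cu - sketch of reading the installed driver and runtime
    // versions from the CUDA runtime API.
    #include <cstdio>
    #include <cuda_runtime.h>

    int main() {
      int driver = 0, runtime = 0;
      cudaDriverGetVersion(&driver);
      cudaRuntimeGetVersion(&runtime);
      printf("Driver: %d.%d, Runtime: %d.%d\n",
             driver / 1000, (driver % 100) / 10,
             runtime / 1000, (runtime % 100) / 10);
      return 0;
    }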