CUDA Memory Hierarchy
Transcription
1 CUDA Memory Hierarchy Piotr Danilewski October 2012 Saarland University
2 Memory GTX 690
3 GTX 690 Memory: host memory, main GPU memory (global memory), shared memory, caches, registers
4 Memory (diagram): host memory; on the GPU, global memory; inside each SM, shared memory, registers and ALUs
5 Memory (diagram): the same hierarchy, annotated with host code (runs against host memory) and device code (runs on the GPU)
6 Memory (diagram): adds constant memory, texture memory and local memory on the device side, with a constant cache and a texture cache in each SM
7 Memory (diagram): the same hierarchy, again annotated with host code and device code
8 Memory (diagram): adds the device-wide L2 cache and an L1 cache next to the shared memory of each SM
9 Memory (diagram): the complete hierarchy once more, marked with a question mark
10 global memory
  Dynamic:
    int* devptr;
    cudaMalloc(&devptr, sizeof(int));
    cudaMemcpy(devptr, hostptr, sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(hostptr, devptr, sizeof(int), cudaMemcpyDeviceToHost);
    cudaFree(devptr);
  Static:
    __device__ int devvar;
    cudaMemcpyToSymbol(devvar, &hostvar, sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyFromSymbol(&hostvar, devvar, sizeof(int), 0, cudaMemcpyDeviceToHost);
    // freed automatically at the end of the application
11-22 global memory (worked example, built up over several animation slides)
  Dynamic:
    __global__ void add4f(float* u, float* v) {
        int i = threadIdx.x;                 // one thread per element
        u[i] += v[i];
    }
    float hostu[4] = {1, 2, 3, 4};
    float hostv[4] = {1, 2, 3, 4};
    float *devu, *devv;
    cudaMalloc(&devu, size);                 // size = 4 * sizeof(float)
    cudaMalloc(&devv, size);
    cudaMemcpy(devu, hostu, size, cudaMemcpyHostToDevice);
    cudaMemcpy(devv, hostv, size, cudaMemcpyHostToDevice);
    add4f<<<1,4>>>(devu, devv);
    cudaMemcpy(hostu, devu, size, cudaMemcpyDeviceToHost);
    cudaFree(devv);
    cudaFree(devu);
  Static:
    __device__ float devu[4];
    __device__ float devv[4];
    __global__ void adduv() {
        int i = threadIdx.x;                 // as above
        devu[i] += devv[i];
    }
    float hostu[4] = {1, 2, 3, 4};
    float hostv[4] = {1, 2, 3, 4};
    cudaMemcpyToSymbol(devu, hostu, size, 0, cudaMemcpyHostToDevice);
    cudaMemcpyToSymbol(devv, hostv, size, 0, cudaMemcpyHostToDevice);
    adduv<<<1,4>>>();
    cudaMemcpyFromSymbol(hostu, devu, size, 0, cudaMemcpyDeviceToHost);
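A complete, compilable version of the dynamic variant might look as follows; this is a sketch in which the element index i, the size variable and the result check, all elided on the slides, are filled in:

    #include <cstdio>

    __global__ void add4f(float* u, float* v) {
        int i = threadIdx.x;                 // one thread per element
        u[i] += v[i];
    }

    int main() {
        float hostu[4] = {1, 2, 3, 4};
        float hostv[4] = {1, 2, 3, 4};
        size_t size = 4 * sizeof(float);

        float *devu, *devv;
        cudaMalloc(&devu, size);
        cudaMalloc(&devv, size);
        cudaMemcpy(devu, hostu, size, cudaMemcpyHostToDevice);
        cudaMemcpy(devv, hostv, size, cudaMemcpyHostToDevice);

        add4f<<<1, 4>>>(devu, devv);         // 1 block of 4 threads

        cudaMemcpy(hostu, devu, size, cudaMemcpyDeviceToHost);
        cudaFree(devv);
        cudaFree(devu);

        for (int i = 0; i < 4; ++i)
            printf("%g\n", hostu[i]);        // expected: 2 4 6 8
        return 0;
    }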
23 Memory (diagram): the full hierarchy again, leading into the constant memory section
24 constant memory (per-SM constant cache)
    __constant__ int devvar;
    cudaMemcpyToSymbol(devvar, &hostvar, sizeof(int), 0, cudaMemcpyHostToDevice);
    cudaMemcpyFromSymbol(&hostvar, devvar, sizeof(int), 0, cudaMemcpyDeviceToHost);
    // freed automatically at the end of the application
25 constant memory (per-SM constant cache): kernel parameters are passed through constant memory, up to 4 KB [Fermi] [Kepler]; the Load Uniform instruction [Fermi] [Kepler?] reads through the constant cache when the variable resides in global memory, is read-only in the kernel, and its address does not depend on threadIdx
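A minimal sketch of using constant memory from a kernel, with an illustrative variable and kernel name; every thread reads the same address, which is the access pattern the constant cache is built for:

    __constant__ float alpha;                // lives in constant memory, cached per SM

    __global__ void scale(float* u) {
        int i = threadIdx.x;
        u[i] *= alpha;                       // same address for all threads: served as a broadcast
    }

    // host side:
    //   float hostalpha = 2.0f;
    //   cudaMemcpyToSymbol(alpha, &hostalpha, sizeof(float), 0, cudaMemcpyHostToDevice);
    //   scale<<<1, 4>>>(devu);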
26 Memory (diagram): the hierarchy again, leading into the shared memory section
27 shared memory (per SM)
  Static: declared at file scope or inside a device function: __shared__ int sharr[4]; size is fixed at compile time
  Dynamic: declared at file scope or inside a device function: extern __shared__ int sharr[]; size is a run-time parameter of the launch, kernel<<<grid, block, sizeof(int)*4>>>(); all extern __shared__ variables occupy the same memory
  In both cases: not accessible from the host; freed automatically at the end of the kernel
28-30 shared memory (worked example, u[i] = u[i]^2 + u[n-1-i], built up over several slides)
  Static:
    __global__ void example(float* u) {
        __shared__ int tmp[4];
        int i = threadIdx.x;
        tmp[i] = u[i];
        __syncthreads();                     // every element must be written before tmp[3-i] is read
        u[i] = tmp[i]*tmp[i] + tmp[3-i];
    }
    float hostu[4] = {1, 2, 3, 4};
    float* devu;
    cudaMalloc(&devu, size);                 // size = 4 * sizeof(float)
    cudaMemcpy(devu, hostu, size, cudaMemcpyHostToDevice);
    example<<<1,4>>>(devu);
    cudaMemcpy(hostu, devu, size, cudaMemcpyDeviceToHost);
    cudaFree(devu);
  Dynamic:
    extern __shared__ int tmp[];
    __global__ void example(float* u) {
        int i = threadIdx.x;
        tmp[i] = u[i];
        __syncthreads();
        u[i] = tmp[i]*tmp[i] + tmp[3-i];
    }
    // same host code, but the shared-memory size is passed at launch:
    example<<<1,4,size>>>(devu);
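Slide 27 notes that all extern __shared__ arrays occupy the same memory; a kernel that needs several dynamically sized shared arrays therefore carves them out of one buffer by hand. A minimal sketch with illustrative names and sizes:

    extern __shared__ char smem[];           // one dynamic allocation per block

    __global__ void twoArrays(int n) {
        float* a = (float*)smem;             // first n floats of the buffer
        int*   b = (int*)&a[n];              // followed by n ints
        int i = threadIdx.x;
        if (i < n) { a[i] = i; b[i] = i * i; }
        __syncthreads();
        // ... work with a[] and b[] ...
    }

    // launch with enough dynamic shared memory for both arrays:
    //   twoArrays<<<grid, block, n * sizeof(float) + n * sizeof(int)>>>(n);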
31 shared memory and L1 cache (per SM): a few bytes of shared memory hold control/kernel parameters, up to 256 B [pre-Fermi]; the L1 cache can be configured to 16 KB, 32 KB or 48 KB, with the remainder of the 64 KB block left as shared memory [Fermi] [Kepler]; the 32 KB option is [Kepler] only
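The shared-memory / L1 split can be requested per kernel from the host; a minimal sketch using the runtime call cudaFuncSetCacheConfig, which is not shown in the deck, with illustrative kernel names:

    __global__ void sharedHeavy(float* u) { /* ... uses a lot of __shared__ memory ... */ }
    __global__ void localHeavy(float* u)  { /* ... spills registers to local memory ... */ }

    int main() {
        // prefer a 48 KB shared / 16 KB L1 split where shared memory is the bottleneck ...
        cudaFuncSetCacheConfig(sharedHeavy, cudaFuncCachePreferShared);
        // ... and a 16 KB shared / 48 KB L1 split where local/global traffic dominates
        cudaFuncSetCacheConfig(localHeavy, cudaFuncCachePreferL1);
        return 0;
    }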
32 Memory (diagram): the hierarchy again, leading into the local memory section
33 local memory: per-thread, but slow (unless served by L1 or L2); no explicit keyword; used for register spills; used when a variable's address is taken; used with recursive functions
34-36 local memory and register limits per architecture (table built up over three slides):
                                       [pre-Fermi]   [Fermi]   [Kepler]
    max registers per thread           128           63        63/255
    max registers per SM               8K/16K        32K       64K
    max threads per SM                 768/1024      1536      2048
    max registers per thread if only
      half of the threads are resident .../32
37 local memory, examples of what ends up there:
    float x = ...; float *px = &x;    (the address of a variable is taken)
    float x[4]; x[i] = ...;           (array indexed with a value not known at compile time)
    float x[1000];                    (array too large for registers)
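Whether such variables stay in registers or spill to local memory can be inspected at compile time; a small sketch (the kernel is illustrative), assuming the standard verbose ptxas output of nvcc:

    // Compile with:  nvcc -Xptxas -v spill.cu
    // ptxas then reports the register count and local-memory (lmem) usage per kernel
    // (the exact wording of the report varies between toolkit versions).
    __global__ void spill(float* out) {
        float x[1000];                       // far too large for registers: placed in local memory
        for (int k = 0; k < 1000; ++k)
            x[k] = k * 0.5f;
        out[threadIdx.x] = x[threadIdx.x];   // dynamic index keeps the array out of registers
    }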
39 Memory (diagram): the hierarchy again, leading into the texture memory section
40 texture memory (per-SM texture cache): a separate cache, located in the TPC [pre-Fermi] or in the SM [Fermi] [Kepler]; 1D, 2D and 3D addressing; normalized addressing; value normalization; layered textures and cubemaps [Fermi] [Kepler]; border handling (clamp, wrap, mirror); interpolation (nearest, linear)
41 texture memory, binding a texture reference to linear memory:
    texture<float, Type, ReadMode> texref;   // file scope
    texref.normalized = ...
    texref.addressMode[0] = ...
    texref.filterMode = ...
    float* devptr;
    cudaMalloc(&devptr, ...);
    cudaChannelFormatDesc channeldesc = cudaCreateChannelDesc<float>();
    size_t offset;
    cudaBindTexture(&offset, texref, devptr, channeldesc, size);
    // host code keeps using devptr as an ordinary device pointer
    cudaUnbindTexture(texref);
    cudaFree(devptr);
42 texture memory, template parameters of texture<DataType, Type, ReadMode> (binding code as on slide 41):
    DataType: one of char, short, int, long, long long, float, double, or a vector type of 2 or 4 components (e.g. short2, float4)
    Type (dimensionality): cudaTextureType1D (default), cudaTextureType2D, cudaTextureType3D, cudaTextureType1DLayered, cudaTextureType2DLayered, cudaTextureTypeCubemap, cudaTextureTypeCubemapLayered
    ReadMode (value normalization): cudaReadModeElementType (default), cudaReadModeNormalizedFloat
43 texture memory, texture reference fields (set before binding):
    texref.normalized: false = coordinates in [0 .. N-1] (default), true = coordinates in [0 .. 1-1/N]
    texref.addressMode[...] (border handling, per dimension): cudaAddressModeBorder, cudaAddressModeClamp, cudaAddressModeWrap, cudaAddressModeMirror
    texref.filterMode (interpolation): cudaFilterModePoint (default), cudaFilterModeLinear
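Normalized coordinates, clamping and linear filtering only take effect when sampling with tex1D/tex2D/tex3D; a minimal sketch of configuring and binding such a texture, assuming the data is placed in a cudaArray (which the deck otherwise leaves to slide 47) and using illustrative data:

    texture<float, cudaTextureType1D, cudaReadModeElementType> texref;   // file scope

    // configure sampling before binding
    texref.normalized     = true;                    // coordinates given in [0, 1)
    texref.addressMode[0] = cudaAddressModeClamp;    // out-of-range coordinates are clamped
    texref.filterMode     = cudaFilterModeLinear;    // interpolate between neighbouring texels

    // linear filtering and normalized addressing need a cudaArray backing store
    const int N = 4;
    float hostdata[N] = {1, 2, 3, 4};
    cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
    cudaArray* arr;
    cudaMallocArray(&arr, &desc, N);
    cudaMemcpyToArray(arr, 0, 0, hostdata, N * sizeof(float), cudaMemcpyHostToDevice);
    cudaBindTextureToArray(texref, arr, desc);

    // device code then samples with a normalized coordinate, e.g.
    //   float v = tex1D(texref, 0.6f);               // interpolated between elements 1 and 2

    cudaUnbindTexture(texref);
    cudaFreeArray(arr);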
44 texture memory, texture objects: the texture reference API above works on [pre-Fermi] [Fermi] [Kepler]; on [Kepler] textures can instead be created at run time as texture objects:
    cudaTextureDesc texdesc;
    cudaResourceDesc resdesc;
    cudaCreateTextureObject(...);
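A sketch of the [Kepler] texture-object path named on the slide, describing a plain linear float buffer and sampling it from a kernel; the buffer setup mirrors slide 41 and the remaining names are illustrative:

    // a device buffer of floats, as on slide 41
    float* devptr;
    size_t size = 4 * sizeof(float);
    cudaMalloc(&devptr, size);

    // describe the memory backing the texture ...
    cudaResourceDesc resdesc;
    memset(&resdesc, 0, sizeof(resdesc));            // memset needs <cstring>
    resdesc.resType = cudaResourceTypeLinear;
    resdesc.res.linear.devPtr = devptr;
    resdesc.res.linear.desc = cudaCreateChannelDesc<float>();
    resdesc.res.linear.sizeInBytes = size;

    // ... and how it is sampled
    cudaTextureDesc texdesc;
    memset(&texdesc, 0, sizeof(texdesc));
    texdesc.readMode = cudaReadModeElementType;

    cudaTextureObject_t tex = 0;
    cudaCreateTextureObject(&tex, &resdesc, &texdesc, NULL);

    // the object is passed to the kernel as an ordinary argument:
    //   __global__ void k(cudaTextureObject_t t) { float v = tex1Dfetch<float>(t, threadIdx.x); ... }

    cudaDestroyTextureObject(tex);
    cudaFree(devptr);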
45 texture memory, reading a texture in device code (after cudaBindTexture(&offset, texref, devptr, channeldesc, size)):
    float v = tex1Dfetch(texref, x);   // linear memory: integer coordinate, address not normalized, no interpolation
    float v = tex1D(texref, x);
    float v = tex2D(texref, x, y);
    float v = tex3D(texref, x, y, z);
    ...
    The fetch function matches the texture's dimensionality (see the Type parameter on slide 42).
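Putting slides 41 and 45 together, a minimal end-to-end sketch of the texture-reference path with tex1Dfetch; the kernel name and data are illustrative:

    texture<float, cudaTextureType1D, cudaReadModeElementType> texref;   // file scope

    __global__ void copyFromTex(float* out) {
        int i = threadIdx.x;
        out[i] = tex1Dfetch(texref, i);      // integer coordinate, no normalization, no interpolation
    }

    int main() {
        const int N = 4;
        float hostdata[N] = {1, 2, 3, 4};
        float *devdata, *devout;
        cudaMalloc(&devdata, N * sizeof(float));
        cudaMalloc(&devout,  N * sizeof(float));
        cudaMemcpy(devdata, hostdata, N * sizeof(float), cudaMemcpyHostToDevice);

        cudaChannelFormatDesc channeldesc = cudaCreateChannelDesc<float>();
        size_t offset;
        cudaBindTexture(&offset, texref, devdata, channeldesc, N * sizeof(float));

        copyFromTex<<<1, N>>>(devout);

        cudaMemcpy(hostdata, devout, N * sizeof(float), cudaMemcpyDeviceToHost);
        cudaUnbindTexture(texref);
        cudaFree(devout);
        cudaFree(devdata);
        return 0;
    }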
46 Recap (diagram): host memory; on the GPU: global memory (__device__, cudaMalloc, cudaMemcpyToSymbol, cudaMemcpy, cudaFree), constant memory (__constant__, cudaMemcpyToSymbol), texture memory (texture<...>, cudaBindTexture), local memory, L2 cache; per SM: texture cache, constant cache, shared memory + L1 cache (__shared__), registers, ALUs
47 Not covered: accessing host memory from a kernel; asynchronous memory transfers; L1 cache configuration; efficient access patterns; texture objects [Kepler]; cudaArray; surfaces
48 Thank you Questions?