LearnOpenGL


commit b416180e66ce429c96126fcd0326bf80f2e4e222
Author: Matsuda Kenji <ftvda283@gmail.com>
Date:   Fri, 10 Sep 2021 08:32:12 +0900

first commit

Diffstat:
AGetting-started/Camera.html | 844+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Coordinate-Systems.html | 727+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Creating-a-window.html | 299+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Hello-Triangle.html | 732+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Hello-Window.html | 326+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/OpenGL.html | 160+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Review.html | 317+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Shaders.html | 843+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Textures.html | 803+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
AGetting-started/Transformations.html | 838+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Aimg/getting-started/cmake.png | 0
Aimg/getting-started/hellotriangle.png | 0
Aimg/getting-started/hellotriangle2.png | 0
Aimg/getting-started/hellowindow.png | 0
Aimg/getting-started/hellowindow2.png | 0
Aimg/getting-started/include_directories.png | 0
Aimg/getting-started/linker_input.png | 0
Aimg/getting-started/ndc.png | 0
Aimg/getting-started/opengl.jpg | 0
Aimg/getting-started/pipeline.png | 0
Aimg/getting-started/vc_directories.png | 0
Aimg/getting-started/vertex_array_objects.png | 0
Aimg/getting-started/vertex_array_objects_ebo.png | 0
Aimg/getting-started/vertex_attribute_pointer.png | 0
Aimg/getting-started/x64.png | 0
Astatic/style.css | 283+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Astatic/style2.css | 1+
27 files changed, 6173 insertions(+), 0 deletions(-)

diff --git a/Getting-started/Camera.html b/Getting-started/Camera.html @@ -0,0 +1,843 @@ + + +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8"/> + <title>LearnOpenGL - Camera</title> +</head> +<body>
id="donate"> + <a href="https://www.paypal.me/learnopengl/" target="_blank"> + <div id="donate_img"></div> + <img style="display: none" src="/img/donate_button_hover.png"/> + <!--<img id="donate_img" src="img/patreon.png"/>--> + </a> + <!--<div id="alipay"> + <img style="width: 150px;" class="clean" src="/img/alipay_logo.png"/> + <img style="width: 150px; margin-top: 5px" src="/img/alipay.png"/> + </div>--> + </div> + <div class="btc"> + <h3>BTC</h3> + <p> + 1CLGKgmBSuYJ1nnvDGAepVTKNNDpUjfpRa + </p> + <img src="/img/btc_qr.png"/> + </div> + <div class="btc"> + <h3>ETH/ERC20</h3> + <p> + 0x1de59bd9e52521a46309474f8372531533bd7c43 + </p> + <img src="/img/erc20_qr.png"/> + </div> + <div id="ad"> + <!--<div id="waldo-tag-1684"></div>--> + </div> + + <div id="lefttwothirdad"> + <div id="waldo-tag-2245"></div> + </div> + </div> + + <div id="content"> + <h1 id="content-title">Camera</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Camera</h1> +<p> + In the previous chapter we discussed the view matrix and how we can use the view matrix to move around the scene (we moved backwards a little). OpenGL by itself is not familiar with the concept of a <em>camera</em>, but we can try to simulate one by moving all objects in the scene in the reverse direction, giving the illusion that <strong>we</strong> are moving. +</p> + +<p> + In this chapter we'll discuss how we can set up a camera in OpenGL. We will discuss a fly style camera that allows you to freely move around in a 3D scene. We'll also discuss keyboard and mouse input and finish with a custom camera class. +</p> + +<h2>Camera/View space</h2> +<p> + When we're talking about camera/view space we're talking about all the vertex coordinates as seen from the camera's perspective as the origin of the scene: the view matrix transforms all the world coordinates into view coordinates that are relative to the camera's position and direction. To define a camera we need its position in world space, the direction it's looking at, a vector pointing to the right and a vector pointing upwards from the camera. A careful reader may notice that we're actually going to create a coordinate system with 3 perpendicular unit axes with the camera's position as the origin. +</p> + +<img src="/img/getting-started/camera_axes.png" class="clean"/> + +<h3>1. Camera position</h3> +<p> + Getting the camera position is easy. The camera position is a vector in world space that points to the camera's position. We set the camera at the same position we've set the camera in the previous chapter: +</p> + +<pre><code> +glm::vec3 cameraPos = glm::vec3(0.0f, 0.0f, 3.0f); +</code></pre> + +<note> + Don't forget that the positive z-axis is going through your screen towards you so if we want the camera to move backwards, we move along the positive z-axis. +</note> + +<h3>2. Camera direction</h3> +<p> + The next vector required is the camera's direction e.g. at what direction it is pointing at. For now we let the camera point to the origin of our scene: <code>(0,0,0)</code>. Remember that if we subtract two vectors from each other we get a vector that's the difference of these two vectors? Subtracting the camera position vector from the scene's origin vector thus results in the direction vector we want. For the view matrix's coordinate system we want its z-axis to be positive and because by convention (in OpenGL) the camera points towards the negative z-axis we want to negate the direction vector. 
If we switch the subtraction order around we now get a vector pointing towards the camera's positive z-axis: +</p> +
 +<pre><code> +glm::vec3 cameraTarget = glm::vec3(0.0f, 0.0f, 0.0f); +glm::vec3 cameraDirection = glm::normalize(cameraPos - cameraTarget); +</code></pre> +
 +<warning> + The name <em>direction</em> vector is not the best choice of name, since it actually points in the reverse direction of what it is targeting. +</warning> +
 +<h3>3. Right axis</h3> +<p> + The next vector that we need is a <em>right</em> vector that represents the positive x-axis of the camera space. To get the <em>right</em> vector we use a little trick by first specifying an <em>up</em> vector that points upwards (in world space). Then we do a cross product on the up vector and the direction vector from step 2. Since the result of a cross product is a vector perpendicular to both vectors, we will get a vector that points in the positive x-axis's direction (if we switched the cross product order we'd get a vector that points in the negative x-axis's direction): +</p> +
 +<pre><code> +glm::vec3 up = glm::vec3(0.0f, 1.0f, 0.0f); +glm::vec3 cameraRight = glm::normalize(<function id='61'>glm::cross</function>(up, cameraDirection)); +</code></pre> +
 +<h3>4. Up axis</h3> +<p> + Now that we have both the x-axis vector and the z-axis vector, retrieving the vector that points in the direction of the camera's positive y-axis is relatively easy: we take the cross product of the right and direction vector: +</p> +
 +<pre><code> +glm::vec3 cameraUp = <function id='61'>glm::cross</function>(cameraDirection, cameraRight); +</code></pre> +
 +<p> + With the help of the cross product and a few tricks we were able to create all the vectors that form the view/camera space. For the more mathematically inclined readers, this process is known as the <a href="http://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process" target="_blank">Gram-Schmidt</a> process in linear algebra. Using these camera vectors we can now create a <def>LookAt</def> matrix that proves very useful for creating a camera. +</p> +
 +<h2>Look At</h2> +<p> + A great thing about matrices is that if you define a coordinate space using 3 perpendicular (or at least linearly independent) axes you can create a matrix with those 3 axes plus a translation vector, and you can transform any vector to that coordinate space by multiplying it with this matrix. This is exactly what the <em>LookAt</em> matrix does and now that we have 3 perpendicular axes and a position vector to define the camera space we can create our own LookAt matrix: +
 + \[LookAt = \begin{bmatrix} \color{red}{R_x} & \color{red}{R_y} & \color{red}{R_z} & 0 \\ \color{green}{U_x} & \color{green}{U_y} & \color{green}{U_z} & 0 \\ \color{blue}{D_x} & \color{blue}{D_y} & \color{blue}{D_z} & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} * \begin{bmatrix} 1 & 0 & 0 & -\color{purple}{P_x} \\ 0 & 1 & 0 & -\color{purple}{P_y} \\ 0 & 0 & 1 & -\color{purple}{P_z} \\ 0 & 0 & 0 & 1 \end{bmatrix} \] +
 + Where \(\color{red}R\) is the right vector, \(\color{green}U\) is the up vector, \(\color{blue}D\) is the direction vector and \(\color{purple}P\) is the camera's position vector. Note that the rotation (left matrix) and translation (right matrix) parts are inverted (transposed and negated respectively) since we want to rotate and translate the world in the opposite direction of where we want the camera to move. Using this LookAt matrix as our view matrix effectively transforms all the world coordinates to the view space we just defined. The LookAt matrix then does exactly what it says: it creates a view matrix that <em>looks</em> at a given target. +</p>
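<p>
 To make the matrix above concrete: the following is a minimal sketch (not part of the original chapter) of how this view matrix could be assembled by hand with GLM from the <var>cameraPos</var>, <var>cameraDirection</var>, <var>cameraRight</var> and <var>cameraUp</var> vectors we just computed. Keep in mind that GLM matrices are column-major, so they are indexed as <code>mat[column][row]</code>; the second exercise at the end of this chapter asks you to wrap exactly this idea into your own LookAt function.
</p>

<pre><code>
// Rotation part: the rows are the camera's right, up and direction vectors.
glm::mat4 rotation = glm::mat4(1.0f); // identity
rotation[0][0] = cameraRight.x;     rotation[1][0] = cameraRight.y;     rotation[2][0] = cameraRight.z;
rotation[0][1] = cameraUp.x;        rotation[1][1] = cameraUp.y;        rotation[2][1] = cameraUp.z;
rotation[0][2] = cameraDirection.x; rotation[1][2] = cameraDirection.y; rotation[2][2] = cameraDirection.z;

// Translation part: identity with the negated camera position in the fourth column.
glm::mat4 translation = glm::mat4(1.0f);
translation[3][0] = -cameraPos.x;
translation[3][1] = -cameraPos.y;
translation[3][2] = -cameraPos.z;

// Rotation * translation, exactly as in the formula above.
glm::mat4 view = rotation * translation;
</code></pre>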
 + +<p> + Luckily for us, GLM already does all this work for us. We only have to specify a camera position, a target position and a vector that represents the up vector in world space (the up vector we used for calculating the right vector). GLM then creates the LookAt matrix that we can use as our view matrix: +</p> +
 +<pre><code> +glm::mat4 view; +view = <function id='62'>glm::lookAt</function>(glm::vec3(0.0f, 0.0f, 3.0f), + glm::vec3(0.0f, 0.0f, 0.0f), + glm::vec3(0.0f, 1.0f, 0.0f)); +</code></pre> +
 +<p> + The <fun><function id='62'>glm::lookAt</function></fun> function requires a position, target and up vector respectively. This example creates a view matrix that is the same as the one we created in the previous chapter. +</p> +
 +<p> + Before delving into user input, let's get a little funky first by rotating the camera around our scene. We keep the target of the scene at <code>(0,0,0)</code>. We use a little bit of trigonometry to create an <code>x</code> and <code>z</code> coordinate each frame that together represent a point on a circle and we'll use these for our camera position. By re-calculating the <code>x</code> and <code>z</code> coordinate over time we're traversing all the points in a circle and thus the camera rotates around the scene. We enlarge this circle by a pre-defined <var>radius</var> and create a new view matrix each frame using GLFW's <fun><function id='47'>glfwGetTime</function></fun> function: +</p> +
 +<pre><code> +const float radius = 10.0f; +float camX = sin(<function id='47'>glfwGetTime</function>()) * radius; +float camZ = cos(<function id='47'>glfwGetTime</function>()) * radius; +glm::mat4 view; +view = <function id='62'>glm::lookAt</function>(glm::vec3(camX, 0.0, camZ), glm::vec3(0.0, 0.0, 0.0), glm::vec3(0.0, 1.0, 0.0)); +</code></pre> +
 +<p> + If you run this code you should get something like this: +</p> +
 +<div class="video paused" onclick="ClickVideo(this)"> + <video width="600" height="450" loop> + <source src="/video/getting-started/camera_circle.mp4" type="video/mp4"/> + <img src="/img/getting-started/camera_circle.png" class="clean"/> + </video> +</div> +
 +<p> + With this little snippet of code the camera now circles around the scene over time. Feel free to experiment with the radius and position/direction parameters to get the feel of how this <em>LookAt</em> matrix works. Also, check the <a href="/code_viewer_gh.php?code=src/1.getting_started/7.1.camera_circle/camera_circle.cpp" target="_blank">source code</a> if you're stuck. +</p> +
 +<h1>Walk around</h1> +<p> + Swinging the camera around a scene is fun, but it's more fun to do all the movement ourselves! First we need to set up a camera system, so it is useful to define some camera variables at the top of our program: +</p> +
 +<pre><code> +glm::vec3 cameraPos = glm::vec3(0.0f, 0.0f, 3.0f); +glm::vec3 cameraFront = glm::vec3(0.0f, 0.0f, -1.0f); +glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f); +</code></pre> +
 +<p> + The <code>LookAt</code> function now becomes: +</p> +
 +<pre><code> +view = <function id='62'>glm::lookAt</function>(cameraPos, cameraPos + cameraFront, cameraUp); +</code></pre> +
 +<p> + First we set the camera position to the previously defined <var>cameraPos</var>. The direction is the current position + the direction vector we just defined. This ensures that however we move, the camera keeps looking at the target direction. 
Let's play a bit with these variables by updating the <var>cameraPos</var> vector when we press some keys. +</p> + +<p> + We already defined a <fun>processInput</fun> function to manage GLFW's keyboard input so let's add a few extra key commands: +</p> + +<pre><code> +void processInput(GLFWwindow *window) +{ + ... + const float cameraSpeed = 0.05f; // adjust accordingly + if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS) + cameraPos += cameraSpeed * cameraFront; + if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS) + cameraPos -= cameraSpeed * cameraFront; + if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS) + cameraPos -= glm::normalize(<function id='61'>glm::cross</function>(cameraFront, cameraUp)) * cameraSpeed; + if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS) + cameraPos += glm::normalize(<function id='61'>glm::cross</function>(cameraFront, cameraUp)) * cameraSpeed; +} +</code></pre> + +<p> + Whenever we press one of the <code>WASD</code> keys, the camera's position is updated accordingly. If we want to move forward or backwards we add or subtract the direction vector from the position vector scaled by some speed value. If we want to move sideways we do a cross product to create a <em>right</em> vector and we move along the right vector accordingly. This creates the familiar <def>strafe</def> effect when using the camera. +</p> + +<note> + Note that we normalize the resulting <em>right</em> vector. If we wouldn't normalize this vector, the resulting cross product may return differently sized vectors based on the <var>cameraFront</var> variable. If we would not normalize the vector we would move slow or fast based on the camera's orientation instead of at a consistent movement speed. +</note> + +<p> + By now, you should already be able to move the camera somewhat, albeit at a speed that's system-specific so you may need to adjust <var>cameraSpeed</var>. + +<h2>Movement speed</h2> +<p> + Currently we used a constant value for movement speed when walking around. In theory this seems fine, but in practice people's machines have different processing powers and the result of that is that some people are able to render much more frames than others each second. Whenever a user renders more frames than another user he also calls <fun>processInput</fun> more often. The result is that some people move really fast and some really slow depending on their setup. When shipping your application you want to make sure it runs the same on all kinds of hardware. +</p> + +<p> + Graphics applications and games usually keep track of a <def>deltatime</def> variable that stores the time it took to render the last frame. We then multiply all velocities with this <var>deltaTime</var> value. The result is that when we have a large <var>deltaTime</var> in a frame, meaning that the last frame took longer than average, the velocity for that frame will also be a bit higher to balance it all out. When using this approach it does not matter if you have a very fast or slow pc, the velocity of the camera will be balanced out accordingly so each user will have the same experience. 
+</p> + +<p> + To calculate the <var>deltaTime</var> value we keep track of 2 global variables: +</p> + +<pre><code> +float deltaTime = 0.0f; // Time between current frame and last frame +float lastFrame = 0.0f; // Time of last frame +</code></pre> + +<p> + Within each frame we then calculate the new <var>deltaTime</var> value for later use: +</p> + +<pre><code> +float currentFrame = <function id='47'>glfwGetTime</function>(); +deltaTime = currentFrame - lastFrame; +lastFrame = currentFrame; +</code></pre> + +<p> + Now that we have <var>deltaTime</var> we can take it into account when calculating the velocities: +</p> + +<pre><code> +void processInput(GLFWwindow *window) +{ + float cameraSpeed = 2.5f * deltaTime; + [...] +} +</code></pre> + +<p> + Since we're using <var>deltaTime</var> the camera will now move at a constant speed of <code>2.5</code> units per second. Together with the previous section we should now have a much smoother and more consistent camera system for moving around the scene: +</p> + +<div class="video paused" onclick="ClickVideo(this)"> + <video width="600" height="450" loop> + <source src="/video/getting-started/camera_smooth.mp4" type="video/mp4" /> + <img src="/img/getting-started/camera_smooth.png" class="clean"/> + </video> +</div> + +<p> + And now we have a camera that walks and looks equally fast on any system. Again, check the <a href="/code_viewer_gh.php?code=src/1.getting_started/7.2.camera_keyboard_dt/camera_keyboard_dt.cpp" target="_blank">source code</a> if you're stuck. We'll see the <var>deltaTime</var> value frequently return with anything movement related. +</p> + +<h1>Look around</h1> +<p> + Only using the keyboard keys to move around isn't that interesting. Especially since we can't turn around making the movement rather restricted. That's where the mouse comes in! +</p> + +<p> + To look around the scene we have to change the <var>cameraFront</var> vector based on the input of the mouse. However, changing the direction vector based on mouse rotations is a little complicated and requires some trigonometry. If you do not understand the trigonometry, don't worry, you can just skip to the code sections and paste them in your code; you can always come back later if you want to know more. +</p> + +<h2>Euler angles</h2> +<p> + Euler angles are 3 values that can represent any rotation in 3D, defined by Leonhard Euler somewhere in the 1700s. There are 3 Euler angles: <em>pitch</em>, <em>yaw</em> and <em>roll</em>. The following image gives them a visual meaning: +</p> + +<img src="/img/getting-started/camera_pitch_yaw_roll.png" alt="Euler angles yaw pitch and roll" class="clean"/> + +<p> + The <def>pitch</def> is the angle that depicts how much we're looking up or down as seen in the first image. The second image shows the <def>yaw</def> value which represents the magnitude we're looking to the left or to the right. The <def>roll</def> represents how much we <em>roll</em> as mostly used in space-flight cameras. Each of the Euler angles are represented by a single value and with the combination of all 3 of them we can calculate any rotation vector in 3D. +</p> + +<p> + For our camera system we only care about the yaw and pitch values so we won't discuss the roll value here. Given a pitch and a yaw value we can convert them into a 3D vector that represents a new direction vector. The process of converting yaw and pitch values to a direction vector requires a bit of trigonometry. 
We start with a basic case: +</p> +
 +<p> + Let's start with a bit of a refresher and check the general right triangle case (with one side at a 90 degree angle): +</p> +
 +<img src="/img/getting-started/camera_triangle.png" class="clean"/> +
 +<p> + If we define the hypotenuse to be of length <code>1</code> we know from trigonometry (soh cah toa) that the adjacent side's length is \(\cos \ \color{red}x/\color{purple}h = \cos \ \color{red}x/\color{purple}1 = \cos\ \color{red}x\) and that the opposing side's length is \(\sin \ \color{green}y/\color{purple}h = \sin \ \color{green}y/\color{purple}1 = \sin\ \color{green}y\). This gives us some general formulas for retrieving the lengths of both the <code>x</code> and <code>y</code> sides of right triangles, depending on the given angle. Let's use this to calculate the components of the direction vector. +</p> +
 +<p> + Let's imagine this same triangle, but now looking at it from a top perspective with the adjacent and opposite sides being parallel to the scene's x and z axis (as if looking down the y-axis). +</p> +
 +<img src="/img/getting-started/camera_yaw.png" class="clean"/> +
 +<p> + If we visualize the yaw angle to be the counter-clockwise angle starting from the <code>x</code> side we can see that the length of the <code>x</code> side relates to <code>cos(yaw)</code>, and similarly the length of the <code>z</code> side relates to <code>sin(yaw)</code>. +</p> +
 +<p> + If we take this knowledge and a given <code>yaw</code> value we can use it to create a camera direction vector: +</p> +
 +<pre><code> +glm::vec3 direction; +direction.x = cos(<function id='63'>glm::radians</function>(yaw)); // Note that we convert the angle to radians first +direction.z = sin(<function id='63'>glm::radians</function>(yaw)); +</code></pre> +
 +<p> + This solves how we can get a 3D direction vector from a yaw value, but pitch needs to be included as well. Let's now look at the <code>y</code> axis side as if we're sitting on the <code>xz</code> plane: +</p> +
 +<img src="/img/getting-started/camera_pitch.png" class="clean"/> +
 +<p> + Similarly, from this triangle we can see that the direction's y component equals <code>sin(pitch)</code> so let's fill that in: +</p> +
 +<pre><code> +direction.y = sin(<function id='63'>glm::radians</function>(pitch)); +</code></pre> +
 +<p> + However, from the pitch triangle we can also see the <code>xz</code> sides are influenced by <code>cos(pitch)</code> so we need to make sure this is also part of the direction vector. With this included we get the final direction vector as translated from yaw and pitch Euler angles: +</p> +
 +<pre><code> +direction.x = cos(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); +direction.y = sin(<function id='63'>glm::radians</function>(pitch)); +direction.z = sin(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); +</code></pre> +
 +<p> + This gives us a formula to convert yaw and pitch values to a 3-dimensional direction vector that we can use for looking around. +</p> +
 +<p> + We've set up the scene world so everything's positioned in the direction of the negative z-axis. However, if we look at the <code>x</code> and <code>z</code> yaw triangle we see that a \(\theta\) of <code>0</code> results in the camera's <code>direction</code> vector pointing towards the positive x-axis. 
To make sure the camera points towards the negative z-axis by default we can give the <code>yaw</code> a default value of a 90 degree clockwise rotation. Positive degrees rotate counter-clockwise so we set the default <code>yaw</code> value to: +</p> + +<pre><code> +yaw = -90.0f; +</code></pre> + +<p> + + You've probably wondered by now: how do we set and modify these yaw and pitch values? +</p> + + +<h2>Mouse input</h2> +<p> + The yaw and pitch values are obtained from mouse (or controller/joystick) movement where horizontal mouse-movement affects the yaw and vertical mouse-movement affects the pitch. The idea is to store the last frame's mouse positions and calculate in the current frame how much the mouse values changed. The higher the horizontal or vertical difference, the more we update the pitch or yaw value and thus the more the camera should move. +</p> + +<p> + First we will tell GLFW that it should hide the cursor and <def>capture</def> it. Capturing a cursor means that, once the application has focus, the mouse cursor stays within the center of the window (unless the application loses focus or quits). We can do this with one simple configuration call: +</p> + +<pre><code> +glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); +</code></pre> + +<p> + After this call, wherever we move the mouse it won't be visible and it should not leave the window. This is perfect for an FPS camera system. +</p> + +<p> +To calculate the pitch and yaw values we need to tell GLFW to listen to mouse-movement events. We do this by creating a callback function with the following prototype: +</p> + +<pre><code> +void mouse_callback(GLFWwindow* window, double xpos, double ypos); +</code></pre> + +<p> + Here <var>xpos</var> and <var>ypos</var> represent the current mouse positions. As soon as we register the callback function with GLFW each time the mouse moves, the <fun>mouse_callback</fun> function is called: +</p> + +<pre><code> +glfwSetCursorPosCallback(window, mouse_callback); +</code></pre> + +<p> + When handling mouse input for a fly style camera there are several steps we have to take before we're able to fully calculate the camera's direction vector: + + <ol> + <li>Calculate the mouse's offset since the last frame.</li> + <li>Add the offset values to the camera's yaw and pitch values.</li> + <li>Add some constraints to the minimum/maximum pitch values.</li> + <li>Calculate the direction vector.</li> + </ol> +</p> + +<p> + The first step is to calculate the offset of the mouse since last frame. We first have to store the last mouse positions in the application, which we initialize to be in the center of the screen (screen size is <code>800</code> by <code>600</code>) initially: +</p> + +<pre class="cpp"><code> +float lastX = 400, lastY = 300; +</code></pre> + +<p> + Then in the mouse's callback function we calculate the offset movement between the last and current frame: +</p> + +<pre><code> +float xoffset = xpos - lastX; +float yoffset = lastY - ypos; // reversed since y-coordinates range from bottom to top +lastX = xpos; +lastY = ypos; + +const float sensitivity = 0.1f; +xoffset *= sensitivity; +yoffset *= sensitivity; +</code></pre> + +<p> + Note that we multiply the offset values by a <var>sensitivity</var> value. If we omit this multiplication the mouse movement would be way too strong; fiddle around with the sensitivity value to your liking. 
+</p> + +<p> + Next we add the offset values to the globally declared <var>pitch</var> and <var>yaw</var> values: +</p> + +<pre><code> +yaw += xoffset; +pitch += yoffset; +</code></pre> + +<p> + In the third step we'd like to add some constraints to the camera so users won't be able to make weird camera movements (also causes a LookAt flip once direction vector is parallel to the world up direction). The pitch needs to be constrained in such a way that users won't be able to look higher than <code>89</code> degrees (at <code>90</code> degrees we get the LookAt flip) and also not below <code>-89</code> degrees. This ensures the user will be able to look up to the sky or below to his feet but not further. The constraints work by replacing the Euler value with its constraint value whenever it breaches the constraint: +</p> + +<pre><code> +if(pitch &gt; 89.0f) + pitch = 89.0f; +if(pitch &lt; -89.0f) + pitch = -89.0f; +</code></pre> + +<p> + Note that we set no constraint on the yaw value since we don't want to constrain the user in horizontal rotation. However, it's just as easy to add a constraint to the yaw as well if you feel like it. +</p> + +<p> + The fourth and last step is to calculate the actual direction vector using the formula from the previous section: +</p> + +<pre><code> +glm::vec3 direction; +direction.x = cos(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); +direction.y = sin(<function id='63'>glm::radians</function>(pitch)); +direction.z = sin(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); +cameraFront = glm::normalize(direction); +</code></pre> + + <p> + This computed direction vector then contains all the rotations calculated from the mouse's movement. Since the <var>cameraFront</var> vector is already included in glm's <fun>lookAt</fun> function we're set to go. +</p> + +<p> + If you'd now run the code you'll notice the camera makes a large sudden jump whenever the window first receives focus of your mouse cursor. The cause for this sudden jump is that as soon as your cursor enters the window the mouse callback function is called with an <var>xpos</var> and <var>ypos</var> position equal to the location your mouse entered the screen from. This is often a position that is significantly far away from the center of the screen, resulting in large offsets and thus a large movement jump. We can circumvent this issue by defining a global <code>bool</code> variable to check if this is the first time we receive mouse input. If it is the first time, we update the initial mouse positions to the new <var>xpos</var> and <code>ypos</code> values. 
The resulting mouse movements will then use the newly entered mouse's position coordinates to calculate the offsets: +</p> +
 +<pre><code> +if (firstMouse) // initially set to true +{ + lastX = xpos; + lastY = ypos; + firstMouse = false; +} +</code></pre> +
 +<p> + The final code then becomes: +</p> +
 +<pre><code> +void mouse_callback(GLFWwindow* window, double xpos, double ypos) +{ + if (firstMouse) + { + lastX = xpos; + lastY = ypos; + firstMouse = false; + } + + float xoffset = xpos - lastX; + float yoffset = lastY - ypos; + lastX = xpos; + lastY = ypos; + + float sensitivity = 0.1f; + xoffset *= sensitivity; + yoffset *= sensitivity; + + yaw += xoffset; + pitch += yoffset; + + if(pitch &gt; 89.0f) + pitch = 89.0f; + if(pitch &lt; -89.0f) + pitch = -89.0f; + + glm::vec3 direction; + direction.x = cos(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); + direction.y = sin(<function id='63'>glm::radians</function>(pitch)); + direction.z = sin(<function id='63'>glm::radians</function>(yaw)) * cos(<function id='63'>glm::radians</function>(pitch)); + cameraFront = glm::normalize(direction); +} +</code></pre> +
 +<p> + There we go! Give it a spin and you'll see that we can now freely move through our 3D scene! +</p> +
 +<h2>Zoom</h2> +<p> + As a little extra to the camera system we'll also implement a zooming interface. In the previous chapter we said the <em>Field of view</em> or <em>fov</em> largely defines how much we can see of the scene. When the field of view becomes smaller, the scene's projected space gets smaller. This smaller space is projected over the same NDC, giving the illusion of zooming in. To zoom in, we're going to use the mouse's scroll wheel. Similar to mouse movement and keyboard input we have a callback function for mouse scrolling: +</p> +
 +<pre><code> +void scroll_callback(GLFWwindow* window, double xoffset, double yoffset) +{ + fov -= (float)yoffset; + if (fov &lt; 1.0f) + fov = 1.0f; + if (fov &gt; 45.0f) + fov = 45.0f; +} +</code></pre> +
 +<p> + When scrolling, the <var>yoffset</var> value tells us the amount we scrolled vertically. When the <fun>scroll_callback</fun> function is called we change the content of the globally declared <var>fov</var> variable. Since <code>45.0</code> is the default fov value we want to constrain the zoom level between <code>1.0</code> and <code>45.0</code>. +</p> +
 +<p> + We now have to upload the perspective projection matrix to the GPU each frame, but this time with the <var>fov</var> variable as its field of view: +</p> +
 +<pre><code> +projection = <function id='58'>glm::perspective</function>(<function id='63'>glm::radians</function>(fov), 800.0f / 600.0f, 0.1f, 100.0f); +</code></pre> +
 +<p> + And lastly don't forget to register the scroll callback function: +</p> +
 +<pre><code> +<function id='64'>glfwSetScrollCallback</function>(window, scroll_callback); +</code></pre> +
 +<p> + And there you have it. We implemented a simple camera system that allows for free movement in a 3D environment. +</p> +
 +<div class="video paused" onclick="ClickVideo(this)"> + <video width="600" height="450" loop> + <source src="/video/getting-started/camera_mouse.mp4" type="video/mp4" /> + <img src="/img/getting-started/camera_mouse.png" class="clean"/> + </video> +</div> +
 +<p> + Feel free to experiment a little and if you're stuck compare your code with the <a href="/code_viewer_gh.php?code=src/1.getting_started/7.3.camera_mouse_zoom/camera_mouse_zoom.cpp" target="_blank">source code</a>. +</p>
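<p>
 Putting it all together: the snippet below is a minimal sketch (not taken from the chapter's source) of how the callbacks and the per-frame camera code from this chapter could be wired into the render loop. It assumes the globals defined earlier (<var>cameraPos</var>, <var>cameraFront</var>, <var>cameraUp</var>, <var>deltaTime</var>, <var>lastFrame</var>, <var>fov</var>) together with the <fun>processInput</fun>, <fun>mouse_callback</fun> and <fun>scroll_callback</fun> functions, and leaves the shader and draw-call details from the previous chapters as a comment.
</p>

<pre><code>
// register the input callbacks once, before the render loop
glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED); // hide and capture the cursor
glfwSetCursorPosCallback(window, mouse_callback);            // yaw/pitch from mouse movement
glfwSetScrollCallback(window, scroll_callback);              // fov (zoom) from the scroll wheel

while (!glfwWindowShouldClose(window))
{
    float currentFrame = glfwGetTime(); // frame timing for frame-rate independent movement
    deltaTime = currentFrame - lastFrame;
    lastFrame = currentFrame;

    processInput(window); // WASD movement scaled by deltaTime

    glm::mat4 view = glm::lookAt(cameraPos, cameraPos + cameraFront, cameraUp);
    glm::mat4 projection = glm::perspective(glm::radians(fov), 800.0f / 600.0f, 0.1f, 100.0f);
    // upload view and projection to the shader program and issue the draw calls here

    glfwSwapBuffers(window);
    glfwPollEvents();
}
</code></pre>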
 + +<h1>Camera class</h1> +<p> + In the upcoming chapters we'll always use a camera to easily look around the scenes and see the results from all angles. However, since the camera code can take up a significant amount of space in each chapter we'll abstract its details a little and create our own camera object that does most of the work for us with some neat little extras. Unlike the Shader chapter we won't walk you through creating the camera class, but provide you with the (fully commented) source code if you want to know the inner workings. +</p> +
 +<p> + Like the <code>Shader</code> object, we define the camera class entirely in a single header file. You can find the camera class <a href="/code_viewer_gh.php?code=includes/learnopengl/camera.h" target="_blank">here</a>; you should be able to understand the code after this chapter. It is advised to at least check the class out once as an example of how you could create your own camera system. +</p> +
 +<warning> + The camera system we introduced is a fly-like camera that suits most purposes and works well with Euler angles, but be careful when creating different camera systems like an FPS camera or a flight simulation camera. Each camera system has its own tricks and quirks so be sure to read up on them. For example, this fly camera doesn't allow for pitch values higher than or equal to <code>90</code> degrees and a static up vector of <code>(0,1,0)</code> doesn't work when we take roll values into account. +</warning> +
 +<p> + The updated version of the source code using the new camera object can be found <a href="/code_viewer_gh.php?code=src/1.getting_started/7.4.camera_class/camera_class.cpp" target="_blank">here</a>. +</p> +
 +<h2>Exercises</h2> +<p> + <ul> + <li>See if you can transform the camera class in such a way that it becomes a <strong>true</strong> FPS camera where you cannot fly; you can only look around while staying on the <code>xz</code> plane: <a href="/code_viewer_gh.php?code=src/1.getting_started/7.5.camera_exercise1/camera_exercise1.cpp" target="_blank">solution</a>.</li> + <li>Try to create your own LookAt function where you manually create a view matrix as discussed at the start of this chapter. Replace glm's LookAt function with your own implementation and see if it still acts the same: <a href="/code_viewer_gh.php?code=src/1.getting_started/7.6.camera_exercise2/camera_exercise2.cpp" target="_blank">solution</a>.</li> + </ul> +</p> +
 + </div> +
 +</body> +</html> +\ No newline at end of file diff --git a/Getting-started/Coordinate-Systems.html b/Getting-started/Coordinate-Systems.html @@ -0,0 +1,726 @@ + + +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8"/> + <title>LearnOpenGL - Coordinate Systems</title>
+</head>
+<body>
+<div id="supercontainer">
+  <div id="container">
+    <div id="content">
+    <h1 id="content-title">Coordinate Systems</h1>
+<h1 id="content-url" style='display:none;'>Getting-started/Coordinate-Systems</h1>
+<p>
+  In the last chapter we learned how we can use matrices to our advantage by transforming all vertices with transformation matrices. OpenGL expects all the vertices that we want to become visible to be in normalized device coordinates after each vertex shader run. That is, the <code>x</code>, <code>y</code> and <code>z</code> coordinates of each vertex should be between <code>-1.0</code> and <code>1.0</code>; coordinates outside this range will not be visible. What we usually do is specify the coordinates in a range (or space) we determine ourselves, and in the vertex shader transform these coordinates to normalized device coordinates (NDC). These NDC are then given to the rasterizer to transform them to 2D coordinates/pixels on your screen.
+</p>
+
+<p>
+  Transforming coordinates to NDC is usually accomplished in a step-by-step fashion where we transform an object's vertices to several coordinate systems before finally transforming them to NDC. The advantage of transforming them to several <em>intermediate</em> coordinate systems is that some operations/calculations are easier in certain coordinate systems, as will soon become apparent. There are a total of 5 different coordinate systems that are of importance to us:
+</p>
+
+  <ul>
+    <li>Local space (or Object space)</li>
+    <li>World space</li>
+    <li>View space (or Eye space)</li>
+    <li>Clip space</li>
+    <li>Screen space</li>
+  </ul>
+
+<p>
+  Each of these is a different state our vertices will be transformed into before they finally end up as fragments.
+</p>
+
+<p>
+  You're probably quite confused by now about what a space or coordinate system actually is, so we'll first explain them in a more high-level fashion by showing the total picture and what each specific space represents.
+</p>
+
+<h2>The global picture</h2>
+<p>
+  To transform the coordinates from one space to the next coordinate space we'll use several transformation matrices, of which the most important are the <def>model</def>, <def>view</def> and <def>projection</def> matrix. Our vertex coordinates first start in <def>local space</def> as <def>local coordinates</def> and are then further processed to <def>world coordinates</def>, <def>view coordinates</def>, <def>clip coordinates</def> and eventually end up as <def>screen coordinates</def>.
The following image displays the process and shows what each transformation does: +</p> + +<img src="/img/getting-started/coordinate_systems.png" class="clean"/> + + <ol> + <li>Local coordinates are the coordinates of your object relative to its local origin; they're the coordinates your object begins in. </li> + <li>The next step is to transform the local coordinates to world-space coordinates which are coordinates in respect of a larger world. These coordinates are relative to some global origin of the world, together with many other objects also placed relative to this world's origin.</li> + <li>Next we transform the world coordinates to view-space coordinates in such a way that each coordinate is as seen from the camera or viewer's point of view. </li> + <li>After the coordinates are in view space we want to project them to clip coordinates. Clip coordinates are processed to the <code>-1.0</code> and <code>1.0</code> range and determine which vertices will end up on the screen. Projection to clip-space coordinates can add perspective if using perspective projection. </li> + <li> + And lastly we transform the clip coordinates to screen coordinates in a process we call <def>viewport transform</def> that transforms the coordinates from <code>-1.0</code> and <code>1.0</code> to the coordinate range defined by <fun><function id='22'>glViewport</function></fun>. The resulting coordinates are then sent to the rasterizer to turn them into fragments. + </li> + </ol> + +<p> + You probably got a slight idea what each individual space is used for. The reason we're transforming our vertices into all these different spaces is that some operations make more sense or are easier to use in certain coordinate systems. For example, when modifying your object it makes most sense to do this in local space, while calculating certain operations on the object with respect to the position of other objects makes most sense in world coordinates and so on. If we want, we could define one transformation matrix that goes from local space to clip space all in one go, but that leaves us with less flexibility. +</p> + +<p> + We'll discuss each coordinate system in more detail below. +</p> + +<h2>Local space</h2> +<p> + Local space is the coordinate space that is local to your object, i.e. where your object begins in. Imagine that you've created your cube in a modeling software package (like Blender). The origin of your cube is probably at <code>(0,0,0)</code> even though your cube may end up at a different location in your final application. Probably all the models you've created all have <code>(0,0,0)</code> as their initial position. All the vertices of your model are therefore in <em>local</em> space: they are all local to your object. +</p> + +<p> + The vertices of the container we've been using were specified as coordinates between <code>-0.5</code> and <code>0.5</code> with <code>0.0</code> as its origin. These are local coordinates. +</p> + +<h2>World space</h2> +<p> + If we would import all our objects directly in the application they would probably all be somewhere positioned inside each other at the world's origin of <code>(0,0,0)</code> which is not what we want. We want to define a position for each object to position them inside a larger world. The coordinates in world space are exactly what they sound like: the coordinates of all your vertices relative to a (game) world. 
This is the coordinate space where you want your objects transformed to in such a way that they're all scattered around the place (preferably in a realistic fashion). The coordinates of your object are transformed from local to world space; this is accomplished with the <def>model</def> matrix. +</p> + +<p> + The model matrix is a transformation matrix that translates, scales and/or rotates your object to place it in the world at a location/orientation they belong to. Think of it as transforming a house by scaling it down (it was a bit too large in local space), translating it to a suburbia town and rotating it a bit to the left on the y-axis so that it neatly fits with the neighboring houses. You could think of the matrix in the previous chapter to position the container all over the scene as a sort of model matrix as well; we transformed the local coordinates of the container to some different place in the scene/world. +</p> + +<h2>View space</h2> +<p> + The view space is what people usually refer to as the <def>camera</def> of OpenGL (it is sometimes also known as <def>camera space</def> or <def>eye space</def>). The view space is the result of transforming your world-space coordinates to coordinates that are in front of the user's view. The view space is thus the space as seen from the camera's point of view. This is usually accomplished with a combination of translations and rotations to translate/rotate the scene so that certain items are transformed to the front of the camera. These combined transformations are generally stored inside a <def>view matrix</def> that transforms world coordinates to view space. In the next chapter we'll extensively discuss how to create such a view matrix to simulate a camera. +</p> + +<h2>Clip space</h2> +<p> + At the end of each vertex shader run, OpenGL expects the coordinates to be within a specific range and any coordinate that falls outside this range is <def>clipped</def>. Coordinates that are clipped are discarded, so the remaining coordinates will end up as fragments visible on your screen. This is also where <def>clip space</def> gets its name from. +</p> + +<p> + Because specifying all the visible coordinates to be within the range <code>-1.0</code> and <code>1.0</code> isn't really intuitive, we specify our own coordinate set to work in and convert those back to NDC as OpenGL expects them. +</p> + +<p> + To transform vertex coordinates from view to clip-space we define a so called <def>projection matrix</def> that specifies a range of coordinates e.g. <code>-1000</code> and <code>1000</code> in each dimension. The projection matrix then transforms coordinates within this specified range to normalized device coordinates (<code>-1.0</code>, <code>1.0</code>). All coordinates outside this range will not be mapped between <code>-1.0</code> and <code>1.0</code> and therefore be clipped. With this range we specified in the projection matrix, a coordinate of (<code>1250</code>, <code>500</code>, <code>750</code>) would not be visible, since the <code>x</code> coordinate is out of range and thus gets converted to a coordinate higher than <code>1.0</code> in NDC and is therefore clipped. +</p> + +<note> + Note that if only a part of a primitive e.g. a triangle is outside the <def>clipping volume</def> OpenGL will reconstruct the triangle as one or more triangles to fit inside the clipping range. 
+</note> + +<p> + This <em>viewing box</em> a projection matrix creates is called a <def>frustum</def> and each coordinate that ends up inside this frustum will end up on the user's screen. The total process to convert coordinates within a specified range to NDC that can easily be mapped to 2D view-space coordinates is called <def>projection</def> since the projection matrix <def>projects</def> 3D coordinates to the easy-to-map-to-2D normalized device coordinates. +</p> + +<p> + Once all the vertices are transformed to clip space a final operation called <def>perspective division</def> is performed where we divide the <code>x</code>, <code>y</code> and <code>z</code> components of the position vectors by the vector's homogeneous <code>w</code> component; perspective division is what transforms the 4D clip space coordinates to 3D normalized device coordinates. This step is performed automatically at the end of the vertex shader step. +</p> + +<p> + It is after this stage where the resulting coordinates are mapped to screen coordinates (using the settings of <fun><function id='22'>glViewport</function></fun>) and turned into fragments. +</p> + +<p> + The projection matrix to transform view coordinates to clip coordinates usually takes two different forms, where each form defines its own unique frustum. We can either create an <def>orthographic</def> projection matrix or a <def>perspective</def> projection matrix. +</p> + +<h3>Orthographic projection</h3> +<p> + An orthographic projection matrix defines a cube-like frustum box that defines the clipping space where each vertex outside this box is clipped. When creating an orthographic projection matrix we specify the width, height and length of the visible frustum. All the coordinates inside this frustum will end up within the NDC range after transformed by its matrix and thus won't be clipped. The frustum looks a bit like a container: +</p> + +<img src="/img/getting-started/orthographic_frustum.png" class="clean"/> + +<p> + The frustum defines the visible coordinates and is specified by a width, a height and a <def>near</def> and <def>far</def> plane. Any coordinate in front of the near plane is clipped and the same applies to coordinates behind the far plane. The orthographic frustum <strong>directly</strong> maps all coordinates inside the frustum to normalized device coordinates without any special side effects since it won't touch the <code>w</code> component of the transformed vector; if the <code>w</code> component remains equal to <code>1.0</code> perspective division won't change the coordinates. +</p> + +<p> + To create an orthographic projection matrix we make use of GLM's built-in function <code><function id='59'>glm::ortho</function></code>: +</p> + +<pre><code> +<function id='59'>glm::ortho</function>(0.0f, 800.0f, 0.0f, 600.0f, 0.1f, 100.0f); +</code></pre> + +<p> + The first two parameters specify the left and right coordinate of the frustum and the third and fourth parameter specify the bottom and top part of the frustum. With those 4 points we've defined the size of the near and far planes and the 5th and 6th parameter then define the distances between the near and far plane. This specific projection matrix transforms all coordinates between these <code>x</code>, <code>y</code> and <code>z</code> range values to normalized device coordinates. 
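+</p>
+
+<p>
+  To make the parameter order easier to keep track of, here is the same call once more with each argument labeled. This is purely illustrative and repeats the exact values from the example above:
+</p>
+
+<pre><code>
+<function id='59'>glm::ortho</function>(0.0f,     // left
+           800.0f,   // right
+           0.0f,     // bottom
+           600.0f,   // top
+           0.1f,     // near plane
+           100.0f);  // far plane
+</code></pre>
+
+<p>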
+</p> + +<p> + An orthographic projection matrix directly maps coordinates to the 2D plane that is your screen, but in reality a direct projection produces unrealistic results since the projection doesn't take <def>perspective</def> into account. That is something the <def>perspective projection</def> matrix fixes for us. +</p> + +<h3>Perspective projection</h3> +<p> + If you ever were to enjoy the graphics the <em>real life</em> has to offer you'll notice that objects that are farther away appear much smaller. This weird effect is something we call <def>perspective</def>. Perspective is especially noticeable when looking down the end of an infinite motorway or railway as seen in the following image: +</p> + +<img src="/img/getting-started/perspective.png" class="clean"/> + +<p> + As you can see, due to perspective the lines seem to coincide at a far enough distance. This is exactly the effect perspective projection tries to mimic and it does so using a <def>perspective projection matrix</def>. The projection matrix maps a given frustum range to clip space, but also manipulates the <code>w</code> value of each vertex coordinate in such a way that the further away a vertex coordinate is from the viewer, the higher this <code>w</code> component becomes. Once the coordinates are transformed to clip space they are in the range <code>-w</code> to <code>w</code> (anything outside this range is clipped). OpenGL requires that the visible coordinates fall between the range <code>-1.0</code> and <code>1.0</code> as the final vertex shader output, thus once the coordinates are in clip space, perspective division is applied to the clip space coordinates: + + \[ out = \begin{pmatrix} x /w \\ y / w \\ z / w \end{pmatrix} \] + + Each component of the vertex coordinate is divided by its <code>w</code> component giving smaller vertex coordinates the further away a vertex is from the viewer. This is another reason why the <code>w</code> component is important, since it helps us with perspective projection. The resulting coordinates are then in normalized device space. If you're interested to figure out how the orthographic and perspective projection matrices are actually calculated (and aren't too scared of the mathematics) I can recommend <a href="http://www.songho.ca/opengl/gl_projectionmatrix.html" target="_blank">this excellent article</a> by Songho. +</p> + +<p> + A perspective projection matrix can be created in GLM as follows: +</p> + +<pre><code> +glm::mat4 proj = <function id='58'>glm::perspective</function>(<function id='63'>glm::radians</function>(45.0f), (float)width/(float)height, 0.1f, 100.0f); +</code></pre> + +<p> + What <code><function id='58'>glm::perspective</function></code> does is again create a large <em>frustum</em> that defines the visible space, anything outside the frustum will not end up in the clip space volume and will thus become clipped. A perspective frustum can be visualized as a non-uniformly shaped box from where each coordinate inside this box will be mapped to a point in clip space. An image of a perspective frustum is seen below: +</p> + +<img src="/img/getting-started/perspective_frustum.png" class="clean"/> + + +<p> +Its first parameter defines the <def>fov</def> value, that stands for <def>field of view</def> and sets how large the viewspace is. For a realistic view it is usually set to 45 degrees, but for more doom-style results you could set it to a higher value. 
The second parameter sets the aspect ratio which is calculated by dividing the viewport's width by its height. The third and fourth parameter set the <em>near</em> and <em>far</em> plane of the frustum. We usually set the near distance to <code>0.1</code> and the far distance to <code>100.0</code>. All the vertices between the near and far plane and inside the frustum will be rendered. +</p> + +<note> + Whenever the <em>near</em> value of your perspective matrix is set too high (like <code>10.0</code>), OpenGL will clip all coordinates close to the camera (between <code>0.0</code> and <code>10.0</code>), which can give a visual result you maybe have seen before in videogames where you could see through certain objects when moving uncomfortably close to them. +</note> + +<p> + When using orthographic projection, each of the vertex coordinates are directly mapped to clip space without any fancy perspective division (it still does perspective division, but the <code>w</code> component is not manipulated (it stays <code>1</code>) and thus has no effect). Because the orthographic projection doesn't use perspective projection, objects farther away do not seem smaller, which produces a weird visual output. For this reason the orthographic projection is mainly used for 2D renderings and for some architectural or engineering applications where we'd rather not have vertices distorted by perspective. Applications like <em>Blender</em> that are used for 3D modeling sometimes use orthographic projection for modeling, because it more accurately depicts each object's dimensions. Below you'll see a comparison of both projection methods in Blender: +</p> + +<img src="/img/getting-started/perspective_orthographic.png" class="clean"/> + +<p> + You can see that with perspective projection, the vertices farther away appear much smaller, while in orthographic projection each vertex has the same distance to the user. +</p> + +<h2>Putting it all together</h2> +<p> + We create a transformation matrix for each of the aforementioned steps: model, view and projection matrix. A vertex coordinate is then transformed to clip coordinates as follows: + + \[ V_{clip} = M_{projection} \cdot M_{view} \cdot M_{model} \cdot V_{local} \] + +Note that the order of matrix multiplication is reversed (remember that we need to read matrix multiplication from right to left). The resulting vertex should then be assigned to <var>gl_Position</var> in the vertex shader and OpenGL will then automatically perform perspective division and clipping. +</p> + +<note> + <strong>And then?</strong><br/> + The output of the vertex shader requires the coordinates to be in clip-space which is what we just did with the transformation matrices. OpenGL then performs <em>perspective division</em> on the <em>clip-space coordinates</em> to transform them to <em>normalized-device coordinates</em>. OpenGL then uses the parameters from <fun><function id='22'>glViewPort</function></fun> to map the normalized-device coordinates to <em>screen coordinates</em> where each coordinate corresponds to a point on your screen (in our case a 800x600 screen). This process is called the <em>viewport transform</em>. +</note> + +<p> + This is a difficult topic to understand so if you're still not exactly sure about what each space is used for you don't have to worry. Below you'll see how we can actually put these coordinate spaces to good use and enough examples will follow in the upcoming chapters. 
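+</p>
+
+<p>
+  As a small illustration of the multiplication order, this is roughly what the chain would look like if we composed a single local-to-clip matrix with GLM on the CPU. This is just a sketch: <code>model</code>, <code>view</code> and <code>projection</code> are assumed to be filled in (as we'll do in the next section) and <code>localPosition</code> is a placeholder vertex:
+</p>
+
+<pre><code>
+// read from right to left: local space -> world space -> view space -> clip space
+glm::mat4 mvp = projection * view * model;
+
+glm::vec3 localPosition(0.5f, 0.5f, 0.0f);              // placeholder local-space vertex
+glm::vec4 clip = mvp * glm::vec4(localPosition, 1.0f);  // the value we'd output as gl_Position
+// OpenGL then performs perspective division and the viewport transform for us
+</code></pre>
+
+<p>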
+</p> + +<h1>Going 3D</h1> +<p> + Now that we know how to transform 3D coordinates to 2D coordinates we can start rendering real 3D objects instead of the lame 2D plane we've been showing so far. +</p> + +<p> + To start drawing in 3D we'll first create a model matrix. The model matrix consists of translations, scaling and/or rotations we'd like to apply to <em>transform</em> all object's vertices to the global world space. Let's transform our plane a bit by rotating it on the x-axis so it looks like it's laying on the floor. The model matrix then looks like this: +</p> + +<pre><code> +glm::mat4 model = glm::mat4(1.0f); +model = <function id='57'>glm::rotate</function>(model, <function id='63'>glm::radians</function>(-55.0f), glm::vec3(1.0f, 0.0f, 0.0f)); +</code></pre> + +<p> +By multiplying the vertex coordinates with this model matrix we're transforming the vertex coordinates to world coordinates. Our plane that is slightly on the floor thus represents the plane in the global world. + </p> + +<p> + Next we need to create a view matrix. We want to move slightly backwards in the scene so the object becomes visible (when in world space we're located at the origin <code>(0,0,0)</code>). To move around the scene, think about the following: + <ul> + <li>To move a camera backwards, is the same as moving the entire scene forward.</li> + </ul> + That is exactly what a view matrix does, we move the entire scene around inversed to where we want the camera to move.<br/> + Because we want to move backwards and since OpenGL is a right-handed system we have to move in the positive z-axis. We do this by translating the scene towards the negative z-axis. This gives the impression that we are moving backwards. +</p> + + +<note> + <strong>Right-handed system</strong> + <p> + By convention, OpenGL is a right-handed system. What this basically says is that the positive x-axis is to your right, the positive y-axis is up and the positive z-axis is backwards. Think of your screen being the center of the 3 axes and the positive z-axis going through your screen towards you. The axes are drawn as follows: +</p> + <img src="/img/getting-started/coordinate_systems_right_handed.png" class="clean"/> + <p> + To understand why it's called right-handed do the following: + <ul> + <li>Stretch your right-arm along the positive y-axis with your hand up top.</li> + <li>Let your thumb point to the right.</li> + <li>Let your pointing finger point up.</li> + <li>Now bend your middle finger downwards 90 degrees.</li> + </ul> + If you did things right, your thumb should point towards the positive x-axis, the pointing finger towards the positive y-axis and your middle finger towards the positive z-axis. If you were to do this with your left-arm you would see the z-axis is reversed. This is known as a left-handed system and is commonly used by DirectX. Note that in normalized device coordinates OpenGL actually uses a left-handed system (the projection matrix switches the handedness). + </p> +</note> + +<p> + We'll discuss how to move around the scene in more detail in the next chapter. For now the view matrix looks like this: +</p> + +<pre><code> +glm::mat4 view = glm::mat4(1.0f); +// note that we're translating the scene in the reverse direction of where we want to move +view = <function id='55'>glm::translate</function>(view, glm::vec3(0.0f, 0.0f, -3.0f)); +</code></pre> + +<p> + The last thing we need to define is the projection matrix. 
We want to use perspective projection for our scene so we'll declare the projection matrix like this: +</p> + +<pre><code> +glm::mat4 projection; +projection = <function id='58'>glm::perspective</function>(<function id='63'>glm::radians</function>(45.0f), 800.0f / 600.0f, 0.1f, 100.0f); +</code></pre> + +<p> + Now that we created the transformation matrices we should pass them to our shaders. First let's declare the transformation matrices as uniforms in the vertex shader and multiply them with the vertex coordinates: +</p> + +<pre><code> +#version 330 core +layout (location = 0) in vec3 aPos; +... +uniform mat4 model; +uniform mat4 view; +uniform mat4 projection; + +void main() +{ + // note that we read the multiplication from right to left + gl_Position = projection * view * model * vec4(aPos, 1.0); + ... +} +</code></pre> + +<p> + We should also send the matrices to the shader (this is usually done each frame since transformation matrices tend to change a lot): +</p> + +<pre><code> +int modelLoc = <function id='45'>glGetUniformLocation</function>(ourShader.ID, "model"); +<function id='44'>glUniform</function>Matrix4fv(modelLoc, 1, GL_FALSE, glm::value_ptr(model)); +... // same for View Matrix and Projection Matrix +</code></pre> + +<p> + Now that our vertex coordinates are transformed via the model, view and projection matrix the final object should be: + + <ul> + <li>Tilted backwards to the floor. </li> + <li>A bit farther away from us.</li> + <li>Be displayed with perspective (it should get smaller, the further its vertices are).</li> + </ul> + + Let's check if the result actually does fulfill these requirements: +</p> + +<img src="/img/getting-started/coordinate_systems_result.png" class="clean"/> + +<p> + It does indeed look like the plane is a 3D plane that's resting at some imaginary floor. If you're not getting the same result, compare your code with the complete <a href="/code_viewer_gh.php?code=src/1.getting_started/6.1.coordinate_systems/coordinate_systems.cpp" target="_blank">source code</a>. +</p> + +<h2>More 3D</h2> +<p> + So far we've been working with a 2D plane, even in 3D space, so let's take the adventurous route and extend our 2D plane to a 3D cube. To render a cube we need a total of 36 vertices (6 faces * 2 triangles * 3 vertices each). 36 vertices are a lot to sum up so you can retrieve them from <a href="/code_viewer.php?code=getting-started/cube_vertices" target="_blank">here</a>. +</p> + +<p> + For fun, we'll let the cube rotate over time: +</p> + +<pre><code> +model = <function id='57'>glm::rotate</function>(model, (float)<function id='47'>glfwGetTime</function>() * <function id='63'>glm::radians</function>(50.0f), glm::vec3(0.5f, 1.0f, 0.0f)); +</code></pre> + +<p> + And then we'll draw the cube using <fun><function id='1'>glDrawArrays</function></fun> (as we didn't specify indices), but this time with a count of 36 vertices. +</p> + +<pre class="cpp"><code> +<function id='1'>glDrawArrays</function>(GL_TRIANGLES, 0, 36); +</code></pre> + +<p> + You should get something similar to the following: +</p> + +<div class="video paused" onclick="ClickVideo(this)"> + <video width="600" height="450" loop> + <source src="/video/getting-started/coordinate_system_no_depth.mp4" type="video/mp4" /> + <img src="/img/getting-started/coordinate_systems_no_depth.png" class="clean"/> + </video> +</div> + + +<p> + It does resemble a cube slightly but something's off. Some sides of the cubes are being drawn over other sides of the cube. 
This happens because when OpenGL draws your cube triangle-by-triangle, fragment by fragment, it will overwrite any pixel color that may have already been drawn there before. Since OpenGL gives no guarantee on the order of triangles rendered (within the same draw call), some triangles are drawn on top of each other even though one should clearly be in front of the other. +</p> + +<p> + Luckily, OpenGL stores depth information in a buffer called the <def>z-buffer</def> that allows OpenGL to decide when to draw over a pixel and when not to. Using the z-buffer we can configure OpenGL to do depth-testing. +</p> + +<h3>Z-buffer</h3> +<p> + OpenGL stores all its depth information in a z-buffer, also known as a <def>depth buffer</def>. GLFW automatically creates such a buffer for you (just like it has a color-buffer that stores the colors of the output image). The depth is stored within each fragment (as the fragment's <code>z</code> value) and whenever the fragment wants to output its color, OpenGL compares its depth values with the z-buffer. If the current fragment is behind the other fragment it is discarded, otherwise overwritten. This process is called <def>depth testing</def> and is done automatically by OpenGL. +</p> + +<p> + However, if we want to make sure OpenGL actually performs the depth testing we first need to tell OpenGL we want to enable depth testing; it is disabled by default. We can enable depth testing using <fun><function id='60'>glEnable</function></fun>. The <fun><function id='60'>glEnable</function></fun> and <fun>glDisable</fun> functions allow us to enable/disable certain functionality in OpenGL. That functionality is then enabled/disabled until another call is made to disable/enable it. Right now we want to enable depth testing by enabling <var>GL_DEPTH_TEST</var>: +</p> + +<pre><code> +<function id='60'>glEnable</function>(GL_DEPTH_TEST); +</code></pre> + +<p> + Since we're using a depth buffer we also want to clear the depth buffer before each render iteration (otherwise the depth information of the previous frame stays in the buffer). Just like clearing the color buffer, we can clear the depth buffer by specifying the <var>DEPTH_BUFFER_BIT</var> bit in the <fun><function id='10'>glClear</function></fun> function: +</p> + +<pre><code> +<function id='10'>glClear</function>(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT); +</code></pre> + +<p> + Let's re-run our program and see if OpenGL now performs depth testing: +</p> + +<div class="video paused" onclick="ClickVideo(this)"> + <video width="600" height="450" loop> + <source src="/video/getting-started/coordinate_system_depth.mp4" type="video/mp4" /> + <img src="/img/getting-started/coordinate_systems_with_depth.png" class="clean"/> + </video> +</div> + + +<p> + There we go! A fully textured cube with proper depth testing that rotates over time. Check the source code <a href="/code_viewer_gh.php?code=src/1.getting_started/6.2.coordinate_systems_depth/coordinate_systems_depth.cpp" target="_blank">here</a>. +</p> + + + +<h3>More cubes!</h3> +<p> + Say we wanted to display 10 of our cubes on screen. Each cube will look the same but will only differ in where it's located in the world with each a different rotation. The graphical layout of the cube is already defined so we don't have to change our buffers or attribute arrays when rendering more objects. The only thing we have to change for each object is its model matrix where we transform the cubes into the world. 
+</p> + +<p> + First, let's define a translation vector for each cube that specifies its position in world space. We'll define 10 cube positions in a <code>glm::vec3</code> array: +</p> + +<pre><code> +glm::vec3 cubePositions[] = { + glm::vec3( 0.0f, 0.0f, 0.0f), + glm::vec3( 2.0f, 5.0f, -15.0f), + glm::vec3(-1.5f, -2.2f, -2.5f), + glm::vec3(-3.8f, -2.0f, -12.3f), + glm::vec3( 2.4f, -0.4f, -3.5f), + glm::vec3(-1.7f, 3.0f, -7.5f), + glm::vec3( 1.3f, -2.0f, -2.5f), + glm::vec3( 1.5f, 2.0f, -2.5f), + glm::vec3( 1.5f, 0.2f, -1.5f), + glm::vec3(-1.3f, 1.0f, -1.5f) +}; +</code></pre> + +<p> + Now, within the render loop we want to call <fun><function id='1'>glDrawArrays</function></fun> 10 times, but this time send a different model matrix to the vertex shader each time before we send out the draw call. We will create a small loop within the render loop that renders our object 10 times with a different model matrix each time. Note that we also add a small unique rotation to each container. +</p> + +<pre><code> +<function id='27'>glBindVertexArray</function>(VAO); +for(unsigned int i = 0; i &lt; 10; i++) +{ + glm::mat4 model = glm::mat4(1.0f); + model = <function id='55'>glm::translate</function>(model, cubePositions[i]); + float angle = 20.0f * i; + model = <function id='57'>glm::rotate</function>(model, <function id='63'>glm::radians</function>(angle), glm::vec3(1.0f, 0.3f, 0.5f)); + ourShader.setMat4("model", model); + + <function id='1'>glDrawArrays</function>(GL_TRIANGLES, 0, 36); +} +</code></pre> + +<p> + This snippet of code will update the model matrix each time a new cube is drawn and do this 10 times in total. Right now we should be looking into a world filled with 10 oddly rotated cubes: +</p> + +<img src="/img/getting-started/coordinate_systems_multiple_objects.png" class="clean"/> + +<p> + Perfect! It looks like our container found some like-minded friends. If you're stuck see if you can compare your code with the <a href="/code_viewer_gh.php?code=src/1.getting_started/6.3.coordinate_systems_multiple/coordinate_systems_multiple.cpp" target="_blank">source code</a>. +</p> + +<h2>Exercises</h2> +<ul> + <li>Try experimenting with the <code>FoV</code> and <code>aspect-ratio</code> parameters of GLM's <code>projection</code> function. See if you can figure out how those affect the perspective frustum.</li> + <li>Play with the view matrix by translating in several directions and see how the scene changes. Think of the view matrix as a camera object.</li> + <li>Try to make every 3rd container (including the 1st) rotate over time, while leaving the other containers static using just the model matrix: <a href="/code_viewer_gh.php?code=src/1.getting_started/6.4.coordinate_systems_exercise3/coordinate_systems_exercise3.cpp" target="_blank">solution</a>.</li> +</ul> + + </div> + + <div id="hover"> + HI + </div> + <!-- 728x90/320x50 sticky footer --> +<div id="waldo-tag-6196"></div> + + <div id="disqus_thread"></div> + + + + +</div> <!-- container div --> + + +</div> <!-- super container div --> +</body> +</html> +\ No newline at end of file diff --git a/Getting-started/Creating-a-window.html b/Getting-started/Creating-a-window.html @@ -0,0 +1,299 @@ + + +<!DOCTYPE html> +<html lang="ja"> +<head> + <meta charset="utf-8"/> + <title>LearnOpenGL - Creating a window</title> <!--<title>Learn OpenGL, extensive tutorial resource for learning Modern OpenGL</title>--> + <link rel="shortcut icon" type="image/ico" href="/favicon.ico" /> + <meta name="description" content="Learn OpenGL . 
com provides good and clear modern 3.3+ OpenGL tutorials with clear examples. A great resource to learn modern OpenGL aimed at beginners."> +</head> +<body> + <div id="content"> + <h1 id="content-title">Creating a window</h1> + <h1 id="content-title">ウィンドウの作成</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Creating-a-window</h1> +<p> + The first thing we need to do before we start creating stunning graphics is to create an OpenGL context and an application window to draw in. However, those operations are specific per operating system and OpenGL purposefully tries to abstract itself from these operations. This means we have to create a window, define a context, and handle user input all by ourselves. +</p> +<p> +さまざまなグラフィックを作成する前に、OpenGLのコンテクストとアプリケーションのウィンドウを作成する必要があります。しかしこの作業はOSによってまちまちであり、OpenGLはこういった作業から自身を抽象化しようとしています。つまりわれわれはウィンドウの作成やコンテクストの定義、あるいはユーザーからの入力の処理といった作業を自分自身でおこなわなければなりません。 +</p> + +<p> + Luckily, there are quite a few libraries out there that provide the functionality we seek, some specifically aimed at OpenGL. Those libraries save us all the operation-system specific work and give us a window and an OpenGL context to render in. Some of the more popular libraries are GLUT, SDL, SFML and GLFW. On LearnOpenGL we will be using <strong>GLFW</strong>. Feel free to use any of the other libraries, the setup for most is similar to GLFW's setup. +</p> +<p> +ありがたいことに、我々の目的にあった機能を提供するライブラリがたくさんあり、なかにはOpenGLに特化したものもあります。こういったライブラリを利用することで、OSごとに違うコードを用意しなくてもウィンドウやコンテクストを作成することが可能です。有名なライブラリにはGLUT、SDL、SFML、そしてGLFW等があります。本書では<strong>GLFW</strong>を利用します。しかし他のどのライブラリを使ってもかまいません。セットアップ方法はだいたい同じです。 +</p> + +<h2> GLFW </h2> +<p> + GLFW is a library, written in C, specifically targeted at OpenGL. GLFW gives us the bare necessities required for rendering goodies to the screen. It allows us to create an OpenGL context, define window parameters, and handle user input, which is plenty enough for our purposes. +</p> +<p> +GLFWはOpenGLに特化したC言語製のライブラリです。GLFWはスクリーンにいいものを描画するために必要な最低限のものを提供します。GLFWにより、コンテクストの作成、ウィンドウにかかる変数の定義、ユーザーからの入力の処理が可能になり、われわれの目的には十分です。 +</p> + +<p> + The focus of this and the next chapter is to get GLFW up and running, making sure it properly creates an OpenGL context and that it displays a simple window for us to mess around in. This chapter takes a step-by-step approach in retrieving, building and linking the GLFW library. We'll use Microsoft Visual Studio 2019 IDE as of this writing (note that the process is the same on the more recent visual studio versions). If you're not using Visual Studio (or an older version) don't worry, the process will be similar on most other IDEs. +</p> +<p> +この章と次の章ではGLFWを動作させ、コンテクストの作成や簡単なウィンドウの表示が適切にできているか確認します。この章ではGLFWライブラリをダウンロード、コンパイルしリンクする方法を順番に見ていきます。MicrosoftのVisual Studio 2019 IDEを利用します(もっと新しいバージョンのvisual studioでもやりかたは同じです)。あなたがVisual Studioの古いバージョンや他のIDEを使う場合でも、だいたい同じ方法で動作します。 +</p> + +<h2>Building GLFW</h2> +<h2>GLFWのビルド</h2> +<p> + GLFW can be obtained from their webpage's <a href="http://www.glfw.org/download.html" target="_blank">download</a> page. GLFW already has pre-compiled binaries and header files for Visual Studio 2012 up to 2019, but for completeness' sake we will compile GLFW ourselves from the source code. This is to give you a feel for the process of compiling open-source libraries yourself as not every library will have pre-compiled binaries available. So let's download the <em>Source package</em>. 
+</p> +<p> +GLFWはこの<a href="http://www.glfw.org/download.html" target="_blank">ダウンドード</a>ページから入手できます。GLFWはVisual Studio 2012から2019ではあらかじめコンパイルされたバイナリも利用できますが、完全性のために自分達でコンパイルすることにします。オープンソースのライブラリによってはバイナリは用意されていないことがあるので、ライブラリをコンパイルすることに慣れておいたほうがいいからです。それでは<em>ソースパッケージ</em>をダウンロードしましょう。 +</p> + +<warning> + We'll be building all libraries as 64-bit binaries so make sure to get the 64-bit binaries if you're using their pre-compiled binaries. +</warning> +<warning> +われわれはすべてのライブラリを64bit用にコンパイルします。コンパイル済のバイナリーをダウンロードする場合、64bit用のものであることを確認してください。 +</warning> + +<p> + Once you've downloaded the source package, extract it and open its content. We are only interested in a few items: +</p> +<p> +ダウンロードしたソースパッケージを解凍します。我々が利用するのは以下のものだけです: +</p> + <ul> + <li>The resulting library from compilation.</li> + <li>The <strong>include</strong> folder.</li> + </ul> + <ul> + <li>ライブラリをコンパイルしたもの。</li> + <li><strong>include</strong>フォルダ。</li> + </ul> + +<p> + Compiling the library from the source code guarantees that the resulting library is perfectly tailored for your CPU/OS, a luxury pre-compiled binaries don't always provide (sometimes, pre-compiled binaries are not available for your system). The problem with providing source code to the open world however is that not everyone uses the same IDE or build system for developing their application, which means the project/solution files provided may not be compatible with other people's setup. + So people then have to setup their own project/solution with the given .c/.cpp and .h/.hpp files, which is cumbersome. Exactly for those reasons there is a tool called CMake. +</p> +<p> +ライブラリをソースからビルドした場合、あなたのCPUやOSに完全に適合することが保証されます。これはコンパイル済のバイナリではあじわえない贅沢です(場合によってはあなたのシステムで動作するバイナリが提供されていないこともあります)。オープンソースの世界でソースコードを提供することの問題は、すべてのユーザーがアプリケーションを開発するうえでおなじIDEやビルドシステムを利用していないことです。だれかのプロジェクトやソリューションが他のひとの環境で動作しない可能性があるということです。 +</p> + +<h3>CMake</h3> +<p> + CMake is a tool that can generate project/solution files of the user's choice (e.g. Visual Studio, Code::Blocks, Eclipse) from a collection of source code files using pre-defined CMake scripts. This allows us to generate a Visual Studio 2019 project file from GLFW's source package which we can use to compile the library. First we need to download and install CMake which can be downloaded on their <a href="http://www.cmake.org/cmake/resources/software.html" target="_blank">download</a> page. +</p> +<p> +CMakeはソースコードとあらかじめ用意しておいたCMakeのスクリプトから、任意のIDE(例えばVisual Studio、Code::Blocks、Eclipse等)のプロジェクトやソリューションのファイルを作成するツールです。これを使えば、GLFWのソースパッケージからVisual Studio 2019のプロジェクトファイルを作成することができ、さらにプロジェクトファイルをコンパイルしてライブラリを作成することができます。 +</p> + +<p> + Once CMake is installed you can choose to run CMake from the command line or through their GUI. Since we're not trying to overcomplicate things we're going to use the GUI. CMake requires a source code folder and a destination folder for the binaries. For the source code folder we're going to choose the root folder of the downloaded GLFW source package and for the build folder we're creating a new directory <em>build</em> and then select that directory. 
+</p> +<p> +CMakeはコマンドラインからでもGUIを通してでも、好きな方法で利用できます。こみいった話をさけるため、ここではGUIを利用します。CMakeを利用するには、ソースコードのフォルダとコンパイルしてできあがったバイナリ用のフォルダのふたつのフォルダが必要です。ソースコードのフォルダとしてGLFWをダウンロードしたフォルダを選び、バイナリ用には<em>build</em>というフォルダを新しく作成することにしましょう。 +</p> + +<img src="/img/getting-started/cmake.png" width="800px" alt="Image of CMake's logo"/> + +<p> + Once the source and destination folders have been set, click the <code>Configure</code> button so CMake can read the required settings and the source code. We then have to choose the generator for the project and since we're using Visual Studio 2019 we will choose the <code>Visual Studio 16</code> option (Visual Studio 2019 is also known as Visual Studio 16). CMake will then display the possible build options to configure the resulting library. We can leave them to their default values and click <code>Configure</code> again to store the settings. Once the settings have been set, we click <code>Generate</code> and the resulting project files will be generated in your <code>build</code> folder. +</p> +<p> +ソースコード用およびバイナリ用のフォルダを設定したら、<code>Configure</code>ボタンをクリックしてCMakeに設定とソースコードを読み込みます。次にジェネレータの項目を設定します。我々が利用するのはVisual Studio 2019なので<code>Visual Studio 16</code>を選んでください(Visual Studio 2019はVisual Studio 16とも呼ばれます)。そうすればCMakeが選択可能なビルドオプションを表示します。ここではデフォルトのままにしておいてかまいません。設定を保存するために再度<code>Configure</code>をクリックしてください。設定完了後、<code>Generate</code>をクリックすれば<code>build</code>フォルダにプロジェクトファイルが作成されます。 +</p> + +<h3>Compilation</h3> +<h3>コンパイル</h3> +<p> + In the <code>build</code> folder a file named <code>GLFW.sln</code> can now be found and we open it with Visual Studio 2019. Since CMake generated a project file that already contains the proper configuration settings we only have to build the solution. CMake should've automatically configured the solution so it compiles to a 64-bit library; now hit build solution. This will give us a compiled library file that can be found in <code>build/src/Debug</code> named <code>glfw3.lib</code>.</p> +<p> +<code>build</code>フォルダに<code>GLFW.sln</code>というファイルがあるはずですので、Visual Studio 2019で開いてください。CMakeが作成したプロジェクトファイルには必要な設定がすべて含まれているので、あとはソリューションをビルドすればいいだけです。64bitのライブラリをコンパイルするようにCMakeが自動的に設定してくれています。build solutionを押しましょう。さすればライブラリはコンパイルされ、<code>build/src/Debug</code>というフォルダに<code>glfw3.lib</code>なるファイルが出現するでしょう。 +</p> + +<p> + Once we generated the library we need to make sure the IDE knows where to find the library and the include files for our OpenGL program. There are two common approaches in doing this: + <ol> + <li> We find the <code>/lib</code> and <code>/include</code> folders of the IDE/compiler and add the content of GLFW's <code>include</code> folder to the IDE's <code>/include</code> folder and similarly add <code>glfw3.lib</code> to the IDE's <code>/lib</code> folder. This works, but it's is not the recommended approach. It's hard to keep track of your library and include files and a new installation of your IDE/compiler results in you having to do this process all over again. </li> + <li> + Another approach (and recommended) is to create a new set of directories at a location of your choice that contains all the header files/libraries from third party libraries to which you can refer to from your IDE/compiler. You could, for instance, create a single folder that contains a <code>Libs</code> and <code>Include</code> folder where we store all our library and header files respectively for OpenGL projects. Now all the third party libraries are organized within a single location (that can be shared across multiple computers). 
The requirement is, however, that each time we create a new project we have to tell the IDE where to find those directories. + </li> + </ol> + Once the required files are stored at a location of your choice, we can start creating our first OpenGL GLFW project. +</p> +<p> +ライブラリをコンパイルしたら、IDEにそのありかを伝えねばなりません。それには二通りの方法があります: + <ol> + <li>IDEやコンパイラの<code>/lib</code>と<code>/include</code>のフォルダを見つけ、そこに<code>glfw3.lib</code>とGLFWの<code>include</code>をそれぞれ追加する方法。これは機能はしますがおすすめはできません。追加したライブラリやインクルードファイルを覚えておくのが大変ですし、新しいIDEやコンパイラをインストールするたびに同じ作業を繰り返すはめになります。</li> + <li>IDEやコンパイラがみつけられる場所に新しいフォルダを作成し、その中にサードパーティー製のライブラリに関するすべてのヘッダーファイルとライブラリファイルを集める方法。こちらの方法がおすすめです。例えば<code>Libs</code>と<code>Include</code>というフォルダを作り、OpenGLのプロジェクトで使うライブラリとヘッダーファイルをすべてここにおいておきます。こうすればサードパーティー製のライブラリが一つの場所にまとめられます(この場所を複数のコンピュータで共有することも可能です)。こちらの方法では、新しいプロジェクトを作成するたびにIDEにライブラリのある場所を教える必要があります。 + </li> + </ol> + 必要なファイルを参照できる場所に配置できれば、いよいよGLFWを利用したOpenGLのプロジェクトを作成できます。 +</p> + +<h2>Our first project</h2> +<h2>最初のプロジェクト</h2> +<p> + First, let's open up Visual Studio and create a new project. Choose C++ if multiple options are given and take the <code>Empty Project</code> (don't forget to give your project a suitable name). Since we're going to be doing everything in 64-bit and the project defaults to 32-bit, we'll need to change the dropdown at the top next to Debug from x86 to x64: +</p> +<p> +まずはVisual Studioを立ちあげて新しいプロジェクトを作成しましょう。選択肢があればC++と<code>Empty Project</code>を選び、プロジェクトに適切な名前を付けてください。デフォルトは32bitになっていますが我々は64bitで開発しますので、いちばん上にあるDebugの隣のドロップダウンをx64からx86に変更してください: +</p> + +<img src="/img/getting-started/x64.png" alt="Image of how to switch from x86 to x64"/>。 + +<p> + Once that's done, we now have a workspace to create our very first OpenGL application! + これでOpenGLのアプリケーションを作る下準備は完了です。 +</p> + + +<h2>Linking</h2> +<h2>リンク</h2> +<p> + In order for the project to use GLFW we need to <def>link</def> the library with our project. This can be done by specifying we want to use <code>glfw3.lib</code> in the linker settings, but our project does not yet know where to find <code>glfw3.lib</code> since we store our third party libraries in a different directory. We thus need to add this directory to the project first. +プロジェクトがGLFWを利用するにはライブラリをプロジェクトに<def>リンク</def>する必要があります。これには<code>glfw3.lib</code>を使うようリンカを設定すればいいのですが、先程サードパーティー製のライブラリを別のディレクトリに入れたので、プロジェクトはどこに<code>glfw3.lib</code>があるのかを知りません。プロジェクトにこのディレクトリの場所を追加する必要があります。 +</p> + +<p> + We can tell the IDE to take this directory into account when it needs to look for library and include files. Right-click the project name in the solution explorer and then go to <code>VC++ Directories</code> as seen in the image below: + われわれはIDEに、ライブラリやインクルードファイルが必要なときにわれわれのディレクトリも探すように伝えることができます。ソリューションエクスプローラにおいてプロジェクト名を右クリックし<code>VC++ Directories</code>を選択してください: +</p> + +<img src="/img/getting-started/vc_directories.png" width="600px" alt="Image of Visual Studio's VC++ Directories configuration"/>。 + +<p> + From there on out you can add your own directories to let the project know where to search. This can be done by manually inserting it into the text or clicking the appropriate location string and selecting the <code>&lt;Edit..&gt;</code> option. 
Do this for both the <code>Library Directories</code> and <code>Include Directories</code>: + ここからわれわれのディレクトリをプロジェクトに伝えることができます。直接テキストに入力してもいいですし、しかる場所をクリックして<code>&lt;Edit..&gt;</code>を選択してもかまいません。<code>ライブラリのディレクトリ</code>と<code>インクルードディレクトリ</code>の両方を登録してください: +</p> + +<img src="/img/getting-started/include_directories.png" width="600px" alt="Image of Visual Studio's Include Directories configuration"/>。 + +<p> + Here you can add as many extra directories as you'd like and from that point on the IDE will also search those directorie when searching for library and header files. As soon as your <code>Include</code> folder from GLFW is included, you will be able to find all the header files for GLFW by including <code>&lt;GLFW/..&gt;</code>. The same applies for the library directories. +ここですきなだけディレクトリを追加でき、IDEはライブラリやヘッダファイルが必要なときに追加したディレクトリも検索してくれます。GLFWの<code>インクルード</code>フォルダを追加すれば<code>&lt;GLFW/..&gt;</code>をインクルードすることでGLFWのヘッダーファイルを利用することができます。 +</p> + +<p> + Since VS can now find all the required files we can finally link GLFW to the project by going to the <code>Linker</code> tab and <code>Input</code>: + 必要なファイルをVisual Studioが見つけられるようになったので、ようやくGLFWをプロジェクトにリンクできます。<code>Linker</code>タブの中の<code>Input</code>を選択してください: +</p> + +<img src="/img/getting-started/linker_input.png" width="600px" alt="Image of Visual Studio's link configuration"/>。 + +<p> + To then link to a library you'd have to specify the name of the library to the linker. Since the library name is <code>glfw3.lib</code>, we add that to the <code>Additional Dependencies</code> field (either manually or using the <code>&lt;Edit..&gt;</code> option) and from that point on GLFW will be linked when we compile. In addition to GLFW we should also add a link entry to the OpenGL library, but this may differ per operating system: +ライブラリをリンクするために、こんどはリンカにライブラリ名を伝える必要があります。ライブラリ名は<code>glfw3.lib</code>なので、この名前を<code>Additional Dependencies</code>に追加します(手入力でも<code>&lt;Edit..&gt;</code>からでもかまいません)。これでコンパイル時にGLFWがリンクされるようになりました。GLFWに加えて、OpenGLのライブラリもリンクしなければなりませんが、これはOSによってすこし違います: +</p> + +<h3>OpenGL library on Windows</h3> +<h3>WindowsにおけるOpenGLライブラリ</h3> +<p> + If you're on Windows the OpenGL library <code>opengl32.lib</code> comes with the Microsoft SDK, which is installed by default when you install Visual Studio. Since this chapter uses the VS compiler and is on windows we add <code>opengl32.lib</code> to the linker settings. Note that the 64-bit equivalent of the OpenGL library is called <code>opengl32.lib</code>, just like the 32-bit equivalent, which is a bit of an unfortunate name. +WindowsユーザーはいますぐLinuxかBSDにのりかえてください。OpenGLのライブラリは<code>opengl32.lib</code>とよばれ、Microsoft SDKに付属しています。MicrosoftSDKはVisual Studioに標準で搭載されています。この章ではVisual Studioのコンパイラを利用し、Windows上で作業を行なっているのでリンカの設定に<code>opengl32.lib</code>を追加しましょう。まぎらわしいことに64bit用のライブラリは32bitライブラリであるかのようなへんてこな名前をしていますので注意してください。 +</p> + +<h3>OpenGL library on Linux</h3> +<h3>LinuxにおけるOpenGLライブラリ</h3> +<p> + On Linux systems you need to link to the <code>libGL.so</code> library by adding <code>-lGL</code> to your linker settings. If you can't find the library you probably need to install any of the Mesa, NVidia or AMD dev packages. 
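+</p>
+
+<p>
+  On a Debian or Ubuntu based distribution, for example, installing the Mesa development package is usually enough to get <code>libGL.so</code> (a sketch; package names differ per distribution):
+</p>
+
+<pre><code>
+sudo apt install libgl1-mesa-dev
+</code></pre>
+
+<p>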
+Linuxでは<code>libGL.so</code>をリンクするためにリンカの設定に<code>-lGL</code>を追加してください。ライブラリが見つからない場合、Mesa、NVidiaあるいはAMDの開発用パッケージをインストールしてください。 +</p> + +<p> + Then, once you've added both the GLFW and OpenGL library to the linker settings you can include the header files for GLFW as follows: +GLFWとOpenGLライブラリをリンカの設定に追加したら、GLFWのヘッダーファイルをインクルードできます: +</p> + +<pre><code> +#include &lt;GLFW\glfw3.h&gt; +</code></pre>。 + +<note> + For Linux users compiling with GCC, the following command line options may help you compile the project: <code>-lglfw3 -lGL -lX11 -lpthread -lXrandr -lXi -ldl</code>. Not correctly linking the corresponding libraries will generate many <em>undefined reference</em> errors. +GCCを利用しているLinuxユーザーは以下のコマンドラインオプションが便利です: <code>-lglfw3 -lGL -lX11 -lpthread -lXrandr -lXi -ldl</code>。リンクがうまくいっていないと、大量の<em>undefined reference</em>エラーがでます。 +</note> + +<p> + This concludes the setup and configuration of GLFW. + 以上でGLFWの設定は完了です。 +</p> + +<h2>GLAD</h2> +<p> + We're still not quite there yet, since there is one other thing we still need to do. Because OpenGL is only really a standard/specification it is up to the driver manufacturer to implement the specification to a driver that the specific graphics card supports. Since there are many different versions of OpenGL drivers, the location of most of its functions is not known at compile-time and needs to be queried at run-time. It is then the task of the developer to retrieve the location of the functions he/she needs and store them in function pointers for later use. Retrieving those locations is <a href="https://www.khronos.org/opengl/wiki/Load_OpenGL_Functions" target="_blank">OS-specific</a>. In Windows it looks something like this: +まだです。もうひとつだけやり残したことがあります。OpenGLはただの規格であり仕様であるため、各グラフィックカードがサポートするドライバにおいて、その仕様をどのように実装するかはドライバの作成者に任されています。OpenGLのドライバはたくさんあるので、関数の場所はほとんどコンパイル時にはわからず、実行時に要求されます。そのため必要な関数の場所を割り出し、その関数へのポインタとして保存するのは開発者の仕事なのです。この仕事は<a href="https://www.khronos.org/opengl/wiki/Load_OpenGL_Functions" target="_blank">OSによってまちまち</a>です。Windowsでは以下のようにします: +</p> + +<pre><code> +// define the function's prototype +// 関数のプロトタイプ宣言 +typedef void (*GL_GENBUFFERS) (GLsizei, GLuint*); +// find the function and assign it to a function pointer +// 関数を見つけ、ポインタを割り当て +GL_GENBUFFERS <function id='12'>glGenBuffers</function> = (GL_GENBUFFERS)wglGetProcAddress("<function id='12'>glGenBuffers</function>"); +// function can now be called as normal +// 上の作業により関数は普通に呼び出せる +unsigned int buffer; +<function id='12'>glGenBuffers</function>(1, &buffer); +</code></pre> + + <p> + As you can see the code looks complex and it's a cumbersome process to do this for each function you may need that is not yet declared. Thankfully, there are libraries for this purpose as well where <strong>GLAD</strong> is a popular and up-to-date library. + ごらんのように必要な関数ごとにいちいちこれを行うのはかったるいです。ありがたいことにこの仕事を肩代りしてくれるライブラリがあります。<strong>GLAD</strong>はそのようなライブラリのなかでも人気があり常に更新されています。 + </p> + +<h3>Setting up GLAD</h3> +<h3>GLADの設定</h3> + <p> + GLAD is an <a href="https://github.com/Dav1dde/glad" target="_blank">open source</a> library that manages all that cumbersome work we talked about. GLAD has a slightly different configuration setup than most common open source libraries. GLAD uses a <a href="http://glad.dav1d.de/" target="_blank">web service</a> where we can tell GLAD for which version of OpenGL we'd like to define and load all relevant OpenGL functions according to that version. 
+ GLADは<a href="https://github.com/Dav1dde/glad" target="_blank">オープンソース</a>のライブラリで、上記のような面倒な作業を行ってくれるものです。GLADは他の多くのオープンソースなライブラリとは少し違った方法でセットアップします。GLADは<a href="http://glad.dav1d.de/" target="_blank">ウェブサービス</a>を利用しています。利用したいOpenGLのバージョンをGLADに伝えれば、そのバージョンの関数を全てロードできるようになります。 + </p> + +<p> + Go to the GLAD <a href="http://glad.dav1d.de/" target="_blank">web service</a>, make sure the language is set to C++, and in the API section select an OpenGL version of at least 3.3 (which is what we'll be using; higher versions are fine as well). Also make sure the profile is set to <em>Core</em> and that the <em>Generate a loader</em> option is ticked. Ignore the extensions (for now) and click <em>Generate</em> to produce the resulting library files. +GLADの<a href="http://glad.dav1d.de/" target="_blank">ウェブサービス</a>に行き、言語がC++になっていることを確認し、APIの部分でOpenGLのバージョンから3.3以上のものを選択してください(3.3は本書で使うバージョンですが、もっと新しいバージョンでもかまいません)。また、profileが<em>Core</em>であることおよび、<em>Generate a loader</em>オプションにチェックが入っていることも確認してください。extentionはとりあえず無視してかまいませんので、<em>Generate</em>をクリックしてライブラリを作成してください。 +</p> + +<p> + GLAD by now should have provided you a zip file containing two include folders, and a single <code>glad.c</code> file. Copy both include folders (<code>glad</code> and <code>KHR</code>) into your include(s) directoy (or add an extra item pointing to these folders), and add the <code>glad.c</code> file to your project. + そうすれば二つのインクルードフォルダとひとつの<code>glad.c</code>ファイルが入ったzipファイルがダウンロードできます。これらのフォルダー(<code>glad</code>と<code>KHR</code>)をあなたのインクルードディレクトリ(またはこれらのフォルダを指したほかのもの)へコピーし、<code>glad.c</code>ファイルをプロジェクトに追加してください。 +</p> + +<p> + After the previous steps, you should be able to add the following include directive above your file: + 以上により、あなたのファイルの冒頭にインクルードディレクティブを追加できます: +</p> + +<pre><code> +#include &lt;glad/glad.h&gt; +</code></pre> + +<p> + Hitting the compile button shouldn't give you any errors, at which point we're set to go for the <a href="https://learnopengl.com/Getting-started/Hello-Window" target="_blank">next</a> chapter where we'll discuss how we can actually use GLFW and GLAD to configure an OpenGL context and spawn a window. Be sure to check that all your include and library directories are correct and that the library names in the linker settings match the corresponding libraries. 
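+</p>
+
+<p>
+  A minimal test file is enough for that check. The sketch below does nothing yet; it only verifies that the include directories and linker settings are picked up. Note that <code>glad.h</code> is included before <code>glfw3.h</code>, since GLAD's header pulls in the required OpenGL headers itself:
+</p>
+
+<pre><code>
+// include GLAD before GLFW: glad.h already includes the OpenGL headers
+#include &lt;glad/glad.h&gt;
+#include &lt;GLFW/glfw3.h&gt;
+
+int main()
+{
+    // nothing to render yet; we only want the project to compile and link
+    return 0;
+}
+</code></pre>
+
+<p>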
+ コンパイルボタンを押してエラーがでなければ、<a href="https://learnopengl.com/Getting-started/Hello-Window" target="_blank">次の</a>章に進むことができます。次の章では実際にGLFWとGLADを用いてOpenGLのコンテクストを設定し、ウィンドを作ります。すべてのインクルードおよびライブラリディレクトリが正しく、リンカの設定にあるライブラリ名が実際のライブラリ名と対応していることを確認してください。 +</p> + +<h2>Additional resources</h2> +<h2>参考</h2> +<ul> + <li><a href="http://www.glfw.org/docs/latest/window_guide.html" target="_blank">GLFW: Window Guide</a>: official GLFW guide on setting up and configuring a GLFW window.</li> + <li><a href="http://www.glfw.org/docs/latest/window_guide.html" target="_blank">GLFW: Window Guide</a>: GLFWのセットアップおよびウィンドウの設定に関する公式ガイド</li> + <li><a href="http://www.opengl-tutorial.org/miscellaneous/building-your-own-c-application/" target="_blank">Building applications</a>: provides great info about the compilation/linking process of your application and a large list of possible errors (plus solutions) that may come up.</li> + <li><a href="http://www.opengl-tutorial.org/miscellaneous/building-your-own-c-application/" target="_blank">Building applications</a>: コンパイルおよびリンクに関する情報と予想されるエラーとその解決法</li> + <li><a href="http://wiki.codeblocks.org/index.php?title=Using_GLFW_with_Code::Blocks" target="_blank">GLFW with Code::Blocks</a>: building GLFW in Code::Blocks IDE.</li> + <li><a href="http://wiki.codeblocks.org/index.php?title=Using_GLFW_with_Code::Blocks" target="_blank">GLFW with Code::Blocks</a>: Code::BlocksにおけるGLFWのビルド方法</li> + <li><a href="http://www.cmake.org/runningcmake/" target="_blank">Running CMake</a>: short overview of how to run CMake on both Windows and Linux.</li> + <li><a href="http://www.cmake.org/runningcmake/" target="_blank">Running CMake</a>: WindowsとLinux両方におけるCMakeの概要</li> + <li><a href="https://learnopengl.com/demo/autotools_tutorial.txt" target="_blank">Writing a build system under Linux</a>: an autotools tutorial by Wouter Verholst on how to write a build system in Linux.</li> + <li><a href="https://learnopengl.com/demo/autotools_tutorial.txt" target="_blank">Writing a build system under Linux</a>: Wouter Verholstによるautotoolsのチュートリアル: Linuxにおけるビルドシステムの書き方</li> + <li><a href="https://github.com/Polytonic/Glitter" target="_blank">Polytonic/Glitter</a>: a simple boilerplate project that comes pre-configured with all relevant libraries; great for if you want a sample project without the hassle of having to compile all the libraries yourself.</li> + <li><a href="https://github.com/Polytonic/Glitter" target="_blank">Polytonic/Glitter</a>: 必要なライブラリがあらかじめ設定された、雛形のようなプロジェクト; ライブラリを自分でコンパイルしないですむサンプルプロジェクトがほしい場合に最適</li> +</ul> + + + </div> +</body> +</html> diff --git a/Getting-started/Hello-Triangle.html b/Getting-started/Hello-Triangle.html @@ -0,0 +1,732 @@ +<!DOCTYPE html> +<html lang="ja"> +<head> + <meta charset="utf-8"/> + <title>LearnOpenGL - Hello Triangle</title> <!--<title>Learn OpenGL, extensive tutorial resource for learning Modern OpenGL</title>--> + <link rel="shortcut icon" type="image/ico" href="/favicon.ico" /> + <link rel="stylesheet" href="../static/style.css" /> + <meta name="description" content="Learn OpenGL . com provides good and clear modern 3.3+ OpenGL tutorials with clear examples. 
A great resource to learn modern OpenGL aimed at beginners."> + <meta name="fragment" content="!"> +</head> +<body> +<div id="content"> + <h1 id="content-title">Hello Triangle</h1> + <h1 id="content-title">はじめての三角形</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Hello-Triangle</h1> +<p> + In OpenGL everything is in 3D space, but the screen or window is a 2D array of pixels so a large part of OpenGL's work is about transforming all 3D coordinates to 2D pixels that fit on your screen. The process of transforming 3D coordinates to 2D pixels is managed by the <def>graphics pipeline</def> of OpenGL. The graphics pipeline can be divided into two large parts: the first transforms your 3D coordinates into 2D coordinates and the second part transforms the 2D coordinates into actual colored pixels. In this chapter we'll briefly discuss the graphics pipeline and how we can use it to our advantage to create fancy pixels. +OpenGLにおいてあらゆることは三次元空間上でおこります。しかしスクリーンやウィンドウはピクセルの二次元配列です。OpenGLの仕事のほとんどは三次元空間でのできごとを二次元のピクセルにおとしこむことです。三次元から二次元への変換は<def>グラフィックパイプライン</def>により処理されます。グラフィックパイプラインは大きくわけて二つのことに分割できます: ひとつめは三次元の座標を二次元に変換すること、ふたつめはその二次元の座標を実際の色に変換することです。この章ではグラフィックパイプラインの概要およびそれを使っていい感じに描画する方法をざっくりと説明します。 +</p> + +<!--<note> + There is a difference between a 2D coordinate and a pixel. A 2D coordinate is a very precise representation of where a point is in 2D space, while a 2D pixel is an approximation of that point limited by the resolution of your screen/window. +</note>--> + +<p> + The graphics pipeline takes as input a set of 3D coordinates and transforms these to colored 2D pixels on your screen. The graphics pipeline can be divided into several steps where each step requires the output of the previous step as its input. All of these steps are highly specialized (they have one specific function) and can easily be executed in parallel. Because of their parallel nature, graphics cards of today have thousands of small processing cores to quickly process your data within the graphics pipeline. The processing cores run small programs on the GPU for each step of the pipeline. These small programs are called <def>shaders</def>. +グラフィックパイプラインは三次元の座標を入力として受けとり、色の付いた二次元のピクセルをスクリーンに表示します。グラフィックパイプラインはいくつかのステップに分割できます。それぞれのステップでは入力として前のステップの出力を受けとります。各ステップはそれぞれの仕事に特化していて(それぞれの仕事に特化した関数をもっています)、簡単に並列化することができます。そのため今日のグラフィックカードは何千もの小さな演算装置を持ち、グラフィックパイプライン中で高速に処理できるようになっています。パイプラインの各ステップで、演算装置が小さなプログラムを実行します。この小さなプログラムを<def>シェーダー</def>といいます。 +</p> + +<p> + Some of these shaders are configurable by the developer which allows us to write our own shaders to replace the existing default shaders. This gives us much more fine-grained control over specific parts of the pipeline and because they run on the GPU, they can also save us valuable CPU time. Shaders are written in the <def>OpenGL Shading Language</def> (<def>GLSL</def>) and we'll delve more into that in the next chapter. +シェーダーのうちいくつかのものはデフォルトのものを開発者自身によって書いたものに変更できます。これによりパイプラインの特定の場所をより細かくコントロールできます。されにシェーダーがGPU上で実行されているので、貴重なCPUの時間を節約できます。シェーダーは<def>OpenGLシェーディンク言語</def>(<def>GLSL</def>)で記載されます。GLSLについては次の章で詳しく解説します。 +</p> + +<p> + Below you'll find an abstract representation of all the stages of the graphics pipeline. Note that the blue sections represent sections where we can inject our own shaders. 
+以下にグラフィックパイプラインのおおまかな流れを示します。青い部分は自作のシェーダーを指しこめる場所を表しています。 +</p> + +<img src="/img/getting-started/pipeline.png" class="clean" alt="The OpenGL graphics pipeline with shader stages" /> + + +<p> + As you can see, the graphics pipeline contains a large number of sections that each handle one specific part of converting your vertex data to a fully rendered pixel. We will briefly explain each part of the pipeline in a simplified way to give you a good overview of how the pipeline operates. +ごらんのように、グラフィックパイプラインにはたくさんの処理が含まれます。頂点のデータを描画用のピクセルに変換する過程でそれぞれの役割を分担しているのです。パイプラインの動きの概観をつかんでいただくために、それぞれの処理内容を簡単に見ていきましょう。 +</p> + +<p> + As input to the graphics pipeline we pass in a list of three 3D coordinates that should form a triangle in an array here called <code>Vertex Data</code>; this vertex data is a collection of vertices. A <def>vertex</def> is a collection of data per 3D coordinate. This vertex's data is represented using <def>vertex attributes</def> that can contain any data we'd like, but for simplicity's sake let's assume that each vertex consists of just a 3D position and some color value. +グラフィックパイプラインの入力として、三次元空間に三角形を構成する三つの座標を用意します。この入力のような、頂点の組を<code>頂点データ</code>と呼びます。<def>頂点</def>とは、三次元のそれぞれの座標軸にそった値の組です。頂点データは<def>頂点属性</def>といい、任意の情報を持たせることができます。ここでは簡単のために各頂点が座標と色の情報だけを保持しているものとします。 +</p> + +<note> + In order for OpenGL to know what to make of your collection of coordinates and color values OpenGL requires you to hint what kind of render types you want to form with the data. Do we want the data rendered as a collection of points, a collection of triangles or perhaps just one long line? Those hints are called <def>primitives</def> and are given to OpenGL while calling any of the drawing commands. Some of these hints are <var>GL_POINTS</var>, <var>GL_TRIANGLES</var> and <var>GL_LINE_STRIP</var>. +座標や色の組からなにをしたいかをOpenGLに伝えるために、描画のタイプを指定する必要があります。点を描画するのか、三角形か、あるいは一本の長い線か。このような描画タイプのことを<def>プリミティブ</def>といい、OpenGLがなにかを描くときに利用されます。プリミティブには<var>GL_POINTS</var>、<var>GL_TRIANGLES</var>、<var>GL_LINE_STRIP</var>等があります。 +</note> + +<p> + The first part of the pipeline is the <def>vertex shader</def> that takes as input a single vertex. The main purpose of the vertex shader is to transform 3D coordinates into different 3D coordinates (more on that later) and the vertex shader allows us to do some basic processing on the vertex attributes. +パイプラインの最初の処理は入力としてひとつの頂点をとる<def>頂点シェーダー</def>です。頂点シェーダーの主な目的は三次元の座標を別の三次元の座標に変換することです(あとで詳しく説明します)。頂点シェーターでは頂点属性に対する基本的な処理も可能です。 +</p> + +<p> + The <def>primitive assembly</def> stage takes as input all the vertices (or vertex if <var>GL_POINTS</var> is chosen) from the vertex shader that form a primitive and assembles all the point(s) in the primitive shape given; in this case a triangle. +<def>プリミティブアセンブリ</def>では入力として頂点シェーダからプリミティブを構成する頂点の組(<var>GL_POINTS</var>の場合はひとつだけ)をとり、指定されたプリミティブ(今回の場合は三角形)を形成します。 +</p> + +<p> + The output of the primitive assembly stage is passed to the <def>geometry shader</def>. The geometry shader takes as input a collection of vertices that form a primitive and has the ability to generate other shapes by emitting new vertices to form new (or other) primitive(s). In this example case, it generates a second triangle out of the given shape. 
+プリミティブアセンブリからの出力は<def>ジオメトリシェーダー</def>に渡されます。ジオメトリシェーダーは入力としてプリミティブを構成する頂点の組をとり、頂点を追加することで新しい(あるいは他の)プリミティブを作成することができます。こんかいの例では与えられた図形からふたつ目の三角形を生み出しています。 + +</p> + +<p> + The output of the geometry shader is then passed on to the <def>rasterization stage</def> where it maps the resulting primitive(s) to the corresponding pixels on the final screen, resulting in fragments for the fragment shader to use. Before the fragment shaders run, <def>clipping</def> is performed. Clipping discards all fragments that are outside your view, increasing performance. +ジオメトリシェーダーの出力は<def>ラスタライザーステージ</def>に渡されます。ここでは出力されたプリミティブを最終的にスクリーンに表示されるピクセルと対応したデータに落しこまれ、フラグメントシェーダーが利用できるフラグメントになります。フラグメントシェーダーが実行される前に、<def>クリッピング</def>が行われます。これは画面の外に隠れて見えなくなる部分のフラグメントを捨て、処理速度を向上させるものです。 +</p> + +<note> + A fragment in OpenGL is all the data required for OpenGL to render a single pixel. +フラグメントとは、OpenGLがひとつのピクセルを表示するために必要なすべてのデータのことです。 +</note> + +<p> + The main purpose of the <def>fragment shader</def> is to calculate the final color of a pixel and this is usually the stage where all the advanced OpenGL effects occur. Usually the fragment shader contains data about the 3D scene that it can use to calculate the final pixel color (like lights, shadows, color of the light and so on). +<def>フラグメントシェーター</def>の主な目的は最終的なピクセルの色を計算することです。通常このシェーダーにおいてすべての先進的な効果が適用されます。フラグメントシェーダーは普通、最終的なピクセルの色を決定するために利用する三次元空間上の情報を持っています(照明、影、光の色などです)。 +</p> + + +<p> + After all the corresponding color values have been determined, the final object will then pass through one more stage that we call the <def>alpha test</def> and <def>blending</def> stage. This stage checks the corresponding depth (and stencil) value (we'll get to those later) of the fragment and uses those to check if the resulting fragment is in front or behind other objects and should be discarded accordingly. The stage also checks for <def>alpha</def> values (alpha values define the opacity of an object) and <def>blends</def> the objects accordingly. So even if a pixel output color is calculated in the fragment shader, the final pixel color could still be something entirely different when rendering multiple triangles. +</p> + +<p> + As you can see, the graphics pipeline is quite a complex whole and contains many configurable parts. However, for almost all the cases we only have to work with the vertex and fragment shader. The geometry shader is optional and usually left to its default shader. There is also the tessellation stage and transform feedback loop that we haven't depicted here, but that's something for later. +</p> + +<p> + In modern OpenGL we are <strong>required</strong> to define at least a vertex and fragment shader of our own (there are no default vertex/fragment shaders on the GPU). For this reason it is often quite difficult to start learning modern OpenGL since a great deal of knowledge is required before being able to render your first triangle. Once you do get to finally render your triangle at the end of this chapter you will end up knowing a lot more about graphics programming. +</p> + +<h2>Vertex input</h2> +<p> + To start drawing something we have to first give OpenGL some input vertex data. OpenGL is a 3D graphics library so all coordinates that we specify in OpenGL are in 3D (<code>x</code>, <code>y</code> and <code>z</code> coordinate). 
OpenGL doesn't simply transform <strong>all</strong> your 3D coordinates to 2D pixels on your screen; OpenGL only processes 3D coordinates when they're in a specific range between <code>-1.0</code> and <code>1.0</code> on all 3 axes (<code>x</code>, <code>y</code> and <code>z</code>). All coordinates within this so called <def>normalized device coordinates</def> range will end up visible on your screen (and all coordinates outside this region won't). +</p> + +<p> + Because we want to render a single triangle we want to specify a total of three vertices with each vertex having a 3D position. We define them in normalized device coordinates (the visible region of OpenGL) in a <code>float</code> array: +</p> + +<pre><code> +float vertices[] = { + -0.5f, -0.5f, 0.0f, + 0.5f, -0.5f, 0.0f, + 0.0f, 0.5f, 0.0f +}; +</code></pre> + +<p> + Because OpenGL works in 3D space we render a 2D triangle with each vertex having a <code>z</code> coordinate of <code>0.0</code>. This way the <em>depth</em> of the triangle remains the same making it look like it's 2D. +</p> + +<note> + <strong>Normalized Device Coordinates (NDC)</strong><br/> + <p> + Once your vertex coordinates have been processed in the vertex shader, they should be in <def>normalized device coordinates</def> which is a small space where the <code>x</code>, <code>y</code> and <code>z</code> values vary from <code>-1.0</code> to <code>1.0</code>. Any coordinates that fall outside this range will be discarded/clipped and won't be visible on your screen. Below you can see the triangle we specified within normalized device coordinates (ignoring the <code>z</code> axis): + </p> + <img src="/img/getting-started/ndc.png" class="clean" alt="2D Normalized Device Coordinates as shown in a graph"/> + <p> + Unlike usual screen coordinates the positive y-axis points in the up-direction and the <code>(0,0)</code> coordinates are at the center of the graph, instead of top-left. Eventually you want all the (transformed) coordinates to end up in this coordinate space, otherwise they won't be visible. +</p> +<p> + Your NDC coordinates will then be transformed to <def>screen-space coordinates</def> via the <def>viewport transform</def> using the data you provided with <fun><function id='22'>glViewport</function></fun>. The resulting screen-space coordinates are then transformed to fragments as inputs to your fragment shader. + </p> +</note> + +<p> + With the vertex data defined we'd like to send it as input to the first process of the graphics pipeline: the vertex shader. This is done by creating memory on the GPU where we store the vertex data, configure how OpenGL should interpret the memory and specify how to send the data to the graphics card. The vertex shader then processes as much vertices as we tell it to from its memory. +</p> + +<p> + We manage this memory via so called <def>vertex buffer objects</def> (<def>VBO</def>) that can store a large number of vertices in the GPU's memory. The advantage of using those buffer objects is that we can send large batches of data all at once to the graphics card, and keep it there if there's enough memory left, without having to send data one vertex at a time. Sending data to the graphics card from the CPU is relatively slow, so wherever we can we try to send as much data as possible at once. 
Once the data is in the graphics card's memory the vertex shader has almost instant access to the vertices making it extremely fast +</p> + +<p> + A vertex buffer object is our first occurrence of an OpenGL object as we've discussed in the <a href="https://learnopengl.com/Getting-Started/OpenGL" target="_blank">OpenGL</a> chapter. Just like any object in OpenGL, this buffer has a unique ID corresponding to that buffer, so we can generate one with a buffer ID using the <fun><function id='12'>glGenBuffers</function></fun> function: +</p> + +<pre class="cpp"><code> +unsigned int VBO; +<function id='12'>glGenBuffers</function>(1, &amp;VBO); +</code></pre> + +<p> + OpenGL has many types of buffer objects and the buffer type of a vertex buffer object is <var>GL_ARRAY_BUFFER</var>. OpenGL allows us to bind to several buffers at once as long as they have a different buffer type. We can bind the newly created buffer to the <var>GL_ARRAY_BUFFER</var> target with the <fun><function id='32'>glBindBuffer</function></fun> function: +</p> + +<pre><code> +<function id='32'>glBindBuffer</function>(GL_ARRAY_BUFFER, VBO); +</code></pre> + +<p> + From that point on any buffer calls we make (on the <var>GL_ARRAY_BUFFER</var> target) will be used to configure the currently bound buffer, which is <var>VBO</var>. Then we can make a call to the + <fun><function id='31'>glBufferData</function></fun> function that copies the previously defined vertex data into the buffer's memory: +</p> + +<pre><code> +<function id='31'>glBufferData</function>(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); +</code></pre> + +<p> + <fun><function id='31'>glBufferData</function></fun> is a function specifically targeted to copy user-defined data into the currently bound buffer. Its first argument is the type of the buffer we want to copy data into: the vertex buffer object currently bound to the <var>GL_ARRAY_BUFFER</var> target. The second argument specifies the size of the data (in bytes) we want to pass to the buffer; a simple <code>sizeof</code> of the vertex data suffices. The third parameter is the actual data we want to send. +</p> + +<p> + The fourth parameter specifies how we want the graphics card to manage the given data. This can take 3 forms: +</p> + + <ul> + <li><var>GL_STREAM_DRAW</var>: the data is set only once and used by the GPU at most a few times.</li> + <li><var>GL_STATIC_DRAW</var>: the data is set only once and used many times.</li> + <li><var>GL_DYNAMIC_DRAW</var>: the data is changed a lot and used many times.</li> + + </ul> + +<p> + The position data of the triangle does not change, is used a lot, and stays the same for every render call so its usage type should best be <var>GL_STATIC_DRAW</var>. If, for instance, one would have a buffer with data that is likely to change frequently, a usage type of <var>GL_DYNAMIC_DRAW</var> ensures the graphics card will place the data in memory that allows for faster writes. +</p> + +<p> + As of now we stored the vertex data within memory on the graphics card as managed by a vertex buffer object named <var>VBO</var>. Next we want to create a vertex and fragment shader that actually processes this data, so let's start building those. +</p> + +<h2>Vertex shader</h2> +<p> + The vertex shader is one of the shaders that are programmable by people like us. 
Modern OpenGL requires that we at least set up a vertex and fragment shader if we want to do some rendering so we will briefly introduce shaders and configure two very simple shaders for drawing our first triangle. In the next chapter we'll discuss shaders in more detail. +</p> + +<p> + The first thing we need to do is write the vertex shader in the shader language GLSL (OpenGL Shading Language) and then compile this shader so we can use it in our application. Below you'll find the source code of a very basic vertex shader in GLSL: +</p> + +<pre><code> +#version 330 core +layout (location = 0) in vec3 aPos; + +void main() +{ + gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0); +} +</code></pre> + +<p> + As you can see, GLSL looks similar to C. Each shader begins with a declaration of its version. Since OpenGL 3.3 and higher the version numbers of GLSL match the version of OpenGL (GLSL version 420 corresponds to OpenGL version 4.2 for example). We also explicitly mention we're using core profile functionality. +</p> + +<p> + Next we declare all the input vertex attributes in the vertex shader with the <code>in</code> keyword. Right now we only care about position data so we only need a single vertex attribute. GLSL has a vector datatype that contains 1 to 4 floats based on its postfix digit. Since each vertex has a 3D coordinate we create a <code>vec3</code> input variable with the name <var>aPos</var>. We also specifically set the location of the input variable via <code>layout (location = 0)</code> and you'll later see that why we're going to need that location. +</p> + +<note> + <strong>Vector</strong><br/> + In graphics programming we use the mathematical concept of a vector quite often, since it neatly represents positions/directions in any space and has useful mathematical properties. A vector in GLSL has a maximum size of 4 and each of its values can be retrieved via <code>vec.x</code>, <code>vec.y</code>, <code>vec.z</code> and <code>vec.w</code> respectively where each of them represents a coordinate in space. Note that the <code>vec.w</code> component is not used as a position in space (we're dealing with 3D, not 4D) but is used for something called <def>perspective division</def>. We'll discuss vectors in much greater depth in a later chapter. +</note> + +<p> + To set the output of the vertex shader we have to assign the position data to the predefined <var>gl_Position</var> variable which is a <code>vec4</code> behind the scenes. At the end of the <fun>main</fun> function, whatever we set <var>gl_Position</var> to will be used as the output of the vertex shader. Since our input is a vector of size 3 we have to cast this to a vector of size 4. We can do this by inserting the <code>vec3</code> values inside the constructor of <code>vec4</code> and set its <code>w</code> component to <code>1.0f</code> (we will explain why in a later chapter). +</p> + +<p> + The current vertex shader is probably the most simple vertex shader we can imagine because we did no processing whatsoever on the input data and simply forwarded it to the shader's output. In real applications the input data is usually not already in normalized device coordinates so we first have to transform the input data to coordinates that fall within OpenGL's visible region. 
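+</p>
+
+<p>
+  As a small preview of what that usually looks like (the details follow in the later chapters on transformations and coordinate systems), such a vertex shader multiplies the incoming position by a matrix supplied by the application; the <code>transform</code> uniform below is a hypothetical example of that:
+</p>
+
+<pre><code>
+#version 330 core
+layout (location = 0) in vec3 aPos;
+
+// hypothetical matrix set by the application to map the input into OpenGL's visible region
+uniform mat4 transform;
+
+void main()
+{
+    gl_Position = transform * vec4(aPos, 1.0);
+}
+</code></pre>
+
+<p>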
+</p> + +<h2>Compiling a shader</h2> +<p> + We take the source code for the vertex shader and store it in a const C string at the top of the code file for now: +</p> + +<pre><code> +const char *vertexShaderSource = "#version 330 core\n" + "layout (location = 0) in vec3 aPos;\n" + "void main()\n" + "{\n" + " gl_Position = vec4(aPos.x, aPos.y, aPos.z, 1.0);\n" + "}\0"; +</code></pre> + +<p> + In order for OpenGL to use the shader it has to dynamically compile it at run-time from its source code. The first thing we need to do is create a shader object, again referenced by an ID. So we store the vertex shader as an <code>unsigned int</code> and create the shader with <fun><function id='37'>glCreateShader</function></fun>: +</p> + +<pre><code> +unsigned int vertexShader; +vertexShader = <function id='37'>glCreateShader</function>(GL_VERTEX_SHADER); +</code></pre> + +<p> + We provide the type of shader we want to create as an argument to <fun><function id='37'>glCreateShader</function></fun>. Since we're creating a vertex shader we pass in <var>GL_VERTEX_SHADER</var>. +</p> + +<p> + Next we attach the shader source code to the shader object and compile the shader: +</p> + +<pre class="cpp"><code> +<function id='42'>glShaderSource</function>(vertexShader, 1, &amp;vertexShaderSource, NULL); +<function id='38'>glCompileShader</function>(vertexShader); +</code></pre> + +<p> + The <fun><function id='42'>glShaderSource</function></fun> function takes the shader object to compile to as its first argument. The second argument specifies how many strings we're passing as source code, which is only one. The third parameter is the actual source code of the vertex shader and we can leave the 4th parameter to <code>NULL</code>. +</p> + +<note> + <p> + You probably want to check if compilation was successful after the call to <fun><function id='38'>glCompileShader</function></fun> and if not, what errors were found so you can fix those. Checking for compile-time errors is accomplished as follows: + </p> + +<pre class="cpp"><code> +int success; +char infoLog[512]; +<function id='39'>glGetShaderiv</function>(vertexShader, GL_COMPILE_STATUS, &amp;success); +</code></pre> + +<p> + First we define an integer to indicate success and a storage container for the error messages (if any). Then we check if compilation was successful with <fun><function id='39'>glGetShaderiv</function></fun>. If compilation failed, we should retrieve the error message with <fun><function id='40'>glGetShaderInfoLog</function></fun> and print the error message. + </p> + +<pre><code> +if(!success) +{ + <function id='40'>glGetShaderInfoLog</function>(vertexShader, 512, NULL, infoLog); + std::cout &lt;&lt; "ERROR::SHADER::VERTEX::COMPILATION_FAILED\n" &lt;&lt; infoLog &lt;&lt; std::endl; +} +</code></pre> +</note> + +<p> + If no errors were detected while compiling the vertex shader it is now compiled. +</p> + +<h2>Fragment shader</h2> +<p> + The fragment shader is the second and final shader we're going to create for rendering a triangle. The fragment shader is all about calculating the color output of your pixels. To keep things simple the fragment shader will always output an orange-ish color. +</p> + +<note> + Colors in computer graphics are represented as an array of 4 values: the red, green, blue and alpha (opacity) component, commonly abbreviated to RGBA. When defining a color in OpenGL or GLSL we set the strength of each component to a value between <code>0.0</code> and <code>1.0</code>. 
If, for example, we would set red to <code>1.0</code> and green to <code>1.0</code> we would get a mixture of both colors and get the color yellow. Given those 3 color components we can generate over 16 million different colors! +</note> + +<pre><code> +#version 330 core +out vec4 FragColor; + +void main() +{ + FragColor = vec4(1.0f, 0.5f, 0.2f, 1.0f); +} +</code></pre> + +<p> + The fragment shader only requires one output variable and that is a vector of size 4 that defines the final color output that we should calculate ourselves. We can declare output values with the <code>out</code> keyword, that we here promptly named <var>FragColor</var>. Next we simply assign a <code>vec4</code> to the color output as an orange color with an alpha value of <code>1.0</code> (<code>1.0</code> being completely opaque). +</p> + +<p> + The process for compiling a fragment shader is similar to the vertex shader, although this time we use the <var>GL_FRAGMENT_SHADER</var> constant as the shader type: +</p> + +<pre class="cpp"><code> +unsigned int fragmentShader; +fragmentShader = <function id='37'>glCreateShader</function>(GL_FRAGMENT_SHADER); +<function id='42'>glShaderSource</function>(fragmentShader, 1, &amp;fragmentShaderSource, NULL); +<function id='38'>glCompileShader</function>(fragmentShader); +</code></pre> + +<p> + Both the shaders are now compiled and the only thing left to do is link both shader objects into a <def>shader program</def> that we can use for rendering. Make sure to check for compile errors here as well! +</p> + +<h3>Shader program</h3> +<p> + A shader program object is the final linked version of multiple shaders combined. To use the recently compiled shaders we have to <def>link</def> them to a shader program object and then activate this shader program when rendering objects. The activated shader program's shaders will be used when we issue render calls. +</p> + +<p> + When linking the shaders into a program it links the outputs of each shader to the inputs of the next shader. This is also where you'll get linking errors if your outputs and inputs do not match. </p> + +<p> + Creating a program object is easy: +</p> + +<pre><code> +unsigned int shaderProgram; +shaderProgram = <function id='36'>glCreateProgram</function>(); +</code></pre> + +<p> + The <fun><function id='36'>glCreateProgram</function></fun> function creates a program and returns the ID reference to the newly created program object. Now we need to attach the previously compiled shaders to the program object and then link them with <fun><function id='35'>glLinkProgram</function></fun>: +</p> + +<pre><code> +<function id='34'>glAttachShader</function>(shaderProgram, vertexShader); +<function id='34'>glAttachShader</function>(shaderProgram, fragmentShader); +<function id='35'>glLinkProgram</function>(shaderProgram); +</code></pre> + +<p> + The code should be pretty self-explanatory, we attach the shaders to the program and link them via <fun><function id='35'>glLinkProgram</function></fun>. +</p> + +<note> + Just like shader compilation we can also check if linking a shader program failed and retrieve the corresponding log. However, instead of using <fun><function id='39'>glGetShaderiv</function></fun> and <fun><function id='40'>glGetShaderInfoLog</function></fun> we now use: + +<pre class="cpp"><code> +<function id='41'>glGetProgramiv</function>(shaderProgram, GL_LINK_STATUS, &success); +if(!success) { + glGetProgramInfoLog(shaderProgram, 512, NULL, infoLog); + ... 
+} +</code></pre> +</note> + +<p> + The result is a program object that we can activate by calling <fun><function id='28'>glUseProgram</function></fun> with the newly created program object as its argument: +</p> + +<pre><code> +<function id='28'>glUseProgram</function>(shaderProgram); +</code></pre> + +<p> + Every shader and rendering call after <fun><function id='28'>glUseProgram</function></fun> will now use this program object (and thus the shaders). +</p> + +<p> + Oh yeah, and don't forget to delete the shader objects once we've linked them into the program object; we no longer need them anymore: +</p> + +<pre><code> +<function id='46'>glDeleteShader</function>(vertexShader); +<function id='46'>glDeleteShader</function>(fragmentShader); +</code></pre> + +<p> + Right now we sent the input vertex data to the GPU and instructed the GPU how it should process the vertex data within a vertex and fragment shader. We're almost there, but not quite yet. OpenGL does not yet know how it should interpret the vertex data in memory and how it should connect the vertex data to the vertex shader's attributes. We'll be nice and tell OpenGL how to do that. +</p> + +<h2>Linking Vertex Attributes</h2> +<p> + The vertex shader allows us to specify any input we want in the form of vertex attributes and while this allows for great flexibility, it does mean we have to manually specify what part of our input data goes to which vertex attribute in the vertex shader. This means we have to specify how OpenGL should interpret the vertex data before rendering. +</p> + +<p> + Our vertex buffer data is formatted as follows: +</p> + +<img src="/img/getting-started/vertex_attribute_pointer.png" class="clean" alt="Vertex attribte pointer setup of OpenGL VBO"/> + + <ul> + <li>The position data is stored as 32-bit (4 byte) floating point values.</li> + <li>Each position is composed of 3 of those values.</li> + <li>There is no space (or other values) between each set of 3 values. The values are <def>tightly packed</def> in the array.</li> + <li>The first value in the data is at the beginning of the buffer.</li> + </ul> + +<p> + With this knowledge we can tell OpenGL how it should interpret the vertex data (per vertex attribute) using <fun><function id='30'>glVertexAttribPointer</function></fun>: +</p> + +<pre class="cpp"><code> +<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0); +</code></pre> + +<p> + The function <fun><function id='30'>glVertexAttribPointer</function></fun> has quite a few parameters so let's carefully walk through them: +</p> + + <ul> + <li>The first parameter specifies which vertex attribute we want to configure. Remember that we specified the location of the <var>position</var> vertex attribute in the vertex shader with <code>layout (location = 0)</code>. This sets the location of the vertex attribute to <code>0</code> and since we want to pass data to this vertex attribute, we pass in <code>0</code>.</li> + + <li>The next argument specifies the size of the vertex attribute. The vertex attribute is a <code>vec3</code> so it is composed of <code>3</code> values.</li> + + <li>The third argument specifies the type of the data which is <var>GL_FLOAT</var> (a <code>vec*</code> in GLSL consists of floating point values).</li> + + <li>The next argument specifies if we want the data to be normalized. 
If we're inputting integer data types (int, byte) and we've set this to <var>GL_TRUE</var>, the integer data is normalized to <code>0</code> (or <code>-1</code> for signed data) and <code>1</code> when converted to float. This is not relevant for us so we'll leave this at <var>GL_FALSE</var>.</li> + + <li>The fifth argument is known as the <def>stride</def> and tells us the space between consecutive vertex attributes. Since the next set of position data is located exactly 3 times the size of a <code>float</code> away we specify that value as the stride. Note that since we know that the array is tightly packed (there is no space between the next vertex attribute value) we could've also specified the stride as <code>0</code> to let OpenGL determine the stride (this only works when values are tightly packed). Whenever we have more vertex attributes we have to carefully define the spacing between each vertex attribute but we'll get to see more examples of that later on.</li> + + <li>The last parameter is of type <code>void*</code> and thus requires that weird cast. This is the <def>offset</def> of where the position data begins in the buffer. Since the position data is at the start of the data array this value is just <code>0</code>. We will explore this parameter in more detail later on</li> + </ul> + + <note> +Each vertex attribute takes its data from memory managed by a VBO and which VBO it takes its data from (you can have multiple VBOs) is determined by the VBO currently bound to <var>GL_ARRAY_BUFFER</var> when calling <fun><function id='30'>glVertexAttribPointer</function></fun>. Since the previously defined <var>VBO</var> is still bound before calling <fun><function id='30'>glVertexAttribPointer</function></fun> vertex attribute <code>0</code> is now associated with its vertex data. +</note> + + +<p> + Now that we specified how OpenGL should interpret the vertex data we should also enable the vertex attribute with <fun><function id='29'><function id='60'>glEnable</function>VertexAttribArray</function></fun> giving the vertex attribute location as its argument; vertex attributes are disabled by default. From that point on we have everything set up: we initialized the vertex data in a buffer using a vertex buffer object, set up a vertex and fragment shader and told OpenGL how to link the vertex data to the vertex shader's vertex attributes. Drawing an object in OpenGL would now look something like this: +</p> + +<pre><code> +// 0. copy our vertices array in a buffer for OpenGL to use +<function id='32'>glBindBuffer</function>(GL_ARRAY_BUFFER, VBO); +<function id='31'>glBufferData</function>(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); +// 1. then set the vertex attributes pointers +<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0); +// 2. use our shader program when we want to render an object +<function id='28'>glUseProgram</function>(shaderProgram); +// 3. now draw the object +someOpenGLFunctionThatDrawsOurTriangle(); +</code></pre> + +<p> + We have to repeat this process every time we want to draw an object. It may not look like that much, but imagine if we have over 5 vertex attributes and perhaps 100s of different objects (which is not uncommon). Binding the appropriate buffer objects and configuring all vertex attributes for each of those objects quickly becomes a cumbersome process. 
What if there was some way we could store all these state configurations into an object and simply bind this object to restore its state? +</p> + +<h3>Vertex Array Object</h3> +<p> + A <def>vertex array object</def> (also known as <def>VAO</def>) can be bound just like a vertex buffer object and any subsequent vertex attribute calls from that point on will be stored inside the VAO. This has the advantage that when configuring vertex attribute pointers you only have to make those calls once and whenever we want to draw the object, we can just bind the corresponding VAO. This makes switching between different vertex data and attribute configurations as easy as binding a different VAO. All the state we just set is stored inside the VAO. +</p> + +<warning> + Core OpenGL <strong>requires</strong> that we use a VAO so it knows what to do with our vertex inputs. If we fail to bind a VAO, OpenGL will most likely refuse to draw anything. +</warning> + +<p> + A vertex array object stores the following: +</p> + +<ul> + <li>Calls to <fun><function id='29'><function id='60'>glEnable</function>VertexAttribArray</function></fun> or <fun>glDisableVertexAttribArray</fun>.</li> + <li>Vertex attribute configurations via <fun><function id='30'>glVertexAttribPointer</function></fun>.</li> + <li>Vertex buffer objects associated with vertex attributes by calls to <fun><function id='30'>glVertexAttribPointer</function></fun>.</li> +</ul> + + <img src="/img/getting-started/vertex_array_objects.png" class="clean" alt="Image of how a VAO (Vertex Array Object) operates and what it stores in OpenGL"/> + +<p> + The process to generate a VAO looks similar to that of a VBO: +</p> + +<pre class="cpp"><code> +unsigned int VAO; +<function id='33'>glGenVertexArrays</function>(1, &amp;VAO); +</code></pre> + +<p> + To use a VAO all you have to do is bind the VAO using <fun><function id='27'>glBindVertexArray</function></fun>. From that point on we should bind/configure the corresponding VBO(s) and attribute pointer(s) and then unbind the VAO for later use. As soon as we want to draw an object, we simply bind the VAO with the preferred settings before drawing the object and that is it. In code this would look a bit like this: +</p> + +<pre><code> +// ..:: Initialization code (done once (unless your object frequently changes)) :: .. +// 1. bind Vertex Array Object +<function id='27'>glBindVertexArray</function>(VAO); +// 2. copy our vertices array in a buffer for OpenGL to use +<function id='32'>glBindBuffer</function>(GL_ARRAY_BUFFER, VBO); +<function id='31'>glBufferData</function>(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW); +// 3. then set our vertex attributes pointers +<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0); + + +[...] + +// ..:: Drawing code (in render loop) :: .. +// 4. draw the object +<function id='28'>glUseProgram</function>(shaderProgram); +<function id='27'>glBindVertexArray</function>(VAO); +someOpenGLFunctionThatDrawsOurTriangle(); +</code></pre> + +<p> + And that is it! Everything we did the last few million pages led up to this moment, a VAO that stores our vertex attribute configuration and which VBO to use. Usually when you have multiple objects you want to draw, you first generate/configure all the VAOs (and thus the required VBO and attribute pointers) and store those for later use. 
The moment we want to draw one of our objects, we take the corresponding VAO, bind it, then draw the object and unbind the VAO again. +</p> + +<h3>The triangle we've all been waiting for</h3> +<p> + To draw our objects of choice, OpenGL provides us with the <fun><function id='1'>glDrawArrays</function></fun> function that draws primitives using the currently active shader, the previously defined vertex attribute configuration and with the VBO's vertex data (indirectly bound via the VAO). +</p> + +<pre class="cpp"><code> +<function id='28'>glUseProgram</function>(shaderProgram); +<function id='27'>glBindVertexArray</function>(VAO); +<function id='1'>glDrawArrays</function>(GL_TRIANGLES, 0, 3); +</code></pre> + +<p> + The <fun><function id='1'>glDrawArrays</function></fun> function takes as its first argument the OpenGL primitive type we would like to draw. Since I said at the start we wanted to draw a triangle, and I don't like lying to you, we pass in <var>GL_TRIANGLES</var>. The second argument specifies the starting index of the vertex array we'd like to draw; we just leave this at <code>0</code>. The last argument specifies how many vertices we want to draw, which is <code>3</code> (we only render 1 triangle from our data, which is exactly 3 vertices long). +</p> + +<p> + Now try to compile the code and work your way backwards if any errors popped up. As soon as your application compiles, you should see the following result: +</p> + +<img src="/img/getting-started/hellotriangle.png" width="600px" class="clean" alt="An image of a basic triangle rendered in modern OpenGL" /> + +<p> + The source code for the complete program can be found <a href="/code_viewer_gh.php?code=src/1.getting_started/2.1.hello_triangle/hello_triangle.cpp" target="_blank">here</a> . +</p> + +<p> + If your output does not look the same you probably did something wrong along the way so check the complete source code and see if you missed anything. +</p> + +<h2> Element Buffer Objects </h2> +<p> + There is one last thing we'd like to discuss when rendering vertices and that is <def>element buffer objects</def> abbreviated to EBO. To explain how element buffer objects work it's best to give an example: suppose we want to draw a rectangle instead of a triangle. We can draw a rectangle using two triangles (OpenGL mainly works with triangles). This will generate the following set of vertices: +</p> + +<pre><code> +float vertices[] = { + // first triangle + 0.5f, 0.5f, 0.0f, // top right + 0.5f, -0.5f, 0.0f, // bottom right + -0.5f, 0.5f, 0.0f, // top left + // second triangle + 0.5f, -0.5f, 0.0f, // bottom right + -0.5f, -0.5f, 0.0f, // bottom left + -0.5f, 0.5f, 0.0f // top left +}; +</code></pre> + +<p> + As you can see, there is some overlap on the vertices specified. We specify <code>bottom right</code> and <code>top left</code> twice! This is an overhead of 50% since the same rectangle could also be specified with only 4 vertices, instead of 6. This will only get worse as soon as we have more complex models that have over 1000s of triangles where there will be large chunks that overlap. What would be a better solution is to store only the unique vertices and then specify the order at which we want to draw these vertices in. In that case we would only have to store 4 vertices for the rectangle, and then just specify at which order we'd like to draw them. Wouldn't it be great if OpenGL provided us with a feature like that? +</p> + +<p> + Thankfully, element buffer objects work exactly like that. 
An EBO is a buffer, just like a vertex buffer object, that stores indices that OpenGL uses to decide what vertices to draw. This so called <def>indexed drawing</def> is exactly the solution to our problem. To get started we first have to specify the (unique) vertices and the indices to draw them as a rectangle: +</p> + +<pre><code> +float vertices[] = { + 0.5f, 0.5f, 0.0f, // top right + 0.5f, -0.5f, 0.0f, // bottom right + -0.5f, -0.5f, 0.0f, // bottom left + -0.5f, 0.5f, 0.0f // top left +}; +unsigned int indices[] = { // note that we start from 0! + 0, 1, 3, // first triangle + 1, 2, 3 // second triangle +}; +</code></pre> + +<p> + You can see that, when using indices, we only need 4 vertices instead of 6. Next we need to create the element buffer object: +</p> + +<pre class="cpp"><code> +unsigned int EBO; +<function id='12'>glGenBuffers</function>(1, &amp;EBO); +</code></pre> + +<p> + Similar to the VBO we bind the EBO and copy the indices into the buffer with <fun><function id='31'>glBufferData</function></fun>. Also, just like the VBO we want to place those calls between a bind and an unbind call, although this time we specify <var>GL_ELEMENT_ARRAY_BUFFER</var> as the buffer type. +</p> + +<pre><code> +<function id='32'>glBindBuffer</function>(GL_ELEMENT_ARRAY_BUFFER, EBO); +<function id='31'>glBufferData</function>(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW); +</code></pre> + +<p> + Note that we're now giving <var>GL_ELEMENT_ARRAY_BUFFER</var> as the buffer target. The last thing left to do is replace the <fun><function id='1'>glDrawArrays</function></fun> call with <fun><function id='2'>glDrawElements</function></fun> to indicate we want to render the triangles from an index buffer. When using <fun><function id='2'>glDrawElements</function></fun> we're going to draw using indices provided in the element buffer object currently bound: +</p> + +<pre class="cpp"><code> +<function id='32'>glBindBuffer</function>(GL_ELEMENT_ARRAY_BUFFER, EBO); +<function id='2'>glDrawElements</function>(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); +</code></pre> + +<p> + The first argument specifies the mode we want to draw in, similar to <fun><function id='1'>glDrawArrays</function></fun>. The second argument is the count or number of elements we'd like to draw. We specified 6 indices so we want to draw 6 vertices in total. The third argument is the type of the indices which is of type <var>GL_UNSIGNED_INT</var>. The last argument allows us to specify an offset in the EBO (or pass in an index array, but that is when you're not using element buffer objects), but we're just going to leave this at 0. +</p> + +<p> + The <fun><function id='2'>glDrawElements</function></fun> function takes its indices from the EBO currently bound to the <var>GL_ELEMENT_ARRAY_BUFFER</var> target. This means we have to bind the corresponding EBO each time we want to render an object with indices which again is a bit cumbersome. It just so happens that a vertex array object also keeps track of element buffer object bindings. The last element buffer object that gets bound while a VAO is bound, is stored as the VAO's element buffer object. Binding to a VAO then also automatically binds that EBO. +</p> + +<img src="/img/getting-started/vertex_array_objects_ebo.png" class="clean" alt="Image of VAO's structure / what it stores now also with EBO bindings."/> + +<warning> + A VAO stores the <fun><function id='32'>glBindBuffer</function></fun> calls when the target is <var>GL_ELEMENT_ARRAY_BUFFER</var>. 
This also means it stores its unbind calls so make sure you don't unbind the element array buffer before unbinding your VAO, otherwise it doesn't have an EBO configured.
+</warning>
+
+<p>
+  The resulting initialization and drawing code now looks something like this:
+</p>
+
+<pre><code>
+// ..:: Initialization code :: ..
+// 1. bind Vertex Array Object
+<function id='27'>glBindVertexArray</function>(VAO);
+// 2. copy our vertices array in a vertex buffer for OpenGL to use
+<function id='32'>glBindBuffer</function>(GL_ARRAY_BUFFER, VBO);
+<function id='31'>glBufferData</function>(GL_ARRAY_BUFFER, sizeof(vertices), vertices, GL_STATIC_DRAW);
+// 3. copy our index array in an element buffer for OpenGL to use
+<function id='32'>glBindBuffer</function>(GL_ELEMENT_ARRAY_BUFFER, EBO);
+<function id='31'>glBufferData</function>(GL_ELEMENT_ARRAY_BUFFER, sizeof(indices), indices, GL_STATIC_DRAW);
+// 4. then set the vertex attributes pointers
+<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 3 * sizeof(float), (void*)0);
+<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0);
+
+[...]
+
+// ..:: Drawing code (in render loop) :: ..
+<function id='28'>glUseProgram</function>(shaderProgram);
+<function id='27'>glBindVertexArray</function>(VAO);
+<function id='2'>glDrawElements</function>(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
+<function id='27'>glBindVertexArray</function>(0);
+</code></pre>
+
+<p>
+  Running the program should give an image as depicted below. The left image should look familiar and the right image is the rectangle drawn in <def>wireframe mode</def>. The wireframe rectangle shows that the rectangle indeed consists of two triangles.
+</p>
+
+<img src="/img/getting-started/hellotriangle2.png" width="800px" class="clean" alt="A rectangle drawn using indexed rendering in OpenGL"/>
+
+<note>
+  <strong>Wireframe mode</strong><br/>
+  To draw your triangles in wireframe mode, you can configure how OpenGL draws its primitives via <code><function id='43'>glPolygonMode</function>(GL_FRONT_AND_BACK, GL_LINE)</code>. The first argument says we want to apply it to the front and back of all triangles and the second argument tells us to draw them as lines. Any subsequent drawing calls will render the triangles in wireframe mode until we set it back to its default using <code><function id='43'>glPolygonMode</function>(GL_FRONT_AND_BACK, GL_FILL)</code>.
+</note>
+
+<p>
+  If you have any errors, work your way backwards and see if you missed anything. You can find the complete source code <a href="/code_viewer_gh.php?code=src/1.getting_started/2.2.hello_triangle_indexed/hello_triangle_indexed.cpp" target="_blank">here</a>.
+</p>
+
+<p>
+  If you managed to draw a triangle or a rectangle just like we did, then congratulations, you managed to make it past one of the hardest parts of modern OpenGL: drawing your first triangle. This is a difficult part since there is a large chunk of knowledge required before being able to draw your first triangle. Thankfully, we've now made it past that barrier and the upcoming chapters will hopefully be much easier to understand.
+</p>
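+
+<p>
+  As a small aside to the wireframe note above: if you want to flip between filled and wireframe rendering at runtime, one option is to switch <fun><function id='43'>glPolygonMode</function></fun> based on keyboard input. The sketch below builds on the <fun>processInput</fun> function from the previous chapter; the choice of keys (<code>1</code> and <code>2</code>) is just an example and not something the chapter's reference source code does:
+</p>
+
+<pre class="cpp"><code>
+void processInput(GLFWwindow *window)
+{
+    if(glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS)
+        glfwSetWindowShouldClose(window, true);
+
+    // toggle between filled and wireframe rendering of all triangles
+    // (key choice is arbitrary; any GLFW key constant works)
+    if(glfwGetKey(window, GLFW_KEY_1) == GLFW_PRESS)
+        <function id='43'>glPolygonMode</function>(GL_FRONT_AND_BACK, GL_FILL);
+    if(glfwGetKey(window, GLFW_KEY_2) == GLFW_PRESS)
+        <function id='43'>glPolygonMode</function>(GL_FRONT_AND_BACK, GL_LINE);
+}
+</code></pre>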
+<h2>Additional resources</h2>
+<ul>
+  <li><a href="http://antongerdelan.net/opengl/hellotriangle.html" target="_blank">antongerdelan.net/hellotriangle</a>: Anton Gerdelan's take on rendering the first triangle.</li>
+  <li><a href="https://open.gl/drawing" target="_blank">open.gl/drawing</a>: Alexander Overvoorde's take on rendering the first triangle.</li>
+  <li><a href="http://antongerdelan.net/opengl/vertexbuffers.html" target="_blank">antongerdelan.net/vertexbuffers</a>: some extra insights into vertex buffer objects.</li>
+  <li><a href="https://learnopengl.com/In-Practice/Debugging" target="_blank">learnopengl.com/In-Practice/Debugging</a>: there are a lot of steps involved in this chapter; if you're stuck it may be worthwhile to read a bit on debugging in OpenGL (up until the debug output section).</li>
+</ul>
+
+<h2>Exercises</h2>
+<p>
+  To really get a good grasp of the concepts discussed, a few exercises were set up. It is advised to work through them before continuing to the next subject to make sure you get a good grasp of what's going on.
+</p>
+
+<ol>
+  <li>Try to draw 2 triangles next to each other using <fun><function id='1'>glDrawArrays</function></fun> by adding more vertices to your data: <a href="/code_viewer_gh.php?code=src/1.getting_started/2.3.hello_triangle_exercise1/hello_triangle_exercise1.cpp" target="_blank">solution</a>.</li>
+  <li>Now create the same 2 triangles using two different VAOs and VBOs for their data: <a href="/code_viewer_gh.php?code=src/1.getting_started/2.4.hello_triangle_exercise2/hello_triangle_exercise2.cpp" target="_blank">solution</a>.</li>
+  <li>Create two shader programs where the second program uses a different fragment shader that outputs the color yellow; draw both triangles again where one outputs the color yellow: <a href="/code_viewer_gh.php?code=src/1.getting_started/2.5.hello_triangle_exercise3/hello_triangle_exercise3.cpp" target="_blank">solution</a>.</li>
+</ol>
+
+  </div>
+
+</div> <!-- container div -->
+
+</div> <!-- super container div -->
+</body>
+</html>
diff --git a/Getting-started/Hello-Window.html b/Getting-started/Hello-Window.html
@@ -0,0 +1,299 @@
+<!DOCTYPE html>
+<html lang="en">
+<head>
+    <meta charset="utf-8"/>
+    <title>LearnOpenGL - Hello Window</title> <!--<title>Learn OpenGL, extensive tutorial resource for learning Modern OpenGL</title>-->
+    <link rel="shortcut icon" type="image/ico" href="/favicon.ico" />
+    <link rel="stylesheet" href="../static/style.css" />
+    <meta name="description" content="Learn OpenGL . com provides good and clear modern 3.3+ OpenGL tutorials with clear examples. A great resource to learn modern OpenGL aimed at beginners.">
+    <meta name="fragment" content="!">
+</head>
+<body>
+    <div id="content">
+    <h1 id="content-title">Hello Window</h1>
+    <h1 id="content-title">はじめてのウィンドウ</h1>
+<h1 id="content-url" style='display:none;'>Getting-started/Hello-Window</h1>
+<p>
+  Let's see if we can get GLFW up and running. First, create a <code>.cpp</code> file and add the following includes to the top of your newly created file.
+GLFWを立ち上げ実行できるかどうか確認しましょう。はじめに<code>.cpp</code>ファイルを作成し、一番上に以下のインクルードディレクティブを書いてください:
+</p>
+
+<pre><code>
+#include &lt;glad/glad.h&gt;
+#include &lt;GLFW/glfw3.h&gt;
+</code></pre>
+
+<warning>
+  Be sure to include GLAD before GLFW.
The include file for GLAD includes the required OpenGL headers behind the scenes (like <code>GL/gl.h</code>) so be sure to include GLAD before other header files that require OpenGL (like GLFW). +必ずGLADをGLFWより前にインクルードしてください。GLADをインクルードすると、必要なOpenGLのヘッダーファイルを一緒にインクルードしてくれます(たとえば<code>GL/gl.h</code>のようなものです)。そのためGLFWのようなOpenGLを必要とするヘッダーのまえに、GLADをインクルードする必要があるのです。 +</warning> + +<p> + Next, we create the <fun>main</fun> function where we will instantiate the GLFW window: +続いて<fun>main</fun>関数を作成し、GLFWをインスタンス化します: +</p> + +<pre><code> +int main() +{ + <function id='17'>glfwInit</function>(); + <function id='18'>glfwWindowHint</function>(GLFW_CONTEXT_VERSION_MAJOR, 3); + <function id='18'>glfwWindowHint</function>(GLFW_CONTEXT_VERSION_MINOR, 3); + <function id='18'>glfwWindowHint</function>(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); + //<function id='18'>glfwWindowHint</function>(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE); + + return 0; +} +</code></pre>。 + +<p> + In the main function we first initialize GLFW with <fun><function id='17'>glfwInit</function></fun>, after which we can configure GLFW using <fun><function id='18'>glfwWindowHint</function></fun>. The first argument of <fun><function id='18'>glfwWindowHint</function></fun> tells us what option we want to configure, where we can select the option from a large enum of possible options prefixed with <code>GLFW_</code>. The second argument is an integer that sets the value of our option. A list of all the possible options and its corresponding values can be found at <a href="http://www.glfw.org/docs/latest/window.html#window_hints" target="_blank">GLFW's window handling</a> documentation. If you try to run the application now and it gives a lot of <em>undefined reference</em> errors it means you didn't successfully link the GLFW library. +main関数のはじめに<fun><function id='17'>glfwInit</function></fun>を使ってGLFWを初期化します。続いて<fun><function id='18'>glfwWindowHint</function></fun>によりGLFWの設定を行います。<fun><function id='18'>glfwWindowHint</function></fun>のひとつめの変数は設定したい項目で、頭に<code>GLFW_</code>がついた大きなenumのなかから選べます。ふたつめの変数は選んだオプションを設定する整数です。利用可能なオプションと対応する整数値は<a href="http://www.glfw.org/docs/latest/window.html#window_hints" target="_blank">GLFW's window handling</a>において確認できます。この時点でアプリケーションを実行して大量の<em>undefined reference</em>がでた場合、GLFWライブラリをうまくリンクできていないということです。 +</p> + +<p> + Since the focus of this book is on OpenGL version 3.3 we'd like to tell GLFW that 3.3 is the OpenGL version we want to use. This way GLFW can make the proper arrangements when creating the OpenGL context. This ensures that when a user does not have the proper OpenGL version GLFW fails to run. We set the major and minor version both to <code>3</code>. We also tell GLFW we want to explicitly use the core-profile. Telling GLFW we want to use the core-profile means we'll get access to a smaller subset of OpenGL features without backwards-compatible features we no longer need. Note that on Mac OS X you need to add <code><function id='18'>glfwWindowHint</function>(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);</code> to your initialization code for it to work. 
+われわれはOpenGLのversion 3.3を利用したいので、GLFWにそのことを伝えます。こうすることで、OpenGLコンテクストを作成するにあたりGLFWが適切な変数を作成できるようになります。また、ユーザーのコンピュータにOpenGLの適切なバージョンが入っていない場合、GLFWの実行が失敗することが保証されます。メジャーバージョンとマイナーバージョンの両方を3に設定しましょう。さらに、core-profileを利用することを明記します。こうすることでわれわれには必要のない下位互換性を捨て、OpenGLをコンパクトに利用できます。あなたがMac OS Xを利用している場合、<code><function id='18'>glfwWindowHint</function>(GLFW_OPENGL_FORWARD_COMPAT, GL_TRUE);</code>を追加する必要があります。 +</p> + +<note> + Make sure you have OpenGL versions 3.3 or higher installed on your system/hardware otherwise the application will crash or display undefined behavior. To find the OpenGL version on your machine either call <strong>glxinfo</strong> on Linux machines or use a utility like the <a href="http://download.cnet.com/OpenGL-Extensions-Viewer/3000-18487_4-34442.html" target="_blank">OpenGL Extension Viewer</a> for Windows. If your supported version is lower try to check if your video card supports OpenGL 3.3+ (otherwise it's really old) and/or update your drivers. +あなたのシステム/ハードウェアにOpenGLバージョン3.3より新しいものがインストールされていることを確認してください。さもなければアプリケーションがクラッシュしたり、不定義の動作をすることがあります。コンピュータにインストールされたOpenGLのバージョンを調べるには、Linuxにおいては<strong>glxinfo</strong>を、Windowsにおいては<a href="http://download.cnet.com/OpenGL-Extensions-Viewer/3000-18487_4-34442.html" target="_blank">OpenGL Extension Viewer</a>のようなものを利用してください。OpenGLのバージョンが古い場合、ビデオカードがOpenGLの3.3以上をサポートしていないか確認し、ドライバをアップデートしてください(サポートしていないビデオカードは相当古いです)。 +</note> + +<p> + Next we're required to create a window object. This window object holds all the windowing data and is required by most of GLFW's other functions. +次はウィンドウオブジェクトの作成です。ウィンドウオブジェクトはウィンドウにかかるすべてのデータを保持しており、GLFWのほとんどの関数を利用するうえで必要となります。 +</p> + +<pre><code> +GLFWwindow* window = <function id='20'>glfwCreateWindow</function>(800, 600, "LearnOpenGL", NULL, NULL); +if (window == NULL) +{ + std::cout &lt;&lt; "Failed to create GLFW window" &lt;&lt; std::endl; + <function id='25'>glfwTerminate</function>(); + return -1; +} +<function id='19'>glfwMakeContextCurrent</function>(window); +</code></pre> + +<p> + The <fun><function id='20'>glfwCreateWindow</function></fun> function requires the window width and height as its first two arguments respectively. The third argument allows us to create a name for the window; for now we call it <code>&quot;LearnOpenGL&quot;</code> but you're allowed to name it however you like. We can ignore the last 2 parameters. The function returns a <fun>GLFWwindow</fun> object that we'll later need for other GLFW operations. After that we tell GLFW to make the context of our window the main context on the current thread. +<fun><function id='20'>glfwCreateWindow</function></fun>関数はウィンドウの幅と高さをそれぞれ第一および第二引数としてとります。三番目の引数はウィンドウの名前です。ここでは<code>&quot;LearnOpenGL&quot;</code>としていますが、あなたの好きな名前でかまいません。あとのふたつの引数はここでは無視します。この関数は<fun>GLFWwindow</fun>オブジェクトを返します。このオブジェクトはGLFWでほかの操作をするときに必要なものです。ウィンドウオブジェクトを作成したあとは、コンテクストの作成です。先程作ったウィンドウのコンテクストをこれから利用するコンテクストとして宣言します。 +</p> + +<h2>GLAD</h2> +<p> + In the previous chapter we mentioned that GLAD manages function pointers for OpenGL so we want to initialize GLAD before we call any OpenGL function: +前の章でGLADが関数のポインタを制御するといいました。OpenGLの関数を呼びだすまえにGLADを初期化する方法を見ていきましょう。 +</p> + +<pre><code> +if (!gladLoadGLLoader((GLADloadproc)glfwGetProcAddress)) +{ + std::cout &lt;&lt; "Failed to initialize GLAD" &lt;&lt; std::endl; + return -1; +} +</code></pre> + +<p> + We pass GLAD the function to load the address of the OpenGL function pointers which is OS-specific. 
GLFW gives us <fun>glfwGetProcAddress</fun> that defines the correct function based on which OS we're compiling for. +GLADを上記の関数で処理することにより、OpenGLの関数へのポインタのアドレスをロードします。GLFWは<fun>glfwGetProcAddress</fun>により利用しているOSにあった関数を正確に定義します。 +</p> + +<h2>Viewport</h2> +<h2>ビューポート</h2> +<p> + Before we can start rendering we have to do one last thing. We have to tell OpenGL the size of the rendering window so OpenGL knows how we want to display the data and coordinates with respect to the window. We can set those <em>dimensions</em> via the <fun><function id='22'>glViewport</function></fun> function: +描画をはじめる前に、もうひとつしておかなければならないことがあります。OpenGLに描画するウィンドウのサイズを伝え、ウィンドウ中にデータと座標をどのように表示させるかを決定することです。<fun><function id='22'>glViewport</function></fun>により画面の<em>寸法</em>を設定できます: +</p> + +<pre class="cpp"><code> +<function id='22'>glViewport</function>(0, 0, 800, 600); +</code></pre> + +<p> + The first two parameters of <fun><function id='22'>glViewport</function></fun> set the location of the lower left corner of the window. The third and fourth parameter set the width and height of the rendering window in pixels, which we set equal to GLFW's window size. +<fun><function id='22'>glViewport</function></fun>のはじめのふたつの引数はウィンドウ左下の角の場所です。あとのふたつの引数で、ウィンドの幅と高さをピクセル単位で指定します。ここではGLFWのウィンドウと同じサイズにしましょう。 +</p> + +<p> + We could actually set the viewport dimensions at values smaller than GLFW's dimensions; then all the OpenGL rendering would be displayed in a smaller window and we could for example display other elements outside the OpenGL viewport. +ビューポートの寸法をGLFWのものより小さくすることも可能です。そうすることで、OpenGLによる描画がウィンドウより小さくなり、あいたところに他のものを配置したりすることも可能になります。 +</p> + +<note> + Behind the scenes OpenGL uses the data specified via <fun><function id='22'>glViewport</function></fun> to transform the 2D coordinates it processed to coordinates on your screen. For example, a processed point of location <code>(-0.5,0.5)</code> would (as its final transformation) be mapped to <code>(200,450)</code> in screen coordinates. Note that processed coordinates in OpenGL are between -1 and 1 so we effectively map from the range (-1 to 1) to (0, 800) and (0, 600). +OpenGLは、処理した二次元の座標をスクリーンの座標に変換するために、<fun><function id='22'>glViewport</function></fun>を通してあたえられたデータを利用します。たとえばOpenGLの処理により座標が<code>(-0.5,0.5)</code>になった点は、スクリーン上の<code>(200, 450)</code>に対応します。OpenGLで処理された座標は-1と1の間におさまることに留意してください。この縦横(-1, 1)の区間はそれぞれ(0, 800), (0, 600)に対応しています。 +</note> + +<p> + However, the moment a user resizes the window the viewport should be adjusted as well. We can register a callback function on the window that gets called each time the window is resized. This resize callback function has the following prototype: +しかしユーザーがウィンドウのサイズを変更した場合、それにあわせてビューポートも変化するべきです。そのためにコールバック関数をウィンドウに登録して、ウィンドウサイズが変更されるたびに呼ばれるようにできます。次にあげるのがサイズ変更のコールバック関数のプロトタイプです: +</p> + +<pre><code> +void framebuffer_size_callback(GLFWwindow* window, int width, int height); +</code></pre> + +<p> + The framebuffer size function takes a <fun>GLFWwindow</fun> as its first argument and two integers indicating the new window dimensions. Whenever the window changes in size, GLFW calls this function and fills in the proper arguments for you to process. 
+この関数は第一引数に<fun>GLFWwindow</fun>をとり、変更されたウィンドウの寸法を第二、第三引数にとります。ウィンドウサイズが変更されれば、GLFWがこの関数を呼び出し、処理に必要な変数を渡します。 +</p> + +<pre><code> +void framebuffer_size_callback(GLFWwindow* window, int width, int height) +{ + <function id='22'>glViewport</function>(0, 0, width, height); +} +</code></pre> + +<p> + We do have to tell GLFW we want to call this function on every window resize by registering it: +ウィンドウサイズが変更されるたびにこの関数が呼び出されるよう、以下の通り登録します: +</p> + +<pre><code> +glfwSetFramebufferSizeCallback(window, framebuffer_size_callback); +</code></pre> + +<p> + When the window is first displayed <fun>framebuffer_size_callback</fun> gets called as well with the resulting window dimensions. For retina displays <var>width</var> and <var>height</var> will end up significantly higher than the original input values. +ウィンドウが初めて表示されたときも、<fun>framebuffer_size_callback</fun>は呼ばれ、実際に表示されたウィンドウの寸法が渡されます。レティナディスプレイでは<var>width</var>や<var>height</var>はもともと入力していた値よりもかなり大きくなります。 +</p> + +<p> + There are many callbacks functions we can set to register our own functions. For example, we can make a callback function to process joystick input changes, process error messages etc. We register the callback functions after we've created the window and before the render loop is initiated. +このほかにもコールバック関数として登録できるものはたくさんあります。ジョイスティックの入力値を処理する関数や、エラーメッセージを処理する関数等です。コールバック関数はウィンドウを作成した後、描画ループが始まる前に登録します。 +</p> + +<h1>Ready your engines</h1> +<h1>エンジンの準備</h1> +<p> + We don't want the application to draw a single image and then immediately quit and close the window. We want the application to keep drawing images and handling user input until the program has been explicitly told to stop. For this reason we have to create a while loop, that we now call the <def>render loop</def>, that keeps on running until we tell GLFW to stop. The following code shows a very simple render loop: +アプリケーションがある画像を表示したあとすぐに終了してウィンドウを閉じてしまってはおもしろくありません。プログラムが終了するまで映像を出力し続け、ユーザーからの入力を処理してほしいものです。そのために<def>描画ループ</def>と呼ばれるwhileループを作成し、ユーザーがGLFWに終了を命じるまで動き続けるようにします。以下のコードは単純な描画ループの例です: +</p> + +<pre><code> +while(!<function id='14'>glfwWindowShouldClose</function>(window)) +{ + <function id='24'>glfwSwapBuffers</function>(window); + <function id='23'>glfwPollEvents</function>(); +} +</code></pre> + +<p> + The <fun><function id='14'>glfwWindowShouldClose</function></fun> function checks at the start of each loop iteration if GLFW has been instructed to close. If so, the function returns <code>true</code> and the render loop stops running, after which we can close the application.<br/> + The <fun><function id='23'>glfwPollEvents</function></fun> function checks if any events are triggered (like keyboard input or mouse movement events), updates the window state, and calls the corresponding functions (which we can register via callback methods). + The <fun><function id='24'>glfwSwapBuffers</function></fun> will swap the color buffer (a large 2D buffer that contains color values for each pixel in GLFW's window) that is used to render to during this render iteration and show it as output to the screen. 
+<fun><function id='14'>glfwWindowShouldClose</function></fun>はループごとに、GLFWが終了の信号を受けとっていないか確認します。終了の信号を受けとっていればこの関数は<code>true</code>を返し描画ループが終了し、アプリケーションを終了させることができます。<fun><function id='23'>glfwPollEvents</function></fun>はキーボードからの入力やマウスの移動といったイベントが発生していないか確認し、ウィンドウの状態を更新し、コールバックとして登録した関数を呼び出します。<fun><function id='24'>glfwSwapBuffers</function></fun>はカラーバッファを入れ替えます。カラーバッファというのは大きな2次元のバッファで、GLFWウィンドウの各ピクセルの色を保持しており、この描画ループの中での描画および、その結果をスクリーンに出力するのに利用します。 +</p> + +<note> + <strong>Double buffer</strong><br/> + <strong>ダブルバッファ</strong><br/> + When an application draws in a single buffer the resulting image may display flickering issues. This is because the resulting output image is not drawn in an instant, but drawn pixel by pixel and usually from left to right and top to bottom. Because this image is not displayed at an instant to the user while still being rendered to, the result may contain artifacts. To circumvent these issues, windowing applications apply a double buffer for rendering. The <strong>front</strong> buffer contains the final output image that is shown at the screen, while all the rendering commands draw to the <strong>back</strong> buffer. As soon as all the rendering commands are finished we <strong>swap</strong> the back buffer to the front buffer so the image can be displayed without still being rendered to, removing all the aforementioned artifacts. +アプリケーションがバッファをひとつだけ使って画像を表示すると、画面にチラつきが発生します。出力された画像がすぐに表示されるのではなく、一般的には左から右へ、上から下へと1ピクセルずつ表示されるためです。描画処理が完了していない画像がじわじわと表示されるので、不自然な結果になるのです。この問題を回避するためにアプリケーションの描画にダブルバッファを利用します。<strong>フロント</strong>バッファが最終的にスクリーンに表示される出力を保持し、描画処理は<strong>バック</strong>バッファにおいて行われます。描画処理がすべて終了すれば前後のバッファが<strong>交換</strong>されます。こうすることで描画処理が完了していない状態の画像を表示させないようにでき、前述の不具合が解消できます。 +</note> + +<h2>One last thing</h2> +<h2>最後にひとつ</h2> + <p> + As soon as we exit the render loop we would like to properly clean/delete all of GLFW's resources that were allocated. We can do this via the <fun><function id='25'>glfwTerminate</function></fun> function that we call at the end of the <fun>main</fun> function. +描画ループからぬけたあと、確保したメモリを開放するのがいいでしょう。この作業は<fun><function id='25'>glfwTerminate</function></fun>によって<fun>main</fun>関数の最後に行うことができます。 + </p> + +<pre><code> +<function id='25'>glfwTerminate</function>(); +return 0; +</code></pre> + + <p> + This will clean up all the resources and properly exit the application. Now try to compile your application and if everything went well you should see the following output: +この関数により、メモリをすべて開放しアプリケーションを適切に終了させることができます。それではいちどアプリケーションをコンパイルしてみましょう。すべてうまくいっていれば以下のような出力が得られるはずです: + </p> + + <img src="/img/getting-started/hellowindow.png" width="600px" class="clean" alt="Image of GLFW window output as most basic example"/> + +<p> + If it's a very dull and boring black image, you did things right! If you didn't get the right image or you're confused as to how everything fits together, check the full source code <a href="/code_viewer_gh.php?code=src/1.getting_started/1.1.hello_window/hello_window.cpp" target="_blank">here</a> (and if it started flashing different colors, keep reading). 
+真っ黒でつまらない画像が表示された場合ここまでの作業がうまくいっているということです。黒い画面が表示されない場合や、各コードをどのように配置すればいいのかわからない場合は、<a href="/code_viewer_gh.php?code=src/1.getting_started/1.1.hello_window/hello_window.cpp" target="_blank">ここ</a>から完全なソースコードを確認してください(もし違う色の画像が表示された場合、とりあえずこの先を読み進んでください)。 + </p> + + <p> + If you have issues compiling the application, first make sure all your linker options are set correctly and that you properly included the right directories in your IDE (as explained in the previous chapter). Also make sure your code is correct; you can verify it by comparing it with the full source code. +コンパイルが通らなければ、まずは前章で説明した通りリンカのオプションがすべて正しく設定されているか、正しいインクルードディレクトリがIDEに読み込まれているか確認してください。そしてソースコードが正しく記述されているか確かめてください。上にあげた完全なソースコードとあなたのものを比較することで間違いがないか確認できます。 + </p> + +<h2>Input</h2> +<h2>入力</h2> + <p> + We also want to have some form of input control in GLFW and we can achieve this with several of GLFW's input functions. We'll be using GLFW's <fun>glfwGetKey</fun> function that takes the window as input together with a key. The function returns whether this key is currently being pressed. We're creating a <fun>processInput</fun> function to keep all input code organized: +GLFWにおいて入力の処理は、GLFWのいくつかの関数によって行うことができます。ここでは<fun>glfwGetKey</fun>を利用します。この関数はウィンドウと入力されたキーを引数にとり、そのキーが押されているかどうかを返します。<fun>processInput</fun>という関数を作成し、入力の処理を一元化しましょう: +</p> + +<pre><code> +void processInput(GLFWwindow *window) +{ + if(glfwGetKey(window, GLFW_KEY_ESCAPE) == GLFW_PRESS) + glfwSetWindowShouldClose(window, true); +} +</code></pre> + +<p> + Here we check whether the user has pressed the escape key (if it's not pressed, <fun>glfwGetKey</fun> returns <var>GLFW_RELEASE</var>). If the user did press the escape key, we close GLFW by setting its <var>WindowShouldClose</var> property to <code>true</code> using <fun>glfwSetwindowShouldClose</fun>. The next condition check of the main <code>while</code> loop will then fail and the application closes. +ここではユーザーがエスケープキーを押したかどうかを確認しています(押されていなければ<fun>glfwGetKey</fun>は<var>GLFW_RELEASE</var>を返します)。エスケープが押されていた場合、<fun>glfwSetwindowShouldClose</fun>を通して<var>WindowShouldClose</var>を<code>true</code>にすることで、GLFWを終了させるようにします。こうすることで、<code>while</code>が次のループに進むかどうかの判定で偽が返り、ループから抜けだしアプリケーションが終了します。 +</p> + +<p> + We then call <fun>processInput</fun> every iteration of the render loop: +それでは<fun>processInput</fun>を描画ループがまわるたびに呼び出すようにしましょう: +</p> + +<pre><code> +while (!<function id='14'>glfwWindowShouldClose</function>(window)) +{ + processInput(window); + + <function id='24'>glfwSwapBuffers</function>(window); + <function id='23'>glfwPollEvents</function>(); +} +</code></pre> + +<p> + This gives us an easy way to check for specific key presses and react accordingly every <def>frame</def>. An iteration of the render loop is more commonly called a <def>frame</def>. +これが、<def>フレーム</def>ごとになんらかのキーが入力されたかどうかを確認する簡単な方法です。描画ループの一回は一般に<def>フレーム</def>と呼ばれます。 +</p> + +<h2>Rendering</h2> +<h2>描画</h2> +<p> + We want to place all the rendering commands in the render loop, since we want to execute all the rendering commands each iteration or frame of the loop. This would look a bit like this: +各フレームごとに描画処理を行いたいので、描画命令はすべて描画ループの中におきます。以下のような感じです: + </p> + +<pre><code> +// 描画ループ +while(!<function id='14'>glfwWindowShouldClose</function>(window)) +{ + // 入力 + processInput(window); + + // 描画処理 + ... 
+ + // check and call events and swap the buffers + // イベントの処理およびバッファの交換 + <function id='23'>glfwPollEvents</function>(); + <function id='24'>glfwSwapBuffers</function>(window); +} +</code></pre> + + <p> + Just to test if things actually work we want to clear the screen with a color of our choice. At the start of frame we want to clear the screen. Otherwise we would still see the results from the previous frame (this could be the effect you're looking for, but usually you don't). We can clear the screen's color buffer using <fun><function id='10'>glClear</function></fun> where we pass in buffer bits to specify which buffer we would like to clear. The possible bits we can set are <var>GL_COLOR_BUFFER_BIT</var>, <var>GL_DEPTH_BUFFER_BIT</var> and <var>GL_STENCIL_BUFFER_BIT</var>. Right now we only care about the color values so we only clear the color buffer. +ほんとうにうまくいっているのか確認するために、スクリーンの色を変更してみましょう。各フレームのはじめにスクリーンに表示されたものをすべて消します。そうしないと前のフレームで表示されていたものが次のフレームでもみえたままになります(ときにはこのわざとそうしたいこともありますが、多くの場合これは望まない結果でしょう)。スクリーンのカラーバッファは<fun><function id='10'>glClear</function></fun>を用いて削除します。バッファビットを通して消したいバッファを指定しましょう。選べるバッファビットは<var>GL_COLOR_BUFFER_BIT</var>、<var>GL_DEPTH_BUFFER_BIT</var>および<var>GL_STENCIL_BUFFER_BIT</var>です。 + </p> + +<pre><code> +<function id='13'><function id='10'>glClear</function>Color</function>(0.2f, 0.3f, 0.3f, 1.0f); +<function id='10'>glClear</function>(GL_COLOR_BUFFER_BIT); +</code></pre> + + <p> + Note that we also specify the color to clear the screen with using <fun><function id='13'><function id='10'>glClear</function>Color</function></fun>. Whenever we call <fun><function id='10'>glClear</function></fun> and clear the color buffer, the entire color buffer will be filled with the color as configured by <fun><function id='13'><function id='10'>glClear</function>Color</function></fun>. This will result in a dark green-blueish color. +<fun><function id='13'><function id='10'>glClear</function>Color</function></fun>により、バッファを削除したあとの色も指定していることに注意してください。<fun><function id='10'>glClear</function></fun>を呼んでバッファを削除したときは<fun><function id='13'><function id='10'>glClear</function>Color</function></fun>により指定された色でバッファが満たされます。今回は深い青緑を指定しています。 +</p> + +<note> + As you may recall from the <em>OpenGL</em> chapter, the <fun><function id='13'><function id='10'>glClear</function>Color</function></fun> function is a <em>state-setting</em> function and <fun><function id='10'>glClear</function></fun> is a <em>state-using</em> function in that it uses the current state to retrieve the clearing color from. +<em>OpenGL</em>の章で状態遷移関数と状態利用関数について言及しました。ここにでてきた<fun><function id='13'><function id='10'>glClear</function>Color</function></fun>は<em>状態遷移</em>関数で、<fun><function id='10'>glClear</function></fun>は<em>状態利用</em>関数です。<fun><function id='10'>glClear</function></fun>が「バッファ削除後の色」という状態を利用しています。 +</note> + + <img src="/img/getting-started/hellowindow2.png" width="600px" class="clean" alt="Image of GLFW's window creation with <function id='13'><function id='10'>glClear</function>Color</function> defined"/> + + <p> + The full source code of the application can be found <a href="/code_viewer_gh.php?code=src/1.getting_started/1.2.hello_window_clear/hello_window_clear.cpp" target="_blank">here</a>. 
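+  For reference, pieced together from the snippets above, the render loop of this chapter ends up looking roughly like this (a sketch of the same code; see the full source linked here for the complete program):
+参考までに、ここまでのコード片をまとめると、本章の描画ループはおおよそ以下のようになります(完全なプログラムは上記のリンク先のソースコードを参照してください):
+</p>
+
+<pre><code>
+while(!<function id='14'>glfwWindowShouldClose</function>(window))
+{
+    // input / 入力
+    processInput(window);
+
+    // rendering commands / 描画命令
+    <function id='13'><function id='10'>glClear</function>Color</function>(0.2f, 0.3f, 0.3f, 1.0f);
+    <function id='10'>glClear</function>(GL_COLOR_BUFFER_BIT);
+
+    // check and call events and swap the buffers / イベントの処理およびバッファの交換
+    <function id='24'>glfwSwapBuffers</function>(window);
+    <function id='23'>glfwPollEvents</function>();
+}
+</code></pre>
+
+<p>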
+今回作ったアプリケーションの完全なソースコードは<a href="/code_viewer_gh.php?code=src/1.getting_started/1.2.hello_window_clear/hello_window_clear.cpp" target="_blank">こちら</a>にあります。 + </p> + +<p> + So right now we got everything ready to fill the render loop with lots of rendering calls, but that's for the <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">next</a> chapter. I think we've been rambling long enough here. +描画ループにおいて様々なものを描くうえで必要な準備はすべて整いました。 <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">次</a>の章では実際に描画しましょう。 +</p> + + </div> +</body> +</html> diff --git a/Getting-started/OpenGL.html b/Getting-started/OpenGL.html @@ -0,0 +1,160 @@ +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8" /> + <link rel="stylesheet" href="../static/style.css" /> +</head> +<body> + <div id="content"> + <h1 id="content-title">OpenGL</h1> +<h1 id="content-url" style='display:none;'>Getting-started/OpenGL</h1> +<p>学習をはじめる前にOpenGLとは何かを確認しておきましょう。OpenGLとはグラフィックや画像を操作するための関数を多数提供するAPI(<def>Application Programming Interface</def>)だと考えられています。しかしOpenGLそのものはAPIではなく、<a href="http://www.khronos.org/" target="_blank">Khronos Group</a>により開発及びメンテナンスされている仕様です。</p> + +<img src="/img/getting-started/opengl.jpg" class="right" alt="Image of OpenGL's logo"/> + +<p> +OpenGLの仕様は各関数がどのようにふるまい、どのような結果を残し、あるいどんな出力を返すのかを正確に規定しています。これらの関数をどのようにして仕様どおりに機能させるかは、開発者の<em>実装</em>に任されています。OpenGLの仕様は実装についての詳細を規定していないので、得られる結果が仕様通りである(つまりユーザーから見て同じである)限り、様々な実装が可能です。 + +<p> +OpenGLのライブラリを開発しているのは、おもにグラフィックカードの製造者です。あなたが購入したグラフィックカードはその製品(あるいはそのシリーズ)に特化したOpenGLのバージョンをサポートしています。AppleのシステムではOpenGLのライブラリはApple自身によりメンテナンスされており、Linuxにおいてはグラフィックカードの製造者によるライブラリや、趣味の開発者によるものもあります。つまりOpenGLがおかしな挙動をする場合はたいていグラフィックカードの製造者(またはライブラリの開発に関わった誰か)のミスです。 +</p> + +<note> +ほとんどの実装はグラフィックカードの製造者によるものなので、バグが見付かった場合はビデオカードのドライバをアップデートすることでたいてい解決します。最新のドライバは最新のOpenGL[の実装]をサポートしているからです。グラフィックドライバをときどきアップデートするべき理由のひとつです。 +</note> + +<p> +KhronosはすべてのOpenGLのバージョンの仕様に関するドキュメントを公開しています。興味がある読者はこれからみなさんが利用するバージョン3.3の仕様を<a href="https://www.opengl.org/registry/doc/glspec33.core.20100311.withchanges.pdf" target="_blank">ここ</a>で確認できます。このページはOpenGLの詳細を理解するのに非常に役立ちます(実装ではなく結果ばかりが記述されていることを確認してください)。OpenGLの仕様は関数の<strong>正確な</strong>ふるまいを確認する文献としても重要です。 +</p> + +<h2>Core-profileとImmediate mode</h2> +<p> +古くは、OpenGLを利用するとは<def>immediate mode</def>(<def>fixed function pipeline</def>とも)で開発することでした。immediate modeはグラフィックを描写する簡単な方法です。OpenGLのほとんどの機能はライブラリのなかに隠され、開発者は演算の方法にまで深くは踏み込めませんでした。開発者は柔軟性を求めるようになり、それにともない仕様のほうも柔軟になりました。開発者がグラフィックの描画方法にに関してより多くのことを操作できるようになったのです。immediate modeは理解し使用するのは非常に簡単ですが、効率はかなり悪いです。そのため仕様はバージョン3.2からimmediate modeを廃止し<def>core-profile</def> modeを推奨しはじめました。core-profile modeでは廃止された古い機能がすべてとりのぞかれています。 +</p> + +<p> +core-profileを使うには新しい方法にしたがう必要があります。廃止された機能を使おうとするとOpenGLがエラーを出して描画を停止します。新しい方法の利点は柔軟性と効率のよさです。ただしその一方で習得するのが難しいです。immediate modeはOpenGLがおこなっている<strong>実際の</strong>操作の多くを抽象化しているので習得するのが簡単な一方、OpenGLが実際になにをおこなっているかを把握するのは難しいです。新しい方法ではOpenGLやグラフィックプログラミングについての理解が必要であるのですこし難解ではあるものの、柔軟性や効率のよさ、そして一番重要なことですが、グラフィックプログラミングに関するよりよい理解が得られます。 +</p> + +<p> +そのためこの本ではOpenGLバージョン3.3のcore-profileについて重点的に解説します。理解するのは大変ですが、努力する価値は十分にあります。 +</p> + +<p> +今日、OpenGLのもっと新しいバージョンが利用できます(執筆時点での最新は4.6)。にも関わらずバージョン3.3を学習する意味があるのかと疑問に思うかもしれませんが、その答えは比較的簡単です。バージョン3.3より新しいものはすべて、このバージョンに便利な機能を追加したものであり、OpenGLの核となる部分は変更されません。新しいバージョンのものは以前と同じことをするうえでより効率がいい、あるいはより使いやすい方法を導入しただけです。考えかたややりかたはバージョン3.3以降かわらないので、このバージョンを学ぶことが有効なのです。バージョン3.3でOpenGLに慣れ親しめば、より新しいバージョンの機能を利用することは簡単です。 +</p> + +<warning> 
+最新のOpenGLを使って開発したアプリケーションは、最新のグラフィックカード以外では動作しません。そのためほとんどの開発者は古いバージョンのOpenGLで開発し、新しいバージョンの機能は対応するグラフィックカードへのオプションとして組込みます。 +</warning> + +<p> +一部の章ではことわったうえで新しい機能を利用することがあります。 +</p> + +<h2>拡張機能</h2> +<p> +OpenGLの大きな特徴に、拡張機能のサポートがあります。グラフィックカードの製造者が新しい技術を導入したり、レンダリングを大幅に最適化した場合、それらの機能はドライバに実装されてる<def>拡張機能</def>によって提供されることが多いです。アプリケーションが動作しているハードウェアがそういった拡張機能をサポートしていれば、開発者はそれらを利用し先進的、あるいは効率のいい方法でグラフィックを描画することができます。このような最新の機能は、グラフィックカードによりサポートされているかを確認するだけで利用できるので、開発者はOpenGLにその機能が組込まれるのを待つ必要がありません。ある拡張機能が人気になり、あるいは便利であれば将来のOpenGLのバージョンに組込まれることになります。 +</p> + +<p> +上記のようなグラフィックカードに特有の(あるいはOpenGL自身の)拡張機能を利用する前に、その機能が利用可能かどうかを確かめる必要があります。こうすることで、拡張機能が利用可能かどうかに応じてより効率のよいプログラムを書くことができます: +</p> + +<pre><code> +if(GL_ARB_extension_name) +{ + // 最新の機能を利用したクールなコード +} +else +{ + // 拡張機能がサポートされていない場合: 従来の方法を利用したコード +}。 +</code></pre> +<p> +OpenGLバージョン3.3において拡張機能はほとんど必要ありませんが、利用する場合は解説を付けます。 +</p> + +<h2>状態機械</h2> +<p> +OpenGLはひとつの大きな状態機械だといえます: 各時点におけるOpenGLの動作を規定する変数の集まりだという意味です。OpenGLの状態は<def>コンテクスト</def>と呼ばれます。OpenGLでは、オプションの設定やバッファの操作により状態を変化させ、その時点のコンテクストを利用してレンダリングをおこないます。 +</p> + +<p> +例えば描画するものを三角形から直線に変更するためには、コンテクストの変数のうち描画する図形を規定するものを変更することでOpenGLの状態を変化させることになります。そのようにコンテクストを変更すれば、以降の描画命令では三角形ではなく直線が描かれます。 +</p> + +<p> +OpenGLでの開発において、コンテクストを変更する<def>状態遷移</def>関数や、現在の状態にもとづいてなんらかの操作をおこなう<def>状態利用</def>関数を利用することになります。OpenGLが大きな状態機械であるということを頭にいれておけば、さまざまな機能を理解するのが楽になります。 +</p> + +<h2>オブジェクト</h2> +<p> +OpenGLのライブラリはC言語で記述されています。ほかの言語からの利用も可能ですが、核となる部分はC言語のライブラリのままです。C言語の言語構造は他の高水準の言語にうまく翻訳できないので、OpenGLはいくつかの抽象的な概念を念頭に開発されました。<def>オブジェクト</def>の概念がそのひとつです。 +</p> + +<p> +OpenGLにおいて<def>オブジェクト</def>とは、OpenGLの状態をあらわしたオプションのあつまりです。たとえばウィンドウの描画にかかる設定をまとめたオブジェクトを作ることができます。このオブジェクトを通して、ウィンドウの大きさや表示できる色の数等を設定することができます。オブジェクトはC言語の構造体のようなものとしてとらえることができます: +</p> + +<pre><code> +struct object_name { + float option1; + int option2; + char[] name; +};。 +</code></pre> + +<p> +オブジェクトはたいてい以下のようなかたちで利用することになります(前半の部分はOpenGLのコンテクストを大きな構造体として記述したものです): +</p> + +<pre><code> +// OpenGLの状態 +struct OpenGL_Context { + ... + object_name* object_Window_Target; + ... 
+}; +</code></pre> + +<pre><code> +// オブジェクトの作成 +unsigned int objectId = 0; +glGenObject(1, &amp;objectId); +// コンテクストに対してオブジェクトを紐付け +glBindObject(GL_WINDOW_TARGET, objectId); +// 現在GL_WINDOW_TARGETに紐付いているオブジェクトのオプションを設定 +glSetObjectOption(GL_WINDOW_TARGET, GL_OPTION_WINDOW_WIDTH, 800); +glSetObjectOption(GL_WINDOW_TARGET, GL_OPTION_WINDOW_HEIGHT, 600); +// コンテクストの紐付けを初期状態に戻す +glBindObject(GL_WINDOW_TARGET, 0);。 +</code></pre> + +<p> +上のような記述はOpenGLで開発をしているとよくみかけます。まずオブジェクトを作成しそれを参照するためのIDを記憶しておきます(オブジェクトの実際のデータは開発者からは見えません)。つぎに記憶したIDによりオブジェクトをコンテクストのうち設定したいものに紐付けます(上の例では<var>GL_WINDOW_TARGET</var>と紐付けています)。そしてウィンドウのオプションを設定し、最後に<var>GL_WINDOW_TARGET</var>と紐付いたオブジェクトのIDを<code>0</code>にすることで、オブジェクトとコンテクストの紐付けを解除します。設定したオプションは<var>objectID</var>によって参照されるオブジェクトに保持され、オブジェクトを<var>GL_WINDOW_TARGET</var>と紐付けることでいつでも復元できます。 +</p> + +<warning> +ここまでのサンプルコードはOpenGLの操作をおおまかに記述したものです。以降では実際に動作するサンプルコードを多く提供します。 +</warning> + +<p> +オブジェクトを利用する利点は、複数のオブジェクトを定義、設定しておけば、OpenGLの状態を操作するときにそれらのオブジェクトのなかから用途にあわせたものを選べることです。例えば家やキャラクター等の3Dモデルのデータを保持した複数のオブジェクトを定義しておけば、各オブジェクトを紐付けるだけで、そのオブジェクトが保持している3Dモデルが描画できます(最初に各3Dモデルに対してオブジェクトを作成し、必要なオプションを設定すればいいのです)。こうすることでたくさんのモデルを描画するときにいちいちオプションを設定しなおさなくてすみます。 +</p> + +<h2>さあ、始めましょう</h2> +<p> +ここまでOpenGLについてざっくりと学びました。OpenGLが仕様であり、ライブラリであること。OpenGLの内部がどのように機能するのか、またOpenGLをどのように扱うのか。すべてを理解できなくても心配はいりません。この本では各段階においてOpenGLを理解するのに十分な例が提示されます。 +</p> + +<h2>参考</h2> +<ul> + <li><a href="https://www.opengl.org/" target="_blank">opengl.org</a>: OpenGLの公式ウェブサイト。</li> + <li><a href="https://www.opengl.org/registry/" target="_blank">OpenGL registry</a>: OpenGLのすべてのバージョンの仕様と拡張機能が確認できるサイト。</li> +</ul> + + + </div> +</body> +</html> diff --git a/Getting-started/Review.html b/Getting-started/Review.html @@ -0,0 +1,316 @@ + + +<!DOCTYPE html> +<html lang="en"> +<head> + <meta charset="utf-8"/> + <title>LearnOpenGL - Review</title> <!--<title>Learn OpenGL, extensive tutorial resource for learning Modern OpenGL</title>--> + <link rel="shortcut icon" type="image/ico" href="/favicon.ico" /> + <meta name="description" content="Learn OpenGL . com provides good and clear modern 3.3+ OpenGL tutorials with clear examples. 
A great resource to learn modern OpenGL aimed at beginners."> + <meta name="fragment" content="!"> + <script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + + ga('create', 'UA-51879160-1', 'learnopengl.com'); + ga('send', 'pageview'); + + </script> + <!--<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>--> + <script> + (adsbygoogle = window.adsbygoogle || []).push({ + google_ad_client: "ca-pub-7855791439695850", + enable_page_level_ads: true + }); + </script> + <script async='async' src='https://www.googletagservices.com/tag/js/gpt.js'></script> + <script> + var googletag = googletag || {}; + googletag.cmd = googletag.cmd || []; + </script> + <script> + googletag.cmd.push(function() { + googletag.defineSlot('/8491498/learnopengl_video', [300, 225], 'div-gpt-ad-1540574378241-0').addService(googletag.pubads()); + googletag.pubads().enableSingleRequest(); + googletag.pubads().collapseEmptyDivs(); + googletag.enableServices(); + }); + </script> + <script type="text/javascript" src="https://d31vxm9ubutrmw.cloudfront.net/static/js/1681.js"></script> + <script src="/js/jquery-1.11.0.min.js"></script> + <script src="/js/hoverintent.js"></script> + <link rel="stylesheet" type="text/css" href="/layout.css"> + <link rel="stylesheet" type="text/css" href="/js/styles/obsidian.css"> + <script src="/js/highlight.pack.js"></script> + <script src="/js/functions.js"></script> + <script type="text/javascript" src="/js/mathjax/MathJax.js?config=TeX-AMS_HTML"></script> + <script> + // Has to be loaded last due to content bug + MathJax.Hub.Config({ + TeX: { equationNumbers: { autoNumber: "AMS" } } + }); + </script> + <script>hljs.initHighlightingOnLoad();</script> + <script> + $(document).ready(function() { + // check if user visited from the old # based urls, re-direct to ?p= form + if(window.location.hash) + { + var name = window.location.hash.substring(2); + // name = name.replace(/-/g," "); + var index = name.indexOf('#'); // Remove any hash fragments from the url (Disquss adds hash fragments for comments, but results in 404 pages) + if(index >= 0) + name = name.substring(0, index); + + window.location.href = "https://learnopengl.com/" + name; + } else { + // Check if data has been succesfully loaded, if so: change title bar as ajax hash fragment + var title = $('#content-url').text(); + + // Refresh syntax highlighting + // $('pre').each(function(i, e) {hljs.highlightBlock(e)}); + + // Reset DISQUS + // if(title == '/dev/') + // title = ''; + // alert('hoi'); + + // Adjust ads for correct bottom positioning based on content size + window.setTimeout(function() { + AdPositioning(); + }, 3000); + + + // set API resets after time-out (once content is properly loaded) + window.setTimeout(function() { + MathJax.Hub.Queue(["Typeset",MathJax.Hub]); + MathJax.Hub.Queue(["resetEquationNumbers", MathJax.InputJax.TeX]); + + var page_url = title == "" ? 
"http://www.learnopengl.com/" : "http://www.learnopengl.com/" + title; + if(typeof DISQUS !== 'undefined') { + DISQUS.reset({ + reload: true, + config: function () { + this.page.identifier = title; + this.page.url = page_url; + } + }); + $('#disqus_thread').show(); + } + // Refresh callbacks on <function> tags + SetFunctionTagCallbacks(); + }, 1000); + + // Zet ook de juiste button op 'selected' + $('#nav li span, #nav li a').removeClass('selected'); + if(title != '') + { + $('#nav li[id=\'' + title + '\']').children('span, a').addClass('selected'); + } + // En open menu waar nodig + var parents = $('#nav span.selected, #nav a.selected').parents('li').children('span.closed, a.closed'); + var index = 0; + for(index = parents.length - 1; index >= 0; index--) + { + + var id = $(parents[index]).attr("id").replace( /^\D+/g, ''); + MenuClick(id, false); + } + + } + }); + // var initialized = false; + // window.onpopstate = function() { + // if(initialized) + // LoadPage(); + // else + // initialized = true; + // }; + + // Set up DISQUS + // $(document).ready(function() { + var disqus_shortname = 'learnopengl'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + // }); + </script> +</head> +<body> +<a href="https://learnopengl.com"> +<div id="header"> +</div> +</a> + +<div id="supercontainer"> + <!-- 728x90/320x50 --> + <div id="header_ad"> + <div id="waldo-tag-6194"></div> + </div> + <div id="rightad_container"> + <div id="rightad"> + <!-- /8491498/learnopengl_video --> + <!--<div id='div-gpt-ad-1540574378241-0' style='height:225px; width:300px;'> + <script> + googletag.cmd.push(function() { googletag.display('div-gpt-ad-1540574378241-0'); }); + </script> + </div> + <br/>--> + + <div id="waldo-tag-1715"></div> + </div> + + <div id="admessage"> + If you're running AdBlock, please consider whitelisting this site if you'd like to support LearnOpenGL; and no worries, I won't be mad if you don't :) + <!--<br/><br/> + Also, check out this little local multiplayer-only game I've made: <a href="https://store.steampowered.com/app/983590/Tank_Blazers/" target="_blank">Tank Blazers</a>. 
+ <br/> + <a href="https://store.steampowered.com/app/983590/Tank_Blazers" target="_blank"><img src="/img/tank_blazers.jpg" style="width:278px; margin-top: 9px; margin-left: -3px;"/></a>--> + </div> + + <div id="rightonethirdad"> + <div id="waldo-tag-2246"></div> + </div> + + <div id="rightbottomad"> + <div id="waldo-tag-2247"></div> + </div> + </div> + <div id="container"> + <div id="loading"></div> +<script> +$(document).ready(function() { +$('#menu-item4').mousedown(function() { MenuClick(4, true) }); +$('#menu-item48').mousedown(function() { MenuClick(48, true) }); +$('#menu-item56').mousedown(function() { MenuClick(56, true) }); +$('#menu-item63').mousedown(function() { MenuClick(63, true) }); +$('#menu-item100').mousedown(function() { MenuClick(100, true) }); +$('#menu-item102').mousedown(function() { MenuClick(102, true) }); +$('#menu-item113').mousedown(function() { MenuClick(113, true) }); +$('#menu-item116').mousedown(function() { MenuClick(116, true) }); +$('#menu-item78').mousedown(function() { MenuClick(78, true) }); +$('#menu-item81').mousedown(function() { MenuClick(81, true) }); +$('#menu-item85').mousedown(function() { MenuClick(85, true) }); +$('#menu-item125').mousedown(function() { MenuClick(125, true) }); +$('#menu-item128').mousedown(function() { MenuClick(128, true) }); +$('#menu-item129').mousedown(function() { MenuClick(129, true) }); +$('#menu-item133').mousedown(function() { MenuClick(133, true) }); +$('#menu-item134').mousedown(function() { MenuClick(134, true) }); +}); +</script> + <div id="nav"> + <div id="social"> + <a href="https://github.com/JoeyDeVries/LearnOpenGL" target="_blank"> + <img src="/img/github.png" class="social_ico"> + </a> + <!-- <a href="https://www.facebook.com/Learnopengl-2199631333595544/" target="_blank"> + <img src="/img/facebook.png" class="social_ico"> + </a>--> + <a href="https://twitter.com/JoeyDeVriez" target="_blank"> + <img src="/img/twitter.png" class="social_ico"> + </a> + + </div> + <img src='img/nav-button_bottom-arrow.png' style='display: none'><ol><li id='Introduction'><a id="menu-item1" href="https://learnopengl.com/Introduction">Introduction </a></li><li id='Getting-started'><span id="menu-item4" class="closed">Getting started </span><ol id="menu-items-of4" style="display:none;"><li id='Getting-started/OpenGL'><a id="menu-item49" href="https://learnopengl.com/Getting-started/OpenGL">OpenGL </a></li><li id='Getting-started/Creating-a-window'><a id="menu-item5" href="https://learnopengl.com/Getting-started/Creating-a-window">Creating a window </a></li><li id='Getting-started/Hello-Window'><a id="menu-item6" href="https://learnopengl.com/Getting-started/Hello-Window">Hello Window </a></li><li id='Getting-started/Hello-Triangle'><a id="menu-item38" href="https://learnopengl.com/Getting-started/Hello-Triangle">Hello Triangle </a></li><li id='Getting-started/Shaders'><a id="menu-item39" href="https://learnopengl.com/Getting-started/Shaders">Shaders </a></li><li id='Getting-started/Textures'><a id="menu-item40" href="https://learnopengl.com/Getting-started/Textures">Textures </a></li><li id='Getting-started/Transformations'><a id="menu-item43" href="https://learnopengl.com/Getting-started/Transformations">Transformations </a></li><li id='Getting-started/Coordinate-Systems'><a id="menu-item44" href="https://learnopengl.com/Getting-started/Coordinate-Systems">Coordinate Systems </a></li><li id='Getting-started/Camera'><a id="menu-item47" href="https://learnopengl.com/Getting-started/Camera">Camera </a></li><li 
id='Getting-started/Review'><a id="menu-item50" href="https://learnopengl.com/Getting-started/Review">Review </a></li></ol></li><li id='Lighting'><span id="menu-item48" class="closed">Lighting </span><ol id="menu-items-of48" style="display:none;"><li id='Lighting/Colors'><a id="menu-item51" href="https://learnopengl.com/Lighting/Colors">Colors </a></li><li id='Lighting/Basic-Lighting'><a id="menu-item52" href="https://learnopengl.com/Lighting/Basic-Lighting">Basic Lighting </a></li><li id='Lighting/Materials'><a id="menu-item53" href="https://learnopengl.com/Lighting/Materials">Materials </a></li><li id='Lighting/Lighting-maps'><a id="menu-item54" href="https://learnopengl.com/Lighting/Lighting-maps">Lighting maps </a></li><li id='Lighting/Light-casters'><a id="menu-item55" href="https://learnopengl.com/Lighting/Light-casters">Light casters </a></li><li id='Lighting/Multiple-lights'><a id="menu-item58" href="https://learnopengl.com/Lighting/Multiple-lights">Multiple lights </a></li><li id='Lighting/Review'><a id="menu-item57" href="https://learnopengl.com/Lighting/Review">Review </a></li></ol></li><li id='Model-Loading'><span id="menu-item56" class="closed">Model Loading </span><ol id="menu-items-of56" style="display:none;"><li id='Model-Loading/Assimp'><a id="menu-item59" href="https://learnopengl.com/Model-Loading/Assimp">Assimp </a></li><li id='Model-Loading/Mesh'><a id="menu-item60" href="https://learnopengl.com/Model-Loading/Mesh">Mesh </a></li><li id='Model-Loading/Model'><a id="menu-item61" href="https://learnopengl.com/Model-Loading/Model">Model </a></li></ol></li><li id='Advanced-OpenGL'><span id="menu-item63" class="closed">Advanced OpenGL </span><ol id="menu-items-of63" style="display:none;"><li id='Advanced-OpenGL/Depth-testing'><a id="menu-item72" href="https://learnopengl.com/Advanced-OpenGL/Depth-testing">Depth testing </a></li><li id='Advanced-OpenGL/Stencil-testing'><a id="menu-item73" href="https://learnopengl.com/Advanced-OpenGL/Stencil-testing">Stencil testing </a></li><li id='Advanced-OpenGL/Blending'><a id="menu-item74" href="https://learnopengl.com/Advanced-OpenGL/Blending">Blending </a></li><li id='Advanced-OpenGL/Face-culling'><a id="menu-item77" href="https://learnopengl.com/Advanced-OpenGL/Face-culling">Face culling </a></li><li id='Advanced-OpenGL/Framebuffers'><a id="menu-item65" href="https://learnopengl.com/Advanced-OpenGL/Framebuffers">Framebuffers </a></li><li id='Advanced-OpenGL/Cubemaps'><a id="menu-item66" href="https://learnopengl.com/Advanced-OpenGL/Cubemaps">Cubemaps </a></li><li id='Advanced-OpenGL/Advanced-Data'><a id="menu-item69" href="https://learnopengl.com/Advanced-OpenGL/Advanced-Data">Advanced Data </a></li><li id='Advanced-OpenGL/Advanced-GLSL'><a id="menu-item67" href="https://learnopengl.com/Advanced-OpenGL/Advanced-GLSL">Advanced GLSL </a></li><li id='Advanced-OpenGL/Geometry-Shader'><a id="menu-item68" href="https://learnopengl.com/Advanced-OpenGL/Geometry-Shader">Geometry Shader </a></li><li id='Advanced-OpenGL/Instancing'><a id="menu-item70" href="https://learnopengl.com/Advanced-OpenGL/Instancing">Instancing </a></li><li id='Advanced-OpenGL/Anti-Aliasing'><a id="menu-item75" href="https://learnopengl.com/Advanced-OpenGL/Anti-Aliasing">Anti Aliasing </a></li></ol></li><li id='Advanced-Lighting'><span id="menu-item100" class="closed">Advanced Lighting </span><ol id="menu-items-of100" style="display:none;"><li id='Advanced-Lighting/Advanced-Lighting'><a id="menu-item101" 
href="https://learnopengl.com/Advanced-Lighting/Advanced-Lighting">Advanced Lighting </a></li><li id='Advanced-Lighting/Gamma-Correction'><a id="menu-item110" href="https://learnopengl.com/Advanced-Lighting/Gamma-Correction">Gamma Correction </a></li><li id='Advanced-Lighting/Shadows'><span id="menu-item102" class="closed">Shadows </span><ol id="menu-items-of102" style="display:none;"><li id='Advanced-Lighting/Shadows/Shadow-Mapping'><a id="menu-item103" href="https://learnopengl.com/Advanced-Lighting/Shadows/Shadow-Mapping">Shadow Mapping </a></li><li id='Advanced-Lighting/Shadows/Point-Shadows'><a id="menu-item104" href="https://learnopengl.com/Advanced-Lighting/Shadows/Point-Shadows">Point Shadows </a></li></ol></li><li id='Advanced-Lighting/Normal-Mapping'><a id="menu-item106" href="https://learnopengl.com/Advanced-Lighting/Normal-Mapping">Normal Mapping </a></li><li id='Advanced-Lighting/Parallax-Mapping'><a id="menu-item107" href="https://learnopengl.com/Advanced-Lighting/Parallax-Mapping">Parallax Mapping </a></li><li id='Advanced-Lighting/HDR'><a id="menu-item111" href="https://learnopengl.com/Advanced-Lighting/HDR">HDR </a></li><li id='Advanced-Lighting/Bloom'><a id="menu-item112" href="https://learnopengl.com/Advanced-Lighting/Bloom">Bloom </a></li><li id='Advanced-Lighting/Deferred-Shading'><a id="menu-item108" href="https://learnopengl.com/Advanced-Lighting/Deferred-Shading">Deferred Shading </a></li><li id='Advanced-Lighting/SSAO'><a id="menu-item109" href="https://learnopengl.com/Advanced-Lighting/SSAO">SSAO </a></li></ol></li><li id='PBR'><span id="menu-item113" class="closed">PBR </span><ol id="menu-items-of113" style="display:none;"><li id='PBR/Theory'><a id="menu-item114" href="https://learnopengl.com/PBR/Theory">Theory </a></li><li id='PBR/Lighting'><a id="menu-item115" href="https://learnopengl.com/PBR/Lighting">Lighting </a></li><li id='PBR/IBL'><span id="menu-item116" class="closed">IBL </span><ol id="menu-items-of116" style="display:none;"><li id='PBR/IBL/Diffuse-irradiance'><a id="menu-item117" href="https://learnopengl.com/PBR/IBL/Diffuse-irradiance">Diffuse irradiance </a></li><li id='PBR/IBL/Specular-IBL'><a id="menu-item118" href="https://learnopengl.com/PBR/IBL/Specular-IBL">Specular IBL </a></li></ol></li></ol></li><li id='In-Practice'><span id="menu-item78" class="closed">In Practice </span><ol id="menu-items-of78" style="display:none;"><li id='In-Practice/Debugging'><a id="menu-item79" href="https://learnopengl.com/In-Practice/Debugging">Debugging </a></li><li id='In-Practice/Text-Rendering'><a id="menu-item80" href="https://learnopengl.com/In-Practice/Text-Rendering">Text Rendering </a></li><li id='In-Practice/2D-Game'><span id="menu-item81" class="closed">2D Game </span><ol id="menu-items-of81" style="display:none;"><li id='In-Practice/2D-Game/Breakout'><a id="menu-item82" href="https://learnopengl.com/In-Practice/2D-Game/Breakout">Breakout </a></li><li id='In-Practice/2D-Game/Setting-up'><a id="menu-item88" href="https://learnopengl.com/In-Practice/2D-Game/Setting-up">Setting up </a></li><li id='In-Practice/2D-Game/Rendering-Sprites'><a id="menu-item83" href="https://learnopengl.com/In-Practice/2D-Game/Rendering-Sprites">Rendering Sprites </a></li><li id='In-Practice/2D-Game/Levels'><a id="menu-item84" href="https://learnopengl.com/In-Practice/2D-Game/Levels">Levels </a></li><li id='In-Practice/2D-Game/Collisions'><span id="menu-item85" class="closed">Collisions </span><ol id="menu-items-of85" style="display:none;"><li 
id='In-Practice/2D-Game/Collisions/Ball'><a id="menu-item95" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Ball">Ball </a></li><li id='In-Practice/2D-Game/Collisions/Collision-detection'><a id="menu-item96" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-detection">Collision detection </a></li><li id='In-Practice/2D-Game/Collisions/Collision-resolution'><a id="menu-item97" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-resolution">Collision resolution </a></li></ol></li><li id='In-Practice/2D-Game/Particles'><a id="menu-item89" href="https://learnopengl.com/In-Practice/2D-Game/Particles">Particles </a></li><li id='In-Practice/2D-Game/Postprocessing'><a id="menu-item90" href="https://learnopengl.com/In-Practice/2D-Game/Postprocessing">Postprocessing </a></li><li id='In-Practice/2D-Game/Powerups'><a id="menu-item91" href="https://learnopengl.com/In-Practice/2D-Game/Powerups">Powerups </a></li><li id='In-Practice/2D-Game/Audio'><a id="menu-item94" href="https://learnopengl.com/In-Practice/2D-Game/Audio">Audio </a></li><li id='In-Practice/2D-Game/Render-text'><a id="menu-item92" href="https://learnopengl.com/In-Practice/2D-Game/Render-text">Render text </a></li><li id='In-Practice/2D-Game/Final-thoughts'><a id="menu-item93" href="https://learnopengl.com/In-Practice/2D-Game/Final-thoughts">Final thoughts </a></li></ol></li></ol></li><li id='Guest-Articles'><span id="menu-item125" class="closed">Guest Articles </span><ol id="menu-items-of125" style="display:none;"><li id='Guest-Articles/How-to-publish'><a id="menu-item126" href="https://learnopengl.com/Guest-Articles/How-to-publish">How to publish </a></li><li id='Guest-Articles/2020'><span id="menu-item128" class="closed">2020 </span><ol id="menu-items-of128" style="display:none;"><li id='Guest-Articles/2020/OIT'><span id="menu-item129" class="closed">OIT </span><ol id="menu-items-of129" style="display:none;"><li id='Guest-Articles/2020/OIT/Introduction'><a id="menu-item130" href="https://learnopengl.com/Guest-Articles/2020/OIT/Introduction">Introduction </a></li><li id='Guest-Articles/2020/OIT/Weighted-Blended'><a id="menu-item132" href="https://learnopengl.com/Guest-Articles/2020/OIT/Weighted-Blended">Weighted Blended </a></li></ol></li><li id='Guest-Articles/2020/Skeletal-Animation'><a id="menu-item131" href="https://learnopengl.com/Guest-Articles/2020/Skeletal-Animation">Skeletal Animation </a></li></ol></li><li id='Guest-Articles/2021'><span id="menu-item133" class="closed">2021 </span><ol id="menu-items-of133" style="display:none;"><li id='Guest-Articles/2021/Scene'><span id="menu-item134" class="closed">Scene </span><ol id="menu-items-of134" style="display:none;"><li id='Guest-Articles/2021/Scene/Scene-Graph'><a id="menu-item135" href="https://learnopengl.com/Guest-Articles/2021/Scene/Scene-Graph">Scene Graph </a></li><li id='Guest-Articles/2021/Scene/Frustum-Culling'><a id="menu-item136" href="https://learnopengl.com/Guest-Articles/2021/Scene/Frustum-Culling">Frustum Culling </a></li></ol></li></ol></li></ol></li><li id='Code-repository'><a id="menu-item99" href="https://learnopengl.com/Code-repository">Code repository </a></li><li id='Translations'><a id="menu-item119" href="https://learnopengl.com/Translations">Translations </a></li><li id='About'><a id="menu-item2" href="https://learnopengl.com/About">About </a></li></ol> <div id="menu_book"> + <a href="https://geni.us/learnopengl" target="_blank"><img src="/book/below_menu.png" class="clean"/></a> + </div> + <div 
id="donate"> + <a href="https://www.paypal.me/learnopengl/" target="_blank"> + <div id="donate_img"></div> + <img style="display: none" src="/img/donate_button_hover.png"/> + <!--<img id="donate_img" src="img/patreon.png"/>--> + </a> + <!--<div id="alipay"> + <img style="width: 150px;" class="clean" src="/img/alipay_logo.png"/> + <img style="width: 150px; margin-top: 5px" src="/img/alipay.png"/> + </div>--> + </div> + <div class="btc"> + <h3>BTC</h3> + <p> + 1CLGKgmBSuYJ1nnvDGAepVTKNNDpUjfpRa + </p> + <img src="/img/btc_qr.png"/> + </div> + <div class="btc"> + <h3>ETH/ERC20</h3> + <p> + 0x1de59bd9e52521a46309474f8372531533bd7c43 + </p> + <img src="/img/erc20_qr.png"/> + </div> + <div id="ad"> + <!--<div id="waldo-tag-1684"></div>--> + </div> + + <div id="lefttwothirdad"> + <div id="waldo-tag-2245"></div> + </div> + </div> + + <div id="content"> + <h1 id="content-title">Review</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Review</h1> +<p> + Congratulations on reaching the end of the <em>Getting started</em> chapters. By now you should be able to create a window, create and compile shaders, send vertex data to your shaders via buffer objects or uniforms, draw objects, use textures, understand vectors and matrices and combine all that knowledge to create a full 3D scene with a camera to play around with.</p> + +<p> + Phew, there is a lot that we learned these last few chapters. Try to play around with what you learned, experiment a bit or come up with your own ideas and solutions to some of the problems. As soon as you feel you got the hang of all the materials we've discussed it's time to move on to the <a href="https://learnopengl.com/Lighting/Colors" target="_blank">next</a> Lighting chapters. +</p> + +<h2>Glossary</h2> +<p> + <ul> + <li><code>OpenGL</code>: a formal specification of a graphics API that defines the layout and output of each function. </li> + <li><code>GLAD</code>: an extension loading library that loads and sets all OpenGL's function pointers for us so we can use all (modern) OpenGL's functions. </li> + <li><code>Viewport</code>: the 2D window region where we render to. </li> + <li><code>Graphics Pipeline</code>: the entire process vertices have to walk through before ending up as one or more pixels on the screen. </li> + <li><code>Shader</code>: a small program that runs on the graphics card. Several stages of the graphics pipeline can use user-made shaders to replace existing functionality.</li> + <li><code>Vertex</code>: a collection of data that represent a single point. </li> + <li><code>Normalized Device Coordinates</code>: the coordinate system your vertices end up in after perspective division is performed on clip coordinates. All vertex positions in NDC between <code>-1.0</code> and <code>1.0</code> will not be discarded or clipped and end up visible. </li> + <li><code>Vertex Buffer Object</code>: a buffer object that allocates memory on the GPU and stores all the vertex data there for the graphics card to use. </li> + <li><code>Vertex Array Object</code>: stores buffer and vertex attribute state information.</li> + <li><code>Element Buffer Object</code>: a buffer object that stores indices on the GPU for indexed drawing. </li> + <li><code>Uniform</code>: a special type of GLSL variable that is global (each shader in a shader program can access this uniform variable) and only has to be set once. 
<h2>Glossary</h2>
<p>
  <ul>
    <li><code>OpenGL</code>: a formal specification of a graphics API that defines the layout and output of each function. </li>
    <li><code>GLAD</code>: an extension loading library that loads and sets all OpenGL's function pointers for us so we can use all (modern) OpenGL's functions. </li>
    <li><code>Viewport</code>: the 2D window region that we render to. </li>
    <li><code>Graphics Pipeline</code>: the entire process vertices have to walk through before ending up as one or more pixels on the screen. </li>
    <li><code>Shader</code>: a small program that runs on the graphics card. Several stages of the graphics pipeline can use user-made shaders to replace existing functionality. </li>
    <li><code>Vertex</code>: a collection of data that represents a single point. </li>
    <li><code>Normalized Device Coordinates</code>: the coordinate system your vertices end up in after perspective division is performed on clip coordinates. All vertex positions in NDC between <code>-1.0</code> and <code>1.0</code> will not be discarded or clipped and end up visible. </li>
    <li><code>Vertex Buffer Object</code>: a buffer object that allocates memory on the GPU and stores all the vertex data there for the graphics card to use. </li>
    <li><code>Vertex Array Object</code>: stores buffer and vertex attribute state information. </li>
    <li><code>Element Buffer Object</code>: a buffer object that stores indices on the GPU for indexed drawing. </li>
    <li><code>Uniform</code>: a special type of GLSL variable that is global (each shader in a shader program can access this uniform variable) and only has to be set once. </li>
    <li><code>Texture</code>: a special type of image used in shaders and usually wrapped around objects, giving the illusion an object is extremely detailed. </li>
    <li><code>Texture Wrapping</code>: defines the mode that specifies how OpenGL should sample textures when texture coordinates are outside the range [<code>0</code>, <code>1</code>]. </li>
    <li><code>Texture Filtering</code>: defines the mode that specifies how OpenGL should sample the texture when there are several texels (texture pixels) to choose from. This usually occurs when a texture is magnified. </li>
    <li><code>Mipmaps</code>: stored smaller versions of a texture, where the appropriately sized version is chosen based on the distance to the viewer. </li>
    <li><code>stb_image</code>: an image loading library. </li>
    <li><code>Texture Units</code>: allow for the use of multiple textures in a single shader program by binding multiple textures, each to a different texture unit. </li>
    <li><code>Vector</code>: a mathematical entity that defines directions and/or positions in any dimension. </li>
    <li><code>Matrix</code>: a rectangular array of mathematical expressions with useful transformation properties. </li>
    <li><code>GLM</code>: a mathematics library tailored for OpenGL. </li>
    <li><code>Local Space</code>: the space an object begins in. All coordinates relative to an object's origin. </li>
    <li><code>World Space</code>: all coordinates relative to a global origin. </li>
    <li><code>View Space</code>: all coordinates as viewed from a camera's perspective. </li>
    <li><code>Clip Space</code>: all coordinates as viewed from the camera's perspective but with projection applied. This is the space the vertex coordinates should end up in, as output of the vertex shader. OpenGL does the rest (clipping/perspective division). </li>
    <li><code>Screen Space</code>: all coordinates as viewed from the screen. Coordinates range from <code>0</code> to screen width/height. </li>
    <li><code>LookAt</code>: a special type of view matrix that creates a coordinate system where all coordinates are rotated and translated in such a way that the user is looking at a given target from a given position. </li>
    <li><code>Euler Angles</code>: <code>yaw</code>, <code>pitch</code> and <code>roll</code>; three values that together allow us to form any 3D direction vector. </li>
  </ul>
</p>
A great resource to learn modern OpenGL aimed at beginners."> + <meta name="fragment" content="!"> + <script> + (function(i,s,o,g,r,a,m){i['GoogleAnalyticsObject']=r;i[r]=i[r]||function(){ + (i[r].q=i[r].q||[]).push(arguments)},i[r].l=1*new Date();a=s.createElement(o), + m=s.getElementsByTagName(o)[0];a.async=1;a.src=g;m.parentNode.insertBefore(a,m) + })(window,document,'script','//www.google-analytics.com/analytics.js','ga'); + + ga('create', 'UA-51879160-1', 'learnopengl.com'); + ga('send', 'pageview'); + + </script> + <!--<script async src="//pagead2.googlesyndication.com/pagead/js/adsbygoogle.js"></script>--> + <script> + (adsbygoogle = window.adsbygoogle || []).push({ + google_ad_client: "ca-pub-7855791439695850", + enable_page_level_ads: true + }); + </script> + <script async='async' src='https://www.googletagservices.com/tag/js/gpt.js'></script> + <script> + var googletag = googletag || {}; + googletag.cmd = googletag.cmd || []; + </script> + <script> + googletag.cmd.push(function() { + googletag.defineSlot('/8491498/learnopengl_video', [300, 225], 'div-gpt-ad-1540574378241-0').addService(googletag.pubads()); + googletag.pubads().enableSingleRequest(); + googletag.pubads().collapseEmptyDivs(); + googletag.enableServices(); + }); + </script> + <script type="text/javascript" src="https://d31vxm9ubutrmw.cloudfront.net/static/js/1681.js"></script> + <script src="/js/jquery-1.11.0.min.js"></script> + <script src="/js/hoverintent.js"></script> + <link rel="stylesheet" type="text/css" href="/layout.css"> + <link rel="stylesheet" type="text/css" href="/js/styles/obsidian.css"> + <script src="/js/highlight.pack.js"></script> + <script src="/js/functions.js"></script> + <script type="text/javascript" src="/js/mathjax/MathJax.js?config=TeX-AMS_HTML"></script> + <script> + // Has to be loaded last due to content bug + MathJax.Hub.Config({ + TeX: { equationNumbers: { autoNumber: "AMS" } } + }); + </script> + <script>hljs.initHighlightingOnLoad();</script> + <script> + $(document).ready(function() { + // check if user visited from the old # based urls, re-direct to ?p= form + if(window.location.hash) + { + var name = window.location.hash.substring(2); + // name = name.replace(/-/g," "); + var index = name.indexOf('#'); // Remove any hash fragments from the url (Disquss adds hash fragments for comments, but results in 404 pages) + if(index >= 0) + name = name.substring(0, index); + + window.location.href = "https://learnopengl.com/" + name; + } else { + // Check if data has been succesfully loaded, if so: change title bar as ajax hash fragment + var title = $('#content-url').text(); + + // Refresh syntax highlighting + // $('pre').each(function(i, e) {hljs.highlightBlock(e)}); + + // Reset DISQUS + // if(title == '/dev/') + // title = ''; + // alert('hoi'); + + // Adjust ads for correct bottom positioning based on content size + window.setTimeout(function() { + AdPositioning(); + }, 3000); + + + // set API resets after time-out (once content is properly loaded) + window.setTimeout(function() { + MathJax.Hub.Queue(["Typeset",MathJax.Hub]); + MathJax.Hub.Queue(["resetEquationNumbers", MathJax.InputJax.TeX]); + + var page_url = title == "" ? 
"http://www.learnopengl.com/" : "http://www.learnopengl.com/" + title; + if(typeof DISQUS !== 'undefined') { + DISQUS.reset({ + reload: true, + config: function () { + this.page.identifier = title; + this.page.url = page_url; + } + }); + $('#disqus_thread').show(); + } + // Refresh callbacks on <function> tags + SetFunctionTagCallbacks(); + }, 1000); + + // Zet ook de juiste button op 'selected' + $('#nav li span, #nav li a').removeClass('selected'); + if(title != '') + { + $('#nav li[id=\'' + title + '\']').children('span, a').addClass('selected'); + } + // En open menu waar nodig + var parents = $('#nav span.selected, #nav a.selected').parents('li').children('span.closed, a.closed'); + var index = 0; + for(index = parents.length - 1; index >= 0; index--) + { + + var id = $(parents[index]).attr("id").replace( /^\D+/g, ''); + MenuClick(id, false); + } + + } + }); + // var initialized = false; + // window.onpopstate = function() { + // if(initialized) + // LoadPage(); + // else + // initialized = true; + // }; + + // Set up DISQUS + // $(document).ready(function() { + var disqus_shortname = 'learnopengl'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + // }); + </script> +</head> +<body> +<a href="https://learnopengl.com"> +<div id="header"> +</div> +</a> + +<div id="supercontainer"> + <!-- 728x90/320x50 --> + <div id="header_ad"> + <div id="waldo-tag-6194"></div> + </div> + <div id="rightad_container"> + <div id="rightad"> + <!-- /8491498/learnopengl_video --> + <!--<div id='div-gpt-ad-1540574378241-0' style='height:225px; width:300px;'> + <script> + googletag.cmd.push(function() { googletag.display('div-gpt-ad-1540574378241-0'); }); + </script> + </div> + <br/>--> + + <div id="waldo-tag-1715"></div> + </div> + + <div id="admessage"> + If you're running AdBlock, please consider whitelisting this site if you'd like to support LearnOpenGL; and no worries, I won't be mad if you don't :) + <!--<br/><br/> + Also, check out this little local multiplayer-only game I've made: <a href="https://store.steampowered.com/app/983590/Tank_Blazers/" target="_blank">Tank Blazers</a>. 
+ <br/> + <a href="https://store.steampowered.com/app/983590/Tank_Blazers" target="_blank"><img src="/img/tank_blazers.jpg" style="width:278px; margin-top: 9px; margin-left: -3px;"/></a>--> + </div> + + <div id="rightonethirdad"> + <div id="waldo-tag-2246"></div> + </div> + + <div id="rightbottomad"> + <div id="waldo-tag-2247"></div> + </div> + </div> + <div id="container"> + <div id="loading"></div> +<script> +$(document).ready(function() { +$('#menu-item4').mousedown(function() { MenuClick(4, true) }); +$('#menu-item48').mousedown(function() { MenuClick(48, true) }); +$('#menu-item56').mousedown(function() { MenuClick(56, true) }); +$('#menu-item63').mousedown(function() { MenuClick(63, true) }); +$('#menu-item100').mousedown(function() { MenuClick(100, true) }); +$('#menu-item102').mousedown(function() { MenuClick(102, true) }); +$('#menu-item113').mousedown(function() { MenuClick(113, true) }); +$('#menu-item116').mousedown(function() { MenuClick(116, true) }); +$('#menu-item78').mousedown(function() { MenuClick(78, true) }); +$('#menu-item81').mousedown(function() { MenuClick(81, true) }); +$('#menu-item85').mousedown(function() { MenuClick(85, true) }); +$('#menu-item125').mousedown(function() { MenuClick(125, true) }); +$('#menu-item128').mousedown(function() { MenuClick(128, true) }); +$('#menu-item129').mousedown(function() { MenuClick(129, true) }); +$('#menu-item133').mousedown(function() { MenuClick(133, true) }); +$('#menu-item134').mousedown(function() { MenuClick(134, true) }); +}); +</script> + <div id="nav"> + <div id="social"> + <a href="https://github.com/JoeyDeVries/LearnOpenGL" target="_blank"> + <img src="/img/github.png" class="social_ico"> + </a> + <!-- <a href="https://www.facebook.com/Learnopengl-2199631333595544/" target="_blank"> + <img src="/img/facebook.png" class="social_ico"> + </a>--> + <a href="https://twitter.com/JoeyDeVriez" target="_blank"> + <img src="/img/twitter.png" class="social_ico"> + </a> + + </div> + <img src='img/nav-button_bottom-arrow.png' style='display: none'><ol><li id='Introduction'><a id="menu-item1" href="https://learnopengl.com/Introduction">Introduction </a></li><li id='Getting-started'><span id="menu-item4" class="closed">Getting started </span><ol id="menu-items-of4" style="display:none;"><li id='Getting-started/OpenGL'><a id="menu-item49" href="https://learnopengl.com/Getting-started/OpenGL">OpenGL </a></li><li id='Getting-started/Creating-a-window'><a id="menu-item5" href="https://learnopengl.com/Getting-started/Creating-a-window">Creating a window </a></li><li id='Getting-started/Hello-Window'><a id="menu-item6" href="https://learnopengl.com/Getting-started/Hello-Window">Hello Window </a></li><li id='Getting-started/Hello-Triangle'><a id="menu-item38" href="https://learnopengl.com/Getting-started/Hello-Triangle">Hello Triangle </a></li><li id='Getting-started/Shaders'><a id="menu-item39" href="https://learnopengl.com/Getting-started/Shaders">Shaders </a></li><li id='Getting-started/Textures'><a id="menu-item40" href="https://learnopengl.com/Getting-started/Textures">Textures </a></li><li id='Getting-started/Transformations'><a id="menu-item43" href="https://learnopengl.com/Getting-started/Transformations">Transformations </a></li><li id='Getting-started/Coordinate-Systems'><a id="menu-item44" href="https://learnopengl.com/Getting-started/Coordinate-Systems">Coordinate Systems </a></li><li id='Getting-started/Camera'><a id="menu-item47" href="https://learnopengl.com/Getting-started/Camera">Camera </a></li><li 
id='Getting-started/Review'><a id="menu-item50" href="https://learnopengl.com/Getting-started/Review">Review </a></li></ol></li><li id='Lighting'><span id="menu-item48" class="closed">Lighting </span><ol id="menu-items-of48" style="display:none;"><li id='Lighting/Colors'><a id="menu-item51" href="https://learnopengl.com/Lighting/Colors">Colors </a></li><li id='Lighting/Basic-Lighting'><a id="menu-item52" href="https://learnopengl.com/Lighting/Basic-Lighting">Basic Lighting </a></li><li id='Lighting/Materials'><a id="menu-item53" href="https://learnopengl.com/Lighting/Materials">Materials </a></li><li id='Lighting/Lighting-maps'><a id="menu-item54" href="https://learnopengl.com/Lighting/Lighting-maps">Lighting maps </a></li><li id='Lighting/Light-casters'><a id="menu-item55" href="https://learnopengl.com/Lighting/Light-casters">Light casters </a></li><li id='Lighting/Multiple-lights'><a id="menu-item58" href="https://learnopengl.com/Lighting/Multiple-lights">Multiple lights </a></li><li id='Lighting/Review'><a id="menu-item57" href="https://learnopengl.com/Lighting/Review">Review </a></li></ol></li><li id='Model-Loading'><span id="menu-item56" class="closed">Model Loading </span><ol id="menu-items-of56" style="display:none;"><li id='Model-Loading/Assimp'><a id="menu-item59" href="https://learnopengl.com/Model-Loading/Assimp">Assimp </a></li><li id='Model-Loading/Mesh'><a id="menu-item60" href="https://learnopengl.com/Model-Loading/Mesh">Mesh </a></li><li id='Model-Loading/Model'><a id="menu-item61" href="https://learnopengl.com/Model-Loading/Model">Model </a></li></ol></li><li id='Advanced-OpenGL'><span id="menu-item63" class="closed">Advanced OpenGL </span><ol id="menu-items-of63" style="display:none;"><li id='Advanced-OpenGL/Depth-testing'><a id="menu-item72" href="https://learnopengl.com/Advanced-OpenGL/Depth-testing">Depth testing </a></li><li id='Advanced-OpenGL/Stencil-testing'><a id="menu-item73" href="https://learnopengl.com/Advanced-OpenGL/Stencil-testing">Stencil testing </a></li><li id='Advanced-OpenGL/Blending'><a id="menu-item74" href="https://learnopengl.com/Advanced-OpenGL/Blending">Blending </a></li><li id='Advanced-OpenGL/Face-culling'><a id="menu-item77" href="https://learnopengl.com/Advanced-OpenGL/Face-culling">Face culling </a></li><li id='Advanced-OpenGL/Framebuffers'><a id="menu-item65" href="https://learnopengl.com/Advanced-OpenGL/Framebuffers">Framebuffers </a></li><li id='Advanced-OpenGL/Cubemaps'><a id="menu-item66" href="https://learnopengl.com/Advanced-OpenGL/Cubemaps">Cubemaps </a></li><li id='Advanced-OpenGL/Advanced-Data'><a id="menu-item69" href="https://learnopengl.com/Advanced-OpenGL/Advanced-Data">Advanced Data </a></li><li id='Advanced-OpenGL/Advanced-GLSL'><a id="menu-item67" href="https://learnopengl.com/Advanced-OpenGL/Advanced-GLSL">Advanced GLSL </a></li><li id='Advanced-OpenGL/Geometry-Shader'><a id="menu-item68" href="https://learnopengl.com/Advanced-OpenGL/Geometry-Shader">Geometry Shader </a></li><li id='Advanced-OpenGL/Instancing'><a id="menu-item70" href="https://learnopengl.com/Advanced-OpenGL/Instancing">Instancing </a></li><li id='Advanced-OpenGL/Anti-Aliasing'><a id="menu-item75" href="https://learnopengl.com/Advanced-OpenGL/Anti-Aliasing">Anti Aliasing </a></li></ol></li><li id='Advanced-Lighting'><span id="menu-item100" class="closed">Advanced Lighting </span><ol id="menu-items-of100" style="display:none;"><li id='Advanced-Lighting/Advanced-Lighting'><a id="menu-item101" 
href="https://learnopengl.com/Advanced-Lighting/Advanced-Lighting">Advanced Lighting </a></li><li id='Advanced-Lighting/Gamma-Correction'><a id="menu-item110" href="https://learnopengl.com/Advanced-Lighting/Gamma-Correction">Gamma Correction </a></li><li id='Advanced-Lighting/Shadows'><span id="menu-item102" class="closed">Shadows </span><ol id="menu-items-of102" style="display:none;"><li id='Advanced-Lighting/Shadows/Shadow-Mapping'><a id="menu-item103" href="https://learnopengl.com/Advanced-Lighting/Shadows/Shadow-Mapping">Shadow Mapping </a></li><li id='Advanced-Lighting/Shadows/Point-Shadows'><a id="menu-item104" href="https://learnopengl.com/Advanced-Lighting/Shadows/Point-Shadows">Point Shadows </a></li></ol></li><li id='Advanced-Lighting/Normal-Mapping'><a id="menu-item106" href="https://learnopengl.com/Advanced-Lighting/Normal-Mapping">Normal Mapping </a></li><li id='Advanced-Lighting/Parallax-Mapping'><a id="menu-item107" href="https://learnopengl.com/Advanced-Lighting/Parallax-Mapping">Parallax Mapping </a></li><li id='Advanced-Lighting/HDR'><a id="menu-item111" href="https://learnopengl.com/Advanced-Lighting/HDR">HDR </a></li><li id='Advanced-Lighting/Bloom'><a id="menu-item112" href="https://learnopengl.com/Advanced-Lighting/Bloom">Bloom </a></li><li id='Advanced-Lighting/Deferred-Shading'><a id="menu-item108" href="https://learnopengl.com/Advanced-Lighting/Deferred-Shading">Deferred Shading </a></li><li id='Advanced-Lighting/SSAO'><a id="menu-item109" href="https://learnopengl.com/Advanced-Lighting/SSAO">SSAO </a></li></ol></li><li id='PBR'><span id="menu-item113" class="closed">PBR </span><ol id="menu-items-of113" style="display:none;"><li id='PBR/Theory'><a id="menu-item114" href="https://learnopengl.com/PBR/Theory">Theory </a></li><li id='PBR/Lighting'><a id="menu-item115" href="https://learnopengl.com/PBR/Lighting">Lighting </a></li><li id='PBR/IBL'><span id="menu-item116" class="closed">IBL </span><ol id="menu-items-of116" style="display:none;"><li id='PBR/IBL/Diffuse-irradiance'><a id="menu-item117" href="https://learnopengl.com/PBR/IBL/Diffuse-irradiance">Diffuse irradiance </a></li><li id='PBR/IBL/Specular-IBL'><a id="menu-item118" href="https://learnopengl.com/PBR/IBL/Specular-IBL">Specular IBL </a></li></ol></li></ol></li><li id='In-Practice'><span id="menu-item78" class="closed">In Practice </span><ol id="menu-items-of78" style="display:none;"><li id='In-Practice/Debugging'><a id="menu-item79" href="https://learnopengl.com/In-Practice/Debugging">Debugging </a></li><li id='In-Practice/Text-Rendering'><a id="menu-item80" href="https://learnopengl.com/In-Practice/Text-Rendering">Text Rendering </a></li><li id='In-Practice/2D-Game'><span id="menu-item81" class="closed">2D Game </span><ol id="menu-items-of81" style="display:none;"><li id='In-Practice/2D-Game/Breakout'><a id="menu-item82" href="https://learnopengl.com/In-Practice/2D-Game/Breakout">Breakout </a></li><li id='In-Practice/2D-Game/Setting-up'><a id="menu-item88" href="https://learnopengl.com/In-Practice/2D-Game/Setting-up">Setting up </a></li><li id='In-Practice/2D-Game/Rendering-Sprites'><a id="menu-item83" href="https://learnopengl.com/In-Practice/2D-Game/Rendering-Sprites">Rendering Sprites </a></li><li id='In-Practice/2D-Game/Levels'><a id="menu-item84" href="https://learnopengl.com/In-Practice/2D-Game/Levels">Levels </a></li><li id='In-Practice/2D-Game/Collisions'><span id="menu-item85" class="closed">Collisions </span><ol id="menu-items-of85" style="display:none;"><li 
id='In-Practice/2D-Game/Collisions/Ball'><a id="menu-item95" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Ball">Ball </a></li><li id='In-Practice/2D-Game/Collisions/Collision-detection'><a id="menu-item96" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-detection">Collision detection </a></li><li id='In-Practice/2D-Game/Collisions/Collision-resolution'><a id="menu-item97" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-resolution">Collision resolution </a></li></ol></li><li id='In-Practice/2D-Game/Particles'><a id="menu-item89" href="https://learnopengl.com/In-Practice/2D-Game/Particles">Particles </a></li><li id='In-Practice/2D-Game/Postprocessing'><a id="menu-item90" href="https://learnopengl.com/In-Practice/2D-Game/Postprocessing">Postprocessing </a></li><li id='In-Practice/2D-Game/Powerups'><a id="menu-item91" href="https://learnopengl.com/In-Practice/2D-Game/Powerups">Powerups </a></li><li id='In-Practice/2D-Game/Audio'><a id="menu-item94" href="https://learnopengl.com/In-Practice/2D-Game/Audio">Audio </a></li><li id='In-Practice/2D-Game/Render-text'><a id="menu-item92" href="https://learnopengl.com/In-Practice/2D-Game/Render-text">Render text </a></li><li id='In-Practice/2D-Game/Final-thoughts'><a id="menu-item93" href="https://learnopengl.com/In-Practice/2D-Game/Final-thoughts">Final thoughts </a></li></ol></li></ol></li><li id='Guest-Articles'><span id="menu-item125" class="closed">Guest Articles </span><ol id="menu-items-of125" style="display:none;"><li id='Guest-Articles/How-to-publish'><a id="menu-item126" href="https://learnopengl.com/Guest-Articles/How-to-publish">How to publish </a></li><li id='Guest-Articles/2020'><span id="menu-item128" class="closed">2020 </span><ol id="menu-items-of128" style="display:none;"><li id='Guest-Articles/2020/OIT'><span id="menu-item129" class="closed">OIT </span><ol id="menu-items-of129" style="display:none;"><li id='Guest-Articles/2020/OIT/Introduction'><a id="menu-item130" href="https://learnopengl.com/Guest-Articles/2020/OIT/Introduction">Introduction </a></li><li id='Guest-Articles/2020/OIT/Weighted-Blended'><a id="menu-item132" href="https://learnopengl.com/Guest-Articles/2020/OIT/Weighted-Blended">Weighted Blended </a></li></ol></li><li id='Guest-Articles/2020/Skeletal-Animation'><a id="menu-item131" href="https://learnopengl.com/Guest-Articles/2020/Skeletal-Animation">Skeletal Animation </a></li></ol></li><li id='Guest-Articles/2021'><span id="menu-item133" class="closed">2021 </span><ol id="menu-items-of133" style="display:none;"><li id='Guest-Articles/2021/Scene'><span id="menu-item134" class="closed">Scene </span><ol id="menu-items-of134" style="display:none;"><li id='Guest-Articles/2021/Scene/Scene-Graph'><a id="menu-item135" href="https://learnopengl.com/Guest-Articles/2021/Scene/Scene-Graph">Scene Graph </a></li><li id='Guest-Articles/2021/Scene/Frustum-Culling'><a id="menu-item136" href="https://learnopengl.com/Guest-Articles/2021/Scene/Frustum-Culling">Frustum Culling </a></li></ol></li></ol></li></ol></li><li id='Code-repository'><a id="menu-item99" href="https://learnopengl.com/Code-repository">Code repository </a></li><li id='Translations'><a id="menu-item119" href="https://learnopengl.com/Translations">Translations </a></li><li id='About'><a id="menu-item2" href="https://learnopengl.com/About">About </a></li></ol> <div id="menu_book"> + <a href="https://geni.us/learnopengl" target="_blank"><img src="/book/below_menu.png" class="clean"/></a> + </div> + <div 
id="donate"> + <a href="https://www.paypal.me/learnopengl/" target="_blank"> + <div id="donate_img"></div> + <img style="display: none" src="/img/donate_button_hover.png"/> + <!--<img id="donate_img" src="img/patreon.png"/>--> + </a> + <!--<div id="alipay"> + <img style="width: 150px;" class="clean" src="/img/alipay_logo.png"/> + <img style="width: 150px; margin-top: 5px" src="/img/alipay.png"/> + </div>--> + </div> + <div class="btc"> + <h3>BTC</h3> + <p> + 1CLGKgmBSuYJ1nnvDGAepVTKNNDpUjfpRa + </p> + <img src="/img/btc_qr.png"/> + </div> + <div class="btc"> + <h3>ETH/ERC20</h3> + <p> + 0x1de59bd9e52521a46309474f8372531533bd7c43 + </p> + <img src="/img/erc20_qr.png"/> + </div> + <div id="ad"> + <!--<div id="waldo-tag-1684"></div>--> + </div> + + <div id="lefttwothirdad"> + <div id="waldo-tag-2245"></div> + </div> + </div> + + <div id="content"> + <h1 id="content-title">Shaders</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Shaders</h1> +<p> + As mentioned in the <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">Hello Triangle</a> chapter, shaders are little programs that rest on the GPU. These programs are run for each specific section of the graphics pipeline. In a basic sense, shaders are nothing more than programs transforming inputs to outputs. Shaders are also very isolated programs in that they're not allowed to communicate with each other; the only communication they have is via their inputs and outputs. +</p> + +<p> + In the previous chapter we briefly touched the surface of shaders and how to properly use them. We will now explain shaders, and specifically the OpenGL Shading Language, in a more general fashion. +</p> + +<h1>GLSL</h1> +<p> + Shaders are written in the C-like language GLSL. GLSL is tailored for use with graphics and contains useful features specifically targeted at vector and matrix manipulation. +</p> + +<p> + Shaders always begin with a version declaration, followed by a list of input and output variables, uniforms and its <fun>main</fun> function. Each shader's entry point is at its <fun>main</fun> function where we process any input variables and output the results in its output variables. Don't worry if you don't know what uniforms are, we'll get to those shortly. +</p> + +<p> + A shader typically has the following structure: +</p> + +<pre><code> +#version version_number +in type in_variable_name; +in type in_variable_name; + +out type out_variable_name; + +uniform type uniform_name; + +void main() +{ + // process input(s) and do some weird graphics stuff + ... + // output processed stuff to output variable + out_variable_name = weird_stuff_we_processed; +} +</code></pre> + +<p> + When we're talking specifically about the vertex shader each input variable is also known as a <def>vertex attribute</def>. There is a maximum number of vertex attributes we're allowed to declare limited by the hardware. OpenGL guarantees there are always at least 16 4-component vertex attributes available, but some hardware may allow for more which you can retrieve by querying <var>GL_MAX_VERTEX_ATTRIBS</var>: +</p> + +<pre><code> +int nrAttributes; +glGetIntegerv(GL_MAX_VERTEX_ATTRIBS, &nrAttributes); +std::cout &lt;&lt; "Maximum nr of vertex attributes supported: " &lt;&lt; nrAttributes &lt;&lt; std::endl; +</code></pre> + +<p> + This often returns the minimum of <code>16</code> which should be more than enough for most purposes. 
+</p> + +<h2>Types</h2> +<p> + GLSL has, like any other programming language, data types for specifying what kind of variable we want to work with. GLSL has most of the default basic types we know from languages like C: <code>int</code>, <code>float</code>, <code>double</code>, <code>uint</code> and <code>bool</code>. GLSL also features two container types that we'll be using a lot, namely <code>vectors</code> and <code>matrices</code>. We'll discuss matrices in a later chapter. +</p> + +<h3>Vectors</h3> +<p> + A vector in GLSL is a 1,2,3 or 4 component container for any of the basic types just mentioned. They can take the following form (<code>n</code> represents the number of components): +</p> + + <ul> + <li><code>vecn</code>: the default vector of <code>n</code> floats.</li> + <li><code>bvecn</code>: a vector of <code>n</code> booleans.</li> + <li><code>ivecn</code>: a vector of <code>n</code> integers.</li> + <li><code>uvecn</code>: a vector of <code>n</code> unsigned integers.</li> + <li><code>dvecn</code>: a vector of <code>n</code> double components.</li> + </ul> + +<p> + Most of the time we will be using the basic <code>vecn</code> since floats are sufficient for most of our purposes. +</p> + + <p> + Components of a vector can be accessed via <code>vec.x</code> where <code>x</code> is the first component of the vector. You can use <code>.x</code>, <code>.y</code>, <code>.z</code> and <code>.w</code> to access their first, second, third and fourth component respectively. GLSL also allows you to use <code>rgba</code> for colors or <code>stpq</code> for texture coordinates, accessing the same components. + </p> + + <p> + The vector datatype allows for some interesting and flexible component selection called <def>swizzling</def>. Swizzling allows us to use syntax like this: + </p> + +<pre><code> +vec2 someVec; +vec4 differentVec = someVec.xyxx; +vec3 anotherVec = differentVec.zyw; +vec4 otherVec = someVec.xxxx + anotherVec.yxzy; +</code></pre> + +<p> + You can use any combination of up to 4 letters to create a new vector (of the same type) as long as the original vector has those components; it is not allowed to access the <code>.z</code> component of a <code>vec2</code> for example. We can also pass vectors as arguments to different vector constructor calls, reducing the number of arguments required: +</p> + +<pre><code> +vec2 vect = vec2(0.5, 0.7); +vec4 result = vec4(vect, 0.0, 0.0); +vec4 otherResult = vec4(result.xyz, 1.0); +</code></pre> + +<p> + Vectors are thus a flexible datatype that we can use for all kinds of input and output. Throughout the book you'll see plenty of examples of how we can creatively manage vectors. +</p> + +<h2>Ins and outs</h2> +<p> + Shaders are nice little programs on their own, but they are part of a whole and for that reason we want to have inputs and outputs on the individual shaders so that we can move stuff around. GLSL defined the <code>in</code> and <code>out</code> keywords specifically for that purpose. Each shader can specify inputs and outputs using those keywords and wherever an output variable matches with an input variable of the next shader stage they're passed along. The vertex and fragment shader differ a bit though. +</p> + +<p> + The vertex shader <strong>should</strong> receive some form of input otherwise it would be pretty ineffective. The vertex shader differs in its input, in that it receives its input straight from the vertex data. 
To define how the vertex data is organized we specify the input variables with location metadata so we can configure the vertex attributes on the CPU. We've seen this in the previous chapter as <code>layout (location = 0)</code>. The vertex shader thus requires an extra layout specification for its inputs so we can link it with the vertex data.
  </p>

<note>
  It is also possible to omit the <code>layout (location = 0)</code> specifier and query for the attribute locations in your OpenGL code via <fun><function id='104'>glGetAttribLocation</function></fun>, but I'd prefer to set them in the vertex shader. It is easier to understand and saves you (and OpenGL) some work.
  </note>

<p>
  The other exception is that the fragment shader requires a <code>vec4</code> color output variable, since the fragment shader needs to generate a final output color. If you fail to specify an output color in your fragment shader, the color buffer output for those fragments will be undefined (which usually means OpenGL will render them either black or white).
  </p>

<p>
  So if we want to send data from one shader to the other we'd have to declare an output in the sending shader and a similar input in the receiving shader. When the types and the names are equal on both sides, OpenGL will link those variables together and then it is possible to send data between shaders (this is done when linking a program object). To show you how this works in practice we're going to alter the shaders from the previous chapter to let the vertex shader decide the color for the fragment shader.
  </p>

<strong>Vertex shader</strong>
<pre><code>
#version 330 core
layout (location = 0) in vec3 aPos; // the position variable has attribute position 0

out vec4 vertexColor; // specify a color output to the fragment shader

void main()
{
    gl_Position = vec4(aPos, 1.0); // see how we directly give a vec3 to vec4's constructor
    vertexColor = vec4(0.5, 0.0, 0.0, 1.0); // set the output variable to a dark-red color
}
</code></pre>

<strong>Fragment shader</strong>
<pre><code>
#version 330 core
out vec4 FragColor;

in vec4 vertexColor; // the input variable from the vertex shader (same name and same type)

void main()
{
    FragColor = vertexColor;
}
</code></pre>

<p>
  You can see we declared a <var>vertexColor</var> variable as a <code>vec4</code> output in the vertex shader and declared a similar <var>vertexColor</var> input in the fragment shader. Since they both have the same type and name, the <var>vertexColor</var> in the fragment shader is linked to the <var>vertexColor</var> in the vertex shader. Because we set the color to a dark-red color in the vertex shader, the resulting fragments should be dark-red as well. The following image shows the output:
  </p>

  <img src="/img/getting-started/shaders.png" class="clean"/>

<p>
  There we go! We just managed to send a value from the vertex shader to the fragment shader. Let's spice it up a bit and see if we can send a color from our application to the fragment shader!
  </p>

  <h2>Uniforms</h2>
  <p>
    <def>Uniforms</def> are another way to pass data from our application on the CPU to the shaders on the GPU. Uniforms are however slightly different compared to vertex attributes. First of all, uniforms are <def>global</def>: a uniform variable is unique per shader program object and can be accessed from any shader at any stage in that shader program. Second, whatever you set the uniform value to, uniforms will keep their values until they're either reset or updated.
  </p>

  <p>
    To declare a uniform in GLSL we simply add the <code>uniform</code> keyword to a shader with a type and a name. From that point on we can use the newly declared uniform in the shader. Let's see if this time we can set the color of the triangle via a uniform:
  </p>

<pre><code>
#version 330 core
out vec4 FragColor;

uniform vec4 ourColor; // we set this variable in the OpenGL code.

void main()
{
    FragColor = ourColor;
}
</code></pre>

  <p>
    We declared a uniform <code>vec4</code> <var>ourColor</var> in the fragment shader and set the fragment's output color to the content of this uniform value. Since uniforms are global variables, we can define them in any shader stage we'd like, so there is no need to go through the vertex shader again to get something to the fragment shader. We're not using this uniform in the vertex shader so there's no need to define it there.
  </p>

  <warning>
    If you declare a uniform that isn't used anywhere in your GLSL code the compiler will silently remove the variable from the compiled version, which is the cause of several frustrating errors; keep this in mind!
  </warning>

  <p>
    The uniform is currently empty; we haven't added any data to it yet, so let's try that. We first need to find the index/location of the uniform in our shader. Once we have its location, we can update its value. Instead of passing a single color to the fragment shader, let's spice things up by gradually changing color over time:
  </p>

<pre><code>
float timeValue = <function id='47'>glfwGetTime</function>();
float greenValue = (sin(timeValue) / 2.0f) + 0.5f;
int vertexColorLocation = <function id='45'>glGetUniformLocation</function>(shaderProgram, "ourColor");
<function id='28'>glUseProgram</function>(shaderProgram);
<function id='44'>glUniform</function>4f(vertexColorLocation, 0.0f, greenValue, 0.0f, 1.0f);
</code></pre>

  <p>
    First, we retrieve the running time in seconds via <fun><function id='47'>glfwGetTime</function>()</fun>. Then we vary the color in the range of <code>0.0</code> - <code>1.0</code> by using the <fun>sin</fun> function and store the result in <var>greenValue</var>.
  </p>

  <p>
    Then we query for the location of the <var>ourColor</var> uniform using <fun><function id='45'>glGetUniformLocation</function></fun>. We supply the shader program and the name of the uniform (that we want to retrieve the location from) to the query function. If <fun><function id='45'>glGetUniformLocation</function></fun> returns <code>-1</code>, it could not find the location. Lastly we can set the uniform value using the <fun><function id='44'>glUniform</function>4f</fun> function. Note that finding the uniform location does not require you to use the shader program first, but updating a uniform <strong>does</strong> require you to first use the program (by calling <fun><function id='28'>glUseProgram</function></fun>), because it sets the uniform on the currently active shader program.
  </p>
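<p>
  That <code>-1</code> return value is easy to overlook, so here is a small defensive sketch (not part of the original code) that re-uses <var>shaderProgram</var> and <var>greenValue</var> from the snippet above and only sets the uniform when the location was actually found:
</p>

<pre><code>
int vertexColorLocation = glGetUniformLocation(shaderProgram, "ourColor");
if(vertexColorLocation == -1)
{
    // either the name is misspelled, or the uniform is unused and was optimized away
    std::cout &lt;&lt; "WARNING::UNIFORM::ourColor not found" &lt;&lt; std::endl;
}
else
{
    glUseProgram(shaderProgram);
    glUniform4f(vertexColorLocation, 0.0f, greenValue, 0.0f, 1.0f);
}
</code></pre>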
<note>
<p>
  Because OpenGL is in its core a C library it does not have native support for function overloading, so wherever a function can be called with different types OpenGL defines new functions for each type required; <fun><function id='44'>glUniform</function></fun> is a perfect example of this. The function requires a specific postfix for the type of the uniform you want to set. A few of the possible postfixes are:
  <ul>
    <li><code>f</code>: the function expects a <code>float</code> as its value.</li>
    <li><code>i</code>: the function expects an <code>int</code> as its value.</li>
    <li><code>ui</code>: the function expects an <code>unsigned int</code> as its value.</li>
    <li><code>3f</code>: the function expects 3 <code>float</code>s as its value.</li>
    <li><code>fv</code>: the function expects a <code>float</code> vector/array as its value.</li>
  </ul>
  Whenever you want to set a uniform, simply pick the overloaded function that corresponds with your type. In our case we want to set 4 floats of the uniform individually so we pass our data via <fun><function id='44'>glUniform</function>4f</fun> (note that we also could've used the <code>fv</code> version).
</p>
</note>
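<p>
  For completeness, this is roughly what the <code>fv</code> variant mentioned in the note would look like. It takes a count and a pointer to an array of floats, which is convenient once the values already live in an array; the array name below is just a placeholder:
</p>

<pre><code>
float ourColorValue[] = { 0.0f, greenValue, 0.0f, 1.0f };
glUseProgram(shaderProgram);
// '4fv': upload 1 vector of 4 floats from the given array
glUniform4fv(vertexColorLocation, 1, ourColorValue);
</code></pre>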
  <p>
    Now that we know how to set the values of uniform variables, we can use them for rendering. Since we want the color to gradually change, we have to update this uniform every frame; if we only set it once, the triangle would keep a single solid color. So we calculate the <var>greenValue</var> and update the uniform each render iteration:
  </p>

<pre><code>
while(!<function id='14'>glfwWindowShouldClose</function>(window))
{
    // input
    processInput(window);

    // render
    // clear the colorbuffer
    <function id='13'><function id='10'>glClear</function>Color</function>(0.2f, 0.3f, 0.3f, 1.0f);
    <function id='10'>glClear</function>(GL_COLOR_BUFFER_BIT);

    // be sure to activate the shader
    <function id='28'>glUseProgram</function>(shaderProgram);

    // update the uniform color
    float timeValue = <function id='47'>glfwGetTime</function>();
    float greenValue = sin(timeValue) / 2.0f + 0.5f;
    int vertexColorLocation = <function id='45'>glGetUniformLocation</function>(shaderProgram, "ourColor");
    <function id='44'>glUniform</function>4f(vertexColorLocation, 0.0f, greenValue, 0.0f, 1.0f);

    // now render the triangle
    <function id='27'>glBindVertexArray</function>(VAO);
    <function id='1'>glDrawArrays</function>(GL_TRIANGLES, 0, 3);

    // swap buffers and poll IO events
    <function id='24'>glfwSwapBuffers</function>(window);
    <function id='23'>glfwPollEvents</function>();
}
</code></pre>

  <p>
    The code is a relatively straightforward adaptation of the previous code. This time, we update a uniform value each frame before drawing the triangle. If you update the uniform correctly you should see the color of your triangle gradually change from green to black and back to green.
  </p>

<div class="video paused" onclick="ClickVideo(this)">
  <video width="600" height="450" loop>
    <source src="/video/getting-started/shaders.mp4" type="video/mp4" />
    <img src="/img/getting-started/shaders2.png" class="clean"/>
  </video>
</div>

<p>
  Check out the source code <a href="/code_viewer_gh.php?code=src/1.getting_started/3.1.shaders_uniform/shaders_uniform.cpp" target="_blank">here</a> if you're stuck.
  </p>

<p>
  As you can see, uniforms are a useful tool for setting values that may change every frame, or for interchanging data between your application and your shaders. But what if we want to set a color for each vertex? In that case we'd have to declare as many uniforms as we have vertices. A better solution would be to include more data in the vertex attributes, which is what we're going to do now.
+ </p> + + <h2>More attributes!</h2> + <p> + We saw in the previous chapter how we can fill a VBO, configure vertex attribute pointers and store it all in a VAO. This time, we also want to add color data to the vertex data. We're going to add color data as 3 <code>float</code>s to the <var>vertices</var> array. We assign a red, green and blue color to each of the corners of our triangle respectively: + </p> + +<pre><code> +float vertices[] = { + // positions // colors + 0.5f, -0.5f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom right + -0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, // bottom left + 0.0f, 0.5f, 0.0f, 0.0f, 0.0f, 1.0f // top +}; +</code></pre> + +<p> + Since we now have more data to send to the vertex shader, it is necessary to adjust the vertex shader to also receive our color value as a vertex attribute input. Note that we set the location of the <var>aColor</var> attribute to 1 with the layout specifier: +</p> + +<pre><code> +#version 330 core +layout (location = 0) in vec3 aPos; // the position variable has attribute position 0 +layout (location = 1) in vec3 aColor; // the color variable has attribute position 1 + +out vec3 ourColor; // output a color to the fragment shader + +void main() +{ + gl_Position = vec4(aPos, 1.0); + ourColor = aColor; // set ourColor to the input color we got from the vertex data +} +</code></pre> + + <p> + Since we no longer use a uniform for the fragment's color, but now use the <var>ourColor</var> output variable we'll have to change the fragment shader as well: + </p> + +<pre><code> +#version 330 core +out vec4 FragColor; +in vec3 ourColor; + +void main() +{ + FragColor = vec4(ourColor, 1.0); +} +</code></pre> + +<p> + Because we added another vertex attribute and updated the VBO's memory we have to re-configure the vertex attribute pointers. The updated data in the VBO's memory now looks a bit like this: + </p> + + <img src="/img/getting-started/vertex_attribute_pointer_interleaved.png" class="clean" alt="Interleaved data of position and color within VBO to be configured wtih <function id='30'>glVertexAttribPointer</function>"/> + +<p> + Knowing the current layout we can update the vertex format with <fun><function id='30'>glVertexAttribPointer</function></fun>: +</p> + +<pre><code> +// position attribute +<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)0); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0); +// color attribute +<function id='30'>glVertexAttribPointer</function>(1, 3, GL_FLOAT, GL_FALSE, 6 * sizeof(float), (void*)(3* sizeof(float))); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(1); +</code></pre> + +<p> + The first few arguments of <fun><function id='30'>glVertexAttribPointer</function></fun> are relatively straightforward. This time we are configuring the vertex attribute on attribute location <code>1</code>. The color values have a size of <code>3</code> <code>float</code>s and we do not normalize the values. + </p> + + <p> + Since we now have two vertex attributes we have to re-calculate the <em>stride</em> value. To get the next attribute value (e.g. the next <code>x</code> component of the position vector) in the data array we have to move <code>6</code> <code>float</code>s to the right, three for the position values and three for the color values. This gives us a stride value of 6 times the size of a <code>float</code> in bytes (= <code>24</code> bytes). <br/> + Also, this time we have to specify an offset. 
For each vertex, the position vertex attribute is first so we declare an offset of <code>0</code>. The color attribute starts after the position data so the offset is <code>3 * sizeof(float)</code> in bytes (= <code>12</code> bytes). +</p> + +<p> + Running the application should result in the following image: +</p> + + <img src="/img/getting-started/shaders3.png" class="clean"/> + + <p> + Check out the source code <a href="/code_viewer_gh.php?code=src/1.getting_started/3.2.shaders_interpolation/shaders_interpolation.cpp" target="_blank">here</a> if you're stuck. + </p> + + <p> + The image may not be exactly what you would expect, since we only supplied 3 colors, not the huge color palette we're seeing right now. This is all the result of something called <def>fragment interpolation</def> in the fragment shader. When rendering a triangle the rasterization stage usually results in a lot more fragments than vertices originally specified. The rasterizer then determines the positions of each of those fragments based on where they reside on the triangle shape.<br/> + Based on these positions, it <def>interpolates</def> all the fragment shader's input variables. Say for example we have a line where the upper point has a green color and the lower point a blue color. If the fragment shader is run at a fragment that resides around a position at <code>70%</code> of the line, its resulting color input attribute would then be a linear combination of green and blue; to be more precise: <code>30%</code> blue and <code>70%</code> green. + </p> + + <p> + This is exactly what happened at the triangle. We have 3 vertices and thus 3 colors, and judging from the triangle's pixels it probably contains around 50000 fragments, where the fragment shader interpolated the colors among those pixels. If you take a good look at the colors you'll see it all makes sense: red to blue first gets to purple and then to blue. Fragment interpolation is applied to all the fragment shader's input attributes. + </p> + +<h1>Our own shader class</h1> + <p> + Writing, compiling and managing shaders can be quite cumbersome. As a final touch on the shader subject we're going to make our life a bit easier by building a shader class that reads shaders from disk, compiles and links them, checks for errors and is easy to use. This also gives you a bit of an idea how we can encapsulate some of the knowledge we learned so far into useful abstract objects. + </p> + + <p> + We will create the shader class entirely in a header file, mainly for learning purposes and portability. Let's start by adding the required includes and by defining the class structure: + </p> + +<pre><code> +#ifndef SHADER_H +#define SHADER_H + +#include &lt;glad/glad.h&gt; // include glad to get all the required OpenGL headers + +#include &lt;string&gt; +#include &lt;fstream&gt; +#include &lt;sstream&gt; +#include &lt;iostream&gt; + + +class Shader +{ +public: + // the program ID + unsigned int ID; + + // constructor reads and builds the shader + Shader(const char* vertexPath, const char* fragmentPath); + // use/activate the shader + void use(); + // utility uniform functions + void setBool(const std::string &name, bool value) const; + void setInt(const std::string &name, int value) const; + void setFloat(const std::string &name, float value) const; +}; + +#endif +</code></pre> + + <note> + We used several <def>preprocessor directives</def> at the top of the header file. 
Using these little lines of code informs your compiler to only include and compile this header file if it hasn't been included yet, even if multiple files include the shader header. This prevents linking conflicts. + </note> + + <p> + The shader class holds the ID of the shader program. Its constructor requires the file paths of the source code of the vertex and fragment shader respectively that we can store on disk as simple text files. To add a little extra we also add several utility functions to ease our lives a little: <fun>use</fun> activates the shader program, and all <fun>set...</fun> functions query a uniform location and set its value. + </p> + +<h2>Reading from file</h2> +<p> + We're using C++ filestreams to read the content from the file into several <code>string</code> objects: +</p> + +<pre><code> +Shader(const char* vertexPath, const char* fragmentPath) +{ + // 1. retrieve the vertex/fragment source code from filePath + std::string vertexCode; + std::string fragmentCode; + std::ifstream vShaderFile; + std::ifstream fShaderFile; + // ensure ifstream objects can throw exceptions: + vShaderFile.exceptions (std::ifstream::failbit | std::ifstream::badbit); + fShaderFile.exceptions (std::ifstream::failbit | std::ifstream::badbit); + try + { + // open files + vShaderFile.open(vertexPath); + fShaderFile.open(fragmentPath); + std::stringstream vShaderStream, fShaderStream; + // read file's buffer contents into streams + vShaderStream &lt;&lt; vShaderFile.rdbuf(); + fShaderStream &lt;&lt; fShaderFile.rdbuf(); + // close file handlers + vShaderFile.close(); + fShaderFile.close(); + // convert stream into string + vertexCode = vShaderStream.str(); + fragmentCode = fShaderStream.str(); + } + catch(std::ifstream::failure e) + { + std::cout &lt;&lt; "ERROR::SHADER::FILE_NOT_SUCCESFULLY_READ" &lt;&lt; std::endl; + } + const char* vShaderCode = vertexCode.c_str(); + const char* fShaderCode = fragmentCode.c_str(); + [...] +</code></pre> + + <p> + Next we need to compile and link the shaders. Note that we're also reviewing if compilation/linking failed and if so, print the compile-time errors. This is extremely useful when debugging (you are going to need those error logs eventually): + </p> + +<pre><code> +// 2. compile shaders +unsigned int vertex, fragment; +int success; +char infoLog[512]; + +// vertex Shader +vertex = <function id='37'>glCreateShader</function>(GL_VERTEX_SHADER); +<function id='42'>glShaderSource</function>(vertex, 1, &amp;vShaderCode, NULL); +<function id='38'>glCompileShader</function>(vertex); +// print compile errors if any +<function id='39'>glGetShaderiv</function>(vertex, GL_COMPILE_STATUS, &amp;success); +if(!success) +{ + <function id='40'>glGetShaderInfoLog</function>(vertex, 512, NULL, infoLog); + std::cout &lt;&lt; "ERROR::SHADER::VERTEX::COMPILATION_FAILED\n" &lt;&lt; infoLog &lt;&lt; std::endl; +}; + +// similiar for Fragment Shader +[...] 

// shader Program
ID = <function id='36'>glCreateProgram</function>();
<function id='34'>glAttachShader</function>(ID, vertex);
<function id='34'>glAttachShader</function>(ID, fragment);
<function id='35'>glLinkProgram</function>(ID);
// print linking errors if any
<function id='41'>glGetProgramiv</function>(ID, GL_LINK_STATUS, &success);
if(!success)
{
    glGetProgramInfoLog(ID, 512, NULL, infoLog);
    std::cout &lt;&lt; "ERROR::SHADER::PROGRAM::LINKING_FAILED\n" &lt;&lt; infoLog &lt;&lt; std::endl;
}

// delete the shaders as they're linked into our program now and no longer necessary
<function id='46'>glDeleteShader</function>(vertex);
<function id='46'>glDeleteShader</function>(fragment);
</code></pre>

  <p>
    The <fun>use</fun> function is straightforward:
  </p>

<pre><code>
void use()
{
    <function id='28'>glUseProgram</function>(ID);
}
</code></pre>

<p>
  Similarly for any of the uniform setter functions:
</p>

<pre><code>
void setBool(const std::string &name, bool value) const
{
    <function id='44'>glUniform</function>1i(<function id='45'>glGetUniformLocation</function>(ID, name.c_str()), (int)value);
}
void setInt(const std::string &name, int value) const
{
    <function id='44'>glUniform</function>1i(<function id='45'>glGetUniformLocation</function>(ID, name.c_str()), value);
}
void setFloat(const std::string &name, float value) const
{
    <function id='44'>glUniform</function>1f(<function id='45'>glGetUniformLocation</function>(ID, name.c_str()), value);
}
</code></pre>
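<p>
  The class so far only wraps <code>bool</code>, <code>int</code> and <code>float</code> uniforms. If you want to drive the <code>vec4</code> <var>ourColor</var> uniform from earlier in this chapter through the class, you could add a 4-float setter in the same style. The name <fun>setFloat4</fun> below is our own choice for this sketch, not something the chapter's class defines:
</p>

<pre><code>
// hypothetical extra utility function, following the same pattern as setFloat
void setFloat4(const std::string &name, float x, float y, float z, float w) const
{
    glUniform4f(glGetUniformLocation(ID, name.c_str()), x, y, z, w);
}
</code></pre>

<p>
  With that in place, the color animation from the Uniforms section becomes a one-liner inside the render loop (after calling <fun>use</fun>): <code>ourShader.setFloat4("ourColor", 0.0f, greenValue, 0.0f, 1.0f);</code>.
</p>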
Once you've managed to do this, try to answer the following question: why is the bottom-left side of our triangle black? <a href="/code_viewer_gh.php?code=src/1.getting_started/3.6.shaders_exercise3/shaders_exercise3.cpp" target="_blank">solution</a>.</li>
+  </ol>
+
+  </div>
diff --git a/Getting-started/Textures.html b/Getting-started/Textures.html
@@ -0,0 +1,802 @@
+  <div id="content">
+
+    <h1 id="content-title">Textures</h1>
+<h1 id="content-url" style='display:none;'>Getting-started/Textures</h1>
+<p>
+  We learned that to add more detail to our objects we can use colors for each vertex to create some interesting images. However, to get a fair bit of realism we'd have to have many vertices so we could specify a lot of colors. This adds a considerable amount of extra overhead, since each model needs a lot more vertices and for each vertex a color attribute as well.
+</p>
+<p>
+  What artists and programmers generally prefer is to use a <def>texture</def>. A texture is a 2D image (even 1D and 3D textures exist) used to add detail to an object; think of a texture as a piece of paper with a nice brick image (for example) on it neatly folded over your 3D house so it looks like your house has a stone exterior. Because we can insert a lot of detail in a single image, we can give the illusion that the object is extremely detailed without having to specify extra vertices.
+</p>
+
+<note>
+  Next to images, textures can also be used to store a large collection of arbitrary data to send to the shaders, but we'll leave that for a different topic.
+</note>
+
+<p>
+  Below you'll see a texture image of a <a href="/img/textures/wall.jpg" target="_blank">brick wall</a> mapped to the triangle from the previous chapter.
+</p>
+
+<img src="/img/getting-started/textures.png" class="clean"/>
+
+<p>
+  In order to map a texture to the triangle we need to tell each vertex of the triangle which part of the texture it corresponds to. Each vertex should thus have a <def>texture coordinate</def> associated with it that specifies what part of the texture image to sample from. Fragment interpolation then does the rest for the other fragments.
+</p>
+
+<p>
+  Texture coordinates range from <code>0</code> to <code>1</code> on the <code>x</code> and <code>y</code> axes (remember that we use 2D texture images). Retrieving the texture color using texture coordinates is called <def>sampling</def>. Texture coordinates go from <code>(0,0)</code> at the lower-left corner of a texture image to <code>(1,1)</code> at the upper-right corner. The following image shows how we map texture coordinates to the triangle:
+</p>
+
+<img src="/img/getting-started/tex_coords.png"/>
+
+<p>
+  We specify 3 texture coordinate points for the triangle. We want the bottom-left side of the triangle to correspond with the bottom-left side of the texture so we use the <code>(0,0)</code> texture coordinate for the triangle's bottom-left vertex. The same applies to the bottom-right side with a <code>(1,0)</code> texture coordinate.
The top of the triangle should correspond with the top-center of the texture image so we take <code>(0.5,1.0)</code> as its texture coordinate. We only have to pass 3 texture coordinates to the vertex shader, which then passes those to the fragment shader that neatly interpolates all the texture coordinates for each fragment.
+</p>
+
+<p>
+  The resulting texture coordinates would then look like this:
+</p>
+
+<pre><code>
+float texCoords[] = {
+    0.0f, 0.0f,  // lower-left corner
+    1.0f, 0.0f,  // lower-right corner
+    0.5f, 1.0f   // top-center corner
+};
+</code></pre>
+
+<p>
+  Texture sampling has a loose interpretation and can be done in many different ways. It is thus our job to tell OpenGL how it should <em>sample</em> its textures.
+</p>
+
+<h2>Texture Wrapping</h2>
+<p>
+  Texture coordinates usually range from <code>(0,0)</code> to <code>(1,1)</code> but what happens if we specify coordinates outside this range? The default behavior of OpenGL is to repeat the texture images (we basically ignore the integer part of the floating point texture coordinate), but there are more options OpenGL offers:
+</p>
+
+  <ul>
+    <li><var>GL_REPEAT</var>: The default behavior for textures. Repeats the texture image.</li>
+    <li><var>GL_MIRRORED_REPEAT</var>: Same as <var>GL_REPEAT</var> but mirrors the image with each repeat.</li>
+    <li><var>GL_CLAMP_TO_EDGE</var>: Clamps the coordinates between <code>0</code> and <code>1</code>. The result is that higher coordinates become clamped to the edge, resulting in a stretched edge pattern.</li>
+    <li><var>GL_CLAMP_TO_BORDER</var>: Coordinates outside the range are now given a user-specified border color.</li>
+  </ul>
+
+<p>
+  Each of the options has a different visual output when using texture coordinates outside the default range. Let's see what these look like on a sample texture image (original image by Hólger Rezende):
+</p>
+
+<img src="/img/getting-started/texture_wrapping.png" class="clean"/>
+
+<p>
+  Each of the aforementioned options can be set per coordinate axis (<code>s</code>, <code>t</code> (and <code>r</code> if you're using 3D textures) equivalent to <code>x</code>,<code>y</code>,<code>z</code>) with the <fun><function id='15'>glTexParameter</function>*</fun> function:
+</p>
+
+<pre><code>
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
+</code></pre>
+
+<p>
+  The first argument specifies the texture target; we're working with 2D textures so the texture target is <var>GL_TEXTURE_2D</var>. The second argument requires us to specify which option we want to set and for which texture axis; we want to configure it for both the <code>S</code> and <code>T</code> axes. The last argument requires us to pass in the texture wrapping mode we'd like; in this case OpenGL will set the texture wrapping option on the currently bound texture to <var>GL_MIRRORED_REPEAT</var>.
+</p>
+
+<p>
+  If we choose the <var>GL_CLAMP_TO_BORDER</var> option we should also specify a border color.
This is done using the <code>fv</code> equivalent of the <fun><function id='15'>glTexParameter</function></fun> function with <var>GL_TEXTURE_BORDER_COLOR</var> as its option, where we pass in a float array of the border's color value:
+</p>
+
+<pre><code>
+float borderColor[] = { 1.0f, 1.0f, 0.0f, 1.0f };
+<function id='15'>glTexParameter</function>fv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
+</code></pre>
+
+<h2>Texture Filtering</h2>
+<p>
+  Texture coordinates do not depend on resolution but can be any floating point value, thus OpenGL has to figure out which texture pixel (also known as a <def>texel</def>) to map the texture coordinate to. This becomes especially important if you have a very large object and a low resolution texture. You probably guessed by now that OpenGL has options for this <def>texture filtering</def> as well. There are several options available but for now we'll discuss the most important options: <var>GL_NEAREST</var> and <var>GL_LINEAR</var>.
+</p>
+
+<p>
+  <var>GL_NEAREST</var> (also known as <def>nearest neighbor</def> or <def>point</def> filtering) is the default texture filtering method of OpenGL. When set to <var>GL_NEAREST</var>, OpenGL selects the texel whose center is closest to the texture coordinate. Below you can see 4 pixels where the cross represents the exact texture coordinate. The upper-left texel has its center closest to the texture coordinate and is therefore chosen as the sampled color:
+</p>
+
+  <img src="/img/getting-started/filter_nearest.png" class="clean"/>
+
+<p>
+  <var>GL_LINEAR</var> (also known as <def>(bi)linear filtering</def>) takes an interpolated value from the texture coordinate's neighboring texels, approximating a color between the texels. The smaller the distance from the texture coordinate to a texel's center, the more that texel's color contributes to the sampled color. Below we can see that a mixed color of the neighboring pixels is returned:
+</p>
+
+<img src="/img/getting-started/filter_linear.png" class="clean"/>
+
+<p>
+  But what is the visual effect of such a texture filtering method? Let's see how these methods work when using a texture with a low resolution on a large object (texture is therefore scaled upwards and individual texels are noticeable):
+</p>
+
+  <img src="/img/getting-started/texture_filtering.png" class="clean"/>
+
+<p>
+  <var>GL_NEAREST</var> results in blocky patterns where we can clearly see the pixels that form the texture, while <var>GL_LINEAR</var> produces a smoother pattern where the individual pixels are less visible. <var>GL_LINEAR</var> produces a more realistic output, but some developers prefer a more 8-bit look and as a result pick the <var>GL_NEAREST</var> option.
+  </p>
+
+<p>
+  Texture filtering can be set for <def>magnifying</def> and <def>minifying</def> operations (when scaling up or downwards) so you could for example use nearest neighbor filtering when textures are scaled downwards and linear filtering for upscaled textures. We thus have to specify the filtering method for both options via <fun><function id='15'>glTexParameter</function>*</fun>. The code should look similar to setting the wrapping method:
+</p>
+
+<pre><code>
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+</code></pre>
+
+<h3>Mipmaps</h3>
+<p>
+  Imagine we had a large room with thousands of objects, each with an attached texture.
There will be objects far away that have the same high resolution texture attached as the objects close to the viewer. Since the objects are far away and probably only produce a few fragments, OpenGL has difficulties retrieving the right color value for its fragment from the high resolution texture, since it has to pick a texture color for a fragment that spans a large part of the texture. This will produce visible artifacts on small objects, not to mention the waste of memory bandwidth using high resolution textures on small objects.
+</p>
+
+<p>
+  To solve this issue OpenGL uses a concept called <def>mipmaps</def>, which is basically a collection of texture images where each subsequent texture is half the size of the previous one. The idea behind mipmaps should be easy to understand: after a certain distance threshold from the viewer, OpenGL will use a different mipmap texture that best suits the distance to the object. Because the object is far away, the smaller resolution will not be noticeable to the user. OpenGL is then able to sample the correct texels, and there's less cache memory involved when sampling that part of the mipmaps. Let's take a closer look at what a mipmapped texture looks like:
+</p>
+
+<img src="/img/getting-started/mipmaps.png" class="clean"/>
+
+<p>
+  Creating a collection of mipmapped textures for each texture image is cumbersome to do manually, but luckily OpenGL is able to do all the work for us with a single call to <fun><function id='51'>glGenerateMipmap</function></fun> after we've created a texture.
+</p>
+
+<p>
+  When switching between mipmap levels during rendering OpenGL may show some artifacts like sharp edges visible between the two mipmap layers. Just like normal texture filtering, it is also possible to filter between mipmap levels using <var>NEAREST</var> and <var>LINEAR</var> filtering. To specify the filtering method between mipmap levels we can replace the original filtering methods with one of the following four options:
+</p>
+
+  <ul>
+    <li><var>GL_NEAREST_MIPMAP_NEAREST</var>: takes the nearest mipmap to match the pixel size and uses nearest neighbor interpolation for texture sampling.</li>
+    <li><var>GL_LINEAR_MIPMAP_NEAREST</var>: takes the nearest mipmap level and samples that level using linear interpolation.</li>
+    <li><var>GL_NEAREST_MIPMAP_LINEAR</var>: linearly interpolates between the two mipmaps that most closely match the size of a pixel and samples the interpolated level via nearest neighbor interpolation.</li>
+    <li><var>GL_LINEAR_MIPMAP_LINEAR</var>: linearly interpolates between the two closest mipmaps and samples the interpolated level via linear interpolation.</li>
+  </ul>
+
+<p>
+  Just like texture filtering we can set the filtering method to one of the 4 aforementioned methods using <fun><function id='15'>glTexParameter</function>i</fun>:
+</p>
+
+<pre><code>
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+</code></pre>
+
+<p>
+  A common mistake is to set one of the mipmap filtering options as the magnification filter. This doesn't have any effect since mipmaps are primarily used for when textures get downscaled: texture magnification doesn't use mipmaps and giving it a mipmap filtering option will generate an OpenGL <var>GL_INVALID_ENUM</var> error code.
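+  To make that last point concrete, here is a small illustrative sketch (my addition, not part of the original chapter) of the incorrect call next to a valid configuration:
+</p>
+
+<pre><code>
+// incorrect: GL_TEXTURE_MAG_FILTER only accepts GL_NEAREST or GL_LINEAR,
+// so this call generates a GL_INVALID_ENUM error (visible via glGetError()):
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+
+// correct: mipmap filtering only applies to minification:
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
+<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
+</code></pre>
+
+<p>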
+</p>
+
+<h1>Loading and creating textures</h1>
+<p>
+  The first thing we need to do to actually use textures is to load them into our application.
+  Texture images can be stored in dozens of file formats, each with their own structure and ordering of data, so how do we get those images into our application? One solution would be to choose a file format we'd like to use, say <code>.PNG</code>, and write our own image loader to convert the image format into a large array of bytes. While it's not very hard to write your own image loader, it's still cumbersome, and what if you want to support more file formats? You'd then have to write an image loader for each format you want to support.
+</p>
+
+<p>
+  Another solution, and probably a good one, is to use an image-loading library that supports several popular formats and does all the hard work for us: a library like <code>stb_image.h</code>.
+</p>
+
+<h2>stb_image.h</h2>
+<p>
+  <code>stb_image.h</code> is a very popular single header image loading library by <a href="https://github.com/nothings" target="_blank">Sean Barrett</a> that is able to load most popular file formats and is easy to integrate into your project(s). <code>stb_image.h</code> can be downloaded from <a href="https://github.com/nothings/stb/blob/master/stb_image.h" target="_blank">here</a>. Simply download the single header file, add it to your project as <code>stb_image.h</code>, and create an additional C++ file with the following code:
+</p>
+
+<pre><code>
+#define STB_IMAGE_IMPLEMENTATION
+#include "stb_image.h"
+</code></pre>
+
+<p>
+  By defining <var>STB_IMAGE_IMPLEMENTATION</var> the preprocessor modifies the header file such that it only contains the relevant definition source code, effectively turning the header file into a <code>.cpp</code> file, and that's about it. Now simply include <code>stb_image.h</code> somewhere in your program and compile.
+</p>
+
+<p>
+  For the following texture sections we're going to use an image of a <a href="/img/textures/container.jpg" target="_blank">wooden container</a>.
+  To load an image using <code>stb_image.h</code> we use its <fun>stbi_load</fun> function:
+</p>
+
+<pre><code>
+int width, height, nrChannels;
+unsigned char *data = stbi_load("container.jpg", &amp;width, &amp;height, &amp;nrChannels, 0);
+</code></pre>
+
+<p>
+  The function first takes as input the location of an image file. It then expects you to give three <code>ints</code> as its second, third and fourth arguments, which <code>stb_image.h</code> will fill with the resulting image's <em>width</em>, <em>height</em> and <em>number</em> of color channels. We need the image's width and height for generating textures later on. <!--The last argument allows us to force a number of channels. Let's say the image has 4 channels (RGBA) and we only want to load the 3 color channels (RGB) without alpha, we set its last argument to <code>3</code>. -->
+</p>
+
+<h2>Generating a texture</h2>
+<p>
+  Like any of the previous objects in OpenGL, textures are referenced with an ID; let's create one:
+</p>
+
+<pre class="cpp"><code>
+unsigned int texture;
+<function id='50'>glGenTextures</function>(1, &amp;texture);
+</code></pre>
+
+<p>
+  The <fun><function id='50'>glGenTextures</function></fun> function first takes as input how many textures we want to generate and stores them in an <code>unsigned int</code> array given as its second argument (in our case just a single <code>unsigned int</code>).
Just like other objects we need to bind it so any subsequent texture commands will configure the currently bound texture:
+</p>
+
+<pre><code>
+<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture);
+</code></pre>
+
+<p>
+  Now that the texture is bound, we can start generating a texture using the previously loaded image data. Textures are generated with <fun><function id='52'>glTexImage2D</function></fun>:
+</p>
+
+<pre class="cpp"><code>
+<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
+<function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+</code></pre>
+
+<p>
+  This is a large function with quite a few parameters so we'll walk through them step-by-step:
+  <ul>
+    <li>The first argument specifies the texture target; setting this to <var>GL_TEXTURE_2D</var> means this operation will generate a texture on the currently bound texture object at the same target (so any textures bound to targets <var>GL_TEXTURE_1D</var> or <var>GL_TEXTURE_3D</var> will not be affected).</li>
+    <li>The second argument specifies the mipmap level for which we want to create a texture, in case you want to set each mipmap level manually, but we'll leave it at the base level which is <code>0</code>.</li>
+    <li>The third argument tells OpenGL in what kind of format we want to store the texture. Our image has only <code>RGB</code> values so we'll store the texture with <code>RGB</code> values as well.</li>
+    <li>The 4th and 5th argument set the width and height of the resulting texture. We stored those earlier when loading the image so we'll use the corresponding variables.</li>
+    <li>The next argument should always be <code>0</code> (some legacy stuff).</li>
+    <li>The 7th and 8th argument specify the format and datatype of the source image. We loaded the image with <code>RGB</code> values and stored them as <code>char</code>s (bytes) so we'll pass in the corresponding values.</li>
+    <li>The last argument is the actual image data.</li>
+  </ul>
+</p>
+
+<p>
+  Once <fun><function id='52'>glTexImage2D</function></fun> is called, the currently bound texture object now has the texture image attached to it. However, currently it only has the base-level of the texture image loaded; if we want to use mipmaps we have to specify all the different images manually (by continually incrementing the second argument, as sketched below) or, alternatively, we can call <fun><function id='51'>glGenerateMipmap</function></fun> after generating the texture. This will automatically generate all the required mipmaps for the currently bound texture.
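+</p>
+
+<p>
+  For illustration only (this sketch is my addition and uses hypothetical <code>level0Data</code>, <code>level1Data</code> and <code>level2Data</code> buffers), manually specifying the first few mipmap levels of a 256x256 texture versus generating them automatically could look like this:
+</p>
+
+<pre><code>
+// manual route: upload a pre-downscaled image for every mipmap level
+<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, 256, 256, 0, GL_RGB, GL_UNSIGNED_BYTE, level0Data);
+<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 1, GL_RGB, 128, 128, 0, GL_RGB, GL_UNSIGNED_BYTE, level1Data);
+<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 2, GL_RGB,  64,  64, 0, GL_RGB, GL_UNSIGNED_BYTE, level2Data);
+// ...and so on, down to a 1x1 level; or simply let OpenGL build the whole chain:
+<function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+</code></pre>
+
+<p>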
+</p> + +<p> + After we're done generating the texture and its corresponding mipmaps, it is good practice to free the image memory: +</p> + +<pre class="cpp"><code> +stbi_image_free(data); +</code></pre> + +<p> + The whole process of generating a texture thus looks something like this: +</p> + +<pre><code> +unsigned int texture; +<function id='50'>glGenTextures</function>(1, &amp;texture); +<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture); +// set the texture wrapping/filtering options (on the currently bound texture object) +<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT); +<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT); +<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR); +<function id='15'>glTexParameter</function>i(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); +// load and generate the texture +int width, height, nrChannels; +unsigned char *data = stbi_load("container.jpg", &amp;width, &amp;height, &amp;nrChannels, 0); +if (data) +{ + <function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data); + <function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D); +} +else +{ + std::cout &lt;&lt; "Failed to load texture" &lt;&lt; std::endl; +} +stbi_image_free(data); +</code></pre> + +<h2>Applying textures</h2> +<p> + For the upcoming sections we will use the rectangle shape drawn with <fun><function id='2'>glDrawElements</function></fun> from the final part of the <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">Hello Triangle</a> chapter. + We need to inform OpenGL how to sample the texture so we'll have to update the vertex data with the texture coordinates: +</p> + +<pre><code> +float vertices[] = { + // positions // colors // texture coords + 0.5f, 0.5f, 0.0f, 1.0f, 0.0f, 0.0f, 1.0f, 1.0f, // top right + 0.5f, -0.5f, 0.0f, 0.0f, 1.0f, 0.0f, 1.0f, 0.0f, // bottom right + -0.5f, -0.5f, 0.0f, 0.0f, 0.0f, 1.0f, 0.0f, 0.0f, // bottom left + -0.5f, 0.5f, 0.0f, 1.0f, 1.0f, 0.0f, 0.0f, 1.0f // top left +}; +</code></pre> + +<p> + Since we've added an extra vertex attribute we again have to notify OpenGL of the new vertex format: +</p> + +<img src="/img/getting-started/vertex_attribute_pointer_interleaved_textures.png" class="clean" alt="Image of VBO with interleaved position, color and texture data with strides and offsets shown for configuring vertex attribute pointers."/> + +<pre><code> +<function id='30'>glVertexAttribPointer</function>(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float))); +<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(2); +</code></pre> + +<p> + Note that we have to adjust the stride parameter of the previous two vertex attributes to <code>8 * sizeof(float)</code> as well. +</p> + +<p> + Next we need to alter the vertex shader to accept the texture coordinates as a vertex attribute and then forward the coordinates to the fragment shader: +</p> + +<pre><code> +#version 330 core +layout (location = 0) in vec3 aPos; +layout (location = 1) in vec3 aColor; +layout (location = 2) in vec2 aTexCoord; + +out vec3 ourColor; +out vec2 TexCoord; + +void main() +{ + gl_Position = vec4(aPos, 1.0); + ourColor = aColor; + TexCoord = aTexCoord; +} +</code></pre> + +<p> + The fragment shader should then accept the <code>TexCoord</code> output variable as an input variable. 
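+</p>
+
+<p>
+  For completeness, the full attribute configuration with the adjusted stride mentioned above could then look as follows (a recap sketch of the three calls, assuming the same interleaved position/color/texture-coordinate layout):
+</p>
+
+<pre><code>
+// position attribute (location 0): 3 floats, starting at offset 0
+<function id='30'>glVertexAttribPointer</function>(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
+<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(0);
+// color attribute (location 1): 3 floats, right after the position
+<function id='30'>glVertexAttribPointer</function>(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
+<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(1);
+// texture coordinate attribute (location 2): 2 floats, after position and color
+<function id='30'>glVertexAttribPointer</function>(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
+<function id='29'><function id='60'>glEnable</function>VertexAttribArray</function>(2);
+</code></pre>
+
+<p>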
+</p> + +<p> + The fragment shader should also have access to the texture object, but how do we pass the texture object to the fragment shader? GLSL has a built-in data-type for texture objects called a <def>sampler</def> that takes as a postfix the texture type we want e.g. <code>sampler1D</code>, <code>sampler3D</code> or in our case <code>sampler2D</code>. We can then add a texture to the fragment shader by simply declaring a <code>uniform sampler2D</code> that we later assign our texture to. +</p> + +<pre><code> +#version 330 core +out vec4 FragColor; + +in vec3 ourColor; +in vec2 TexCoord; + +uniform sampler2D ourTexture; + +void main() +{ + FragColor = texture(ourTexture, TexCoord); +} +</code></pre> + +<p> + To sample the color of a texture we use GLSL's built-in <fun>texture</fun> function that takes as its first argument a texture sampler and as its second argument the corresponding texture coordinates. The <fun>texture</fun> function then samples the corresponding color value using the texture parameters we set earlier. The output of this fragment shader is then the (filtered) color of the texture at the (interpolated) texture coordinate. +</p> + +<p> + All that's left to do now is to bind the texture before calling <fun><function id='2'>glDrawElements</function></fun> and it will then automatically assign the texture to the fragment shader's sampler: +</p> + +<pre class="cpp"><code> +<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture); +<function id='27'>glBindVertexArray</function>(VAO); +<function id='2'>glDrawElements</function>(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0); +</code></pre> + +<p> + If you did everything right you should see the following image: +</p> + +<img src="/img/getting-started/textures2.png" class="clean"/> + +<p> + If your rectangle is completely white or black you probably made an error along the way. Check your shader logs and try to compare your code with the application's <a href="/code_viewer_gh.php?code=src/1.getting_started/4.1.textures/textures.cpp" target="_blank">source code</a>. +</p> + +<warning> + If your texture code doesn't work or shows up as completely black, continue reading and work your way to the last example that <strong>should</strong> work. On some drivers it is <strong>required</strong> to assign a texture unit to each sampler uniform, which is something we'll discuss further in this chapter. +</warning> + +<p> + To get a little funky we can also mix the resulting texture color with the vertex colors. We simply multiply the resulting texture color with the vertex color in the fragment shader to mix both colors: +</p> + +<pre><code> +FragColor = texture(ourTexture, TexCoord) * vec4(ourColor, 1.0); +</code></pre> + +<p> + The result should be a mixture of the vertex's color and the texture's color: +</p> + +<img src="/img/getting-started/textures_funky.png" class="clean"/> + +<p> + I guess you could say our container likes to disco. +</p> + +<h2>Texture Units</h2> +<p> + You probably wondered why the <code>sampler2D</code> variable is a uniform if we didn't even assign it some value with <fun><function id='44'>glUniform</function></fun>. Using <fun><function id='44'>glUniform</function>1i</fun> we can actually assign a <em>location</em> value to the texture sampler so we can set multiple textures at once in a fragment shader. This location of a texture is more commonly known as a <def>texture unit</def>. 
The default texture unit for a texture is <code>0</code>, which is the default active texture unit, so we didn't need to assign a location in the previous section. Note that not all graphics drivers assign a default texture unit, so the previous section may not have rendered for you.
+</p>
+
+<p>
+  The main purpose of texture units is to allow us to use more than one texture in our shaders. By assigning texture units to the samplers, we can bind to multiple textures at once as long as we activate the corresponding texture unit first. Just like <fun><function id='48'>glBindTexture</function></fun> we can activate texture units using <fun><function id='49'>glActiveTexture</function></fun>, passing in the texture unit we'd like to use:
+</p>
+
+<pre class="cpp"><code>
+<function id='49'>glActiveTexture</function>(GL_TEXTURE0); // activate the texture unit first before binding texture
+<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture);
+</code></pre>
+
+<p>
+  After activating a texture unit, a subsequent <fun><function id='48'>glBindTexture</function></fun> call will bind that texture to the currently active texture unit. Texture unit <var>GL_TEXTURE0</var> is always by default activated, so we didn't have to activate any texture units in the previous example when using <fun><function id='48'>glBindTexture</function></fun>.
+</p>
+
+<note>
+  OpenGL should have at least 16 texture units for you to use, which you can activate using <var>GL_TEXTURE0</var> to <var>GL_TEXTURE15</var>. They are defined in order so we could also get <var>GL_TEXTURE8</var> via <var>GL_TEXTURE0 + 8</var> for example, which is useful when we'd have to loop over several texture units.
+</note>
+
+<p>
+  We still, however, need to edit the fragment shader to accept another sampler. This should be relatively straightforward now:
+</p>
+
+<pre><code>
+#version 330 core
+...
+
+uniform sampler2D texture1;
+uniform sampler2D texture2;
+
+void main()
+{
+    FragColor = mix(texture(texture1, TexCoord), texture(texture2, TexCoord), 0.2);
+}
+</code></pre>
+
+<p>
+  The final output color is now the combination of two texture lookups. GLSL's built-in <fun>mix</fun> function takes two values as input and linearly interpolates between them based on its third argument (<code>mix(a, b, t)</code> returns <code>a*(1-t) + b*t</code>). If the third value is <code>0.0</code> it returns the first input; if it's <code>1.0</code> it returns the second input value. A value of <code>0.2</code> will return <code>80%</code> of the first input color and <code>20%</code> of the second input color, resulting in a mixture of both our textures.
+</p>
+
+<p>
+  We now want to load and create another texture; you should be familiar with the steps now. Make sure to create another texture object, load the image and generate the final texture using <fun><function id='52'>glTexImage2D</function></fun>. For the second texture we'll use an image of your <a href="/img/textures/awesomeface.png" target="_blank">facial expression while learning OpenGL</a>:
+</p>
+
+<pre><code>
+unsigned char *data = stbi_load("awesomeface.png", &amp;width, &amp;height, &amp;nrChannels, 0);
+if (data)
+{
+    <function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
+    <function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+}
+</code></pre>
+
+<p>
+  Note that we now load a <code>.png</code> image that includes an alpha (transparency) channel.
This means we now need to specify that the image data contains an alpha channel as well by using <var>GL_RGBA</var>; otherwise OpenGL will incorrectly interpret the image data.
+</p>
+
+<p>
+  To use the second texture (and the first texture) we'd have to change the rendering procedure a bit by binding both textures to the corresponding texture unit:
+</p>
+
+<pre><code>
+<function id='49'>glActiveTexture</function>(GL_TEXTURE0);
+<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture1);
+<function id='49'>glActiveTexture</function>(GL_TEXTURE1);
+<function id='48'>glBindTexture</function>(GL_TEXTURE_2D, texture2);
+
+<function id='27'>glBindVertexArray</function>(VAO);
+<function id='2'>glDrawElements</function>(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
+</code></pre>
+
+<p>
+  We also have to tell OpenGL which texture unit each shader sampler belongs to by setting each sampler using <fun><function id='44'>glUniform</function>1i</fun>. We only have to set this once, so we can do this before we enter the render loop:
+</p>
+
+<pre><code>
+ourShader.use(); // don't forget to activate the shader before setting uniforms!
+<function id='44'>glUniform</function>1i(<function id='45'>glGetUniformLocation</function>(ourShader.ID, "texture1"), 0); // set it manually
+ourShader.setInt("texture2", 1); // or with shader class
+
+while(...)
+{
+    [...]
+}
+</code></pre>
+
+<p>
+  By setting the samplers via <fun><function id='44'>glUniform</function>1i</fun> we make sure each uniform sampler corresponds to the proper texture unit. You should get the following result:
+</p>
+
+<img src="/img/getting-started/textures_combined.png" class="clean"/>
+
+<p>
+  You probably noticed that the texture is flipped upside-down! This happens because OpenGL expects the <code>0.0</code> coordinate on the y-axis to be on the bottom side of the image, but images usually have <code>0.0</code> at the top of the y-axis. Luckily for us, <code>stb_image.h</code> can flip the y-axis during image loading; simply add the following statement before loading any image:
+</p>
+
+<pre><code>
+stbi_set_flip_vertically_on_load(true);
+</code></pre>
+
+<p>
+  After telling <code>stb_image.h</code> to flip the y-axis when loading images you should get the following result:
+</p>
+
+<img src="/img/getting-started/textures_combined2.png" class="clean"/>
+
+<p>
+  If you see one happy container, you did things right. You can compare it with the <a href="/code_viewer_gh.php?code=src/1.getting_started/4.2.textures_combined/textures_combined.cpp" target="_blank">source code</a>.
+</p>
+
+<h2>Exercises</h2>
+<p>
+  To get more comfortable with textures it is advised to work through these exercises before continuing.
+  <ul>
+    <li>Make sure <strong>only</strong> the happy face looks in the other/reverse direction by changing the fragment shader: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.3.textures_exercise1/textures_exercise1.cpp" target="_blank">solution</a>.</li>
+    <li>Experiment with the different texture wrapping methods by specifying texture coordinates in the range <code>0.0f</code> to <code>2.0f</code> instead of <code>0.0f</code> to <code>1.0f</code>. See if you can display 4 smiley faces on a single container image clamped at its edge: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.4.textures_exercise2/textures_exercise2.cpp" target="_blank">solution</a>, <a href="/img/getting-started/textures_exercise2.png" target="_blank">result</a>.
See if you can experiment with other wrapping methods as well.</li>
+    <li>Try to display only the center pixels of the texture image on the rectangle, in such a way that the individual pixels become visible, by changing the texture coordinates. Try to set the texture filtering method to <var>GL_NEAREST</var> to see the pixels more clearly: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.5.textures_exercise3/textures_exercise3.cpp" target="_blank">solution</a>.</li>
+    <li>Use a uniform variable as the <fun>mix</fun> function's third parameter to vary how much of each of the two textures is visible. Use the up and down arrow keys to change how much the container or the smiley face is visible: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.6.textures_exercise4/textures_exercise4.cpp" target="_blank">solution</a>.</li>
+  </ul>
+</p>
+
+  </div>
diff --git a/Getting-started/Transformations.html b/Getting-started/Transformations.html
@@ -0,0 +1,837 @@
autoNumber: "AMS" } } + }); + </script> + <script>hljs.initHighlightingOnLoad();</script> + <script> + $(document).ready(function() { + // check if user visited from the old # based urls, re-direct to ?p= form + if(window.location.hash) + { + var name = window.location.hash.substring(2); + // name = name.replace(/-/g," "); + var index = name.indexOf('#'); // Remove any hash fragments from the url (Disquss adds hash fragments for comments, but results in 404 pages) + if(index >= 0) + name = name.substring(0, index); + + window.location.href = "https://learnopengl.com/" + name; + } else { + // Check if data has been succesfully loaded, if so: change title bar as ajax hash fragment + var title = $('#content-url').text(); + + // Refresh syntax highlighting + // $('pre').each(function(i, e) {hljs.highlightBlock(e)}); + + // Reset DISQUS + // if(title == '/dev/') + // title = ''; + // alert('hoi'); + + // Adjust ads for correct bottom positioning based on content size + window.setTimeout(function() { + AdPositioning(); + }, 3000); + + + // set API resets after time-out (once content is properly loaded) + window.setTimeout(function() { + MathJax.Hub.Queue(["Typeset",MathJax.Hub]); + MathJax.Hub.Queue(["resetEquationNumbers", MathJax.InputJax.TeX]); + + var page_url = title == "" ? "http://www.learnopengl.com/" : "http://www.learnopengl.com/" + title; + if(typeof DISQUS !== 'undefined') { + DISQUS.reset({ + reload: true, + config: function () { + this.page.identifier = title; + this.page.url = page_url; + } + }); + $('#disqus_thread').show(); + } + // Refresh callbacks on <function> tags + SetFunctionTagCallbacks(); + }, 1000); + + // Zet ook de juiste button op 'selected' + $('#nav li span, #nav li a').removeClass('selected'); + if(title != '') + { + $('#nav li[id=\'' + title + '\']').children('span, a').addClass('selected'); + } + // En open menu waar nodig + var parents = $('#nav span.selected, #nav a.selected').parents('li').children('span.closed, a.closed'); + var index = 0; + for(index = parents.length - 1; index >= 0; index--) + { + + var id = $(parents[index]).attr("id").replace( /^\D+/g, ''); + MenuClick(id, false); + } + + } + }); + // var initialized = false; + // window.onpopstate = function() { + // if(initialized) + // LoadPage(); + // else + // initialized = true; + // }; + + // Set up DISQUS + // $(document).ready(function() { + var disqus_shortname = 'learnopengl'; + (function() { + var dsq = document.createElement('script'); dsq.type = 'text/javascript'; dsq.async = true; + dsq.src = '//' + disqus_shortname + '.disqus.com/embed.js'; + (document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(dsq); + })(); + // }); + </script> +</head> +<body> +<a href="https://learnopengl.com"> +<div id="header"> +</div> +</a> + +<div id="supercontainer"> + <!-- 728x90/320x50 --> + <div id="header_ad"> + <div id="waldo-tag-6194"></div> + </div> + <div id="rightad_container"> + <div id="rightad"> + <!-- /8491498/learnopengl_video --> + <!--<div id='div-gpt-ad-1540574378241-0' style='height:225px; width:300px;'> + <script> + googletag.cmd.push(function() { googletag.display('div-gpt-ad-1540574378241-0'); }); + </script> + </div> + <br/>--> + + <div id="waldo-tag-1715"></div> + </div> + + <div id="admessage"> + If you're running AdBlock, please consider whitelisting this site if you'd like to support LearnOpenGL; and no worries, I won't be mad if you don't :) + <!--<br/><br/> + Also, check out this little local multiplayer-only game I've made: <a 
href="https://store.steampowered.com/app/983590/Tank_Blazers/" target="_blank">Tank Blazers</a>. + <br/> + <a href="https://store.steampowered.com/app/983590/Tank_Blazers" target="_blank"><img src="/img/tank_blazers.jpg" style="width:278px; margin-top: 9px; margin-left: -3px;"/></a>--> + </div> + + <div id="rightonethirdad"> + <div id="waldo-tag-2246"></div> + </div> + + <div id="rightbottomad"> + <div id="waldo-tag-2247"></div> + </div> + </div> + <div id="container"> + <div id="loading"></div> +<script> +$(document).ready(function() { +$('#menu-item4').mousedown(function() { MenuClick(4, true) }); +$('#menu-item48').mousedown(function() { MenuClick(48, true) }); +$('#menu-item56').mousedown(function() { MenuClick(56, true) }); +$('#menu-item63').mousedown(function() { MenuClick(63, true) }); +$('#menu-item100').mousedown(function() { MenuClick(100, true) }); +$('#menu-item102').mousedown(function() { MenuClick(102, true) }); +$('#menu-item113').mousedown(function() { MenuClick(113, true) }); +$('#menu-item116').mousedown(function() { MenuClick(116, true) }); +$('#menu-item78').mousedown(function() { MenuClick(78, true) }); +$('#menu-item81').mousedown(function() { MenuClick(81, true) }); +$('#menu-item85').mousedown(function() { MenuClick(85, true) }); +$('#menu-item125').mousedown(function() { MenuClick(125, true) }); +$('#menu-item128').mousedown(function() { MenuClick(128, true) }); +$('#menu-item129').mousedown(function() { MenuClick(129, true) }); +$('#menu-item133').mousedown(function() { MenuClick(133, true) }); +$('#menu-item134').mousedown(function() { MenuClick(134, true) }); +}); +</script> + <div id="nav"> + <div id="social"> + <a href="https://github.com/JoeyDeVries/LearnOpenGL" target="_blank"> + <img src="/img/github.png" class="social_ico"> + </a> + <!-- <a href="https://www.facebook.com/Learnopengl-2199631333595544/" target="_blank"> + <img src="/img/facebook.png" class="social_ico"> + </a>--> + <a href="https://twitter.com/JoeyDeVriez" target="_blank"> + <img src="/img/twitter.png" class="social_ico"> + </a> + + </div> + <img src='img/nav-button_bottom-arrow.png' style='display: none'><ol><li id='Introduction'><a id="menu-item1" href="https://learnopengl.com/Introduction">Introduction </a></li><li id='Getting-started'><span id="menu-item4" class="closed">Getting started </span><ol id="menu-items-of4" style="display:none;"><li id='Getting-started/OpenGL'><a id="menu-item49" href="https://learnopengl.com/Getting-started/OpenGL">OpenGL </a></li><li id='Getting-started/Creating-a-window'><a id="menu-item5" href="https://learnopengl.com/Getting-started/Creating-a-window">Creating a window </a></li><li id='Getting-started/Hello-Window'><a id="menu-item6" href="https://learnopengl.com/Getting-started/Hello-Window">Hello Window </a></li><li id='Getting-started/Hello-Triangle'><a id="menu-item38" href="https://learnopengl.com/Getting-started/Hello-Triangle">Hello Triangle </a></li><li id='Getting-started/Shaders'><a id="menu-item39" href="https://learnopengl.com/Getting-started/Shaders">Shaders </a></li><li id='Getting-started/Textures'><a id="menu-item40" href="https://learnopengl.com/Getting-started/Textures">Textures </a></li><li id='Getting-started/Transformations'><a id="menu-item43" href="https://learnopengl.com/Getting-started/Transformations">Transformations </a></li><li id='Getting-started/Coordinate-Systems'><a id="menu-item44" href="https://learnopengl.com/Getting-started/Coordinate-Systems">Coordinate Systems </a></li><li id='Getting-started/Camera'><a 
id="menu-item47" href="https://learnopengl.com/Getting-started/Camera">Camera </a></li><li id='Getting-started/Review'><a id="menu-item50" href="https://learnopengl.com/Getting-started/Review">Review </a></li></ol></li><li id='Lighting'><span id="menu-item48" class="closed">Lighting </span><ol id="menu-items-of48" style="display:none;"><li id='Lighting/Colors'><a id="menu-item51" href="https://learnopengl.com/Lighting/Colors">Colors </a></li><li id='Lighting/Basic-Lighting'><a id="menu-item52" href="https://learnopengl.com/Lighting/Basic-Lighting">Basic Lighting </a></li><li id='Lighting/Materials'><a id="menu-item53" href="https://learnopengl.com/Lighting/Materials">Materials </a></li><li id='Lighting/Lighting-maps'><a id="menu-item54" href="https://learnopengl.com/Lighting/Lighting-maps">Lighting maps </a></li><li id='Lighting/Light-casters'><a id="menu-item55" href="https://learnopengl.com/Lighting/Light-casters">Light casters </a></li><li id='Lighting/Multiple-lights'><a id="menu-item58" href="https://learnopengl.com/Lighting/Multiple-lights">Multiple lights </a></li><li id='Lighting/Review'><a id="menu-item57" href="https://learnopengl.com/Lighting/Review">Review </a></li></ol></li><li id='Model-Loading'><span id="menu-item56" class="closed">Model Loading </span><ol id="menu-items-of56" style="display:none;"><li id='Model-Loading/Assimp'><a id="menu-item59" href="https://learnopengl.com/Model-Loading/Assimp">Assimp </a></li><li id='Model-Loading/Mesh'><a id="menu-item60" href="https://learnopengl.com/Model-Loading/Mesh">Mesh </a></li><li id='Model-Loading/Model'><a id="menu-item61" href="https://learnopengl.com/Model-Loading/Model">Model </a></li></ol></li><li id='Advanced-OpenGL'><span id="menu-item63" class="closed">Advanced OpenGL </span><ol id="menu-items-of63" style="display:none;"><li id='Advanced-OpenGL/Depth-testing'><a id="menu-item72" href="https://learnopengl.com/Advanced-OpenGL/Depth-testing">Depth testing </a></li><li id='Advanced-OpenGL/Stencil-testing'><a id="menu-item73" href="https://learnopengl.com/Advanced-OpenGL/Stencil-testing">Stencil testing </a></li><li id='Advanced-OpenGL/Blending'><a id="menu-item74" href="https://learnopengl.com/Advanced-OpenGL/Blending">Blending </a></li><li id='Advanced-OpenGL/Face-culling'><a id="menu-item77" href="https://learnopengl.com/Advanced-OpenGL/Face-culling">Face culling </a></li><li id='Advanced-OpenGL/Framebuffers'><a id="menu-item65" href="https://learnopengl.com/Advanced-OpenGL/Framebuffers">Framebuffers </a></li><li id='Advanced-OpenGL/Cubemaps'><a id="menu-item66" href="https://learnopengl.com/Advanced-OpenGL/Cubemaps">Cubemaps </a></li><li id='Advanced-OpenGL/Advanced-Data'><a id="menu-item69" href="https://learnopengl.com/Advanced-OpenGL/Advanced-Data">Advanced Data </a></li><li id='Advanced-OpenGL/Advanced-GLSL'><a id="menu-item67" href="https://learnopengl.com/Advanced-OpenGL/Advanced-GLSL">Advanced GLSL </a></li><li id='Advanced-OpenGL/Geometry-Shader'><a id="menu-item68" href="https://learnopengl.com/Advanced-OpenGL/Geometry-Shader">Geometry Shader </a></li><li id='Advanced-OpenGL/Instancing'><a id="menu-item70" href="https://learnopengl.com/Advanced-OpenGL/Instancing">Instancing </a></li><li id='Advanced-OpenGL/Anti-Aliasing'><a id="menu-item75" href="https://learnopengl.com/Advanced-OpenGL/Anti-Aliasing">Anti Aliasing </a></li></ol></li><li id='Advanced-Lighting'><span id="menu-item100" class="closed">Advanced Lighting </span><ol id="menu-items-of100" style="display:none;"><li 
id='Advanced-Lighting/Advanced-Lighting'><a id="menu-item101" href="https://learnopengl.com/Advanced-Lighting/Advanced-Lighting">Advanced Lighting </a></li><li id='Advanced-Lighting/Gamma-Correction'><a id="menu-item110" href="https://learnopengl.com/Advanced-Lighting/Gamma-Correction">Gamma Correction </a></li><li id='Advanced-Lighting/Shadows'><span id="menu-item102" class="closed">Shadows </span><ol id="menu-items-of102" style="display:none;"><li id='Advanced-Lighting/Shadows/Shadow-Mapping'><a id="menu-item103" href="https://learnopengl.com/Advanced-Lighting/Shadows/Shadow-Mapping">Shadow Mapping </a></li><li id='Advanced-Lighting/Shadows/Point-Shadows'><a id="menu-item104" href="https://learnopengl.com/Advanced-Lighting/Shadows/Point-Shadows">Point Shadows </a></li></ol></li><li id='Advanced-Lighting/Normal-Mapping'><a id="menu-item106" href="https://learnopengl.com/Advanced-Lighting/Normal-Mapping">Normal Mapping </a></li><li id='Advanced-Lighting/Parallax-Mapping'><a id="menu-item107" href="https://learnopengl.com/Advanced-Lighting/Parallax-Mapping">Parallax Mapping </a></li><li id='Advanced-Lighting/HDR'><a id="menu-item111" href="https://learnopengl.com/Advanced-Lighting/HDR">HDR </a></li><li id='Advanced-Lighting/Bloom'><a id="menu-item112" href="https://learnopengl.com/Advanced-Lighting/Bloom">Bloom </a></li><li id='Advanced-Lighting/Deferred-Shading'><a id="menu-item108" href="https://learnopengl.com/Advanced-Lighting/Deferred-Shading">Deferred Shading </a></li><li id='Advanced-Lighting/SSAO'><a id="menu-item109" href="https://learnopengl.com/Advanced-Lighting/SSAO">SSAO </a></li></ol></li><li id='PBR'><span id="menu-item113" class="closed">PBR </span><ol id="menu-items-of113" style="display:none;"><li id='PBR/Theory'><a id="menu-item114" href="https://learnopengl.com/PBR/Theory">Theory </a></li><li id='PBR/Lighting'><a id="menu-item115" href="https://learnopengl.com/PBR/Lighting">Lighting </a></li><li id='PBR/IBL'><span id="menu-item116" class="closed">IBL </span><ol id="menu-items-of116" style="display:none;"><li id='PBR/IBL/Diffuse-irradiance'><a id="menu-item117" href="https://learnopengl.com/PBR/IBL/Diffuse-irradiance">Diffuse irradiance </a></li><li id='PBR/IBL/Specular-IBL'><a id="menu-item118" href="https://learnopengl.com/PBR/IBL/Specular-IBL">Specular IBL </a></li></ol></li></ol></li><li id='In-Practice'><span id="menu-item78" class="closed">In Practice </span><ol id="menu-items-of78" style="display:none;"><li id='In-Practice/Debugging'><a id="menu-item79" href="https://learnopengl.com/In-Practice/Debugging">Debugging </a></li><li id='In-Practice/Text-Rendering'><a id="menu-item80" href="https://learnopengl.com/In-Practice/Text-Rendering">Text Rendering </a></li><li id='In-Practice/2D-Game'><span id="menu-item81" class="closed">2D Game </span><ol id="menu-items-of81" style="display:none;"><li id='In-Practice/2D-Game/Breakout'><a id="menu-item82" href="https://learnopengl.com/In-Practice/2D-Game/Breakout">Breakout </a></li><li id='In-Practice/2D-Game/Setting-up'><a id="menu-item88" href="https://learnopengl.com/In-Practice/2D-Game/Setting-up">Setting up </a></li><li id='In-Practice/2D-Game/Rendering-Sprites'><a id="menu-item83" href="https://learnopengl.com/In-Practice/2D-Game/Rendering-Sprites">Rendering Sprites </a></li><li id='In-Practice/2D-Game/Levels'><a id="menu-item84" href="https://learnopengl.com/In-Practice/2D-Game/Levels">Levels </a></li><li id='In-Practice/2D-Game/Collisions'><span id="menu-item85" class="closed">Collisions </span><ol id="menu-items-of85" 
style="display:none;"><li id='In-Practice/2D-Game/Collisions/Ball'><a id="menu-item95" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Ball">Ball </a></li><li id='In-Practice/2D-Game/Collisions/Collision-detection'><a id="menu-item96" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-detection">Collision detection </a></li><li id='In-Practice/2D-Game/Collisions/Collision-resolution'><a id="menu-item97" href="https://learnopengl.com/In-Practice/2D-Game/Collisions/Collision-resolution">Collision resolution </a></li></ol></li><li id='In-Practice/2D-Game/Particles'><a id="menu-item89" href="https://learnopengl.com/In-Practice/2D-Game/Particles">Particles </a></li><li id='In-Practice/2D-Game/Postprocessing'><a id="menu-item90" href="https://learnopengl.com/In-Practice/2D-Game/Postprocessing">Postprocessing </a></li><li id='In-Practice/2D-Game/Powerups'><a id="menu-item91" href="https://learnopengl.com/In-Practice/2D-Game/Powerups">Powerups </a></li><li id='In-Practice/2D-Game/Audio'><a id="menu-item94" href="https://learnopengl.com/In-Practice/2D-Game/Audio">Audio </a></li><li id='In-Practice/2D-Game/Render-text'><a id="menu-item92" href="https://learnopengl.com/In-Practice/2D-Game/Render-text">Render text </a></li><li id='In-Practice/2D-Game/Final-thoughts'><a id="menu-item93" href="https://learnopengl.com/In-Practice/2D-Game/Final-thoughts">Final thoughts </a></li></ol></li></ol></li><li id='Guest-Articles'><span id="menu-item125" class="closed">Guest Articles </span><ol id="menu-items-of125" style="display:none;"><li id='Guest-Articles/How-to-publish'><a id="menu-item126" href="https://learnopengl.com/Guest-Articles/How-to-publish">How to publish </a></li><li id='Guest-Articles/2020'><span id="menu-item128" class="closed">2020 </span><ol id="menu-items-of128" style="display:none;"><li id='Guest-Articles/2020/OIT'><span id="menu-item129" class="closed">OIT </span><ol id="menu-items-of129" style="display:none;"><li id='Guest-Articles/2020/OIT/Introduction'><a id="menu-item130" href="https://learnopengl.com/Guest-Articles/2020/OIT/Introduction">Introduction </a></li><li id='Guest-Articles/2020/OIT/Weighted-Blended'><a id="menu-item132" href="https://learnopengl.com/Guest-Articles/2020/OIT/Weighted-Blended">Weighted Blended </a></li></ol></li><li id='Guest-Articles/2020/Skeletal-Animation'><a id="menu-item131" href="https://learnopengl.com/Guest-Articles/2020/Skeletal-Animation">Skeletal Animation </a></li></ol></li><li id='Guest-Articles/2021'><span id="menu-item133" class="closed">2021 </span><ol id="menu-items-of133" style="display:none;"><li id='Guest-Articles/2021/Scene'><span id="menu-item134" class="closed">Scene </span><ol id="menu-items-of134" style="display:none;"><li id='Guest-Articles/2021/Scene/Scene-Graph'><a id="menu-item135" href="https://learnopengl.com/Guest-Articles/2021/Scene/Scene-Graph">Scene Graph </a></li><li id='Guest-Articles/2021/Scene/Frustum-Culling'><a id="menu-item136" href="https://learnopengl.com/Guest-Articles/2021/Scene/Frustum-Culling">Frustum Culling </a></li></ol></li></ol></li></ol></li><li id='Code-repository'><a id="menu-item99" href="https://learnopengl.com/Code-repository">Code repository </a></li><li id='Translations'><a id="menu-item119" href="https://learnopengl.com/Translations">Translations </a></li><li id='About'><a id="menu-item2" href="https://learnopengl.com/About">About </a></li></ol> <div id="menu_book"> + <a href="https://geni.us/learnopengl" target="_blank"><img src="/book/below_menu.png" 
class="clean"/></a> + </div> + <div id="donate"> + <a href="https://www.paypal.me/learnopengl/" target="_blank"> + <div id="donate_img"></div> + <img style="display: none" src="/img/donate_button_hover.png"/> + <!--<img id="donate_img" src="img/patreon.png"/>--> + </a> + <!--<div id="alipay"> + <img style="width: 150px;" class="clean" src="/img/alipay_logo.png"/> + <img style="width: 150px; margin-top: 5px" src="/img/alipay.png"/> + </div>--> + </div> + <div class="btc"> + <h3>BTC</h3> + <p> + 1CLGKgmBSuYJ1nnvDGAepVTKNNDpUjfpRa + </p> + <img src="/img/btc_qr.png"/> + </div> + <div class="btc"> + <h3>ETH/ERC20</h3> + <p> + 0x1de59bd9e52521a46309474f8372531533bd7c43 + </p> + <img src="/img/erc20_qr.png"/> + </div> + <div id="ad"> + <!--<div id="waldo-tag-1684"></div>--> + </div> + + <div id="lefttwothirdad"> + <div id="waldo-tag-2245"></div> + </div> + </div> + + <div id="content"> + <h1 id="content-title">Transformations</h1> +<h1 id="content-url" style='display:none;'>Getting-started/Transformations</h1> +<p> + We now know how to create objects, color them and/or give them a detailed appearance using textures, but they're still not that interesting since they're all static objects. We could try and make them move by changing their vertices and re-configuring their buffers each frame, but that's cumbersome and costs quite some processing power. There are much better ways to <def>transform</def> an object and that's by using (multiple) <def>matrix</def> objects. This doesn't mean we're going to talk about Kung Fu and a large digital artificial world. +</p> + +<p> + Matrices are very powerful mathematical constructs that seem scary at first, but once you'll grow accustomed to them they'll prove extremely useful. When discussing matrices, we'll have to make a small dive into some mathematics and for the more mathematically inclined readers I'll post additional resources for further reading. +</p> + +<p> + However, to fully understand transformations we first have to delve a bit deeper into vectors before discussing matrices. The focus of this chapter is to give you a basic mathematical background in topics we will require later on. If the subjects are difficult, try to understand them as much as you can and come back to this chapter later to review the concepts whenever you need them. +</p> + +<h1>Vectors</h1> +<p> + In its most basic definition, vectors are directions and nothing more. A vector has a <def>direction</def> and a <def>magnitude</def> (also known as its strength or length). You can think of vectors like directions on a treasure map: 'go left 10 steps, now go north 3 steps and go right 5 steps'; here 'left' is the direction and '10 steps' is the magnitude of the vector. The directions for the treasure map thus contains 3 vectors. Vectors can have any dimension, but we usually work with dimensions of 2 to 4. If a vector has 2 dimensions it represents a direction on a plane (think of 2D graphs) and when it has 3 dimensions it can represent any direction in a 3D world. +</p> + +<p> + Below you'll see 3 vectors where each vector is represented with <code>(x,y)</code> as arrows in a 2D graph. Because it is more intuitive to display vectors in 2D (rather than 3D) you can think of the 2D vectors as 3D vectors with a <code>z</code> coordinate of <code>0</code>. Since vectors represent directions, the origin of the vector does not change its value. 
In the graph below we can see that the vectors \(\color{red}{\bar{v}}\) and \(\color{blue}{\bar{w}}\) are equal even though their origin is different:
+</p>
+
+<img src="/img/getting-started/vectors.png" class="clean" />
+
+<p>
+  When describing vectors, mathematicians generally prefer character symbols with a little bar over their head like \(\bar{v}\). Also, when displaying vectors in formulas they are generally displayed as follows:
+
+  \[\bar{v} = \begin{pmatrix} \color{red}x \\ \color{green}y \\ \color{blue}z \end{pmatrix} \]
+</p>
+
+<p>
+  Because vectors are specified as directions it is sometimes hard to visualize them as positions. If we want to visualize vectors as positions we can imagine the origin of the direction vector to be <code>(0,0,0)</code> and then point towards a certain direction that specifies the point, making it a <def>position vector</def> (we could also specify a different origin and then say: 'this vector points to that point in space from this origin'). The position vector <code>(3,5)</code> would then point to <code>(3,5)</code> on the graph with an origin of <code>(0,0)</code>. Using vectors we can thus describe directions <strong>and</strong> positions in 2D and 3D space.
+</p>
+
+<p>
+  Just like with normal numbers we can also define several operations on vectors (some of which you've already seen).
+</p>
+
+<h2>Scalar vector operations</h2>
+<p>
+  A <def>scalar</def> is a single number. When adding/subtracting/multiplying or dividing a vector with a scalar we simply add/subtract/multiply or divide each element of the vector by the scalar. For addition it would look like this:
+
+  \[ \begin{pmatrix} \color{red}1 \\ \color{green}2 \\ \color{blue}3 \end{pmatrix} + x \rightarrow \begin{pmatrix} \color{red}1 \\ \color{green}2 \\ \color{blue}3 \end{pmatrix} + \begin{pmatrix} x \\ x \\ x \end{pmatrix} = \begin{pmatrix} \color{red}1 + x \\ \color{green}2 + x \\ \color{blue}3 + x \end{pmatrix} \]
+
+  Where \(+\) can be \(+\),\(-\),\(\cdot\) or \(\div\) where \(\cdot\) is the multiplication operator.
+</p>
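+
+<p>
+  To make the component-wise idea concrete, here is a tiny illustrative sketch in plain C++ (a hypothetical <code>Vec3</code> struct, not the math library this chapter introduces later) of adding and multiplying a vector by a scalar:
+</p>
+
+<pre><code>
+// Illustrative only: a bare-bones 3D vector with component-wise scalar operations.
+struct Vec3 { float x, y, z; };
+
+Vec3 add(Vec3 v, float s)      { return { v.x + s, v.y + s, v.z + s }; }
+Vec3 multiply(Vec3 v, float s) { return { v.x * s, v.y * s, v.z * s }; }
+
+// add({1, 2, 3}, 2)      gives {3, 4, 5}
+// multiply({1, 2, 3}, 2) gives {2, 4, 6}
+</code></pre>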
+<h2>Vector negation</h2>
+<p>
+  Negating a vector results in a vector in the reversed direction. A vector pointing north-east would point south-west after negation. To negate a vector we add a minus-sign to each component (you can also represent it as a scalar-vector multiplication with a scalar value of <code>-1</code>):
+
+  \[-\bar{v} = -\begin{pmatrix} \color{red}{v_x} \\ \color{green}{v_y} \\ \color{blue}{v_z} \end{pmatrix} = \begin{pmatrix} -\color{red}{v_x} \\ -\color{green}{v_y} \\ -\color{blue}{v_z} \end{pmatrix} \]
+</p>
+
+<h2>Addition and subtraction</h2>
+<p>
+  Addition of two vectors is defined as <def>component-wise</def> addition, that is each component of one vector is added to the same component of the other vector like so:
+
+  \[\bar{v} = \begin{pmatrix} \color{red}1 \\ \color{green}2 \\ \color{blue}3 \end{pmatrix}, \bar{k} = \begin{pmatrix} \color{red}4 \\ \color{green}5 \\ \color{blue}6 \end{pmatrix} \rightarrow \bar{v} + \bar{k} = \begin{pmatrix} \color{red}1 + \color{red}4 \\ \color{green}2 + \color{green}5 \\ \color{blue}3 + \color{blue}6 \end{pmatrix} = \begin{pmatrix} \color{red}5 \\ \color{green}7 \\ \color{blue}9 \end{pmatrix} \]
+
+  Visually, it looks like this on vectors <code>v=(4,2)</code> and <code>k=(1,2)</code>, where the second vector is added on top of the first vector's end to find the end point of the resulting vector (head-to-tail method):
+</p>
+
+  <img src="/img/getting-started/vectors_addition.png" class="clean"/>
+
+<p>
+  Just like normal addition and subtraction, vector subtraction is the same as addition with a negated second vector:
+
+  \[\bar{v} = \begin{pmatrix} \color{red}{1} \\ \color{green}{2} \\ \color{blue}{3} \end{pmatrix}, \bar{k} = \begin{pmatrix} \color{red}{4} \\ \color{green}{5} \\ \color{blue}{6} \end{pmatrix} \rightarrow \bar{v} + -\bar{k} = \begin{pmatrix} \color{red}{1} + (-\color{red}{4}) \\ \color{green}{2} + (-\color{green}{5}) \\ \color{blue}{3} + (-\color{blue}{6}) \end{pmatrix} = \begin{pmatrix} -\color{red}{3} \\ -\color{green}{3} \\ -\color{blue}{3} \end{pmatrix} \]
+
+</p>
+
+<p>
+  Subtracting two vectors from each other results in a vector that's the difference of the positions both vectors are pointing at. This proves useful in certain cases where we need to retrieve a vector that's the difference between two points.
+</p>
+
+<img src="/img/getting-started/vectors_subtraction.png" class="clean"/>
+
+<h2>Length</h2>
+<p>
+  To retrieve the length/magnitude of a vector we use the <def>Pythagoras theorem</def> that you may remember from your math classes. A vector forms a triangle when you visualize its individual <code>x</code> and <code>y</code> component as two sides of a triangle:
+</p>
+
+<img src="/img/getting-started/vectors_triangle.png" class="clean"/>
+
+<p>
+  Since the lengths of the two sides <code>(x, y)</code> are known and we want to know the length of the tilted side \(\color{red}{\bar{v}}\) we can calculate it using the Pythagoras theorem as:
+
+  \[||\color{red}{\bar{v}}|| = \sqrt{\color{green}x^2 + \color{blue}y^2} \]
+
+  Where \(||\color{red}{\bar{v}}||\) is denoted as <em>the length of vector \(\color{red}{\bar{v}}\)</em>. This is easily extended to 3D by adding \(z^2\) to the equation.
+</p>
+
+<p>
+  In this case the length of vector <code>(4, 2)</code> equals:
+
+  \[||\color{red}{\bar{v}}|| = \sqrt{\color{green}4^2 + \color{blue}2^2} = \sqrt{\color{green}16 + \color{blue}4} = \sqrt{20} = 4.47 \]
+
+  Which is <code>4.47</code>.
+</p>
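+
+<p>
+  As a quick way to convince yourself of this result, here is an illustrative one-liner in plain C++ (not part of the chapter's code) that computes the same length:
+</p>
+
+<pre><code>
+#include &lt;cmath&gt;
+#include &lt;iostream&gt;
+
+int main()
+{
+    float x = 4.0f, y = 2.0f;
+    // length of the vector (4, 2) via the Pythagoras theorem
+    std::cout &lt;&lt; std::sqrt(x * x + y * y) &lt;&lt; std::endl; // prints roughly 4.47214
+}
+</code></pre>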
+<p>
+  There is also a special type of vector that we call a <def>unit vector</def>. A unit vector has one extra property and that is that its length is exactly 1. We can calculate a unit vector \(\hat{n}\) from any vector by dividing each of the vector's components by its length:
+
+  \[\hat{n} = \frac{\bar{v}}{||\bar{v}||}\]
+
+  We call this <def>normalizing</def> a vector. Unit vectors are displayed with a little roof over their head and are generally easier to work with, especially when we only care about their directions (the direction does not change if we change a vector's length).
+</p>
+
+<h2>Vector-vector multiplication</h2>
+<p>
+  Multiplying two vectors is a bit of a weird case. Normal multiplication isn't really defined on vectors since it has no visual meaning, but we have two specific cases that we could choose from when multiplying: one is the <def>dot product</def> denoted as \(\bar{v} \cdot \bar{k}\) and the other is the <def>cross product</def> denoted as \(\bar{v} \times \bar{k}\).
+</p>
+
+<h3>Dot product</h3>
+<p>
+  The dot product of two vectors is a scalar equal to the product of their lengths times the cosine of the angle between them. If this sounds confusing take a look at its formula:
+
+  \[\bar{v} \cdot \bar{k} = ||\bar{v}|| \cdot ||\bar{k}|| \cdot \cos \theta \]
+
+  Where the angle between them is represented as theta (\(\theta\)). Why is this interesting? Well, imagine if \(\bar{v}\) and \(\bar{k}\) are unit vectors, then their lengths would be equal to 1. This would effectively reduce the formula to:
+
+  \[\hat{v} \cdot \hat{k} = 1 \cdot 1 \cdot \cos \theta = \cos \theta\]
+
+  Now the dot product <strong>only</strong> defines the angle between both vectors. You may remember that the cosine or cos function becomes <code>0</code> when the angle is 90 degrees or <code>1</code> when the angle is 0. This allows us to easily test if the two vectors are <def>orthogonal</def> or <def>parallel</def> to each other using the dot product (orthogonal means the vectors are at a <def>right-angle</def> to each other). In case you want to know more about the <code>sin</code> or the <code>cos</code> functions I'd suggest the following <a href="https://www.khanacademy.org/math/trigonometry/basic-trigonometry/basic_trig_ratios/v/basic-trigonometry" target="_blank">Khan Academy videos</a> about basic trigonometry.
+</p>
+
+<note>
+  You can also calculate the angle between two non-unit vectors, but then you'd have to divide the result by the product of both vectors' lengths to be left with \(\cos \theta\).
+</note>
+
+<p>
+  So how do we calculate the dot product? The dot product is a component-wise multiplication where we add the results together. It looks like this with two unit vectors (you can verify that both their lengths are exactly <code>1</code>):
+
+  \[ \begin{pmatrix} \color{red}{0.6} \\ -\color{green}{0.8} \\ \color{blue}0 \end{pmatrix} \cdot \begin{pmatrix} \color{red}0 \\ \color{green}1 \\ \color{blue}0 \end{pmatrix} = (\color{red}{0.6} * \color{red}0) + (-\color{green}{0.8} * \color{green}1) + (\color{blue}0 * \color{blue}0) = -0.8 \]
+
+  To calculate the angle between these two unit vectors we use the inverse of the cosine function \(\cos^{-1}\) and this results in <code>143.1</code> degrees. We now effectively calculated the angle between these two vectors. The dot product proves very useful when doing lighting calculations later on.
+</p>
+
+<h3>Cross product</h3>
+<p>
+  The cross product is only defined in 3D space and takes two non-parallel vectors as input and produces a third vector that is orthogonal to both the input vectors.
If both the input vectors are orthogonal to each other as well, a cross product would result in 3 orthogonal vectors; this will prove useful in the upcoming chapters. The following image shows what this looks like in 3D space: +</p> + +<img src="/img/getting-started/vectors_crossproduct.png" class="clean"/> + +<p> + Unlike the other operations, the cross product isn't really intuitive without delving into linear algebra so it's best to just memorize the formula and you'll be fine (or don't, you'll probably be fine as well). Below you'll see the cross product between two orthogonal vectors A and B: + + \[\begin{pmatrix} \color{red}{A_{x}} \\ \color{green}{A_{y}} \\ \color{blue}{A_{z}} \end{pmatrix} \times \begin{pmatrix} \color{red}{B_{x}} \\ \color{green}{B_{y}} \\ \color{blue}{B_{z}} \end{pmatrix} = \begin{pmatrix} \color{green}{A_{y}} \cdot \color{blue}{B_{z}} - \color{blue}{A_{z}} \cdot \color{green}{B_{y}} \\ \color{blue}{A_{z}} \cdot \color{red}{B_{x}} - \color{red}{A_{x}} \cdot \color{blue}{B_{z}} \\ \color{red}{A_{x}} \cdot \color{green}{B_{y}} - \color{green}{A_{y}} \cdot \color{red}{B_{x}} \end{pmatrix} \] + + As you can see, it doesn't really seem to make sense. However, if you just follow these steps you'll get another vector that is orthogonal to your input vectors. +</p> + +<h1>Matrices</h1> +<p> + Now that we've discussed almost all there is to vectors it is time to enter the matrix! + A matrix is a rectangular array of numbers, symbols and/or mathematical expressions. Each individual item in a matrix is called an <def>element</def> of the matrix. An example of a 2x3 matrix is shown below: + + \[\begin{bmatrix} 1 & 2 & 3 \\ 4 & 5 & 6 \end{bmatrix}\] + + Matrices are indexed by <code>(i,j)</code> where <code>i</code> is the row and <code>j</code> is the column, that is why the above matrix is called a 2x3 matrix (3 columns and 2 rows, also known as the <def>dimensions</def> of the matrix). This is the opposite of what you're used to when indexing 2D graphs as <code>(x,y)</code>. To retrieve the value 4 we would index it as <code>(2,1)</code> (second row, first column). +</p> + +<p> + Matrices are basically nothing more than that, just rectangular arrays of mathematical expressions. They do have a very nice set of mathematical properties and just like vectors we can define several operations on matrices, namely: addition, subtraction and multiplication. +</p> + +<h2>Addition and subtraction</h2> +<p> + Matrix addition and subtraction between two matrices is done on a per-element basis. So the same general rules apply that we're familiar with for normal numbers, but done on the elements of both matrices with the same index. This does mean that addition and subtraction is only defined for matrices of the same dimensions. A 3x2 matrix and a 2x3 matrix (or a 3x3 matrix and a 4x4 matrix) cannot be added or subtracted together. 
Let's see how matrix addition works on two 2x2 matrices:
+
+  \[\begin{bmatrix} \color{red}1 & \color{red}2 \\ \color{green}3 & \color{green}4 \end{bmatrix} + \begin{bmatrix} \color{red}5 & \color{red}6 \\ \color{green}7 & \color{green}8 \end{bmatrix} = \begin{bmatrix} \color{red}1 + \color{red}5 & \color{red}2 + \color{red}6 \\ \color{green}3 + \color{green}7 & \color{green}4 + \color{green}8 \end{bmatrix} = \begin{bmatrix} \color{red}6 & \color{red}8 \\ \color{green}{10} & \color{green}{12} \end{bmatrix} \]
+
+  The same rules apply for matrix subtraction:
+
+  \[\begin{bmatrix} \color{red}4 & \color{red}2 \\ \color{green}1 & \color{green}6 \end{bmatrix} - \begin{bmatrix} \color{red}2 & \color{red}4 \\ \color{green}0 & \color{green}1 \end{bmatrix} = \begin{bmatrix} \color{red}4 - \color{red}2 & \color{red}2 - \color{red}4 \\ \color{green}1 - \color{green}0 & \color{green}6 - \color{green}1 \end{bmatrix} = \begin{bmatrix} \color{red}2 & -\color{red}2 \\ \color{green}1 & \color{green}5 \end{bmatrix} \]
+
+</p>
+
+<h2>Matrix-scalar products</h2>
+<p>
+  A matrix-scalar product multiplies each element of the matrix by a scalar. The following example illustrates the multiplication:
+
+  \[\color{green}2 \cdot \begin{bmatrix} 1 & 2 \\ 3 & 4 \end{bmatrix} = \begin{bmatrix} \color{green}2 \cdot 1 & \color{green}2 \cdot 2 \\ \color{green}2 \cdot 3 & \color{green}2 \cdot 4 \end{bmatrix} = \begin{bmatrix} 2 & 4 \\ 6 & 8 \end{bmatrix}\]
+
+  Now it also makes sense why those single numbers are called scalars. A scalar basically <em>scales</em> all the elements of the matrix by its value. In the previous example, all elements were scaled by <code>2</code>.
+</p>
+
+<p>
+  So far so good, all of our cases weren't really too complicated. That is, until we start on matrix-matrix multiplication.
+</p>
+
+<h2>Matrix-matrix multiplication</h2>
+<p>
+  Multiplying matrices is not necessarily complex, but rather difficult to get comfortable with. Matrix multiplication basically means to follow a set of pre-defined rules when multiplying. There are a few restrictions though:
+
+  <ol>
+    <li>You can only multiply two matrices if the number of columns on the left-hand side matrix is equal to the number of rows on the right-hand side matrix.</li>
+    <li>Matrix multiplication is not <def>commutative</def>, that is \(A \cdot B \neq B \cdot A\).</li>
+  </ol>
+</p>
+
+<p>
+  Let's get started with an example of a matrix multiplication of 2 <code>2x2</code> matrices:
+
+  \[ \begin{bmatrix} \color{red}1 & \color{red}2 \\ \color{green}3 & \color{green}4 \end{bmatrix} \cdot \begin{bmatrix} \color{blue}5 & \color{purple}6 \\ \color{blue}7 & \color{purple}8 \end{bmatrix} = \begin{bmatrix} \color{red}1 \cdot \color{blue}5 + \color{red}2 \cdot \color{blue}7 & \color{red}1 \cdot \color{purple}6 + \color{red}2 \cdot \color{purple}8 \\ \color{green}3 \cdot \color{blue}5 + \color{green}4 \cdot \color{blue}7 & \color{green}3 \cdot \color{purple}6 + \color{green}4 \cdot \color{purple}8 \end{bmatrix} = \begin{bmatrix} 19 & 22 \\ 43 & 50 \end{bmatrix} \]
+
+  Right now you're probably trying to figure out what the hell just happened? Matrix multiplication is a combination of normal multiplication and addition using the left-matrix's rows with the right-matrix's columns. Let's try discussing this with the following image:
+</p>
+
+  <img src="/img/getting-started/matrix_multiplication.png" class="clean"/>
+
+<p>
+  We first take the upper row of the left matrix and then take a column from the right matrix.
The row and column that we picked decide which output value of the resulting <code>2x2</code> matrix we're going to calculate. If we take the first row of the left matrix the resulting value will end up in the first row of the result matrix, then we pick a column and if it's the first column the result value will end up in the first column of the result matrix. This is exactly the case of the red pathway. To calculate the bottom-right result we take the bottom row of the first matrix and the rightmost column of the second matrix.
+</p>
+
+<p>
+  To calculate the resulting value we multiply the first element of the row and column together using normal multiplication; we do the same for the second elements, third, fourth etc. The results of the individual multiplications are then summed up and we have our result. Now it also makes sense that one of the requirements is that the number of columns of the left matrix and the number of rows of the right matrix are equal, otherwise we can't finish the operations!
+</p>
+
+<p>
+  The result is then a matrix that has dimensions of (<code>n,m</code>) where <code>n</code> is equal to the number of rows of the left-hand side matrix and <code>m</code> is equal to the columns of the right-hand side matrix.
+</p>
+
+<p>
+  Don't worry if you have difficulties imagining the multiplications inside your head. Just keep trying to do the calculations by hand and return to this page whenever you have difficulties. Over time, matrix multiplication becomes second nature to you.
+</p>
+
+<p>
+  Let's finish the discussion of matrix-matrix multiplication with a larger example. Try to visualize the pattern using the colors. As a useful exercise, see if you can come up with your own answer of the multiplication and then compare it with the resulting matrix (once you try to do a matrix multiplication by hand you'll quickly get the grasp of them).
+ + \[ \begin{bmatrix} \color{red}4 & \color{red}2 & \color{red}0 \\ \color{green}0 & \color{green}8 & \color{green}1 \\ \color{blue}0 & \color{blue}1 & \color{blue}0 \end{bmatrix} \cdot \begin{bmatrix} \color{red}4 & \color{green}2 & \color{blue}1 \\ \color{red}2 & \color{green}0 & \color{blue}4 \\ \color{red}9 & \color{green}4 & \color{blue}2 \end{bmatrix} = \begin{bmatrix} \color{red}4 \cdot \color{red}4 + \color{red}2 \cdot \color{red}2 + \color{red}0 \cdot \color{red}9 & \color{red}4 \cdot \color{green}2 + \color{red}2 \cdot \color{green}0 + \color{red}0 \cdot \color{green}4 & \color{red}4 \cdot \color{blue}1 + \color{red}2 \cdot \color{blue}4 + \color{red}0 \cdot \color{blue}2 \\ \color{green}0 \cdot \color{red}4 + \color{green}8 \cdot \color{red}2 + \color{green}1 \cdot \color{red}9 & \color{green}0 \cdot \color{green}2 + \color{green}8 \cdot \color{green}0 + \color{green}1 \cdot \color{green}4 & \color{green}0 \cdot \color{blue}1 + \color{green}8 \cdot \color{blue}4 + \color{green}1 \cdot \color{blue}2 \\ \color{blue}0 \cdot \color{red}4 + \color{blue}1 \cdot \color{red}2 + \color{blue}0 \cdot \color{red}9 & \color{blue}0 \cdot \color{green}2 + \color{blue}1 \cdot \color{green}0 + \color{blue}0 \cdot \color{green}4 & \color{blue}0 \cdot \color{blue}1 + \color{blue}1 \cdot \color{blue}4 + \color{blue}0 \cdot \color{blue}2 \end{bmatrix} + \\ = \begin{bmatrix} 20 & 8 & 12 \\ 25 & 4 & 34 \\ 2 & 0 & 4 \end{bmatrix}\] +</p> + +<p> + As you can see, matrix-matrix multiplication is quite a cumbersome process and very prone to errors (which is why we usually let computers do this) and this gets problematic real quick when the matrices become larger. If you're still thirsty for more and you're curious about some more of the mathematical properties of matrices I strongly suggest you take a look at these <a href="https://www.khanacademy.org/math/algebra2/algebra-matrices" target="_blank">Khan Academy videos</a> about matrices. +</p> + +<p> + Anyways, now that we know how to multiply matrices together, we can start getting to the good stuff. +</p> + +<h1>Matrix-Vector multiplication</h1> +<p> + Up until now we've had our fair share of vectors. We used them to represent positions, colors and even texture coordinates. Let's move a bit further down the rabbit hole and tell you that a vector is basically a <code>Nx1</code> matrix where <code>N</code> is the vector's number of components (also known as an <def>N-dimensional</def> vector). If you think about it, it makes a lot of sense. Vectors are just like matrices an array of numbers, but with only 1 column. So, how does this new piece of information help us? Well, if we have a <code>MxN</code> matrix we can multiply this matrix with our <code>Nx1</code> vector, since the columns of the matrix are equal to the number of rows of the vector, thus matrix multiplication is defined. +</p> + +<p> + But why do we care if we can multiply matrices with a vector? Well, it just so happens that there are lots of interesting 2D/3D transformations we can place inside a matrix, and multiplying that matrix with a vector then <em>transforms</em> that vector. In case you're still a bit confused, let's start with a few examples and you'll soon see what we mean. +</p> + +<h2>Identity matrix</h2> +<p> + In OpenGL we usually work with <code>4x4</code> transformation matrices for several reasons and one of them is that most of the vectors are of size 4. The most simple transformation matrix that we can think of is the <def>identity matrix</def>. 
The identity matrix is an <code>NxN</code> matrix with only 0s except on its diagonal, which is filled with 1s. As you'll see, this transformation matrix leaves a vector completely unharmed:
+
+  \[ \begin{bmatrix} \color{red}1 & \color{red}0 & \color{red}0 & \color{red}0 \\ \color{green}0 & \color{green}1 & \color{green}0 & \color{green}0 \\ \color{blue}0 & \color{blue}0 & \color{blue}1 & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{bmatrix} 1 \\ 2 \\ 3 \\ 4 \end{bmatrix} = \begin{bmatrix} \color{red}1 \cdot 1 \\ \color{green}1 \cdot 2 \\ \color{blue}1 \cdot 3 \\ \color{purple}1 \cdot 4 \end{bmatrix} = \begin{bmatrix} 1 \\ 2 \\ 3 \\ 4 \end{bmatrix} \]
+
+  The vector is completely untouched. This becomes obvious from the rules of multiplication: the first result element is the sum of each individual element of the first row of the matrix multiplied with the corresponding element of the vector. Since each of the row's elements is 0 except the first one, we get: \(\color{red}1\cdot1 + \color{red}0\cdot2 + \color{red}0\cdot3 + \color{red}0\cdot4 = 1\) and the same applies for the other 3 elements of the vector.
+</p>
+
+<note>
+  You may be wondering what the use is of a transformation matrix that does not transform. The identity matrix is usually a starting point for generating other transformation matrices and if we dig even deeper into linear algebra, a very useful matrix for proving theorems and solving linear equations.
+</note>
+
+<h2>Scaling</h2>
+<p>
+  When we're scaling a vector we are increasing the length of the arrow by the amount we'd like to scale, keeping its direction the same. Since we're working in either 2 or 3 dimensions we can define scaling by a vector of 2 or 3 scaling variables, each scaling one axis (<code>x</code>, <code>y</code> or <code>z</code>).
+</p>
+
+<p>
+  Let's try scaling the vector \(\color{red}{\bar{v}} = (3,2)\). We will scale the vector along the x-axis by <code>0.5</code>, thus making it twice as narrow; and we'll scale the vector by <code>2</code> along the y-axis, making it twice as high. Let's see what it looks like if we scale the vector by <code>(0.5,2)</code> as \(\color{blue}{\bar{s}}\):
+</p>
+
+<img src="/img/getting-started/vectors_scale.png" class="clean"/>
+
+<p>
+  Keep in mind that OpenGL usually operates in 3D space so for this 2D case we could set the z-axis scale to <code>1</code>, leaving it unharmed. The scaling operation we just performed is a <def>non-uniform</def> scale, because the scaling factor is not the same for each axis. If the scaling factor were equal on all axes it would be called a <def>uniform scale</def>.
+</p>
+
+<p>
+  Let's start building a transformation matrix that does the scaling for us. We saw from the identity matrix that each of the diagonal elements is multiplied with its corresponding vector element. What if we were to change the <code>1</code>s in the identity matrix to <code>3</code>s? In that case, we would be multiplying each of the vector elements by a value of <code>3</code> and thus effectively uniformly scale the vector by 3.
If we represent the scaling variables as \( (\color{red}{S_1}, \color{green}{S_2}, \color{blue}{S_3}) \) we can define a scaling matrix on any vector \((x,y,z)\) as: + + \[\begin{bmatrix} \color{red}{S_1} & \color{red}0 & \color{red}0 & \color{red}0 \\ \color{green}0 & \color{green}{S_2} & \color{green}0 & \color{green}0 \\ \color{blue}0 & \color{blue}0 & \color{blue}{S_3} & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{pmatrix} x \\ y \\ z \\ 1 \end{pmatrix} = \begin{pmatrix} \color{red}{S_1} \cdot x \\ \color{green}{S_2} \cdot y \\ \color{blue}{S_3} \cdot z \\ 1 \end{pmatrix} \] + + Note that we keep the 4th scaling value <code>1</code>. The <code>w</code> component is used for other purposes as we'll see later on. +</p> + +<h2>Translation</h2> +<p> + <def>Translation</def> is the process of adding another vector on top of the original vector to return a new vector with a different position, thus <em>moving</em> the vector based on a translation vector. We've already discussed vector addition so this shouldn't be too new. +</p> + +<p> + Just like the scaling matrix there are several locations on a 4-by-4 matrix that we can use to perform certain operations and for translation those are the top-3 values of the 4th column. If we represent the translation vector as \((\color{red}{T_x},\color{green}{T_y},\color{blue}{T_z})\) we can define the translation matrix by: + + \[\begin{bmatrix} \color{red}1 & \color{red}0 & \color{red}0 & \color{red}{T_x} \\ \color{green}0 & \color{green}1 & \color{green}0 & \color{green}{T_y} \\ \color{blue}0 & \color{blue}0 & \color{blue}1 & \color{blue}{T_z} \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{pmatrix} x \\ y \\ z \\ 1 \end{pmatrix} = \begin{pmatrix} x + \color{red}{T_x} \\ y + \color{green}{T_y} \\ z + \color{blue}{T_z} \\ 1 \end{pmatrix} \] + + This works because all of the translation values are multiplied by the vector's <code>w</code> column and added to the vector's original values (remember the matrix-multiplication rules). This wouldn't have been possible with a 3-by-3 matrix. +</p> + +<note> + <strong>Homogeneous coordinates</strong><br/> + The <code>w</code> component of a vector is also known as a <def>homogeneous coordinate</def>. + To get the 3D vector from a homogeneous vector we divide the <code>x</code>, <code>y</code> and <code>z</code> coordinate by its <code>w</code> coordinate. We usually do not notice this since the <code>w</code> component is <code>1.0</code> most of the time. Using homogeneous coordinates has several advantages: it allows us to do matrix translations on 3D vectors (without a <code>w</code> component we can't translate vectors) and in the next chapter we'll use the <code>w</code> value to create 3D perspective.<br/> + <br/> + Also, whenever the homogeneous coordinate is equal to <code>0</code>, the vector is specifically known as a <def>direction vector</def> since a vector with a <code>w</code> coordinate of <code>0</code> cannot be translated. +</note> + +<p> + With a translation matrix we can move objects in any of the 3 axis directions (<code>x</code>, <code>y</code>, <code>z</code>), making it a very useful transformation matrix for our transformation toolkit. +</p> + + + +<h2>Rotation</h2> +<p> + The last few transformations were relatively easy to understand and visualize in 2D or 3D space, but rotations are a bit trickier. 
If you want to know exactly how these matrices are constructed I'd recommend that you watch the rotation items of Khan Academy's <a href="https://www.khanacademy.org/math/linear-algebra/matrix_transformations" target="_blank">linear algebra</a> videos. +</p> + +<p> + First let's define what a rotation of a vector actually is. A rotation in 2D or 3D is represented with an <def>angle</def>. An angle could be in degrees or radians where a whole circle has 360 degrees or 2 <a href="http://en.wikipedia.org/wiki/Pi" target="_blank">PI</a> radians. I prefer explaining rotations using degrees as we're generally more accustomed to them. + +<note> + Most rotation functions require an angle in radians, but luckily degrees are easily converted to radians: <br/> + <code>angle in degrees = angle in radians * (180 / PI) </code><br/> + <code>angle in radians = angle in degrees * (PI / 180) </code><br/> + Where <code>PI</code> equals (rounded) <code>3.14159265359</code>. +</note> + + Rotating half a circle rotates us 360/2 = 180 degrees and rotating 1/5th to the right means we rotate 360/5 = 72 degrees to the right. This is demonstrated for a basic 2D vector where \(\color{red}{\bar{v}}\) is rotated 72 degrees to the right, or clockwise, from \(\color{green}{\bar{k}}\): +</p> + + <img src="/img/getting-started/vectors_angle.png" class="clean" /> + +<p> + Rotations in 3D are specified with an angle <strong>and</strong> a <def>rotation axis</def>. The angle specified will rotate the object along the rotation axis given. Try to visualize this by spinning your head a certain degree while continually looking down a single rotation axis. When rotating 2D vectors in a 3D world for example, we set the rotation axis to the z-axis (try to visualize this). +</p> + +<p> + Using trigonometry it is possible to transform vectors to newly rotated vectors given an angle. This is usually done via a smart combination of the <code>sine</code> and <code>cosine</code> functions (commonly abbreviated to <code>sin</code> and <code>cos</code>). A discussion of how the rotation matrices are generated is out of the scope of this chapter. +</p> + +<p> + A rotation matrix is defined for each unit axis in 3D space where the angle is represented as the theta symbol \(\theta\). 
+</p> + + <p> + Rotation around the X-axis: + + \[\begin{bmatrix} \color{red}1 & \color{red}0 & \color{red}0 & \color{red}0 \\ \color{green}0 & \color{green}{\cos \theta} & - \color{green}{\sin \theta} & \color{green}0 \\ \color{blue}0 & \color{blue}{\sin \theta} & \color{blue}{\cos \theta} & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{pmatrix} x \\ y \\ z \\ 1 \end{pmatrix} = \begin{pmatrix} x \\ \color{green}{\cos \theta} \cdot y - \color{green}{\sin \theta} \cdot z \\ \color{blue}{\sin \theta} \cdot y + \color{blue}{\cos \theta} \cdot z \\ 1 \end{pmatrix}\] + </p> + + <p> + Rotation around the Y-axis: + + \[\begin{bmatrix} \color{red}{\cos \theta} & \color{red}0 & \color{red}{\sin \theta} & \color{red}0 \\ \color{green}0 & \color{green}1 & \color{green}0 & \color{green}0 \\ - \color{blue}{\sin \theta} & \color{blue}0 & \color{blue}{\cos \theta} & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{pmatrix} x \\ y \\ z \\ 1 \end{pmatrix} = \begin{pmatrix} \color{red}{\cos \theta} \cdot x + \color{red}{\sin \theta} \cdot z \\ y \\ - \color{blue}{\sin \theta} \cdot x + \color{blue}{\cos \theta} \cdot z \\ 1 \end{pmatrix} \] + </p> + + <p> + Rotation around the Z-axis: + + \[\begin{bmatrix} \color{red}{\cos \theta} & - \color{red}{\sin \theta} & \color{red}0 & \color{red}0 \\ \color{green}{\sin \theta} & \color{green}{\cos \theta} & \color{green}0 & \color{green}0 \\ \color{blue}0 & \color{blue}0 & \color{blue}1 & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{pmatrix} x \\ y \\ z \\ 1 \end{pmatrix} = \begin{pmatrix} \color{red}{\cos \theta} \cdot x - \color{red}{\sin \theta} \cdot y \\ \color{green}{\sin \theta} \cdot x + \color{green}{\cos \theta} \cdot y \\ z \\ 1 \end{pmatrix} \] + </p> + + + <p> + Using the rotation matrices we can transform our position vectors around one of the three unit axes. To rotate around an arbitrary 3D axis we can combine all 3 them by first rotating around the X-axis, then Y and then Z for example. However, this quickly introduces a problem called <def>Gimbal lock</def>. We won't discuss the details, but a better solution is to rotate around an arbitrary unit axis e.g. <code>(0.662,0.2,0.722)</code> (note that this is a unit vector) right away instead of combining the rotation matrices. Such a (verbose) matrix exists and is given below with \((\color{red}{R_x}, \color{green}{R_y}, \color{blue}{R_z})\) as the arbitrary rotation axis: + + \[\begin{bmatrix} \cos \theta + \color{red}{R_x}^2(1 - \cos \theta) & \color{red}{R_x}\color{green}{R_y}(1 - \cos \theta) - \color{blue}{R_z} \sin \theta & \color{red}{R_x}\color{blue}{R_z}(1 - \cos \theta) + \color{green}{R_y} \sin \theta & 0 \\ \color{green}{R_y}\color{red}{R_x} (1 - \cos \theta) + \color{blue}{R_z} \sin \theta & \cos \theta + \color{green}{R_y}^2(1 - \cos \theta) & \color{green}{R_y}\color{blue}{R_z}(1 - \cos \theta) - \color{red}{R_x} \sin \theta & 0 \\ \color{blue}{R_z}\color{red}{R_x}(1 - \cos \theta) - \color{green}{R_y} \sin \theta & \color{blue}{R_z}\color{green}{R_y}(1 - \cos \theta) + \color{red}{R_x} \sin \theta & \cos \theta + \color{blue}{R_z}^2(1 - \cos \theta) & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix}\] + + + A mathematical discussion of generating such a matrix is out of the scope of this chapter. Keep in mind that even this matrix does not completely prevent gimbal lock (although it gets a lot harder). 
To truly prevent gimbal lock we have to represent rotations using <def>quaternions</def>, which are not only safer, but also more computationally friendly. However, a discussion of quaternions is out of this chapter's scope.
+  </p>
+
+<h2>Combining matrices</h2>
+<p>
+  The true power of using matrices for transformations is that we can combine multiple transformations in a single matrix thanks to matrix-matrix multiplication. Let's see if we can generate a transformation matrix that combines several transformations. Say we have a vector <code>(x,y,z)</code> and we want to scale it by 2 and then translate it by <code>(1,2,3)</code>. We need a translation and a scaling matrix for our required steps. The resulting transformation matrix would then look like:
+
+  \[Trans \cdot Scale = \begin{bmatrix} \color{red}1 & \color{red}0 & \color{red}0 & \color{red}1 \\ \color{green}0 & \color{green}1 & \color{green}0 & \color{green}2 \\ \color{blue}0 & \color{blue}0 & \color{blue}1 & \color{blue}3 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{bmatrix} \color{red}2 & \color{red}0 & \color{red}0 & \color{red}0 \\ \color{green}0 & \color{green}2 & \color{green}0 & \color{green}0 \\ \color{blue}0 & \color{blue}0 & \color{blue}2 & \color{blue}0 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} = \begin{bmatrix} \color{red}2 & \color{red}0 & \color{red}0 & \color{red}1 \\ \color{green}0 & \color{green}2 & \color{green}0 & \color{green}2 \\ \color{blue}0 & \color{blue}0 & \color{blue}2 & \color{blue}3 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \]
+
+  Note that when writing out the multiplication the translation matrix comes first and the scaling matrix second, even though we want the scale to be applied to the vector first. Matrix multiplication is not commutative, which means their order is important. When multiplying matrices the right-most matrix is first multiplied with the vector so you should read the multiplications from right to left. It is advised to first do scaling operations, then rotations and lastly translations when combining matrices, otherwise they may (negatively) affect each other. For example, if you would first do a translation and then scale, the translation vector would also scale!
+  </p>
+
+<p>
+  Running the final transformation matrix on our vector results in the following vector:
+
+  \[\begin{bmatrix} \color{red}2 & \color{red}0 & \color{red}0 & \color{red}1 \\ \color{green}0 & \color{green}2 & \color{green}0 & \color{green}2 \\ \color{blue}0 & \color{blue}0 & \color{blue}2 & \color{blue}3 \\ \color{purple}0 & \color{purple}0 & \color{purple}0 & \color{purple}1 \end{bmatrix} \cdot \begin{bmatrix} x \\ y \\ z \\ 1 \end{bmatrix} = \begin{bmatrix} \color{red}2x + \color{red}1 \\ \color{green}2y + \color{green}2 \\ \color{blue}2z + \color{blue}3 \\ 1 \end{bmatrix} \]
+
+  Great! The vector is first scaled by two and then translated by <code>(1,2,3)</code>.
+  </p>
+
+<h1>In practice</h1>
+<p>
+  Now that we've explained all the theory behind transformations, it's time to see how we can actually use this knowledge to our advantage. OpenGL does not have any form of matrix or vector knowledge built in, so we have to define our own mathematics classes and functions. In this book we'd rather abstract from all the tiny mathematical details and simply use pre-made mathematics libraries. Luckily, there is an easy-to-use and tailored-for-OpenGL mathematics library called GLM.
+</p>
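+
+<p>
+  To get a feel for what such a math library has to do for us, here is a minimal, purely illustrative sketch (plain C++, not GLM's actual implementation) of multiplying a 4x4 matrix with a 4-component vector, exactly like the matrix-vector math from earlier in this chapter:
+</p>
+
+<pre><code>
+// Illustrative only: a row-major 4x4 matrix (m[row][col]) times a 4-component vector.
+#include &lt;array&gt;
+
+using Vec4 = std::array&lt;float, 4&gt;;
+using Mat4 = std::array&lt;std::array&lt;float, 4&gt;, 4&gt;;
+
+Vec4 multiply(const Mat4&amp; m, const Vec4&amp; v)
+{
+    Vec4 result{};
+    for (int row = 0; row &lt; 4; ++row)
+        for (int col = 0; col &lt; 4; ++col)
+            result[row] += m[row][col] * v[col];
+    return result;
+}
+</code></pre>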
+
+<h1>In practice</h1>
+<p>
+  Now that we've explained all the theory behind transformations, it's time to see how we can actually use this knowledge to our advantage. OpenGL does not have any form of matrix or vector knowledge built in, so we have to define our own mathematics classes and functions. In this book we'd rather abstract from all the tiny mathematical details and simply use pre-made mathematics libraries. Luckily, there is an easy-to-use and tailored-for-OpenGL mathematics library called GLM.
+  </p>
+
+  <h2>GLM</h2>
+<p>
+  <img src="/img/getting-started/glm.png" class="right"/>
+  GLM stands for Open<strong>GL</strong> <strong>M</strong>athematics and is a <em>header-only</em> library, which means that we only have to include the proper header files and we're done; no linking and compiling necessary.
+  GLM can be downloaded from their <a href="https://glm.g-truc.net/0.9.8/index.html" target="_blank">website</a>. Copy the root directory of the header files into your <em>includes</em> folder and let's get rolling.
+  </p>
+
+<!--<warning>
+  Since GLM version <code>0.9.9</code>, GLM by default initializes matrix types to a 0-initialized matrix, instead of the identity matrix. From that version on it is required to initialize matrix types as: <code>glm::mat4 mat = glm::mat4(1.0f)</code>.
+
+  For consistency with the tutorials' code it's advised to use a version of GLM lower than <code>0.9.9</code> or initialize all matrices as mentioned above.
+</warning>
+-->
+
+  <p>
+    Most of GLM's functionality that we need can be found in 3 header files that we'll include as follows:
+  </p>
+
+<pre><code>
+#include &lt;glm/glm.hpp&gt;
+#include &lt;glm/gtc/matrix_transform.hpp&gt;
+#include &lt;glm/gtc/type_ptr.hpp&gt;
+</code></pre>
+
+  <p>
+    Let's see if we can put our transformation knowledge to good use by translating a vector of <code>(1,0,0)</code> by <code>(1,1,0)</code> (note that we define it as a <code>glm::vec4</code> with its homogeneous coordinate set to <code>1.0</code>):
+  </p>
+
+<pre><code>
+glm::vec4 vec(1.0f, 0.0f, 0.0f, 1.0f);
+glm::mat4 trans = glm::mat4(1.0f);
+trans = <function id='55'>glm::translate</function>(trans, glm::vec3(1.0f, 1.0f, 0.0f));
+vec = trans * vec;
+std::cout &lt;&lt; vec.x &lt;&lt; vec.y &lt;&lt; vec.z &lt;&lt; std::endl;
+</code></pre>
+
+  <p>
+    We first define a vector named <code>vec</code> using GLM's built-in vector class. Next we define a <code>mat4</code> and explicitly initialize it to the identity matrix by initializing the matrix's diagonals to <code>1.0</code>; if we do not initialize it to the identity matrix the matrix would be a null matrix (all elements <code>0</code>) and all subsequent matrix operations would end up as a null matrix as well.
+</p>
+
+<p>
+The next step is to create a transformation matrix by passing our identity matrix to the <code><function id='55'>glm::translate</function></code> function, together with a translation vector (the given matrix is then multiplied with a translation matrix and the resulting matrix is returned). <br/>
+  Then we multiply our vector by the transformation matrix and output the result. If we still remember how matrix translation works then the resulting vector should be <code>(1+1,0+1,0+0)</code> which is <code>(2,1,0)</code>. This snippet of code outputs <code>210</code> so the translation matrix did its job.
+  </p>
+
+  <p>
+    Let's do something more interesting and scale and rotate the container object from the previous chapter:
+  </p>
+
+<pre><code>
+glm::mat4 trans = glm::mat4(1.0f);
+trans = <function id='57'>glm::rotate</function>(trans, <function id='63'>glm::radians</function>(90.0f), glm::vec3(0.0, 0.0, 1.0));
+trans = <function id='56'>glm::scale</function>(trans, glm::vec3(0.5, 0.5, 0.5));
+</code></pre>
+
+<p>
+  First we scale the container by <code>0.5</code> on each axis and then rotate the container <code>90</code> degrees around the Z-axis. GLM expects its angles in radians so we convert the degrees to radians using <code><function id='63'>glm::radians</function></code>. Note that the textured rectangle is on the XY plane so we want to rotate around the Z-axis. Keep in mind that the axis that we rotate around should be a unit vector, so be sure to normalize the vector first if you're not rotating around the X, Y, or Z axis. Because we pass the matrix to each of GLM's functions, GLM automatically multiplies the matrices together, resulting in a transformation matrix that combines all the transformations.
+  </p>
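+<p>
+  The unit-vector requirement is easy to overlook, so here's a small illustrative sketch (not needed for the container itself) of what rotating around a non-axis-aligned direction could look like, reusing the example axis from earlier and letting <code>glm::normalize</code> guarantee it's a unit vector:
+</p>
+
+<pre><code>
+// purely illustrative: rotate 90 degrees around an arbitrary (normalized) axis
+glm::vec3 axis = glm::normalize(glm::vec3(0.662f, 0.2f, 0.722f));
+glm::mat4 rot = glm::mat4(1.0f);
+rot = glm::rotate(rot, glm::radians(90.0f), axis);
+</code></pre>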
+
+  <p>
+    The next big question is: how do we get the transformation matrix to the shaders? We briefly mentioned before that GLSL also has a <code>mat4</code> type. So we'll adapt the vertex shader to accept a <code>mat4</code> uniform variable and multiply the position vector by the matrix uniform:
+  </p>
+
+<pre><code>
+#version 330 core
+layout (location = 0) in vec3 aPos;
+layout (location = 1) in vec2 aTexCoord;
+
+out vec2 TexCoord;
+
+uniform mat4 transform;
+
+void main()
+{
+    gl_Position = transform * vec4(aPos, 1.0f);
+    TexCoord = vec2(aTexCoord.x, aTexCoord.y);
+}
+</code></pre>
+
+<note>
+  GLSL also has <code>mat2</code> and <code>mat3</code> types that allow for swizzling-like operations just like vectors. All the aforementioned math operations (like scalar-matrix multiplication, matrix-vector multiplication and matrix-matrix multiplication) are allowed on the matrix types. Wherever special matrix operations are used we'll be sure to explain what's happening.
+</note>
+
+  <p>
+    We added the uniform and multiplied the position vector with the transformation matrix before passing it to <var>gl_Position</var>. Our container should now be twice as small and rotated <code>90</code> degrees (tilted to the left). We still need to pass the transformation matrix to the shader though:
+  </p>
+
+<pre><code>
+unsigned int transformLoc = <function id='45'>glGetUniformLocation</function>(ourShader.ID, "transform");
+<function id='44'>glUniform</function>Matrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(trans));
+</code></pre>
+
+<p>
+  We first query the location of the uniform variable and then send the matrix data to the shaders using <fun><function id='44'>glUniform</function></fun> with <code>Matrix4fv</code> as its postfix. The first argument should be familiar by now: the uniform's location. The second argument tells OpenGL how many matrices we'd like to send, which is <code>1</code>. The third argument asks us if we want to transpose our matrix, that is, to swap the columns and rows. OpenGL developers often use an internal matrix layout called <def>column-major ordering</def>, which is the default matrix layout in GLM, so there is no need to transpose the matrices; we can keep it at <var>GL_FALSE</var>. The last parameter is the actual matrix data, but GLM stores its matrices' data in a way that doesn't always match OpenGL's expectations, so we first convert the data with GLM's built-in function <fun>value_ptr</fun>.
+</p>
+
+<p>
+  We created a transformation matrix, declared a uniform in the vertex shader and sent the matrix to the shaders where we transform our vertex coordinates. The result should look something like this:
+</p>
+
+  <img src="/img/getting-started/transformations.png" class="clean" />
+
+<p>
+  Perfect! Our container is indeed tilted to the left and twice as small so the transformation was successful. Let's get a little more funky and see if we can rotate the container over time, and for fun we'll also reposition the container at the bottom-right side of the window. To rotate the container over time we have to update the transformation matrix in the render loop because it needs to update each frame. We use GLFW's time function to get an angle over time:
+</p>
+
+<pre><code>
+glm::mat4 trans = glm::mat4(1.0f);
+trans = <function id='55'>glm::translate</function>(trans, glm::vec3(0.5f, -0.5f, 0.0f));
+trans = <function id='57'>glm::rotate</function>(trans, (float)<function id='47'>glfwGetTime</function>(), glm::vec3(0.0f, 0.0f, 1.0f));
+</code></pre>
+
+<p>
+  Keep in mind that in the previous case we could declare the transformation matrix anywhere, but now we have to re-create it in each iteration of the render loop so the rotation is continuously updated. Usually when rendering scenes we have several transformation matrices that are re-created with new values each frame.
+</p>
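+<p>
+  To make that concrete, here's a minimal sketch of how this could sit in the render loop, assuming the <code>ourShader</code> object, the container's <var>VAO</var> with its element buffer, and the <fun>processInput</fun> helper from the previous chapters (texture binding is omitted for brevity):
+</p>
+
+<pre><code>
+while (!glfwWindowShouldClose(window))
+{
+    processInput(window);
+
+    glClearColor(0.2f, 0.3f, 0.3f, 1.0f);
+    glClear(GL_COLOR_BUFFER_BIT);
+
+    // re-create the transformation matrix each frame so the angle changes over time
+    glm::mat4 trans = glm::mat4(1.0f);
+    trans = glm::translate(trans, glm::vec3(0.5f, -0.5f, 0.0f));
+    trans = glm::rotate(trans, (float)glfwGetTime(), glm::vec3(0.0f, 0.0f, 1.0f));
+
+    // activate the shader, upload the matrix and draw the container
+    ourShader.use();
+    unsigned int transformLoc = glGetUniformLocation(ourShader.ID, "transform");
+    glUniformMatrix4fv(transformLoc, 1, GL_FALSE, glm::value_ptr(trans));
+
+    glBindVertexArray(VAO);
+    glDrawElements(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
+
+    glfwSwapBuffers(window);
+    glfwPollEvents();
+}
+</code></pre>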
+
+<p>
+  Here we first rotate the container around the origin <code>(0,0,0)</code> and once it's rotated, we translate its rotated version to the bottom-right corner of the screen. Remember that the actual transformation order should be read in reverse: even though in code we first translate and then later rotate, the actual transformations first apply a rotation and then a translation. Understanding all these combinations of transformations and how they apply to objects is tricky at first. Try experimenting with transformations like these and you'll quickly get a grasp of it.
+</p>
+
+
+<p>
+  If you did things right you should get the following result:
+</p>
+
+<div class="video paused" onclick="ClickVideo(this)">
+  <video width="600" height="450" loop>
+    <source src="/video/getting-started/transformations.mp4" type="video/mp4" />
+    <img src="/img/getting-started/transformations2.png" class="clean"/>
+  </video>
+</div>
+
+
+  <p>
+    And there you have it. A translated container that's rotated over time, all done by a single transformation matrix! Now you can see why matrices are such a powerful construct in graphics land. We can define an infinite number of transformations and combine them all in a single matrix that we can re-use as often as we'd like. Using transformations like this in the vertex shader saves us the effort of re-defining the vertex data and saves us some processing time as well, since we don't have to re-send our data all the time (which is quite slow); all we need to do is update the transformation uniform.
+  </p>
+
+<p>
+  If you didn't get the right result or you're stuck somewhere else, take a look at the <a href="/code_viewer_gh.php?code=src/1.getting_started/5.1.transformations/transformations.cpp" target="_blank">source code</a> and the updated <a href="https://learnopengl.com/code_viewer_gh.php?code=includes/learnopengl/shader_m.h" target="_blank">shader</a> class.
+</p>
+
+  <p>
+    In the next chapter we'll discuss how we can use matrices to define different coordinate spaces for our vertices. This will be our first step into 3D graphics!
+  </p>
+
+<h2>Further reading</h2>
+<ul>
+  <li><a href="https://www.youtube.com/playlist?list=PLZHQObOWTQDPD3MizzM2xVFitgF8hE_ab" target="_blank">Essence of Linear Algebra</a>: great video tutorial series by Grant Sanderson about the underlying mathematics of transformations and linear algebra.</li>
+
+</ul>
+
+<h2>Exercises</h2>
+<p>
+  <ul>
+    <li>Using the last transformation on the container, try switching the order around by first rotating and then translating. 
See what happens and try to reason why this happens: <a href="/code_viewer_gh.php?code=src/1.getting_started/5.2.transformations_exercise1/transformations_exercise1.cpp" target="_blank">solution</a>.</li> + <li>Try drawing a second container with another call to <fun><function id='2'>glDrawElements</function></fun> but place it at a different position using transformations <strong>only</strong>. Make sure this second container is placed at the top-left of the window and instead of rotating, scale it over time (using the <code>sin</code> function is useful here; note that using <code>sin</code> will cause the object to invert as soon as a negative scale is applied): <a href="/code_viewer_gh.php?code=src/1.getting_started/5.2.transformations_exercise2/transformations_exercise2.cpp" target="_blank">solution</a>.</li> + </ul> +</p> + + </div> + + <div id="hover"> + HI + </div> + <!-- 728x90/320x50 sticky footer --> +<div id="waldo-tag-6196"></div> + + <div id="disqus_thread"></div> + + + + +</div> <!-- container div --> + + +</div> <!-- super container div --> +</body> +</html> +\ No newline at end of file diff --git a/img/getting-started/cmake.png b/img/getting-started/cmake.png Binary files differ. diff --git a/img/getting-started/hellotriangle.png b/img/getting-started/hellotriangle.png Binary files differ. diff --git a/img/getting-started/hellotriangle2.png b/img/getting-started/hellotriangle2.png Binary files differ. diff --git a/img/getting-started/hellowindow.png b/img/getting-started/hellowindow.png Binary files differ. diff --git a/img/getting-started/hellowindow2.png b/img/getting-started/hellowindow2.png Binary files differ. diff --git a/img/getting-started/include_directories.png b/img/getting-started/include_directories.png Binary files differ. diff --git a/img/getting-started/linker_input.png b/img/getting-started/linker_input.png Binary files differ. diff --git a/img/getting-started/ndc.png b/img/getting-started/ndc.png Binary files differ. diff --git a/img/getting-started/opengl.jpg b/img/getting-started/opengl.jpg Binary files differ. diff --git a/img/getting-started/pipeline.png b/img/getting-started/pipeline.png Binary files differ. diff --git a/img/getting-started/vc_directories.png b/img/getting-started/vc_directories.png Binary files differ. diff --git a/img/getting-started/vertex_array_objects.png b/img/getting-started/vertex_array_objects.png Binary files differ. diff --git a/img/getting-started/vertex_array_objects_ebo.png b/img/getting-started/vertex_array_objects_ebo.png Binary files differ. diff --git a/img/getting-started/vertex_attribute_pointer.png b/img/getting-started/vertex_attribute_pointer.png Binary files differ. diff --git a/img/getting-started/x64.png b/img/getting-started/x64.png Binary files differ. 
diff --git a/static/style.css b/static/style.css @@ -0,0 +1,283 @@ +body { + color: #111; + background-color: #eee; + margin: 0px; + padding: 0px; + background-image: url('img/header_repeat2.png'); + background-repeat: repeat-x; +} + +img.translation { + float: left; + margin: 0px; + padding: 0px; + margin-right: 10px; + margin-top: -5px; +} + +#content { + /*width: 850px;*/ + float: left; +} + +#disqus_thread { + width: 850px; + float: right; + margin-top: 30px; + display: none; +} + +warning { + display:block; + margin:20px; + padding:15px; + background-color: #FFD2D2; + color: #444; + border-radius: 5px; + border: 2px solid #E0B3B3; +} + +note { + display:block; + margin:20px; + padding:15px; + background-color: #D8F5D8; + color: #444; + border-radius: 5px; + border: 2px solid #AFDFAF; +} + +author { + display:block; + margin:10px; + margin-top: 15px; + padding:10px; + background-color: #eee; + color: #444; + border-radius: 2px; + border: 2px solid #ccc; +} + +function { + cursor: pointer; + border-bottom: 1px solid #c1c1c1; +} + +def { + color: green; +} + +fun { + font-family: "Courier New", Courier, monospace; + color: #822; +} + +var { + font-style: normal; + font-family: "Courier New", Courier, monospace; + color: #227; +} + +p code { + +} + +pre code { + color: rgb(214, 210, 205); + background-color: rgb(32, 35, 36); + background-image: none; + border-color: rgb(67, 73, 76); + display: block; + background: #282B2E; + padding: 5px; + padding-left: 15px; + border-radius: 7px; + border: 3px solid #bbb; +} + +indepth strong.indepth_header { + display: block; + width: 100%; + text-align: center; + cursor: pointer; + color: #333; +} + +indepth { + display:block; + margin:20px; + padding: 10px; + background-color: #BDF; + color: #444; + border-radius: 5px; + border: 3px dotted #9BD; +} + +indepth p { + padding: 15px; + display: none; +} + +indepth img { + display: none; +} + +img { + display: block; + margin-left: auto; + margin-right: auto; + border-radius: 7px; + border: 3px solid #bbb; +} + +img.clean { + border: 0px; + border-radius: 0px; +} + +img.no_radius { + border-radius: 0px; +} + +img.left { + float: left; + margin: 10px; + border: 2px solid #bbb; + border-radius: 3px; + margin-right: 20px; +} + +img.right { + float: right; + margin: 10px; + margin-left: 20px; +} + +img.small { + width: 150px; + height: auto; +} + +img.medium { + width: 300px; + height: auto; +} + +img.large { + width: 500px; + height: auto; +} + +img.book { + margin-left: 100px; + margin-right: 50px; + height: 200px; + width: auto; + #margin-top: -5px; +} + +video { + position:relative; + display: block; + margin: 0px; + padding: 0px; +} + +video.clean { + border: 0px; +} + +.video { + position:relative; + cursor: pointer; + background-image: url('img/start_video.png'); + width: 600px; + height: 450px; + margin: 0px; + padding: 0px; + margin-left: auto; + margin-right: auto; + border-radius: 3px; + border: 3px solid #bbb; +} + +.paused video { + position:relative; + z-index: -1; +} + + +audio { + display: block; + margin-left: auto; + margin-right: auto; +} + +/* == TABLES == */ +table, td { + border-bottom: 1px solid #AAA; + border-top: 1px solid #AAA; +} + +th { + border-bottom: 2px solid #888; +} + +table { + border-collapse: collapse; + text-align: center; + margin-left: auto; + margin-right: auto; +} + +th, td { + padding: 6px; +} + +tr:nth-child(even) { + background-color: #DDD; +} + +pre code function { + text-decoration: none; + border-bottom: 1px solid #343434; +} + +#hover { + display: none; + 
position: absolute; + color: #222; + width: 600px; + border-radius: 5px; + border: 5px dotted #ccc; + background-color: #eee; + background-image: url('img/header_repeat2.png'); + background-repeat: repeat-x; + opacity:0.95; + filter:alpha(opacity=95); /* For IE8 and earlier */ + padding: 10px; + padding-top: 0px; + padding-bottom: 0px; + font-size: 14px; +} +/* Elements */ +h1 { + color: #303236; + margin-bottom: -10px; +} + +h2 { + /*color: #394666;*/ + color: #37425d; + font-size: 22px; + margin-bottom: -10px; +} + +h3 { + color: #633739; + margin-bottom: -10px; +} + +a { + color: #58A; +} diff --git a/static/style2.css b/static/style2.css @@ -0,0 +1 @@ +