LearnOpenGL


commit 2612da0afdc82d78101adc26f3eb7758fbe436c9
parent d0543401a038679988934a03f3b530db3a7255cb
Author: Matsuda Kenji <ftvda283@gmail.com>
Date:   Thu, 16 Sep 2021 20:07:52 +0900

update texture.html

Diffstat:
Mtranslation/Getting-started/Textures.html | 569+++++++++++++++++++++++--------------------------------------------------------
Atranslation/img/getting-started/filter_linear.png | 0
Atranslation/img/getting-started/filter_nearest.png | 0
Atranslation/img/getting-started/mipmaps.png | 0
Rtranslation/img/start_video.png -> translation/img/getting-started/start_video.png | 0
Atranslation/img/getting-started/tex_coords.png | 0
Atranslation/img/getting-started/texture_filtering.png | 0
Atranslation/img/getting-started/texture_wrapping.png | 0
Atranslation/img/getting-started/textures.png | 0
Atranslation/img/getting-started/textures2.png | 0
Atranslation/img/getting-started/textures_combined.png | 0
Atranslation/img/getting-started/textures_combined2.png | 0
Atranslation/img/getting-started/textures_funky.png | 0
Atranslation/img/getting-started/vertex_attribute_pointer_interleaved_textures.png | 0
14 files changed, 165 insertions(+), 404 deletions(-)

diff --git a/translation/Getting-started/Textures.html b/translation/Getting-started/Textures.html
@@ -1,327 +1,102 @@
<!DOCTYPE html>
<html lang="en">
<head>
    <meta charset="utf-8"/>
    <title>LearnOpenGL - Textures</title>
    <link rel="shortcut icon" type="image/ico" href="/favicon.ico" />
    <meta name="description" content="Learn OpenGL . com provides good and clear modern 3.3+ OpenGL tutorials with clear examples. A great resource to learn modern OpenGL aimed at beginners.">
    <meta name="fragment" content="!">
-   [removed: Google Analytics, ad-network, jQuery, highlight.js, MathJax and DISQUS script blocks]
+   <link rel="stylesheet" href="../static/style.css" />
</head>
<body>
-   [removed: site header, ad containers, AdBlock notice, sidebar navigation menu and book/donation/BTC/ETH widgets]
    <div id="content">
    <h1 id="content-title">Textures</h1>
+   <h1 id="content-title">テクスチャ</h1>
    <h1 id="content-url" style='display:none;'>Getting-started/Textures</h1>
    <p>
        We learned that to add more detail to our objects we can use colors for each vertex to create some interesting images. However, to get a fair bit of realism we'd have to have many vertices so we could specify a lot of colors. This takes up a considerable amount of extra overhead, since each model needs a lot more vertices and for each vertex a color attribute as well.
+物体にディティールを追加して面白い画像を作るための方法として各頂点に色を着ける方法を学習しました。しかし、現実的な物体を描くためには大量の頂点を作成してそれぞれに色を指定しなければなりません。この方法では各モデルが大量の頂点と色の情報を必要とするので、処理にかかる負荷が非常に大きくなります。
    </p>
    <p>
        What artists and programmers generally prefer is to use a <def>texture</def>. A texture is a 2D image (even 1D and 3D textures exist) used to add detail to an object; think of a texture as a piece of paper with a nice brick image (for example) on it neatly folded over your 3D house so it looks like your house has a stone exterior. Because we can insert a lot of detail in a single image, we can give the illusion the object is extremely detailed without having to specify extra vertices.
+アーティストやプログラマは一般には<def>テクスチャ</def>を利用する方法を好みます。テクスチャは二次元の画像で、物体にディティールを与えるために利用されます(一次元や三次元のテクスチャも存在します)。例えばお洒落なレンガが描かれた紙を想像してみてください。これが三次元の家を包んでいれば、その家はレンガ作りに見えるでしょう。一つの画像の中に多くのディティールを詰め込めるので、頂点を追加せずとも細部まで作りこまれているかのように見せることができるのです。
    </p>
    <note>
        Next to images, textures can also be used to store a large collection of arbitrary data to send to the shaders, but we'll leave that for a different topic.
+テクスチャは画像に加え任意のデータを頂点シェーダーに送信するのに使えます。しかしこの話は別のトピックスとして取っておきます。
    </note>
    <p>
        Below you'll see a texture image of a <a href="/img/textures/wall.jpg" target="_blank">brick wall</a> mapped to the triangle from the previous chapter.
+以下に<a href="/img/textures/wall.jpg" target="_blank">レンガの壁</a>のテクスチャを貼り付けた三角形を示します。
    </p>
    <img src="/img/getting-started/textures.png" class="clean"/>
    <p>
        In order to map a texture to the triangle we need to tell each vertex of the triangle which part of the texture it corresponds to. Each vertex should thus have a <def>texture coordinate</def> associated with them that specifies what part of the texture image to sample from. Fragment interpolation then does the rest for the other fragments.
+三角形にテクスチャを貼り付けるには、各頂点がテクスチャのどの位置に対応するのかを指定する必要があります。そのため各頂点にはテクスチャ画像のどの部分に対応するかを示した<def>テクスチャ座標</def>が必要です。そうすれば他のフラグメントに対してはフラグメント補完がテクスチャを割り当ててくれます。
    </p>
    <p>
        Texture coordinates range from <code>0</code> to <code>1</code> in the <code>x</code> and <code>y</code> axis (remember that we use 2D texture images). Retrieving the texture color using texture coordinates is called <def>sampling</def>. Texture coordinates start at <code>(0,0)</code> for the lower left corner of a texture image to <code>(1,1)</code> for the upper right corner of a texture image. The following image shows how we map texture coordinates to the triangle:
+テクスチャ座標は<code>x</code>座標と<code>y</code>座標があり(テクスチャは二次元です)、それぞれ<code>0</code>から<code>1</code>の範囲で指定します。テクスチャ座標によりテクスチャの色を抽出することは<def>サンプリング</def>と呼ばれます。テクスチャ座標はテクスチャ画像の左下が<code>(0, 0)</code>で、右上が<code>(1, 1)</code>です。以下の画像はどのようにテクスチャ座標を三角形に対応させるかを示したものです:
    </p>
    <img src="/img/getting-started/tex_coords.png"/>
    <p>
        We specify 3 texture coordinate points for the triangle. We want the bottom-left side of the triangle to correspond with the bottom-left side of the texture so we use the <code>(0,0)</code> texture coordinate for the triangle's bottom-left vertex. The same applies to the bottom-right side with a <code>(1,0)</code> texture coordinate. The top of the triangle should correspond with the top-center of the texture image so we take <code>(0.5,1.0)</code> as its texture coordinate. We only have to pass 3 texture coordinates to the vertex shader, which then passes those to the fragment shader that neatly interpolates all the texture coordinates for each fragment.
+三角形に対して3つのテクスチャ座標を割り当てます。三角形の左下をテクスチャの左下に対応させたいので、この頂点には<code>(0, 0)</code>というテクスチャ座標を割り当てます。同様に右下の頂点のテクスチャ座標は<code>(1, 0)</code>にします。上の頂点はテクスチャ画像の上端中央を対応させるため<code>(0.5, 1.0)</code>というテクスチャ座標を割り当てます。3つのテクスチャ座標を頂点シェーダーに渡せば十分です。あとのフラグメントに関しては、フラグメントシェーダーにおいてきちんと補完されます。
    </p>
    <p>
        The resulting texture coordinates would then look like this:
+その結果テクスチャ座標は以下のようになります:
    </p>
<pre><code>
float texCoords[] = {
    0.0f, 0.0f,  // lower-left corner
    1.0f, 0.0f,  // lower-right corner
    0.5f, 1.0f   // top-center corner
};
</code></pre>
    <p>
        Texture sampling has a loose interpretation and can be done in many different ways. It is thus our job to tell OpenGL how it should <em>sample</em> its textures.
+テクスチャのサンプリングと一口に言っても多くの方法があります。そのためOpenGLがテクスチャをどのように<em>サンプリング</em>するかは自分達で指定しなければなりません。
    </p>
    <h2>Texture Wrapping</h2>
+   <h2>テクスチャの繰り返し</h2>
    <p>
        Texture coordinates usually range from <code>(0,0)</code> to <code>(1,1)</code> but what happens if we specify coordinates outside this range? The default behavior of OpenGL is to repeat the texture images (we basically ignore the integer part of the floating point texture coordinate), but there are more options OpenGL offers:
+テクスチャ座標は通常<code>(0, 0)</code>から<code>(1, 1)</code>までですが、この範囲外の座標を指定した場合なにが起こるでしょう。OpenGLのデフォルトの振舞いではテクスチャ画像を繰返します(テクスチャ座標の整数部分を基本的に無視します)が、他のオプションもあります:
    </p>
    <ul>
        <li><var>GL_REPEAT</var>: The default behavior for textures. Repeats the texture image.</li>
+       <li><var>GL_REPEAT</var>: デフォルトの振舞。テクスチャ画像を繰返す。</li>
        <li><var>GL_MIRRORED_REPEAT</var>: Same as <var>GL_REPEAT</var> but mirrors the image with each repeat.</li>
+       <li><var>GL_MIRRORED_REPEAT</var>: テクスチャ画像を鏡写しに繰返す。</li>
        <li><var>GL_CLAMP_TO_EDGE</var>: Clamps the coordinates between <code>0</code> and <code>1</code>. The result is that higher coordinates become clamped to the edge, resulting in a stretched edge pattern.</li>
+       <li><var>GL_CLAMP_TO_EDGE</var>: 座標を<code>0</code>と<code>1</code>の間に固定。範囲外の座標は端に固定され、縁を引き伸ばしたようになる。</li>
        <li><var>GL_CLAMP_TO_BORDER</var>: Coordinates outside the range are now given a user-specified border color.</li>
+       <li><var>GL_CLAMP_TO_BORDER</var>: 範囲外の座標にはユーザーが定めた境界線の色が与えられる。</li>
    </ul>
    <p>
        Each of the options has a different visual output when using texture coordinates outside the default range. Let's see what these look like on a sample texture image (original image by Hólger Rezende):
+各オプションはテクスチャ座標を用いたとき、範囲外においてそれぞれ違った見た目になります。どのような結果になるのか、サンプルのテクスチャ画像で検証してみましょう(テクスチャ画像はHólger Rezende氏作):
    </p>
    <img src="/img/getting-started/texture_wrapping.png" class="clean"/>
    <p>
        Each of the aforementioned options can be set per coordinate axis (<code>s</code>, <code>t</code> (and <code>r</code> if you're using 3D textures) equivalent to <code>x</code>,<code>y</code>,<code>z</code>) with the <fun><function id='15'>glTexParameter</function>*</fun> function:
+前述のオプションは<fun><function id='15'>glTexParameter</function>*</fun>により各座標軸毎に指定することも可能です(<code>s</code>、<code>t</code>(三次元のテクスチャの場合、<code>r</code>も)がそれぞれ<code>x</code>、<code>y</code>(、<code>z</code>)に対応)。
    </p>
<pre><code>
@@ -330,11 +105,13 @@ float texCoords[] = {
</code></pre>
    <p>
        The first argument specifies the texture target; we're working with 2D textures so the texture target is <var>GL_TEXTURE_2D</var>. The second argument requires us to tell what option we want to set and for which texture axis; we want to configure it for both the <code>S</code> and <code>T</code> axis. The last argument requires us to pass in the texture wrapping mode we'd like and in this case OpenGL will set its texture wrapping option on the currently active texture with <var>GL_MIRRORED_REPEAT</var>.
+最初の引数はテクスチャのターゲットです。ここでは二次元のテクスチャを利用しているのでこのターゲットは<var>GL_TEXTURE_2D</var>です。二番目の引数はどのオプションをどの座標軸に対して設定するのかを指定します。この例では<code>S</code>軸と<code>T</code>軸の両方を設定しています。最後の引数はテクスチャの繰り返し方を指定します。今回の場合、OpenGLは現在アクティブなテクスチャの繰り返し方を<var>GL_MIRRORED_REPEAT</var>にしています。
    </p>
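The wrapping call itself is collapsed by the hunk above; a minimal sketch of what such a call looks like, using the S/T axes and the GL_MIRRORED_REPEAT mode named in the surrounding text:

<pre><code>
// set the wrapping option for the currently bound 2D texture on both axes;
// GL_MIRRORED_REPEAT is one of the four modes listed above
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_MIRRORED_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_MIRRORED_REPEAT);
</code></pre>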
    <p>
        If we choose the <var>GL_CLAMP_TO_BORDER</var> option we should also specify a border color. This is done using the <code>fv</code> equivalent of the <fun><function id='15'>glTexParameter</function></fun> function with <var>GL_TEXTURE_BORDER_COLOR</var> as its option where we pass in a float array of the border's color value:
+<var>GL_CLAMP_TO_BORDER</var>を選択した場合、境界線の色も指定しなければなりません。これは<fun><function id='15'>glTexParameter</function></fun>の<code>fv</code>版にオプションとして<var>GL_TEXTURE_BORDER_COLOR</var>を指定し、境界線の色を浮動小数点数の配列として渡すことで行えます。
    </p>
<pre><code>
@@ -344,33 +121,33 @@ float borderColor[] = { 1.0f, 1.0f, 0.0f, 1.0f };
</code></pre>
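The snippet referenced by this hunk is not shown; a sketch of the <code>fv</code> call described above, using the border color that appears in the hunk's context line:

<pre><code>
// color returned for samples outside [0,1] when GL_CLAMP_TO_BORDER is active
float borderColor[] = { 1.0f, 1.0f, 0.0f, 1.0f };
glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, borderColor);
</code></pre>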
    <h2>Texture Filtering</h2>
    <p>
        Texture coordinates do not depend on resolution but can be any floating point value, thus OpenGL has to figure out which texture pixel (also known as a <def>texel</def>) to map the texture coordinate to. This becomes especially important if you have a very large object and a low resolution texture. You probably guessed by now that OpenGL has options for this <def>texture filtering</def> as well. There are several options available but for now we'll discuss the most important options: <var>GL_NEAREST</var> and <var>GL_LINEAR</var>.
    </p>
    <p>
        <var>GL_NEAREST</var> (also known as <def>nearest neighbor</def> or <def>point</def> filtering) is the default texture filtering method of OpenGL. When set to <var>GL_NEAREST</var>, OpenGL selects the texel whose center is closest to the texture coordinate. Below you can see 4 pixels where the cross represents the exact texture coordinate. The upper-left texel has its center closest to the texture coordinate and is therefore chosen as the sampled color:
    </p>
    <img src="/img/getting-started/filter_nearest.png" class="clean"/>
    <p>
        <var>GL_LINEAR</var> (also known as <def>(bi)linear filtering</def>) takes an interpolated value from the texture coordinate's neighboring texels, approximating a color between the texels. The smaller the distance from the texture coordinate to a texel's center, the more that texel's color contributes to the sampled color. Below we can see that a mixed color of the neighboring pixels is returned:
    </p>
    <img src="/img/getting-started/filter_linear.png" class="clean"/>
    <p>
        But what is the visual effect of such a texture filtering method? Let's see how these methods work when using a texture with a low resolution on a large object (texture is therefore scaled upwards and individual texels are noticeable):
    </p>
    <img src="/img/getting-started/texture_filtering.png" class="clean"/>
    <p>
        <var>GL_NEAREST</var> results in blocked patterns where we can clearly see the pixels that form the texture while <var>GL_LINEAR</var> produces a smoother pattern where the individual pixels are less visible. <var>GL_LINEAR</var> produces a more realistic output, but some developers prefer a more 8-bit look and as a result pick the <var>GL_NEAREST</var> option.
    </p>
    <p>
        Texture filtering can be set for <def>magnifying</def> and <def>minifying</def> operations (when scaling up or downwards) so you could for example use nearest neighbor filtering when textures are scaled downwards and linear filtering for upscaled textures. We thus have to specify the filtering method for both options via <fun><function id='15'>glTexParameter</function>*</fun>. The code should look similar to setting the wrapping method:
    </p>
<pre><code>
@@ -380,32 +157,32 @@ float borderColor[] = { 1.0f, 1.0f, 0.0f, 1.0f };
</code></pre>
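Again the actual lines are hidden behind the hunk; a sketch of specifying a filter for both the minifying and the magnifying case, with illustrative choices:

<pre><code>
// nearest-neighbor when the texture is scaled down, (bi)linear when it is scaled up
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
</code></pre>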
    <h3>Mipmaps</h3>
    <p>
        Imagine we had a large room with thousands of objects, each with an attached texture. There will be objects far away that have the same high resolution texture attached as the objects close to the viewer. Since the objects are far away and probably only produce a few fragments, OpenGL has difficulties retrieving the right color value for its fragment from the high resolution texture, since it has to pick a texture color for a fragment that spans a large part of the texture. This will produce visible artifacts on small objects, not to mention the waste of memory bandwidth using high resolution textures on small objects.
    </p>
    <p>
        To solve this issue OpenGL uses a concept called <def>mipmaps</def> that is basically a collection of texture images where each subsequent texture is twice as small compared to the previous one. The idea behind mipmaps should be easy to understand: after a certain distance threshold from the viewer, OpenGL will use a different mipmap texture that best suits the distance to the object. Because the object is far away, the smaller resolution will not be noticeable to the user. OpenGL is then able to sample the correct texels, and there's less cache memory involved when sampling that part of the mipmaps. Let's take a closer look at what a mipmapped texture looks like:
    </p>
    <img src="/img/getting-started/mipmaps.png" class="clean"/>
    <p>
        Creating a collection of mipmapped textures for each texture image is cumbersome to do manually, but luckily OpenGL is able to do all the work for us with a single call to <fun><function id='51'>glGenerateMipmap</function></fun> after we've created a texture.
    </p>
    <p>
        When switching between mipmap levels during rendering OpenGL may show some artifacts like sharp edges visible between the two mipmap layers. Just like normal texture filtering, it is also possible to filter between mipmap levels using <var>NEAREST</var> and <var>LINEAR</var> filtering. To specify the filtering method between mipmap levels we can replace the original filtering methods with one of the following four options:
    </p>
    <ul>
        <li><var>GL_NEAREST_MIPMAP_NEAREST</var>: takes the nearest mipmap to match the pixel size and uses nearest neighbor interpolation for texture sampling.</li>
        <li><var>GL_LINEAR_MIPMAP_NEAREST</var>: takes the nearest mipmap level and samples that level using linear interpolation.</li>
        <li><var>GL_NEAREST_MIPMAP_LINEAR</var>: linearly interpolates between the two mipmaps that most closely match the size of a pixel and samples the interpolated level via nearest neighbor interpolation.</li>
        <li><var>GL_LINEAR_MIPMAP_LINEAR</var>: linearly interpolates between the two closest mipmaps and samples the interpolated level via linear interpolation.</li>
    </ul>
    <p>
        Just like texture filtering we can set the filtering method to one of the 4 aforementioned methods using <fun><function id='15'>glTexParameter</function>i</fun>:
    </p>
<pre><code>
@@ -414,36 +191,36 @@ float borderColor[] = { 1.0f, 1.0f, 0.0f, 1.0f };
</code></pre>
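The collapsed snippet presumably picks one of these combinations; a sketch with trilinear filtering for minification (note, as the next paragraph explains, that the magnification filter takes a plain filter, not a mipmap option):

<pre><code>
// interpolate both within a mipmap level and between the two nearest levels when minifying
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR);
// magnification never uses mipmaps, so it gets a plain filter
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
</code></pre>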
    <p>
        A common mistake is to set one of the mipmap filtering options as the magnification filter. This doesn't have any effect since mipmaps are primarily used for when textures get downscaled: texture magnification doesn't use mipmaps and giving it a mipmap filtering option will generate an OpenGL <var>GL_INVALID_ENUM</var> error code.
    </p>
    <h1>Loading and creating textures</h1>
    <p>
        The first thing we need to do to actually use textures is to load them into our application. Texture images can be stored in dozens of file formats, each with their own structure and ordering of data, so how do we get those images in our application? One solution would be to choose a file format we'd like to use, say <code>.PNG</code> and write our own image loader to convert the image format into a large array of bytes. While it's not very hard to write your own image loader, it's still cumbersome and what if you want to support more file formats? You'd then have to write an image loader for each format you want to support.
    </p>
    <p>
        Another solution, and probably a good one, is to use an image-loading library that supports several popular formats and does all the hard work for us. A library like <code>stb_image.h</code>.
    </p>
    <h2>stb_image.h</h2>
    <p>
        <code>stb_image.h</code> is a very popular single header image loading library by <a href="https://github.com/nothings" target="_blank">Sean Barrett</a> that is able to load most popular file formats and is easy to integrate in your project(s). <code>stb_image.h</code> can be downloaded from <a href="https://github.com/nothings/stb/blob/master/stb_image.h" target="_blank">here</a>. Simply download the single header file, add it to your project as <code>stb_image.h</code>, and create an additional C++ file with the following code:
    </p>
<pre><code>
#define STB_IMAGE_IMPLEMENTATION
#include "stb_image.h"
</code></pre>
    <p>
        By defining <var>STB_IMAGE_IMPLEMENTATION</var> the preprocessor modifies the header file such that it only contains the relevant definition source code, effectively turning the header file into a <code>.cpp</code> file, and that's about it. Now simply include <code>stb_image.h</code> somewhere in your program and compile.
    </p>
    <p>
        For the following texture sections we're going to use an image of a <a href="/img/textures/container.jpg" target="_blank">wooden container</a>. To load an image using <code>stb_image.h</code> we use its <fun>stbi_load</fun> function:
    </p>
<pre><code>
@@ -452,12 +229,12 @@ unsigned char *data = stbi_load("container.jpg", &amp;width, &amp;height, &amp;nrChannels, 0);
</code></pre>
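The hunk only shows the call as context; written out (it appears in full in the complete listing further down), it looks like this:

<pre><code>
int width, height, nrChannels;
// stb_image fills in the width, height and number of color channels of the loaded image
unsigned char *data = stbi_load("container.jpg", &amp;width, &amp;height, &amp;nrChannels, 0);
</code></pre>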
    <p>
        The function first takes as input the location of an image file. It then expects you to give three <code>ints</code> as its second, third and fourth argument that <code>stb_image.h</code> will fill with the resulting image's <em>width</em>, <em>height</em> and <em>number</em> of color channels. We need the image's width and height for generating textures later on. <!--The last argument allows us to force a number of channels. Let's say the image has 4 channels (RGBA) and we only want to load the 3 color channels (RGB) without alpha, we set its last argument to <code>3</code>. -->
    </p>
    <h2>Generating a texture</h2>
    <p>
        Like any of the previous objects in OpenGL, textures are referenced with an ID; let's create one:
    </p>
<pre class="cpp"><code>
@@ -466,7 +243,7 @@ unsigned int texture;
</code></pre>
    <p>
        The <fun><function id='50'>glGenTextures</function></fun> function first takes as input how many textures we want to generate and stores them in an <code>unsigned int</code> array given as its second argument (in our case just a single <code>unsigned int</code>). Just like other objects we need to bind it so any subsequent texture commands will configure the currently bound texture:
    </p>
<pre><code>
@@ -474,7 +251,7 @@ unsigned int texture;
</code></pre>
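The two collapsed snippets above amount to creating the texture object and binding it; a sketch matching the hunk context:

<pre><code>
unsigned int texture;
glGenTextures(1, &amp;texture);            // generate 1 texture name and store it in 'texture'
glBindTexture(GL_TEXTURE_2D, texture);  // subsequent GL_TEXTURE_2D commands now affect it
</code></pre>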
    <p>
        Now that the texture is bound, we can start generating a texture using the previously loaded image data. Textures are generated with <fun><function id='52'>glTexImage2D</function></fun>:
    </p>
<pre class="cpp"><code>
@@ -483,24 +260,24 @@ unsigned int texture;
</code></pre>
    <p>
        This is a large function with quite a few parameters so we'll walk through them step-by-step:
    <ul>
        <li>The first argument specifies the texture target; setting this to <var>GL_TEXTURE_2D</var> means this operation will generate a texture on the currently bound texture object at the same target (so any textures bound to targets <var>GL_TEXTURE_1D</var> or <var>GL_TEXTURE_3D</var> will not be affected).</li>
        <li>The second argument specifies the mipmap level for which we want to create a texture, in case you want to set each mipmap level manually, but we'll leave it at the base level which is <code>0</code>.</li>
        <li>The third argument tells OpenGL in what kind of format we want to store the texture. Our image has only <code>RGB</code> values so we'll store the texture with <code>RGB</code> values as well.</li>
        <li>The 4th and 5th argument set the width and height of the resulting texture. We stored those earlier when loading the image so we'll use the corresponding variables.</li>
        <li>The next argument should always be <code>0</code> (some legacy stuff).</li>
        <li>The 7th and 8th argument specify the format and datatype of the source image. We loaded the image with <code>RGB</code> values and stored them as <code>char</code>s (bytes) so we'll pass in the corresponding values.</li>
        <li>The last argument is the actual image data.</li>
    </ul>
    </p>
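Putting those parameters together for the RGB, unsigned-byte image loaded above gives a call like the one in the full listing below; annotated here as a sketch:

<pre><code>
glTexImage2D(GL_TEXTURE_2D,    // target: the texture currently bound to GL_TEXTURE_2D
             0,                // mipmap level: 0, the base level
             GL_RGB,           // format to store the texture in
             width, height,    // dimensions filled in by stbi_load
             0,                // always 0 (legacy border parameter)
             GL_RGB,           // format of the source image
             GL_UNSIGNED_BYTE, // datatype of the source image
             data);            // the actual image data
</code></pre>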
</p>

<p>
-	After we're done generating the texture and its corresponding mipmaps, it is good practice to free the image memory:
+    After we're done generating the texture and its corresponding mipmaps, it is good practice to free the image memory:
</p>

<pre class="cpp"><code>
@@ -508,7 +285,7 @@ stbi_image_free(data);
</code></pre>

<p>
-	The whole process of generating a texture thus looks something like this:
+    The whole process of generating a texture thus looks something like this:
</p>

<pre><code>
@@ -525,34 +302,34 @@ int width, height, nrChannels;
unsigned char *data = stbi_load("container.jpg", &amp;width, &amp;height, &amp;nrChannels, 0);
if (data)
{
-	<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
-	<function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+    <function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGB, GL_UNSIGNED_BYTE, data);
+    <function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
}
else
{
-	std::cout &lt;&lt; "Failed to load texture" &lt;&lt; std::endl;
+    std::cout &lt;&lt; "Failed to load texture" &lt;&lt; std::endl;
}
stbi_image_free(data);
</code></pre>

<h2>Applying textures</h2>
<p>
-	For the upcoming sections we will use the rectangle shape drawn with <fun><function id='2'>glDrawElements</function></fun> from the final part of the <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">Hello Triangle</a> chapter.
-	We need to inform OpenGL how to sample the texture so we'll have to update the vertex data with the texture coordinates:
+    For the upcoming sections we will use the rectangle shape drawn with <fun><function id='2'>glDrawElements</function></fun> from the final part of the <a href="https://learnopengl.com/Getting-started/Hello-Triangle" target="_blank">Hello Triangle</a> chapter.
+    We need to inform OpenGL how to sample the texture so we'll have to update the vertex data with the texture coordinates:
</p>

<pre><code>
float vertices[] = {
-	// positions          // colors           // texture coords
-	 0.5f,  0.5f, 0.0f,   1.0f, 0.0f, 0.0f,   1.0f, 1.0f, // top right
-	 0.5f, -0.5f, 0.0f,   0.0f, 1.0f, 0.0f,   1.0f, 0.0f, // bottom right
-	-0.5f, -0.5f, 0.0f,   0.0f, 0.0f, 1.0f,   0.0f, 0.0f, // bottom left
-	-0.5f,  0.5f, 0.0f,   1.0f, 1.0f, 0.0f,   0.0f, 1.0f  // top left
+    // positions          // colors           // texture coords
+     0.5f,  0.5f, 0.0f,   1.0f, 0.0f, 0.0f,   1.0f, 1.0f, // top right
+     0.5f, -0.5f, 0.0f,   0.0f, 1.0f, 0.0f,   1.0f, 0.0f, // bottom right
+    -0.5f, -0.5f, 0.0f,   0.0f, 0.0f, 1.0f,   0.0f, 0.0f, // bottom left
+    -0.5f,  0.5f, 0.0f,   1.0f, 1.0f, 0.0f,   0.0f, 1.0f  // top left
};
</code></pre>

<p>
-	Since we've added an extra vertex attribute we again have to notify OpenGL of the new vertex format:
+    Since we've added an extra vertex attribute we again have to notify OpenGL of the new vertex format:
</p>

<img src="/img/getting-started/vertex_attribute_pointer_interleaved_textures.png" class="clean" alt="Image of VBO with interleaved position, color and texture data with strides and offsets shown for configuring vertex attribute pointers."/>

@@ -563,11 +340,11 @@ float vertices[] = {
</code></pre>

<p>
-	Note that we have to adjust the stride parameter of the previous two vertex attributes to <code>8 * sizeof(float)</code> as well.
+    Note that we have to adjust the stride parameter of the previous two vertex attributes to <code>8 * sizeof(float)</code> as well.
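+</p>
+
+<p>
+    For reference, here is a minimal sketch of what the full attribute setup could look like with the new 8-float stride (the actual calls are elided by this hunk), assuming attribute locations <code>0</code>, <code>1</code> and <code>2</code> for position, color and texture coordinates as laid out in the image above:
+</p>
+
+<pre class="cpp"><code>
+// position attribute: 3 floats, starting at offset 0
+glVertexAttribPointer(0, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)0);
+glEnableVertexAttribArray(0);
+// color attribute: 3 floats, starting right after the 3 position floats
+glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(3 * sizeof(float)));
+glEnableVertexAttribArray(1);
+// texture coordinate attribute: 2 floats, starting after position + color
+glVertexAttribPointer(2, 2, GL_FLOAT, GL_FALSE, 8 * sizeof(float), (void*)(6 * sizeof(float)));
+glEnableVertexAttribArray(2);
+</code></pre>
+
+<p>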
</p>

<p>
-	Next we need to alter the vertex shader to accept the texture coordinates as a vertex attribute and then forward the coordinates to the fragment shader:
+    Next we need to alter the vertex shader to accept the texture coordinates as a vertex attribute and then forward the coordinates to the fragment shader:
</p>

<pre><code>
@@ -581,24 +358,24 @@ out vec2 TexCoord;

void main()
{
-	gl_Position = vec4(aPos, 1.0);
-	ourColor = aColor;
-	TexCoord = aTexCoord;
+    gl_Position = vec4(aPos, 1.0);
+    ourColor = aColor;
+    TexCoord = aTexCoord;
}
</code></pre>

<p>
-	The fragment shader should then accept the <code>TexCoord</code> output variable as an input variable.
+    The fragment shader should then accept the <code>TexCoord</code> output variable as an input variable.
</p>

<p>
-	The fragment shader should also have access to the texture object, but how do we pass the texture object to the fragment shader? GLSL has a built-in data-type for texture objects called a <def>sampler</def> that takes as a postfix the texture type we want e.g. <code>sampler1D</code>, <code>sampler3D</code> or in our case <code>sampler2D</code>. We can then add a texture to the fragment shader by simply declaring a <code>uniform sampler2D</code> that we later assign our texture to.
+    The fragment shader should also have access to the texture object, but how do we pass the texture object to the fragment shader? GLSL has a built-in data-type for texture objects called a <def>sampler</def> that takes as a postfix the texture type we want e.g. <code>sampler1D</code>, <code>sampler3D</code> or in our case <code>sampler2D</code>. We can then add a texture to the fragment shader by simply declaring a <code>uniform sampler2D</code> that we later assign our texture to.
</p>

<pre><code>
#version 330 core
out vec4 FragColor;
-	
+
in vec3 ourColor;
in vec2 TexCoord;

@@ -606,16 +383,16 @@ uniform sampler2D ourTexture;

void main()
{
-	FragColor = texture(ourTexture, TexCoord);
+    FragColor = texture(ourTexture, TexCoord);
}
</code></pre>

<p>
-	To sample the color of a texture we use GLSL's built-in <fun>texture</fun> function that takes as its first argument a texture sampler and as its second argument the corresponding texture coordinates. The <fun>texture</fun> function then samples the corresponding color value using the texture parameters we set earlier. The output of this fragment shader is then the (filtered) color of the texture at the (interpolated) texture coordinate.
+    To sample the color of a texture we use GLSL's built-in <fun>texture</fun> function that takes as its first argument a texture sampler and as its second argument the corresponding texture coordinates. The <fun>texture</fun> function then samples the corresponding color value using the texture parameters we set earlier. The output of this fragment shader is then the (filtered) color of the texture at the (interpolated) texture coordinate.
</p>

<p>
-	All that's left to do now is to bind the texture before calling <fun><function id='2'>glDrawElements</function></fun> and it will then automatically assign the texture to the fragment shader's sampler:
+    All that's left to do now is to bind the texture before calling <fun><function id='2'>glDrawElements</function></fun> and it will then automatically assign the texture to the fragment shader's sampler:
</p>

<pre class="cpp"><code>
@@ -625,21 +402,21 @@ void main()
</code></pre>

<p>
-	If you did everything right you should see the following image:
+    If you did everything right you should see the following image:
</p>

<img src="/img/getting-started/textures2.png" class="clean"/>

<p>
-	If your rectangle is completely white or black you probably made an error along the way. Check your shader logs and try to compare your code with the application's <a href="/code_viewer_gh.php?code=src/1.getting_started/4.1.textures/textures.cpp" target="_blank">source code</a>.
+    If your rectangle is completely white or black you probably made an error along the way. Check your shader logs and try to compare your code with the application's <a href="/code_viewer_gh.php?code=src/1.getting_started/4.1.textures/textures.cpp" target="_blank">source code</a>.
</p>
-	
+
<warning>
-	If your texture code doesn't work or shows up as completely black, continue reading and work your way to the last example that <strong>should</strong> work. On some drivers it is <strong>required</strong> to assign a texture unit to each sampler uniform, which is something we'll discuss further in this chapter.
+    If your texture code doesn't work or shows up as completely black, continue reading and work your way to the last example that <strong>should</strong> work. On some drivers it is <strong>required</strong> to assign a texture unit to each sampler uniform, which is something we'll discuss further in this chapter.
</warning>

<p>
-	To get a little funky we can also mix the resulting texture color with the vertex colors. We simply multiply the resulting texture color with the vertex color in the fragment shader to mix both colors:
+    To get a little funky we can also mix the resulting texture color with the vertex colors. We simply multiply the resulting texture color with the vertex color in the fragment shader to mix both colors:
</p>

<pre><code>
@@ -647,22 +424,22 @@ FragColor = texture(ourTexture, TexCoord) * vec4(ourColor, 1.0);
</code></pre>

<p>
-	The result should be a mixture of the vertex's color and the texture's color:
+    The result should be a mixture of the vertex's color and the texture's color:
</p>

<img src="/img/getting-started/textures_funky.png" class="clean"/>

<p>
-	I guess you could say our container likes to disco.
+    I guess you could say our container likes to disco.
</p>

<h2>Texture Units</h2>
<p>
-	You probably wondered why the <code>sampler2D</code> variable is a uniform if we didn't even assign it some value with <fun><function id='44'>glUniform</function></fun>. Using <fun><function id='44'>glUniform</function>1i</fun> we can actually assign a <em>location</em> value to the texture sampler so we can set multiple textures at once in a fragment shader. This location of a texture is more commonly known as a <def>texture unit</def>. The default texture unit for a texture is <code>0</code> which is the default active texture unit so we didn't need to assign a location in the previous section; note that not all graphics drivers assign a default texture unit so the previous section may not have rendered for you.
+    You probably wondered why the <code>sampler2D</code> variable is a uniform if we didn't even assign it some value with <fun><function id='44'>glUniform</function></fun>. Using <fun><function id='44'>glUniform</function>1i</fun> we can actually assign a <em>location</em> value to the texture sampler so we can set multiple textures at once in a fragment shader. This location of a texture is more commonly known as a <def>texture unit</def>. The default texture unit for a texture is <code>0</code>, which is the default active texture unit, so we didn't need to assign a location in the previous section; note that not all graphics drivers assign a default texture unit, so the previous section may not have rendered for you.
</p>

<p>
-	The main purpose of texture units is to allow us to use more than 1 texture in our shaders. By assigning texture units to the samplers, we can bind to multiple textures at once as long as we activate the corresponding texture unit first. Just like <fun><function id='48'>glBindTexture</function></fun> we can activate texture units using <fun><function id='49'>glActiveTexture</function></fun> passing in the texture unit we'd like to use:
+    The main purpose of texture units is to allow us to use more than 1 texture in our shaders. By assigning texture units to the samplers, we can bind to multiple textures at once as long as we activate the corresponding texture unit first. Just like <fun><function id='48'>glBindTexture</function></fun> we can activate texture units using <fun><function id='49'>glActiveTexture</function></fun>, passing in the texture unit we'd like to use:
</p>

<pre class="cpp"><code>
@@ -671,15 +448,15 @@ FragColor = texture(ourTexture, TexCoord) * vec4(ourColor, 1.0);
</code></pre>

<p>
-	After activating a texture unit, a subsequent <fun><function id='48'>glBindTexture</function></fun> call will bind that texture to the currently active texture unit. Texture unit <var>GL_TEXTURE0</var> is always by default activated, so we didn't have to activate any texture units in the previous example when using <fun><function id='48'>glBindTexture</function></fun>.
+    After activating a texture unit, a subsequent <fun><function id='48'>glBindTexture</function></fun> call will bind that texture to the currently active texture unit. Texture unit <var>GL_TEXTURE0</var> is always activated by default, so we didn't have to activate any texture units in the previous example when using <fun><function id='48'>glBindTexture</function></fun>.
</p>

<note>
-	OpenGL should have a at least a minimum of 16 texture units for you to use which you can activate using <var>GL_TEXTURE0</var> to <var>GL_TEXTURE15</var>. They are defined in order so we could also get <var>GL_TEXTURE8</var> via <var>GL_TEXTURE0 + 8</var> for example, which is useful when we'd have to loop over several texture units.
+    OpenGL should have at least 16 texture units for you to use, which you can activate using <var>GL_TEXTURE0</var> to <var>GL_TEXTURE15</var>. They are defined in order, so we could also get <var>GL_TEXTURE8</var> via <var>GL_TEXTURE0 + 8</var> for example, which is useful when we'd have to loop over several texture units.
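+    For example, with a hypothetical array of texture IDs you could bind one texture per unit in a small loop (a sketch, not part of the original text):
+<pre class="cpp"><code>
+// textures[] and textureCount are hypothetical; bind texture i to texture unit i
+for (unsigned int i = 0; i &lt; textureCount; i++)
+{
+    <function id='49'>glActiveTexture</function>(GL_TEXTURE0 + i);
+    <function id='48'>glBindTexture</function>(GL_TEXTURE_2D, textures[i]);
+}
+</code></pre>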
</note>

<p>
-	We still however need to edit the fragment shader to accept another sampler. This should be relatively straightforward now:
+    We still however need to edit the fragment shader to accept another sampler. This should be relatively straightforward now:
</p>

<pre><code>
@@ -691,33 +468,33 @@ uniform sampler2D texture2;

void main()
{
-	FragColor = mix(texture(texture1, TexCoord), texture(texture2, TexCoord), 0.2);
+    FragColor = mix(texture(texture1, TexCoord), texture(texture2, TexCoord), 0.2);
}
</code></pre>

<p>
-	The final output color is now the combination of two texture lookups. GLSL's built-in <fun>mix</fun> function takes two values as input and linearly interpolates between them based on its third argument. If the third value is <code>0.0</code> it returns the first input; if it's <code>1.0</code> it returns the second input value. A value of <code>0.2</code> will return <code>80%</code> of the first input color and <code>20%</code> of the second input color, resulting in a mixture of both our textures.
+    The final output color is now the combination of two texture lookups. GLSL's built-in <fun>mix</fun> function takes two values as input and linearly interpolates between them based on its third argument. If the third value is <code>0.0</code> it returns the first input; if it's <code>1.0</code> it returns the second input value. A value of <code>0.2</code> will return <code>80%</code> of the first input color and <code>20%</code> of the second input color, resulting in a mixture of both our textures.
</p>

<p>
-	We now want to load and create another texture; you should be familiar with the steps now. Make sure to create another texture object, load the image and generate the final texture using <fun><function id='52'>glTexImage2D</function></fun>. For the second texture we'll use an image of your <a href="/img/textures/awesomeface.png" target="_blank">facial expression while learning OpenGL</a>:
+    We now want to load and create another texture; you should be familiar with the steps now. Make sure to create another texture object, load the image and generate the final texture using <fun><function id='52'>glTexImage2D</function></fun>. For the second texture we'll use an image of your <a href="/img/textures/awesomeface.png" target="_blank">facial expression while learning OpenGL</a>:
</p>
-	
+
<pre><code>
unsigned char *data = stbi_load("awesomeface.png", &amp;width, &amp;height, &amp;nrChannels, 0);
if (data)
{
-	<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
-	<function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+    <function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, GL_RGB, width, height, 0, GL_RGBA, GL_UNSIGNED_BYTE, data);
+    <function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
}
</code></pre>
-	
+
<p>
-	Note that we now load a <code>.png</code> image that includes an alpha (transparency) channel. This means we now need to specify that the image data contains an alpha channel as well by using <var>GL_RGBA</var>; otherwise OpenGL will incorrectly interpret the image data.
+    Note that we now load a <code>.png</code> image that includes an alpha (transparency) channel. This means we now need to specify that the image data contains an alpha channel as well by using <var>GL_RGBA</var>; otherwise OpenGL will incorrectly interpret the image data.
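+</p>
+
+<p>
+    Since <code>stbi_load</code> also reports the number of channels it loaded, a slightly more defensive variant (a sketch, not from the original chapter) is to derive the source format from <code>nrChannels</code> instead of hard-coding <var>GL_RGB</var> or <var>GL_RGBA</var>; reusing the same enum for the internal format would additionally keep the alpha channel in the stored texture:
+</p>
+
+<pre class="cpp"><code>
+// pick the source format from what stb_image actually loaded
+GLenum format = GL_RGB;
+if (nrChannels == 1)
+    format = GL_RED;
+else if (nrChannels == 4)
+    format = GL_RGBA;
+<function id='52'>glTexImage2D</function>(GL_TEXTURE_2D, 0, format, width, height, 0, format, GL_UNSIGNED_BYTE, data);
+<function id='51'>glGenerateMipmap</function>(GL_TEXTURE_2D);
+</code></pre>
+
+<p>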
</p>

<p>
-	To use the second texture (and the first texture) we'd have to change the rendering procedure a bit by binding both textures to the corresponding texture unit:
+    To use the second texture (and the first texture) we'd have to change the rendering procedure a bit by binding both textures to the corresponding texture unit:
</p>

<pre><code>
@@ -729,74 +506,59 @@ if (data)
<function id='27'>glBindVertexArray</function>(VAO);
<function id='2'>glDrawElements</function>(GL_TRIANGLES, 6, GL_UNSIGNED_INT, 0);
</code></pre>
-	
+
<p>
-	We also have to tell OpenGL to which texture unit each shader sampler belongs to by setting each sampler using <fun><function id='44'>glUniform</function>1i</fun>. We only have to set this once, so we can do this before we enter the render loop:
+    We also have to tell OpenGL which texture unit each shader sampler belongs to by setting each sampler using <fun><function id='44'>glUniform</function>1i</fun>. We only have to set this once, so we can do this before we enter the render loop:
</p>
-	
+
<pre><code>
ourShader.use(); // don't forget to activate the shader before setting uniforms!
<function id='44'>glUniform</function>1i(<function id='45'>glGetUniformLocation</function>(ourShader.ID, "texture1"), 0); // set it manually
ourShader.setInt("texture2", 1); // or with shader class
-	
+
while(...) 
{
-	[...]
+    [...]
}
</code></pre>

<p>
-	By setting the samplers via <fun><function id='44'>glUniform</function>1i</fun> we make sure each uniform sampler corresponds to the proper texture unit. You should get the following result:
+    By setting the samplers via <fun><function id='44'>glUniform</function>1i</fun> we make sure each uniform sampler corresponds to the proper texture unit. You should get the following result:
</p>

<img src="/img/getting-started/textures_combined.png" class="clean"/>

<p>
-	You probably noticed that the texture is flipped upside-down! This happens because OpenGL expects the <code>0.0</code> coordinate on the y-axis to be on the bottom side of the image, but images usually have <code>0.0</code> at the top of the y-axis. Luckily for us, <code>stb_image.h</code> can flip the y-axis during image loading by adding the following statement before loading any image:
-	</p>
-	
+    You probably noticed that the texture is flipped upside-down! This happens because OpenGL expects the <code>0.0</code> coordinate on the y-axis to be on the bottom side of the image, but images usually have <code>0.0</code> at the top of the y-axis. Luckily for us, <code>stb_image.h</code> can flip the y-axis during image loading by adding the following statement before loading any image:
+    </p>
+
<pre><code>
stbi_set_flip_vertically_on_load(true);
</code></pre>
-	
+
<p>
-	After telling <code>stb_image.h</code> to flip the y-axis when loading images you should get the following result:
+    After telling <code>stb_image.h</code> to flip the y-axis when loading images you should get the following result:
</p>

<img src="/img/getting-started/textures_combined2.png" class="clean"/>

<p>
-	If you see one happy container, you did things right. You can compare it with the <a href="/code_viewer_gh.php?code=src/1.getting_started/4.2.textures_combined/textures_combined.cpp" target="_blank">source code</a>.
+    If you see one happy container, you did things right. You can compare it with the <a href="/code_viewer_gh.php?code=src/1.getting_started/4.2.textures_combined/textures_combined.cpp" target="_blank">source code</a>.
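+</p>
+
+<p>
+    In case you are wondering what <code>ourShader.setInt</code> does behind the scenes: it is presumably nothing more than a thin wrapper around the same two GL calls used in the manual line above. A sketch of such a helper (the actual <code>Shader</code> class is the one built in the Shaders chapter) could look like this:
+</p>
+
+<pre class="cpp"><code>
+// sketch: set an integer (e.g. sampler) uniform on the program with handle ID
+void Shader::setInt(const std::string &amp;name, int value) const
+{
+    <function id='44'>glUniform</function>1i(<function id='45'>glGetUniformLocation</function>(ID, name.c_str()), value);
+}
+</code></pre>
+
+<p>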
</p>

<h2>Exercises</h2>
<p>
	To get more comfortable with textures it is advised to work through these exercises before continuing.
	<ul>
-	<li>Make sure <strong>only</strong> the happy face looks in the other/reverse direction by changing the fragment shader: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.3.textures_exercise1/textures_exercise1.cpp" target="_blank">solution</a>.</li>
-	<li>Experiment with the different texture wrapping methods by specifying texture coordinates in the range <code>0.0f</code> to <code>2.0f</code> instead of <code>0.0f</code> to <code>1.0f</code>. See if you can display 4 smiley faces on a single container image clamped at its edge: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.4.textures_exercise2/textures_exercise2.cpp" target="_blank">solution</a>, <a href="/img/getting-started/textures_exercise2.png" target="_blank">result</a>. See if you can experiment with other wrapping methods as well.</li>
-	<li>Try to display only the center pixels of the texture image on the rectangle in such a way that the individual pixels are getting visible by changing the texture coordinates. Try to set the texture filtering method to <var>GL_NEAREST</var> to see the pixels more clearly: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.5.textures_exercise3/textures_exercise3.cpp" target="_blank">solution</a>.</li>
-	<li>Use a uniform variable as the <fun>mix</fun> function's third parameter to vary the amount the two textures are visible. Use the up and down arrow keys to change how much the container or the smiley face is visible: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.6.textures_exercise4/textures_exercise4.cpp" target="_blank">solution</a>.</li>
+    <li>Make sure <strong>only</strong> the happy face looks in the other/reverse direction by changing the fragment shader: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.3.textures_exercise1/textures_exercise1.cpp" target="_blank">solution</a>.</li>
+    <li>Experiment with the different texture wrapping methods by specifying texture coordinates in the range <code>0.0f</code> to <code>2.0f</code> instead of <code>0.0f</code> to <code>1.0f</code>. See if you can display 4 smiley faces on a single container image clamped at its edge: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.4.textures_exercise2/textures_exercise2.cpp" target="_blank">solution</a>, <a href="/img/getting-started/textures_exercise2.png" target="_blank">result</a>. See if you can experiment with other wrapping methods as well.</li>
+    <li>Try to display only the center pixels of the texture image on the rectangle in such a way that the individual pixels become visible by changing the texture coordinates. Try to set the texture filtering method to <var>GL_NEAREST</var> to see the pixels more clearly: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.5.textures_exercise3/textures_exercise3.cpp" target="_blank">solution</a>.</li>
+    <li>Use a uniform variable as the <fun>mix</fun> function's third parameter to vary the amount by which the two textures are visible. 
Use the up and down arrow keys to change how much the container or the smiley face is visible: <a href="/code_viewer_gh.php?code=src/1.getting_started/4.6.textures_exercise4/textures_exercise4.cpp" target="_blank">solution</a>.</li> </ul> </p> - - - </div> - - <div id="hover"> - HI - </div> - <!-- 728x90/320x50 sticky footer --> -<div id="waldo-tag-6196"></div> - - <div id="disqus_thread"></div> - - - - -</div> <!-- container div --> + + </div> -</div> <!-- super container div --> </body> -</html> -\ No newline at end of file +</html> diff --git a/translation/img/getting-started/filter_linear.png b/translation/img/getting-started/filter_linear.png Binary files differ. diff --git a/translation/img/getting-started/filter_nearest.png b/translation/img/getting-started/filter_nearest.png Binary files differ. diff --git a/translation/img/getting-started/mipmaps.png b/translation/img/getting-started/mipmaps.png Binary files differ. diff --git a/translation/img/start_video.png b/translation/img/getting-started/start_video.png Binary files differ. diff --git a/translation/img/getting-started/tex_coords.png b/translation/img/getting-started/tex_coords.png Binary files differ. diff --git a/translation/img/getting-started/texture_filtering.png b/translation/img/getting-started/texture_filtering.png Binary files differ. diff --git a/translation/img/getting-started/texture_wrapping.png b/translation/img/getting-started/texture_wrapping.png Binary files differ. diff --git a/translation/img/getting-started/textures.png b/translation/img/getting-started/textures.png Binary files differ. diff --git a/translation/img/getting-started/textures2.png b/translation/img/getting-started/textures2.png Binary files differ. diff --git a/translation/img/getting-started/textures_combined.png b/translation/img/getting-started/textures_combined.png Binary files differ. diff --git a/translation/img/getting-started/textures_combined2.png b/translation/img/getting-started/textures_combined2.png Binary files differ. diff --git a/translation/img/getting-started/textures_funky.png b/translation/img/getting-started/textures_funky.png Binary files differ. diff --git a/translation/img/getting-started/vertex_attribute_pointer_interleaved_textures.png b/translation/img/getting-started/vertex_attribute_pointer_interleaved_textures.png Binary files differ.