JSON Models Example

Load and display several JSON models.

The JSON Models Example demonstrates how to load and display several JSON models and several textures. It also implements simple mouse input handling that allows rotating the scene and zooming into it.

JSON Model Loader

First we include a JSON model parser, which parses the JSON models into our internal model format:

Qt.include("ThreeJSLoader.js")

ThreeJSLoader.js contains a reimplementation of the JSON model parser from three.js; we will not go into its implementation details here.
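
The parser produces a plain JavaScript object that the rest of the example consumes. For reference, fillModel() below assumes the parsed data has at least the following shape (a minimal sketch based on how the fields are used, not the full parser output):

// Assumed shape of the object returned by parseJSON3DModel(); the field
// names are taken from how fillModel() uses the data below
var modelData = {
    vertices:  [],   // flat array of x, y, z positions
    normals:   [],   // flat array of x, y, z normals
    texCoords: [[]], // one or more UV sets; the example uses texCoords[0]
    indices:   []    // triangle indices, uploaded as a Uint16Array
};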

Loading the Models

First we need to initialize all array buffers for the models:

function initBuffers() {
    modelOne.verticesVBO = gl.createBuffer();
    modelOne.verticesVBO.name = "modelOne.verticesVBO";
    modelOne.normalsVBO  = gl.createBuffer();
    modelOne.normalsVBO.name = "modelOne.normalsVBO";
    modelOne.texCoordVBO = gl.createBuffer();
    modelOne.texCoordVBO.name = "modelOne.texCoordVBO";
    modelOne.indexVBO    = gl.createBuffer();
    modelOne.indexVBO.name = "modelOne.indexVBO";

    modelTwo.verticesVBO = gl.createBuffer();
    modelTwo.verticesVBO.name = "modelTwo.verticesVBO";
    modelTwo.normalsVBO  = gl.createBuffer();
    modelTwo.normalsVBO.name = "modelTwo.normalsVBO";
    modelTwo.texCoordVBO = gl.createBuffer();
    modelTwo.texCoordVBO.name = "modelTwo.texCoordVBO";
    modelTwo.indexVBO    = gl.createBuffer();
    modelTwo.indexVBO.name = "modelTwo.indexVBO";
    ...
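
The per-model buffer setup is identical for both models, so it could also be written as a small helper. The following is only a hypothetical refactoring sketch, not part of the example:

function createModelBuffers(model, prefix) {
    // Create one VBO per attribute plus an index buffer, and give each a
    // name so the log output stays readable
    model.verticesVBO = gl.createBuffer();
    model.verticesVBO.name = prefix + ".verticesVBO";
    model.normalsVBO = gl.createBuffer();
    model.normalsVBO.name = prefix + ".normalsVBO";
    model.texCoordVBO = gl.createBuffer();
    model.texCoordVBO.name = prefix + ".texCoordVBO";
    model.indexVBO = gl.createBuffer();
    model.indexVBO.name = prefix + ".indexVBO";
}

With such a helper, initBuffers() would reduce to two calls: createModelBuffers(modelOne, "modelOne") and createModelBuffers(modelTwo, "modelTwo").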

Then we request the models to be loaded:

function loadJSONModels() {
    // Load the first model
    var request = new XMLHttpRequest();
    request.open("GET", "gold.json");
    request.onreadystatechange = function () {
        if (request.readyState === XMLHttpRequest.DONE) {
            handleLoadedModel(JSON.parse(request.responseText));
        }
    }
    request.send();
    log("   XMLHttpRequest sent for model one")

    // Load the second model
    var request2 = new XMLHttpRequest();
    request2.open("GET", "woodbox.json");
    request2.onreadystatechange = function () {
        if (request2.readyState === XMLHttpRequest.DONE) {
            handleLoadedModel(JSON.parse(request2.responseText));
        }
    }
    request2.send();
    log("   XMLHttpRequest sent for model two")
    ...
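
The callbacks above parse the response as soon as the request is done. If the models might also be fetched over HTTP, it can be worth guarding on the response status before parsing; a hypothetical sketch (status 0 is assumed here to cover local and resource URLs):

request.onreadystatechange = function () {
    if (request.readyState === XMLHttpRequest.DONE) {
        // 200 covers successful HTTP responses; 0 is typically reported
        // for local/resource URLs (assumption, not from the example)
        if (request.status === 200 || request.status === 0)
            handleLoadedModel(JSON.parse(request.responseText));
        else
            log("Model request failed, status " + request.status);
    }
}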

Then, when the load requests return, we handle the models:

function handleLoadedModel(jsonObj) {
    log("handleLoadedModel...");
    var modelData = parseJSON3DModel(jsonObj, "");

    if (modelOne.count === 0)
        fillModel(modelData, modelOne);
    else if (modelTwo.count === 0)
        fillModel(modelData, modelTwo);
    ...
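
Note that handleLoadedModel() fills modelOne with whichever response happens to arrive first. If a fixed pairing between a file and a model is required, one alternative (a hypothetical sketch, not what the example does) is to bind the target model in each request's callback:

// Hypothetical: capture the target model in the closure so the pairing
// does not depend on the order in which the responses arrive
request.onreadystatechange = function () {
    if (request.readyState === XMLHttpRequest.DONE)
        fillModel(parseJSON3DModel(JSON.parse(request.responseText), ""), modelOne);
}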

Each buffer is then bound and filled with the data parsed from the JSON models:

function fillModel(modelData, model) {
    log("   fillModel...");
    log("   "+model.verticesVBO.name);
    gl.bindBuffer(gl.ARRAY_BUFFER, model.verticesVBO);
    gl.bufferData(gl.ARRAY_BUFFER,
                  new Float32Array(modelData.vertices),
                  gl.STATIC_DRAW);
    log("   "+model.normalsVBO.name);
    if (isLogEnabled && stateDumpExt)
        log("GL STATE DUMP:\n"
            + stateDumpExt.getGLStateDump(stateDumpExt.DUMP_VERTEX_ATTRIB_ARRAYS_BIT
                                          | stateDumpExt.DUMP_VERTEX_ATTRIB_ARRAYS_CONTENTS_BIT));

    gl.bindBuffer(gl.ARRAY_BUFFER, model.normalsVBO);
    gl.bufferData(gl.ARRAY_BUFFER,
                  new Float32Array(modelData.normals),
                  gl.STATIC_DRAW);

    log("   "+model.texCoordVBO.name);
    gl.bindBuffer(gl.ARRAY_BUFFER, model.texCoordVBO);
    gl.bufferData(gl.ARRAY_BUFFER,
                  new Float32Array(modelData.texCoords[0]),
                  gl.STATIC_DRAW);

    log("   "+model.indexVBO.name);
    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, model.indexVBO);
    gl.bufferData(gl.ELEMENT_ARRAY_BUFFER,
                  new Uint16Array(modelData.indices),
                  gl.STATIC_DRAW);

    model.count = modelData.indices.length;
    log("   ...fillModel");
}
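
When rendering, the filled buffers are bound again and drawn with an indexed draw call. The sketch below shows the general pattern; the attribute locations (positionAttribute, normalAttribute, texCoordAttribute) are hypothetical names, assumed to have been queried from the shader program elsewhere:

function drawModel(model) {
    // Point each vertex attribute at its VBO
    gl.bindBuffer(gl.ARRAY_BUFFER, model.verticesVBO);
    gl.vertexAttribPointer(positionAttribute, 3, gl.FLOAT, false, 0, 0);
    gl.bindBuffer(gl.ARRAY_BUFFER, model.normalsVBO);
    gl.vertexAttribPointer(normalAttribute, 3, gl.FLOAT, false, 0, 0);
    gl.bindBuffer(gl.ARRAY_BUFFER, model.texCoordVBO);
    gl.vertexAttribPointer(texCoordAttribute, 2, gl.FLOAT, false, 0, 0);

    // model.count was set to the index count in fillModel(), and the indices
    // were uploaded as a Uint16Array, hence gl.UNSIGNED_SHORT
    gl.bindBuffer(gl.ELEMENT_ARRAY_BUFFER, model.indexVBO);
    gl.drawElements(gl.TRIANGLES, model.count, gl.UNSIGNED_SHORT, 0);
}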

Loading the Textures

First we create a TextureImage object for each of the images we are going to load, and register handlers for the imageLoaded and imageLoadingFailed signals. In the imageLoaded handlers we create the OpenGL textures:

function loadTextures() {
    // Load the first texture
    var goldImage = TextureImageFactory.newTexImage();
    goldImage.name = "goldImage";
    goldImage.imageLoaded.connect(function() {
        log("    creating model one texture");
        modelOneTexture = gl.createTexture();
        modelOneTexture.name = "modelOneTexture";
        gl.bindTexture(gl.TEXTURE_2D, modelOneTexture);
        gl.texImage2D(gl.TEXTURE_2D,    // target
                      0,                // level
                      gl.RGBA,          // internalformat
                      gl.RGBA,          // format
                      gl.UNSIGNED_BYTE, // type
                      goldImage);       // pixels
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_NEAREST);
        gl.generateMipmap(gl.TEXTURE_2D);
    });
    goldImage.imageLoadingFailed.connect(function() {
        console.log("Texture load FAILED, "+goldImage.errorString);
    });
    goldImage.src = "qrc:///gold.jpg";
    log("   texture one source set")

    // Load the second texture
    var woodBoxImage = TextureImageFactory.newTexImage();
    woodBoxImage.name = "woodBoxImage";
    woodBoxImage.imageLoaded.connect(function() {
        log("    creating model two texture");
        modelTwoTexture = gl.createTexture();
        modelTwoTexture.name = "modelTwoTexture";
        gl.bindTexture(gl.TEXTURE_2D, modelTwoTexture);
        gl.texImage2D(gl.TEXTURE_2D,    // target
                      0,                // level
                      gl.RGBA,          // internalformat
                      gl.RGBA,          // format
                      gl.UNSIGNED_BYTE, // type
                      woodBoxImage);    // pixels
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MAG_FILTER, gl.LINEAR);
        gl.texParameteri(gl.TEXTURE_2D, gl.TEXTURE_MIN_FILTER, gl.LINEAR_MIPMAP_NEAREST);
        gl.generateMipmap(gl.TEXTURE_2D);
    });
    woodBoxImage.imageLoadingFailed.connect(function() {
        console.log("Texture load FAILED, "+woodBoxImage.errorString);
    });
    woodBoxImage.src = "qrc:///woodbox.jpg";
    log("   texture two source set")
    ...
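
At render time each texture only needs to be bound to a texture unit and exposed to the shader through a sampler uniform. A minimal sketch, assuming the sampler uniform location is stored in textureSamplerUniform (a hypothetical name):

// Bind model one's texture to texture unit 0 and point the sampler at it
gl.activeTexture(gl.TEXTURE0);
gl.bindTexture(gl.TEXTURE_2D, modelOneTexture);
gl.uniform1i(textureSamplerUniform, 0);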

Input Handling

First we add a MouseArea that fills the Canvas3D:

MouseArea {
    anchors.fill: parent
    ...

Before adding any functionality to it, we add some properties with initial values to the canvas:

property double xRot: 0.0
property double yRot: 45.0
property double distance: 2.0

After that, we add rotation on mouse movement when the left mouse button is pressed:

onMouseXChanged: {
    // Do not rotate if we don't have previous value
    if (previousY !== 0)
        canvas3d.yRot += mouseY - previousY
    previousY = mouseY
    // Limit the rotation to -90...90 degrees
    if (canvas3d.yRot > 90)
        canvas3d.yRot = 90
    if (canvas3d.yRot < -90)
        canvas3d.yRot = -90
}
onMouseYChanged: {
    // Do not rotate if we don't have previous value
    if (previousX !== 0)
        canvas3d.xRot += mouseX - previousX
    previousX = mouseX
    // Wrap the rotation around
    if (canvas3d.xRot > 180)
        canvas3d.xRot -= 360
    if (canvas3d.xRot < -180)
        canvas3d.xRot += 360
}
onReleased: {
    // Reset previous mouse positions to avoid rotation jumping
    previousX = 0
    previousY = 0
}

We keep the previous x and y values in these properties to avoid the rotation jumping when the mouse button is released and pressed again:

property int previousY: 0
property int previousX: 0

Then we add zooming with the mouse wheel:

onWheel: {
    canvas3d.distance -= wheel.angleDelta.y / 1000.0
    // Limit the distance to 0.5...10
    if (canvas3d.distance < 0.5)
        canvas3d.distance = 0.5
    if (canvas3d.distance > 10)
        canvas3d.distance = 10
}
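
The value of wheel.angleDelta.y is reported in eighths of a degree, so a typical single wheel notch (15 degrees, that is 120 units) changes the distance by 120 / 1000 = 0.12 units.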

These properties are then used on the JavaScript side when calculating the eye/camera position:

// Get the view matrix
mat4.identity(vMatrix);
eye = moveEye(canvas.xRot, canvas.yRot, canvas.distance);
mat4.lookAt(vMatrix, eye, [0, 0, 0], [0, 1, 0]);

The rotation values are converted into an eye position as follows:

function moveEye(xRot, yRot, distance) {
    var xAngle = degToRad(xRot);
    var yAngle = degToRad(yRot);

    var zPos = distance * Math.cos(xAngle) * Math.cos(yAngle);
    var xPos = distance * Math.sin(xAngle) * Math.cos(yAngle);
    var yPos = distance * Math.sin(yAngle);

    return [-xPos, yPos, zPos];
}
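
For example, with the initial values xRot = 0, yRot = 45, and distance = 2, this returns approximately [0, 1.41, 1.41]: the eye starts in front of and above the origin, looking down at it at a 45 degree angle.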
