Code example #1
  private static Mesh getMeshFromGrid(final ResultGrid resultGrid, float worldScale) {
    Vector3f translate =
        new Vector3f(resultGrid.minLocation.x, resultGrid.minLocation.y, resultGrid.minLocation.z);
    float scale = 1.0f / worldScale;
    Mesh mesh = MarchingCubesMesher.createMesh(resultGrid.dataGrid, scale, translate);

    // Setup bone weight buffer
    FloatBuffer weights = BufferUtils.createFloatBuffer(mesh.getVertexCount() * 4);
    VertexBuffer weightsBuf = new VertexBuffer(VertexBuffer.Type.HWBoneWeight);
    weightsBuf.setupData(VertexBuffer.Usage.Static, 4, VertexBuffer.Format.Float, weights);
    mesh.setBuffer(weightsBuf);

    // Setup bone index buffer
    ByteBuffer indices = BufferUtils.createByteBuffer(mesh.getVertexCount() * 4);
    VertexBuffer indicesBuf = new VertexBuffer(VertexBuffer.Type.HWBoneIndex);
    indicesBuf.setupData(VertexBuffer.Usage.Static, 4, VertexBuffer.Format.UnsignedByte, indices);
    mesh.setBuffer(indicesBuf);

    Vector3f v1 = new Vector3f();
    Vector3f v2 = new Vector3f();
    Vector3f v3 = new Vector3f();
    for (int i = 0; i < mesh.getTriangleCount(); i++) {
      mesh.getTriangle(i, v1, v2, v3);
      putBoneData(weights, indices, v1, resultGrid, scale, translate);
      putBoneData(weights, indices, v2, resultGrid, scale, translate);
      putBoneData(weights, indices, v3, resultGrid, scale, translate);
    }

    return mesh;
  }
Code example #2
File: RenderDeviceJme.java  Project: gormed/solarwars
  public RenderDeviceJme(NiftyJmeDisplay display) {
    this.display = display;

    quadColor = new VertexBuffer(Type.Color);
    quadColor.setNormalized(true);
    ByteBuffer bb = BufferUtils.createByteBuffer(4 * 4);
    quadColor.setupData(Usage.Stream, 4, Format.UnsignedByte, bb);
    quad.setBuffer(quadColor);

    quadModTC.setUsage(Usage.Stream);

    // Load the 3 material types separately to avoid
    // reloading the shader when the defines change.

    // Material with a single color (no texture or vertex color)
    colorMaterial = new Material(display.getAssetManager(), "Common/MatDefs/Misc/Unshaded.j3md");

    // Material with a texture and a color (no vertex color)
    textureColorMaterial =
        new Material(display.getAssetManager(), "Common/MatDefs/Misc/Unshaded.j3md");

    // Material with vertex color, used for gradients (no texture)
    vertexColorMaterial =
        new Material(display.getAssetManager(), "Common/MatDefs/Misc/Unshaded.j3md");
    vertexColorMaterial.setBoolean("VertexColor", true);

    // Shared render state for all materials
    renderState.setDepthTest(false);
    renderState.setDepthWrite(false);
  }
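
The color buffer above is created with Usage.Stream and normalized unsigned bytes, so it is meant to be refilled before each draw. A hedged sketch of such a refill helper follows (not part of the original file; it assumes VertexBuffer.updateData is available, as in jME3):

  // Hypothetical helper: write one RGBA color per quad vertex into the
  // normalized byte buffer created in the constructor, then push it to the VertexBuffer.
  private void setQuadColor(ByteBuffer bb, int r, int g, int b, int a) {
    bb.clear();
    for (int i = 0; i < 4; i++) { // 4 vertices of the quad
      bb.put((byte) r).put((byte) g).put((byte) b).put((byte) a);
    }
    bb.flip();
    quadColor.updateData(bb); // mark the buffer data as changed
  }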
Code example #3
  protected ByteBuffer readByteBuffer(byte[] content) throws IOException {
    int length = readInt(content);
    if (length == BinaryOutputCapsule.NULL_OBJECT) return null;

    if (BinaryImporter.canUseFastBuffers()) {
      ByteBuffer value = BufferUtils.createByteBuffer(length);
      value.put(content, index, length).rewind();
      index += length;
      return value;
    } else {
      ByteBuffer value = BufferUtils.createByteBuffer(length);
      for (int x = 0; x < length; x++) {
        value.put(readByteForBuffer(content));
      }
      value.rewind();
      return value;
    }
  }
Code example #4
File: FloatToFixed.java  Project: jingchan/jh_rogue
  public static VertexBuffer convertToUByte(VertexBuffer vb) {
    FloatBuffer fb = (FloatBuffer) vb.getData();
    ByteBuffer bb = BufferUtils.createByteBuffer(fb.capacity());
    convertToUByte(fb, bb);

    VertexBuffer newVb = new VertexBuffer(vb.getBufferType());
    newVb.setupData(vb.getUsage(), vb.getNumComponents(), Format.UnsignedByte, bb);
    newVb.setNormalized(true);
    return newVb;
  }
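
A minimal usage sketch (not from the original project): swapping a mesh's float color buffer for the normalized unsigned-byte version returned by convertToUByte. It assumes the same Mesh and VertexBuffer types used in the surrounding examples.

  // Hypothetical helper: compress a Float color buffer to UnsignedByte, if one is present.
  public static void compressColorBuffer(Mesh mesh) {
    VertexBuffer colors = mesh.getBuffer(Type.Color);
    if (colors != null && colors.getFormat() == Format.Float) {
      VertexBuffer packed = convertToUByte(colors);
      mesh.clearBuffer(Type.Color);
      mesh.setBuffer(packed);
    }
  }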
Code example #5
File: FloatToFixed.java  Project: jingchan/jh_rogue
  public static void convertToFixed(Geometry geom, Format posFmt, Format nmFmt, Format tcFmt) {
    geom.updateModelBound();
    BoundingBox bbox = (BoundingBox) geom.getModelBound();
    Mesh mesh = geom.getMesh();

    VertexBuffer positions = mesh.getBuffer(Type.Position);
    VertexBuffer normals = mesh.getBuffer(Type.Normal);
    VertexBuffer texcoords = mesh.getBuffer(Type.TexCoord);
    VertexBuffer indices = mesh.getBuffer(Type.Index);

    // positions
    FloatBuffer fb = (FloatBuffer) positions.getData();
    if (posFmt != Format.Float) {
      Buffer newBuf =
          VertexBuffer.createBuffer(posFmt, positions.getNumComponents(), mesh.getVertexCount());
      Transform t = convertPositions(fb, bbox, newBuf);
      t.combineWithParent(geom.getLocalTransform());
      geom.setLocalTransform(t);

      VertexBuffer newPosVb = new VertexBuffer(Type.Position);
      newPosVb.setupData(positions.getUsage(), positions.getNumComponents(), posFmt, newBuf);
      mesh.clearBuffer(Type.Position);
      mesh.setBuffer(newPosVb);
    }

    // normals, automatically convert to signed byte
    fb = (FloatBuffer) normals.getData();

    ByteBuffer bb = BufferUtils.createByteBuffer(fb.capacity());
    convertNormals(fb, bb);

    normals = new VertexBuffer(Type.Normal);
    normals.setupData(Usage.Static, 3, Format.Byte, bb);
    normals.setNormalized(true);
    mesh.clearBuffer(Type.Normal);
    mesh.setBuffer(normals);

    // texcoords
    fb = (FloatBuffer) texcoords.getData();
    if (tcFmt != Format.Float) {
      Buffer newBuf =
          VertexBuffer.createBuffer(tcFmt, texcoords.getNumComponents(), mesh.getVertexCount());
      convertTexCoords2D(fb, newBuf);

      VertexBuffer newTcVb = new VertexBuffer(Type.TexCoord);
      newTcVb.setupData(texcoords.getUsage(), texcoords.getNumComponents(), tcFmt, newBuf);
      mesh.clearBuffer(Type.TexCoord);
      mesh.setBuffer(newTcVb);
    }
  }
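
A hedged example call (not from the original project). The target formats are illustrative; whether the unshown convertPositions and convertTexCoords2D helpers accept Format.Short is an assumption, and normals are always packed to signed bytes by the method above regardless of nmFmt.

  // Hypothetical usage: pack positions and texture coordinates to 16-bit fixed point.
  FloatToFixed.convertToFixed(geom, Format.Short, Format.Byte, Format.Short);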
Code example #6
 private void initOpenCL1() {
   clContext = context.getOpenCLContext();
   Device device = clContext.getDevices().get(0);
   clQueue = clContext.createQueue(device).register();
   // create kernel
   Program program = null;
   File tmpFolder = JmeSystem.getStorageFolder();
   File binaryFile = new File(tmpFolder, getClass().getSimpleName() + ".clc");
   try {
     // attempt to load cached binary
     byte[] bytes = Files.readAllBytes(binaryFile.toPath());
     ByteBuffer bb = BufferUtils.createByteBuffer(bytes);
     program = clContext.createProgramFromBinary(bb, device);
     program.build();
     LOG.info("reuse program from cached binaries");
   } catch (java.nio.file.NoSuchFileException ex) {
     // do nothing, cache was not created yet
   } catch (Exception ex) {
     LOG.log(Level.INFO, "Unable to use cached program binaries", ex);
   }
   if (program == null) {
     // build from sources
     String source =
         ""
             + "__kernel void ScaleKernel(__global float* vb, float scale)\n"
             + "{\n"
             + "  int idx = get_global_id(0);\n"
             + "  float3 pos = vload3(idx, vb);\n"
             + "  pos *= scale;\n"
             + "  vstore3(pos, idx, vb);\n"
             + "}\n";
     program = clContext.createProgramFromSourceCode(source);
     program.build();
     // Save binary
     try {
       ByteBuffer bb = program.getBinary(device);
       byte[] bytes = new byte[bb.remaining()];
       bb.get(bytes);
       Files.write(binaryFile.toPath(), bytes);
     } catch (UnsupportedOperationException | OpenCLException | IOException ex) {
       LOG.log(Level.SEVERE, "Unable to save program binaries", ex);
     }
     LOG.info("create new program from sources");
   }
   program.register();
   kernel = program.createKernel("ScaleKernel").register();
 }
Code example #7
public class LwjglGL1Renderer implements GL1Renderer {

  private static final Logger logger = Logger.getLogger(LwjglRenderer.class.getName());
  private final ByteBuffer nameBuf = BufferUtils.createByteBuffer(250);
  private final StringBuilder stringBuf = new StringBuilder(250);
  private final IntBuffer ib1 = BufferUtils.createIntBuffer(1);
  private final IntBuffer intBuf16 = BufferUtils.createIntBuffer(16);
  private final FloatBuffer fb16 = BufferUtils.createFloatBuffer(16);
  private final FloatBuffer fb4Null = BufferUtils.createFloatBuffer(4);
  private final RenderContext context = new RenderContext();
  private final NativeObjectManager objManager = new NativeObjectManager();
  private final EnumSet<Caps> caps = EnumSet.noneOf(Caps.class);
  private int maxTexSize;
  private int maxCubeTexSize;
  private int maxVertCount;
  private int maxTriCount;
  private int maxLights;
  private boolean gl12 = false;
  private final Statistics statistics = new Statistics();
  private int vpX, vpY, vpW, vpH;
  private int clipX, clipY, clipW, clipH;

  private Matrix4f worldMatrix = new Matrix4f();
  private Matrix4f viewMatrix = new Matrix4f();

  private ArrayList<Light> lightList = new ArrayList<Light>(8);
  private ColorRGBA materialAmbientColor = new ColorRGBA();
  private Vector3f tempVec = new Vector3f();

  protected void updateNameBuffer() {
    int len = stringBuf.length();

    nameBuf.position(0);
    nameBuf.limit(len);
    for (int i = 0; i < len; i++) {
      nameBuf.put((byte) stringBuf.charAt(i));
    }

    nameBuf.rewind();
  }

  public Statistics getStatistics() {
    return statistics;
  }

  public EnumSet<Caps> getCaps() {
    return caps;
  }

  public void initialize() {
    if (GLContext.getCapabilities().OpenGL12) {
      gl12 = true;
    }

    // Default values for certain GL state.
    glShadeModel(GL_SMOOTH);
    glColorMaterial(GL_FRONT_AND_BACK, GL_DIFFUSE);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

    // Enable rescaling/normalizing of normal vectors.
    // Fixes lighting issues with scaled models.
    if (gl12) {
      glEnable(GL12.GL_RESCALE_NORMAL);
    } else {
      glEnable(GL_NORMALIZE);
    }

    if (GLContext.getCapabilities().GL_ARB_texture_non_power_of_two) {
      caps.add(Caps.NonPowerOfTwoTextures);
    } else {
      logger.log(
          Level.WARNING,
          "Your graphics card does not "
              + "support non-power-of-2 textures. "
              + "Some features might not work.");
    }

    maxLights = glGetInteger(GL_MAX_LIGHTS);
    maxTexSize = glGetInteger(GL_MAX_TEXTURE_SIZE);
  }

  public void invalidateState() {
    context.reset();
  }

  public void resetGLObjects() {
    logger.log(Level.FINE, "Resetting objects and invalidating state");
    objManager.resetObjects();
    statistics.clearMemory();
    invalidateState();
  }

  public void cleanup() {
    logger.log(Level.FINE, "Deleting objects and invalidating state");
    objManager.deleteAllObjects(this);
    statistics.clearMemory();
    invalidateState();
  }

  public void setDepthRange(float start, float end) {
    glDepthRange(start, end);
  }

  public void clearBuffers(boolean color, boolean depth, boolean stencil) {
    int bits = 0;
    if (color) {
      // See the depth explanation below; we must enable color writes to be able to clear the
      // color buffer.
      if (context.colorWriteEnabled == false) {
        glColorMask(true, true, true, true);
        context.colorWriteEnabled = true;
      }
      bits = GL_COLOR_BUFFER_BIT;
    }
    if (depth) {

      // glClear(GL_DEPTH_BUFFER_BIT) does not seem to work when glDepthMask is false.
      // See this thread on the OpenGL board:
      // http://www.opengl.org/discussion_boards/ubbthreads.php?ubb=showflat&Number=257223
      // if depth clear is requested, we enable the depthMask
      if (context.depthWriteEnabled == false) {
        glDepthMask(true);
        context.depthWriteEnabled = true;
      }
      bits |= GL_DEPTH_BUFFER_BIT;
    }
    if (stencil) {
      bits |= GL_STENCIL_BUFFER_BIT;
    }
    if (bits != 0) {
      glClear(bits);
    }
  }

  public void setBackgroundColor(ColorRGBA color) {
    glClearColor(color.r, color.g, color.b, color.a);
  }

  private void setMaterialColor(int type, ColorRGBA color, ColorRGBA defaultColor) {
    if (color != null) {
      fb16.put(color.r).put(color.g).put(color.b).put(color.a).flip();
    } else {
      fb16.put(defaultColor.r).put(defaultColor.g).put(defaultColor.b).put(defaultColor.a).flip();
    }
    glMaterial(GL_FRONT_AND_BACK, type, fb16);
  }

  /** Applies fixed function bindings from the context to OpenGL */
  private void applyFixedFuncBindings(boolean forLighting) {
    if (forLighting) {
      glMaterialf(GL_FRONT_AND_BACK, GL_SHININESS, context.shininess);
      setMaterialColor(GL_AMBIENT, context.ambient, ColorRGBA.DarkGray);
      setMaterialColor(GL_DIFFUSE, context.diffuse, ColorRGBA.White);
      setMaterialColor(GL_SPECULAR, context.specular, ColorRGBA.Black);

      if (context.useVertexColor) {
        glEnable(GL_COLOR_MATERIAL);
      } else {
        glDisable(GL_COLOR_MATERIAL);
      }
    } else {
      // Ignore other values as they have no effect when
      // GL_LIGHTING is disabled.
      ColorRGBA color = context.color;
      if (color != null) {
        glColor4f(color.r, color.g, color.b, color.a);
      } else {
        glColor4f(1, 1, 1, 1);
      }
    }
    if (context.alphaTestFallOff > 0f) {
      glEnable(GL_ALPHA_TEST);
      glAlphaFunc(GL_GREATER, context.alphaTestFallOff);
    } else {
      glDisable(GL_ALPHA_TEST);
    }
  }

  /** Reset fixed function bindings to default values. */
  private void resetFixedFuncBindings() {
    context.alphaTestFallOff = 0f; // zero means disable alpha test!
    context.color = null;
    context.ambient = null;
    context.diffuse = null;
    context.specular = null;
    context.shininess = 0;
    context.useVertexColor = false;
  }

  public void setFixedFuncBinding(FixedFuncBinding ffBinding, Object val) {
    switch (ffBinding) {
      case Color:
        context.color = (ColorRGBA) val;
        break;
      case MaterialAmbient:
        context.ambient = (ColorRGBA) val;
        break;
      case MaterialDiffuse:
        context.diffuse = (ColorRGBA) val;
        break;
      case MaterialSpecular:
        context.specular = (ColorRGBA) val;
        break;
      case MaterialShininess:
        context.shininess = (Float) val;
        break;
      case UseVertexColor:
        context.useVertexColor = (Boolean) val;
        break;
      case AlphaTestFallOff:
        context.alphaTestFallOff = (Float) val;
        break;
    }
  }

  public void applyRenderState(RenderState state) {
    if (state.isWireframe() && !context.wireframe) {
      glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);
      context.wireframe = true;
    } else if (!state.isWireframe() && context.wireframe) {
      glPolygonMode(GL_FRONT_AND_BACK, GL_FILL);
      context.wireframe = false;
    }

    if (state.isDepthTest() && !context.depthTestEnabled) {
      glEnable(GL_DEPTH_TEST);
      glDepthFunc(GL_LEQUAL);
      context.depthTestEnabled = true;
    } else if (!state.isDepthTest() && context.depthTestEnabled) {
      glDisable(GL_DEPTH_TEST);
      context.depthTestEnabled = false;
    }

    if (state.isAlphaTest()) {
      setFixedFuncBinding(FixedFuncBinding.AlphaTestFallOff, state.getAlphaFallOff());
    } else {
      setFixedFuncBinding(FixedFuncBinding.AlphaTestFallOff, 0f); // disable it
    }

    if (state.isDepthWrite() && !context.depthWriteEnabled) {
      glDepthMask(true);
      context.depthWriteEnabled = true;
    } else if (!state.isDepthWrite() && context.depthWriteEnabled) {
      glDepthMask(false);
      context.depthWriteEnabled = false;
    }

    if (state.isColorWrite() && !context.colorWriteEnabled) {
      glColorMask(true, true, true, true);
      context.colorWriteEnabled = true;
    } else if (!state.isColorWrite() && context.colorWriteEnabled) {
      glColorMask(false, false, false, false);
      context.colorWriteEnabled = false;
    }

    if (state.isPointSprite()) {
      logger.log(Level.WARNING, "Point Sprite unsupported!");
    }

    if (state.isPolyOffset()) {
      if (!context.polyOffsetEnabled) {
        glEnable(GL_POLYGON_OFFSET_FILL);
        glPolygonOffset(state.getPolyOffsetFactor(), state.getPolyOffsetUnits());
        context.polyOffsetEnabled = true;
        context.polyOffsetFactor = state.getPolyOffsetFactor();
        context.polyOffsetUnits = state.getPolyOffsetUnits();
      } else {
        if (state.getPolyOffsetFactor() != context.polyOffsetFactor
            || state.getPolyOffsetUnits() != context.polyOffsetUnits) {
          glPolygonOffset(state.getPolyOffsetFactor(), state.getPolyOffsetUnits());
          context.polyOffsetFactor = state.getPolyOffsetFactor();
          context.polyOffsetUnits = state.getPolyOffsetUnits();
        }
      }
    } else {
      if (context.polyOffsetEnabled) {
        glDisable(GL_POLYGON_OFFSET_FILL);
        context.polyOffsetEnabled = false;
        context.polyOffsetFactor = 0;
        context.polyOffsetUnits = 0;
      }
    }
    if (state.getFaceCullMode() != context.cullMode) {
      if (state.getFaceCullMode() == RenderState.FaceCullMode.Off) {
        glDisable(GL_CULL_FACE);
      } else {
        glEnable(GL_CULL_FACE);
      }

      switch (state.getFaceCullMode()) {
        case Off:
          break;
        case Back:
          glCullFace(GL_BACK);
          break;
        case Front:
          glCullFace(GL_FRONT);
          break;
        case FrontAndBack:
          glCullFace(GL_FRONT_AND_BACK);
          break;
        default:
          throw new UnsupportedOperationException(
              "Unrecognized face cull mode: " + state.getFaceCullMode());
      }

      context.cullMode = state.getFaceCullMode();
    }

    if (state.getBlendMode() != context.blendMode) {
      if (state.getBlendMode() == RenderState.BlendMode.Off) {
        glDisable(GL_BLEND);
      } else {
        glEnable(GL_BLEND);
        switch (state.getBlendMode()) {
          case Off:
            break;
          case Additive:
            glBlendFunc(GL_ONE, GL_ONE);
            break;
          case AlphaAdditive:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE);
            break;
          case Color:
            glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_COLOR);
            break;
          case Alpha:
            glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA);
            break;
          case PremultAlpha:
            glBlendFunc(GL_ONE, GL_ONE_MINUS_SRC_ALPHA);
            break;
          case Modulate:
            glBlendFunc(GL_DST_COLOR, GL_ZERO);
            break;
          case ModulateX2:
            glBlendFunc(GL_DST_COLOR, GL_SRC_COLOR);
            break;
          default:
            throw new UnsupportedOperationException(
                "Unrecognized blend mode: " + state.getBlendMode());
        }
      }

      context.blendMode = state.getBlendMode();
    }

    if (state.isStencilTest()) {
      throw new UnsupportedOperationException(
          "OpenGL 1.1 doesn't support two sided stencil operations.");
    }
  }

  public void setViewPort(int x, int y, int w, int h) {
    if (x != vpX || vpY != y || vpW != w || vpH != h) {
      glViewport(x, y, w, h);
      vpX = x;
      vpY = y;
      vpW = w;
      vpH = h;
    }
  }

  public void setClipRect(int x, int y, int width, int height) {
    if (!context.clipRectEnabled) {
      glEnable(GL_SCISSOR_TEST);
      context.clipRectEnabled = true;
    }
    if (clipX != x || clipY != y || clipW != width || clipH != height) {
      glScissor(x, y, width, height);
      clipX = x;
      clipY = y;
      clipW = width;
      clipH = height;
    }
  }

  public void clearClipRect() {
    if (context.clipRectEnabled) {
      glDisable(GL_SCISSOR_TEST);
      context.clipRectEnabled = false;

      clipX = 0;
      clipY = 0;
      clipW = 0;
      clipH = 0;
    }
  }

  public void onFrame() {
    objManager.deleteUnused(this);
    //        statistics.clearFrame();
  }

  private FloatBuffer storeMatrix(Matrix4f matrix, FloatBuffer store) {
    store.clear();
    matrix.fillFloatBuffer(store, true);
    store.clear();
    return store;
  }

  private void setModelView(Matrix4f modelMatrix, Matrix4f viewMatrix) {
    if (context.matrixMode != GL_MODELVIEW) {
      glMatrixMode(GL_MODELVIEW);
      context.matrixMode = GL_MODELVIEW;
    }

    glLoadMatrix(storeMatrix(viewMatrix, fb16));
    glMultMatrix(storeMatrix(modelMatrix, fb16));
  }

  private void setProjection(Matrix4f projMatrix) {
    if (context.matrixMode != GL_PROJECTION) {
      glMatrixMode(GL_PROJECTION);
      context.matrixMode = GL_PROJECTION;
    }

    glLoadMatrix(storeMatrix(projMatrix, fb16));
  }

  public void setWorldMatrix(Matrix4f worldMatrix) {
    this.worldMatrix.set(worldMatrix);
  }

  public void setViewProjectionMatrices(Matrix4f viewMatrix, Matrix4f projMatrix) {
    this.viewMatrix.set(viewMatrix);
    setProjection(projMatrix);
  }

  public void setLighting(LightList list) {
    // XXX: This is an abuse of setLighting() to
    // apply fixed function bindings
    // and do other bookkeeping.
    if (list == null || list.size() == 0) {
      glDisable(GL_LIGHTING);
      applyFixedFuncBindings(false);
      setModelView(worldMatrix, viewMatrix);
      return;
    }

    // Number of lights set previously
    int numLightsSetPrev = lightList.size();

    // If more than maxLights are defined, they will be ignored.
    // The GL1 renderer is not permitted to crash due to a
    // GL1 limitation. It must render anything that the GL2 renderer
    // can render (even incorrectly).
    lightList.clear();
    materialAmbientColor.set(0, 0, 0, 0);

    for (int i = 0; i < list.size(); i++) {
      Light l = list.get(i);
      if (l.getType() == Light.Type.Ambient) {
        // Gather
        materialAmbientColor.addLocal(l.getColor());
      } else {
        // Add to list
        lightList.add(l);

        // Once maximum lights reached, exit loop.
        if (lightList.size() >= maxLights) {
          break;
        }
      }
    }

    applyFixedFuncBindings(true);

    glEnable(GL_LIGHTING);

    fb16.clear();
    fb16.put(materialAmbientColor.r)
        .put(materialAmbientColor.g)
        .put(materialAmbientColor.b)
        .put(1)
        .flip();

    glLightModel(GL_LIGHT_MODEL_AMBIENT, fb16);

    if (context.matrixMode != GL_MODELVIEW) {
      glMatrixMode(GL_MODELVIEW);
      context.matrixMode = GL_MODELVIEW;
    }
    // Lights are already in world space, so just convert
    // them to view space.
    glLoadMatrix(storeMatrix(viewMatrix, fb16));

    for (int i = 0; i < lightList.size(); i++) {
      int glLightIndex = GL_LIGHT0 + i;
      Light light = lightList.get(i);
      Light.Type lightType = light.getType();
      ColorRGBA col = light.getColor();
      Vector3f pos;

      // Enable the light
      glEnable(glLightIndex);

      // OGL spec states default value for light ambient is black
      switch (lightType) {
        case Directional:
          DirectionalLight dLight = (DirectionalLight) light;

          fb16.clear();
          fb16.put(col.r).put(col.g).put(col.b).put(col.a).flip();
          glLight(glLightIndex, GL_DIFFUSE, fb16);
          glLight(glLightIndex, GL_SPECULAR, fb16);

          pos = tempVec.set(dLight.getDirection()).negateLocal().normalizeLocal();
          fb16.clear();
          fb16.put(pos.x).put(pos.y).put(pos.z).put(0.0f).flip();
          glLight(glLightIndex, GL_POSITION, fb16);
          glLightf(glLightIndex, GL_SPOT_CUTOFF, 180);
          break;
        case Point:
          PointLight pLight = (PointLight) light;

          fb16.clear();
          fb16.put(col.r).put(col.g).put(col.b).put(col.a).flip();
          glLight(glLightIndex, GL_DIFFUSE, fb16);
          glLight(glLightIndex, GL_SPECULAR, fb16);

          pos = pLight.getPosition();
          fb16.clear();
          fb16.put(pos.x).put(pos.y).put(pos.z).put(1.0f).flip();
          glLight(glLightIndex, GL_POSITION, fb16);
          glLightf(glLightIndex, GL_SPOT_CUTOFF, 180);

          if (pLight.getRadius() > 0) {
            // Note: this doesn't follow the same attenuation model
            // as the one used in the lighting shader.
            glLightf(glLightIndex, GL_CONSTANT_ATTENUATION, 1);
            glLightf(glLightIndex, GL_LINEAR_ATTENUATION, pLight.getInvRadius() * 2);
            glLightf(
                glLightIndex,
                GL_QUADRATIC_ATTENUATION,
                pLight.getInvRadius() * pLight.getInvRadius());
          } else {
            glLightf(glLightIndex, GL_CONSTANT_ATTENUATION, 1);
            glLightf(glLightIndex, GL_LINEAR_ATTENUATION, 0);
            glLightf(glLightIndex, GL_QUADRATIC_ATTENUATION, 0);
          }

          break;
        case Spot:
          SpotLight sLight = (SpotLight) light;

          fb16.clear();
          fb16.put(col.r).put(col.g).put(col.b).put(col.a).flip();
          glLight(glLightIndex, GL_DIFFUSE, fb16);
          glLight(glLightIndex, GL_SPECULAR, fb16);

          pos = sLight.getPosition();
          fb16.clear();
          fb16.put(pos.x).put(pos.y).put(pos.z).put(1.0f).flip();
          glLight(glLightIndex, GL_POSITION, fb16);

          Vector3f dir = sLight.getDirection();
          fb16.clear();
          fb16.put(dir.x).put(dir.y).put(dir.z).put(1.0f).flip();
          glLight(glLightIndex, GL_SPOT_DIRECTION, fb16);

          float outerAngleRad = sLight.getSpotOuterAngle();
          float innerAngleRad = sLight.getSpotInnerAngle();
          float spotCut = outerAngleRad * FastMath.RAD_TO_DEG;
          float spotExpo = 0.0f;
          if (outerAngleRad > 0) {
            spotExpo = (1.0f - (innerAngleRad / outerAngleRad)) * 128.0f;
          }

          glLightf(glLightIndex, GL_SPOT_CUTOFF, spotCut);
          glLightf(glLightIndex, GL_SPOT_EXPONENT, spotExpo);

          if (sLight.getSpotRange() > 0) {
            glLightf(glLightIndex, GL_LINEAR_ATTENUATION, sLight.getInvSpotRange());
          } else {
            glLightf(glLightIndex, GL_LINEAR_ATTENUATION, 0);
          }

          break;
        default:
          throw new UnsupportedOperationException("Unrecognized light type: " + lightType);
      }
    }

    // Disable any previously enabled lights beyond the current count
    for (int i = lightList.size(); i < numLightsSetPrev; i++) {
      glDisable(GL_LIGHT0 + i);
    }

    // This will set view matrix as well.
    setModelView(worldMatrix, viewMatrix);
  }

  private int convertTextureType(Texture.Type type) {
    switch (type) {
      case TwoDimensional:
        return GL_TEXTURE_2D;
        //            case ThreeDimensional:
        //                return GL_TEXTURE_3D;
        //            case CubeMap:
        //                return GL_TEXTURE_CUBE_MAP;
      default:
        throw new UnsupportedOperationException("Unknown texture type: " + type);
    }
  }

  private int convertMagFilter(Texture.MagFilter filter) {
    switch (filter) {
      case Bilinear:
        return GL_LINEAR;
      case Nearest:
        return GL_NEAREST;
      default:
        throw new UnsupportedOperationException("Unknown mag filter: " + filter);
    }
  }

  private int convertMinFilter(Texture.MinFilter filter) {
    switch (filter) {
      case Trilinear:
        return GL_LINEAR_MIPMAP_LINEAR;
      case BilinearNearestMipMap:
        return GL_LINEAR_MIPMAP_NEAREST;
      case NearestLinearMipMap:
        return GL_NEAREST_MIPMAP_LINEAR;
      case NearestNearestMipMap:
        return GL_NEAREST_MIPMAP_NEAREST;
      case BilinearNoMipMaps:
        return GL_LINEAR;
      case NearestNoMipMaps:
        return GL_NEAREST;
      default:
        throw new UnsupportedOperationException("Unknown min filter: " + filter);
    }
  }

  private int convertWrapMode(Texture.WrapMode mode) {
    switch (mode) {
      case EdgeClamp:
      case Clamp:
      case BorderClamp:
        return GL_CLAMP;
      case Repeat:
        return GL_REPEAT;
      default:
        throw new UnsupportedOperationException("Unknown wrap mode: " + mode);
    }
  }

  private void setupTextureParams(Texture tex) {
    int target = convertTextureType(tex.getType());

    // filter things
    int minFilter = convertMinFilter(tex.getMinFilter());
    int magFilter = convertMagFilter(tex.getMagFilter());
    glTexParameteri(target, GL_TEXTURE_MIN_FILTER, minFilter);
    glTexParameteri(target, GL_TEXTURE_MAG_FILTER, magFilter);

    // repeat modes
    switch (tex.getType()) {
        //            case ThreeDimensional:
        //            case CubeMap:
        //                glTexParameteri(target, GL_TEXTURE_WRAP_R,
        // convertWrapMode(tex.getWrap(WrapAxis.R)));
      case TwoDimensional:
        glTexParameteri(target, GL_TEXTURE_WRAP_T, convertWrapMode(tex.getWrap(WrapAxis.T)));
        // falling through here is intentional.
        //            case OneDimensional:
        glTexParameteri(target, GL_TEXTURE_WRAP_S, convertWrapMode(tex.getWrap(WrapAxis.S)));
        break;
      default:
        throw new UnsupportedOperationException("Unknown texture type: " + tex.getType());
    }
  }

  public void updateTexImageData(Image img, Texture.Type type, int unit) {
    int texId = img.getId();
    if (texId == -1) {
      // create texture
      glGenTextures(ib1);
      texId = ib1.get(0);
      img.setId(texId);
      objManager.registerObject(img);

      statistics.onNewTexture();
    }

    // bind texture
    int target = convertTextureType(type);
    //        if (context.boundTextureUnit != unit) {
    //            glActiveTexture(GL_TEXTURE0 + unit);
    //            context.boundTextureUnit = unit;
    //        }
    if (context.boundTextures[unit] != img) {
      glEnable(target);
      glBindTexture(target, texId);
      context.boundTextures[unit] = img;

      statistics.onTextureUse(img, true);
    }

    // Check sizes if graphics card doesn't support NPOT
    if (!GLContext.getCapabilities().GL_ARB_texture_non_power_of_two) {
      if (img.getWidth() != 0 && img.getHeight() != 0) {
        if (!FastMath.isPowerOfTwo(img.getWidth()) || !FastMath.isPowerOfTwo(img.getHeight())) {

          // Resize texture to Power-of-2 size
          MipMapGenerator.resizeToPowerOf2(img);
        }
      }
    }

    if (!img.hasMipmaps() && img.isGeneratedMipmapsRequired()) {
      // No pregenerated mips available,
      // generate from base level if required

      // Check if hardware mips are supported
      if (GLContext.getCapabilities().OpenGL14) {
        glTexParameteri(target, GL14.GL_GENERATE_MIPMAP, GL_TRUE);
      } else {
        MipMapGenerator.generateMipMaps(img);
      }
      img.setMipmapsGenerated(true);
    }

    if (img.getWidth() > maxTexSize || img.getHeight() > maxTexSize) {
      throw new RendererException(
          "Cannot upload texture "
              + img
              + ". The maximum supported texture resolution is "
              + maxTexSize);
    }

    /*
    if (target == GL_TEXTURE_CUBE_MAP) {
    List<ByteBuffer> data = img.getData();
    if (data.size() != 6) {
    logger.log(Level.WARNING, "Invalid texture: {0}\n"
    + "Cubemap textures must contain 6 data units.", img);
    return;
    }
    for (int i = 0; i < 6; i++) {
    TextureUtil.uploadTexture(img, GL_TEXTURE_CUBE_MAP_POSITIVE_X + i, i, 0, tdc);
    }
    } else if (target == EXTTextureArray.GL_TEXTURE_2D_ARRAY_EXT) {
    List<ByteBuffer> data = img.getData();
    // -1 index specifies prepare data for 2D Array
    TextureUtil.uploadTexture(img, target, -1, 0, tdc);
    for (int i = 0; i < data.size(); i++) {
    // upload each slice of 2D array in turn
    // this time with the appropriate index
    TextureUtil.uploadTexture(img, target, i, 0, tdc);
    }
    } else {*/
    TextureUtil.uploadTexture(img, target, 0, 0);
    // }

    img.clearUpdateNeeded();
  }

  public void setTexture(int unit, Texture tex) {
    if (unit != 0 || tex.getType() != Texture.Type.TwoDimensional) {
      // throw new UnsupportedOperationException();
      return;
    }

    Image image = tex.getImage();
    if (image.isUpdateNeeded()
        || (image.isGeneratedMipmapsRequired() && !image.isMipmapsGenerated())) {
      updateTexImageData(image, tex.getType(), unit);
    }

    int texId = image.getId();
    assert texId != -1;

    Image[] textures = context.boundTextures;

    int type = convertTextureType(tex.getType());
    //        if (!context.textureIndexList.moveToNew(unit)) {
    //             if (context.boundTextureUnit != unit){
    //                glActiveTexture(GL_TEXTURE0 + unit);
    //                context.boundTextureUnit = unit;
    //             }
    //             glEnable(type);
    //        }

    //        if (context.boundTextureUnit != unit) {
    //            glActiveTexture(GL_TEXTURE0 + unit);
    //            context.boundTextureUnit = unit;
    //        }

    if (textures[unit] != image) {
      glEnable(type);
      glBindTexture(type, texId);
      textures[unit] = image;

      statistics.onTextureUse(image, true);
    } else {
      statistics.onTextureUse(image, false);
    }

    setupTextureParams(tex);
  }

  public void modifyTexture(Texture tex, Image pixels, int x, int y) {
    setTexture(0, tex);
    TextureUtil.uploadSubTexture(pixels, convertTextureType(tex.getType()), 0, x, y);
  }

  private void clearTextureUnits() {
    Image[] textures = context.boundTextures;
    if (textures[0] != null) {
      glDisable(GL_TEXTURE_2D);
      textures[0] = null;
    }
  }

  public void deleteImage(Image image) {
    int texId = image.getId();
    if (texId != -1) {
      ib1.put(0, texId);
      ib1.position(0).limit(1);
      glDeleteTextures(ib1);
      image.resetObject();
    }
  }

  private int convertArrayType(VertexBuffer.Type type) {
    switch (type) {
      case Position:
        return GL_VERTEX_ARRAY;
      case Normal:
        return GL_NORMAL_ARRAY;
      case TexCoord:
        return GL_TEXTURE_COORD_ARRAY;
      case Color:
        return GL_COLOR_ARRAY;
      default:
        return -1; // unsupported
    }
  }

  private int convertVertexFormat(VertexBuffer.Format fmt) {
    switch (fmt) {
      case Byte:
        return GL_BYTE;
      case Float:
        return GL_FLOAT;
      case Int:
        return GL_INT;
      case Short:
        return GL_SHORT;
      case UnsignedByte:
        return GL_UNSIGNED_BYTE;
      case UnsignedInt:
        return GL_UNSIGNED_INT;
      case UnsignedShort:
        return GL_UNSIGNED_SHORT;
      default:
        throw new UnsupportedOperationException("Unrecognized vertex format: " + fmt);
    }
  }

  private int convertElementMode(Mesh.Mode mode) {
    switch (mode) {
      case Points:
        return GL_POINTS;
      case Lines:
        return GL_LINES;
      case LineLoop:
        return GL_LINE_LOOP;
      case LineStrip:
        return GL_LINE_STRIP;
      case Triangles:
        return GL_TRIANGLES;
      case TriangleFan:
        return GL_TRIANGLE_FAN;
      case TriangleStrip:
        return GL_TRIANGLE_STRIP;
      default:
        throw new UnsupportedOperationException("Unrecognized mesh mode: " + mode);
    }
  }

  public void drawTriangleArray(Mesh.Mode mode, int count, int vertCount) {
    if (count > 1) {
      throw new UnsupportedOperationException();
    }

    glDrawArrays(convertElementMode(mode), 0, vertCount);
  }

  public void setVertexAttrib(VertexBuffer vb, VertexBuffer idb) {
    if (vb.getBufferType() == VertexBuffer.Type.Color && !context.useVertexColor) {
      // Ignore vertex color buffer if vertex color is disabled.
      return;
    }

    int arrayType = convertArrayType(vb.getBufferType());
    if (arrayType == -1) {
      return; // unsupported
    }
    glEnableClientState(arrayType);
    context.boundAttribs[vb.getBufferType().ordinal()] = vb;

    if (vb.getBufferType() == Type.Normal) {
      // normalize if requested
      if (vb.isNormalized() && !context.normalizeEnabled) {
        glEnable(GL_NORMALIZE);
        context.normalizeEnabled = true;
      } else if (!vb.isNormalized() && context.normalizeEnabled) {
        glDisable(GL_NORMALIZE);
        context.normalizeEnabled = false;
      }
    }

    // NOTE: Use data from interleaved buffer if specified
    Buffer data = idb != null ? idb.getData() : vb.getData();
    int comps = vb.getNumComponents();
    int type = convertVertexFormat(vb.getFormat());

    data.rewind();

    switch (vb.getBufferType()) {
      case Position:
        if (!(data instanceof FloatBuffer)) {
          throw new UnsupportedOperationException();
        }

        glVertexPointer(comps, vb.getStride(), (FloatBuffer) data);
        break;
      case Normal:
        if (!(data instanceof FloatBuffer)) {
          throw new UnsupportedOperationException();
        }

        glNormalPointer(vb.getStride(), (FloatBuffer) data);
        break;
      case Color:
        if (data instanceof FloatBuffer) {
          glColorPointer(comps, vb.getStride(), (FloatBuffer) data);
        } else if (data instanceof ByteBuffer) {
          glColorPointer(comps, true, vb.getStride(), (ByteBuffer) data);
        } else {
          throw new UnsupportedOperationException();
        }
        break;
      case TexCoord:
        if (!(data instanceof FloatBuffer)) {
          throw new UnsupportedOperationException();
        }

        glTexCoordPointer(comps, vb.getStride(), (FloatBuffer) data);
        break;
      default:
        // Ignore, this is an unsupported attribute for OpenGL1.
        break;
    }
  }

  public void setVertexAttrib(VertexBuffer vb) {
    setVertexAttrib(vb, null);
  }

  private void drawElements(int mode, int format, Buffer data) {
    switch (format) {
      case GL_UNSIGNED_BYTE:
        glDrawElements(mode, (ByteBuffer) data);
        break;
      case GL_UNSIGNED_SHORT:
        glDrawElements(mode, (ShortBuffer) data);
        break;
      case GL_UNSIGNED_INT:
        glDrawElements(mode, (IntBuffer) data);
        break;
      default:
        throw new UnsupportedOperationException();
    }
  }

  public void drawTriangleList(VertexBuffer indexBuf, Mesh mesh, int count) {
    Mesh.Mode mode = mesh.getMode();

    Buffer indexData = indexBuf.getData();
    indexData.rewind();

    if (mesh.getMode() == Mode.Hybrid) {
      throw new UnsupportedOperationException();
      /*
      int[] modeStart = mesh.getModeStart();
      int[] elementLengths = mesh.getElementLengths();

      int elMode = convertElementMode(Mode.Triangles);
      int fmt = convertVertexFormat(indexBuf.getFormat());
      //            int elSize = indexBuf.getFormat().getComponentSize();
      //            int listStart = modeStart[0];
      int stripStart = modeStart[1];
      int fanStart = modeStart[2];
      int curOffset = 0;
      for (int i = 0; i < elementLengths.length; i++) {
      if (i == stripStart) {
      elMode = convertElementMode(Mode.TriangleStrip);
      } else if (i == fanStart) {
      elMode = convertElementMode(Mode.TriangleStrip);
      }
      int elementLength = elementLengths[i];
      indexData.position(curOffset);

      drawElements(elMode,
      fmt,
      indexData);

      curOffset += elementLength;
      }*/
    } else {
      drawElements(convertElementMode(mode), convertVertexFormat(indexBuf.getFormat()), indexData);
    }
  }

  public void clearVertexAttribs() {
    for (int i = 0; i < 16; i++) {
      VertexBuffer vb = context.boundAttribs[i];
      if (vb != null) {
        int arrayType = convertArrayType(vb.getBufferType());
        glDisableClientState(arrayType);
        context.boundAttribs[vb.getBufferType().ordinal()] = null;
      }
    }
  }

  private void renderMeshDefault(Mesh mesh, int lod, int count) {
    VertexBuffer indices = null;

    VertexBuffer interleavedData = mesh.getBuffer(Type.InterleavedData);
    if (interleavedData != null && interleavedData.isUpdateNeeded()) {
      updateBufferData(interleavedData);
    }

    if (mesh.getNumLodLevels() > 0) {
      indices = mesh.getLodLevel(lod);
    } else {
      indices = mesh.getBuffer(Type.Index);
    }
    for (VertexBuffer vb : mesh.getBufferList().getArray()) {
      if (vb.getBufferType() == Type.InterleavedData
          || vb.getUsage() == Usage.CpuOnly // ignore cpu-only buffers
          || vb.getBufferType() == Type.Index) {
        continue;
      }

      if (vb.getStride() == 0) {
        // not interleaved
        setVertexAttrib(vb);
      } else {
        // interleaved
        setVertexAttrib(vb, interleavedData);
      }
    }

    if (indices != null) {
      drawTriangleList(indices, mesh, count);
    } else {
      glDrawArrays(convertElementMode(mesh.getMode()), 0, mesh.getVertexCount());
    }

    // TODO: Fix these to use IDList??
    clearVertexAttribs();
    clearTextureUnits();
    resetFixedFuncBindings();
  }

  public void renderMesh(Mesh mesh, int lod, int count) {
    if (mesh.getVertexCount() == 0) {
      return;
    }

    if (context.pointSize != mesh.getPointSize()) {
      glPointSize(mesh.getPointSize());
      context.pointSize = mesh.getPointSize();
    }
    if (context.lineWidth != mesh.getLineWidth()) {
      glLineWidth(mesh.getLineWidth());
      context.lineWidth = mesh.getLineWidth();
    }

    boolean dynamic = false;
    if (mesh.getBuffer(Type.InterleavedData) != null) {
      throw new UnsupportedOperationException("Interleaved meshes are not supported");
    }

    if (mesh.getNumLodLevels() == 0) {
      for (VertexBuffer vb : mesh.getBufferList().getArray()) {
        if (vb.getUsage() != VertexBuffer.Usage.Static) {
          dynamic = true;
          break;
        }
      }
    } else {
      dynamic = true;
    }

    statistics.onMeshDrawn(mesh, lod);

    //        if (!dynamic) {
    // dealing with a static object, generate display list
    //            renderMeshDisplayList(mesh);
    //        } else {
    renderMeshDefault(mesh, lod, count);
    //        }

  }

  public void setAlphaToCoverage(boolean value) {}

  public void setShader(Shader shader) {}

  public void deleteShader(Shader shader) {}

  public void deleteShaderSource(ShaderSource source) {}

  public void copyFrameBuffer(FrameBuffer src, FrameBuffer dst) {}

  public void copyFrameBuffer(FrameBuffer src, FrameBuffer dst, boolean copyDepth) {}

  public void setMainFrameBufferOverride(FrameBuffer fb) {}

  public void setFrameBuffer(FrameBuffer fb) {}

  public void readFrameBuffer(FrameBuffer fb, ByteBuffer byteBuf) {}

  public void deleteFrameBuffer(FrameBuffer fb) {}

  public void updateBufferData(VertexBuffer vb) {}

  public void deleteBuffer(VertexBuffer vb) {}
}
Code example #8
  /**
   * This method returns an array of size 2. The first element is a vertex buffer holding bone
   * weights for every vertex in the model. The second element is a vertex buffer holding bone
   * indices for vertices (the indices of bones the vertices are assigned to).
   *
   * @param meshStructure the mesh structure object
   * @param vertexListSize the number of vertices in the model
   * @param bonesGroups this is an output parameter; it should be an array of size 1 in which the
   *     maximum number of weights per vertex (up to MAXIMUM_WEIGHTS_PER_VERTEX) is stored
   * @param vertexReferenceMap this reference map allows mapping the original vertices read from
   *     blender to the vertices that actually end up in the model; one vertex may appear several
   *     times in the resulting model
   * @param groupToBoneIndexMap this object maps the group index (to which the vertices in blender
   *     belong) to the bone index of the model
   * @param blenderContext the blender context
   * @return an array holding the vertex weights buffer and the bone indices buffer, and (as an
   *     output parameter) the maximum number of weights per vertex
   * @throws BlenderFileException this exception is thrown when the blend file structure is somehow
   *     invalid or corrupted
   */
  private VertexBuffer[] getBoneWeightAndIndexBuffer(
      Structure meshStructure,
      int vertexListSize,
      int[] bonesGroups,
      Map<Integer, List<Integer>> vertexReferenceMap,
      Map<Integer, Integer> groupToBoneIndexMap,
      BlenderContext blenderContext)
      throws BlenderFileException {
    Pointer pDvert = (Pointer) meshStructure.getFieldValue("dvert"); // dvert = DeformVERTices
    FloatBuffer weightsFloatData =
        BufferUtils.createFloatBuffer(vertexListSize * MAXIMUM_WEIGHTS_PER_VERTEX);
    ByteBuffer indicesData =
        BufferUtils.createByteBuffer(vertexListSize * MAXIMUM_WEIGHTS_PER_VERTEX);
    if (pDvert.isNotNull()) { // assigning weights and bone indices
      List<Structure> dverts =
          pDvert.fetchData(
              blenderContext.getInputStream()); // dverts.size() == verticesAmount (one dvert per
      // vertex in blender)
      int vertexIndex = 0;
      for (Structure dvert : dverts) {
        int totweight =
            ((Number) dvert.getFieldValue("totweight"))
                .intValue(); // total number of weights assigned to the vertex
        // (max. 4 in JME)
        Pointer pDW = (Pointer) dvert.getFieldValue("dw");
        List<Integer> vertexIndices =
            vertexReferenceMap.get(
                Integer.valueOf(vertexIndex)); // we fetch the referenced vertices here
        if (totweight > 0
            && pDW.isNotNull()
            && groupToBoneIndexMap
                != null) { // pDW should never be null here, but I check it just in case :)
          int weightIndex = 0;
          List<Structure> dw = pDW.fetchData(blenderContext.getInputStream());
          for (Structure deformWeight : dw) {
            Integer boneIndex =
                groupToBoneIndexMap.get(((Number) deformWeight.getFieldValue("def_nr")).intValue());

            // Remove this code if 4 weights limitation is removed
            if (weightIndex == 4) {
              LOGGER.log(
                  Level.WARNING,
                  "{0} has more than 4 weights on bone index {1}",
                  new Object[] {meshStructure.getName(), boneIndex});
              break;
            }

            if (boneIndex
                != null) { // null here means that we came across a group
                           // that has no bone attached to it
              float weight = ((Number) deformWeight.getFieldValue("weight")).floatValue();
              if (weight == 0.0f) {
                weight = 1;
                boneIndex = Integer.valueOf(0);
              }
              // we apply the weight to all referenced vertices
              for (Integer index : vertexIndices) {
                weightsFloatData.put(index * MAXIMUM_WEIGHTS_PER_VERTEX + weightIndex, weight);
                indicesData.put(
                    index * MAXIMUM_WEIGHTS_PER_VERTEX + weightIndex, boneIndex.byteValue());
              }
            }
            ++weightIndex;
          }
        } else {
          for (Integer index : vertexIndices) {
            weightsFloatData.put(index * MAXIMUM_WEIGHTS_PER_VERTEX, 1.0f);
            indicesData.put(index * MAXIMUM_WEIGHTS_PER_VERTEX, (byte) 0);
          }
        }
        ++vertexIndex;
      }
    } else {
      // always bind all vertices to the 0-indexed bone;
      // this bone makes the model look normal if vertices have no bone assigned,
      // and it is used in object animation, so if we come across object animation
      // we can use the 0-indexed bone for this
      for (List<Integer> vertexIndexList : vertexReferenceMap.values()) {
        // we apply the weight to all referenced vertices
        for (Integer index : vertexIndexList) {
          weightsFloatData.put(index * MAXIMUM_WEIGHTS_PER_VERTEX, 1.0f);
          indicesData.put(index * MAXIMUM_WEIGHTS_PER_VERTEX, (byte) 0);
        }
      }
    }

    bonesGroups[0] = this.endBoneAssigns(vertexListSize, weightsFloatData);
    VertexBuffer verticesWeights = new VertexBuffer(Type.BoneWeight);
    verticesWeights.setupData(Usage.CpuOnly, bonesGroups[0], Format.Float, weightsFloatData);

    VertexBuffer verticesWeightsIndices = new VertexBuffer(Type.BoneIndex);
    verticesWeightsIndices.setupData(
        Usage.CpuOnly, bonesGroups[0], Format.UnsignedByte, indicesData);
    return new VertexBuffer[] {verticesWeights, verticesWeightsIndices};
  }
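
A hedged sketch (not part of the original loader) of how the two returned buffers might later be attached to a mesh, following the contract described in the javadoc above; setMaxNumWeights and setBuffer are standard jME Mesh methods, while the helper itself is hypothetical.

  // Hypothetical helper: attach the bone weight/index buffers produced by
  // getBoneWeightAndIndexBuffer to a mesh.
  private void applyBoneBuffers(Mesh mesh, VertexBuffer[] boneBuffers, int maxWeightsPerVertex) {
    mesh.setMaxNumWeights(maxWeightsPerVertex); // the value reported through bonesGroups[0]
    mesh.setBuffer(boneBuffers[0]); // Type.BoneWeight, Usage.CpuOnly
    mesh.setBuffer(boneBuffers[1]); // Type.BoneIndex, Usage.CpuOnly
  }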
Code example #9
/**
 * This test renders a scene to an offscreen framebuffer, then copies the contents to a Swing
 * JFrame. Note that some parts are done inefficiently; this is done to make the code more readable.
 */
public class TestRenderToMemory extends SimpleApplication implements SceneProcessor {

  private Geometry offBox;
  private float angle = 0;

  private FrameBuffer offBuffer;
  private ViewPort offView;
  private Texture2D offTex;
  private Camera offCamera;
  private ImageDisplay display;

  private static final int width = 800, height = 600;

  private final ByteBuffer cpuBuf = BufferUtils.createByteBuffer(width * height * 4);
  private final byte[] cpuArray = new byte[width * height * 4];
  private final BufferedImage image =
      new BufferedImage(width, height, BufferedImage.TYPE_4BYTE_ABGR);

  private class ImageDisplay extends JPanel {

    private long t;
    private long total;
    private int frames;
    private int fps;

    @Override
    public void paintComponent(Graphics gfx) {
      super.paintComponent(gfx);
      Graphics2D g2d = (Graphics2D) gfx;

      if (t == 0) t = timer.getTime();

      //            g2d.setBackground(Color.BLACK);
      //            g2d.clearRect(0,0,width,height);

      synchronized (image) {
        g2d.drawImage(image, null, 0, 0);
      }

      long t2 = timer.getTime();
      long dt = t2 - t;
      total += dt;
      frames++;
      t = t2;

      if (total > 1000) {
        fps = frames;
        total = 0;
        frames = 0;
      }

      g2d.setColor(Color.white);
      g2d.drawString("FPS: " + fps, 0, getHeight() - 100);
    }
  }

  public static void main(String[] args) {
    TestRenderToMemory app = new TestRenderToMemory();
    app.setPauseOnLostFocus(false);
    AppSettings settings = new AppSettings(true);
    settings.setResolution(1, 1);
    app.setSettings(settings);
    app.start(Type.OffscreenSurface);
  }

  public void createDisplayFrame() {
    SwingUtilities.invokeLater(
        new Runnable() {
          public void run() {
            JFrame frame = new JFrame("Render Display");
            display = new ImageDisplay();
            display.setPreferredSize(new Dimension(width, height));
            frame.getContentPane().add(display);
            frame.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
            frame.addWindowListener(
                new WindowAdapter() {
                  public void windowClosed(WindowEvent e) {
                    stop();
                  }
                });
            frame.pack();
            frame.setLocationRelativeTo(null);
            frame.setResizable(false);
            frame.setVisible(true);
          }
        });
  }

  public void updateImageContents() {
    cpuBuf.clear();
    renderer.readFrameBuffer(offBuffer, cpuBuf);

    synchronized (image) {
      Screenshots.convertScreenShot(cpuBuf, image);
    }

    if (display != null) display.repaint();
  }

  public void setupOffscreenView() {
    offCamera = new Camera(width, height);

    // create a pre-view. a view that is rendered before the main view
    offView = renderManager.createPreView("Offscreen View", offCamera);
    offView.setBackgroundColor(ColorRGBA.DarkGray);
    offView.setClearFlags(true, true, true);

    // this will let us know when the scene has been rendered to the
    // frame buffer
    offView.addProcessor(this);

    // create offscreen framebuffer
    offBuffer = new FrameBuffer(width, height, 1);

    // setup framebuffer's cam
    offCamera.setFrustumPerspective(45f, 1f, 1f, 1000f);
    offCamera.setLocation(new Vector3f(0f, 0f, -5f));
    offCamera.lookAt(new Vector3f(0f, 0f, 0f), Vector3f.UNIT_Y);

    // setup framebuffer's texture
    //        offTex = new Texture2D(width, height, Format.RGBA8);

    // setup framebuffer to use renderbuffer
    // this is faster for gpu -> cpu copies
    offBuffer.setDepthBuffer(Format.Depth);
    offBuffer.setColorBuffer(Format.RGBA8);
    //        offBuffer.setColorTexture(offTex);

    // set viewport to render to offscreen framebuffer
    offView.setOutputFrameBuffer(offBuffer);

    // setup framebuffer's scene
    Box boxMesh = new Box(Vector3f.ZERO, 1, 1, 1);
    Material material = assetManager.loadMaterial("Interface/Logo/Logo.j3m");
    offBox = new Geometry("box", boxMesh);
    offBox.setMaterial(material);

    // attach the scene to the viewport to be rendered
    offView.attachScene(offBox);
  }

  @Override
  public void simpleInitApp() {
    setupOffscreenView();
    createDisplayFrame();
  }

  @Override
  public void simpleUpdate(float tpf) {
    Quaternion q = new Quaternion();
    angle += tpf;
    angle %= FastMath.TWO_PI;
    q.fromAngles(angle, 0, angle);

    offBox.setLocalRotation(q);
    offBox.updateLogicalState(tpf);
    offBox.updateGeometricState();
  }

  public void initialize(RenderManager rm, ViewPort vp) {}

  public void reshape(ViewPort vp, int w, int h) {}

  public boolean isInitialized() {
    return true;
  }

  public void preFrame(float tpf) {}

  public void postQueue(RenderQueue rq) {}

  /** Update the CPU image's contents after the scene has been rendered to the framebuffer. */
  public void postFrame(FrameBuffer out) {
    updateImageContents();
  }

  public void cleanup() {}
}
Code example #10
  /**
   * <code>load</code> is a manual image loader which is entirely independent of AWT. OUT:
   * RGB888 or RGBA8888 Image object
   *
   * @param in InputStream of an uncompressed 24b RGB or 32b RGBA TGA
   * @param flip Flip the image vertically
   * @return <code>Image</code> object that contains the image, either as a RGB888 or RGBA8888
   * @throws java.io.IOException
   */
  public static Image load(InputStream in, boolean flip) throws IOException {
    boolean flipH = false;

    // open a stream to the file
    DataInputStream dis = new DataInputStream(new BufferedInputStream(in));

    // ---------- Start Reading the TGA header ---------- //
    // length of the image id (1 byte)
    int idLength = dis.readUnsignedByte();

    // Type of color map (if any) included with the image
    // 0 - no color map data is included
    // 1 - a color map is included
    int colorMapType = dis.readUnsignedByte();

    // Type of image being read:
    int imageType = dis.readUnsignedByte();

    // Read Color Map Specification (5 bytes)
    // Index of first color map entry (if we want to use it, uncomment and remove extra read.)
    //        short cMapStart = flipEndian(dis.readShort());
    dis.readShort();
    // number of entries in the color map
    short cMapLength = flipEndian(dis.readShort());
    // number of bits per color map entry
    int cMapDepth = dis.readUnsignedByte();

    // Read Image Specification (10 bytes)
    // horizontal coordinate of lower left corner of image. (if we want to use it, uncomment and
    // remove extra read.)
    //        int xOffset = flipEndian(dis.readShort());
    dis.readShort();
    // vertical coordinate of lower left corner of image. (if we want to use it, uncomment and
    // remove extra read.)
    //        int yOffset = flipEndian(dis.readShort());
    dis.readShort();
    // width of image - in pixels
    int width = flipEndian(dis.readShort());
    // height of image - in pixels
    int height = flipEndian(dis.readShort());
    // bits per pixel in image.
    int pixelDepth = dis.readUnsignedByte();
    int imageDescriptor = dis.readUnsignedByte();
    if ((imageDescriptor & 32) != 0) // bit 5 : if 1, flip top/bottom ordering
    {
      flip = !flip;
    }
    if ((imageDescriptor & 16) != 0) // bit 4 : if 1, flip left/right ordering
    {
      flipH = !flipH;
    }

    // ---------- Done Reading the TGA header ---------- //

    // Skip image ID
    if (idLength > 0) {
      dis.skip(idLength);
    }

    ColorMapEntry[] cMapEntries = null;
    if (colorMapType != 0) {
      // read the color map.
      int bytesInColorMap = (cMapDepth * cMapLength) >> 3;
      int bitsPerColor = Math.min(cMapDepth / 3, 8);

      byte[] cMapData = new byte[bytesInColorMap];
      dis.read(cMapData);

      // Only go to the trouble of constructing the color map
      // table if this is declared a color mapped image.
      if (imageType == TYPE_COLORMAPPED || imageType == TYPE_COLORMAPPED_RLE) {
        cMapEntries = new ColorMapEntry[cMapLength];
        int alphaSize = cMapDepth - (3 * bitsPerColor);
        float scalar = 255f / (FastMath.pow(2, bitsPerColor) - 1);
        float alphaScalar = 255f / (FastMath.pow(2, alphaSize) - 1);
        for (int i = 0; i < cMapLength; i++) {
          ColorMapEntry entry = new ColorMapEntry();
          int offset = cMapDepth * i;
          entry.red = (byte) (int) (getBitsAsByte(cMapData, offset, bitsPerColor) * scalar);
          entry.green =
              (byte) (int) (getBitsAsByte(cMapData, offset + bitsPerColor, bitsPerColor) * scalar);
          entry.blue =
              (byte)
                  (int)
                      (getBitsAsByte(cMapData, offset + (2 * bitsPerColor), bitsPerColor) * scalar);
          if (alphaSize <= 0) {
            entry.alpha = (byte) 255;
          } else {
            entry.alpha =
                (byte)
                    (int)
                        (getBitsAsByte(cMapData, offset + (3 * bitsPerColor), alphaSize)
                            * alphaScalar);
          }

          cMapEntries[i] = entry;
        }
      }
    }

    // Allocate image data array
    Format format;
    byte[] rawData = null;
    int dl;
    if (pixelDepth == 32) {
      rawData = new byte[width * height * 4];
      dl = 4;
    } else {
      rawData = new byte[width * height * 3];
      dl = 3;
    }
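    // dl is the number of bytes per destination pixel: 4 (RGBA) only when the source is
    // 32-bit, otherwise 3 (RGB/BGR).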
    int rawDataIndex = 0;

    if (imageType == TYPE_TRUECOLOR) {
      byte red = 0;
      byte green = 0;
      byte blue = 0;
      byte alpha = 0;

      // Faster than doing a 16-or-24-or-32 check on each individual pixel,
      // just make a separate loop for each.
      if (pixelDepth == 16) {
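        // A 16-bit TGA pixel is stored little-endian as ARRRRRGG GGGBBBBB; the two bytes
        // are swapped into data[] so that getBitsAsByte reads bit 0 as the attribute
        // (alpha) bit and bits 1-5, 6-10 and 11-15 as the red, green and blue channels.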
        byte[] data = new byte[2];
        float scalar = 255f / 31f;
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }
          for (int j = 0; j < width; j++) {
            data[1] = dis.readByte();
            data[0] = dis.readByte();
            rawData[rawDataIndex++] = (byte) (int) (getBitsAsByte(data, 1, 5) * scalar);
            rawData[rawDataIndex++] = (byte) (int) (getBitsAsByte(data, 6, 5) * scalar);
            rawData[rawDataIndex++] = (byte) (int) (getBitsAsByte(data, 11, 5) * scalar);
            if (dl == 4) {
              // create an alpha channel
              alpha = getBitsAsByte(data, 0, 1);
              if (alpha == 1) {
                alpha = (byte) 255;
              }
              rawData[rawDataIndex++] = alpha;
            }
          }
        }

        format = dl == 4 ? Format.RGBA8 : Format.RGB8;
      } else if (pixelDepth == 24) {
        for (int y = 0; y < height; y++) {
          if (!flip) {
            rawDataIndex = (height - 1 - y) * width * dl;
          } else {
            rawDataIndex = y * width * dl;
          }

          dis.readFully(rawData, rawDataIndex, width * dl);
          //                    for (int x = 0; x < width; x++) {
          // read scanline
          //                        blue = dis.readByte();
          //                        green = dis.readByte();
          //                        red = dis.readByte();
          //                        rawData[rawDataIndex++] = red;
          //                        rawData[rawDataIndex++] = green;
          //                        rawData[rawDataIndex++] = blue;
          //                    }
        }
        format = Format.BGR8;
      } else if (pixelDepth == 32) {
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }

          for (int j = 0; j < width; j++) {
            blue = dis.readByte();
            green = dis.readByte();
            red = dis.readByte();
            alpha = dis.readByte();
            rawData[rawDataIndex++] = red;
            rawData[rawDataIndex++] = green;
            rawData[rawDataIndex++] = blue;
            rawData[rawDataIndex++] = alpha;
          }
        }
        format = Format.RGBA8;
      } else {
        throw new IOException("Unsupported TGA true color depth: " + pixelDepth);
      }
    } else if (imageType == TYPE_TRUECOLOR_RLE) {
      byte red = 0;
      byte green = 0;
      byte blue = 0;
      byte alpha = 0;
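      // Each RLE packet begins with a control byte: if the high bit (0x80) is set, the low
      // 7 bits give a run length and the single pixel that follows is repeated
      // (count & 0x7f) + 1 times; otherwise the next count + 1 pixels are stored literally.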
      // Faster than doing a 16-or-24-or-32 check on each individual pixel,
      // just make a separate loop for each.
      if (pixelDepth == 32) {
        for (int i = 0; i <= (height - 1); ++i) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }

          for (int j = 0; j < width; ++j) {
            // Get the number of pixels the next chunk covers (either packed or unpacked)
            int count = dis.readByte();
            if ((count & 0x80) != 0) {
              // It's an RLE-packed block - use the following single pixel for the next <count> + 1 pixels
              count &= 0x07f;
              j += count;
              blue = dis.readByte();
              green = dis.readByte();
              red = dis.readByte();
              alpha = dis.readByte();
              while (count-- >= 0) {
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
                rawData[rawDataIndex++] = alpha;
              }
            } else {
              // It's not RLE-packed; the next <count> + 1 pixels are raw.
              j += count;
              while (count-- >= 0) {
                blue = dis.readByte();
                green = dis.readByte();
                red = dis.readByte();
                alpha = dis.readByte();
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
                rawData[rawDataIndex++] = alpha;
              }
            }
          }
        }
        format = Format.RGBA8;
      } else if (pixelDepth == 24) {
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }
          for (int j = 0; j < width; ++j) {
            // Get the number of pixels the next chunk covers (either packed or unpacked)
            int count = dis.readByte();
            if ((count & 0x80) != 0) {
              // It's an RLE-packed block - use the following single pixel for the next <count> + 1 pixels
              count &= 0x07f;
              j += count;
              blue = dis.readByte();
              green = dis.readByte();
              red = dis.readByte();
              while (count-- >= 0) {
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
              }
            } else {
              // It's not RLE-packed; the next <count> + 1 pixels are raw.
              j += count;
              while (count-- >= 0) {
                blue = dis.readByte();
                green = dis.readByte();
                red = dis.readByte();
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
              }
            }
          }
        }
        format = Format.RGB8;
      } else if (pixelDepth == 16) {
        byte[] data = new byte[2];
        float scalar = 255f / 31f;
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }
          for (int j = 0; j < width; j++) {
            // Get the number of pixels the next chunk covers (either packed or unpacked)
            int count = dis.readByte();
            if ((count & 0x80) != 0) {
              // It's an RLE-packed block - use the following single pixel for the next <count> + 1 pixels
              count &= 0x07f;
              j += count;
              data[1] = dis.readByte();
              data[0] = dis.readByte();
              // Bit offsets match the uncompressed 16-bit path above: 1 = red, 6 = green, 11 = blue.
              red = (byte) (int) (getBitsAsByte(data, 1, 5) * scalar);
              green = (byte) (int) (getBitsAsByte(data, 6, 5) * scalar);
              blue = (byte) (int) (getBitsAsByte(data, 11, 5) * scalar);
              while (count-- >= 0) {
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
              }
            } else {
              // It's not RLE-packed; the next <count> + 1 pixels are raw.
              j += count;
              while (count-- >= 0) {
                data[1] = dis.readByte();
                data[0] = dis.readByte();
                red = (byte) (int) (getBitsAsByte(data, 1, 5) * scalar);
                green = (byte) (int) (getBitsAsByte(data, 6, 5) * scalar);
                blue = (byte) (int) (getBitsAsByte(data, 11, 5) * scalar);
                rawData[rawDataIndex++] = red;
                rawData[rawDataIndex++] = green;
                rawData[rawDataIndex++] = blue;
              }
            }
          }
        }
        format = Format.RGB8;
      } else {
        throw new IOException("Unsupported TGA true color depth: " + pixelDepth);
      }

    } else if (imageType == TYPE_COLORMAPPED) {
      int bytesPerIndex = pixelDepth / 8;
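      // Each pixel stores an index into the color map read above; indices are one or two
      // bytes wide, depending on pixelDepth.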

      if (bytesPerIndex == 1) {
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }
          for (int j = 0; j < width; j++) {
            int index = dis.readUnsignedByte();
            if (index >= cMapEntries.length || index < 0) {
              throw new IOException("TGA: Invalid color map entry referenced: " + index);
            }

            ColorMapEntry entry = cMapEntries[index];
            rawData[rawDataIndex++] = entry.blue;
            rawData[rawDataIndex++] = entry.green;
            rawData[rawDataIndex++] = entry.red;
            if (dl == 4) {
              rawData[rawDataIndex++] = entry.alpha;
            }
          }
        }
      } else if (bytesPerIndex == 2) {
        for (int i = 0; i <= (height - 1); i++) {
          if (!flip) {
            rawDataIndex = (height - 1 - i) * width * dl;
          }
          for (int j = 0; j < width; j++) {
            int index = flipEndian(dis.readShort());
            if (index >= cMapEntries.length || index < 0) {
              throw new IOException("TGA: Invalid color map entry referenced: " + index);
            }

            ColorMapEntry entry = cMapEntries[index];
            rawData[rawDataIndex++] = entry.blue;
            rawData[rawDataIndex++] = entry.green;
            rawData[rawDataIndex++] = entry.red;
            if (dl == 4) {
              rawData[rawDataIndex++] = entry.alpha;
            }
          }
        }
      } else {
        throw new IOException("TGA: unknown colormap indexing size used: " + bytesPerIndex);
      }

      format = dl == 4 ? Format.RGBA8 : Format.RGB8;
    } else {
      throw new IOException("Monochrome and RLE colormapped images are not supported");
    }

    in.close();
    // Get a pointer to the image memory
    ByteBuffer scratch = BufferUtils.createByteBuffer(rawData.length);
    scratch.clear();
    scratch.put(rawData);
    scratch.rewind();
    // Create the Image object
    Image textureImage = new Image();
    textureImage.setFormat(format);
    textureImage.setWidth(width);
    textureImage.setHeight(height);
    textureImage.setData(scratch);
    return textureImage;
  }
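Below is a minimal usage sketch for this loader, assuming the method above lives in a class named TgaLoader and that jME's com.jme3.texture.Image is on the classpath; the class name and file path are illustrative only and not part of the original example.

import com.jme3.texture.Image;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;

public class TgaLoadExample {
  public static void main(String[] args) throws IOException {
    // Hypothetical path; any TGA the loader supports (true-color, RLE true-color,
    // or uncompressed color-mapped) will do.
    try (InputStream in = new FileInputStream("textures/example.tga")) {
      // flip == true requests a vertical flip, as described in the Javadoc above.
      // Note that load() closes the stream itself; the extra close from
      // try-with-resources is a harmless no-op. TgaLoader is an assumed wrapper
      // class for the load(...) method shown above.
      Image image = TgaLoader.load(in, true);
      System.out.println(
          "Loaded " + image.getWidth() + "x" + image.getHeight() + " TGA as " + image.getFormat());
    }
  }
}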