Browse Source

Use actual color format and type for computing BufferedImage size.

No idea why it was hardcoded to these values.
pull/279/head
Vladimír Vondruš 14 years ago
parent
commit
1cdbec77e7
  1. 6
      src/BufferedImage.h

6
src/BufferedImage.h

@@ -62,8 +62,7 @@ template<size_t imageDimensions> class BufferedImage {
          */
         void setDimensions(const Math::Vector<GLsizei, Dimensions>& dimensions, Buffer::Usage usage) {
             _dimensions = dimensions;
-            size_t textureSize = AbstractTexture::pixelSize(AbstractTexture::ColorFormat::RGB, Type::UnsignedByte)*dimensions.product();
-            _buffer.setData(Buffer::Target::PixelPack, textureSize, nullptr, usage);
+            _buffer.setData(Buffer::Target::PixelPack, AbstractTexture::pixelSize(_colorFormat, _type)*dimensions.product(), nullptr, usage);
         }

         /** @brief Color format */
/** @brief Color format */ /** @brief Color format */
@@ -99,8 +98,7 @@ template<size_t imageDimensions> class BufferedImage {
                 return;
             }

-            size_t textureSize = AbstractTexture::pixelSize(AbstractTexture::ColorFormat::RGB, Type::UnsignedByte)*_dimensions.product();
-            _buffer.setSubData(Buffer::Target::PixelPack, 0, textureSize, data);
+            _buffer.setSubData(Buffer::Target::PixelPack, 0, AbstractTexture::pixelSize(_colorFormat, _type)*_dimensions.product(), data);
         }

     protected:
protected: protected:

Loading…
Cancel
Save