// first entry starts after the header
int bundleDataOffset = align(bundleHeaderSize);
// allocate data buffer
ByteBuffer bbData = ByteBuffer.allocateDirect(bundleDataOffset + bundleDataSize);
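// write through a duplicate so the entry table writer and the raw data puts
// below keep independent positions on the same backing buffer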
DataOutputWriter out = DataOutputWriter.newWriter(bbData.duplicate());
// write the entry count followed by the entry table and entry data
out.writeInt(entrySet.size());
for (Map.Entry<String, ByteBuffer> entry : entrySet) {
    String name = entry.getKey();
    ByteBuffer buffer = entry.getValue();
    buffer.rewind();
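    // entry table record: null-terminated name, offset into the data buffer, entry size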
    out.writeStringNull(name);
    out.writeInt(bundleDataOffset);
    out.writeInt(buffer.limit());
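    // copy the entry data to its offset, then advance past the aligned entry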
    bbData.position(bundleDataOffset);
    bbData.put(buffer);
    bundleDataOffset += align(buffer.limit());
}
bbData.flip();
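// compressed and uncompressed sizes are equal unless the data is LZMA-compressed below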
int dataSizeC = bbData.limit();
int dataSizeU = dataSizeC;
// compress bundle data if required
if (isCompressed()) {
    L.log(Level.INFO, "Compressing asset bundle, this may take a while");
    bbData = LzmaBufferUtils.encode(bbData);
    dataSizeC = bbData.limit();
}
// configure header
int headerSize = header.getSize();
int bundleSize = headerSize + dataSizeC;
header.setCompressed(isCompressed());
header.setDataOffset(headerSize);
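// both file size fields store the total bundle size (header + data)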
header.setFileSize1(bundleSize);
header.setFileSize2(bundleSize);
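// purpose of these fields is not fully known; they are filled with the asset
// count and the bundle entry header size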
header.setUnknown1(assets);
header.setUnknown2(bundleHeaderSize);
List<Pair<Integer, Integer>> offsetMap = header.getOffsetMap();
offsetMap.clear();
// TODO: Original asset bundles have ascending lengths for each asset
// file. The exact calculation of these values is not yet known, so use
// the maximum size for each entry for now to avoid crashes.
for (int i = 0; i < assets; i++) {
    offsetMap.add(new ImmutablePair<>(dataSizeC, dataSizeU));
}
// create bundle buffer
ByteBuffer bb = ByteBuffer.allocateDirect(bundleSize);
out = DataOutputWriter.newWriter(bb);
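// write the header struct followed by the (possibly compressed) data block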
out.writeStruct(header);
out.writeBuffer(bbData);
bb.flip();
// encode bundle buffer
for (AssetBundleCodec codec : codecsSave) {
    L.log(Level.INFO, "Encoding: {0}", codec.getName());