// (a) changegroup.py#builddeltaheader(): # do nothing with basenode, it is implicitly the previous one in HG10
// (b) revlog.py#group(): prev, curr = revs[r], revs[r + 1]
// for c in bundler.revchunk(self, curr, prev):
// hence there is no need to extract the contents of the deltaBase revision here, it is always the previous one in the stream
String m = String.format("Revision %s import failed: delta base %s is not the last node we've handled (and know content for) %s", ge.node(), deltaBase, prevRevision);
throw new HgInvalidStateException(m);
}
}
//
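// reconstruct the complete revision content by applying the incoming delta to the content of the previous revision (the implicit delta base)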
Patch patch = HgInternals.patchFromData(ge);
byte[] content = patch.apply(prevRevContent, -1);
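// a revision's node id is the SHA-1 over its parents and its full content; recompute it to verify the reconstructed revision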
Nodeid p1 = ge.firstParent();
Nodeid p2 = ge.secondParent();
byte[] calculated = dh.sha1(p1, p2, content).asBinary();
final Nodeid node = ge.node();
if (!node.equalsTo(calculated)) {
String m = String.format("Checksum failed: expected %s, calculated %s. File %s", node, calculated, filename);
throw new HgRevisionIntegrityException(m, null, new File(hgDir, filename));
}
revlogHeader.nodeid(node);
//
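// link revision: for the changelog we record our own index (and remember it for later lookups),
// for other revlogs it points at the changeset that introduced this revision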
if (collectChangelogIndexes) {
changelogIndexes.put(node, revisionSequence.size());
revlogHeader.linkRevision(revisionSequence.size());
} else {
Integer csRev = changelogIndexes.get(ge.cset());
if (csRev == null) {
throw new HgInvalidStateException(String.format("Changelog doesn't contain revision %s of %s", ge.cset().shortNotation(), filename));
}
revlogHeader.linkRevision(csRev.intValue());
}
//
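// translate the parent node ids into local revision indexes for the index entry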
revlogHeader.parents(knownRevision(p1), knownRevision(p2));
//
int patchSerializedLength = patch.serializedLength();
// there's no reason to keep the patch if its size is close to (here, 75% or more of) the complete content;
// writing the full revision instead saves the patching effort on subsequent reads
writeComplete = writeComplete || preferCompleteOverPatch(patchSerializedLength, content.length);
if (writeComplete) {
revlogHeader.baseRevision(revisionSequence.size());
}
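// a revision written as complete content starts a new delta chain, i.e. becomes its own base revision;
// otherwise the baseRevision set when the current chain started remains in effect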
assert revlogHeader.baseRevision() >= 0;
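// the payload is either the complete content or the serialized patch; try zlib compression
// and keep it only when it actually reduces the size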
DataSerializer.DataSource dataSource = writeComplete ? new ByteArrayDataSource(content) : patch.new PatchDataSource();
revlogDataZip.reset(dataSource);
final int compressedLen;
final boolean useUncompressedData = !preferCompressedOverComplete(revlogDataZip.getCompressedLength(), dataSource.serializeLength());
if (useUncompressedData) {
// compression wasn't effective enough, store the data uncompressed
compressedLen = dataSource.serializeLength() + 1 /* 1 extra byte for the 'u' (uncompressed) prefix */;
} else {
compressedLen = revlogDataZip.getCompressedLength();
}
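// the index entry records both the uncompressed content length and the length the data occupies on disk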
revlogHeader.length(content.length, compressedLen);
// XXX it might be wiser to create the DataSerializer once per file rather than once per revision
DataSerializer sds = new DataSerializer() {
@Override
public void write(byte[] data, int offset, int length) throws HgIOException {
try {
indexFile.write(data, offset, length);
} catch (IOException ex) {
throw new HgIOException("Write failure", ex, currentFile);
}
}
};
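// write the index entry first, then the revision data: raw bytes prefixed with 'u' when stored uncompressed, zlib output otherwise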
revlogHeader.serialize(sds);
if (useUncompressedData) {
sds.writeByte((byte) 'u');
dataSource.serialize(sds);
} else {
int actualCompressedLenWritten = revlogDataZip.writeCompressedData(sds);
if (actualCompressedLenWritten != compressedLen) {
throw new HgInvalidStateException(String.format("Expected %d bytes of compressed data, but actually wrote %d in %s", compressedLen, actualCompressedLenWritten, filename));
}
}
sds.done();
//
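// remember the node so that subsequent group elements can resolve it as their delta base or parent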
revisionSequence.add(node);