tmpFile2.deleteOnExit();
FileOutputStream fos1 = new FileOutputStream(tmpFile1);
fos1.write(out1.getData()); // writing data from row1
fos1.close(); // close so the handle isn't leaked against the temp file
FileOutputStream fos2 = new FileOutputStream(tmpFile2);
fos2.write(out2.getData()); // writing data from row2
fos2.close();
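// map the two files back in so each serialized row can be read back field by field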
MappedFileDataInput in1 = new MappedFileDataInput(new FileInputStream(tmpFile1), tmpFile1.getAbsolutePath(), 0);
MappedFileDataInput in2 = new MappedFileDataInput(new FileInputStream(tmpFile2), tmpFile2.getAbsolutePath(), 0);
// the row key isn't part of what CompactedRow writes; that's done by SSTableWriter.append
// row size can differ because the bloom filter counts can be different
long rowSize1 = SSTableReader.readRowSize(in1, sstables.iterator().next().descriptor);
long rowSize2 = SSTableReader.readRowSize(in2, sstables.iterator().next().descriptor);
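// each buffer holds the 8-byte row-size long followed by the row data, hence the +8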
assertEquals(out1.getLength(), rowSize1 + 8);
assertEquals(out2.getLength(), rowSize2 + 8);
// bloom filter: deserialize both just to skip past them, since their contents may differ
IndexHelper.defreezeBloomFilter(in1, rowSize1, false);
IndexHelper.defreezeBloomFilter(in2, rowSize2, false);
// index
int indexSize1 = in1.readInt();
int indexSize2 = in2.readInt();
assertEquals(indexSize1, indexSize2);
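// the index data itself must be byte-for-byte identical, not just the same size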
ByteBuffer bytes1 = in1.readBytes(indexSize1);
ByteBuffer bytes2 = in2.readBytes(indexSize2);
assert bytes1.equals(bytes2);
// cf metadata
ColumnFamily cf1 = ColumnFamily.create("Keyspace1", "Standard1");
ColumnFamily cf2 = ColumnFamily.create("Keyspace1", "Standard1");
ColumnFamily.serializer().deserializeFromSSTableNoColumns(cf1, in1);
ColumnFamily.serializer().deserializeFromSSTableNoColumns(cf2, in2);
assert cf1.getLocalDeletionTime() == cf2.getLocalDeletionTime();
assert cf1.getMarkedForDeleteAt() == cf2.getMarkedForDeleteAt();
// columns
int columns = in1.readInt();
assert columns == in2.readInt();
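// deserialize each pair of columns and check them for equality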
for (int i = 0; i < columns; i++)
{
IColumn c1 = cf1.getColumnSerializer().deserialize(in1);
IColumn c2 = cf2.getColumnSerializer().deserialize(in2);
assert c1.equals(c2);
}
// that should be everything
assert in1.available() == 0;
assert in2.available() == 0;
}
}