Package org.apache.hadoop_voltpatches.util

Examples of org.apache.hadoop_voltpatches.util.PureJavaCrc32
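
The snippets below all follow the same basic shape: construct the checksum, feed it bytes, and read the 32-bit value. PureJavaCrc32 is the voltpatches repackaging of Hadoop's pure-Java CRC-32 and implements java.util.zip.Checksum. A minimal sketch, assuming only that the voltpatches class is on the classpath:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.nio.charset.StandardCharsets;

public class CrcQuickStart {
    public static void main(String[] args) {
        byte[] payload = "hello crc".getBytes(StandardCharsets.UTF_8);

        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(payload, 0, payload.length);

        // getValue() exposes the CRC as an unsigned 32-bit value in a long;
        // the examples on this page store it by casting to int.
        long asLong = crc.getValue();
        int asInt = (int) asLong;
        System.out.printf("crc32 = 0x%08X (%d)%n", asInt, asLong);
    }
}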


            }
        }

        byte resultBuffers[][] = new byte[chunks.size() - 1][];
        int ii = 0;
        PureJavaCrc32 crc = getCRC ? new PureJavaCrc32() : null;
        for (String chunk : chunks) {
            if (chunk.endsWith("_complete")) continue;
            resultBuffers[ii] = zk.getData(chunk, false, null);
            if (crc != null) {
                crc.update(resultBuffers[ii]);
            }
            ii++;
        }

        return Pair.of(decompressBytes(resultBuffers), crc != null ? (int)crc.getValue() : null);
    }
View Full Code Here
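
The loop above folds every data chunk read from ZooKeeper into a single running CRC and returns it as an int (or null when no CRC was requested). A stripped-down sketch of that accumulation pattern, with a hypothetical helper name:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.util.List;

// Hypothetical helper: fold a list of byte[] blocks into one running CRC,
// the way the chunk loop above accumulates a checksum chunk by chunk.
final class ChunkCrc {
    static Integer crcOfChunks(List<byte[]> chunks, boolean wantCrc) {
        if (!wantCrc) {
            return null;                       // mirrors the getCRC == false case
        }
        PureJavaCrc32 crc = new PureJavaCrc32();
        for (byte[] chunk : chunks) {
            crc.update(chunk, 0, chunk.length);
        }
        return (int) crc.getValue();           // lossless: a CRC-32 fits in an int
    }
}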


        buf.putLong(OFFSET_VERSION, m_version);
        buf.position(OFFSET_DATA);
        buf.put(m_serData);

        // Finalize the CRC based on the entire buffer and reset the current position.
        final PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(buf.array());
        buf.putLong(OFFSET_CRC, crc.getValue());
        buf.rewind();
        return buf;
    }
View Full Code Here
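
This write path leaves the CRC slot zeroed, checksums the whole backing array, and only then stores the value into the slot. A simplified sketch of the same idea, with hypothetical offsets standing in for the OFFSET_* constants above:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.nio.ByteBuffer;

// Write-side sketch with made-up offsets: leave the CRC slot zeroed,
// checksum the whole backing array, then fill the slot in afterwards.
final class HeaderCrcWriter {
    static final int OFFSET_CRC = 0;    // hypothetical layout, not the real OFFSET_* values
    static final int OFFSET_DATA = 8;

    static ByteBuffer writeWithCrc(byte[] data) {
        ByteBuffer buf = ByteBuffer.allocate(OFFSET_DATA + data.length);
        buf.position(OFFSET_DATA);
        buf.put(data);                  // bytes 0..7 (the CRC slot) are still zero

        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(buf.array(), 0, buf.array().length);
        buf.putLong(OFFSET_CRC, crc.getValue());
        buf.rewind();
        return buf;
    }
}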

        }

        // Get the CRC, zero out its buffer field, and compare to calculated CRC.
        long crcHeader = buf.getLong(OFFSET_CRC);
        buf.putLong(OFFSET_CRC, 0);
        final PureJavaCrc32 crcBuffer = new PureJavaCrc32();
        assert(buf.hasArray());
        crcBuffer.update(buf.array());
        if (crcHeader != crcBuffer.getValue()) {
            throw new IOException("Hashinator snapshot data CRC mismatch.");
        }

        // Slurp the data.
        int coord = buf.getInt(OFFSET_INSTID_COORD);
View Full Code Here
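
The read side mirrors that: pull the stored CRC out of the header, zero the slot so the buffer matches what the writer checksummed, recompute, and compare. A minimal sketch under the same hypothetical layout:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.io.IOException;
import java.nio.ByteBuffer;

// Verification counterpart under the same hypothetical layout: zero the CRC
// slot so the recomputed value covers exactly the bytes the writer checksummed.
final class HeaderCrcReader {
    static final int OFFSET_CRC = 0;

    static void verify(ByteBuffer buf) throws IOException {
        long stored = buf.getLong(OFFSET_CRC);
        buf.putLong(OFFSET_CRC, 0L);

        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(buf.array(), 0, buf.array().length);   // requires a heap-backed buffer
        if (stored != crc.getValue()) {
            throw new IOException("CRC mismatch");
        }
    }
}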

            }

            sw.append(stringer.toString());

            final byte tableListBytes[] = sw.getBuffer().toString().getBytes("UTF-8");
            final PureJavaCrc32 crc = new PureJavaCrc32();
            crc.update(tableListBytes);
            ByteBuffer fileBuffer = ByteBuffer.allocate(tableListBytes.length + 4);
            fileBuffer.putInt((int)crc.getValue());
            fileBuffer.put(tableListBytes);
            fileBuffer.flip();
            fos.getChannel().write(fileBuffer);
            success = true;
            return new Runnable() {
View Full Code Here
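
Here the digest file is laid out as a 4-byte CRC of the UTF-8 payload followed by the payload itself, with getValue() truncated to an int before it is written. A sketch of that layout, using a hypothetical writer:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.WritableByteChannel;
import java.nio.charset.StandardCharsets;

// Hypothetical writer for the "4-byte CRC, then the payload" layout above.
final class CrcPrefixedWriter {
    static void write(WritableByteChannel channel, String payload) throws IOException {
        byte[] bytes = payload.getBytes(StandardCharsets.UTF_8);

        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(bytes, 0, bytes.length);

        ByteBuffer out = ByteBuffer.allocate(4 + bytes.length);
        out.putInt((int) crc.getValue());   // truncated to 32 bits, as above
        out.put(bytes);
        out.flip();
        while (out.hasRemaining()) {
            channel.write(out);
        }
    }
}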

             * via a consistent interface.
             */
            if (obj == null) {
                String tableList = caw.toString();
                byte tableListBytes[] = tableList.getBytes("UTF-8");
                PureJavaCrc32 tableListCRC = new PureJavaCrc32();
                tableListCRC.update(tableListBytes);
                tableListCRC.update("\n".getBytes("UTF-8"));
                final int calculatedValue = (int)tableListCRC.getValue();
                if (crc != calculatedValue) {
                    logger.warn("CRC of snapshot digest " + f + " did not match digest contents");
                    return null;
                }

                String tableNames[] = tableList.split(",");
                long txnId = Long.valueOf(tableNames[0]);

                obj = new JSONObject();
                try {
                    obj.put("version", 0);
                    obj.put("txnId", txnId);
                    for (int ii = 1; ii < tableNames.length; ii++) {
                        obj.append("tables", tableNames[ii]);
                    }
                } catch (JSONException e) {
                    logger.warn("Exception parsing JSON of digest " + f, e);
                    return null;
                }
                return obj;
            } else {
                /*
                 * Verify the CRC and then return the data as a JSON object.
                 */
                String tableList = caw.toString();
                byte tableListBytes[] = tableList.getBytes("UTF-8");
                PureJavaCrc32 tableListCRC = new PureJavaCrc32();
                tableListCRC.update(tableListBytes);
                final int calculatedValue = (int)tableListCRC.getValue();
                if (crc != calculatedValue) {
                    logger.warn("CRC of snapshot digest " + f + " did not match digest contents");
                    return null;
                }
                return obj;
View Full Code Here
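
Both branches above verify that 4-byte prefix by recomputing the CRC over the UTF-8 bytes and comparing ints (the legacy branch additionally folds a trailing newline into the checksum). A minimal sketch of the comparison, with hypothetical names:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.nio.charset.StandardCharsets;

// Read-side check for the same layout: recompute over the payload bytes and
// compare against the stored int prefix.
final class CrcPrefixedReader {
    static boolean matches(int storedCrc, String payload) {
        byte[] bytes = payload.getBytes(StandardCharsets.UTF_8);
        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(bytes, 0, bytes.length);
        return storedCrc == (int) crc.getValue();
    }
}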

    // UTILITY
    ///////////////////////////////////////////////////////

    public long getCRC() throws IOException {

        PureJavaCrc32 crc = new PureJavaCrc32();

        for (Entry<String, byte[]> e : super.entrySet()) {
            if (e.getKey().equals("buildinfo.txt") || e.getKey().equals("catalog-report.html")) {
                continue;
            }
            crc.update(e.getKey().getBytes("UTF-8"));
            crc.update(e.getValue());
        }

        return crc.getValue();
    }
View Full Code Here
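
getCRC() checksums every entry's name and contents while skipping entries that vary between otherwise identical catalogs. A sketch of that pattern with hypothetical parameters; note the result is only stable if the map iterates in a fixed order:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.nio.charset.StandardCharsets;
import java.util.Map;
import java.util.Set;

// Sketch of checksumming a name -> contents map while skipping volatile
// entries. Use a TreeMap or LinkedHashMap so the iteration order, and hence
// the CRC, is deterministic.
final class EntryMapCrc {
    static long crcOf(Map<String, byte[]> entries, Set<String> skip) {
        PureJavaCrc32 crc = new PureJavaCrc32();
        for (Map.Entry<String, byte[]> e : entries.entrySet()) {
            if (skip.contains(e.getKey())) {
                continue;
            }
            byte[] key = e.getKey().getBytes(StandardCharsets.UTF_8);
            crc.update(key, 0, key.length);
            crc.update(e.getValue(), 0, e.getValue().length);
        }
        return crc.getValue();
    }
}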

            }
            m_chunkReads = new Semaphore(readAheadChunks);
            m_saveFile = dataIn;
            m_continueOnCorruptedChunk = continueOnCorruptedChunk;

            final PureJavaCrc32 crc = new PureJavaCrc32();
            /*
             * A second CRC computed as if the completion byte in the header
             * were already set, so that a mismatch caused only by the snapshot
             * not being marked complete can be told apart from real corruption.
             */
            final PureJavaCrc32 secondCRC = new PureJavaCrc32();

            /*
             * Get the header with the save restore specific information
             */
            final ByteBuffer lengthBuffer = ByteBuffer.allocate(8);
            while (lengthBuffer.hasRemaining()) {
                final int read = m_saveFile.read(lengthBuffer);
                if (read == -1) {
                    throw new EOFException();
                }
            }
            lengthBuffer.flip();
            final int originalCRC = lengthBuffer.getInt();
            int length = lengthBuffer.getInt();
            crc.update(lengthBuffer.array(), 4, 4);
            secondCRC.update(lengthBuffer.array(), 4, 4);

            if (length < 0) {
                throw new IOException("Corrupted save file has negative header length");
            }

            if (length > 2097152) {
                throw new IOException("Corrupted save file has unreasonable header length > 2 megs");
            }

            final ByteBuffer saveRestoreHeader = ByteBuffer.allocate(length);
            while (saveRestoreHeader.hasRemaining()) {
                final int read = m_saveFile.read(saveRestoreHeader);
                if (read == -1 || read < length) {
                    throw new EOFException();
                }
            }
            saveRestoreHeader.flip();
            crc.update(saveRestoreHeader.array());
            secondCRC.update(new byte[] { 1 });
            secondCRC.update(saveRestoreHeader.array(), 1, saveRestoreHeader.array().length - 1);

            /*
             *  Get the template for the VoltTable serialization header.
             *  It will have an extra length value preceded to it so that
             *  it can be sucked straight into a buffer. This will not
             *  contain a row count since that varies from chunk to chunk
             *  and is supplied by the chunk
             */
            lengthBuffer.clear();
            lengthBuffer.limit(4);
            /*
             * Why this stupidity and no while loop?
             * Because java is broken and complains about a random final
             * elsewhere if you do.
             */
            {
                final int read = m_saveFile.read(lengthBuffer);
                if (read == -1) {
                    throw new EOFException();
                }
            }
            crc.update(lengthBuffer.array(), 0, 4);
            secondCRC.update(lengthBuffer.array(), 0, 4);
            lengthBuffer.flip();
            length = lengthBuffer.getInt();

            if (length < 4) {
                throw new IOException("Corrupted save file has negative length or too small length for VoltTable header");
            }

            if (length > 2097152) {
                throw new IOException("Corrupted save file has unreasonable VoltTable header length > 2 megs");
            }

            m_tableHeader = ByteBuffer.allocate(length + 4);
            m_tableHeader.putInt(length);
            while (m_tableHeader.hasRemaining()) {
                final int read = m_saveFile.read(m_tableHeader);
                if (read == -1) {
                    throw new EOFException();
                }
            }
            crc.update(m_tableHeader.array(), 4, length);
            secondCRC.update(m_tableHeader.array(), 4, length);

            boolean failedCRCDueToNotCompleted = false;

            final int actualCRC = (int)crc.getValue();
            if (originalCRC != actualCRC) {
                /*
                 * Check if the CRC mismatch is due to the snapshot not being completed
                 */
                final int secondCRCValue = (int)secondCRC.getValue();
                if (secondCRCValue == originalCRC) {
                    failedCRCDueToNotCompleted = true;
                } else {
                    throw new IOException("Checksum mismatch");
                }
View Full Code Here
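
The interesting part is the second checksum: it is fed the header as if its completion flag were already set, so a stored CRC that matches the second value but not the first indicates a snapshot that was never marked complete rather than corruption. A simplified sketch of that classification (hypothetical layout where byte 0 of the header is the completion flag; the length bytes the real code also folds into both checksums are omitted):

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

// Simplified sketch of the dual-checksum classification above.
final class CompletionAwareCrcCheck {
    enum Result { OK, NOT_COMPLETED, CORRUPT }

    static Result check(int storedCrc, byte[] header) {
        PureJavaCrc32 asRead = new PureJavaCrc32();
        asRead.update(header, 0, header.length);
        if (storedCrc == (int) asRead.getValue()) {
            return Result.OK;
        }

        PureJavaCrc32 asIfComplete = new PureJavaCrc32();
        asIfComplete.update(1);                            // pretend the flag byte is set
        asIfComplete.update(header, 1, header.length - 1);
        return storedCrc == (int) asIfComplete.getValue()
                ? Result.NOT_COMPLETED : Result.CORRUPT;
    }
}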

        container.b().putInt(container.b().remaining() - 4);
        container.b().position(0);

        final byte schemaBytes[] = PrivateVoltTableFactory.getSchemaBytes(schemaTable);

        final PureJavaCrc32 crc = new PureJavaCrc32();
        ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b().remaining() + schemaBytes.length);
        aggregateBuffer.put(container.b());
        container.discard();
        aggregateBuffer.put(schemaBytes);
        aggregateBuffer.flip();
        crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

        final int crcValue = (int) crc.getValue();
        aggregateBuffer.putInt(crcValue).position(8);
        aggregateBuffer.put((byte)0).position(0);//Haven't actually finished writing file

        if (m_simulateFullDiskWritingHeader) {
            m_writeException = new IOException("Disk full");
View Full Code Here
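
In this layout the CRC occupies the first four bytes of the header and covers everything after itself, which is why update() starts at offset 4 and the value is written back at position 0. A minimal sketch, assuming an array-backed buffer:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;

import java.nio.ByteBuffer;

// Sketch of a leading CRC that covers everything after itself: compute over
// bytes 4..end, then write the value back into the first four bytes.
final class LeadingCrcHeader {
    static void sealCrc(ByteBuffer header) {
        PureJavaCrc32 crc = new PureJavaCrc32();
        crc.update(header.array(), 4, header.capacity() - 4);
        header.putInt(0, (int) crc.getValue());
    }
}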

                     * Get the partition id and its CRC and validate it. Validating the
                     * partition ID for the chunk separately makes it possible to
                     * continue processing chunks from other partitions if only one partition
                     * has corrupt chunks in the file.
                     */
                    final Checksum partitionIdCRC = m_checksumType == ChecksumType.CRC32C ? new PureJavaCrc32C() : new PureJavaCrc32();
                    chunkLengthB.mark();
                    final int nextChunkPartitionId = chunkLengthB.getInt();
                    final int nextChunkPartitionIdCRC = chunkLengthB.getInt();
                    chunkLengthB.reset();
                    byte partitionIdBytes[] = new byte[4];
View Full Code Here
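
Because PureJavaCrc32 and PureJavaCrc32C both implement java.util.zip.Checksum, the code above can choose the algorithm per chunk behind one interface. A sketch of that selection, assuming PureJavaCrc32C lives in the same voltpatches package and using a hypothetical enum and factory:

import org.apache.hadoop_voltpatches.util.PureJavaCrc32;
import org.apache.hadoop_voltpatches.util.PureJavaCrc32C;

import java.util.zip.Checksum;

// Hypothetical factory: callers work against java.util.zip.Checksum and never
// need to know which concrete CRC implementation they were handed.
final class ChecksumFactory {
    enum ChecksumType { CRC32, CRC32C }

    static Checksum create(ChecksumType type) {
        return type == ChecksumType.CRC32C ? new PureJavaCrc32C() : new PureJavaCrc32();
    }

    static int crcOf(ChecksumType type, byte[] bytes) {
        Checksum c = create(type);
        c.update(bytes, 0, bytes.length);
        return (int) c.getValue();
    }
}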

        schemaSerializer.write(serializedSchemaTable);
        final BBContainer schemaContainer = schemaSerializer.getBBContainer();
        schemaContainer.b().limit(schemaContainer.b().limit() - 4);//Don't want the row count
        schemaContainer.b().position(schemaContainer.b().position() + 4);//Don't want total table length

        final PureJavaCrc32 crc = new PureJavaCrc32();
        ByteBuffer aggregateBuffer = ByteBuffer.allocate(container.b().remaining() + schemaContainer.b().remaining());
        aggregateBuffer.put(container.b());
        container.discard();
        aggregateBuffer.put(schemaContainer.b());
        schemaContainer.discard();
        aggregateBuffer.flip();
        crc.update(aggregateBuffer.array(), 4, aggregateBuffer.capacity() - 4);

        final int crcValue = (int) crc.getValue();
        aggregateBuffer.putInt(crcValue).position(8);
        aggregateBuffer.put((byte)0).position(0);//Haven't actually finished writing file

        if (m_simulateFullDiskWritingHeader) {
            m_writeException = new IOException("Disk full");
View Full Code Here
