Package org.apache.kahadb.util

Examples of org.apache.kahadb.util.DataByteArrayOutputStream
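DataByteArrayOutputStream is KahaDB's growable, byte-array-backed implementation of java.io.DataOutput. Code marshals a record into it, then hands the bytes onward either as a ByteSequence (toByteSequence()) or as the raw internal buffer (getData()). A minimal round-trip sketch, using only calls that appear in the examples below plus the standard DataOutput/DataInput methods:

import org.apache.kahadb.util.ByteSequence;
import org.apache.kahadb.util.DataByteArrayInputStream;
import org.apache.kahadb.util.DataByteArrayOutputStream;

public class RoundTripSketch {
    public static void main(String[] args) throws Exception {
        // Marshal a type byte followed by a payload into the growable buffer.
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(64);
        os.writeByte(1);
        os.writeUTF("Hello World");
        ByteSequence seq = os.toByteSequence();

        // Unmarshal with the matching input stream.
        DataByteArrayInputStream is = new DataByteArrayInputStream(seq);
        byte type = is.readByte();
        String payload = is.readUTF();
        System.out.println(type + ": " + payload);
    }
}

The first example, from the page file's Transaction, funnels a page's marshalled form through such a stream: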


    /**
     * Stores a page and its data, using the marshaller to convert the page's
     * payload into its byte form.
     *
     * @throws IOException
     *         If the page data marshals to a size larger than the maximum page size and overflow was false.
     * @throws IllegalStateException
     *         if the PageFile is not loaded
     */
    public <T> void store(Page<T> page, Marshaller<T> marshaller, final boolean overflow) throws IOException {
        DataByteArrayOutputStream out = (DataByteArrayOutputStream)openOutputStream(page, overflow);
        if (marshaller != null) {
            marshaller.writePayload(page.get(), out);
        }
        out.close();
    }
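A hedged usage sketch for store(): assuming a loaded PageFile with an active Transaction tx, and using StringMarshaller (one of the Marshaller implementations in org.apache.kahadb.util):

// Sketch only: 'tx' is assumed to be a Transaction on a loaded PageFile.
Page<String> page = tx.allocate();
page.set("some payload");
// overflow=true lets payloads larger than one page spill into chained
// overflow pages instead of throwing PageOverflowIOException.
tx.store(page, StringMarshaller.INSTANCE, true);

The overflow behaviour is implemented by openOutputStream(), which subclasses DataByteArrayOutputStream as shown next: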


        //
        // To support writing VERY large data, we override the output stream so
        // that we do the page writes incrementally while the data is being
        // marshalled.
        DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageFile.getPageSize() * 2) {
            Page current = copy;

            @SuppressWarnings("unchecked")
            @Override
            protected void onWrite() throws IOException {

                // Are we at an overflow condition?
                final int pageSize = pageFile.getPageSize();
                if (pos >= pageSize) {
                    // If overflow is allowed
                    if (overflow) {

                        Page next;
                        if (current.getType() == Page.PAGE_PART_TYPE) {
                            next = load(current.getNext(), null);
                        } else {
                            next = allocate();
                        }

                        next.txId = current.txId;

                        // Write the page header
                        int oldPos = pos;
                        pos = 0;

                        current.makePagePart(next.getPageId(), getWriteTransactionId());
                        current.write(this);

                        // Do the page write..
                        byte[] data = new byte[pageSize];
                        System.arraycopy(buf, 0, data, 0, pageSize);
                        Transaction.this.write(current, data);

                        // Reset for the next page chunk
                        pos = 0;
                        // The page header is marshalled after the data is written,
                        // so leave room for it at the front of the buffer.
                        skip(Page.PAGE_HEADER_SIZE);
                        // Move the overflow data after the header.
                        System.arraycopy(buf, pageSize, buf, pos, oldPos - pageSize);
                        pos += oldPos - pageSize;
                        current = next;

                    } else {
                        throw new PageOverflowIOException("Page overflow.");
                    }
                }

            }

            @SuppressWarnings("unchecked")
            @Override
            public void close() throws IOException {
                super.close();

                // We need to free up the rest of the page chain..
                if (current.getType() == Page.PAGE_PART_TYPE) {
                    free(current.getNext());
                }

                current.makePageEnd(pos, getWriteTransactionId());

                // Write the header..
                pos = 0;
                current.write(this);

                Transaction.this.write(current, buf);
            }
        };

        // The page header is marshalled after the data is written, so reserve room for it.
        out.skip(Page.PAGE_HEADER_SIZE);
        return out;
    }
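The pattern above hinges on the protected onWrite() hook that DataByteArrayOutputStream invokes on each write, together with direct access to the protected buf and pos fields. A stripped-down sketch of the same chunking idea (CHUNK_SIZE and flushChunk() are illustrative assumptions, not KahaDB API):

import java.io.IOException;
import org.apache.kahadb.util.DataByteArrayOutputStream;

public class ChunkedStreamSketch {
    static final int CHUNK_SIZE = 4096;

    // Hypothetical sink for completed chunks.
    static void flushChunk(byte[] data, int off, int len) {
        System.out.println("flushed " + len + " bytes");
    }

    static DataByteArrayOutputStream chunkedStream() {
        return new DataByteArrayOutputStream(CHUNK_SIZE * 2) {
            @Override
            protected void onWrite() throws IOException {
                // When a full chunk has accumulated, hand it off and slide
                // any remaining bytes back to the front of the buffer.
                while (pos >= CHUNK_SIZE) {
                    flushChunk(buf, 0, CHUNK_SIZE);
                    System.arraycopy(buf, CHUNK_SIZE, buf, 0, pos - CHUNK_SIZE);
                    pos -= CHUNK_SIZE;
                }
            }
        };
    }
}

The journal's writer thread uses the same stream class to assemble a whole write batch in memory before issuing a single file write: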

        DataFile dataFile = null;
        RandomAccessFile file = null;
        WriteBatch wb = null;
        try {

            DataByteArrayOutputStream buff = new DataByteArrayOutputStream(maxWriteBatchSize);
            while (true) {

                Object o = null;

                // Block till we get a command.
                synchronized (enqueueMutex) {
                    while (true) {
                        if (nextWriteBatch != null) {
                            o = nextWriteBatch;
                            nextWriteBatch = null;
                            break;
                        }
                        if (shutdown) {
                            return;
                        }
                        enqueueMutex.wait();
                    }
                    enqueueMutex.notify();
                }

                wb = (WriteBatch)o;
                if (dataFile != wb.dataFile) {
                    if (file != null) {
                        file.setLength(dataFile.getLength());
                        dataFile.closeRandomAccessFile(file);
                    }
                    dataFile = wb.dataFile;
                    file = dataFile.openRandomAccessFile();
                    if( file.length() < journal.preferedFileLength ) {
                        file.setLength(journal.preferedFileLength);
                    }
                }

                WriteCommand write = wb.writes.getHead();

                // Write an empty batch control record.
                buff.reset();
                buff.writeInt(Journal.BATCH_CONTROL_RECORD_SIZE);
                buff.writeByte(Journal.BATCH_CONTROL_RECORD_TYPE);
                buff.write(Journal.BATCH_CONTROL_RECORD_MAGIC);
                buff.writeInt(0);   // placeholder for the batch data length, patched in below
                buff.writeLong(0);  // placeholder for the checksum, patched in below
               
                boolean forceToDisk = false;
                while (write != null) {
                    forceToDisk |= write.sync | write.onComplete != null;
                    buff.writeInt(write.location.getSize());
                    buff.writeByte(write.location.getType());
                    buff.write(write.data.getData(), write.data.getOffset(), write.data.getLength());
                    write = write.getNext();
                }

                ByteSequence sequence = buff.toByteSequence();
               
                // Now we can fill in the batch control record properly.
                buff.reset();
                buff.skip(5 + Journal.BATCH_CONTROL_RECORD_MAGIC.length); // past the 4-byte size, 1-byte type, and magic
                buff.writeInt(sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                if( journal.isChecksum() ) {
                  Checksum checksum = new Adler32();
                  checksum.update(sequence.getData(), sequence.getOffset()+Journal.BATCH_CONTROL_RECORD_SIZE, sequence.getLength()-Journal.BATCH_CONTROL_RECORD_SIZE);
                  buff.writeLong(checksum.getValue());
                }

                // Now do the 1 big write.
                file.seek(wb.offset);
                file.write(sequence.getData(), sequence.getOffset(), sequence.getLength());
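The batch control record is written in two passes: first with zero placeholders, then, once the batch is assembled, reset() and skip() reposition the stream past the 4-byte size, 1-byte type, and magic so the real data length and Adler32 checksum can be patched in. A reader can verify the checksum with the same arithmetic (a sketch; sequence and storedChecksum are assumed to come from reading the batch back):

import java.util.zip.Adler32;
import java.util.zip.Checksum;
import org.apache.kahadb.journal.Journal;
import org.apache.kahadb.util.ByteSequence;

public class BatchChecksumSketch {
    // Recompute the Adler32 over the batch data that follows the control
    // record and compare it with the checksum stored in the record.
    static boolean checksumValid(ByteSequence sequence, long storedChecksum) {
        Checksum checksum = new Adler32();
        checksum.update(sequence.getData(),
                sequence.getOffset() + Journal.BATCH_CONTROL_RECORD_SIZE,
                sequence.getLength() - Journal.BATCH_CONTROL_RECORD_SIZE);
        return checksum.getValue() == storedChecksum;
    }
}

The page file's allocation path marshals each newly allocated page's header through the same stream type: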

                if (first == null) {
                    first = page;
                }

                addToCache(page);
                // Marshal the new page's header into a page-sized buffer;
                // getData() exposes the stream's internal byte array directly.
                DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageSize);
                page.write(out);
                write(page, out.getData());

                // LOG.debug("allocate writing: "+page.getPageId());
                c--;
            }

    /**
     * All updates are funneled through this method. The update is converted
     * to a protocol buffer, logged to the journal, and then applied to the
     * index, just as it would be during a recovery process.
     */
    public Location store(JournalCommand data, boolean sync) throws IOException {
      try {
            int size = data.serializedSizeFramed();
            DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
            os.writeByte(data.type().getNumber());
            data.writeFramed(os);
   
            long start = System.currentTimeMillis();
            Location location = journal.write(os.toByteSequence(), sync);
            long start2 = System.currentTimeMillis();
            process(data, location);
            long end = System.currentTimeMillis();
            if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
                LOG.info("Slow KahaDB access: Journal append took: " + (start2 - start) + " ms, Index update took " + (end - start2) + " ms");

       KahaAddMessageCommand expected = new KahaAddMessageCommand();
       expected.setDestination(new KahaDestination().setName("Foo").setType(DestinationType.QUEUE));
       expected.setMessage(new Buffer(new byte[] {1,2,3,4,5,6} ));
       expected.setMessageId("Hello World");
      
       int size = expected.serializedSizeFramed();
       DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
       os.writeByte(expected.type().getNumber());
       expected.writeFramed(os);
       ByteSequence seq = os.toByteSequence();
      
       DataByteArrayInputStream is = new DataByteArrayInputStream(seq);
       KahaEntryType type = KahaEntryType.valueOf(is.readByte());
       JournalCommand message = (JournalCommand)type.createMessage();
       message.mergeFramed(is);
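The elided tail of the test would typically assert that the decoded command matches the original, e.g. (a hypothetical closing line, not part of the fragment):

       assertEquals(expected, message);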

      // Run the optional pre-write callback before logging the update.
      if (before != null) {
          before.run();
      }
        try {
            int size = data.serializedSizeFramed();
            DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
            os.writeByte(data.type().getNumber());
            data.writeFramed(os);
   
            long start = System.currentTimeMillis();
            Location location = journal.write(os.toByteSequence(), sync);
            long start2 = System.currentTimeMillis();
            process(data, location);
            long end = System.currentTimeMillis();
            if (LOG_SLOW_ACCESS_TIME > 0 && end - start > LOG_SLOW_ACCESS_TIME) {
                LOG.info("Slow KahaDB access: Journal append took: " + (start2 - start) + " ms, Index update took " + (end - start2) + " ms");

                if (first == null) {
                    first = page;
                }

                addToCache(page);
                // Marshal the page header and persist the freshly allocated page.
                DataByteArrayOutputStream out = new DataByteArrayOutputStream(pageSize);
                page.write(out);
                write(page, out.getData());

                // LOG.debug("allocate writing: "+page.getPageId());
            }

            return first;

        }
    }

    public ByteSequence toByteSequence(JournalCommand<?> data) throws IOException {
        int size = data.serializedSizeFramed();
        DataByteArrayOutputStream os = new DataByteArrayOutputStream(size + 1);
        os.writeByte(data.type().getNumber());
        data.writeFramed(os);
        return os.toByteSequence();
    }
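Reading such a sequence back inverts the framing: the leading byte selects the KahaEntryType, which knows how to create the matching command instance (mirroring the test fragment above):

    DataByteArrayInputStream is = new DataByteArrayInputStream(toByteSequence(data));
    KahaEntryType type = KahaEntryType.valueOf(is.readByte());
    JournalCommand<?> command = (JournalCommand<?>) type.createMessage();
    command.mergeFramed(is);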
