Package org.apache.hadoop.hdfs.server.datanode

Examples of org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams
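The snippets below are taken from Hadoop DataNode tests and from the FSDataset write path. As an orientation, here is a minimal sketch of the lifecycle they exercise, assuming the API variant shown further down where createRbw(ExtendedBlock) returns a ReplicaInPipelineInterface and createStreams(boolean, DataChecksum) returns a BlockWriteStreams; the class name BlockWriteStreamsSketch and the fsdataset/bpid parameters are illustrative only, and package locations can differ between Hadoop versions.

// Illustrative lifecycle: create an RBW replica, obtain its BlockWriteStreams,
// write the block data, close both streams, then finalize the block.
import java.io.IOException;
import java.io.OutputStream;

import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams;
import org.apache.hadoop.hdfs.server.datanode.ReplicaInPipelineInterface;
import org.apache.hadoop.util.DataChecksum;

public class BlockWriteStreamsSketch {
  // fsdataset and bpid would come from a test fixture (for example a simulated
  // dataset and its block pool id); blockId and payload are supplied by the caller.
  static void writeOneBlock(FSDatasetInterface fsdataset, String bpid,
      long blockId, byte[] payload) throws IOException {
    ExtendedBlock b = new ExtendedBlock(bpid, blockId, 0, 0);
    ReplicaInPipelineInterface replica = fsdataset.createRbw(b);
    BlockWriteStreams out = replica.createStreams(true,
        DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
    try {
      OutputStream dataOut = out.dataOut;   // block data; out.checksumOut carries the CRCs
      dataOut.write(payload);
    } finally {
      out.close();                          // closes both the data and checksum streams
    }
    b.setNumBytes(payload.length);          // record the real length before finalizing
    fsdataset.finalizeBlock(b);
  }
}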


   */
  @Test
  public void testNotMatchedReplicaID() throws IOException {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
    BlockWriteStreams streams = null;
    try {
      streams = replicaInfo.createStreams(true, 0, 0);
      streams.checksumOut.write('a');
      dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
      try {
        dn.syncBlock(rBlock, initBlockRecords(dn));
        fail("Sync should fail");
      } catch (IOException e) {
        // Expected: recovery must be refused once the replica ID no longer matches.
        assertTrue(e.getMessage().startsWith("Cannot recover "));
      }
      verify(dn.namenode, never()).commitBlockSynchronization(
          any(Block.class), anyLong(), anyLong(), anyBoolean(),
          anyBoolean(), any(DatanodeID[].class));
    } finally {
      // createStreams may fail before streams is assigned, so guard against null.
      if (streams != null) {
        streams.close();
      }
    }
  }
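The test above stubs the NameNode with Mockito and asserts that commitBlockSynchronization is never called once recovery is refused. The verification pattern in isolation looks like the following; the Reporter interface is a hypothetical stand-in for DatanodeProtocol.

// Stand-alone illustration of the verify(..., never()) pattern used above.
import static org.mockito.Mockito.anyLong;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.never;
import static org.mockito.Mockito.verify;

public class NeverCalledExample {
  // Hypothetical stand-in for DatanodeProtocol.
  public interface Reporter {
    void commit(long blockId, long length);
  }

  public static void main(String[] args) {
    Reporter reporter = mock(Reporter.class);
    // ... exercise code that is expected to fail before any report is sent ...
    // Throws a verification error if commit() was ever invoked, with any arguments.
    verify(reporter, never()).commit(anyLong(), anyLong());
  }
}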


      crcOut = new FileOutputStream(metaRAF.getFD() );
      if (!isCreate) {
        blockOut.getChannel().position(blockDiskSize);
        crcOut.getChannel().position(crcDiskSize);
      }
      return new BlockWriteStreams(blockOut, crcOut, checksum);
    } catch (IOException e) {
      IOUtils.closeStream(blockOut);
      IOUtils.closeStream(metaRAF);
      throw e;
    }
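The fragment above is the producer side: it wraps the block file and metadata file descriptors into a BlockWriteStreams, repositions both channels to the existing on-disk sizes when appending, and on failure closes whatever was already opened before rethrowing. A self-contained sketch of that open-both-or-close-everything pattern, using ordinary files rather than the DataNode's block and meta files, might look like this (paths and the method name are illustrative):

// If opening the second stream fails, close the first with IOUtils.closeStream
// (null-safe, swallows secondary errors) and rethrow the original IOException.
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.RandomAccessFile;

import org.apache.hadoop.io.IOUtils;

public class OpenPairExample {
  static FileOutputStream[] openDataAndMeta(String dataPath, String metaPath)
      throws IOException {
    FileOutputStream dataOut = null;
    RandomAccessFile metaRAF = null;
    try {
      dataOut = new FileOutputStream(dataPath, true);    // append to the data file
      metaRAF = new RandomAccessFile(metaPath, "rw");
      FileOutputStream crcOut = new FileOutputStream(metaRAF.getFD());
      crcOut.getChannel().position(metaRAF.length());    // append to the checksum file
      return new FileOutputStream[] { dataOut, crcOut };
    } catch (IOException e) {
      IOUtils.closeStream(dataOut);   // no-op if dataOut is still null
      IOUtils.closeStream(metaRAF);   // closing the RAF also releases crcOut's descriptor
      throw e;
    }
  }
}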

    for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
      // We pass the expected length as zero; fsdataset should use the size of
      // the data actually written.
      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
      BlockWriteStreams out = bInfo.createStreams(true,
          DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32, 512));
      try {
        OutputStream dataOut = out.dataOut;
        assertEquals(0, fsdataset.getLength(b));
        for (int j=1; j <= blockIdToLen(i); ++j) {
          dataOut.write(j);
          assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
          bytesAdded++;
        }
      } finally {
        out.close();
      }
      b.setNumBytes(blockIdToLen(i));
      fsdataset.finalizeBlock(b);
      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    }


  public void testNotMatchedReplicaID() throws IOException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
    BlockWriteStreams streams = null;
    try {
      streams = replicaInfo.createStreams(true,
          DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
      streams.checksumOut.write('a');
      dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
      try {
        dn.syncBlock(rBlock, initBlockRecords(dn));
        fail("Sync should fail");
      } catch (IOException e) {
        // Expected: recovery must be refused once the replica ID no longer matches.
        assertTrue(e.getMessage().startsWith("Cannot recover "));
      }
      DatanodeProtocol namenode = dn.getBPNamenode(POOL_ID);
      verify(namenode, never()).commitBlockSynchronization(
          any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
          anyBoolean(), any(DatanodeID[].class));
    } finally {
      // createStreams may fail before streams is assigned, so guard against null.
      if (streams != null) {
        streams.close();
      }
    }
  }

    for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
      // We pass the expected length as zero; fsdataset should use the size of
      // the data actually written.
      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
      BlockWriteStreams out = bInfo.createStreams(true,
          DataChecksum.newDataChecksum(DataChecksum.Type.CRC32, 512));
      try {
        OutputStream dataOut = out.dataOut;
        assertEquals(0, fsdataset.getLength(b));
        for (int j=1; j <= blockIdToLen(i); ++j) {
          dataOut.write(j);
          assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
          bytesAdded++;
        }
      } finally {
        out.close();
      }
      b.setNumBytes(blockIdToLen(i));
      fsdataset.finalizeBlock(b);
      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    }

  public void testNotMatchedReplicaID() throws IOException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
    BlockWriteStreams streams = null;
    try {
      streams = replicaInfo.createStreams(true, 0, 0);
      streams.checksumOut.write('a');
      dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
      try {
        dn.syncBlock(rBlock, initBlockRecords(dn));
        fail("Sync should fail");
      } catch (IOException e) {
        // Expected: recovery must be refused once the replica ID no longer matches.
        assertTrue(e.getMessage().startsWith("Cannot recover "));
      }
      DatanodeProtocol namenode = dn.getBPNamenode(POOL_ID);
      verify(namenode, never()).commitBlockSynchronization(
          any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
          anyBoolean(), any(DatanodeID[].class));
    } finally {
      // createStreams may fail before streams is assigned, so guard against null.
      if (streams != null) {
        streams.close();
      }
    }
  }

    for (int i = startingBlockId; i < startingBlockId+NUMBLOCKS; ++i) {
      ExtendedBlock b = new ExtendedBlock(bpid, i, 0, 0);
      // We pass the expected length as zero; fsdataset should use the size of
      // the data actually written.
      ReplicaInPipelineInterface bInfo = fsdataset.createRbw(b);
      BlockWriteStreams out = bInfo.createStreams(true, 512, 4);
      try {
        OutputStream dataOut = out.dataOut;
        assertEquals(0, fsdataset.getLength(b));
        for (int j=1; j <= blockIdToLen(i); ++j) {
          dataOut.write(j);
          assertEquals(j, bInfo.getBytesOnDisk()); // correct length even as we write
          bytesAdded++;
        }
      } finally {
        out.close();
      }
      b.setNumBytes(blockIdToLen(i));
      fsdataset.finalizeBlock(b);
      assertEquals(blockIdToLen(i), fsdataset.getLength(b));
    }

          new RandomAccessFile( metaFile, "rw" ).getFD() );
      if (!isCreate) {
        blockOut.getChannel().position(blockDiskSize);
        crcOut.getChannel().position(crcDiskSize);
      }
      return new BlockWriteStreams(blockOut, crcOut);
    } catch (IOException e) {
      IOUtils.closeStream(blockOut);
      IOUtils.closeStream(crcOut);
      throw e;
    }

  public void testNotMatchedReplicaID() throws IOException {
    if(LOG.isDebugEnabled()) {
      LOG.debug("Running " + GenericTestUtils.getMethodName());
    }
    ReplicaInPipelineInterface replicaInfo = dn.data.createRbw(block);
    BlockWriteStreams streams = null;
    try {
      streams = replicaInfo.createStreams(true, 0, 0);
      streams.checksumOut.write('a');
      dn.data.initReplicaRecovery(new RecoveringBlock(block, null, RECOVERY_ID+1));
      try {
        dn.syncBlock(rBlock, initBlockRecords(dn));
        fail("Sync should fail");
      } catch (IOException e) {
        // Expected: recovery must be refused once the replica ID no longer matches.
        assertTrue(e.getMessage().startsWith("Cannot recover "));
      }
      verify(dn.namenode, never()).commitBlockSynchronization(
          any(Block.class), anyLong(), anyLong(), anyBoolean(),
          anyBoolean(), any(DatanodeID[].class));
    } finally {
      // createStreams may fail before streams is assigned, so guard against null.
      if (streams != null) {
        streams.close();
      }
    }
  }
