Package org.apache.hadoop.raid

Examples of org.apache.hadoop.raid.RaidCodec
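
The snippets below appear to come from Facebook's hadoop-20 fork of HDFS, where RaidCodec describes an erasure-coding scheme (such as "xor" or "rs") used to raid files. A codec is looked up by id and exposes the parameters that the namenode code below consults. A minimal sketch of that lookup, assuming only the members that actually appear in these snippets:

    // Minimal sketch: only the RaidCodec members used in the snippets below are assumed.
    RaidCodec codec = RaidCodec.getCodec("rs"); // returns null for an unknown codec id
    if (codec == null) {
      throw new IOException("codec rs doesn't exist");
    }
    // Members consulted by the code below:
    //   codec.parityReplication    - replication factor of the parity file
    //   codec.minSourceReplication - lowest source replication a raided file may keep
    //   codec.numParityBlocks      - parity blocks generated per stripe
    //   codec.getNumStripes(n)     - stripe count for an n-block source file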


   
    String otherCodec = codecId.equals("xor") ? "rs" : "xor";
    raidFile(dfs, source, otherCodec, (short)2,
        "raidFile: couldn't raid a raided file");
   
    RaidCodec codec = RaidCodec.getCodec(codecId);
    long startTime = System.currentTimeMillis();
    result = false;
    while (System.currentTimeMillis() - startTime < 70000 && !result) {
      DFSTestUtil.waitNSecond(3);
      result = dfs.raidFile(source, codecId, expectedSourceRepl);
      // ...
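
In this test, the raidFile helper is first expected to fail against a file already raided with a different codec (hence the expected error message passed to it), and dfs.raidFile is then polled every three seconds for up to 70 seconds: as the raidFile snippet further down shows, the call only returns true once raiding has completed and the source replication can be lowered.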


    Path dest = new Path(dir, "2");
    long crc = DFSTestUtil.createFile(dfs, source, fileLen, sourceRepl, 1);
   
    Path parityDir = new Path("/raid/user/facebook" + id);
    assertTrue(dfs.mkdirs(parityDir));
    RaidCodec codec = RaidCodec.getCodec(codecId);
    Path parity = new Path(parityDir, "1");
    DFSTestUtil.createFile(dfs, parity, parityLen,
        codec.parityReplication, 1);
    int[] checksums = new int[numBlocks];
    for (int i = 0; i < numBlocks; i++) {
      // ...
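
This setup fabricates the parity file by hand: the parity path mirrors the source under /raid, the parity file is created with codec.parityReplication (matching the replication check in merge() below), and a per-block checksum array is prepared for the source blocks.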

    // Verify checksums
    if (checksums == null || checksums.length == 0) {
      throw new IOException("merge: checksum array is empty or null");
    }
    // Verify codec
    RaidCodec codec = RaidCodec.getCodec(codecId);
    if (codec == null) {
      throw new IOException("merge: codec " + codecId + " doesn't exist");
    }
   
    INode[] sourceINodes = dir.getExistingPathINodes(source);
    INode[] parityINodes = dir.getExistingPathINodes(parity);
    writeLock();
    try {
      // write permissions for the source
      if (isPermissionEnabled) {
        if (isPermissionCheckingEnabled(sourceINodes)) {
          checkPathAccess(source, sourceINodes, FsAction.WRITE);
        }
        if (isPermissionCheckingEnabled(parityINodes)) {
          checkPathAccess(parity, parityINodes, FsAction.READ); // read the file
          checkParentAccess(parity, parityINodes, FsAction.WRITE); // for delete
        }
      }
      INode sinode = sourceINodes[sourceINodes.length - 1];
      INode pinode = parityINodes[parityINodes.length - 1];
     
      if (sinode == null || pinode == null) {
        throw new IOException(
            "merge: source file or parity file doesn't exist");
      }
     
      if (sinode.isUnderConstruction() || pinode.isUnderConstruction()) {
        throw new IOException(
            "merge: source file or parity file is under construction");
      }
     
      if (sinode.isDirectory() || pinode.isDirectory()) {
        throw new IOException(
            "merge: source file or parity file is a directory");
      }
     
      if (sinode instanceof INodeHardLinkFile ||
          pinode instanceof INodeHardLinkFile) {
        throw new IOException("merge: source file or parity file is hardlinked")
      }

      INodeFile sourceINode = (INodeFile) sinode;
      INodeFile parityINode = (INodeFile) pinode;

      if (sourceINode.getStorageType() != StorageType.REGULAR_STORAGE
          || parityINode.getStorageType() != StorageType.REGULAR_STORAGE) {
        throw new IOException(
            "merge: source file or parity file doesn't support merge");
      }
      if (sourceINode.getModificationTime() != parityINode.getModificationTime()) {
        throw new IOException(
            "merge: source file and parity file doesn't have the same modification time");
      }
      if (parityINode.getReplication() != codec.parityReplication) {
        throw new IOException(
            "merge: parity file's replication doesn't match codec's parity replication");
      }
     
      BlockInfo[] sourceBlks = sourceINode.getBlocks();
      BlockInfo[] parityBlks = parityINode.getBlocks();
     
      if (sourceBlks == null || sourceBlks.length == 0) {
        throw new IOException("merge: " + source + " is empty");
      }
      if (parityBlks == null || parityBlks.length == 0) {
        throw new IOException("merge: " + parity + " is empty");
      }
      if (checksums.length != sourceBlks.length) {
        throw new IOException("merge: checksum length " + checksums.length +
            " doesn't match number of source blocks " + sourceBlks.length);
      }
      int expectedParityBlocks = codec.getNumStripes(sourceBlks.length)
          * codec.numParityBlocks;
      if (expectedParityBlocks != parityBlks.length) {
        throw new IOException("merge: expect parity blocks " +
            expectedParityBlocks + " doesn't match number of parity blocks " +
            parityBlks.length);
      }
      // ...
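
The parity-block count check above is plain arithmetic. As an illustration, assume an "rs" codec that stripes 10 source blocks together and emits 4 parity blocks per stripe (the stripe geometry is not shown in this snippet, so both numbers are hypothetical):

    // Hypothetical geometry: stripe length 10 and 4 parity blocks per stripe
    // are illustrative values, not taken from this snippet.
    int sourceBlocks = 25;
    int stripeLength = 10;
    int numStripes = (sourceBlocks + stripeLength - 1) / stripeLength; // ceil(25/10) = 3
    int expectedParityBlocks = numStripes * 4;                         // 3 * 4 = 12
    // merge() would reject a parity file that does not contain exactly 12 blocks.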

    // Verify source
    if (source == null || source.isEmpty()) {
      throw new IOException("raidFile: source file name is empty");
    }
    // Verify codec
    RaidCodec codec = RaidCodec.getCodec(codecId);
    if (codec == null) {
      throw new IOException("raidFile: codec " + codecId +
          " doesn't exist");
    }
    // Verify expectedSourceRepl
    if (codec.minSourceReplication > expectedSourceRepl) {
      throw new IOException(
          "raidFile: expectedSourceRepl is smaller than " +
          codec.minSourceReplication);
    }
    INode[] sourceINodes = dir.getExistingPathINodes(source);
    boolean status = false;
    writeLock();
    try {
      if (isInSafeMode()) {
        throw new SafeModeException("raidFile: Cannot raid file " + source, safeMode);
      }
      verifyReplication(source, expectedSourceRepl, null);
    
      // write permissions for the source
      if (isPermissionEnabled && isPermissionCheckingEnabled(sourceINodes)) {
        checkPathAccess(source, sourceINodes, FsAction.WRITE);
      }
      INode sinode = sourceINodes[sourceINodes.length - 1];
      if (sinode == null) {
        throw new IOException("raidFile: source file doesn't exist");
      }
     
      if (sinode.isUnderConstruction()) {
        throw new IOException(
            "raidFile: source file is under-construction");
      }
     
      if (sinode.isDirectory()) {
        throw new IOException("raidFile: source file is a directory");
      }
     
      INodeFile sourceINode = (INodeFile)sinode;
      BlockInfo[] blocks = sourceINode.getBlocks();
      if (blocks == null || blocks.length == 0) {
        throw new IOException("raidFile: source file is empty");
      }
     
      if (sourceINode instanceof INodeHardLinkFile) {
        throw new IOException("raidFile: cannot raid a hardlinked file");
      }
     
      if (sourceINode.getStorageType() == StorageType.RAID_STORAGE) {
        INodeRaidStorage storage = (INodeRaidStorage)sourceINode.getStorage();
        if (!storage.getCodec().id.equals(codec.id)) {
          throw new IOException("raidFile: couldn't raid a raided file");
        }
        // check parity blocks and schedule raiding or set replication
        if (codec.checkRaidProgress(sourceINode, raidEncodingTasks, this, false)) {
          setReplicationInternal(source, expectedSourceRepl);
          status = true;
        }
      } else if (sourceINode.getStorageType() == StorageType.REGULAR_STORAGE) {
        // allocate parity blocks
        checkFsObjectLimit();
        // Verify all source blocks have checksums
        boolean allHasChecksums = true;
        for (int i = 0; i < blocks.length; i++) {
          if (blocks[i].getChecksum() == BlockInfo.NO_BLOCK_CHECKSUM) {
            allHasChecksums = false;
            break;
          }
        }
        if (!allHasChecksums) {
          throw new IOException("raidFile: not all source blocks have checksums");
        }
        int numParityBlocks = codec.getNumParityBlocks(sourceINode.getBlocks().length);
        Block[] parityBlocks = allocateParityBlocks(numParityBlocks);
        // Convert to Raid format
        dir.raidFile(sourceINodes, source, codec, expectedSourceRepl, parityBlocks);
        // schedule raid tasks
        codec.checkRaidProgress(sourceINode, raidEncodingTasks, this, true);
      } else {
        throw new IllegalArgumentException("raidFile: storage is not valid");
      }
    } finally {
      writeUnlock();
      // ...
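
raidFile() handles two cases under the namesystem write lock. A file already in RAID_STORAGE, raided with the same codec, is only re-checked: once checkRaidProgress reports that the parity blocks are in place, the source replication is lowered and true is returned. A REGULAR_STORAGE file instead has parity blocks allocated, is converted to the raid layout via dir.raidFile, and gets encoding tasks scheduled; completion is observed on a later call, which is why the test in the first snippet polls.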

   */
  private boolean processRaidEncodingTask(RaidBlockInfo rbi) {
    ArrayList<ArrayList<DatanodeInfo>> stripeDatanodes =
        new ArrayList<ArrayList<DatanodeInfo>>();
    BlockInfo[] stripeBlocks = null;
    RaidCodec codec = null;
    StringBuilder sb = new StringBuilder();
    StringBuilder dnSb = new StringBuilder();
    readLock();
    try {
      INodeFile inode = blocksMap.getINode(rbi);
      if (inode == null) {
        return false; // file has been deleted already, nothing to do.
      }
      if (inode.getStorageType() != StorageType.RAID_STORAGE) {
        LOG.error("File for block " + rbi + " is not raidable");
        return false;
      }
      INodeRaidStorage storage = (INodeRaidStorage)inode.getStorage();
      codec = storage.getCodec();
      // Find out related blocks
      stripeBlocks = codec.getBlocksInOneStripe(
          inode.getBlocks(), rbi);
      // Find out datanode locations
      for (int i = 0; i < stripeBlocks.length; i++) {
        ArrayList<DatanodeInfo> liveNodes =
            new ArrayList<DatanodeInfo>();
        // ...
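
Under the read lock, processRaidEncodingTask collects everything an encoding task needs: the file's codec, the blocks of the stripe containing rbi (via codec.getBlocksInOneStripe), and a list of live datanode locations for each block in that stripe.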

          concatDeleteOp.timestamp);
      break;
    }
    case OP_MERGE: {
      MergeOp mergeOp = (MergeOp)op;
      RaidCodec codec = RaidCodec.getCodec(mergeOp.codecId);
      if (codec == null) {
        LOG.error("Codec " + mergeOp.codecId + " doesn't exist");
        logErrorAndFail("Merge: codec doesn't exist");
      }
      INode[] sourceINodes = fsDir.getExistingPathINodes(mergeOp.source);
      // ...
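
Edit-log replay re-resolves the codec by id when applying OP_MERGE; since the merge was validated before it was logged, a codec that no longer exists presumably indicates a configuration mismatch, and replay aborts via logErrorAndFail.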

    try {
      cluster = new MiniDFSCluster(conf, 1, true, null);
      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
      final DistributedFileSystem dfs = DFSUtil.convertToDFS(cluster.getFileSystem());
      String filePath = "/test/file1";
      RaidCodec rsCodec = RaidCodec.getCodec("rs");
      RaidDFSUtil.constructFakeRaidFile(dfs, filePath, rsCodec);
     
      DatanodeDescriptor[] datanodes = (DatanodeDescriptor[])
          namesystem.heartbeats.toArray(
              new DatanodeDescriptor[1]);
      // ...
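
This test stands up a single-datanode MiniDFSCluster, uses RaidDFSUtil.constructFakeRaidFile to fabricate an "rs"-raided file at /test/file1, and then pulls the registered datanodes out of namesystem.heartbeats.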

        inodeId = rootNode ? INodeId.ROOT_INODE_ID : namesystem.dir.allocateNewInodeId();
      }
     
      byte inodeType = INode.INodeType.REGULAR_INODE.type;
      long hardLinkID = -1;
      RaidCodec codec = null;
     
      if (context.supportsHardlink) { 
        inodeType = in.readByte();
        if (inodeType == INode.INodeType.HARDLINKED_INODE.type) { 
          hardLinkID = WritableUtils.readVLong(in);
          // ...
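
When the image format supports hardlinks, each inode record starts with a type byte, and HARDLINKED_INODE records carry an extra hard-link id. The RaidCodec local initialized to null suggests that raided inodes are tagged with their codec in the same way, though that branch is truncated here.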

    int priLevel = getPriority(blockInfo, curReplicas, decomissionedReplicas,
                               expectedReplicas);
    INodeFile fileINode = blockInfo.getINode();
    if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && fileINode != null &&
        fileINode.getStorageType().equals(StorageType.RAID_STORAGE)) {
      RaidCodec codec = ((INodeRaidStorage) fileINode.getStorage()).getCodec();
      return raidQueue.add(blockInfo, codec);
    }
   
    if(priLevel != LEVEL && priorityQueues.get(priLevel).add(blockInfo)) {
      if (NameNode.stateChangeLog.isDebugEnabled()) {
        // ...

  /* Remove a block from an under-replication queue, given a priority. */
  synchronized boolean remove(BlockInfo blockInfo, int priLevel) {
    INodeFile fileINode = blockInfo.getINode();
    if (priLevel == QUEUE_WITH_CORRUPT_BLOCKS && fileINode != null &&
        fileINode.getStorageType().equals(StorageType.RAID_STORAGE)) {
      RaidCodec codec = ((INodeRaidStorage) fileINode.getStorage()).getCodec();
      return raidQueue.remove(blockInfo, codec);
    }
   
    if(priLevel >= 0 && priLevel < LEVEL
        && priorityQueues.get(priLevel).remove(blockInfo)) {
      // ...
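
Both add() and remove() special-case corrupt blocks that belong to raided files: instead of the ordinary priority queues, such blocks are routed to a codec-aware raidQueue, presumably so that recovery can take the codec's stripe structure into account.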
