Package: org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream.readByte()


            return codec.createInputStream(i);
          }
          break;
        }
        case 0x4f62: { // 'O' 'b'
          if (i.readByte() == 'j') {
            i.close();
            return new AvroFileInputStream(item.stat);
          }
          break;
        }
View Full Code Here


        return HdfsFileType.TXT;
      }

      switch (is.readShort()) {
      case 0x5345:
        if (is.readByte() == 'Q') {
          return HdfsFileType.SEQ;
        }
      default:
        is.seek(0);
        CompressionCodecFactory compressionCodecFactory = new CompressionCodecFactory(
View Full Code Here

    return new Iterator<Mutation>() {
     
      byte eventType;
     
      {
        eventType = login.readByte();
      }
     
      public boolean hasNext() {
        return eventType != CLOSE_EVENT;
      }
View Full Code Here

      }
     
      public Mutation next() {
        try {
          mutation.readFields(login);
          eventType = login.readByte();
        } catch (IOException e) {
          throw new RuntimeException(e);
        }
       
        return mutation;
View Full Code Here

          i.seek(0);
          return new GZIPInputStream(i);
        }
        case 0x5345: { // 'S' 'E'
          // Might be a SequenceFile
          if (i.readByte() == 'Q') {
            i.close();
            return new TextRecordInputStream(item.stat);
          }
        }
        default: {
View Full Code Here

            return codec.createInputStream(i);
          }
          break;
        }
        case 0x4f62: { // 'O' 'b'
          if (i.readByte() == 'j') {
            i.close();
            return new AvroFileInputStream(item.stat);
          }
          break;
        }
View Full Code Here

      FSDataOutputStream stm = createFile(fs, file1, 2);
      writeFile(stm);
      stm.close();

      in = fs.open(file1);
      in.readByte();

      DFSInputStream dfsClientIn = findDFSClientInputStream(in);     
      Field blockReaderField = DFSInputStream.class.getDeclaredField("blockReader");
      blockReaderField.setAccessible(true);
      BlockReader blockReader = (BlockReader) blockReaderField.get(dfsClientIn);
View Full Code Here

      blockReader.setArtificialSlowdown(1000);
      blockReader.isReadLocal = false;
      blockReader.isReadRackLocal = false;
      for (int i = 0; i < 1024; i++) {
        in.readByte();
      }

      blockReader.setArtificialSlowdown(0);
      for (int i = 1024; i < fileSize - 1; i++) {
        in.readByte();
View Full Code Here

        in.readByte();
      }

      blockReader.setArtificialSlowdown(0);
      for (int i = 1024; i < fileSize - 1; i++) {
        in.readByte();
      }

      ConcurrentHashMap<DatanodeInfo, DatanodeInfo> deadNodes = getDeadNodes(dfsClientIn);
      TestCase.assertEquals(1, deadNodes.size());
    } finally {
View Full Code Here

    start = 0;
    total = new byte[(int)trgLen];
    stm = dfs.open(trgPath);
    while(start<trgLen){
      stm.seek(start);
      total[start++] = stm.readByte();
    }
    stm.close();
    checkFileContent(total, bytes);
   
    // 7. positioned reading
View Full Code Here

TOP
Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.