Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FSDataInputStream
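
FSDataInputStream is the stream type returned by FileSystem.open(Path). It extends java.io.DataInputStream and implements Seekable and PositionedReadable, so callers can seek(), query getPos(), and read at an absolute offset without moving the current stream position. The excerpts below show how real projects use it. First, a minimal self-contained sketch of the two common patterns; the class name is made up for illustration, the input path comes from the command line, and the random-access part assumes the file holds at least 8 bytes:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class FSDataInputStreamSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path(args[0]);             // path to an existing file, supplied by the caller
    FileSystem fs = path.getFileSystem(conf);

    // Sequential text read: FileSystem.open() returns an FSDataInputStream,
    // and try-with-resources closes it even if a read fails.
    try (FSDataInputStream in = fs.open(path);
         BufferedReader reader = new BufferedReader(
             new InputStreamReader(in, StandardCharsets.UTF_8))) {
      System.out.println("first line: " + reader.readLine());
    }

    // Random access: seek() comes from Seekable, positioned readFully() from
    // PositionedReadable. readFully() throws EOFException if the file is shorter
    // than the requested range.
    try (FSDataInputStream in = fs.open(path)) {
      byte[] buf = new byte[8];
      in.readFully(0L, buf, 0, buf.length);    // reads at offset 0; getPos() is still 0 afterwards
      in.seek(4L);                             // move the stream offset to byte 4
      int nextByte = in.read();                // reads the byte at offset 4
    }
  }
}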


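    // Reads a job's split meta-info file: enforce a size limit, validate the header with
    // readFully(), check the vint-encoded version, then build a TaskSplitMetaInfo per split.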
    FileStatus fStatus = fs.getFileStatus(metaSplitFile);
    if (maxMetaInfoSize > 0 && fStatus.getLen() > maxMetaInfoSize) {
      throw new IOException("Split metadata size exceeded " +
          maxMetaInfoSize +". Aborting job " + jobId);
    }
    FSDataInputStream in = fs.open(metaSplitFile);
    byte[] header = new byte[JobSplit.META_SPLIT_FILE_HEADER.length];
    in.readFully(header);
    if (!Arrays.equals(JobSplit.META_SPLIT_FILE_HEADER, header)) {
      in.close();
      throw new IOException("Invalid header on split file");
    }
    int vers = WritableUtils.readVInt(in);
    if (vers != JobSplit.META_SPLIT_VERSION) {
      in.close();
      throw new IOException("Unsupported split version " + vers);
    }
    int numSplits = WritableUtils.readVInt(in); //TODO: check for insane values
    JobSplit.TaskSplitMetaInfo[] allSplitMetaInfo =
      new JobSplit.TaskSplitMetaInfo[numSplits];
    for (int i = 0; i < numSplits; i++) {
      JobSplit.SplitMetaInfo splitMetaInfo = new JobSplit.SplitMetaInfo();
      splitMetaInfo.readFields(in);
      JobSplit.TaskSplitIndex splitIndex = new JobSplit.TaskSplitIndex(
          JobSubmissionFiles.getJobSplitFile(jobSubmitDir).toString(),
          splitMetaInfo.getStartOffset());
      allSplitMetaInfo[i] = new JobSplit.TaskSplitMetaInfo(splitIndex,
          splitMetaInfo.getLocations(),
          splitMetaInfo.getInputDataLength());
    }
    in.close();
    return allSplitMetaInfo;
  }


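      // Streams text files line by line, normalizing \001 and tab delimiters to commas and
      // stopping once a malformed row is seen or maxsize bytes have been read.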
      if (isbreak) {
        break;
      }
      if (!f.isDir() && !f.getPath().getName().startsWith("_")) {
        FSDataInputStream in = fs.open(f.getPath());
        BufferedReader bf = new BufferedReader(new InputStreamReader(in));
        String line;
        while ((line = bf.readLine()) != null) {
          bytesRead += line.getBytes().length;
          String towrite = line.replaceAll("\001", ",").replaceAll("\t", ",");
          System.out.println(towrite);
          if (!towrite.isEmpty()) {
            result = towrite.split(",");
            if (result.length < 8) {
              // fewer than the 8 expected fields: stop reading and discard the partial row
              isbreak = true;
              result = null;
            }
          }
          if (bytesRead >= maxsize) {
            // size limit reached; the reader and stream are closed right after the loop
            isbreak = true;
          }

          if (isbreak) {
            break;
          }
        }
        bf.close();
        in.close();
      }
    }
   
    System.out.println(Arrays.toString(result));
    if(result!=null&&result.length>=8)

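  // Returns the first line of a file as a string, or an empty string if the file is missing
  // or unreadable.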
  public String readFirstLineStr(Path file) {
    StringBuffer buff = new StringBuffer();
    try {
      if (fs.exists(file)) {
        FSDataInputStream r = fs.open(file);
        BufferedReader in = new BufferedReader(new InputStreamReader(r,
            "UTF-8"));
        String line = in.readLine();
        if (line != null) { // readLine() returns null for an empty file; avoid appending "null"
          buff.append(line);
        }
        in.close();
        r.close();
      }
    } catch (IOException e) {
      // ignored, as in the original excerpt: return whatever was read so far
    }
    return buff.toString();
  }
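
The helper above also leaves both streams open if readLine() or the stream wrapping throws, because the empty catch block is reached before the close() calls. A sketch of the same method using try-with-resources (the fs field and the UTF-8 charset are carried over from the excerpt; the rest is illustrative, not the original author's code):

  public String readFirstLineStr(Path file) {
    try {
      if (fs.exists(file)) {
        // Both streams are closed automatically, even if a read fails.
        try (FSDataInputStream r = fs.open(file);
             BufferedReader in = new BufferedReader(new InputStreamReader(r, "UTF-8"))) {
          String line = in.readLine();
          return line == null ? "" : line;
        }
      }
    } catch (IOException e) {
      // ignored, as in the excerpt: fall through and return an empty string
    }
    return "";
  }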

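    // Parses a schema file line by line, collecting "fieldName fieldType ..." entries that
    // match a precompiled pattern.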
    Matcher matcher = pattern.matcher("");
    List<String> list = new ArrayList<String>();
    BufferedReader br = null;
    String[] fields = null;
    try {
      FSDataInputStream in = fs.open(new Path(schemaFile));
      br = new BufferedReader(new InputStreamReader(in));
      String temp = null; // each line holds space-separated values: "fieldName fieldType"
      while ((temp = br.readLine()) != null) {
        matcher.reset(temp);
        if (matcher.find()) {
          String fnft = matcher.group(1) + " " + matcher.group(2)
              + " " + matcher.group(3) + " " + matcher.group(4);
          System.out.println(fnft);
          list.add(fnft);
        }
      }
      in.close();
    } finally {
      if (br != null) {
        br.close();
      }
    }

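    // Reads a table's solr/conf/schema.xml and records field-name/field-type pairs extracted
    // by a precompiled pattern.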
    Matcher matcher = pattern.matcher("");
    BufferedReader br = null;
    try {
      FSDataInputStream in = fs.open(new Path(getBasePath(stormconf), tablename
          + "/solr/conf/schema.xml"));
      br = new BufferedReader(new InputStreamReader(in));
      String temp = null;
      while ((temp = br.readLine()) != null) {
        matcher.reset(temp);
        if (matcher.find()) {
          datatype.info.put(matcher.group(1), matcher.group(2));
        }
      }
      in.close();
    } catch (Exception e) {
      // ignored in the original excerpt: leave datatype.info with whatever was parsed
    } finally {
      if (br != null) {
        br.close();
      }

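    // Loads each non-empty, trimmed line of a file into a set, applying a caller-supplied
    // transformation.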
    Set<A> inlist = new HashSet<A>();
    Configuration conf = this.getConf();
    Path p = new Path(file);

    FileSystem fs = p.getFileSystem(conf);
    FSDataInputStream in = fs.open(p);
    BufferedReader br = new BufferedReader(new InputStreamReader(in));
    String s1 = null;
    while ((s1 = br.readLine()) != null) {
      String line = s1.trim();
      if (!line.isEmpty()) {
        inlist.add(trans.trans(line));
      }
    }
    br.close();
    in.close();

    return inlist;
  }

  }
 
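  // Reads an entire file into a string, appending "\r\n" after every line.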
  private static String getFileContent(FileSystem fs, Path f) throws IOException
  {
    StringBuffer buffer = new StringBuffer();
    FSDataInputStream in = fs.open(f);
    BufferedReader bf = new BufferedReader(new InputStreamReader(in));
    String line;
    while ((line = bf.readLine()) != null) {
      buffer.append(line);
      buffer.append("\r\n");
    }
    bf.close();
    in.close();
   
    return buffer.toString();
  }

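      // Seeds k clusters by sampling vectors from input files: the first k vectors start new
      // clusters, later ones are merged into a randomly chosen cluster, and at most
      // fileMaxReadCount lines are read per file.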
      if (fileStatus.isDir()) {
        continue;
      }

      int filehasread = 0;
      FSDataInputStream in = fs.open(fileStatus.getPath());
      BufferedReader bf = new BufferedReader(
          new InputStreamReader(in));
      String line;
      while ((line = bf.readLine()) != null) {
        Vector vec = parse.parseVector(line);
        if (vec == null) {
          continue;
        }
//        System.out.println(filehasread+"@"+fileMaxReadCount+","+vec.toString());
        number++;
        filehasread++;
       
        int currentSize = chosenClusters.size();
        if (currentSize < k) {
          Cluster newCluster = new Cluster(vec, nextClusterId++);
          chosenClusters.add(newCluster);
        } else {
          int randIndex = (int) (Math.random() * currentSize);
          chosenClusters.get(randIndex).getCenter().merger(vec);
        }

        if (filehasread > fileMaxReadCount) {
          break;
        }

      }
      bf.close();
      in.close();

    }

    for (int i = 0; i < k; i++) {
      Cluster closter = chosenClusters.get(i);

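      // JobTracker recovery: read the previous restart count from the restart file with
      // readInt(), increment it, and fail loudly if the system directory is corrupted.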
          throw ioe;
        }
        return;
      }

      FSDataInputStream in = fs.open(restartFile);
      try {
        // read the old count
        restartCount = in.readInt();
        ++restartCount; // increment the restart count
      } catch (IOException ioe) {
        LOG.warn("System directory is garbled. Failed to read file "
                 + restartFile);
        LOG.warn("Jobtracker recovery is not possible with garbled"
                 + " system directory! Please delete the system directory and"
                 + " restart the jobtracker. Note that deleting the system"
                 + " directory will result in loss of all the running jobs.");
        throw new RuntimeException(ioe);
      } finally {
        if (in != null) {
          in.close();
        }
      }

      // Write back the new restart count and rename the old info file
      //TODO This is similar to jobhistory recovery, maybe this common code

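      // Test code: after asserting the file length, open the file and verify its data with a
      // positioned readFully(0, actual, 0, size), which reads at an absolute offset without
      // moving the stream position.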
                 " bytes",
                 dfs.getFileStatus(filepath).getLen() == size);

      // verify that there is enough data to read.
      System.out.println("File size is good. Now validating sizes from datanodes...");
      FSDataInputStream stmin = dfs.open(filepath);
      stmin.readFully(0, actual, 0, size);
      stmin.close();
    }
    finally {
      try {
        if (cluster != null) {cluster.shutdown();}
      } catch (Exception e) {
