Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FsShell$TextRecordInputStream
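
The snippets below come from several projects' test and utility code (an HBase upgrade test and Pig's grunt shell among them) and exercise the public FsShell entry points. TextRecordInputStream itself is a private inner class that, in older Hadoop releases, FsShell uses to render SequenceFile records as text for the "-text" command. A minimal, hedged sketch of driving that code path, assuming a SequenceFile already exists at the hypothetical path /data/events.seq:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class FsShellTextExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "-text" decodes the file to stdout; for SequenceFiles this is the
        // code path backed by FsShell's TextRecordInputStream.
        int ret = ToolRunner.run(conf, new FsShell(), new String[] { "-text", "/data/events.seq" });
        System.exit(ret);
      }
    }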


    Path testdir = TEST_UTIL.getDataTestDir("TestNamespaceUpgrade");
    // Untar our test dir.
    File untar = untar(new File(testdir.toString()));
    // Now copy the untar up into hdfs so when we start hbase, we'll run from it.
    Configuration conf = TEST_UTIL.getConfiguration();
    FsShell shell = new FsShell(conf);
    FileSystem fs = FileSystem.get(conf);
    // find where hbase will root itself, so we can copy filesystem there
    Path hbaseRootDir = TEST_UTIL.getDefaultRootDirPath();
    if (!fs.isDirectory(hbaseRootDir.getParent())) {
      // mkdir at first
      fs.mkdirs(hbaseRootDir.getParent());
    }
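
The snippet above stops after ensuring the parent directory exists. A hedged sketch of how the copy into HDFS might continue, reusing the shell, untar, and hbaseRootDir values from the snippet (the "-put" destination layout is an assumption):

    static void copyUntarIntoHdfs(FsShell shell, File untar, Path hbaseRootDir) throws Exception {
      // "-put <localsrc> <dst>" uploads the untarred tree so HBase can start from it.
      int ret = shell.run(new String[] { "-put", untar.toURI().toString(),
          hbaseRootDir.toString() });
      if (ret != 0) {
        throw new IOException("-put failed with exit code " + ret);
      }
    }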


    argvs = concatAll(argvs, new String[] { group }, uris);

    // Hadoop 1.0.x
    if (!IS_HADOOP_20X) {
      Class<?> cls = ClassUtils.resolveClassName("org.apache.hadoop.fs.FsShellPermissions", config.getClass().getClassLoader());
      Object[] args = new Object[] { fs, op.getCmd(), argvs, 0, new FsShell(config) };

      Method m = ReflectionUtils.findMethod(cls, "changePermissions", FileSystem.class, String.class, String[].class, int.class, FsShell.class);
      ReflectionUtils.makeAccessible(m);
      ReflectionUtils.invokeMethod(m, null, args);
    }
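
On Hadoop 2.x the reflective call into FsShellPermissions is unnecessary; the same kind of permission command can be handed to FsShell directly. A hedged sketch of that branch, using "-chgrp" as the example (the helper name and error handling are assumptions, not the library's API):

    static void changeGroup(Configuration config, String group, String... uris) throws Exception {
      // Build "-chgrp <group> <uri>..." and run it through ToolRunner.
      String[] argv = new String[2 + uris.length];
      argv[0] = "-chgrp";
      argv[1] = group;
      System.arraycopy(uris, 0, argv, 2, uris.length);
      int ret = ToolRunner.run(config, new FsShell(), argv);
      if (ret != 0) {
        throw new IllegalStateException("-chgrp exited with code " + ret);
      }
    }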

    }
    if (!genData.getJob().isSuccessful()) {
      throw new IOException("Data generation failed!");
    }

    FsShell shell = new FsShell(conf);
    try {
      LOG.info("Changing the permissions for inputPath " + ioPath.toString());
      shell.run(new String[] {"-chmod","-R","777", ioPath.toString()});
    } catch (Exception e) {
      LOG.error("Couldnt change the file permissions " , e);
      throw new IOException(e);
    }
    LOG.info("Done.");
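
shell.run also reports failure through its exit code, not only through exceptions, so a non-zero return is worth surfacing as well. A hedged variant of the call above, assuming the same shell and ioPath:

      int exitCode = shell.run(new String[] { "-chmod", "-R", "777", ioPath.toString() });
      if (exitCode != 0) {
        // run() returned normally, but the chmod itself failed.
        throw new IOException("-chmod exited with code " + exitCode);
      }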

    assertTrue(fs.exists(new Path(finalPath, "_masterindex")));
    assertTrue(!fs.exists(new Path(finalPath, "_logs")));
    args = new String[2];
    args[0] = "-ls";
    args[1] = harPath.toString();
    FsShell shell = new FsShell(conf);
    ret = ToolRunner.run(shell, args);
    // fileb and filec
    assertTrue(ret == 0);
    Path harFilea = new Path(harPath, "a");
    Path harFileb = new Path(harPath, "b");

    assertTrue(!fs.exists(new Path(finalPath, "_logs")));
    //creation tested
    //check if the archive is same
    // do ls and cat on all the files
   
    FsShell shell = new FsShell(conf);
    args = new String[2];
    args[0] = "-ls";
    args[1] = harPath.toString();
    ret = ToolRunner.run(shell, args);
    // ls should work.
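
The comment above also calls for a cat on the archived files. A hedged sketch of that half, assuming the conf and harPath from the snippet and a file name known to be inside the archive:

    static void catHarFile(Configuration conf, Path harPath, String fileName) throws Exception {
      // "-cat" should read the file straight out of the har archive.
      String[] catArgs = { "-cat", new Path(harPath, fileName).toString() };
      assertEquals(0, ToolRunner.run(new FsShell(conf), catArgs));
    }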

      //compare sorted lsr list and sorted dst list
      final Text lsrpath = new Text();
      final FileStatus lsrstatus = new FileStatus();
      final Text dstpath = new Text();
      final Text dstfrom = new Text();
      final FsShell shell = new FsShell(conf);
      final String[] shellargs = {"-rmr", null};

      boolean hasnext = dstin.next(dstpath, dstfrom);
      for(; lsrin.next(lsrpath, lsrstatus); ) {
        int dst_cmp_lsr = dstpath.compareTo(lsrpath);
        for(; hasnext && dst_cmp_lsr < 0; ) {
          hasnext = dstin.next(dstpath, dstfrom);
          dst_cmp_lsr = dstpath.compareTo(lsrpath);
        }
       
        if (dst_cmp_lsr == 0) {
          //lsrpath exists in dst, skip it
          hasnext = dstin.next(dstpath, dstfrom);
        }
        else {
          //lsrpath does not exist, delete it
          String s = new Path(dstroot.getPath(), lsrpath.toString()).toString();
          if (shellargs[1] == null || !isAncestorPath(shellargs[1], s)) {
            shellargs[1] = s;
            int r = 0;
            try {
               r = shell.run(shellargs);
            } catch(Exception e) {
              throw new IOException("Exception from shell.", e);
            }
            if (r != 0) {
              throw new IOException("\"" + shellargs[0] + " " + shellargs[1]
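
Note that the delete uses "-rmr", which Hadoop 2.x deprecates in favor of "-rm -r". A hedged sketch of the newer spelling, reusing the shell and the computed path s from the snippet:

            String[] rmArgs = { "-rm", "-r", s };
            int rc = shell.run(rmArgs);
            if (rc != 0) {
              throw new IOException("\"-rm -r " + s + "\" returned non-zero exit code " + rc);
            }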

      Path f = TestDFSShell.writeFile(fs, new Path(root, "foo"));
     
      // Verify setrep for changing replication
      {
        String[] args = {"-setrep", "-w", "" + toREP, "" + f};
        FsShell shell = new FsShell();
        shell.setConf(conf);
        try {
          assertEquals(0, shell.run(args));
        } catch (Exception e) {
          assertTrue("-setrep " + e, false);
        }
      }
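
Because "-setrep -w" waits for the replication change to complete, the new factor can be read back and checked once the command returns. A hedged follow-up, assuming the fs, f, and toREP values used above:

      // Confirm the replication factor reported by the NameNode now matches.
      assertEquals(toREP, fs.getFileStatus(f).getReplication());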

    public void execute(Configuration conf, FileSystem fs) throws Exception {
      fs.mkdirs(new Path(TEST_ROOT));

      createFiles(fs, TEST_ROOT, fileEntries);
      final FsShell fsShell = new FsShell(conf);
      final String deletePath =  TEST_ROOT + "/" + deleteEntry.getPath();

      String[] tmpCmdOpts = StringUtils.split(cmdAndOptions);
      ArrayList<String> tmpArray = new ArrayList<String>(Arrays.asList(tmpCmdOpts));
      tmpArray.add(deletePath);
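
The snippet stops while the argument list is still being assembled. A hedged sketch of how such a list is typically turned into an argv array and executed, assuming the fsShell and tmpArray built above and that the command is expected to succeed:

      String[] cmdWithPath = tmpArray.toArray(new String[0]);
      // Run the assembled delete command and check its exit status.
      assertEquals(0, fsShell.run(cmdWithPath));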

        0 == runCommand(new String[] { "-setfattr", "/test"}));
    assertTrue(errContent.toString().contains("Must specify '-n name' or '-x name' option"));
  }

  private int runCommand(String[] commands) throws Exception {
    return ToolRunner.run(conf, new FsShell(), commands);
  }
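
The assertion above only covers the error path (neither -n nor -x given). A hedged sketch of a valid invocation through the same runCommand helper, with "user.sample" as a purely hypothetical attribute name:

  private void setAndRemoveXattr() throws Exception {
    // Set an xattr with "-n name -v value", then remove it with "-x name".
    assertEquals(0, runCommand(new String[] { "-setfattr", "-n", "user.sample", "-v", "abc", "/test" }));
    assertEquals(0, runCommand(new String[] { "-setfattr", "-x", "user.sample", "/test" }));
  }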

        mPigServer = pigServer;
       
        mDfs = mPigServer.getPigContext().getDfs();
        mLfs = mPigServer.getPigContext().getLfs();
        mConf = mPigServer.getPigContext().getProperties();
        shell = new FsShell(ConfigurationUtil.toConfiguration(mConf));
       
        // TODO: this violates the abstraction layer decoupling between
        // front end and back end and needs to be changed.
        // Right now I am not clear on how the Job Id comes from to tell
        // the back end to kill a given job (mJobClient is used only in
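
The shell field created here presumably backs grunt's "fs" command, which hands its argument list to FsShell. A hedged sketch of a direct call on that field (the listing path is an assumption):

        // Roughly the equivalent of typing "fs -ls /tmp" at the grunt prompt.
        int ret = shell.run(new String[] { "-ls", "/tmp" });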
