Package org.apache.hadoop.fs

Examples of org.apache.hadoop.fs.FsShell$CommandFormat
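
Every excerpt below follows the same pattern: construct an FsShell (the class behind the hadoop fs command line), hand it shell-style arguments either through its own run() method or through ToolRunner.run(), and check the integer exit code, where 0 means success. Internally FsShell relies on its CommandFormat helper to parse per-command flags (the -px and -w style options seen below), so the excerpts exercise it only indirectly through the argument strings they build. The following minimal sketch of that pattern is illustrative only; the class name FsShellSketch, the default Configuration, and the "/tmp" listing target are assumptions, not taken from the excerpts.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    public class FsShellSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // ToolRunner strips generic options (-D, -fs, ...) and then passes the
        // remaining arguments to FsShell.run(), as in the test excerpts below.
        int ret = ToolRunner.run(new FsShell(conf), new String[] { "-ls", "/tmp" });
        System.exit(ret); // 0 on success, non-zero on any error
      }
    }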


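    // Set xattrs on a path, snapshot it, copy the snapshot with "cp -px",
    // and verify that the copy preserved both xattrs.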
    hdfs.setXAttr(path, name2, value2);
    SnapshotTestHelper.createSnapshot(hdfs, path, snapshotName);
    Path snapshotCopy = new Path(path.toString() + "-copy");
    String[] argv = new String[] { "-cp", "-px", snapshotPath.toUri().toString(),
        snapshotCopy.toUri().toString() };
    int ret = ToolRunner.run(new FsShell(conf), argv);
    assertEquals("cp -px is not working on a snapshot", SUCCESS, ret);

    Map<String, byte[]> xattrs = hdfs.getXAttrs(snapshotCopy);
    assertArrayEquals(value1, xattrs.get(name1));
    assertArrayEquals(value2, xattrs.get(name2));
View Full Code Here


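        // Build the argument list for an FsShell "-cp" from source to destination
        // and run it through ToolRunner, wrapping any failure in a HiveException.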
        args.add("-cp");
        args.add(copySource);
        args.add(copyDest);

        console.printInfo("Copying " + copySource + " to " + copyDest);
        FsShell fss = new FsShell(conf);
        int ret = 0;
        try {
          ret = ToolRunner.run(fss, args.toArray(new String[0]));
        } catch (Exception e) {
          throw new HiveException(e);
        }
View Full Code Here

    }
  }

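  // Helper: run a single FsShell command via toolRun and check its exit code
  // and expected output.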
  private void FsShellRun(String cmd, int retcode, String contain)
      throws Exception {
    FsShell shell = new FsShell(new Configuration(conf));
    toolRun(shell, cmd, retcode, contain);
  }
View Full Code Here

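      // Invalidate and remove one replica of the block, then raise the file's
      // replication factor with "-setrep -w" and expect the shell to succeed.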
      DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
      bm.addToInvalidates(b.getLocalBlock(), dn);
      bm.blocksMap.removeNode(b.getLocalBlock(), dn);
     
      // increment this file's replication factor
      FsShell shell = new FsShell(conf);
      assertEquals(0, shell.run(new String[]{
          "-setrep", "-w", Integer.toString(1+REPLICATION_FACTOR), FILE_NAME}));
    } finally {
      cluster.shutdown();
    }
   
View Full Code Here

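    // Capture System.out, create a directory with two files, and check that
    // "-du" on the directory exits successfully.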
    final DistributedFileSystem dfs = (DistributedFileSystem)fs;
    PrintStream psBackup = System.out;
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    PrintStream psOut = new PrintStream(out);
    System.setOut(psOut);
    FsShell shell = new FsShell();
    shell.setConf(conf);
   
    try {
      Path myPath = new Path("/test/dir");
      assertTrue(fs.mkdirs(myPath));
      assertTrue(fs.exists(myPath));
      Path myFile = new Path("/test/dir/file");
      writeFile(fs, myFile);
      assertTrue(fs.exists(myFile));
      Path myFile2 = new Path("/test/dir/file2");
      writeFile(fs, myFile2);
      assertTrue(fs.exists(myFile2));
     
      String[] args = new String[2];
      args[0] = "-du";
      args[1] = "/test/dir";
      int val = -1;
      try {
        val = shell.run(args);
      } catch (Exception e) {
        System.err.println("Exception raised from DFSShell.run " +
                            e.getLocalizedMessage());
      }
      assertTrue(val == 0);
View Full Code Here

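      // Capture System.err and check that "-cat" and "-rm" on this path fail with
      // exit code 1, printing a readable error rather than an exception trace.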
      PrintStream tmp = new PrintStream(out);
      System.setErr(tmp);
      String[] argv = new String[2];
      argv[0] = "-cat";
      argv[1] = root.toUri().getPath();
      int ret = ToolRunner.run(new FsShell(), argv);
      assertEquals(" -cat returned 1 ", 1, ret);
      String returned = out.toString();
      assertTrue("cat does not print exceptions ",
          (returned.lastIndexOf("Exception") == -1));
      out.reset();
      argv[0] = "-rm";
      argv[1] = root.toString();
      FsShell shell = new FsShell();
      shell.setConf(conf);
      ret = ToolRunner.run(shell, argv);
      assertEquals(" -rm returned 1 ", 1, ret);
      returned = out.toString();
      out.reset();
      assertTrue("rm prints reasonable error ",
View Full Code Here

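      // Start a second MiniDFSCluster and run "-ls" against its URI from an
      // FsShell configured for the source cluster.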
      nameDir.mkdirs();
      System.setProperty("test.build.data", nameDir.toString());
      dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
      FileSystem srcFs = srcCluster.getFileSystem();
      FileSystem dstFs = dstCluster.getFileSystem();
      FsShell shell = new FsShell();
      shell.setConf(srcConf);
      // check that "-ls" works when pointed at the destination cluster's URI
      String[] argv = new String[2];
      argv[0] = "-ls";
      argv[1] = dstFs.getUri().toString() + "/";
      int ret = ToolRunner.run(shell, argv);
View Full Code Here

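      // "-text" should decompress the gzipped file and print exactly the
      // original bytes that were written to it.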
      System.setOut(new PrintStream(out));

      String[] argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      int ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals(file.toByteArray(), out.toByteArray()));

      // Create a sequence file with a gz extension, to test proper
      // container detection
      SequenceFile.Writer writer = SequenceFile.createWriter(
          conf,
          SequenceFile.Writer.file(new Path(root, "file.gz")),
          SequenceFile.Writer.keyClass(Text.class),
          SequenceFile.Writer.valueClass(Text.class));
      writer.append(new Text("Foo"), new Text("Bar"));
      writer.close();
      out = new ByteArrayOutputStream();
      System.setOut(new PrintStream(out));
      argv = new String[2];
      argv[0] = "-text";
      argv[1] = new Path(root, "file.gz").toString();
      ret = ToolRunner.run(new FsShell(conf), argv);
      assertEquals("'-text " + argv[1] + " returned " + ret, 0, ret);
      assertTrue("Output doesn't match input",
          Arrays.equals("Foo\tBar\n".getBytes(), out.toByteArray()));
      out.reset();
    } finally {
View Full Code Here

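    // Copy a directory tree out of HDFS with "-copyToLocal" and verify that
    // every file and subdirectory shows up on the local filesystem.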
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    FileSystem fs = cluster.getFileSystem();
    assertTrue("Not a HDFS: "+fs.getUri(),
               fs instanceof DistributedFileSystem);
    DistributedFileSystem dfs = (DistributedFileSystem)fs;
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      String root = createTree(dfs, "copyToLocal");

      // Verify copying the tree
      {
        try {
          assertEquals(0,
              runCmd(shell, "-copyToLocal", root + "*", TEST_ROOT_DIR));
        } catch (Exception e) {
          System.err.println("Exception raised from DFSShell.run " +
                             e.getLocalizedMessage());
        }

        File localroot = new File(TEST_ROOT_DIR, "copyToLocal");
        File localroot2 = new File(TEST_ROOT_DIR, "copyToLocal2");       
       
        File f1 = new File(localroot, "f1");
        assertTrue("Copying failed.", f1.isFile());

        File f2 = new File(localroot, "f2");
        assertTrue("Copying failed.", f2.isFile());

        File sub = new File(localroot, "sub");
        assertTrue("Copying failed.", sub.isDirectory());

        File f3 = new File(sub, "f3");
        assertTrue("Copying failed.", f3.isFile());

        File f4 = new File(sub, "f4");
        assertTrue("Copying failed.", f4.isFile());
       
        File f5 = new File(localroot2, "f1");
        assertTrue("Copying failed.", f5.isFile());       

        f1.delete();
        f2.delete();
        f3.delete();
        f4.delete();
        f5.delete();
        sub.delete();
      }
      // Verify that copying a non-existent source does not create a zero-byte
      // destination file
      {
        String[] args = {"-copyToLocal", "nosuchfile", TEST_ROOT_DIR};
        try {  
          assertEquals(1, shell.run(args));
        } catch (Exception e) {
          System.err.println("Exception raised from DFSShell.run " +
                            e.getLocalizedMessage());
        }                           
        File f6 = new File(TEST_ROOT_DIR, "nosuchfile");
View Full Code Here

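  // Exercise the "-count" command against a freshly created directory tree.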
  @Test
  public void testCount() throws Exception {
    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
    DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
    FsShell shell = new FsShell();
    shell.setConf(conf);

    try {
      String root = createTree(dfs, "count");

      // Verify the counts
View Full Code Here
