Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVWriter
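
This first example generates rows of test data and writes them either directly to an RFile through a FileSKVWriter, when an output file is specified, or to a live table through a BatchWriter. Note that the writer must start a locality group before any keys are appended.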


      Random random = new Random();
     
      long bytesWritten = 0;
     
      BatchWriter bw = null;
      FileSKVWriter writer = null;
     
      if (opts.outputFile != null) {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        writer = FileOperations.getInstance().openWriter(opts.outputFile + "." + RFile.EXTENSION, fs, conf,
            AccumuloConfiguration.getDefaultConfiguration());
        writer.startDefaultLocalityGroup();
      } else {
        Connector connector = opts.getConnector();
        bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
        connector.securityOperations().changeUserAuthorizations(opts.principal, AUTHS);
      }
      Text labBA = new Text(opts.columnVisibility.getExpression());
     
      long startTime = System.currentTimeMillis();
      for (int i = 0; i < opts.rows; i++) {
        int rowid;
        if (opts.stride > 0) {
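          // interleave row ids so that consecutive writes are spread across the row space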
          rowid = ((i % opts.stride) * (opts.rows / opts.stride)) + (i / opts.stride);
        } else {
          rowid = i;
        }
       
        Text row = generateRow(rowid, opts.startRow);
        Mutation m = new Mutation(row);
        for (int j = 0; j < opts.cols; j++) {
          Text colf = new Text(opts.columnFamily);
          Text colq = new Text(FastFormat.toZeroPaddedString(j, 7, 10, COL_PREFIX));
         
          if (writer != null) {
            Key key = new Key(row, colf, colq, labBA);
            if (opts.timestamp >= 0) {
              key.setTimestamp(opts.timestamp);
            } else {
              key.setTimestamp(startTime);
            }
           
            key.setDeleted(opts.delete);
           
            bytesWritten += key.getSize();
           
            if (opts.delete) {
              writer.append(key, new Value(new byte[0]));
            } else {
              byte[] value;
              if (opts.random != null) {
                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
             
              Value v = new Value(value);
              writer.append(key, v);
              bytesWritten += v.getSize();
            }
           
          } else {
            Key key = new Key(row, colf, colq, labBA);
            bytesWritten += key.getSize();
           
            if (opts.delete) {
              if (opts.timestamp >= 0)
                m.putDelete(colf, colq, opts.columnVisibility, opts.timestamp);
              else
                m.putDelete(colf, colq, opts.columnVisibility);
            } else {
              byte[] value;
              if (opts.random != null) {
                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
              bytesWritten += value.length;
             
              if (opts.timestamp >= 0) {
                m.put(colf, colq, opts.columnVisibility, opts.timestamp, new Value(value, true));
              } else {
                m.put(colf, colq, opts.columnVisibility, new Value(value, true));
               
              }
            }
          }
         
        }
        if (bw != null)
          bw.addMutation(m);
       
      }
     
      if (writer != null) {
        writer.close();
      } else if (bw != null) {
        try {
          bw.close();
        } catch (MutationsRejectedException e) {
          if (e.getAuthorizationFailuresMap().size() > 0) {
            // ...
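
This example dumps an in-memory map to a temporary RFile, iterating over the map's sorted key/value pairs and appending each one to the writer.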


        String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;
       
        Configuration newConf = new Configuration(conf);
        newConf.setInt("io.seqfile.compress.blocksize", 100000);
       
        FileSKVWriter out = new RFileOperations().openWriter(tmpFile, fs, newConf, ServerConfiguration.getSiteConfiguration());
        out.startDefaultLocalityGroup();
        InterruptibleIterator iter = map.skvIterator();
        iter.seek(new Range(), LocalityGroupUtil.EMPTY_CF_SET, false);
       
        while (iter.hasTop() && activeIters.size() > 0) {
          // RFile does not support MemKey, so the kv count is moved into the value for the RFile only.
          // There is no need to convert the MemKey to a normal Key, since the kvCount info is lost when the key is written anyway.
          Value newValue = new MemValue(iter.getTopValue(), ((MemKey) iter.getTopKey()).kvCount);
          out.append(iter.getTopKey(), newValue);
          iter.next();
        }
       
        out.close();
       
        log.debug("Created mem dump file " + tmpFile);
       
        memDumpFile = tmpFile;
       
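
This test writes a one-entry RFile, bulk imports it into the "bar" table, and scans the result to verify the import.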

    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    scanner = client.createScanner(creds, "bar", null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
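
CreateEmpty opens and immediately closes a writer for each file argument, producing valid but empty RFiles.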

    Opts opts = new Opts();
    opts.parseArgs(CreateEmpty.class.getName(), args);

    for (String arg : opts.files) {
      Path path = new Path(arg);
      FileSKVWriter writer = (new RFileOperations()).openWriter(arg, path.getFileSystem(conf), conf,
          DefaultConfiguration.getDefaultConfiguration(), opts.codec);
      writer.close();
    }
  }
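
This example partitions the row space, writes one RFile per partition plus a marker column for each row, and bulk imports the resulting directory.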

    List<Integer> rows = new ArrayList<Integer>(startRows);
    rows.add(LOTS);
   
    for (int i = 0; i < parts; i++) {
      String fileName = dir + "/" + String.format("part_%d.", i) + RFile.EXTENSION;
      FileSKVWriter f = FileOperations.getInstance().openWriter(fileName, fs, fs.getConf(), defaultConfiguration);
      f.startDefaultLocalityGroup();
      int start = rows.get(i);
      int end = rows.get(i + 1);
      for (int j = start; j < end; j++) {
        Text row = new Text(String.format(FMT, j));
        for (Column col : COLNAMES) {
          f.append(new Key(row, col.getColumnFamily(), col.getColumnQualifier()), value);
        }
        f.append(new Key(row, MARKER_CF, new Text(markerColumnQualifier)), ONE);
      }
      f.close();
    }
    state.getConnector().tableOperations().importDirectory(Setup.getTableName(), dir.toString(), fail.toString(), true);
    fs.delete(dir, true);
    FileStatus[] failures = fs.listStatus(fail);
    if (failures != null && failures.length > 0) {
      // ...
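
This security test appends a set of keys to an RFile and attempts a bulk import; a TableNotFoundException is only acceptable when the table is genuinely expected to be missing.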

          keys.add(k);
        }
        Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString());
        Path fail = new Path(dir.toString() + "_fail");
        FileSystem fs = WalkingSecurity.get(state).getFs();
        FileSKVWriter f = FileOperations.getInstance().openWriter(dir + "/securityBulk." + RFile.EXTENSION, fs, fs.getConf(),
            AccumuloConfiguration.getDefaultConfiguration());
        f.startDefaultLocalityGroup();
        fs.mkdirs(fail);
        for (Key k : keys)
          f.append(k, new Value("Value".getBytes(Constants.UTF8)));
        f.close();
        try {
          conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true);
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException("Table didn't exist when it should have: " + tableName);
          // ...
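
This shell test writes even and odd rows into two separate RFiles, imports the directory with the importdirectory command, and scans for the first and last rows to confirm that both files landed.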

    String even = new File(importDir, "even.rf").toString();
    String odd = new File(importDir, "odd.rf").toString();
    File errorsDir = folder.newFolder("errors");
    fs.mkdirs(new Path(errorsDir.toString()));
    AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
    FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
    evenWriter.startDefaultLocalityGroup();
    FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
    oddWriter.startDefaultLocalityGroup();
    long ts = System.currentTimeMillis();
    Text cf = new Text("cf");
    Text cq = new Text("cq");
    Value value = new Value("value".getBytes());
    for (int i = 0; i < 100; i += 2) {
      Key key = new Key(new Text(String.format("%8d", i)), cf, cq, ts);
      evenWriter.append(key, value);
      key = new Key(new Text(String.format("%8d", i + 1)), cf, cq, ts);
      oddWriter.append(key, value);
    }
    evenWriter.close();
    oddWriter.close();
    exec("createtable " + table, true);
    exec("importdirectory " + importDir + " " + errorsDir + " true", true);
    exec("scan -r 00000000", true, "00000000", true);
    exec("scan -r 00000099", true, "00000099", true);
    exec("deletetable -f " + table);
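
This helper builds a bulk-import fixture: it writes five key/value pairs to a temporary RFile and returns the import path, the failure path, and the expected data.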

    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().openWriter(tempFile.toString(), fs, defaultConf, AccumuloConfiguration.getDefaultConfiguration());
    writer.startDefaultLocalityGroup();
    List<Pair<Key,Value>> keyVals = new ArrayList<Pair<Key,Value>>();
    for (int i = 0; i < 5; i++) {
      keyVals.add(new Pair<Key,Value>(new Key("a" + i, "b" + i, "c" + i, new ColumnVisibility(""), 1000L + i), new Value(Integer.toString(i).getBytes())));
    }
    for (Pair<Key,Value> keyVal : keyVals) {
      writer.append(keyVal.getFirst(), keyVal.getSecond());
    }
    writer.close();
    ImportTestFilesAndData files = new ImportTestFilesAndData();
    files.failurePath = failures;
    files.importPath = tempFile.getParent();
    files.keyVals = keyVals;
    return files;
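
In this major-compaction routine, a writer is opened for the output file, each configured locality group is compacted separately when the file format supports locality groups, and the output file is re-opened after close() to verify it.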

  }
 
  @Override
  public CompactionStats call() throws IOException, CompactionCanceledException {
   
    FileSKVWriter mfw = null;
   
    CompactionStats majCStats = new CompactionStats();

    boolean remove = runningCompactions.add(this);
   
    clearStats();

    String oldThreadName = Thread.currentThread().getName();
    String newThreadName = "MajC compacting " + extent.toString() + " started " + dateFormatter.format(new Date()) + " file: " + outputFile;
    Thread.currentThread().setName(newThreadName);
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      mfw = fileFactory.openWriter(outputFile, fs, conf, acuTableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          setLocalityGroup(entry.getKey());
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      setLocalityGroup("");
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, acuTableConf);
        openReader.close();
        // ...
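
This test writes three RFiles covering disjoint row ranges (0-333, 334-999, and 1000-1999) and bulk imports them into a single table.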

   
    String dir = "/tmp/bulk_test_diff_files_89723987592";
   
    fs.delete(new Path(dir), true);
   
    FileSKVWriter writer1 = FileOperations.getInstance().openWriter(dir + "/f1." + RFile.EXTENSION, fs, conf, aconf);
    writer1.startDefaultLocalityGroup();
    writeData(writer1, 0, 333);
    writer1.close();
   
    FileSKVWriter writer2 = FileOperations.getInstance().openWriter(dir + "/f2." + RFile.EXTENSION, fs, conf, aconf);
    writer2.startDefaultLocalityGroup();
    writeData(writer2, 334, 999);
    writer2.close();
   
    FileSKVWriter writer3 = FileOperations.getInstance().openWriter(dir + "/f3." + RFile.EXTENSION, fs, conf, aconf);
    writer3.startDefaultLocalityGroup();
    writeData(writer3, 1000, 1999);
    writer3.close();
   
    bulkImport(fs, "bulkFile", dir);
   
    checkRFiles("bulkFile", 6, 6, 1, 1);
   
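
All of the examples above follow the same core lifecycle: open a writer through FileOperations (or RFileOperations), start a locality group, append keys in sorted order, and close. The following is a minimal sketch of that lifecycle against the Accumulo 1.x API shown on this page; the output path is hypothetical and exception handling is omitted.

      // Minimal FileSKVWriter lifecycle sketch (Accumulo 1.x API).
      // The path below is hypothetical; openWriter and append throw IOException.
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      FileSKVWriter writer = FileOperations.getInstance().openWriter(
          "/tmp/example." + RFile.EXTENSION, fs, conf,
          AccumuloConfiguration.getDefaultConfiguration());
      writer.startDefaultLocalityGroup();  // must be called before append()
      // Keys must be appended in sorted order.
      writer.append(new Key(new Text("row"), new Text("cf"), new Text("cq")),
          new Value("v".getBytes()));
      writer.close();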
