Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVWriter
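The snippets below are excerpts from the Accumulo codebase and its tests. As orientation, here is a minimal sketch of the writer lifecycle the excerpts share, assembled only from calls that appear on this page (the 1.x-era API); the output path and key/value contents are placeholders rather than anything taken from the excerpts.

    // Sketch only: "/tmp/example.rf" and the key/value contents are placeholders.
    Configuration conf = CachedConfiguration.getInstance();
    FileSystem fs = FileSystem.get(conf);
    AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();

    FileSKVWriter writer = FileOperations.getInstance().openWriter("/tmp/example.rf", fs, conf, acuConf);
    writer.startDefaultLocalityGroup(); // a locality group must be started before the first append
    writer.append(new Key("row", "cf", "cq"), new Value("v".getBytes())); // keys must be appended in sorted order
    writer.close();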


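From the CreateEmpty utility: a writer is opened for each file argument and closed immediately, producing empty RFiles.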
    opts.parseArgs(CreateEmpty.class.getName(), args);

    for (String arg : opts.files) {
      Path path = new Path(arg);
      log.info("Writing to file '" + path + "'");
      FileSKVWriter writer = new RFileOperations().openWriter(arg, path.getFileSystem(conf), conf,
          DefaultConfiguration.getDefaultConfiguration(), opts.codec);
      writer.close();
    }
  }


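From a test-ingest routine: the generated rows go either straight into an RFile through a FileSKVWriter (when an output file is given) or into a live table through a BatchWriter, with identical key construction in both branches.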
    long bytesWritten = 0;

    createTable(connector, opts);
   
    BatchWriter bw = null;
    FileSKVWriter writer = null;
   
    if (opts.outputFile != null) {
      Configuration conf = CachedConfiguration.getInstance();
      FileSystem fs = FileSystem.get(conf);
      writer = FileOperations.getInstance().openWriter(opts.outputFile + "." + RFile.EXTENSION, fs, conf,
          AccumuloConfiguration.getDefaultConfiguration());
      writer.startDefaultLocalityGroup();
    } else {
      bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
      connector.securityOperations().changeUserAuthorizations(opts.principal, AUTHS);
    }
    Text labBA = new Text(opts.columnVisibility.getExpression());
   
    long startTime = System.currentTimeMillis();
    for (int i = 0; i < opts.rows; i++) {
      int rowid;
      if (opts.stride > 0) {
        rowid = ((i % opts.stride) * (opts.rows / opts.stride)) + (i / opts.stride);
      } else {
        rowid = i;
      }
     
      Text row = generateRow(rowid, opts.startRow);
      Mutation m = new Mutation(row);
      for (int j = 0; j < opts.cols; j++) {
        Text colf = new Text(opts.columnFamily);
        Text colq = new Text(FastFormat.toZeroPaddedString(j, 7, 10, COL_PREFIX));
       
        if (writer != null) {
          Key key = new Key(row, colf, colq, labBA);
          if (opts.timestamp >= 0) {
            key.setTimestamp(opts.timestamp);
          } else {
            key.setTimestamp(startTime);
          }
         
          key.setDeleted(opts.delete);
         
          bytesWritten += key.getSize();
         
          if (opts.delete) {
            writer.append(key, new Value(new byte[0]));
          } else {
            byte[] value;
            if (opts.random != null) {
              value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
            } else {
              value = bytevals[j % bytevals.length];
            }
           
            Value v = new Value(value);
            writer.append(key, v);
            bytesWritten += v.getSize();
          }
         
        } else {
          Key key = new Key(row, colf, colq, labBA);
          bytesWritten += key.getSize();
         
          if (opts.delete) {
            if (opts.timestamp >= 0)
              m.putDelete(colf, colq, opts.columnVisibility, opts.timestamp);
            else
              m.putDelete(colf, colq, opts.columnVisibility);
          } else {
            byte[] value;
            if (opts.random != null) {
              value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
            } else {
              value = bytevals[j % bytevals.length];
            }
            bytesWritten += value.length;
           
            if (opts.timestamp >= 0) {
              m.put(colf, colq, opts.columnVisibility, opts.timestamp, new Value(value, true));
            } else {
              m.put(colf, colq, opts.columnVisibility, new Value(value, true));
             
            }
          }
        }
       
      }
      if (bw != null)
        bw.addMutation(m);
     
    }
   
    if (writer != null) {
      writer.close();
    } else if (bw != null) {
      try {
        bw.close();
      } catch (MutationsRejectedException e) {
        if (e.getAuthorizationFailuresMap().size() > 0) {

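A bulk-import test fixture: five key/value pairs are written to a temporary RFile, and the import and failure paths are bundled for the test to use.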
    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().openWriter(tempFile.toString(), fs, defaultConf, AccumuloConfiguration.getDefaultConfiguration());
    writer.startDefaultLocalityGroup();
    List<Pair<Key,Value>> keyVals = new ArrayList<Pair<Key,Value>>();
    for (int i = 0; i < 5; i++) {
      keyVals.add(new Pair<Key,Value>(new Key("a" + i, "b" + i, "c" + i, new ColumnVisibility(""), 1000L + i), new Value(Integer.toString(i).getBytes())));
    }
    for (Pair<Key,Value> keyVal : keyVals) {
      writer.append(keyVal.getFirst(), keyVal.getSecond());
    }
    writer.close();
    ImportTestFilesAndData files = new ImportTestFilesAndData();
    files.failurePath = failures;
    files.importPath = tempFile.getParent();
    files.keyVals = keyVals;
    return files;

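Writing an RFile with known rows to verify that BulkImporter.findOverlappingTablets maps the file onto the five expected tablets.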
    MockTabletLocator locator = new MockTabletLocator();
    FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
    AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
    String file = "target/testFile.rf";
    fs.delete(new Path(file), true);
    FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, fs.getConf(), acuConf);
    writer.startDefaultLocalityGroup();
    Value empty = new Value(new byte[] {});
    writer.append(new Key("a", "cf", "cq"), empty);
    writer.append(new Key("a", "cf", "cq1"), empty);
    writer.append(new Key("a", "cf", "cq2"), empty);
    writer.append(new Key("a", "cf", "cq3"), empty);
    writer.append(new Key("a", "cf", "cq4"), empty);
    writer.append(new Key("a", "cf", "cq5"), empty);
    writer.append(new Key("d", "cf", "cq"), empty);
    writer.append(new Key("d", "cf", "cq1"), empty);
    writer.append(new Key("d", "cf", "cq2"), empty);
    writer.append(new Key("d", "cf", "cq3"), empty);
    writer.append(new Key("d", "cf", "cq4"), empty);
    writer.append(new Key("d", "cf", "cq5"), empty);
    writer.append(new Key("dd", "cf", "cq1"), empty);
    writer.append(new Key("ichabod", "cf", "cq"), empty);
    writer.append(new Key("icky", "cf", "cq1"), empty);
    writer.append(new Key("iffy", "cf", "cq2"), empty);
    writer.append(new Key("internal", "cf", "cq3"), empty);
    writer.append(new Key("is", "cf", "cq4"), empty);
    writer.append(new Key("iterator", "cf", "cq5"), empty);
    writer.append(new Key("xyzzy", "cf", "cq"), empty);
    writer.close();
    VolumeManager vm = VolumeManagerImpl.get(acuConf);
    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(acuConf, vm, locator, new Path(file), credentials);
    Assert.assertEquals(5, overlaps.size());
    Collections.sort(overlaps);
    Assert.assertEquals(new KeyExtent(tableId, new Text("a"), null), overlaps.get(0).tablet_extent);

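From table initialization: the root tablet file is seeded with the directory, time, and prevrow entries for the metadata table's tablet and the default tablet.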
    // populate the root tablet with info about the default tablet
    // the root tablet contains the key extent and locations of all the
    // metadata tablets
    String initRootTabFile = rootTablet + "/00000_00000." + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
    FileSystem ns = fs.getVolumeByPath(new Path(initRootTabFile)).getFileSystem();
    FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, ns, ns.getConf(), AccumuloConfiguration.getDefaultConfiguration());
    mfw.startDefaultLocalityGroup();

    Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));

    // table tablet's directory
    Key tableDirKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
    mfw.append(tableDirKey, new Value(tableMetadataTabletDir.getBytes(Constants.UTF8)));

    // table tablet time
    Key tableTimeKey = new Key(tableExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
    mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));

    // table tablet's prevrow
    Key tablePrevRowKey = new Key(tableExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
    mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(null));

    // ----------] default tablet info
    Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(MetadataTable.ID), null));

    // default's directory
    Key defaultDirKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnFamily(),
        TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN.getColumnQualifier(), 0);
    mfw.append(defaultDirKey, new Value(defaultMetadataTabletDir.getBytes(Constants.UTF8)));

    // default's time
    Key defaultTimeKey = new Key(defaultExtent, TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnFamily(),
        TabletsSection.ServerColumnFamily.TIME_COLUMN.getColumnQualifier(), 0);
    mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));

    // default's prevrow
    Key defaultPrevRowKey = new Key(defaultExtent, TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnFamily(),
        TabletsSection.TabletColumnFamily.PREV_ROW_COLUMN.getColumnQualifier(), 0);
    mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(MetadataSchema.TabletsSection.getRange().getEndKey().getRow()));

    mfw.close();

    // create table and default tablets directories
    for (String s : Arrays.asList(tableMetadataTabletDir, defaultMetadataTabletDir)) {
      Path dir = new Path(s);
      try {

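Merging the indexes of several input RFiles into one output file, keeping only index keys that fall in the (prevEndRow, endRow] range and closing every reader and the writer in the finally block.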
     
      String newMapFile = String.format("%s/%04d.%s", newDir, count++, RFile.EXTENSION);
     
      outFiles.add(newMapFile);
      FileSystem ns = fs.getVolumeByPath(new Path(newMapFile)).getFileSystem();
      FileSKVWriter writer = new RFileOperations().openWriter(newMapFile, ns, ns.getConf(), acuConf);
      writer.startDefaultLocalityGroup();
      List<SortedKeyValueIterator<Key,Value>> iters = new ArrayList<SortedKeyValueIterator<Key,Value>>(inFiles.size());
     
      FileSKVIterator reader = null;
      try {
        for (String s : inFiles) {
          ns = fs.getVolumeByPath(new Path(s)).getFileSystem();
          reader = FileOperations.getInstance().openIndex(s, ns, ns.getConf(), acuConf);
          iters.add(reader);
        }
       
        MultiIterator mmfi = new MultiIterator(iters, true);
       
        while (mmfi.hasTop()) {
          Key key = mmfi.getTopKey();
         
          boolean gtPrevEndRow = prevEndRow == null || key.compareRow(prevEndRow) > 0;
          boolean lteEndRow = endRow == null || key.compareRow(endRow) <= 0;
         
          if (gtPrevEndRow && lteEndRow)
            writer.append(key, new Value(new byte[0]));
         
          if (!lteEndRow)
            break;
         
          mmfi.next();
        }
      } finally {
        try {
          if (reader != null)
            reader.close();
        } catch (IOException e) {
          log.error(e, e);
        }
       
        for (SortedKeyValueIterator<Key,Value> r : iters)
          try {
            if (r != null)
              ((FileSKVIterator) r).close();
          } catch (IOException e) {
            // continue closing
            log.error(e, e);
          }
       
        try {
          writer.close();
        } catch (IOException e) {
          log.error(e, e);
          throw e;
        }
      }

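Dumping an in-memory map to a temporary RFile: each configured locality group is written with startNewLocalityGroup, then everything else lands in the default locality group.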
        String tmpFile = memDumpDir + "/memDump" + UUID.randomUUID() + "." + RFile.EXTENSION;
       
        Configuration newConf = new Configuration(conf);
        newConf.setInt("io.seqfile.compress.blocksize", 100000);
       
        FileSKVWriter out = new RFileOperations().openWriter(tmpFile, fs, newConf, ServerConfiguration.getSiteConfiguration());
       
        InterruptibleIterator iter = map.skvIterator();
      
        HashSet<ByteSequence> allfams = new HashSet<ByteSequence>();

        for (Entry<String,Set<ByteSequence>> entry : lggroups.entrySet()) {
          allfams.addAll(entry.getValue());
          out.startNewLocalityGroup(entry.getKey(), entry.getValue());
          iter.seek(new Range(), entry.getValue(), true);
          dumpLocalityGroup(out, iter);
        }
       
        out.startDefaultLocalityGroup();
        iter.seek(new Range(), allfams, false);
      
        dumpLocalityGroup(out, iter);
       
        out.close();
       
        log.debug("Created mem dump file " + tmpFile);
       
        memDumpFile = tmpFile;
       

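A major-compaction task: the writer honors the table's locality-group configuration when supported, and, per the inline comment, the reference is nulled out before close() so a failed close is not attempted again in the finally block.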

  @Override
  public CompactionStats call() throws IOException, CompactionCanceledException {

    FileSKVWriter mfw = null;

    CompactionStats majCStats = new CompactionStats();

    boolean remove = runningCompactions.add(this);

    clearStats();

    String oldThreadName = Thread.currentThread().getName();
    String newThreadName = "MajC compacting " + extent.toString() + " started " + dateFormatter.format(new Date()) + " file: " + outputFile;
    Thread.currentThread().setName(newThreadName);
    thread = Thread.currentThread();
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      FileSystem ns = this.fs.getVolumeByPath(outputFile.path()).getFileSystem();
      mfw = fileFactory.openWriter(outputFile.path().toString(), ns, ns.getConf(), acuTableConf);

      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(acuTableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }

      long t1 = System.currentTimeMillis();

      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();

      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          setLocalityGroup(entry.getKey());
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }

      setLocalityGroup("");
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);

      long t2 = System.currentTimeMillis();

      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail

      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile.path().toString(), false, ns, ns.getConf(), acuTableConf);
        openReader.close();

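From a shell integration test: even and odd rows are split across two RFiles, bulk imported with the importdirectory command, and spot-checked with scans.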
    String odd = new File(importDir, "odd.rf").toString();
    File errorsDir = new File(getFolder(), "errors");
    errorsDir.mkdir();
    fs.mkdirs(new Path(errorsDir.toString()));
    AccumuloConfiguration aconf = AccumuloConfiguration.getDefaultConfiguration();
    FileSKVWriter evenWriter = FileOperations.getInstance().openWriter(even, fs, conf, aconf);
    evenWriter.startDefaultLocalityGroup();
    FileSKVWriter oddWriter = FileOperations.getInstance().openWriter(odd, fs, conf, aconf);
    oddWriter.startDefaultLocalityGroup();
    long timestamp = System.currentTimeMillis();
    Text cf = new Text("cf");
    Text cq = new Text("cq");
    Value value = new Value("value".getBytes());
    for (int i = 0; i < 100; i += 2) {
      Key key = new Key(new Text(String.format("%08d", i)), cf, cq, timestamp); // zero-padded so the row scans below match
      evenWriter.append(key, value);
      key = new Key(new Text(String.format("%08d", i + 1)), cf, cq, timestamp);
      oddWriter.append(key, value);
    }
    evenWriter.close();
    oddWriter.close();
    assertEquals(0, ts.shell.getExitCode());
    ts.exec("createtable " + table, true);
    ts.exec("importdirectory " + importDir + " " + errorsDir + " true", true);
    ts.exec("scan -r 00000000", true, "00000000", true);
    ts.exec("scan -r 00000099", true, "00000099", true);

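From a proxy client test: a one-entry RFile is bulk imported with importDirectory and read back through a scanner.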
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    String scanner = client.createScanner(creds, "bar", null);
    ScanResult more = client.nextK(scanner, 100);
    client.closeScanner(scanner);
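The compaction example above also touches the read side of this API when it verifies the file it just wrote. As a closing sketch under the same assumptions as the lifecycle sketch at the top (placeholder path; the fs and acuConf objects from that sketch), reading the entries back looks like this; the boolean argument seeks to the beginning of the file, as in the verification step above.

    // Sketch only: reads back the placeholder file written in the opening sketch.
    FileSKVIterator reader = FileOperations.getInstance().openReader("/tmp/example.rf", true, fs, fs.getConf(), acuConf);
    try {
      while (reader.hasTop()) {
        System.out.println(reader.getTopKey() + " -> " + reader.getTopValue());
        reader.next();
      }
    } finally {
      reader.close();
    }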
