Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVWriter

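A FileSKVWriter writes sorted key/value pairs into one of Accumulo's file formats (typically an RFile). The pattern shared by all of the excerpts below is: obtain a writer from FileOperations, start a locality group, append keys in sorted order, and close the writer. Here is a minimal, self-contained sketch of that lifecycle; the output path is made up for illustration, and it assumes the same openWriter(String, FileSystem, Configuration, AccumuloConfiguration) signature used throughout the excerpts:

    import org.apache.accumulo.core.conf.AccumuloConfiguration;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVWriter;
    import org.apache.accumulo.core.file.rfile.RFile;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FileSKVWriterExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        // Open a writer for a new file; the extension selects the file format.
        FileSKVWriter writer = FileOperations.getInstance().openWriter(
            "/tmp/example." + RFile.EXTENSION, fs, conf,
            AccumuloConfiguration.getDefaultConfiguration());

        // A locality group must be started before any keys are appended.
        writer.startDefaultLocalityGroup();

        // Keys must be appended in sorted order.
        writer.append(new Key("row1", "cf", "cq1"), new Value("v1".getBytes()));
        writer.append(new Key("row1", "cf", "cq2"), new Value("v2".getBytes()));
        writer.append(new Key("row2", "cf", "cq1"), new Value("v3".getBytes()));

        writer.close();
      }
    }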

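This excerpt, from Accumulo's instance initialization, seeds the root tablet's file with the directory, time, and previous-row entries describing the metadata table's tablets: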
      // populate the root tablet with info about the default tablet
      // the root tablet contains the key extent and locations of all the
      // metadata tablets
      String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000."
          + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
      FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
      mfw.startDefaultLocalityGroup();

      // -----------] root tablet info
      Text rootExtent = Constants.ROOT_TABLET_EXTENT.getMetadataEntry();

      // root's directory
      Key rootDirKey = new Key(rootExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(rootDirKey, new Value("/root_tablet".getBytes(Constants.UTF8)));

      // root's prev row
      Key rootPrevRowKey = new Key(rootExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
      mfw.append(rootPrevRowKey, new Value(new byte[] {0}));

      // ----------] table tablet info
      Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));

      // table tablet's directory
      Key tableDirKey = new Key(tableExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(tableDirKey, new Value(Constants.TABLE_TABLET_LOCATION.getBytes(Constants.UTF8)));

      // table tablet time
      Key tableTimeKey = new Key(tableExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
      mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));

      // table tablet's prevrow
      Key tablePrevRowKey = new Key(tableExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(),
          0);
      mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null))));

      // ----------] default tablet info
      Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));

      // default's directory
      Key defaultDirKey = new Key(defaultExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(),
          Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes(Constants.UTF8)));

      // default's time
      Key defaultTimeKey = new Key(defaultExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(Constants.UTF8)));

      // default's prevrow
      Key defaultPrevRowKey = new Key(defaultExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(),
          Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));

      mfw.close();
    }

    // create table and default tablets directories
    try {
      fstat = fs.getFileStatus(defaultMetadataTablet);


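A test excerpt that writes a single-entry RFile and bulk-imports it into table "bar" through a client handle: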
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    scanner = client.createScanner(creds, "bar", null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);

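A deprecated FileOperations.openWriter implementation that adapts a Hadoop MapFile writer to the FileSKVWriter interface: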
   * @see org.apache.hadoop.io.MapFile.Writer
   */
  @Deprecated
  public FileSKVWriter openWriter(final String file, final FileSystem fs, Configuration conf, AccumuloConfiguration acuconf) throws IOException {
    final MyMapFile.Writer mfw = MapFileUtil.openMapFileWriter(acuconf, conf, fs, file);
    return new FileSKVWriter() {
     
      boolean secondCall = false;
     
      @Override
      public void append(Key key, Value value) throws IOException {

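An excerpt from a compaction task: it opens a writer for the compaction output, writes each configured locality group followed by a default group for the remaining column families, and reopens the file after close to verify it was written successfully: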
  }
 
  @Override
  public CompactionStats call() throws IOException, CompactionCanceledException {
   
    FileSKVWriter mfw = null;
   
    CompactionStats majCStats = new CompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
      mfw = fileFactory.openWriter(outputFile, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
        openReader.close();

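A test that writes keys spanning several rows into an RFile, then checks which tablets the file overlaps during bulk import: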
    MockTabletLocator locator = new MockTabletLocator();
    FileSystem fs = FileSystem.getLocal(CachedConfiguration.getInstance());
    AccumuloConfiguration acuConf = AccumuloConfiguration.getDefaultConfiguration();
    String file = "target/testFile.rf";
    fs.delete(new Path(file), true);
    FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, fs.getConf(), acuConf);
    writer.startDefaultLocalityGroup();
    Value empty = new Value(new byte[] {});
    writer.append(new Key("a", "cf", "cq"), empty);
    writer.append(new Key("a", "cf", "cq1"), empty);
    writer.append(new Key("a", "cf", "cq2"), empty);
    writer.append(new Key("a", "cf", "cq3"), empty);
    writer.append(new Key("a", "cf", "cq4"), empty);
    writer.append(new Key("a", "cf", "cq5"), empty);
    writer.append(new Key("d", "cf", "cq"), empty);
    writer.append(new Key("d", "cf", "cq1"), empty);
    writer.append(new Key("d", "cf", "cq2"), empty);
    writer.append(new Key("d", "cf", "cq3"), empty);
    writer.append(new Key("d", "cf", "cq4"), empty);
    writer.append(new Key("d", "cf", "cq5"), empty);
    writer.append(new Key("dd", "cf", "cq1"), empty);
    writer.append(new Key("ichabod", "cf", "cq"), empty);
    writer.append(new Key("icky", "cf", "cq1"), empty);
    writer.append(new Key("iffy", "cf", "cq2"), empty);
    writer.append(new Key("internal", "cf", "cq3"), empty);
    writer.append(new Key("is", "cf", "cq4"), empty);
    writer.append(new Key("iterator", "cf", "cq5"), empty);
    writer.append(new Key("xyzzy", "cf", "cq"), empty);
    writer.close();
    List<TabletLocation> overlaps = BulkImporter.findOverlappingTablets(acuConf, fs, locator, new Path(file));
    Assert.assertEquals(5, overlaps.size());
    Collections.sort(overlaps);
    Assert.assertEquals(new KeyExtent(tableId, new Text("a"), null), overlaps.get(0).tablet_extent);
    Assert.assertEquals(new KeyExtent(tableId, new Text("d"), new Text("cm")), overlaps.get(1).tablet_extent);

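A security random-walk excerpt that writes a set of keys to an RFile and bulk-imports the containing directory: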
          keys.add(k);
        }
        Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString());
        Path fail = new Path(dir.toString() + "_fail");
        FileSystem fs = SecurityHelper.getFs(state);
        FileSKVWriter f = FileOperations.getInstance().openWriter(dir + "/securityBulk." + RFile.EXTENSION, fs, fs.getConf(),
            AccumuloConfiguration.getDefaultConfiguration());
        f.startDefaultLocalityGroup();
        fs.mkdirs(fail);
        for (Key k : keys)
          f.append(k, new Value("Value".getBytes()));
        f.close();
        try {
          conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true);
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException("Table didn't exist when it should have: " + tableName);

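An output-format excerpt that wraps a FileSKVWriter in a Hadoop RecordWriter, deleting the output file on close if no data was written: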
      extension = RFile.EXTENSION;
   
    handleBlockSize(job);
    final Path file = this.getDefaultWorkFile(job, "." + extension);
   
    final FileSKVWriter out = FileOperations.getInstance().openWriter(file.toString(), file.getFileSystem(conf), conf,
        AccumuloConfiguration.getDefaultConfiguration());
    out.startDefaultLocalityGroup();
   
    return new RecordWriter<Key,Value>() {
      private boolean hasData = false;
     
      @Override
      public void write(Key key, Value value) throws IOException {
        out.append(key, value);
        hasData = true;
      }
     
      @Override
      public void close(TaskAttemptContext context) throws IOException, InterruptedException {
        out.close();
        if (!hasData)
          file.getFileSystem(conf).delete(file, false);
      }
    };
  }

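A bulk-import test that writes three RFiles covering consecutive row ranges and imports them into one table: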
   
    String dir = "/tmp/bulk_test_diff_files_89723987592";
   
    fs.delete(new Path(dir), true);
   
    FileSKVWriter writer1 = FileOperations.getInstance().openWriter(dir + "/f1." + RFile.EXTENSION, fs, conf, aconf);
    writer1.startDefaultLocalityGroup();
    writeData(writer1, 0, 333);
    writer1.close();
   
    FileSKVWriter writer2 = FileOperations.getInstance().openWriter(dir + "/f2." + RFile.EXTENSION, fs, conf, aconf);
    writer2.startDefaultLocalityGroup();
    writeData(writer2, 334, 999);
    writer2.close();
   
    FileSKVWriter writer3 = FileOperations.getInstance().openWriter(dir + "/f3." + RFile.EXTENSION, fs, conf, aconf);
    writer3.startDefaultLocalityGroup();
    writeData(writer3, 1000, 1999);
    writer3.close();
   
    bulkImport(fs, "bulkFile", dir);
   
    checkRFiles("bulkFile", 6, 6, 1, 1);
   

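An ingest utility excerpt that generates rows and either appends them to an RFile through a FileSKVWriter or sends them to a table through a BatchWriter: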
      Random random = new Random();
     
      long bytesWritten = 0;
     
      BatchWriter bw = null;
      FileSKVWriter writer = null;
     
      if (opts.outputFile != null) {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        writer = FileOperations.getInstance().openWriter(opts.outputFile + "." + RFile.EXTENSION, fs, conf,
            AccumuloConfiguration.getDefaultConfiguration());
        writer.startDefaultLocalityGroup();
      } else {
        Connector connector = opts.getConnector();
        bw = connector.createBatchWriter(opts.getTableName(), bwOpts.getBatchWriterConfig());
        connector.securityOperations().changeUserAuthorizations(opts.principal, AUTHS);
      }
      Text labBA = new Text(opts.columnVisibility.getExpression());
     
      long startTime = System.currentTimeMillis();
      for (int i = 0; i < opts.rows; i++) {
        int rowid;
        if (opts.stride > 0) {
          rowid = ((i % opts.stride) * (opts.rows / opts.stride)) + (i / opts.stride);
        } else {
          rowid = i;
        }
       
        Text row = generateRow(rowid, opts.startRow);
        Mutation m = new Mutation(row);
        for (int j = 0; j < opts.cols; j++) {
          Text colf = new Text(opts.columnFamily);
          Text colq = new Text(FastFormat.toZeroPaddedString(j, 7, 10, COL_PREFIX));
         
          if (writer != null) {
            Key key = new Key(row, colf, colq, labBA);
            if (opts.timestamp >= 0) {
              key.setTimestamp(opts.timestamp);
            } else {
              key.setTimestamp(startTime);
            }
           
            key.setDeleted(opts.delete);
           
            bytesWritten += key.getSize();
           
            if (opts.delete) {
              writer.append(key, new Value(new byte[0]));
            } else {
              byte[] value;
              if (opts.random != null) {
                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
             
              Value v = new Value(value);
              writer.append(key, v);
              bytesWritten += v.getSize();
            }
           
          } else {
            Key key = new Key(row, colf, colq, labBA);
            bytesWritten += key.getSize();
           
            if (opts.delete) {
              if (opts.timestamp >= 0)
                m.putDelete(colf, colq, opts.columnVisibility, opts.timestamp);
              else
                m.putDelete(colf, colq, opts.columnVisibility);
            } else {
              byte[] value;
              if (opts.random != null) {
                value = genRandomValue(random, randomValue, opts.random.intValue(), rowid + opts.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
              bytesWritten += value.length;
             
              if (opts.timestamp >= 0) {
                m.put(colf, colq, opts.columnVisibility, opts.timestamp, new Value(value, true));
              } else {
                m.put(colf, colq, opts.columnVisibility, new Value(value, true));
               
              }
            }
          }
         
        }
        if (bw != null)
          bw.addMutation(m);
       
      }
     
      if (writer != null) {
        writer.close();
      } else if (bw != null) {
        try {
          bw.close();
        } catch (MutationsRejectedException e) {
          if (e.getAuthorizationFailuresMap().size() > 0) {
