Package org.apache.accumulo.core.file

Examples of org.apache.accumulo.core.file.FileSKVWriter
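
Most of the examples below follow the same basic pattern: obtain a FileSKVWriter from FileOperations.getInstance().openWriter(...), start a locality group (usually startDefaultLocalityGroup()), append keys in sorted order, and close the writer. The sketch below shows that pattern in isolation. It is a minimal illustration assembled from the snippets on this page, not code taken from Accumulo itself: the method name writeExampleRFile and the row/column values are made up, and the exact openWriter signature varies between Accumulo versions, so treat the arguments as an assumption.

    import java.io.IOException;

    import org.apache.accumulo.core.conf.AccumuloConfiguration;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.file.FileOperations;
    import org.apache.accumulo.core.file.FileSKVWriter;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    // Minimal sketch: write five sorted key/value pairs to a new Accumulo data file.
    static void writeExampleRFile(FileSystem fs, Configuration conf, String file) throws IOException {
      FileSKVWriter writer = FileOperations.getInstance().openWriter(file, fs, conf,
          AccumuloConfiguration.getDefaultConfiguration());
      try {
        writer.startDefaultLocalityGroup();   // required before the first append
        for (int i = 0; i < 5; i++) {
          // keys must be appended in sorted order; "row0".."row4" with a fixed
          // family and qualifier already sort correctly
          writer.append(new Key("row" + i, "cf", "cq"),
              new Value(Integer.toString(i).getBytes()));
        }
      } finally {
        writer.close();                       // finalize the file so a reader can open it
      }
    }

A matching FileSKVIterator can then be obtained with FileOperations.getInstance().openReader(...), as the compaction examples below do to verify their output.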


    // Create a small test data file for a bulk-import test and record the expected
    // key/value pairs so the imported data can be verified later.
    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().openWriter(tempFile.toString(), fs, defaultConf, AccumuloConfiguration.getDefaultConfiguration());
    writer.startDefaultLocalityGroup();
    List<Pair<Key,Value>> keyVals = new ArrayList<Pair<Key,Value>>();
    for (int i = 0; i < 5; i++) {
      keyVals.add(new Pair<Key,Value>(new Key("a" + i, "b" + i, "c" + i, new ColumnVisibility(""), 1000L + i), new Value(Integer.toString(i).getBytes())));
    }
    for (Pair<Key,Value> keyVal : keyVals) {
      writer.append(keyVal.getFirst(), keyVal.getSecond());
    }
    writer.close();
    ImportTestFilesAndData files = new ImportTestFilesAndData();
    files.failurePath = failures;
    files.importPath = tempFile.getParent();
    files.keyVals = keyVals;
    return files;


  }
 
  // Major-compact the given files into a single temporary output file, writing each
  // configured locality group and then the default locality group, and verify that
  // the result can be re-opened before declaring success.
  private MajorCompactionStats compactFiles(Configuration conf, FileSystem fs, Set<String> filesToCompact, boolean propogateDeletes, KeyExtent extent,
      String compactTmpName) throws IOException, MajorCompactionCanceledException {
   
    FileSKVWriter mfw = null;
   
    MajorCompactionStats majCStats = new MajorCompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = AccumuloConfiguration.getTableConfiguration(HdfsZooInstance.getInstance().getInstanceID(), extent.getTableId()
          .toString());
      mfw = fileFactory.openWriter(compactTmpName, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, conf, fs, filesToCompact, propogateDeletes, extent, compactTmpName, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(compactTmpName, false, fs, conf, tableConf);
        openReader.close();

    // Bulk-import test: write a single-entry RFile, import the directory that
    // contains it, then scan the table to verify the data arrived.
    // table exists
    assertTrue(client.tableExists(creds, "bar"));
    assertFalse(client.tableExists(creds, "test"));
    // bulk import
    String filename = dir + "/bulk/import/rfile.rf";
    FileSKVWriter writer = FileOperations.getInstance().openWriter(filename, fs, fs.getConf(), DefaultConfiguration.getInstance());
    writer.startDefaultLocalityGroup();
    writer.append(new org.apache.accumulo.core.data.Key(new Text("a"), new Text("b"), new Text("c")), new Value("value".getBytes()));
    writer.close();
    fs.mkdirs(new Path(dir + "/bulk/fail"));
    client.importDirectory(creds, "bar", dir + "/bulk/import", dir + "/bulk/fail", true);
    scanner = client.createScanner(creds, "bar", null);
    more = client.nextK(scanner, 100);
    client.closeScanner(scanner);

    // Write several RFile "parts" covering consecutive row ranges, bulk-import the
    // directory containing them, and then check the failures directory.
    List<Integer> rows = new ArrayList<Integer>(startRows);
    rows.add(LOTS);
   
    for (int i = 0; i < parts; i++) {
      String fileName = dir + "/" + String.format("part_%d.", i) + RFile.EXTENSION;
      FileSKVWriter f = FileOperations.getInstance().openWriter(fileName, fs, fs.getConf(), defaultConfiguration);
      f.startDefaultLocalityGroup();
      int start = rows.get(i);
      int end = rows.get(i + 1);
      for (int j = start; j < end; j++) {
        Text row = new Text(String.format(FMT, j));
        for (Column col : COLNAMES) {
          f.append(new Key(row, col.getColumnFamily(), col.getColumnQualifier()), value);
        }
        f.append(new Key(row, MARKER_CF, new Text(markerColumnQualifier)), ONE);
      }
      f.close();
    }
    state.getConnector().tableOperations().importDirectory(Setup.getTableName(), dir.toString(), fail.toString(), true);
    fs.delete(dir, true);
    FileStatus[] failures = fs.listStatus(fail);
    if (failures != null && failures.length > 0) {

  }
 
  // Compaction entry point: write each configured locality group, then the default
  // group, to the output file and verify that the result can be re-opened.
  @Override
  public CompactionStats call() throws IOException, CompactionCanceledException {
   
    FileSKVWriter mfw = null;
   
    CompactionStats majCStats = new CompactionStats();
   
    try {
      FileOperations fileFactory = FileOperations.getInstance();
      AccumuloConfiguration tableConf = ServerConfiguration.getTableConfiguration(extent.getTableId().toString());
      mfw = fileFactory.openWriter(outputFile, fs, conf, tableConf);
     
      Map<String,Set<ByteSequence>> lGroups;
      try {
        lGroups = LocalityGroupUtil.getLocalityGroups(tableConf);
      } catch (LocalityGroupConfigurationError e) {
        throw new IOException(e);
      }
     
      long t1 = System.currentTimeMillis();
     
      HashSet<ByteSequence> allColumnFamilies = new HashSet<ByteSequence>();
     
      if (mfw.supportsLocalityGroups()) {
        for (Entry<String,Set<ByteSequence>> entry : lGroups.entrySet()) {
          compactLocalityGroup(entry.getKey(), entry.getValue(), true, mfw, majCStats);
          allColumnFamilies.addAll(entry.getValue());
        }
      }
     
      compactLocalityGroup(null, allColumnFamilies, false, mfw, majCStats);
     
      long t2 = System.currentTimeMillis();
     
      FileSKVWriter mfwTmp = mfw;
      mfw = null; // set this to null so we do not try to close it again in finally if the close fails
      mfwTmp.close(); // if the close fails it will cause the compaction to fail
     
      // Verify the file, since hadoop 0.20.2 sometimes lies about the success of close()
      try {
        FileSKVIterator openReader = fileFactory.openReader(outputFile, false, fs, conf, tableConf);
        openReader.close();

      // Ingest test: generate rows and columns, writing them either directly to a map
      // file / RFile through a FileSKVWriter or to a live table through a BatchWriter.
      Random random = new Random();
     
      long bytesWritten = 0;
     
      BatchWriter bw = null;
      FileSKVWriter writer = null;
     
      rootCredentials = new AuthInfo(username, ByteBuffer.wrap(passwd.getBytes()), instance.getInstanceID());
      if (ingestArgs.outputToMapFile) {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        writer = FileOperations.getInstance().openWriter(ingestArgs.outputFile + "." + Constants.MAPFILE_EXTENSION, fs, conf,
            AccumuloConfiguration.getDefaultConfiguration());
        writer.startDefaultLocalityGroup();
      } else if (ingestArgs.outputToRFile) {
        Configuration conf = CachedConfiguration.getInstance();
        FileSystem fs = FileSystem.get(conf);
        writer = FileOperations.getInstance().openWriter(ingestArgs.outputFile + "." + RFile.EXTENSION, fs, conf,
            AccumuloConfiguration.getDefaultConfiguration());
        writer.startDefaultLocalityGroup();
      } else {
        Connector connector = instance.getConnector(rootCredentials.user, rootCredentials.password);
        bw = connector.createBatchWriter("test_ingest", 20000000L, 60000L, 10);
      }
     
      Authenticator authenticator = ZKAuthenticator.getInstance();
      authenticator.changeAuthorizations(rootCredentials, rootCredentials.user, AUTHS);
      ColumnVisibility le = new ColumnVisibility("L1&L2&G1&GROUP2");
      Text labBA = new Text(le.getExpression());
     
      // int step = 100;
     
      long startTime = System.currentTimeMillis();
      for (int i = 0; i < ingestArgs.rows; i++) {
       
        int rowid;
       
        if (ingestArgs.stride > 0) {
          rowid = ((i % ingestArgs.stride) * (ingestArgs.rows / ingestArgs.stride)) + (i / ingestArgs.stride);
        } else {
          rowid = i;
        }
       
        Text row = generateRow(rowid, ingestArgs.startRow);
        Mutation m = new Mutation(row);
        for (int j = 0; j < ingestArgs.cols; j++) {
          Text colf = new Text(ingestArgs.columnFamily);
          Text colq = new Text(FastFormat.toZeroPaddedString(j, 5, 10, COL_PREFIX));
         
          if (writer != null) {
            Key key = new Key(row, colf, colq, labBA);
            if (ingestArgs.hasTimestamp) {
              key.setTimestamp(ingestArgs.timestamp);
            } else {
              key.setTimestamp(System.currentTimeMillis());
            }
           
            key.setDeleted(ingestArgs.delete);
           
            bytesWritten += key.getSize();
           
            if (ingestArgs.delete) {
              writer.append(key, new Value(new byte[0]));
            } else {
              byte[] value;
              if (ingestArgs.random) {
                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
             
              Value v = new Value(value);
              writer.append(key, v);
              bytesWritten += v.getSize();
            }
           
          } else {
            Key key = new Key(row, colf, colq, labBA);
            bytesWritten += key.getSize();
           
            if (ingestArgs.delete) {
              if (ingestArgs.hasTimestamp)
                m.putDelete(colf, colq, le, ingestArgs.timestamp);
              else
                m.putDelete(colf, colq, le);
            } else {
              byte[] value;
              if (ingestArgs.random) {
                value = genRandomValue(random, randomValue, ingestArgs.seed, rowid + ingestArgs.startRow, j);
              } else {
                value = bytevals[j % bytevals.length];
              }
              bytesWritten += value.length;
             
              if (ingestArgs.hasTimestamp) {
                m.put(colf, colq, le, ingestArgs.timestamp, new Value(value, true));
              } else {
                m.put(colf, colq, le, new Value(value, true));
               
              }
            }
          }
         
        }
        if (bw != null)
          bw.addMutation(m);
       
      }
     
      if (writer != null) {
        writer.close();
      } else if (bw != null) {
        try {
          bw.close();
        } catch (MutationsRejectedException e) {
          if (e.getAuthorizationFailures().size() > 0) {

      // populate the root tablet with info about the default tablet
      // the root tablet contains the key extent and locations of all the
      // metadata tablets
      String initRootTabFile = ServerConstants.getMetadataTableDir() + "/root_tablet/00000_00000."
          + FileOperations.getNewFileExtension(AccumuloConfiguration.getDefaultConfiguration());
      FileSKVWriter mfw = FileOperations.getInstance().openWriter(initRootTabFile, fs, conf, AccumuloConfiguration.getDefaultConfiguration());
      mfw.startDefaultLocalityGroup();
     
      // -----------] root tablet info
      Text rootExtent = Constants.ROOT_TABLET_EXTENT.getMetadataEntry();
     
      // root's directory
      Key rootDirKey = new Key(rootExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(rootDirKey, new Value("/root_tablet".getBytes()));
     
      // root's prev row
      Key rootPrevRowKey = new Key(rootExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
      mfw.append(rootPrevRowKey, new Value(new byte[] {0}));
     
      // ----------] table tablet info
      Text tableExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));
     
      // table tablet's directory
      Key tableDirKey = new Key(tableExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(), Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(tableDirKey, new Value(Constants.TABLE_TABLET_LOCATION.getBytes()));
     
      // table tablet time
      Key tableTimeKey = new Key(tableExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
      mfw.append(tableTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
      // table tablet's prevrow
      Key tablePrevRowKey = new Key(tableExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(), Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(),
          0);
      mfw.append(tablePrevRowKey, KeyExtent.encodePrevEndRow(new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null))));
     
      // ----------] default tablet info
      Text defaultExtent = new Text(KeyExtent.getMetadataEntry(new Text(Constants.METADATA_TABLE_ID), null));
     
      // default's directory
      Key defaultDirKey = new Key(defaultExtent, Constants.METADATA_DIRECTORY_COLUMN.getColumnFamily(),
          Constants.METADATA_DIRECTORY_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultDirKey, new Value(Constants.DEFAULT_TABLET_LOCATION.getBytes()));
     
      // default's time
      Key defaultTimeKey = new Key(defaultExtent, Constants.METADATA_TIME_COLUMN.getColumnFamily(), Constants.METADATA_TIME_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultTimeKey, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes()));
     
      // default's prevrow
      Key defaultPrevRowKey = new Key(defaultExtent, Constants.METADATA_PREV_ROW_COLUMN.getColumnFamily(),
          Constants.METADATA_PREV_ROW_COLUMN.getColumnQualifier(), 0);
      mfw.append(defaultPrevRowKey, KeyExtent.encodePrevEndRow(Constants.METADATA_RESERVED_KEYSPACE_START_KEY.getRow()));
     
      mfw.close();
    }
   
    // create table and default tablets directories
    try {
      fstat = fs.getFileStatus(defaultMetadataTablet);

    // A variant of the bulk-import test helper shown in the first example above.
    fs.deleteOnExit(failures);
    fs.delete(failures, true);
    fs.delete(tempFile, true);
    fs.mkdirs(failures);
    fs.mkdirs(tempFile.getParent());
    FileSKVWriter writer = FileOperations.getInstance().openWriter(
        tempFile.toString(), fs, defaultConf,
        AccumuloConfiguration.getDefaultConfiguration());
    // Start the default locality group before appending, as in the first example
    // above; an RFile writer will not accept appends until a locality group is started.
    writer.startDefaultLocalityGroup();
    List<Pair<Key, Value>> keyVals = new ArrayList<Pair<Key, Value>>();
    for (int i = 0; i < 5; i++) {
      keyVals.add(new Pair<Key, Value>(new Key("a" + i, "b" + i, "c" + i,
          new ColumnVisibility(""), 1000L + i), new Value(Integer.toString(i)
          .getBytes())));
    }
    for (Pair<Key, Value> keyVal : keyVals) {
      writer.append(keyVal.getFirst(), keyVal.getSecond());
    }
    writer.close();
    ImportTestFilesAndData files = new ImportTestFilesAndData();
    files.failurePath = failures;
    files.importPath = tempFile.getParent();
    files.keyVals = keyVals;
    return files;

          keys.add(k);
        }
        // Write the collected keys to an RFile in a temporary directory, then
        // bulk-import that directory into the table.
        Path dir = new Path("/tmp", "bulk_" + UUID.randomUUID().toString());
        Path fail = new Path(dir.toString() + "_fail");
        FileSystem fs = SecurityHelper.getFs(state);
        FileSKVWriter f = FileOperations.getInstance().openWriter(dir + "/securityBulk." + RFile.EXTENSION, fs, fs.getConf(),
            AccumuloConfiguration.getDefaultConfiguration());
        f.startDefaultLocalityGroup();
        fs.mkdirs(fail);
        for (Key k : keys)
          f.append(k, new Value("Value".getBytes()));
        f.close();
        try {
          conn.tableOperations().importDirectory(tableName, dir.toString(), fail.toString(), true);
        } catch (TableNotFoundException tnfe) {
          if (tableExists)
            throw new AccumuloException("Table didn't exist when it should have: " + tableName);

    // Flush the buffered key/value pairs for one table to a new RFile and reset the buffer.
    if (buffer.size() == 0)
      return;
   
    String file = filenamePrefix + "/" + tablename + "/" + taskID + "_" + (fileCount++) + ".rf";
    // TODO get the table configuration for the given table?
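    // (other examples on this page obtain a per-table configuration via
    // ServerConfiguration.getTableConfiguration(tableId) or
    // AccumuloConfiguration.getTableConfiguration(instanceId, tableId))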
    FileSKVWriter writer = RFileOperations.getInstance().openWriter(file, fs, conf, acuconf);
   
    // forget locality groups for now, just write everything to the default
    writer.startDefaultLocalityGroup();
   
    for (Entry<Key,Value> e : buffer.entrySet()) {
      writer.append(e.getKey(), e.getValue());
    }
   
    writer.close();
   
    size -= bufferSize;
    buffer.clear();
    bufferSizes.put(tablename, 0L);
  }
