Examples of BatchWriter


Examples of org.apache.accumulo.core.client.BatchWriter

    Random r;
    if (opts.seed == null)
      r = new Random();
    else {
      r = new Random(opts.seed);
    }
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
   
    // reuse the ColumnVisibility object to improve performance
    ColumnVisibility cv = opts.visiblity;
  
    // Generate num unique row ids in the given range
    HashSet<Long> rowids = new HashSet<Long>(opts.num);
    while (rowids.size() < opts.num) {
      rowids.add((Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
    }
    for (long rowid : rowids) {
      Mutation m = createMutation(rowid, opts.size, cv);
      bw.addMutation(m);
    }
   
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      if (e.getAuthorizationFailuresMap().size() > 0) {
        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailuresMap().entrySet()) {
          Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
          if (secCodes == null) {
            secCodes = new HashSet<SecurityErrorCode>();
            tables.put(ke.getKey().getTableId().toString(), secCodes);
          }
          secCodes.addAll(ke.getValue());
        }
        System.err.println("ERROR : Not authorized to write to tables : " + tables);
      }
    }
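
The excerpt above generates a set of unique random row ids, writes one mutation per row, and reports per-table security failures if close() rejects any mutations. Stripped of the option parsing, the core write path is short; the sketch below shows it in isolation. The connector argument, the table name "mytable", and the BatchWriterConfig tuning values are illustrative assumptions, not part of the excerpt.

    import java.util.concurrent.TimeUnit;
    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.BatchWriterConfig;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;

    public class BatchWriterSketch {
      // Minimal write path: configure, create, add, close.
      static void writeOne(Connector connector) throws TableNotFoundException, MutationsRejectedException {
        BatchWriterConfig config = new BatchWriterConfig();
        config.setMaxMemory(10 * 1024 * 1024);     // buffer up to 10 MB of mutations client-side
        config.setMaxLatency(2, TimeUnit.MINUTES); // flush buffered mutations at least every 2 minutes
        config.setMaxWriteThreads(4);              // send to tablet servers on up to 4 threads

        BatchWriter writer = connector.createBatchWriter("mytable", config); // "mytable" is a placeholder
        try {
          Mutation m = new Mutation("row1");
          m.put("cf", "cq", new Value("value".getBytes()));
          writer.addMutation(m); // buffered client-side; no immediate RPC
        } finally {
          writer.close(); // flushes remaining data; may throw MutationsRejectedException
        }
      }
    }

Raising maxMemory and maxWriteThreads is usually the first lever for ingest throughput, since larger buffers mean fewer, bigger batches sent to the tablet servers.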

Examples of org.apache.accumulo.core.client.BatchWriter

  @Override
  public Repo<Master> call(long tid, Master master) throws Exception {
   
    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
   
    BatchWriter mbw = null;
    ZipInputStream zis = null;
   
    try {
      FileSystem fs = master.getFileSystem();
     
      mbw = master.getConnector().createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
     
      zis = new ZipInputStream(fs.open(path));
     
      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
     
      String bulkDir = new Path(tableInfo.importDir).getName();
     
      ZipEntry zipEntry;
      while ((zipEntry = zis.getNextEntry()) != null) {
        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
         
          Key key = new Key();
          Value val = new Value();
         
          Mutation m = null;
          Text currentRow = null;
          int dirCount = 0;
         
          while (true) {
            key.readFields(in);
            val.readFields(in);
           
            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
           
            Text cq;
           
            if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
              String oldName = new Path(key.getColumnQualifier().toString()).getName();
              String newName = fileNameMappings.get(oldName);
             
              if (newName == null) {
                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
                    "File " + oldName + " does not exist in import dir");
              }

              cq = new Text("/" + bulkDir + "/" + newName);
            } else {
              cq = key.getColumnQualifier();
            }
           
            if (m == null) {
              m = new Mutation(metadataRow);
              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
              currentRow = metadataRow;
            }
           
            if (!currentRow.equals(metadataRow)) {
              mbw.addMutation(m);
              m = new Mutation(metadataRow);
              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
              currentRow = metadataRow; // track the new row so only genuine row changes trigger a flush
            }
           
            m.put(key.getColumnFamily(), cq, val);
           
            if (endRow == null && Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
              mbw.addMutation(m);
              break; // it's the last column in the last row
            }
          }
         
          break;
        }
      }
     
      return new MoveExportedFiles(tableInfo);
    } catch (IOException ioe) {
      log.warn(ioe.getMessage(), ioe);
      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
          "Error reading " + path + " " + ioe.getMessage());
    } finally {
      if (zis != null) {
        try {
          zis.close();
        } catch (IOException ioe) {
          log.warn("Failed to close zip file ", ioe);
        }
      }
     
      if (mbw != null) {
        mbw.close();
      }
    }
  }
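
The import step above accumulates every column for a given metadata row into one Mutation and only hands it to the BatchWriter when the row changes, since Accumulo applies a Mutation atomically within its row. Below is a minimal sketch of that row-grouping idea, assuming the cells already live in a sorted row -> (qualifier -> value) map; the map layout and the column family "cf" are hypothetical:

    import java.util.Map.Entry;
    import java.util.SortedMap;
    import org.apache.accumulo.core.client.BatchWriter;
    import org.apache.accumulo.core.client.MutationsRejectedException;
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.data.Value;
    import org.apache.hadoop.io.Text;

    public class RowGroupingSketch {
      // Groups all cells for a row into a single Mutation so each row is updated atomically.
      static void writeGrouped(BatchWriter bw, SortedMap<Text,SortedMap<Text,Value>> cells)
          throws MutationsRejectedException {
        for (Entry<Text,SortedMap<Text,Value>> row : cells.entrySet()) {
          Mutation m = new Mutation(row.getKey());
          for (Entry<Text,Value> col : row.getValue().entrySet()) {
            m.put(new Text("cf"), col.getKey(), col.getValue());
          }
          bw.addMutation(m); // one mutation per row, handed to the writer's buffer
        }
      }
    }

Note how the excerpt closes the writer in a finally block: close() performs the final flush, so any MutationsRejectedException it throws still propagates out of the step rather than being silently lost.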

Examples of org.apache.accumulo.core.client.BatchWriter

  @Override
  public List<TableSetup> getTablesToCreate() {
    return Collections.singletonList(new TableSetup("abc"));
  }
 
  @Override
  public void run() throws Exception {
    BatchWriter bw = getConnector().createBatchWriter("abc", new BatchWriterConfig());
   
    for (int i = 0; i < 100000; i++) {
      Mutation m = new Mutation(new Text(String.format("%08d", i)));
      for (int j = 0; j < 3; j++)
        m.put(new Text("cf1"), new Text("cq" + j), new Value((i + "_" + j).getBytes(Constants.UTF8)));
     
      bw.addMutation(m);
    }
   
    bw.close();
   
    Scanner scanner = getConnector().createScanner("abc", new Authorizations());
    scanner.setBatchSize(1000);
   
    Iterator<Entry<Key,Value>> iter = scanner.iterator();
    // ... (remainder of the test omitted in this excerpt)
  }
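
The test above pushes 100,000 rows of three columns each through a BatchWriter, then reads them back with a Scanner whose batch size is raised to 1000. A common follow-up to such a write is counting what came back; here is a small sketch of that check against the same client API (the method name countEntries and the empty Authorizations are assumptions):

    import java.util.Map.Entry;
    import org.apache.accumulo.core.client.Connector;
    import org.apache.accumulo.core.client.Scanner;
    import org.apache.accumulo.core.client.TableNotFoundException;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.security.Authorizations;

    public class ScanCountSketch {
      // Counts the key/value entries in a table, e.g. to confirm a batch write landed.
      static long countEntries(Connector connector, String table) throws TableNotFoundException {
        Scanner scanner = connector.createScanner(table, new Authorizations());
        scanner.setBatchSize(1000); // fetch up to 1000 key/value pairs per server round trip
        long count = 0;
        for (Entry<Key,Value> entry : scanner)
          count++;
        return count;
      }
    }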