Package org.apache.accumulo.core.client

Examples of org.apache.accumulo.core.client.BatchWriter
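
Before the extracted examples below, here is a minimal sketch of the typical BatchWriter lifecycle: create the writer, queue mutations, optionally flush, then close. The connector, table name, and values are illustrative assumptions, not taken from the snippets that follow.

  // Minimal BatchWriter lifecycle sketch (1.5-era API, matching the snippets below).
  // The Connector `conn` and the "example" table are assumed to exist.
  static void writeOneEntry(Connector conn) throws Exception {
    BatchWriter bw = conn.createBatchWriter("example", new BatchWriterConfig());
    try {
      Mutation m = new Mutation(new Text("row1"));
      m.put(new Text("cf"), new Text("cq"), new Value("value".getBytes(Constants.UTF8)));
      bw.addMutation(m); // mutations are buffered client-side and sent in the background
      bw.flush();        // optionally force buffered mutations out to the tablet servers
    } finally {
      bw.close();        // flushes any remaining mutations and releases client resources
    }
  }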


  /*
   * Expected timeline of the concurrent scans relative to minor (Minc) and major (Majc) compactions:
   *
   * Scan 0 |------------------------------| Scan 1 |----------| Minc 1 |-----| Scan 2 |----------| Scan 3 |---------------| Minc 2 |-----| Majc 1 |-----|
   */
 
  @Override
  public void run() throws Exception {
    BatchWriter bw = getConnector().createBatchWriter("cct", new BatchWriterConfig());
    for (int i = 0; i < 50; i++) {
      Mutation m = new Mutation(new Text(String.format("%06d", i)));
      m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(Constants.UTF8)));
      bw.addMutation(m);
    }
   
    bw.flush();
   
    ScanTask st0 = new ScanTask(getConnector(), 300);
    st0.start();
   
    ScanTask st1 = new ScanTask(getConnector(), 100);
    st1.start();
   
    UtilWaitThread.sleep(50);
    getConnector().tableOperations().flush("cct", null, null, true);
   
    for (int i = 0; i < 50; i++) {
      Mutation m = new Mutation(new Text(String.format("%06d", i)));
      m.put(new Text("cf1"), new Text("cq1"), new Value("foo".getBytes(Constants.UTF8)));
      bw.addMutation(m);
    }
   
    bw.flush();
   
    ScanTask st2 = new ScanTask(getConnector(), 100);
    st2.start();
   
    st1.join();
    st2.join();
    if (st1.count != 50)
      throw new Exception("Thread 1 did not see 50, saw " + st1.count);
   
    if (st2.count != 50)
      throw new Exception("Thread 2 did not see 50, saw " + st2.count);
   
    ScanTask st3 = new ScanTask(getConnector(), 150);
    st3.start();
   
    UtilWaitThread.sleep(50);
    getConnector().tableOperations().flush("cct", null, null, false);
   
    st3.join();
    if (st3.count != 50)
      throw new Exception("Thread 3 did not see 50, saw " + st3.count);
   
    st0.join();
    if (st0.count != 50)
      throw new Exception("Thread 0 did not see 50, saw " + st0.count);
   
    bw.close();
  }
View Full Code Here
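
All of the snippets on this page construct their writers with a default BatchWriterConfig. Where the defaults are not appropriate, the config can be tuned before the writer is created; this is a hedged sketch, and every number in it is illustrative rather than a recommendation drawn from the examples.

  // Hedged sketch: tuning BatchWriterConfig instead of accepting the defaults.
  // Requires java.util.concurrent.TimeUnit; all values are illustrative.
  BatchWriterConfig config = new BatchWriterConfig();
  config.setMaxMemory(64 * 1024 * 1024);     // buffer up to 64 MB of mutations on the client
  config.setMaxLatency(2, TimeUnit.SECONDS); // send buffered mutations at least every 2 seconds
  config.setMaxWriteThreads(4);              // background threads writing to tablet servers
  config.setTimeout(3, TimeUnit.MINUTES);    // give up on an unresponsive tablet server after 3 minutes
  BatchWriter bw = getConnector().createBatchWriter("cct", config);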


  @Override
  public void visit(State state, Properties props) throws Exception {
   
    MultiTableBatchWriter mtbw = state.getMultiTableBatchWriter();
   
    BatchWriter imagesBW = mtbw.getBatchWriter(state.getString("imageTableName"));
    BatchWriter indexBW = mtbw.getBatchWriter(state.getString("indexTableName"));
   
    String uuid = UUID.randomUUID().toString();
    Mutation m = new Mutation(new Text(uuid));
   
    // create a fake image whose size is a random value between the configured minSize and maxSize (e.g. 4KB to 1MB)
    int maxSize = Integer.parseInt(props.getProperty("maxSize"));
    int minSize = Integer.parseInt(props.getProperty("minSize"));
   
    Random rand = new Random();
    int numBytes = rand.nextInt((maxSize - minSize)) + minSize;
    byte[] imageBytes = new byte[numBytes];
    rand.nextBytes(imageBytes);
    m.put(CONTENT_COLUMN_FAMILY, IMAGE_COLUMN_QUALIFIER, new Value(imageBytes));
   
    // store size
    m.put(META_COLUMN_FAMILY, new Text("size"), new Value(String.format("%d", numBytes).getBytes(Constants.UTF8)));
   
    // store hash
    MessageDigest alg = MessageDigest.getInstance("SHA-1");
    alg.update(imageBytes);
    byte[] hash = alg.digest();
    m.put(META_COLUMN_FAMILY, SHA1_COLUMN_QUALIFIER, new Value(hash));
   
    // update write counts
    state.set("numWrites", state.getLong("numWrites") + 1);
    Long totalWrites = state.getLong("totalWrites") + 1;
    state.set("totalWrites", totalWrites);
   
    // set count
    m.put(META_COLUMN_FAMILY, COUNT_COLUMN_QUALIFIER, new Value(String.format("%d", totalWrites).getBytes(Constants.UTF8)));
   
    // add mutation
    imagesBW.addMutation(m);
   
    // now add mutation to index
    Text row = new Text(hash);
    m = new Mutation(row);
    m.put(META_COLUMN_FAMILY, UUID_COLUMN_QUALIFIER, new Value(uuid.getBytes(Constants.UTF8)));
   
    indexBW.addMutation(m);
   
    Text lastRow = (Text) state.get("lastIndexRow");
    if (lastRow.compareTo(row) < 0) {
      state.set("lastIndexRow", new Text(row));
    }
View Full Code Here
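
The snippet above obtains its MultiTableBatchWriter from shared test state. Outside of that harness, one would typically be created straight from a Connector; the following is a hedged sketch with illustrative table names.

  // Hedged sketch: creating a MultiTableBatchWriter directly from a Connector.
  // The table names are illustrative, not taken from the snippet above.
  static void writeToTwoTables(Connector connector) throws Exception {
    MultiTableBatchWriter mtbw = connector.createMultiTableBatchWriter(new BatchWriterConfig());
    try {
      BatchWriter images = mtbw.getBatchWriter("images");
      BatchWriter index = mtbw.getBatchWriter("imageIndex");
      // ... add mutations to either per-table writer; they share one buffer and flush together
      mtbw.flush();
    } finally {
      mtbw.close(); // closes every per-table writer obtained from this MultiTableBatchWriter
    }
  }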

        MutationsRejectedException {
      if (!getConnector().tableOperations().exists(TEST_TABLE)) {
        // create the test table
        getConnector().tableOperations().create(TEST_TABLE);
        // put in some initial data
        BatchWriter writer = getConnector().createBatchWriter(TEST_TABLE, new BatchWriterConfig());
        Mutation m = new Mutation(new Text("row"));
        m.put(new Text("cf"), new Text("cq"), new Value("val".getBytes(Constants.UTF8)));
        writer.addMutation(m);
        writer.close();
       
        // verify proper permissions for creator and test user
        verifyHasOnlyTheseTablePermissions(getConnector(), getConnector().whoami(), TEST_TABLE, TablePermission.values());
        verifyHasNoTablePermissions(getConnector(), TEST_USER, TEST_TABLE, TablePermission.values());
       
View Full Code Here

    }
   
    private static void testMissingTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
        AccumuloSecurityException, TableNotFoundException {
      Scanner scanner;
      BatchWriter writer;
      Mutation m;
      log.debug("Confirming that the lack of the " + perm + " permission properly restricts the user");
     
      // test permission prior to granting it
      switch (perm) {
        case READ:
          try {
            scanner = test_user_conn.createScanner(TEST_TABLE, Constants.NO_AUTHS);
            int i = 0;
            for (Entry<Key,Value> entry : scanner)
              i += 1 + entry.getKey().getRowData().length();
            if (i != 0)
              throw new IllegalStateException("Should NOT be able to read from the table");
          } catch (RuntimeException e) {
            AccumuloSecurityException se = (AccumuloSecurityException) e.getCause();
            if (se.getSecurityErrorCode() != SecurityErrorCode.PERMISSION_DENIED)
              throw se;
          }
          break;
        case WRITE:
          try {
            writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
            m = new Mutation(new Text("row"));
            m.put(new Text("a"), new Text("b"), new Value("c".getBytes(Constants.UTF8)));
            writer.addMutation(m);
            try {
              writer.close();
            } catch (MutationsRejectedException e1) {
              if (e1.getAuthorizationFailuresMap().size() > 0)
                throw new AccumuloSecurityException(test_user_conn.whoami(), org.apache.accumulo.core.client.impl.thrift.SecurityErrorCode.PERMISSION_DENIED,
                    e1);
            }
View Full Code Here

    }
   
    private static void testGrantedTablePermission(Connector root_conn, Connector test_user_conn, TablePermission perm) throws AccumuloException,
        TableExistsException, AccumuloSecurityException, TableNotFoundException, MutationsRejectedException {
      Scanner scanner;
      BatchWriter writer;
      Mutation m;
      log.debug("Confirming that the presence of the " + perm + " permission properly permits the user");
     
      // test permission after granting it
      switch (perm) {
        case READ:
          scanner = test_user_conn.createScanner(TEST_TABLE, Constants.NO_AUTHS);
          Iterator<Entry<Key,Value>> iter = scanner.iterator();
          while (iter.hasNext())
            iter.next();
          break;
        case WRITE:
          writer = test_user_conn.createBatchWriter(TEST_TABLE, new BatchWriterConfig());
          m = new Mutation(new Text("row"));
          m.put(new Text("a"), new Text("b"), new Value("c".getBytes(Constants.UTF8)));
          writer.addMutation(m);
          writer.close();
          break;
        case BULK_IMPORT:
          // test for bulk import permission would go here
          break;
        case ALTER_TABLE:
View Full Code Here
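
For context around the two permission tests above, here is a minimal sketch of granting and then revoking the permission under test between the checks. The securityOperations calls are standard API; the surrounding flow is an assumption.

  // Hedged sketch: grant the permission before the granted-permission checks run, then revoke it.
  // TEST_USER, TEST_TABLE, and perm come from the tests above.
  root_conn.securityOperations().grantTablePermission(TEST_USER, TEST_TABLE, perm);
  // ... run the granted-permission checks with test_user_conn ...
  root_conn.securityOperations().revokeTablePermission(TEST_USER, TEST_TABLE, perm);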

    getConnector().tableOperations().create("tt");
    IteratorSetting is = new IteratorSetting(5, "Bad Aggregator", BadCombiner.class);
    Combiner.setColumns(is, Collections.singletonList(new IteratorSetting.Column("acf")));
    getConnector().tableOperations().attachIterator("tt", is);
   
    BatchWriter bw = getConnector().createBatchWriter("tt", new BatchWriterConfig());
   
    Mutation m = new Mutation(new Text("r1"));
    m.put(new Text("acf"), new Text("foo"), new Value(new byte[] {'1'}));
   
    bw.addMutation(m);
   
    bw.close();
   
    // try to scan table
    Scanner scanner = getConnector().createScanner("tt", Constants.NO_AUTHS);
   
    boolean caught = false;
View Full Code Here

    }

  }
 
  private void fillTable(String table) throws Exception {
    BatchWriter bw = getConnector().createBatchWriter(table, new BatchWriterConfig());
    for (String row : ROWS) {
      Mutation m = new Mutation(row);
      m.put("cf", "cq", "value");
      bw.addMutation(m);
    }
    bw.close();
  }
View Full Code Here

      TableOperations tops = connector.tableOperations();
      tops.create(table1);
      tops.create(table2);

      BatchWriter bw1 = mtbw.getBatchWriter(table1), bw2 = mtbw.getBatchWriter(table2);

      Mutation m1 = new Mutation("foo");
      m1.put("col1", "", "val1");
      m1.put("col2", "", "val2");

      bw1.addMutation(m1);
      bw2.addMutation(m1);

      // Mutations might or might not be flushed before the tables go offline
      tops.offline(table1);
      tops.offline(table2);
View Full Code Here

    // use the supplied seed, when given, so runs are reproducible
    Random r;
    if (opts.seed == null)
      r = new Random();
    else {
      r = new Random(opts.seed);
    }
    Connector connector = opts.getConnector();
    BatchWriter bw = connector.createBatchWriter(opts.tableName, bwOpts.getBatchWriterConfig());
   
    // reuse the ColumnVisibility object to improve performance
    ColumnVisibility cv = opts.visiblity;
  
    // Generate num unique row ids in the given range
    HashSet<Long> rowids = new HashSet<Long>(opts.num);
    while (rowids.size() < opts.num) {
      rowids.add((Math.abs(r.nextLong()) % (opts.max - opts.min)) + opts.min);
    }
    for (long rowid : rowids) {
      Mutation m = createMutation(rowid, opts.size, cv);
      bw.addMutation(m);
    }
   
    try {
      bw.close();
    } catch (MutationsRejectedException e) {
      if (e.getAuthorizationFailuresMap().size() > 0) {
        HashMap<String,Set<SecurityErrorCode>> tables = new HashMap<String,Set<SecurityErrorCode>>();
        for (Entry<KeyExtent,Set<SecurityErrorCode>> ke : e.getAuthorizationFailuresMap().entrySet()) {
          Set<SecurityErrorCode> secCodes = tables.get(ke.getKey().getTableId().toString());
View Full Code Here
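
The snippet above is cut off while unpacking the authorization failures from a rejected close(). A hedged sketch of one complete way to inspect a MutationsRejectedException, consistent with the same API version, is shown here; the writer `bw` is assumed and the printing is illustrative only.

  // Hedged sketch: inspecting a MutationsRejectedException thrown by close() or flush().
  try {
    bw.close();
  } catch (MutationsRejectedException e) {
    // per-tablet authorization failures, keyed by KeyExtent
    for (Entry<KeyExtent,Set<SecurityErrorCode>> entry : e.getAuthorizationFailuresMap().entrySet()) {
      System.err.println("authorization failure on table id " + entry.getKey().getTableId() + ": " + entry.getValue());
    }
    // constraint violations reported back by the tablet servers
    for (ConstraintViolationSummary cvs : e.getConstraintViolationSummaries()) {
      System.err.println("constraint violation: " + cvs);
    }
  }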

  @Override
  public Repo<Master> call(long tid, Master master) throws Exception {
   
    Path path = new Path(tableInfo.exportDir, Constants.EXPORT_FILE);
   
    BatchWriter mbw = null;
    ZipInputStream zis = null;
   
    try {
      FileSystem fs = master.getFileSystem();
     
      mbw = master.getConnector().createBatchWriter(Constants.METADATA_TABLE_NAME, new BatchWriterConfig());
     
      zis = new ZipInputStream(fs.open(path));
     
      Map<String,String> fileNameMappings = readMappingFile(fs, tableInfo);
     
      String bulkDir = new Path(tableInfo.importDir).getName();
     
      ZipEntry zipEntry;
      while ((zipEntry = zis.getNextEntry()) != null) {
        if (zipEntry.getName().equals(Constants.EXPORT_METADATA_FILE)) {
          DataInputStream in = new DataInputStream(new BufferedInputStream(zis));
         
          Key key = new Key();
          Value val = new Value();
         
          Mutation m = null;
          Text currentRow = null;
          int dirCount = 0;
         
          while (true) {
            key.readFields(in);
            val.readFields(in);
           
            Text endRow = new KeyExtent(key.getRow(), (Text) null).getEndRow();
            Text metadataRow = new KeyExtent(new Text(tableInfo.tableId), endRow, null).getMetadataEntry();
           
            Text cq;
           
            if (key.getColumnFamily().equals(Constants.METADATA_DATAFILE_COLUMN_FAMILY)) {
              String oldName = new Path(key.getColumnQualifier().toString()).getName();
              String newName = fileNameMappings.get(oldName);
             
              if (newName == null) {
                throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
                    "File " + oldName + " does not exist in import dir");
              }

              cq = new Text("/" + bulkDir + "/" + newName);
            } else {
              cq = key.getColumnQualifier();
            }
           
            if (m == null) {
              m = new Mutation(metadataRow);
              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
              currentRow = metadataRow;
            }
           
            if (!currentRow.equals(metadataRow)) {
              mbw.addMutation(m);
              m = new Mutation(metadataRow);
              Constants.METADATA_DIRECTORY_COLUMN.put(m, new Value(FastFormat.toZeroPaddedString(dirCount++, 8, 16, "/c-".getBytes(Constants.UTF8))));
            }
           
            m.put(key.getColumnFamily(), cq, val);
           
            if (endRow == null && Constants.METADATA_PREV_ROW_COLUMN.hasColumns(key)) {
              mbw.addMutation(m);
              break; // it's the last column in the last row
            }
          }
         
          break;
        }
      }
     
      return new MoveExportedFiles(tableInfo);
    } catch (IOException ioe) {
      log.warn(ioe.getMessage(), ioe);
      throw new ThriftTableOperationException(tableInfo.tableId, tableInfo.tableName, TableOperation.IMPORT, TableOperationExceptionType.OTHER,
          "Error reading " + path + " " + ioe.getMessage());
    } finally {
      if (zis != null) {
        try {
          zis.close();
        } catch (IOException ioe) {
          log.warn("Failed to close zip file ", ioe);
        }
      }
     
      if (mbw != null) {
        mbw.close();
      }
    }
  }
View Full Code Here
