Package: org.apache.hadoop.io

Examples of org.apache.hadoop.io.BytesWritable


    // row 1
    tuple.set(0, "column1_1");
    tuple.set(1, "column2_1");
    int row = 0;
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);

    // row 2
    row++;
    TypesUtils.resetTuple(tuple);
    tuple.set(0, "column1_2");
    tuple.set(1, "column2_2");
    inserter.insert(new BytesWritable(String.format("k%d%d", part + 1, row + 1)
        .getBytes()), tuple);
    inserter.close();
    writer1.finish();
  }
View Full Code Here


    System.out.println(myuser + "is reading....");
    BasicTable.Reader reader = new BasicTable.Reader(path, conf);
    reader.setProjection(projection);
    List<RangeSplit> splits = reader.rangeSplit(1);
    TableScanner scanner = reader.getScanner(splits.get(0), true);
    BytesWritable key = new BytesWritable();
    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());

    scanner.getKey(key);
    // Assert.assertEquals(key, new BytesWritable("k11".getBytes()));
    scanner.getValue(RowValue);
View Full Code Here

            tuple.set(k, (9-b) + "_" + i + "" + k);
          } catch (ExecException e) {
            e.printStackTrace();
          }
        }
        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
      }
    }
    for (int i = 0; i < numsInserters; i++) {
      inserters[i].close();
    }
View Full Code Here

      }

      // update status
      if ((!dirty) && (index.size() > 0)) {
        RawComparable keyFirst = index.get(0).getFirstKey();
        status.beginKey = new BytesWritable();
        status.beginKey.set(keyFirst.buffer(), keyFirst.offset(), keyFirst
            .size());
        RawComparable keyLast = index.get(index.size() - 1).getLastKey();
        status.endKey = new BytesWritable();
        status.endKey.set(keyLast.buffer(), keyLast.offset(), keyLast.size());
      }
      sorted = true;
    }
View Full Code Here

          }
          } catch (ExecException e) {
            e.printStackTrace();
          }
        }
        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
      }
    }
    for (int i = 0; i < numsInserters; i++) {
      inserters[i].close();
    }
View Full Code Here

 
        } catch (ExecException e) {
          e.printStackTrace();
        }
 
       inserters[i].insert(new BytesWritable(("key_" + b).getBytes()), tuple);
      
      }
    }
    for (int i = 0; i < numsInserters; i++) {
      inserters[i].close();
    }
    writer.close();
   
 
    //check table is setup correctly
    String projection = new String("a,b,c,d,e,f,r1,m1");
   
    BasicTable.Reader reader = new BasicTable.Reader(pathTable1, conf);
    reader.setProjection(projection);
    List<RangeSplit> splits = reader.rangeSplit(1);
    TableScanner scanner = reader.getScanner(splits.get(0), true);
    BytesWritable key = new BytesWritable();
    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());
 
    scanner.getValue(RowValue);
    System.out.println("rowvalue size:"+RowValue.size());
    System.out.println("read a : " + RowValue.get(0).toString());
View Full Code Here

        } catch (ExecException e) {
          e.printStackTrace();
        }

        inserters[i].insert(new BytesWritable(("key" + b).getBytes()), tuple);
      }
    }
    for (int i = 0; i < numsInserters; i++) {
      inserters[i].close();
    }
    writer.close();
   
   
   
   
    //check table is setup correctly
    String projection = new String("a,b,c,d,e,f,r1,m1");
   
    BasicTable.Reader reader = new BasicTable.Reader(pathTable2, conf);
    reader.setProjection(projection);
    List<RangeSplit> splits = reader.rangeSplit(1);
    TableScanner scanner = reader.getScanner(splits.get(0), true);
    BytesWritable key = new BytesWritable();
    Tuple RowValue = TypesUtils.createTuple(scanner.getSchema());

    scanner.getValue(RowValue);
    System.out.println("rowvalue size:"+RowValue.size());
    System.out.println("read a : " + RowValue.get(7).toString());
View Full Code Here

    scanner.close();
  }

  @Override
  public BytesWritable createKey() {
    return new BytesWritable();
  }
View Full Code Here

      TypesUtils.resetTuple(tuple);
      for (int k = 0; k < tableData[i].length; ++k) {
        tuple.set(k, tableData[i][k]);
        System.out.println("DEBUG: setting tuple k=" + k + "value= " + tableData[i][k]);
      }
      inserter.insert(new BytesWritable(("key" + i).getBytes()), tuple);
    }
    inserter.close();
    writer.close();
  }
View Full Code Here

          }
          } catch (ExecException e) {
            e.printStackTrace();
          }
        }
        inserters[i].insert(new BytesWritable(("key" + i).getBytes()), tuple);
      }
    }
    for (int i = 0; i < numsInserters; i++) {
      inserters[i].close();
    }
View Full Code Here

TOP

Related Classes of org.apache.hadoop.io.BytesWritable

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware@gmail.com.