Examples of Writer


Examples of org.apache.accumulo.core.file.rfile.RFile.Writer

   
    // Data- and index-block sizes come from the table configuration.
    long blockSize = acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE);
    long indexBlockSize = acuconf.getMemoryInBytes(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE_INDEX);

    // Wrap the raw HDFS output stream in a cachable block-file writer,
    // then layer the RFile writer on top of it.
    CachableBlockFile.Writer _cbw = new CachableBlockFile.Writer(
        fs.create(new Path(file), false, bufferSize, (short) rep, block),
        compression, conf);
    Writer writer = new RFile.Writer(_cbw, (int) blockSize, (int) indexBlockSize);
    return writer;
  }
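
A minimal sketch of how the returned writer is typically driven (the Key/Value literals and org.apache.hadoop.io.Text usage are illustrative, not from the original source); RFile.Writer requires a locality group to be started before any appends:

    writer.startDefaultLocalityGroup();
    writer.append(new Key(new Text("row1"), new Text("cf"), new Text("cq")),
        new Value("v1".getBytes()));
    writer.close();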

Examples of org.apache.hadoop.hbase.io.hfile.HFile.Writer

    // Grab the store's single existing file, then write a new store file
    // that contains nothing but metadata carrying a bumped sequence id.
    StoreFile f = this.store.getStorefiles().firstEntry().getValue();
    Path storedir = f.getPath().getParent();
    long seqid = f.getMaxSequenceId();
    HBaseConfiguration c = new HBaseConfiguration();
    FileSystem fs = FileSystem.get(c);
    Writer w = StoreFile.getWriter(fs, storedir);
    StoreFile.appendMetadata(w, seqid + 1);
    w.close();
    this.store.close();
    // Reopen it... should pick up two files
    this.store = new Store(storedir.getParent().getParent(),
        this.store.getHRegion(), this.store.getFamily(), fs, null, c, null);
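
The test above writes only metadata, but the same HFile.Writer also accepts data. A hypothetical sketch (the row/family/qualifier names are invented for illustration) of an append that would precede the appendMetadata call:

    KeyValue kv = new KeyValue(Bytes.toBytes("row"), Bytes.toBytes("fam"),
        Bytes.toBytes("qual"), Bytes.toBytes("value"));
    w.append(kv); // must happen before StoreFile.appendMetadata(w, seqid + 1)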

Examples of org.apache.hadoop.hbase.regionserver.wal.HLog.Writer

    if (fs.exists(regionedits)) {
      LOG.warn("Found existing old edits file, deleting " + regionedits
          + ", length=" + fs.getFileStatus(regionedits).getLen());
      if (!fs.delete(regionedits, false)) {
        LOG.warn("Failed delete of old " + regionedits);
      }
    }
    Writer w = createWriter(fs, regionedits, conf);
    LOG.debug("Creating writer path=" + regionedits + " region="
        + Bytes.toStringBinary(region));
    return new WriterAndPath(regionedits, w);
  }
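
A minimal sketch of how the writer inside the returned WriterAndPath is then driven during log splitting, assuming an already-recovered HLog.Entry named entry (not part of the original method):

    w.append(entry); // entry pairs an HLogKey with a WALEdit
    w.sync();        // flush the edit to the filesystem
    w.close();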

Examples of org.apache.hadoop.hdfs.server.namenode.FSEditLogOp.Writer

    // Ops are serialized into an in-memory buffer; once the buffer exceeds
    // the transmission threshold, its bytes are shipped to the BookKeeper
    // ledger handle lh.
    bufCurrent = new DataOutputBuffer();
    outstandingRequests = new AtomicInteger(0);
    syncLatch = null;
    this.lh = lh;
    this.writer = new Writer(bufCurrent);
    this.transmissionThreshold =
        conf.getInt(BookKeeperJournalManager.BKJM_OUTPUT_BUFFER_SIZE,
            BookKeeperJournalManager.BKJM_OUTPUT_BUFFER_SIZE_DEFAULT);
  }
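
A hedged sketch of how this stream plausibly uses the Writer: each op is serialized into bufCurrent via writeOp, and the buffer is transmitted once it crosses the threshold. The transmit() call is a stand-in name, not a verified method:

    public void write(FSEditLogOp op) throws IOException {
      writer.writeOp(op); // serialize the op into bufCurrent
      if (bufCurrent.getLength() > transmissionThreshold) {
        transmit(); // hypothetical: send buffered bytes to the ledger handle lh
      }
    }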

Examples of org.apache.hadoop.hive.ql.io.orc.Writer

        this.counter = counter;
    }

    @Override
    public void write(T model) throws IOException {
        Writer writer = prepare();
        writer.addRow(model);

        // count one record per addRow
        counter.add(1);
    }
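
The prepare() helper is not shown. A hedged sketch of how such an ORC Writer is commonly obtained through the OrcFile factory (path, conf, and inspector are assumed to exist in the enclosing class):

    Writer writer = OrcFile.createWriter(path,
        OrcFile.writerOptions(conf).inspector(inspector));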

Examples of org.apache.hadoop.io.MapFile.Writer

  public void setUp() throws Exception {
    fs = FileSystem.getLocal(conf);
    Path root = new Path("manyMaps");
    fs.mkdirs(root);
    fs.create(new Path(root, "finished")).close();
    // Odd keys 1, 3, ..., 999, all mapped to the same value.
    Writer writer = new Writer(conf, fs, "manyMaps/odd", IntWritable.class, BytesWritable.class);
    BytesWritable value = new BytesWritable("someValue".getBytes());
    for (int i = 1; i < 1000; i += 2) {
      writer.append(new IntWritable(i), value);
    }
    writer.close();
    // Even keys 0, 2, ..., 998, deliberately skipping 10.
    writer = new Writer(conf, fs, "manyMaps/even", IntWritable.class, BytesWritable.class);
    for (int i = 0; i < 1000; i += 2) {
      if (i == 10) {
        continue;
      }
      writer.append(new IntWritable(i), value);
    }
    writer.close();
  }
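
A short sketch (not part of the original setUp) confirming the files written above can be read back with MapFile.Reader:

    MapFile.Reader reader = new MapFile.Reader(fs, "manyMaps/odd", conf);
    BytesWritable val = new BytesWritable();
    reader.get(new IntWritable(3), val); // binary-searches the index, then seeks to the key
    reader.close();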

Examples of org.apache.hadoop.io.SequenceFile.Writer

     
      if (numSpills == 0) {
        //create dummy files
        for (int i = 0; i < partitions; i++) {
          segmentStart = finalOut.getPos();
          Writer writer = SequenceFile.createWriter(job, finalOut,
                                                    job.getMapOutputKeyClass(),
                                                    job.getMapOutputValueClass(),
                                                    compressionType, codec);
          finalIndexOut.writeLong(segmentStart);
          finalIndexOut.writeLong(finalOut.getPos() - segmentStart);
          writer.close();
        }
        finalOut.close();
        finalIndexOut.close();
        return;
      }
      {
        //create a sorter object as we need access to the SegmentDescriptor
        //class and merge methods
        Sorter sorter = new Sorter(localFs, job.getOutputKeyComparator(), valClass, job);
        sorter.setProgressable(reporter);
       
        for (int parts = 0; parts < partitions; parts++){
          List<SegmentDescriptor> segmentList =
            new ArrayList<SegmentDescriptor>(numSpills);
          for(int i = 0; i < numSpills; i++) {
            FSDataInputStream indexIn = localFs.open(indexFileName[i]);
            indexIn.seek(parts * 16);
            long segmentOffset = indexIn.readLong();
            long segmentLength = indexIn.readLong();
            indexIn.close();
            SegmentDescriptor s = sorter.new SegmentDescriptor(segmentOffset,
                                                               segmentLength, filename[i]);
            s.preserveInput(true);
            s.doSync();
            segmentList.add(i, s);
          }
          segmentStart = finalOut.getPos();
          RawKeyValueIterator kvIter = sorter.merge(segmentList, new Path(getTaskId()));
          SequenceFile.Writer writer = SequenceFile.createWriter(job, finalOut,
                                                                 job.getMapOutputKeyClass(), job.getMapOutputValueClass(),
                                                                 compressionType, codec);
          sorter.writeFile(kvIter, writer);
          //close the file - required esp. for block compression to ensure
          //partition data don't span partition boundaries
          writer.close();
          //when we write the offset/length to the final index file, we write
          //longs for both. This helps us to reliably seek directly to the
          //offset/length for a partition when we start serving the byte-ranges
          //to the reduces. We probably waste some space in the file by doing
          //this as opposed to writing VLong but it helps us later on.
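
Each index record is two longs, offset then length, which is why the reader side above seeks to parts * 16. A sketch (finalOutputFile is an illustrative name) of reading one merged partition back:

    SequenceFile.Reader reader = new SequenceFile.Reader(localFs, finalOutputFile, job);
    Writable key = (Writable) ReflectionUtils.newInstance(reader.getKeyClass(), job);
    Writable value = (Writable) ReflectionUtils.newInstance(reader.getValueClass(), job);
    while (reader.next(key, value)) {
      // consume merged records in sorted order
    }
    reader.close();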

Examples of org.apache.hadoop.io.file.tfile.TFile.Writer

  public void setUp() throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile);
    fs = path.getFileSystem(conf);
    out = fs.create(path);
    // A null comparator makes this an unsorted TFile, so the keys below
    // may legally be appended out of order.
    writer = new Writer(out, BLOCK_SIZE, compression, null, conf);
    writer.append("keyZ".getBytes(), "valueZ".getBytes());
    writer.append("keyM".getBytes(), "valueM".getBytes());
    writer.append("keyN".getBytes(), "valueN".getBytes());
    writer.append("keyA".getBytes(), "valueA".getBytes());
    closeOutput();
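
A minimal sketch of reading the file back (not part of the original test). Because the writer was created with a null comparator, the scanner returns entries in insertion order rather than key order:

    TFile.Reader reader = new TFile.Reader(fs.open(path),
        fs.getFileStatus(path).getLen(), conf);
    TFile.Reader.Scanner scanner = reader.createScanner();
    BytesWritable key = new BytesWritable();
    BytesWritable value = new BytesWritable();
    while (!scanner.atEnd()) {
      scanner.entry().get(key, value); // fills key and value for the current entry
      scanner.advance();
    }
    scanner.close();
    reader.close();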

Examples of org.apache.hadoop.zebra.tfile.TFile.Writer

  void createFile(int count, String compress) throws IOException {
    conf = new Configuration();
    path = new Path(ROOT, outputFile + "." + compress);
    fs = path.getFileSystem(conf);
    FSDataOutputStream out = fs.create(path);
    // With a comparator supplied, keys must arrive in sorted order;
    // composeSortedKey generates them accordingly.
    Writer writer = new Writer(out, BLOCK_SIZE, compress, comparator, conf);

    int nx;
    for (nx = 0; nx < count; nx++) {
      byte[] key = composeSortedKey(KEY, count, nx).getBytes();
      byte[] value = (VALUE + nx).getBytes();
      writer.append(key, value);
    }
    writer.close();
    out.close();
  }
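
Because this file is written with a comparator, its keys are sorted and a reader can seek straight to a key. A sketch reusing the same helpers (not part of the original method):

    TFile.Reader reader = new TFile.Reader(fs.open(path),
        fs.getFileStatus(path).getLen(), conf);
    // Scan from the first generated key; a null end key means "to end of file".
    TFile.Reader.Scanner scanner = reader.createScannerByKey(
        composeSortedKey(KEY, count, 0).getBytes(), null);
    scanner.close();
    reader.close();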

Examples of org.apache.tez.runtime.library.common.sort.impl.IFile.Writer

  /**
   * Writes the given pairs with every combination of RLE and repeat-key
   * settings, with and without compression, and verifies that each file
   * reads back correctly.
   *
   * @param data
   * @throws IOException
   */
  private void testWriterAndReader(List<KVPair> data) throws IOException {
    Writer writer = null;
    //No RLE, No RepeatKeys
    writer = writeTestFile(false, false, data, null);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, null);

    writer = writeTestFile(false, false, data, codec);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);

    //No RLE, RepeatKeys
    writer = writeTestFile(false, true, data, null);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, null);

    writer = writeTestFile(false, true, data, codec);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);

    //RLE, No RepeatKeys
    writer = writeTestFile(true, false, data, null);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, null);

    writer = writeTestFile(true, false, data, codec);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);

    //RLE, RepeatKeys
    writer = writeTestFile(true, true, data, null);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, null);

    writer = writeTestFile(true, true, data, codec);
    readAndVerifyData(writer.getRawLength(), writer.getCompressedLength(), data, codec);
  }
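
The writeTestFile helper lives elsewhere in the test class. A hedged sketch of its likely shape; the output path, key/value classes, and null counters are assumptions, and the real helper also wires the RLE and repeat-key flags omitted here:

    FSDataOutputStream out = fs.create(outputPath);
    Writer writer = new Writer(conf, out, Text.class, IntWritable.class,
        codec, null, null);
    for (KVPair pair : data) {
      writer.append(pair.getKey(), pair.getvalue()); // KVPair accessors per KVDataGen
    }
    writer.close();
    out.close();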