Package: org.apache.pig

Usage examples of org.apache.pig.StoreConfig


  @Override
  public void commit(Configuration conf) throws IOException {
    try {
      JobConf job = new JobConf(conf);
      StoreConfig storeConfig = MapRedUtil.getStoreConfig(job);
      BasicTable.Writer write = new BasicTable.Writer(new Path(storeConfig.getLocation()), job);
      write.close();
    } catch (IOException ee) {
      throw ee;
    }
  }
View Full Code Here


*
*/
class TableOutputFormat implements OutputFormat<BytesWritable, Tuple> {
  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
    StoreConfig storeConfig = MapRedUtil.getStoreConfig(job);
    String location = storeConfig.getLocation(), schemaStr;
    Schema schema = storeConfig.getSchema();
    org.apache.pig.SortInfo pigSortInfo = storeConfig.getSortInfo();

    /* TODO
     * use a home-brewn comparator ??
     */
    String comparator = null;
View Full Code Here

  private int[] sortColIndices = null;
  KeyGenerator builder;
  Tuple t;

  public TableRecordWriter(String name, JobConf conf) throws IOException {
    StoreConfig storeConfig = MapRedUtil.getStoreConfig(conf);
    String location = storeConfig.getLocation();

    // TODO: how to get? 1) column group splits; 2) flag of sorted-ness,
    // compression, etc.
    writer = new BasicTable.Writer(new Path(location), conf);
View Full Code Here

    private void commitStores(List<POStore> stores, Configuration conf)
    throws IOException {
        for (POStore store : stores) {
            StoreFunc sFunc = (StoreFunc)PigContext.instantiateFuncFromSpec(
                    store.getSFile().getFuncSpec());
            StoreConfig storeConfig = new StoreConfig(store.getSFile().
                    getFileName(), store.getSchema(), store.getSortInfo());
            commit(sFunc, conf, storeConfig,
                    store.getSFile().getFuncSpec().toString());
        }
    }
View Full Code Here

*
*/
class TableOutputFormat implements OutputFormat<BytesWritable, Tuple> {
  @Override
  public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException {
    StoreConfig storeConfig = MapRedUtil.getStoreConfig(job);
    String location = storeConfig.getLocation();
    TableStorer storeFunc = (TableStorer)MapRedUtil.getStoreFunc(job);  
    BasicTable.Writer writer = new BasicTable.Writer(new Path(location),
        storeFunc.getSchemaString(), storeFunc.getStorageHintString(), false, job);
    writer.finish();
  }
View Full Code Here

  final private BytesWritable KEY0 = new BytesWritable(new byte[0]);
  private BasicTable.Writer writer;
  private TableInserter inserter;

  public TableRecordWriter(String name, JobConf conf) throws IOException {
    StoreConfig storeConfig = MapRedUtil.getStoreConfig(conf);
    String location = storeConfig.getLocation();

    // TODO: how to get? 1) column group splits; 2) flag of sorted-ness,
    // compression, etc.
    writer = new BasicTable.Writer(new Path(location), conf);
    inserter = writer.getInserter(name, false);
View Full Code Here

                // get encoded as regular characters. Otherwise any control characters
                // in the store funcspec would break the job.xml which is created by
                // hadoop from the jobconf.
                jobConf.set("pig.storeFunc", ObjectSerializer.serialize(outputFuncSpec.toString()));
                jobConf.set(PIG_STORE_CONFIG,
                            ObjectSerializer.serialize(new StoreConfig(outputPath, st.getSchema())));

                jobConf.set("pig.streaming.log.dir",
                            new Path(outputPath, LOG_DIR).toString());
                jobConf.set("pig.streaming.task.output.dir", outputPath);
            }
View Full Code Here

         * @see org.apache.hadoop.mapred.OutputFormat#checkOutputSpecs(org.apache.hadoop.fs.FileSystem, org.apache.hadoop.mapred.JobConf)
         */
        @Override
        public void checkOutputSpecs(FileSystem ignored, JobConf job)
                throws IOException {
            StoreConfig sConfig = MapRedUtil.getStoreConfig(job);
            FileSystem fs = FileSystem.get(job);
            // create a file to test that this method got called
            fs.create(new Path(sConfig.getLocation() + "_checkOutputSpec_test"));
        }
View Full Code Here

                // in the store funcspec would break the job.xml which is created by
                // hadoop from the jobconf.
                jobConf.set("pig.storeFunc", ObjectSerializer.serialize(outputFuncSpec.toString()));
                jobConf.set(PIG_STORE_CONFIG,
                            ObjectSerializer.serialize(
                                    new StoreConfig(outputPath, st.getSchema(), st.getSortInfo())));

                jobConf.set("pig.streaming.log.dir",
                            new Path(outputPath, LOG_DIR).toString());
                jobConf.set("pig.streaming.task.output.dir", outputPath);
            }
View Full Code Here

            // used. In this case, we want to just use PigOutputFormat
            sPrepClass = null;
        }
        if(sPrepClass != null && OutputFormat.class.isAssignableFrom(sPrepClass)) {
       
            StoreConfig storeConfig = new StoreConfig(store.getSFile().
                    getFileName(), store.getSchema(), store.getSortInfo());
            // make a copy of the conf since we may be dealing with multiple
            // stores. Set storeFunc and StoreConfig
            // pertaining to this store in the copy and use it
            JobConf confCopy = new JobConf(job);
View Full Code Here

TOP

Related Classes of org.apache.pig.StoreConfig

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., which is owned by Oracle Inc. Contact coftware#gmail.com.