Package org.apache.hadoop.hive.ql.hooks

Examples of org.apache.hadoop.hive.ql.hooks.WriteEntity
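
A WriteEntity marks something a statement writes to: a table, a partition, or a directory. Semantic analyzers and DDL tasks add one to the query's output set, and Hive passes that set on to pre- and post-execution hooks, authorization, and locking. The sketch below collects the constructors exercised in the examples on this page; the helper class and method are hypothetical (not part of Hive), and constructor availability varies across releases (newer Hive versions also take an explicit WriteEntity.WriteType, as the final example shows).

    import java.util.LinkedHashSet;
    import java.util.Set;

    import org.apache.hadoop.hive.ql.hooks.WriteEntity;
    import org.apache.hadoop.hive.ql.metadata.Partition;
    import org.apache.hadoop.hive.ql.metadata.Table;

    // Hypothetical helper illustrating the constructors used in the
    // examples below; not part of Hive itself.
    public final class WriteEntityExamples {

      static Set<WriteEntity> collectOutputs(Table tbl, Partition part) {
        Set<WriteEntity> outputs = new LinkedHashSet<WriteEntity>();

        // Table-level output, e.g. CREATE TABLE or LOAD into an
        // unpartitioned table.
        outputs.add(new WriteEntity(tbl));

        // Partition-level output for statements that touch one partition.
        outputs.add(new WriteEntity(part));

        // The boolean form marks the entity as complete; the first example
        // below uses it when registering outputs for replication.
        outputs.add(new WriteEntity(part, true));

        return outputs;
      }
    }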


      // Currently, the partition spec can only be a static partition.
      String k = tblName + Path.SEPARATOR;
      tsDesc.setStatsAggPrefix(k);

      // set up WriteEntity for replication
      outputs.add(new WriteEntity(tab, true));

      // add WriteEntity for each matching partition
      if (tab.isPartitioned()) {
        if (partSpec == null) {
          throw new SemanticException(ErrorMsg.NEED_PARTITION_SPECIFICATION.getMsg());
        }
        List<Partition> partitions = qbp.getTableSpec().partitions;
        if (partitions != null) {
          for (Partition partn : partitions) {
            // inputs.add(new ReadEntity(partn)); // is this needed at all?
            outputs.add(new WriteEntity(partn, true));
          }
        }
      }
    }
  }
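
Note that this example registers the table and every matching partition as outputs, flagged complete for replication, so hooks see each partition the statement touched; inputs are tracked separately through ReadEntity, as the commented-out line suggests.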


   
    String loadTmpPath = ctx.getExternalTmpFileURI(toURI);
    Map<String, String> partSpec = ts.getPartSpec();
    if (partSpec == null) {
      partSpec = new LinkedHashMap<String, String>();
      outputs.add(new WriteEntity(ts.tableHandle));
    } else {
      try {
        Partition part = Hive.get().getPartition(ts.tableHandle, partSpec, false);
        if (part != null) {
          if (part.isOffline()) {
            throw new SemanticException(ErrorMsg.OFFLINE_TABLE_OR_PARTITION.
                getMsg(ts.tableName + ":" + part.getName()));
          }
          outputs.add(new WriteEntity(part));
        } else {
          outputs.add(new WriteEntity(ts.tableHandle));
        }
      } catch (HiveException e) {
        throw new SemanticException(e);
      }
    }
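
Note the granularity pattern in this LOAD-style example: when the partition spec resolves to an existing partition, the WriteEntity is created at partition level; otherwise the analyzer falls back to a table-level WriteEntity. Offline partitions are rejected before any output is registered.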

                    addPartitionDesc.getSortCols());
    }

    Partition part = db.getPartition(tbl, addPartitionDesc.getPartSpec(), false);
    work.getOutputs().add(new WriteEntity(part));

    return 0;
  }

        db.alterTable(tblName, tbl);
      } catch (InvalidOperationException e) {
        throw new HiveException("Unable to update table");
      }
      work.getInputs().add(new ReadEntity(tbl));
      work.getOutputs().add(new WriteEntity(tbl));
    } else {
      Partition part = db.getPartition(tbl, touchDesc.getPartSpec(), false);
      if (part == null) {
        throw new HiveException("Specified partition does not exist");
      }
      try {
        db.alterPartition(tblName, part);
      } catch (InvalidOperationException e) {
        throw new HiveException(e);
      }
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    }
    return 0;
  }

    // contains the new table. This is needed for rename - both the old and the
    // new table names are passed.
    if(part != null) {
      work.getInputs().add(new ReadEntity(part));
      work.getOutputs().add(new WriteEntity(part));
    } else {
      work.getInputs().add(new ReadEntity(oldTbl));
      work.getOutputs().add(new WriteEntity(tbl));
    }
    return 0;
  }
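
For table-level alters such as rename, the ReadEntity carries the old table while the WriteEntity carries the new one, as the comment above explains; for partition-level alters, both entities point at the partition.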

      }

      // drop the table
      db.dropTable(dropTbl.getTableName());
      if (tbl != null) {
        work.getOutputs().add(new WriteEntity(tbl));
      }
    } else {
      // This is actually an ALTER TABLE DROP PARTITION
      if (tbl != null) {
        validateAlterTableType(
          tbl, AlterTableDesc.AlterTableTypes.DROPPARTITION,
          dropTbl.getExpectView());
      }

      List<Partition> partsToDelete = new ArrayList<Partition>();
      for (Map<String, String> partSpec : dropTbl.getPartSpecs()) {
        List<Partition> partitions = db.getPartitions(tbl, partSpec);
        for (Partition p : partitions) {
          if (!p.canDrop()) {
            throw new HiveException("Table " + tbl.getTableName() +
                " Partition " + p.getName() +
                " is protected from being dropped");
          }
          partsToDelete.add(p);
        }
      }

      // drop all existing partitions from the list
      for (Partition partition : partsToDelete) {
        console.printInfo("Dropping the partition " + partition.getName());
        db.dropPartition(dropTbl.getTableName(), partition.getValues(), true);
        work.getOutputs().add(new WriteEntity(partition));
      }
    }

    return 0;
  }

      return rc;
    }

    // create the table
    db.createTable(tbl, crtTbl.getIfNotExists());
    work.getOutputs().add(new WriteEntity(tbl));
    return 0;
  }


    if (rc != 0) {
      return rc;
    }

    db.createTable(tbl, crtView.getIfNotExists());
    work.getOutputs().add(new WriteEntity(tbl));
    return 0;
  }

          checkTargetLocationEmpty(fs, new Path(table.getDataLocation()
              .toString()));
          loadTable(fromURI, table);
        }
        // Register with DDL_NO_LOCK, since existing partitions can't be overwritten
        outputs.add(new WriteEntity(table, WriteEntity.WriteType.DDL_NO_LOCK));
      } catch (InvalidTableException e) {
        LOG.debug("table " + tblDesc.getTableName() + " does not exist");

        Task<?> t = TaskFactory.get(new DDLWork(getInputs(), getOutputs(),
            tblDesc), conf);
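
After the statement runs, Hive hands the accumulated output set to any configured hooks. The sketch below is a minimal, hypothetical post-execution hook (the class name is invented; it would be registered via hive.exec.post.hooks) that logs each WriteEntity through the ExecuteWithHookContext interface:

    import org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext;
    import org.apache.hadoop.hive.ql.hooks.HookContext;
    import org.apache.hadoop.hive.ql.hooks.WriteEntity;

    // Hypothetical hook: logs every entity the completed query wrote.
    public class WriteEntityLoggerHook implements ExecuteWithHookContext {
      @Override
      public void run(HookContext hookContext) throws Exception {
        for (WriteEntity output : hookContext.getOutputs()) {
          // getType() is TABLE, PARTITION, etc.; getName() is the
          // qualified entity name.
          System.out.println("wrote " + output.getType() + ": "
              + output.getName());
        }
      }
    }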
