/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

package org.apache.hadoop.hive.shims;

import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.Trash;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import org.apache.hadoop.mapred.ClusterStatus;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MiniMRCluster;
import org.apache.hadoop.mapred.TaskLogServlet;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.TaskAttemptID;
import org.apache.hadoop.util.Progressable;

/**
 * Implementation of shims against Hadoop 0.20 with Security.
 */
public class Hadoop20SShims extends HadoopShimsSecure {
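
  /**
   * Builds the URL of the task log servlet for the given task attempt from
   * the host and port of the TaskTracker's HTTP address.
   */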
@Override
public String getTaskAttemptLogUrl(JobConf conf,
String taskTrackerHttpAddress, String taskAttemptId)
throws MalformedURLException {
URL taskTrackerHttpURL = new URL(taskTrackerHttpAddress);
return TaskLogServlet.getTaskLogUrl(
taskTrackerHttpURL.getHost(),
Integer.toString(taskTrackerHttpURL.getPort()),
taskAttemptId);
}
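
  /**
   * Maps the JobTracker state reported by {@link ClusterStatus} onto the
   * shim-level {@link JobTrackerState} enum, failing on unrecognized states.
   */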
@Override
public JobTrackerState getJobTrackerState(ClusterStatus clusterStatus) throws Exception {
switch (clusterStatus.getJobTrackerState()) {
case INITIALIZING:
return JobTrackerState.INITIALIZING;
case RUNNING:
return JobTrackerState.RUNNING;
default:
String errorMsg = "Unrecognized JobTracker state: " + clusterStatus.getJobTrackerState();
throw new Exception(errorMsg);
}
}
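
  /**
   * Creates a TaskAttemptContext whose progress() calls are forwarded to
   * the given {@link Progressable}.
   */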
@Override
  public org.apache.hadoop.mapreduce.TaskAttemptContext newTaskAttemptContext(
      Configuration conf, final Progressable progressable) {
return new org.apache.hadoop.mapreduce.TaskAttemptContext(conf, new TaskAttemptID()) {
@Override
public void progress() {
progressable.progress();
}
};
}
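
  /**
   * Creates a JobContext from the given job's configuration and job id.
   */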
@Override
public org.apache.hadoop.mapreduce.JobContext newJobContext(Job job) {
return new org.apache.hadoop.mapreduce.JobContext(job.getConfiguration(), job.getJobID());
}
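
  /**
   * Returns true when the job launcher address is "local", i.e. jobs run
   * in-process instead of on a cluster.
   */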
@Override
public boolean isLocalMode(Configuration conf) {
return "local".equals(getJobLauncherRpcAddress(conf));
}
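
  /**
   * Returns the JobTracker RPC address ("mapred.job.tracker").
   */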
@Override
public String getJobLauncherRpcAddress(Configuration conf) {
return conf.get("mapred.job.tracker");
}
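
  /**
   * Sets the JobTracker RPC address ("mapred.job.tracker").
   */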
@Override
public void setJobLauncherRpcAddress(Configuration conf, String val) {
conf.set("mapred.job.tracker", val);
}
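
  /**
   * Returns the JobTracker HTTP address ("mapred.job.tracker.http.address").
   */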
@Override
public String getJobLauncherHttpAddress(Configuration conf) {
return conf.get("mapred.job.tracker.http.address");
}
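
  /**
   * Moves the given path to the trash on the supplied FileSystem, returning
   * the result of {@link Trash#moveToTrash(Path)}.
   */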
@Override
public boolean moveToAppropriateTrash(FileSystem fs, Path path, Configuration conf)
throws IOException {
    // Older versions of Hadoop don't have a Trash constructor that takes a
    // Path or FileSystem, so emulate one by duplicating the configuration
    // and pointing its default FileSystem at the target FileSystem's URI.
    // This workaround is version-specific and must stay in this shim.
Configuration dupConf = new Configuration(conf);
FileSystem.setDefaultUri(dupConf, fs.getUri());
Trash trash = new Trash(dupConf);
return trash.moveToTrash(path);
}
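
  /**
   * Returns the file system's default block size. The path argument is
   * ignored; this Hadoop version only exposes a filesystem-wide default.
   */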
@Override
public long getDefaultBlockSize(FileSystem fs, Path path) {
return fs.getDefaultBlockSize();
}
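
  /**
   * Returns the file system's default replication. As with
   * {@link #getDefaultBlockSize}, the path argument is ignored.
   */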
@Override
public short getDefaultReplication(FileSystem fs, Path path) {
return fs.getDefaultReplication();
}

  /**
   * Returns a shim wrapping {@link MiniMRCluster}.
   */
public MiniMrShim getMiniMrCluster(Configuration conf, int numberOfTaskTrackers,
String nameNode, int numDir) throws IOException {
return new MiniMrShim(conf, numberOfTaskTrackers, nameNode, numDir);
}

  /**
   * Shim for {@link MiniMRCluster}.
   */
public class MiniMrShim implements HadoopShims.MiniMrShim {
private final MiniMRCluster mr;
public MiniMrShim(Configuration conf, int numberOfTaskTrackers,
String nameNode, int numDir) throws IOException {
this.mr = new MiniMRCluster(numberOfTaskTrackers, nameNode, numDir);
}
@Override
public int getJobTrackerPort() throws UnsupportedOperationException {
return mr.getJobTrackerPort();
}
@Override
public void shutdown() throws IOException {
mr.shutdown();
}
@Override
public void setupConfiguration(Configuration conf) {
setJobLauncherRpcAddress(conf, "localhost:" + mr.getJobTrackerPort());
}
}

  // Don't move this code to the parent class. There is a binary
  // incompatibility between Hadoop 1 and 2 with respect to MiniDFSCluster,
  // so we need two different shim classes even though they are exactly
  // the same.
  public HadoopShims.MiniDFSShim getMiniDfs(Configuration conf,
      int numDataNodes, boolean format, String[] racks) throws IOException {
return new MiniDFSShim(new MiniDFSCluster(conf, numDataNodes, format, racks));
}

  /**
   * Shim for {@link MiniDFSCluster}.
   */
public class MiniDFSShim implements HadoopShims.MiniDFSShim {
private final MiniDFSCluster cluster;
public MiniDFSShim(MiniDFSCluster cluster) {
this.cluster = cluster;
}

    @Override
    public FileSystem getFileSystem() throws IOException {
return cluster.getFileSystem();
}

    @Override
    public void shutdown() {
cluster.shutdown();
}
}
}