Package org.apache.hadoop.io

Examples of org.apache.hadoop.io.MD5Hash$Comparator
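
The snippets on this page use the enclosing org.apache.hadoop.io.MD5Hash class rather than the Comparator itself. As an orientation, here is a minimal, self-contained sketch (not taken from the snippets below) that computes MD5Hash values and compares their serialized forms with the raw-bytes MD5Hash.Comparator:

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.MD5Hash;

public class MD5HashComparatorDemo {
  public static void main(String[] args) throws Exception {
    MD5Hash a = MD5Hash.digest("hello");
    MD5Hash b = MD5Hash.digest("world");

    // Object-level comparison.
    System.out.println("a.compareTo(b) = " + a.compareTo(b));

    // Raw-bytes comparison over the Writable serialization; this is what
    // lets frameworks sort MD5Hash keys without deserializing them.
    DataOutputBuffer bufA = new DataOutputBuffer();
    DataOutputBuffer bufB = new DataOutputBuffer();
    a.write(bufA);
    b.write(bufB);
    MD5Hash.Comparator cmp = new MD5Hash.Comparator();
    int c = cmp.compare(bufA.getData(), 0, bufA.getLength(),
                        bufB.getData(), 0, bufB.getLength());
    System.out.println("raw compare = " + c);
  }
}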


        throw new IOException("File " + str + " received length " + received +
                              " is not of the advertised size " +
                              advertisedSize);
      }
    }
    return digester==null ? null : new MD5Hash(digester.digest());
  }
View Full Code Here


    // Parse checkpoint-signature fields back from their string form.
    layoutVersion = Integer.valueOf(fields[0]);
    namespaceID = Integer.valueOf(fields[1]);
    cTime = Long.valueOf(fields[2]);
    editsTime = Long.valueOf(fields[3]);
    checkpointTime = Long.valueOf(fields[4]);
    imageDigest = new MD5Hash(fields[5]);  // parses a 32-character hex string
  }
View Full Code Here
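
The new MD5Hash(fields[5]) call above relies on the hex-string constructor; toString() is its inverse. A quick round-trip sketch:

import org.apache.hadoop.io.MD5Hash;

MD5Hash h = MD5Hash.digest("payload");   // any input
String hex = h.toString();               // 32-character hex string
MD5Hash parsed = new MD5Hash(hex);       // same constructor as new MD5Hash(fields[5])
assert parsed.equals(h);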

    // Binary counterpart: read the same checkpoint fields from a DataInput.
    layoutVersion = in.readInt();
    namespaceID = in.readInt();
    cTime = in.readLong();
    editsTime = in.readLong();
    checkpointTime = in.readLong();
    imageDigest = new MD5Hash();
    imageDigest.readFields(in);  // MD5Hash is Writable: 16 raw digest bytes
    checkpointState = CheckpointStates.deserialize(in.readInt());
  }
View Full Code Here
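
The readFields call above works because MD5Hash is a Writable that carries the 16 raw digest bytes. A minimal round trip through Hadoop's in-memory buffers, as a sketch:

import org.apache.hadoop.io.DataInputBuffer;
import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.MD5Hash;

DataOutputBuffer out = new DataOutputBuffer();
MD5Hash original = MD5Hash.digest("some bytes");
original.write(out);                      // writes the 16 digest bytes

DataInputBuffer in = new DataInputBuffer();
in.reset(out.getData(), out.getLength());
MD5Hash copy = new MD5Hash();
copy.readFields(in);                      // same pattern as imageDigest above
assert copy.equals(original);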

          if (locatedblocks.size() > 1 && i == 0) {
            crcPerBlock = cpb;
          }

          // Read this block's MD5 from the datanode reply and append its
          // 16 raw bytes to the running buffer.
          final MD5Hash md5 = MD5Hash.read(in);
          md5.write(md5out);

          done = true;

          if (LOG.isDebugEnabled()) {
            if (i == 0) {
              LOG.debug("set bytesPerCRC=" + bytesPerCRC
                  + ", crcPerBlock=" + crcPerBlock);
            }
            LOG.debug("got reply from " + datanodes[j].getName()
                + ": md5=" + md5);
          }
        } catch (IOException ie) {
          LOG.warn("src=" + src + ", datanodes[" + j + "].getName()="
              + datanodes[j].getName(), ie);
        } finally {
          IOUtils.closeStream(in);
          IOUtils.closeStream(out);
          IOUtils.closeSocket(sock);
        }
      }

      if (!done) {
        throw new IOException("Fail to get block MD5 for " + block);
      }
    }

    // Compute the file-level MD5 over the concatenated block MD5s.
    final MD5Hash fileMD5 = MD5Hash.digest(md5out.getData());
    return new MD5MD5CRC32FileChecksum(bytesPerCRC, crcPerBlock, fileMD5);
  }
View Full Code Here
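
The checksum above is a composition: each block's MD5 is appended to md5out, and the file-level digest is the MD5 of that concatenation. A standalone sketch of the same composition, with literal strings standing in for real block checksum data:

import org.apache.hadoop.io.DataOutputBuffer;
import org.apache.hadoop.io.MD5Hash;

DataOutputBuffer md5out = new DataOutputBuffer();
for (String blockData : new String[] {"block1", "block2", "block3"}) {
  MD5Hash blockMD5 = MD5Hash.digest(blockData);  // stands in for MD5Hash.read(in)
  blockMD5.write(md5out);                        // append 16 raw digest bytes
}
// File-level digest = MD5 over the concatenated per-block MD5s. The
// (data, start, len) overload digests only the bytes actually written.
MD5Hash fileMD5 = MD5Hash.digest(md5out.getData(), 0, md5out.getLength());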

        if (shared) {
          if (!fs.exists(libjarsDir)) {
            FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms);
          }

          // Fingerprint the local jar so the shared-cache copy can be
          // content-addressed by its MD5.
          MD5Hash md5hash = MD5Hash.digest(new
              FileInputStream(originalJarPath.toUri().getPath()));
          uploadJarPath = copyRemoteFiles(fs, libjarsDir, originalJarPath, job,
              replication, md5hash.toString());
          URI pathURI = new URI(uploadJarPath.toUri().toString());

          DistributedCache.addSharedArchiveToClassPath(uploadJarPath, job);
          fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
                md5hash.getFileLength(), 0));
        } else {
          // Otherwise we copy the jar to the JobTracker's filesystem
          uploadJarPath = new Path(uploadFileDir, "job.jar");
          fs.copyFromLocalFile(originalJarPath, uploadJarPath);
        }
      } catch (URISyntaxException ue) {
        // should not throw a URI exception
        throw new IOException("Failed to create uri for " + originalJar);
      }

      job.setJar(uploadJarPath.toString());
      fs.setReplication(uploadJarPath, replication);

      try {
        fs.setPermission(uploadJarPath, new FsPermission(JOB_FILE_PERMISSION));
      } catch (IOException ioe) {
        LOG.warn("Unable to set job jar permission", ioe);
      }
    } else {
      LOG.warn("No job jar file set. User classes may not be found. "+
          "See JobConf(Class) or JobConf#setJar(String).");
    }

    // Add all the command-line files, jars, and archives:
    // first copy them to the jobtracker's filesystem.

    if (files != null) {
      if (!fs.exists(filesDir)) {
        FileSystem.mkdirs(fs, filesDir, mapredSysPerms);
      }

      String[] fileArr = files.split(",");
      for (String tmpFile: fileArr) {
        Path tmp = new Path(tmpFile);

        Path newPath;
        FileStatus fStatus = null;
        MD5Hash md5hash = null;
        try {
          if (shared) {
            md5hash
              = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath()));
            newPath = copyRemoteFiles(fs, filesDir, tmp, job, replication,
                md5hash.toString());

            URI pathURI = new URI(newPath.toUri().toString() + "#" +
                newPath.getName());

            DistributedCache.addSharedCacheFile(pathURI, job);
            fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
                  md5hash.getFileLength(),
                  0));

          } else {
            newPath = copyRemoteFiles(fs, filesDir, tmp, job, replication);
            fStatus = DistributedCache.getFileStatus(job, newPath.toUri());

            URI pathURI = new URI(newPath.toUri().toString() + "#" +
                newPath.getName());

            DistributedCache.addCacheFile(pathURI, job);
            fileInfo.put(pathURI, new FileInfo(null,
                  fStatus.getLen(),
                  fStatus.getModificationTime()));
          }

        } catch (URISyntaxException ue) {
          // should not throw a URI exception
          throw new IOException("Failed to create uri for " + tmpFile);
        }
        DistributedCache.createSymlink(job);
      }
    }

    if (libjars != null) {
      if (!fs.exists(libjarsDir)) {
        FileSystem.mkdirs(fs, libjarsDir, mapredSysPerms);
      }

      String[] libjarsArr = libjars.split(",");
      for (String tmpjars: libjarsArr) {
        Path tmp = new Path(tmpjars);

        Path newPath;
        if (shared) {
          MD5Hash md5hash
            = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath()));
          newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication,
              md5hash.toString());
          DistributedCache.addSharedArchiveToClassPath(newPath, job);

          fileInfo.put(newPath.makeQualified(newPath.getFileSystem(job)).toUri(),
                       new FileInfo(md5hash.toString(),
                                    md5hash.getFileLength(),
                                    0));
        } else {
          newPath = copyRemoteFiles(fs, libjarsDir, tmp, job, replication);
          DistributedCache.addArchiveToClassPath(newPath, job);

          FileStatus fStatus = DistributedCache.getFileStatus(job,
              newPath.toUri());
          fileInfo.put(newPath.makeQualified(newPath.getFileSystem(job)).toUri(),
                       new FileInfo(null,
                                    fStatus.getLen(),
                                    fStatus.getModificationTime()));
        }
      }
    }

    if (archives != null) {
      if (!fs.exists(archivesDir)) {
        FileSystem.mkdirs(fs, archivesDir, mapredSysPerms);
      }

      String[] archivesArr = archives.split(",");
      for (String tmpArchives: archivesArr) {
        Path tmp = new Path(tmpArchives);

        Path newPath;
        MD5Hash md5hash = null;
        FileStatus fStatus = null;
        try {
          if (shared) {
            md5hash
              = MD5Hash.digest(new FileInputStream(tmp.toUri().getPath()));
            newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication,
                md5hash.toString());
            URI pathURI = new URI(newPath.toUri().toString() + "#" +
                newPath.getName());

            DistributedCache.addSharedCacheArchive(pathURI, job);
            fileInfo.put(pathURI, new FileInfo(md5hash.toString(),
                  md5hash.getFileLength(),
                  0));

          } else {
            newPath = copyRemoteFiles(fs, archivesDir, tmp, job, replication);
            fStatus = DistributedCache.getFileStatus(job, newPath.toUri());
View Full Code Here
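
A recurring step in the job-submission code above is fingerprinting a local file with MD5Hash.digest(InputStream) so the shared-cache copy can be content-addressed by its hex digest. A minimal sketch of just that step, with a hypothetical path and the stream closed explicitly:

import java.io.FileInputStream;
import org.apache.hadoop.io.MD5Hash;

FileInputStream in = new FileInputStream("/tmp/job.jar");  // hypothetical local path
try {
  MD5Hash md5hash = MD5Hash.digest(in);    // reads the stream to EOF
  String remoteName = md5hash.toString();  // 32-char hex key for the shared cache
  System.out.println("would upload under " + remoteName);
} finally {
  in.close();
}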

      flushSectionOutputStream();

      // Write the summary, close the image file, and record the MD5 that
      // the digester accumulated over everything written to the stream.
      FileSummary summary = b.build();
      saveFileSummary(underlyingOutputStream, summary);
      underlyingOutputStream.close();
      savedDigest = new MD5Hash(digester.digest());
    }
View Full Code Here
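
The digester in this snippet is an MD5 MessageDigest that has been fed every byte written to the image file; new MD5Hash(digester.digest()) then captures the result. A sketch of that wiring with java.security.DigestOutputStream, using a hypothetical file name:

import java.io.FileOutputStream;
import java.security.DigestOutputStream;
import java.security.MessageDigest;
import org.apache.hadoop.io.MD5Hash;

MessageDigest digester = MD5Hash.getDigester();
DigestOutputStream out = new DigestOutputStream(
    new FileOutputStream("/tmp/fsimage.ckpt"), digester);  // hypothetical path
out.write(new byte[] {1, 2, 3});  // every write also updates the digest
out.close();
MD5Hash savedDigest = new MD5Hash(digester.digest());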

        NameNodeDirType.IMAGE, fileName);
    if (dstFiles.isEmpty()) {
      throw new IOException("No targets in destination storage!");
    }
   
    // Download the file and, when a digest was requested, return its MD5.
    MD5Hash hash = getFileClient(fsName, fileid, dstFiles, dstStorage, needDigest);
    LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size " +
        dstFiles.get(0).length() + " bytes.");
    return hash;
  }
View Full Code Here

    List<File> dstFiles = dstStorage.getFiles(NameNodeDirType.IMAGE, fileName);
    if (dstFiles.isEmpty()) {
      throw new IOException("No targets in destination storage!");
    }

    MD5Hash advertisedDigest = parseMD5Header(request);
    // Receive the upload, verifying both the advertised size and digest.
    MD5Hash hash = receiveFile(fileName, dstFiles, dstStorage, true,
        advertisedSize, advertisedDigest, fileName, stream, throttler);
    LOG.info("Downloaded file " + dstFiles.get(0).getName() + " size "
        + dstFiles.get(0).length() + " bytes.");
    return hash;
  }
View Full Code Here

      advertisedSize = Long.parseLong(contentLength);
    } else {
      throw new IOException(CONTENT_LENGTH + " header is not provided " +
                            "by the namenode when trying to fetch " + url);
    }
    // The expected digest is carried in an HTTP header alongside the image.
    MD5Hash advertisedDigest = parseMD5Header(connection);
    String fsImageName = connection
        .getHeaderField(ImageServlet.HADOOP_IMAGE_EDITS_HEADER);
    InputStream stream = connection.getInputStream();

    return receiveFile(url.toExternalForm(), localPaths, dstStorage,
View Full Code Here

    long xferKb = received / 1024;
    LOG.info(String.format("Transfer took %.2fs at %.2f KB/s",
        xferSec, xferKb / xferSec));

    if (digester != null) {
      MD5Hash computedDigest = new MD5Hash(digester.digest());

      // Reject the transfer if the digest computed over the received bytes
      // does not match the one the sender advertised.
      if (advertisedDigest != null &&
          !computedDigest.equals(advertisedDigest)) {
        throw new IOException("File " + url + " computed digest " +
            computedDigest + " does not match advertised digest " +
            advertisedDigest);
      }
      return computedDigest;
View Full Code Here
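
The same verify-after-transfer check can be applied to a file already on disk: compute its MD5Hash and compare with equals() against the advertised digest. A sketch with a hypothetical path, using the well-known MD5 of "hello" as the advertised value:

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.io.MD5Hash;

MD5Hash advertised = new MD5Hash("5d41402abc4b2a76b9719d911017c592"); // MD5("hello")
FileInputStream in = new FileInputStream("/tmp/downloaded.bin");      // hypothetical
try {
  MD5Hash computed = MD5Hash.digest(in);
  if (!computed.equals(advertised)) {
    throw new IOException("computed digest " + computed +
        " does not match advertised digest " + advertised);
  }
} finally {
  in.close();
}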
