Package org.apache.hadoop.raid.protocol

Examples of org.apache.hadoop.raid.protocol.PolicyInfo


          if (!"policy".equalsIgnoreCase(policy.getTagName())) {
            throw new RaidConfigurationException("Bad configuration file: " +
              "Expecting <policy> for srcPath " + srcPathPrefix);
          }
          String policyName = policy.getAttribute("name");
          PolicyInfo pinfo = new PolicyInfo(policyName, conf);
          pinfo.setSrcPath(srcPathPrefix);
          policyList.add(pinfo);

          // loop through all the properties of this policy
          NodeList properties = policy.getChildNodes();
          for (int k = 0; k < properties.getLength(); k++) {
            Node node2 = properties.item(k);
            if (!(node2 instanceof Element)) {
              continue;
            }
            Element property = (Element)node2;
            String propertyName = property.getTagName();
            if ("destPath".equalsIgnoreCase(propertyName)) {
              String text = ((Text)property.getFirstChild()).getData().trim();
              LOG.info(policyName + ".destPath = " + text);
              pinfo.setDestinationPath(text);
            } else if ("description".equalsIgnoreCase(propertyName)) {
              String text = ((Text)property.getFirstChild()).getData().trim();
              pinfo.setDescription(text);
            } else if ("property".equalsIgnoreCase(propertyName)) {
              NodeList nl = property.getChildNodes();
              String pname = null, pvalue = null;
              for (int l = 0; l < nl.getLength(); l++) {
                Node node3 = nl.item(l);
                if (!(node3 instanceof Element)) {
                  continue;
                }
                Element item = (Element) node3;
                String itemName = item.getTagName();
                if ("name".equalsIgnoreCase(itemName)){
                  pname = ((Text)item.getFirstChild()).getData().trim();
                } else if ("value".equalsIgnoreCase(itemName)){
                  pvalue = ((Text)item.getFirstChild()).getData().trim();
                }
              }
              if (pname != null && pvalue != null) {
                LOG.info(policyName + "." + pname + " = " + pvalue);
                pinfo.setProperty(pname,pvalue);
              }
            } else {
              LOG.info("Found bad property " + propertyName +
                       " for srcPath" + srcPathPrefix +
                       " policy name " + policyName +
View Full Code Here
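For orientation, the parsing loop above amounts to filling in a PolicyInfo by hand. The sketch below does that directly, using only the constructor and setters that appear in the snippets on this page; the policy name, paths, and property values are placeholders rather than anything from a real raid.xml.

    // Sketch: build by hand roughly what the <policy> parsing loop above produces.
    // Assumes org.apache.hadoop.conf.Configuration and PolicyInfo are on the classpath.
    static PolicyInfo buildExamplePolicy(Configuration conf) {
      PolicyInfo pinfo = new PolicyInfo("examplePolicy", conf);     // <policy name="...">
      pinfo.setSrcPath("/user/example/data");                       // srcPath prefix
      pinfo.setDestinationPath("/raid");                            // <destPath> (placeholder)
      pinfo.setDescription("raid files under /user/example/data");  // <description>
      pinfo.setProperty("targetReplication", "1");                  // nested <property> name/value pairs
      pinfo.setProperty("metaReplication", "1");
      return pinfo;
    }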


        continue;
      }
      Element policy = (Element)node;
      if ("policy".equalsIgnoreCase(policy.getTagName())) {
        String policyName = policy.getAttribute("name");
        PolicyInfo curr = new PolicyInfo(policyName, conf);
        PolicyInfo parent = null;
        NodeList policyElements = policy.getChildNodes();
        for (int j = 0; j < policyElements.getLength(); j++) {
          Node node1 = policyElements.item(j);
          if (!(node1 instanceof Element)) {
            continue;
          }
          Element property = (Element) node1;
          String propertyName = property.getTagName();
          if ("srcPath".equalsIgnoreCase(propertyName)) {
            String srcPathPrefix = property.getAttribute("prefix");
            if (srcPathPrefix != null && srcPathPrefix.length() > 0) {
              curr.setSrcPath(srcPathPrefix);
            }
          } else if ("fileList".equalsIgnoreCase(propertyName)) {
            String text = ((Text)property.getFirstChild()).getData().trim();
            LOG.info(policyName + ".fileList = " + text);
            curr.setFileListPath(new Path(text));
          } else if ("codecId".equalsIgnoreCase(propertyName)) {
            String text = ((Text)property.getFirstChild()).getData().trim();
            LOG.info(policyName + ".codecId = " + text);
            curr.setCodecId(text);
          } else if ("shouldRaid".equalsIgnoreCase(propertyName)) {
            String text = ((Text)property.getFirstChild()).getData().trim();
            curr.setShouldRaid(Boolean.parseBoolean(text));
          } else if ("description".equalsIgnoreCase(propertyName)) {
            String text = ((Text)property.getFirstChild()).getData().trim();
            curr.setDescription(text);
          } else if ("parentPolicy".equalsIgnoreCase(propertyName)) {
            String text = ((Text)property.getFirstChild()).getData().trim();
            parent = existingPolicies.get(text);
          } else if ("property".equalsIgnoreCase(propertyName)) {
            NodeList nl = property.getChildNodes();
            String pname = null, pvalue = null;
            for (int l = 0; l < nl.getLength(); l++) {
              Node node3 = nl.item(l);
              if (!(node3 instanceof Element)) {
                continue;
              }
              Element item = (Element) node3;
              String itemName = item.getTagName();
              if ("name".equalsIgnoreCase(itemName)){
                pname = ((Text)item.getFirstChild()).getData().trim();
              } else if ("value".equalsIgnoreCase(itemName)){
                pvalue = ((Text)item.getFirstChild()).getData().trim();
              }
            }
            if (pname != null && pvalue != null) {
              LOG.info(policyName + "." + pname + " = " + pvalue);
              curr.setProperty(pname,pvalue);
            }
          } else {
            LOG.info("Found bad property " + propertyName +
                     " policy name " + policyName +
                     ". Ignoring.");
          }
        }  // done with all properties of this policy
        PolicyInfo pinfo;
        if (parent != null) {
          pinfo = new PolicyInfo(policyName, conf);
          pinfo.copyFrom(parent);
          pinfo.copyFrom(curr);
        } else {
          pinfo = curr;
        }
        if (pinfo.getSrcPath() != null || pinfo.getFileListPath() != null) {
          all.add(pinfo);
        }
        existingPolicies.put(policyName, pinfo);
      }
    }
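The parentPolicy branch above layers a child policy over a previously loaded parent by copying both into a fresh PolicyInfo, parent first. A minimal sketch of that merge, with placeholder names:

    // Sketch: merge a child policy with its parent in the same order as the
    // loader above (parent settings first, then the child's own settings).
    static PolicyInfo mergeWithParent(Configuration conf, String childName,
                                      PolicyInfo parent, PolicyInfo child) {
      PolicyInfo merged = new PolicyInfo(childName, conf);
      merged.copyFrom(parent);
      merged.copyFrom(child);
      return merged;
    }

As in the loader, a merged policy is only worth keeping if it ends up with either a srcPath or a fileList path.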

    setAllPolicies(newAllPolicies);
  }
 
  private void loadPolicyInfoFromJSON(JSONObject json,
      Collection<PolicyInfo> policies) throws JSONException, IOException {
    PolicyInfo policyInfo = new PolicyInfo(json.getString("name"), conf);
    String key = null;
    String stringVal = null;
    Object value = null;
    for (Iterator<?> keys = json.keys(); keys.hasNext();) {
      key = (String) keys.next();
      if (key == null || key.equals("")) continue;
      value = json.get(key);

      if (value instanceof String) {
        stringVal = (String) value;
      } else if (value instanceof Integer || value instanceof Long ||
                 value instanceof Double || value instanceof Boolean) {
        stringVal = value.toString();
      } else {
        LOG.warn("unsupported value in json object: " + value);
      }

      if (key.equals("description")) {
        policyInfo.setDescription(stringVal);
      } else if (key.equals("codecId")) {
        policyInfo.setCodecId(stringVal);
      } else if (key.equals("fileListPath")) {
        policyInfo.setFileListPath(new Path(stringVal));
      } else if (key.equals("srcPath")) {
        policyInfo.setSrcPath(stringVal);
      } else if (key.equals("shouldRaid")) {
        policyInfo.setShouldRaid((Boolean)value);
      } else {
        policyInfo.setProperty(key, stringVal);
      }
    }
    policies.add(policyInfo);
  }
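A JSON policy accepted by this loader only needs the keys the method inspects; any other key is stored as a generic property. Below is a minimal sketch of building such an object, assuming the org.json classes that the JSONObject/JSONException types in the signature suggest; all values are placeholders.

    // Sketch: a JSON object carrying the keys read by loadPolicyInfoFromJSON.
    static JSONObject exampleJsonPolicy() throws JSONException {
      JSONObject json = new JSONObject();
      json.put("name", "examplePolicy");         // used for the PolicyInfo constructor
      json.put("srcPath", "/user/example/data"); // or "fileListPath" for a file list
      json.put("codecId", "rs");
      json.put("description", "example JSON policy");
      json.put("shouldRaid", true);
      json.put("targetReplication", "1");        // unrecognized keys become properties
      return json;
    }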

      dirOut.close();

      cnode = RaidNode.createRaidNode(conf);
      PolicyInfo[] infos = cnode.getAllPolicies();
      assertEquals("we should have only one policy", 1, infos.length);
      PolicyInfo info = infos[0];
      cnode.triggerMonitor.putPolicyInfo(info);
      List<FileStatus> list1 = cnode.triggerMonitor.readFileList(info);
      assertEquals("Only one directory is selected", 1, list1.size());
      assertEquals("/user/rvadali/dir-raidtest/1",
          list1.get(0).getPath().toUri().getPath());
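Reading policies back out of a RaidNode, as this test does with createRaidNode and getAllPolicies, only needs the PolicyInfo getters that appear elsewhere on this page. A minimal sketch:

    // Sketch: list the policies a RaidNode has loaded.
    static void listPolicies(RaidNode cnode) {
      for (PolicyInfo info : cnode.getAllPolicies()) {
        System.out.println("srcPath=" + info.getSrcPath() +
            " fileList=" + info.getFileListPath() +
            " codecId=" + info.getCodecId());
      }
    }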

    mySetup(1, 1);
    Path dir = new Path("/user/test/raidtest");
    Path file1 = new Path(dir + "/file1");
    HashMap<String, PolicyInfo> infos = new HashMap<String, PolicyInfo>();
    for (Codec code: Codec.getCodecs()) {
      PolicyInfo pi = new PolicyInfo("testPurgePreference", conf);
      pi.setSrcPath("/user/test/raidtest");
      pi.setCodecId(code.id);
      pi.setDescription("test policy");
      pi.setProperty("targetReplication", "1");
      pi.setProperty("metaReplication", "1");
      infos.put(code.id, pi);
    }
   
    try {
      LOG.info("Create a old file");
      TestRaidNode.createOldFile(fileSys, file1, 1, 9, 8192L);
      FileStatus stat = fileSys.getFileStatus(file1);
      FileStatus dirStat = fileSys.getFileStatus(dir);
      HashMap<String, Path> parityFiles = new HashMap<String, Path>();
      // Create the parity files.
      LOG.info("Start Raiding");
      for (PolicyInfo pi: infos.values()){
        Codec code = Codec.getCodec(pi.getCodecId());
        FileStatus fsStat = code.isDirRaid ? dirStat : stat;
        RaidNode.doRaid(
          conf, pi, fsStat, new RaidNode.Statistics(), Reporter.NULL);
        Path parity = RaidNode.getOriginalParityFile(new Path(code.parityDirectory),
              fsStat.getPath());
        assertTrue(fileSys.exists(parity));
        parityFiles.put(pi.getCodecId(), parity);
      }
      LOG.info("Finished Raiding");
      // Check purge of a single parity file.
      PurgeMonitor purgeMonitor = new PurgeMonitor(conf, null, null);
      LOG.info("Purge dir-rs");
      purgeMonitor.purgeCode(Codec.getCodec("dir-rs"));
      // Calling purge under the Dir-RS path has no effect.
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
      assertTrue(fileSys.exists(parityFiles.get("rs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge rs");
      purgeMonitor.purgeCode(Codec.getCodec("rs"));
      // Calling purge under the rs path will delete rs
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
      assertFalse(fileSys.exists(parityFiles.get("rs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge dir-xor");
      purgeMonitor.purgeCode(Codec.getCodec("dir-xor"));
      // Calling purge under the Dir-xor path will delete dir-xor
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
      assertFalse(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge xor");
      purgeMonitor.purgeCode(Codec.getCodec("xor"));
      assertFalse(fileSys.exists(parityFiles.get("xor")));
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-rs")));
     
      LOG.info("delete dir-rs parity file");
      fileSys.delete(parityFiles.get("dir-rs"), true);
      assertFalse(fileSys.exists(parityFiles.get("dir-rs")));
     
      // Recreate the RS, Dir-XOR, and XOR parity files.
      LOG.info("Raid rs");
      RaidNode.doRaid(
          conf, infos.get("rs"), stat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("rs")));
      LOG.info("Raid dir-xor");
      RaidNode.doRaid(
          conf, infos.get("dir-xor"), dirStat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      LOG.info("Raid xor");
      RaidNode.doRaid(
          conf, infos.get("xor"), stat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge dir-xor");
      purgeMonitor.purgeCode(Codec.getCodec("dir-xor"));
      // This time the purge removes nothing; dir-xor and the others remain.
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("rs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge rs");
      purgeMonitor.purgeCode(Codec.getCodec("rs"));
      // Purging rs removes the rs parity; the others remain.
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertFalse(fileSys.exists(parityFiles.get("rs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("Purge testrs");
      purgeMonitor.purgeCode(Codec.getCodec("testrs"));
      // Purging testrs removes the testrs parity; the others remain.
      assertFalse(fileSys.exists(parityFiles.get("testrs")));
      assertTrue(fileSys.exists(parityFiles.get("dir-xor")));
      assertTrue(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("delete dir-xor parity file");
      fileSys.delete(parityFiles.get("dir-xor"), true);
      assertFalse(fileSys.exists(parityFiles.get("dir-xor")));
     
      LOG.info("Raid rs");
      RaidNode.doRaid(
          conf, infos.get("rs"), stat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("rs")));
     
      LOG.info("Purge xor");
      purgeMonitor.purgeCode(Codec.getCodec("xor"));
      assertTrue(fileSys.exists(parityFiles.get("rs")));
      assertFalse(fileSys.exists(parityFiles.get("xor")));
     
      LOG.info("delete rs");
      fileSys.delete(parityFiles.get("rs"), true);
      assertFalse(fileSys.exists(parityFiles.get("testrs")));
      LOG.info("Raid testrs");
      RaidNode.doRaid(
          conf, infos.get("testrs"), stat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      LOG.info("Raid xor");
      RaidNode.doRaid(
          conf, infos.get("xor"), stat, new RaidNode.Statistics(),
          Reporter.NULL);
      assertTrue(fileSys.exists(parityFiles.get("xor")));
      LOG.info("Purge xor");
      purgeMonitor.purgeCode(Codec.getCodec("xor"));
      assertTrue(fileSys.exists(parityFiles.get("testrs")));
      assertFalse(fileSys.exists(parityFiles.get("xor")));
      LOG.info("delete testrs");
      fileSys.delete(parityFiles.get("testrs"), true);
     
      // The remaining checks cover purging of parity HAR files.
      
      Path rsParity = parityFiles.get("rs");
      Path xorParity = parityFiles.get("xor");
      PolicyInfo infoXor = infos.get("xor");
      PolicyInfo infoRs = infos.get("rs");
      // Now check the purge of a parity har.
      // Delete the RS parity for now.
      fileSys.delete(rsParity, true);
      // Recreate the XOR parity.
      Path xorHar = new Path("/raid", "user/test/raidtest/raidtest" +
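The purge test above is long; distilled, the per-codec raiding step it repeats looks like the sketch below. It reuses the doRaid, Codec, and getOriginalParityFile calls from the test; the policy name and replication values are placeholders, and the checked exceptions are collapsed into a plain throws clause.

    // Sketch: raid one source with a given codec and return its parity path,
    // mirroring the doRaid/getOriginalParityFile pattern in the test above.
    static Path raidOne(Configuration conf, FileSystem fs, Path file,
                        String codecId) throws Exception {
      PolicyInfo pi = new PolicyInfo("examplePolicy", conf);
      pi.setSrcPath(file.getParent().toString());
      pi.setCodecId(codecId);
      pi.setProperty("targetReplication", "1");
      pi.setProperty("metaReplication", "1");

      Codec codec = Codec.getCodec(codecId);
      // Directory codecs raid the parent directory, file codecs raid the file itself.
      FileStatus stat = fs.getFileStatus(codec.isDirRaid ? file.getParent() : file);
      RaidNode.doRaid(conf, pi, stat, new RaidNode.Statistics(), Reporter.NULL);
      return RaidNode.getOriginalParityFile(new Path(codec.parityDirectory),
          stat.getPath());
    }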

    newConf.setLong("raid.encoder.bufsize", 4096);
    RaidNode.createChecksumStore(newConf, true);
    Path raidDir = new Path("/raidtest/1");
    HashMap<Codec, Long[]> fileCRCs = new HashMap<Codec, Long[]>();
    HashMap<Codec, Path> filePaths = new HashMap<Codec, Path>();
    PolicyInfo info = new PolicyInfo();
    info.setProperty("targetReplication", Integer.toString(targetReplication));
    info.setProperty("metaReplication", Integer.toString(metaReplication));
    try {
      createTestFiles(raidDir, filePaths, fileCRCs, null);
      LOG.info("Test testBlockMissingExceptionDuringEncoding created test files");
      // create the InjectionHandler
      for (Codec codec: Codec.getCodecs()) {
        Path filePath = filePaths.get(codec);
        FileStatus stat = fileSys.getFileStatus(filePath);
        info.setCodecId(codec.id);
        boolean succeed = false;
        TestEncodingHandler h = new TestEncodingHandler(0.5, null, true);
        InjectionHandler.set(h);
        succeed = runEncodingTasks(newConf, codec, stat, info, 100);

    Path raidDir = new Path("/raidtest/1");
    HashMap<Codec, Long[]> fileCRCs = new HashMap<Codec, Long[]>();
    HashMap<Codec, Path> filePaths = new HashMap<Codec, Path>();
    HashMap<InjectionEventI, Double> specialFailProbs =
        new HashMap<InjectionEventI, Double>();
    PolicyInfo info = new PolicyInfo();
    info.setProperty("targetReplication", Integer.toString(targetReplication));
    info.setProperty("metaReplication", Integer.toString(metaReplication));
    try {
      createTestFiles(raidDir, filePaths, fileCRCs, null);
      LOG.info("Test testRetryTask created test files");
      for (Codec codec: Codec.getCodecs()) {
        Path filePath = filePaths.get(codec);
        FileStatus stat = fileSys.getFileStatus(filePath);
        info.setCodecId(codec.id);
       
        LOG.info("Codec: " + codec + ", Path: " + filePath +
                 " Sync every task to the finalize stage, " +
                 "all partial parity files are generated");
        specialFailProbs.clear();

    Configuration newConf = new Configuration(conf);
    RaidNode.createChecksumStore(newConf, true);
    Path raidDir = new Path("/raidtest/1");
    HashMap<Codec, Long[]> fileCRCs = new HashMap<Codec, Long[]>();
    HashMap<Codec, Path> filePaths = new HashMap<Codec, Path>();
    PolicyInfo info = new PolicyInfo();
    info.setProperty("targetReplication", Integer.toString(targetReplication));
    info.setProperty("metaReplication", Integer.toString(metaReplication));
    try {
      createTestFiles(raidDir, filePaths, fileCRCs, null);
      LOG.info("Test testLargeFailureRateEncoding created test files");
      // create the InjectionHandler
      for (Codec codec: Codec.getCodecs()) {
        Path filePath = filePaths.get(codec);
        FileStatus stat = fileSys.getFileStatus(filePath);
        info.setCodecId(codec.id);
        boolean succeed = false;
        TestEncodingHandler h = new TestEncodingHandler(0.5, null, false);
        InjectionHandler.set(h);
        succeed = runEncodingTasks(newConf, codec, stat, info, 100);
        assertTrue("We should succeed", succeed);

      FileSystem fs = srcs.getFileSystem(job);

      List<FileSplit> splits = new ArrayList<FileSplit>(numSplits);

      Text key = new Text();
      PolicyInfo value = new PolicyInfo();
      SequenceFile.Reader in = null;
      long prev = 0L;
      int count = 0; // count src
      try {
        for (in = new SequenceFile.Reader(fs, srcs, job); in.next(key, value);) {
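The reader above consumes Text/PolicyInfo pairs from a SequenceFile, which implies PolicyInfo is Writable. The writing side would look roughly like the sketch below (with the usual org.apache.hadoop.io and java.util imports); it uses the same-era SequenceFile API as the reader constructor above, and the key text and file path are guesses, since the excerpt does not show how the file is produced.

    // Sketch: write Text/PolicyInfo pairs in the shape the reader above expects.
    static void writePolicies(FileSystem fs, Configuration conf, Path srcs,
                              List<PolicyInfo> policies) throws IOException {
      SequenceFile.Writer out = SequenceFile.createWriter(
          fs, conf, srcs, Text.class, PolicyInfo.class);
      try {
        for (PolicyInfo info : policies) {
          // Keying by source path is an assumption, not taken from the code above.
          out.append(new Text(String.valueOf(info.getSrcPath())), info);
        }
      } finally {
        out.close();
      }
    }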

          purgeCode(c);
          try {
            // re-generate the parity files for modified sources.
            if (modifiedSource.size() > 0) {
              LOG.info("re-generate parity files");
              PolicyInfo info = raidNode.determinePolicy(c);
              // check if we should raid the files/directories.
              for (Iterator<FileStatus> it = modifiedSource.iterator();
                      it.hasNext();) {
                FileStatus stat = it.next();
                if (!RaidNode.shouldRaid(conf,
