Package org.tmatesoft.hg.repo

Examples of org.tmatesoft.hg.repo.HgChangelog$RawCsetParser


      Pool<Path> cacheFiles = new Pool<Path>();

      Internals implRepo = Internals.getInstance(repo);
      final DirstateBuilder dirstateBuilder = new DirstateBuilder(implRepo);
      dirstateBuilder.fillFrom(new DirstateReader(implRepo, new Path.SimpleSource(repo.getSessionContext().getPathFactory(), cacheFiles)));
      final HgChangelog clog = repo.getChangelog();
      final Nodeid headCset1 = clog.getRevision(firstCset);
      dirstateBuilder.parents(headCset1, clog.getRevision(secondCset));
      //
      MergeStateBuilder mergeStateBuilder = new MergeStateBuilder(implRepo);
      mergeStateBuilder.prepare(headCset1);

      ManifestRevision m1, m2, ma;
View Full Code Here


      assertTrue(commitCmd.execute().isOk());
      Nodeid newCommit = commitCmd.getCommittedRevision();
      //
      new HgPushCommand(srcRepo).destination(dstRemote).execute();
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      final HgChangelog srcClog = srcRepo.getChangelog();
      final HgChangelog dstClog = dstRepo.getChangelog();
      // refresh PhasesHelper
      phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
      // check if phase didn't change
      errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(newCommit), newCommit));
      for (Nodeid n : allDraft) {
        // check drafts from src were actually pushed to dst
        errorCollector.assertTrue(dstClog.isKnown(n));
        // check drafts didn't change their phase
        errorCollector.assertEquals(HgPhase.Draft, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
      }
    } finally {
      server.stop();
View Full Code Here

      errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r6, null));
      // r8 is secret on server, locally can't make it less exposed though
      errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
      //
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      final HgChangelog dstClog = dstRepo.getChangelog();
      assertTrue(dstClog.isKnown(newCommit));
      PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
      errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(newCommit), newCommit));
      // the one that was secret is draft now
      errorCollector.assertEquals(HgPhase.Draft, srcPhase.getPhase(r8, null));
    } finally {
      server.stop();
    }
View Full Code Here

      //
      // make sure pushed repository got same draft root
      final Nodeid r4PublicHead = srcRepo.getChangelog().getRevision(r4);
      final Nodeid r5DraftRoot = srcRepo.getChangelog().getRevision(r5);
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      final HgChangelog dstClog = dstRepo.getChangelog();
      PhasesHelper dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
      assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r4PublicHead), r4PublicHead));
      assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
      //
      // now, graduate some local revisions, r5:draft->public, r6:secret->public, r9: secret->draft
      final ExecHelper srcRun = new ExecHelper(new OutputParser.Stub(), srcRepoLoc);
      srcRun.exec("hg", "phase", "--public", String.valueOf(r5));
      srcRun.exec("hg", "phase", "--public", String.valueOf(r6));
      srcRun.exec("hg", "phase", "--draft", String.valueOf(r9));
      // PhaseHelper shall be new for the command, and would pick up these external changes
      new HgPushCommand(srcRepo).destination(dstRemote).execute();
      final Nodeid r6Nodeid = srcRepo.getChangelog().getRevision(r6);
      final Nodeid r9Nodeid = srcRepo.getChangelog().getRevision(r9);
      // refresh
      dstPhase = new PhasesHelper(HgInternals.getImplementationRepo(dstRepo));
      // not errorCollector as subsequent code would fail if these secret revs didn't get into dst
      assertTrue(dstClog.isKnown(r6Nodeid));
      assertTrue(dstClog.isKnown(r9Nodeid));
      errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r5DraftRoot), r5DraftRoot));
      errorCollector.assertEquals(HgPhase.Public, dstPhase.getPhase(dstClog.getRevisionIndex(r6Nodeid), r6Nodeid));
      errorCollector.assertEquals(HgPhase.Draft, dstPhase.getPhase(dstClog.getRevisionIndex(r9Nodeid), r9Nodeid));
    } finally {
      server.stop();
    }
  }
View Full Code Here

      final RevisionSet allDraft = phaseHelper.allDraft();
      assertFalse("[sanity]", allDraft.isEmpty());
      // push all changes
      new HgPushCommand(srcRepo).destination(dstRemote).execute();
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      final HgChangelog srcClog = srcRepo.getChangelog();
      final HgChangelog dstClog = dstRepo.getChangelog();
      // refresh PhasesHelper
      phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
      for (Nodeid n : allDraft) {
        // check drafts from src were actually pushed to dst
        errorCollector.assertTrue(dstClog.isKnown(n));
        // check drafts became public
        errorCollector.assertEquals(HgPhase.Public, phaseHelper.getPhase(srcClog.getRevisionIndex(n), n));
      }
    } finally {
      server.stop();
View Full Code Here

      PhasesHelper phaseHelper = new PhasesHelper(HgInternals.getImplementationRepo(srcRepo));
      final RevisionSet allSecret = phaseHelper.allSecret();
      assertFalse("[sanity]", allSecret.isEmpty());
      new HgPushCommand(srcRepo).destination(dstRemote).execute();
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      final HgChangelog srcClog = srcRepo.getChangelog();
      final HgChangelog dstClog = dstRepo.getChangelog();
      errorCollector.assertEquals(srcClog.getRevisionCount() - allSecret.size(), dstClog.getRevisionCount());
      for (Nodeid n : allSecret) {   
        errorCollector.assertTrue(n.toString(), !dstClog.isKnown(n));
      }
    } finally {
      server.stop();
    }
  }
View Full Code Here

      //
      new HgPushCommand(srcRepo).destination(dstRemote).execute();
      Thread.sleep(300); // let the server perform the update
      //
      HgBookmarks srcBookmarks = srcRepo.getBookmarks();
      final HgChangelog srcClog = srcRepo.getChangelog();
      // first, check local bookmarks are intact
      errorCollector.assertEquals(srcClog.getRevision(bm2Local), srcBookmarks.getRevision(bm2));
      errorCollector.assertEquals(srcClog.getRevision(bm3Local), srcBookmarks.getRevision(bm3));
      errorCollector.assertEquals(null, srcBookmarks.getRevision(bm4));
      errorCollector.assertEquals(srcClog.getRevision(bm_4_5), srcBookmarks.getRevision(bm5));
      // now, check remote bookmarks were touched
      HgRepository dstRepo = hgLookup.detect(dstRepoLoc);
      HgBookmarks dstBookmarks = dstRepo.getBookmarks();
      final HgChangelog dstClog = dstRepo.getChangelog();
      // bm1 changed and points to newly pushed commit.
      // if the test fails (bm1 points to r8), chances are server didn't manage to update
      // bookmarks yet (there's Thread.sleep() above to give it a chance).
      errorCollector.assertEquals(commitCmd.getCommittedRevision(), dstBookmarks.getRevision(bm1));
      // bm2 didn't change
      errorCollector.assertEquals(dstClog.getRevision(bm2Remote), dstBookmarks.getRevision(bm2));
      // bm3 did change, now points to value we've got in srcRepo
      errorCollector.assertEquals(srcClog.getRevision(bm3Local), dstBookmarks.getRevision(bm3));
      // bm4 is not affected
      errorCollector.assertEquals(dstClog.getRevision(bm_4_5), dstBookmarks.getRevision(bm4));
      // bm5 is not known remotely
      errorCollector.assertEquals(null, dstBookmarks.getRevision(bm5));
    } finally {
      server.stop();
    }
View Full Code Here

    try {
      progress.start(100);
      //
      // find out missing
      // TODO refactor same code in HgOutgoingCommand #getComparator and #getParentHelper
      final HgChangelog clog = repo.getChangelog();
      final HgParentChildMap<HgChangelog> parentHelper = new HgParentChildMap<HgChangelog>(clog);
      parentHelper.init();
      final Internals implRepo = HgInternals.getImplementationRepo(repo);
      final PhasesHelper phaseHelper = new PhasesHelper(implRepo, parentHelper);
      final RepositoryComparator comparator = new RepositoryComparator(parentHelper, remoteRepo);
View Full Code Here

   * Approach 2: total 213, init: 63, iteration: 150
   * Approach 3: total 140
    */
  private void buildFile2ChangelogRevisionMap(String... fileNames) throws Exception {
    final HgRepository repository = new HgLookup().detect(new File("/home/artem/hg/cpython"));
    final HgChangelog clog = repository.getChangelog();
    // warm-up
    HgRevisionMap<HgChangelog> clogMap = new HgRevisionMap<HgChangelog>(clog).init();

    for (String fname : fileNames) {
      HgDataFile fileNode = repository.getFileNode(fname);
      // warm-up
      HgRevisionMap<HgDataFile> fileMap = new HgRevisionMap<HgDataFile>(fileNode).init();
      //
      final int latestRevision = fileNode.getLastRevision();
      //
      final long start_0 = System.nanoTime();
      final Map<Nodeid, Nodeid> changesetToNodeid_0 = new HashMap<Nodeid, Nodeid>();
      for (int fileRevisionIndex = 0; fileRevisionIndex <= latestRevision; fileRevisionIndex++) {
        Nodeid fileRevision = fileNode.getRevision(fileRevisionIndex);
        Nodeid changesetRevision = fileNode.getChangesetRevision(fileRevision);
        changesetToNodeid_0.put(changesetRevision, fileRevision);
      }
      final long end_0 = System.nanoTime();
      //
      final long start_1 = System.nanoTime();
      fileMap = new HgRevisionMap<HgDataFile>(fileNode).init();
      final long start_1a = System.nanoTime();
      final Map<Nodeid, Nodeid> changesetToNodeid_1 = new HashMap<Nodeid, Nodeid>();
      for (int revision = 0; revision <= latestRevision; revision++) {
        final Nodeid nodeId = fileMap.revision(revision);
        int localCset = fileNode.getChangesetRevisionIndex(revision);
        final Nodeid changesetId = clog.getRevision(localCset);
//        final Nodeid changesetId = fileNode.getChangesetRevision(nodeId);
        changesetToNodeid_1.put(changesetId, nodeId);
      }
      final long end_1 = System.nanoTime();
      //
      final long start_2 = System.nanoTime();
      clogMap = new HgRevisionMap<HgChangelog>(clog).init();
      fileMap = new HgRevisionMap<HgDataFile>(fileNode).init();
      final Map<Nodeid, Nodeid> changesetToNodeid_2 = new HashMap<Nodeid, Nodeid>();
      final long start_2a = System.nanoTime();
      for (int revision = 0; revision <= latestRevision; revision++) {
        Nodeid nidFile = fileMap.revision(revision);
        int localCset = fileNode.getChangesetRevisionIndex(revision);
        Nodeid nidCset = clogMap.revision(localCset);
        changesetToNodeid_2.put(nidCset, nidFile);
      }
      final long end_2 = System.nanoTime();
      Assert.assertEquals(changesetToNodeid_1, changesetToNodeid_2);
      //
      final long start_3 = System.nanoTime();
      final Map<Nodeid, Nodeid> changesetToNodeid_3 = new HashMap<Nodeid, Nodeid>();
      fileNode.indexWalk(0, TIP, new HgDataFile.RevisionInspector() {
 
        public void next(int fileRevisionIndex, Nodeid revision, int linkedRevisionIndex) throws HgRuntimeException {
          changesetToNodeid_3.put(clog.getRevision(linkedRevisionIndex), revision);
        }
      });
      final long end_3 = System.nanoTime();
      Assert.assertEquals(changesetToNodeid_1, changesetToNodeid_3);
      System.out.printf("%s, %d revisions\n", fname, 1+latestRevision);
View Full Code Here

   * each 2000'th revision, total 36 revision: 620 vs 270
   * each 3000'th revision, total 24 revision: 410 vs 275
   */
  public void revisionMap() throws Exception {
    final HgRepository repository = new HgLookup().detect(new File("/temp/hg/cpython"));
    final HgChangelog clog = repository.getChangelog();
    ArrayList<Nodeid> revisions = new ArrayList<Nodeid>();
    final int step = 5000;
    for (int i = 0, top = clog.getLastRevision(); i < top; i += step) {
      revisions.add(clog.getRevision(i));
    }
    final long s1 = System.nanoTime();
    for (Nodeid n : revisions) {
      int r = clog.getRevisionIndex(n);
      if (r % step != 0) {
        throw new IllegalStateException(Integer.toString(r));
      }
    }
    System.out.printf("Direct lookup of %d revisions took %,d ns\n", revisions.size(), System.nanoTime() - s1);
View Full Code Here

TOP

Related Classes of org.tmatesoft.hg.repo.HgChangelog$RawCsetParser

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact coftware#gmail.com.