Examples of BlockIndexChunk


Examples of org.apache.hadoop.hbase.io.hfile.HFileBlockIndex.BlockIndexChunk

  @Test
  public void testBlockIndexChunk() throws IOException {
    BlockIndexChunk c = new BlockIndexChunk();
    ByteArrayOutputStream baos = new ByteArrayOutputStream();
    int N = 1000;
    int[] numSubEntriesAt = new int[N];
    int numSubEntries = 0;
    for (int i = 0; i < N; ++i) {
      // The serialized non-root representation must match the size the chunk reports.
      baos.reset();
      DataOutputStream dos = new DataOutputStream(baos);
      c.writeNonRoot(dos);
      assertEquals(c.getNonRootSize(), dos.size());

      // Same check for the root-level serialization.
      baos.reset();
      dos = new DataOutputStream(baos);
      c.writeRoot(dos);
      assertEquals(c.getRootSize(), dos.size());

      // Add an entry with a random ordered key and 1-5 new sub-entries, recording
      // the cumulative sub-entry count so the lookup loop below can verify it.
      byte[] k = TestHFileWriterV2.randomOrderedKey(rand, i);
      numSubEntries += rand.nextInt(5) + 1;
      numSubEntriesAt[i] = numSubEntries;
      keys.add(k);
      c.add(k, getDummyFileOffset(i), getDummyOnDiskSize(i), numSubEntries);
    }

    // Test the ability to look up the entry that contains a particular
    // deeper-level index block's entry ("sub-entry"), assuming a global
    // 0-based ordering of sub-entries. This is needed for mid-key calculation.
    for (int i = 0; i < N; ++i) {
      for (int j = i == 0 ? 0 : numSubEntriesAt[i - 1];
           j < numSubEntriesAt[i];
           ++j) {
        assertEquals(i, c.getEntryBySubEntry(j));
      }
    }
  }
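
The snippet above is a method from the HBase test class for HFileBlockIndex and relies on fields and helpers defined elsewhere in that class (rand, keys, getDummyFileOffset, getDummyOnDiskSize). Below is a minimal sketch of that surrounding context so the example reads as self-contained; the class name, field initializers, and helper bodies here are illustrative assumptions, not the actual HBase test code.

  // Hypothetical test-class skeleton around the method above (names and values are assumptions).
  import java.util.ArrayList;
  import java.util.List;
  import java.util.Random;

  public class TestHFileBlockIndexExample {
    // Fixed seed assumed so the randomized keys and sub-entry counts are repeatable.
    private static final Random rand = new Random(2389757);
    private final List<byte[]> keys = new ArrayList<>();

    // Arbitrary deterministic stand-ins for real block file offsets and on-disk sizes.
    private static long getDummyFileOffset(int i) {
      return i * 185L + 379;
    }

    private static int getDummyOnDiskSize(int i) {
      return i * i * 37 + i * 19 + 13;
    }

    // testBlockIndexChunk() from the snippet above would live here.
  }

With this context in place, each c.add(...) call records one index entry together with the running sub-entry total, which is what getEntryBySubEntry(j) maps back to an entry index in the verification loop at the end of the test.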