Examples of HexStringSplit


Examples of org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit

    /**
     * Unit tests for the HexStringSplit algorithm. Makes sure it divides up the
     * space of keys in the way that we expect.
     */
    @Test
    public void unitTestHexStringSplit() {
        HexStringSplit splitter = new HexStringSplit();
        // Check splitting while starting from scratch

        byte[][] twoRegionsSplits = splitter.split(2);
        assertEquals(1, twoRegionsSplits.length);
        assertArrayEquals("80000000".getBytes(), twoRegionsSplits[0]);

        byte[][] threeRegionsSplits = splitter.split(3);
        assertEquals(2, threeRegionsSplits.length);
        byte[] expectedSplit0 = "55555555".getBytes();
        assertArrayEquals(expectedSplit0, threeRegionsSplits[0]);
        byte[] expectedSplit1 = "aaaaaaaa".getBytes();
        assertArrayEquals(expectedSplit1, threeRegionsSplits[1]);

        // Check splitting existing regions that have start and end points
        byte[] splitPoint = splitter.split("10000000".getBytes(), "30000000".getBytes());
        assertArrayEquals("20000000".getBytes(), splitPoint);

        byte[] lastRow = "ffffffff".getBytes();
        assertArrayEquals(lastRow, splitter.lastRow());
        byte[] firstRow = "00000000".getBytes();
        assertArrayEquals(firstRow, splitter.firstRow());

        // Halfway between 00... and 20... should be 10...
        splitPoint = splitter.split(firstRow, "20000000".getBytes());
        assertArrayEquals("10000000".getBytes(), splitPoint);

        // Halfway between df... and ff... should be ef....
        splitPoint = splitter.split("dfffffff".getBytes(), lastRow);
        assertArrayEquals("efffffff".getBytes(), splitPoint);
    }
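
The assertions above exercise HexStringSplit purely in memory. As a minimal sketch of how the same calls could be used outside a test to compute pre-split keys for a new table, the snippet below relies only on HexStringSplit.split(int), which the excerpt already uses, plus HBase's Bytes utility; the class name and region count are illustrative assumptions, not part of the original example.

    // Minimal sketch (assumption, not part of the excerpt above): compute
    // pre-split keys with HexStringSplit and print them. The class name and
    // region count are illustrative example values.
    import org.apache.hadoop.hbase.util.Bytes;
    import org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit;

    public class HexSplitPointsDemo {
        public static void main(String[] args) {
            HexStringSplit splitter = new HexStringSplit();
            int numRegions = 8; // example value; yields numRegions - 1 boundaries
            byte[][] splits = splitter.split(numRegions);
            for (byte[] split : splits) {
                // Each boundary is an ASCII hex string such as "20000000".
                System.out.println(Bytes.toString(split));
            }
        }
    }

In practice the RegionSplitter command-line tool wraps this computation and creates the pre-split table directly; see the RegionSplitter/HexStringSplit javadoc for the exact invocation in your HBase version.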

Examples of org.apache.hadoop.hbase.util.RegionSplitter.HexStringSplit

                // Tail of an assertion carried over from the preceding test method (not shown);
                // xFF is a byte constant (presumably (byte) 0xff) defined elsewhere in the test class.
                new byte[] {(byte)0xef, xFF, xFF, xFF, xFF, xFF, xFF, xFF});
    }

  @Test
  public void testUserInput() {
    SplitAlgorithm algo = new HexStringSplit();
    assertFalse(splitFailsPrecondition(algo)); // default settings are fine
    assertFalse(splitFailsPrecondition(algo, "00", "AA")); // custom is fine
    assertTrue(splitFailsPrecondition(algo, "AA", "00")); // range error
    assertTrue(splitFailsPrecondition(algo, "AA", "AA")); // range error
    assertFalse(splitFailsPrecondition(algo, "0", "2", 3)); // should be fine
