Package avrobase

Examples of avrobase.AvroBaseException
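AvroBaseException is the unchecked exception that avrobase storage implementations throw when a backend operation fails. The snippets below show the recurring pattern: checked exceptions from file, S3, shard, and Redis operations are caught and rethrown wrapped in an AvroBaseException that adds context and preserves the cause. A minimal sketch of that convention, assuming AvroBaseException extends RuntimeException with the usual (String, Throwable) constructor:

  import java.io.DataInputStream;
  import java.io.IOException;

  public class WrapExample {
    // Sketch only: surface a checked I/O failure as the unchecked
    // AvroBaseException, keeping the original exception as the cause.
    byte[] readRecord(DataInputStream in) {
      try {
        byte[] bytes = new byte[in.readInt()];
        in.readFully(bytes);
        return bytes;
      } catch (IOException e) {
        throw new AvroBaseException("Failed to read record", e);
      }
    }
  }

From a delete implementation: an I/O failure falls back to a normal mutate, and the write lock is released in a finally block.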


      } catch (IOException e1) {
        // Fall through and do a normal mutate instead
      }
      return mutate(row, tMutator);
    } catch (IOException e) {
      throw new AvroBaseException("Failed to delete: " + row, e);
    } finally {
      writeLock.unlock();
    }
  }
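The finally block above guarantees the write lock is released even when the delete fails and an AvroBaseException propagates. A minimal sketch of the same guard, assuming a ReentrantReadWriteLock field (the names here are illustrative):

  import java.util.concurrent.locks.ReadWriteLock;
  import java.util.concurrent.locks.ReentrantReadWriteLock;

  public class LockGuardExample {
    private final ReadWriteLock lock = new ReentrantReadWriteLock();

    void delete(byte[] row) {
      lock.writeLock().lock();
      try {
        // ... perform the mutation; may throw AvroBaseException ...
      } finally {
        // Always runs, so a thrown AvroBaseException cannot leak the lock
        lock.writeLock().unlock();
      }
    }
  }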
From an S3-backed scanner: merging a local delegate iterator with rows read back from gzip-compressed archive objects in S3.


                } while (hasmore);
              } catch (EOFException e) {
                // A truncated stream means no more archived data
                e.printStackTrace();
                return hasmore = false;
              } catch (IOException e) {
                throw new AvroBaseException("Failed to read s3 object stream", e);
              }
            } while (!hasmore);
            return hasmore;
          }

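          // Advances through the archived S3 files, skipping rows the local
          // delegate has already returned, until it finds the next row in range.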
          private boolean startArchived() {
            // Scan S3
            if (files == null) {
              files = getArchives();
            }
            // If .next() hasn't been called return the previous answer
            if (hasmore != null) return hasmore;
            // Read the files in sequence, moving between them as they run out of data
            if (firstdelegaterow != null) {
              do {
                if (assertCurrentStream()) return hasmore = false;
                try {
                  do {
                    hasmore = currentstream.readBoolean();
                    if (hasmore) {
                      if (nextRowInStream()) return hasmore = false;

                      if (firstdelegatekey != null) {
                        // skip archived rows we have already scanned
                        final int compare = bytesComparator.compare(firstdelegatekey, row);
                        if (compare >= 0) {
                          break;
                        } else {
                          // Skip this row's value bytes; the delegate already returned it
                          currentstream.readFully(new byte[currentstream.readInt()]);
                        }
                      } else {
                        break;
                      }
                    } else {
                      // End of this archive file; close it and move to the next
                      currentstream.close();
                      currentstream = null;
                    }
                  } while (hasmore);
                } catch (EOFException e) {
                  // A truncated stream means no more archived data
                  e.printStackTrace();
                  return hasmore = false;
                } catch (IOException e) {
                  throw new AvroBaseException("Failed to read s3 object stream", e);
                }
              } while (!hasmore);
            } else {
              hasmore = false;
            }
            if (!hasmore) {
              if (stopRow != null && bytesComparator.compare(row, stopRow) >= 0) {
                return false;
              }
              if (firstdelegaterow != null) {
                firstrow = true;
                return hasmore = true;
              }
              return hasmore = iterator.hasNext();
            }
            return hasmore;
          }

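          // Reads the next row key from the current archive stream; returns true
          // (and closes the stream) once the key reaches stopRow.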
          private boolean nextRowInStream() throws IOException {
            // We may be reading things that are supposed to be still in the
            // local store but haven't been deleted yet
            row = new byte[currentstream.readInt()];
            currentstream.readFully(row);
            if (stopRow != null) {
              int compare = bytesComparator.compare(row, stopRow);
              if (compare >= 0) {
                currentstream.close();
                currentstream = null;
                return true;
              }
            }
            return false;
          }

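          // Ensures an archive stream is open, pulling the next S3 object and its
          // schema header if needed; returns true when no archives remain.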
          private boolean assertCurrentStream() {
            if (currentstream == null) {
              if (files.size() == 0) return true;
              final S3Object nextFile = files.remove(0);
              try {
                currentstream = new DataInputStream(new GZIPInputStream(getInputStream(nextFile)));
                final byte[] bytes = new byte[currentstream.readInt()];
                currentstream.readFully(bytes);
                schema = Schema.parse(new ByteArrayInputStream(bytes));
              } catch (ServiceException e) {
                throw new AvroBaseException("Failed to read inputstream from S3: " + nextFile, e);
              } catch (IOException e) {
                throw new AvroBaseException("Failed to read schema", e);
              }
            }
            return false;
          }

          @Override
          public synchronized Row<T, byte[]> next() {
            // Grab the next local value
            if (reverse) {
              if (firstrow) {
                try {
                  firstrow = false;
                  hasmore = null;
                  return firstdelegaterow;
                } finally {
                  firstdelegaterow = null;
                }
              }
              if (firstdelegaterow == null) {
                return iterator.next();
              }
            } else {
              if (lastdelegatekey == null) return lastdelegaterow = iterator.next();
            }
            // Grab the next S3 value
            if ((files.size() == 0 && currentstream == null) || (hasmore != null && !hasmore))
              throw new NoSuchElementException();
            hasmore = null;
            try {
              byte[] bytes = new byte[currentstream.readInt()];
              currentstream.readFully(bytes);
              SpecificDatumReader<T> sdr = new SpecificDatumReader<T>(schema, actualSchema);
              T read = sdr.read(null, decoderFactory.binaryDecoder(bytes, null));
              return new Row<T, byte[]>(read, row);
            } catch (IOException e) {
              throw new AvroBaseException("Invalid data in log", e);
            }
          }

          @Override
          public void remove() {
Listing the archive objects in an S3 bucket and sorting them by their hex-encoded row-key filenames.

        public boolean apply(@Nullable S3Object input) {
          // Guard against null, since the input is declared @Nullable
          return input != null && !input.getName().equals(path);
        }
      })));
    } catch (S3ServiceException e) {
      throw new AvroBaseException("Failed to read files from S3 bucket", e);
    }
    Collections.sort(files1, new Comparator<S3Object>() {
      @Override
      public int compare(S3Object s3Object, S3Object s3Object1) {
        try {
          final byte[] key = Hex.decodeHex(filename(s3Object).toCharArray());
          final byte[] key1 = Hex.decodeHex(filename(s3Object1).toCharArray());
          return bytesComparator.compare(key, key1);
        } catch (DecoderException e) {
          throw new AvroBaseException("Failed to decode filename: " + s3Object.getName(), e);
        }
      }
    });
    return files1;
  }
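Each archive object is named with the hex-encoded start row of the range it holds (the upload code below builds the key as path + Hex.encodeHex(row)), so decoding a filename back to bytes and comparing with bytesComparator orders the archives by row. An illustrative example with a made-up path:

  // Hypothetical: an archive starting at row {0x00, 0x2A} under path "table/"
  byte[] row = new byte[]{0x00, 0x2A};
  String name = "table/" + new String(Hex.encodeHex(row)); // "table/002a"
  // Hex.decodeHex("002a".toCharArray()) recovers {0x00, 0x2A} for sorting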
Archiving rows to S3: writing a length-prefixed, gzip-compressed log file and uploading it with retries.

          archiveRow = Hex.decodeHex(archives.get(archives.size() - 1).getName().substring(path.length()).toCharArray());
        } else {
          archiveRow = Hex.decodeHex(archives.get(0).getName().substring(path.length()).toCharArray());
        }
      } catch (DecoderException e) {
        throw new AvroBaseException("Failed to get row from archives: " + archives, e);
      }
      if (reverse) {
        if (bytesComparator.compare(row, archiveRow) < 0) {
          return;
        }
      } else {
        if (bytesComparator.compare(row, archiveRow) >= 0) {
          return;
        }
      }
    } else {
      archiveRow = null;
    }
    File file = null;
    try {
      file = File.createTempFile("log", ".gz");
      DataOutputStream dos = new DataOutputStream(new GZIPOutputStream(new FileOutputStream(file)));
      byte[] bytes = actualSchema.toString().getBytes(Charsets.UTF_8);
      dos.writeInt(bytes.length);
      dos.write(bytes);
      for (Row<T, byte[]> tRow : getScanner(row)) {
        if (archiveRow != null) {
          if (reverse) {
            if (bytesComparator.compare(tRow.row, archiveRow) < 0) {
              break;
            }
          } else {
            if (bytesComparator.compare(tRow.row, archiveRow) >= 0) {
              break;
            }
          }
        }
        dos.writeBoolean(true);
        dos.writeInt(tRow.row.length);
        dos.write(tRow.row);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        Encoder be = encoderFactory.binaryEncoder(baos, null);
        SpecificDatumWriter<T> sdw = new SpecificDatumWriter<T>(actualSchema);
        sdw.write(tRow.value, be);
        be.flush();
        bytes = baos.toByteArray();
        dos.writeInt(bytes.length);
        dos.write(bytes);
      }
      dos.writeBoolean(false);
      dos.flush();
      dos.close();
      // Retry 3 times
      for (int i = 0; i < 3; i++) {
        S3Object s3o = new S3Object(path + new String(Hex.encodeHex(row)));
        s3o.setContentLength(file.length());
        s3o.setDataInputStream(new BufferedInputStream(new FileInputStream(file)));
        s3o.setContentType("application/gzip");
        try {
          s3.putObject(bucket, s3o);
          break;
        } catch (S3ServiceException e) {
          if (i == 2) {
            throw new AvroBaseException("Failed to upload to S3", e);
          }
        }
      }
    } catch (IOException e) {
      throw new AvroBaseException("Failed to read/write file: " + file, e);
    } finally {
      if (file != null) {
        file.delete();
      }
    }
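The writer above emits exactly the format that startArchived() and next() in the scanner snippet read back. The layout of one archive object, summarized:

  // Archive object layout (gzip-compressed), matching the reads above:
  //   int schemaLength, byte[schemaLength]   -- Avro schema as JSON
  //   per row:
  //     boolean true                         -- "has more" marker
  //     int keyLength, byte[keyLength]       -- row key
  //     int valueLength, byte[valueLength]   -- Avro binary-encoded value
  //   boolean false                          -- end-of-archive marker

The upload itself is attempted up to three times; only the third consecutive S3ServiceException is surfaced as an AvroBaseException.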
From a partitioned avrobase: a row lookup that fails when no active shard covers the requested row.

              }
              return shard;
            }
          }
        }
        throw new AvroBaseException("No active shard matches row");
      } finally {
        readShards.unlock();
      }
    }
Rebalancing a partitioned avrobase: counting rows, then redistributing them across shards in proportion to their weights.

              long total = 0;
              for (Future<Long> count : counts) {
                try {
                  total += count.get();
                } catch (Exception e) {
                  throw new AvroBaseException("Corrupt shard: " + e);
                }
              }
              // The new list of shards includes the new shard on the end
              List<PartitionedShard<T, K>> newShards = new ArrayList<PartitionedShard<T, K>>(activeShards);
              newShards.add(new PartitionedShard<T, K>(avroBase, weight, null));
              // Stop the world implementation
              // Lock, wait for all shards to be returned
              writeShards.lock();
              synchronized (usedShards) {
                while (usedShards.size() != 0) {
                  usedShards.wait();
                }
              }
              // Copy between shards to make new shard distributions and set the start / end key
              K current;
              for (int j = 0; j < newShards.size(); j++) {
                PartitionedShard<T, K> shard = newShards.get(j);
                PartitionedShard<T, K> nextShard = j + 1 == newShards.size() ? null : newShards.get(j + 1);
                // This is the new number of records the partition should contain
                long newcount = (long) (total * shard.weight() / totalWeight);
                current = null;
                K end = j + 1 == newShards.size() ? null : newShards.get(j + 1).start;
                for (K tRow : shard.avrobase().scanKeys(shard.start, end)) {
                  if (newcount-- <= 0) {
                    if (nextShard == null) {
                      break;
                    } else {
                      // The start is the first row that wasn't in the last shard
                      if (current != null) {
                        nextShard.start = tRow;
                        current = null;
                      }
                      // Copy all the remaining rows from this shard to the next
                      nextShard.avrobase().put(tRow, shard.avrobase().get(tRow).value);
                      // Remove them from this shard
                      shard.avrobase().delete(tRow);
                    }
                  } else {
                    current = tRow;
                  }
                }
              }
              // Set the new active shards
              activeShards.clear();
              activeShards.addAll(newShards);
            } catch (InterruptedException e) {
              throw new AvroBaseException("Shard add interrupted", e);
            } finally {
              // Notify that we are done adding this avrobase
              synchronized (addedAvroBase) {
                addedAvroBase.remove(avroBase);
                addedAvroBase.notifyAll();
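The redistribution above sizes each shard as a weighted share of the total row count, newcount = total * weight / totalWeight. A small worked sketch of just that arithmetic, with made-up weights:

  // Hypothetical: 1000 rows across three shards weighted 1.0, 1.0, and 2.0
  long total = 1000;
  double totalWeight = 1.0 + 1.0 + 2.0;
  long first = (long) (total * 1.0 / totalWeight);  // 250 rows
  long second = (long) (total * 1.0 / totalWeight); // 250 rows
  long third = (long) (total * 2.0 / totalWeight);  // 500 rows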
Adding a new shard to the partitioning strategy, optionally blocking until the rebalance finishes.

    strategy.add(avroBase, weight);
    if (wait) {
      try {
        strategy.waitForBalance();
      } catch (InterruptedException e) {
        throw new AvroBaseException("Shard balancing interrupted", e);
      }
    }
  }
From a Redis-backed avrobase: fetching a row's schema id, version, and serialized data in a single transaction.

              mget(row + s, row + v, row + d);
            }
          });
        } while (results == null);
        // The transaction yields one element: a list of [schemaId, version, data]
        if (results.size() != 1 || (results = (List<Object>) results.get(0)).size() != 3) {
          throw new AvroBaseException("Incorrect number of results from redis transaction: " + results);
        }
        String schemaId = (String) results.get(0);
        String versionStr = (String) results.get(1);
        String data = (String) results.get(2);
        if (versionStr == null || schemaId == null || data == null) {
          return null;
        }
        Schema schema = schemaCache.get(schemaId);
        if (schema == null) {
          schema = loadSchema(j.get(schemaId + z).getBytes(), schemaId);
        }
        return new Row<T, String>(readValue(data.getBytes(), schema, format), row, Long.parseLong(versionStr));
      } catch (Exception e) {
        pool.returnBrokenResource(j);
        returned = true;
        throw new AvroBaseException(e);
      } finally {
        if (!returned) pool.returnResource(j);
      }
    } catch (TimeoutException e) {
      throw new AvroBaseException("Timed out", e);
    }
  }
The same pool-handling idiom around a Redis transaction that is retried until it commits.

          });
        } while (results == null);
      } catch (Exception e) {
        pool.returnBrokenResource(j);
        returned = true;
        throw new AvroBaseException(e);
      } finally {
        if (!returned) pool.returnResource(j);
      }
    } catch (TimeoutException e) {
      throw new AvroBaseException("Timed out", e);
    }
  }
A Redis write whose success is reported by whether the transaction returned results.

        });
        return results != null;
      } catch (Exception e) {
        pool.returnBrokenResource(j);
        returned = true;
        throw new AvroBaseException(e);
      } finally {
        if (!returned) pool.returnResource(j);
      }
    } catch (TimeoutException e) {
      throw new AvroBaseException("Timed out", e);
    }
  }
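All three Redis fragments above share one resource-handling idiom: after a failure the connection is handed back with returnBrokenResource() so the pool discards it, and the returned flag keeps the finally block from returning the same connection twice. A minimal sketch of the idiom, assuming a Jedis-style pool as used here (names illustrative):

  // Sketch of the pool idiom used above; pool is a JedisPool-like object.
  Jedis j = pool.getResource();
  boolean returned = false;
  try {
    // ... run Redis commands on j ...
  } catch (Exception e) {
    // The connection state is unknown; discard it rather than reuse it
    pool.returnBrokenResource(j);
    returned = true;
    throw new AvroBaseException(e);
  } finally {
    if (!returned) pool.returnResource(j);
  }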