Examples of Op


Examples of com.hp.hpl.jena.sparql.algebra.Op

    }

    @Override
    public Expr applyNodeTransform(NodeTransform nodeTransform)
    {
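        // Apply the node transform to the NOT EXISTS graph pattern (an algebra Op)
        // and rebuild the expression around the transformed pattern.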
        Op op2 = NodeTransformLib.transform(nodeTransform, getGraphPattern()) ;
        return new E_NotExists(getElement(), op2) ;
    }

Examples of com.urbanairship.datacube.Op

     */
    private static final ActionRowKeyAndOp makeNewLiveCubeOp(Deserializer<?> deserializer, Collection<Result> liveCubeResults,
            Collection<Result> snapshotResults, Collection<Result> backfilledResults, Context ctx)
            throws IOException {

        Op liveCubeOp = null;
        Op snapshotOp = null;
        Op backfilledOp = null;
       
        byte[] rowKey = null;
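        // Take the first Result (if any) from each of the three tables, deserialize it
        // into an Op, and remember the row key it came from.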
        if(!liveCubeResults.isEmpty()) {
            Result result = liveCubeResults.iterator().next();
            liveCubeOp = deserializer.fromBytes(result.value());
            rowKey = result.getRow();
        }
        if(!snapshotResults.isEmpty()) {
            Result result = snapshotResults.iterator().next();
            snapshotOp = deserializer.fromBytes(result.value());
            rowKey = result.getRow();
        }
        if(!backfilledResults.isEmpty()) {
            Result result = backfilledResults.iterator().next();
            backfilledOp = deserializer.fromBytes(result.value());
            rowKey = result.getRow();
        }
        /*
         * Merge the live cube table, the snapshot table, and the backfill table. We assume that the
         * snapshot table contains the values that existed before the backfill began, which means
         * that we can estimate the values that arrived since the snapshot by (live-snapshot). By
         * adding the recently-arrived values to the backfilled values, we solve the problem of data
         * arriving during the snapshot that might not otherwise have been counted.
         *
         * The following if-else statements enumerate all 8 possibilities of presence/absence of
         * snapshot row, backfill row, and livecube row.
         */
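        // Worked example, assuming counter-like Ops whose add/subtract combine counts:
        //   live = 10, snapshot = 7, backfill = 4
        //   arrived since the snapshot = live - snapshot = 3
        //   merged live-cube value     = 3 + backfill    = 7   i.e. (live - snapshot) + backfill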
       
        // Case: snapshot exists, backfill exists, liveCube exists
        // If live == snap:
        //    new value is backfill
        // Else:
        //    new value is (live-snap) + backfill
        if(snapshotOp != null && backfilledOp != null && liveCubeOp != null) {
            DebugHack.log("HBaseBackfillMergeMapper 1");
            if(liveCubeOp.equals(snapshotOp)) {
                return new ActionRowKeyAndOp(Action.OVERWRITE, rowKey, backfilledOp);
            }
            Op newLiveCubeValue = (liveCubeOp.subtract(snapshotOp)).add(backfilledOp);
            if(newLiveCubeValue.equals(liveCubeOp)) {
                return new ActionRowKeyAndOp(Action.LEAVE_ALONE, rowKey, null);
            } else {
                return new ActionRowKeyAndOp(Action.OVERWRITE, rowKey, newLiveCubeValue);
            }
        }

        // Case: snapshot exists, backfill empty, liveCube exists
        // If live == snap:
        //    no ops occurred during snapshot, delete row
        // Else:
        //    new value is (live-snap)
        else if(snapshotOp != null && backfilledOp == null && liveCubeOp != null) {
            DebugHack.log("HBaseBackfillMergeMapper 2");
            if(liveCubeOp.equals(snapshotOp)) {
                DebugHack.log("HBaseBackfillMergeMapper 2.1");
                return new ActionRowKeyAndOp(Action.DELETE, rowKey, null);
            } else {
                DebugHack.log("HBaseBackfillMergeMapper 2.2");
                Op newLiveCubeValue = liveCubeOp.subtract(snapshotOp);
                return new ActionRowKeyAndOp(Action.OVERWRITE, rowKey, newLiveCubeValue);
            }
        }

        // Case: snapshot empty, backfill exists, liveCube exists
        // New value is backfill + live
        else if(snapshotOp == null && backfilledOp != null && liveCubeOp != null) {
            DebugHack.log("HBaseBackfillMergeMapper 3");
            Op newLiveCubeValue = backfilledOp.add(liveCubeOp);
            return new ActionRowKeyAndOp(Action.OVERWRITE, rowKey, newLiveCubeValue);
        }
       
        // Case: snapshot empty, backfill exists, liveCube empty
        // New value is backfill

Examples of mikera.vectorz.Op

    }
    return null;
  }
 
  public static Op create(Op a, Op b) {
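    // Prefer an optimised composite when tryOptimisedCreate can supply one;
    // otherwise fall back to a general Division of a by b.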
    Op t1=tryOptimisedCreate(a,b);
    if (t1!=null) return t1;
   
    return new Division(a,b);
  }

Examples of org.apache.aurora.gen.storage.Op

    createNoMessagesStreamManager().snapshot(snapshot);
  }

  @Test
  public void testTransactionOps() throws CodingException {
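    // Two log Ops (save a framework id, remove a job) are expected to be written
    // together as a single transaction at position1.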
    Op saveFrameworkId = Op.saveFrameworkId(new SaveFrameworkId("jake"));
    Op deleteJob = Op.removeJob(new RemoveJob(JobKeys.from("role", "env", "name").newBuilder()));
    expectTransaction(position1, saveFrameworkId, deleteJob);

    control.replay();

    StreamTransaction transaction = createNoMessagesStreamManager().startTransaction();

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.Op

   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;
   
    dataXceiverServer.childSockets.add(s);
   
    try {
     
      InputStream input = socketIn;
      if (dnConf.encryptDataTransfer) {
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + s.getInetAddress() + ". Perhaps the client is running an " +
              "older version of Hadoop which does not support encryption.");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
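          // The first read waits with the normal socket timeout; once at least one op
          // has been processed, the keepalive timeout bounds how long this idle,
          // reused connection is kept open before the loop exits.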
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            socketIn.setTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            socketIn.setTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + s.toString() + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          s.setSoTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while (!s.isClosed() && dnConf.socketKeepaliveTimeout > 0);
    } catch (Throwable t) {
      LOG.error(datanode.getDisplayName() + ":DataXceiver error processing " +
                ((op == null) ? "unknown" : op.name()) + " operation " +
                " src: " + remoteAddress +
                " dest: " + localAddress, t);
    } finally {
      if (LOG.isDebugEnabled()) {
        LOG.debug(datanode.getDisplayName() + ":Number of active connections is: "

Examples of org.apache.hadoop.hdfs.protocol.datatransfer.Op

   * Read/write data from/to the DataXceiverServer.
   */
  @Override
  public void run() {
    int opsProcessed = 0;
    Op op = null;

    try {
      dataXceiverServer.addPeer(peer, Thread.currentThread());
      peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
      InputStream input = socketIn;
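      // Negotiate encrypted streams only for a plain (non-secure) channel from an
      // untrusted peer, and only when encrypted data transfer is enabled.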
      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer &&
          !dnConf.trustedChannelResolver.isTrusted(getClientAddress(peer))){
        IOStreamPair encryptedStreams = null;
        try {
          encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
              socketIn, datanode.blockPoolTokenSecretManager,
              dnConf.encryptionAlgorithm);
        } catch (InvalidMagicNumberException imne) {
          LOG.info("Failed to read expected encryption handshake from client " +
              "at " + peer.getRemoteAddressString() + ". Perhaps the client " +
              "is running an older version of Hadoop which does not support " +
              "encryption");
          return;
        }
        input = encryptedStreams.in;
        socketOut = encryptedStreams.out;
      }
      input = new BufferedInputStream(input, HdfsConstants.SMALL_BUFFER_SIZE);
     
      super.initialize(new DataInputStream(input));
     
      // We process requests in a loop, and stay around for a short timeout.
      // This optimistic behaviour allows the other end to reuse connections.
      // Setting the keepalive timeout to 0 disables this behavior.
      do {
        updateCurrentThreadName("Waiting for operation #" + (opsProcessed + 1));

        try {
          if (opsProcessed != 0) {
            assert dnConf.socketKeepaliveTimeout > 0;
            peer.setReadTimeout(dnConf.socketKeepaliveTimeout);
          } else {
            peer.setReadTimeout(dnConf.socketTimeout);
          }
          op = readOp();
        } catch (InterruptedIOException ignored) {
          // Time out while we wait for client rpc
          break;
        } catch (IOException err) {
          // Since we optimistically expect the next op, it's quite normal to get EOF here.
          if (opsProcessed > 0 &&
              (err instanceof EOFException || err instanceof ClosedChannelException)) {
            if (LOG.isDebugEnabled()) {
              LOG.debug("Cached " + peer + " closing after " + opsProcessed + " ops");
            }
          } else {
            throw err;
          }
          break;
        }

        // restore normal timeout
        if (opsProcessed != 0) {
          peer.setReadTimeout(dnConf.socketTimeout);
        }

        opStartTime = now();
        processOp(op);
        ++opsProcessed;
      } while ((peer != null) &&
          (!peer.isClosed() && dnConf.socketKeepaliveTimeout > 0));
    } catch (Throwable t) {
      String s = datanode.getDisplayName() + ":DataXceiver error processing "
          + ((op == null) ? "unknown" : op.name()) + " operation "
          + " src: " + remoteAddress + " dst: " + localAddress;
      if (op == Op.WRITE_BLOCK && t instanceof ReplicaAlreadyExistsException) {
        // For WRITE_BLOCK, it is okay if the replica already exists since
        // client and replication may write the same block to the same datanode
        // at the same time.

Examples of org.apache.zookeeper.Op

    public void testChRootCreateDelete() throws Exception {
        // creating the subtree for chRoot clients.
        String chRoot = createNameSpace();
        // Creating child using chRoot client.
        zk_chroot = createClient(this.hostPort + chRoot);
        Op createChild = Op.create("/myid", new byte[0],
                Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT);
        zk_chroot.multi(Arrays.asList(createChild));
       
        Assert.assertNotNull("zNode is not created under chroot:" + chRoot, zk
                .exists(chRoot + "/myid", false));
        Assert.assertNotNull("zNode is not created under chroot:" + chRoot,
                zk_chroot.exists("/myid", false));
        Assert.assertNull("zNode is created directly under '/', ignoring configured chroot",
                zk.exists("/myid", false));
       
        // Deleting child using chRoot client.
        Op deleteChild = Op.delete("/myid", 0);
        zk_chroot.multi(Arrays.asList(deleteChild));
        Assert.assertNull("zNode exists under chroot:" + chRoot, zk.exists(
                chRoot + "/myid", false));
        Assert.assertNull("zNode exists under chroot:" + chRoot, zk_chroot
                .exists("/myid", false));
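Outside of a test, the same multi() API can be used to apply several Ops atomically. The following is a minimal sketch, not taken from the test above: it assumes an already-connected ZooKeeper handle, and the paths and payloads are purely illustrative; every Op in the batch either succeeds or none are applied.

import java.util.Arrays;
import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.Op;
import org.apache.zookeeper.OpResult;
import org.apache.zookeeper.ZooDefs.Ids;
import org.apache.zookeeper.ZooKeeper;

public class MultiOpSketch {
    // `zk` is assumed to be a connected ZooKeeper handle; paths and data are illustrative.
    static List<OpResult> atomicUpdate(ZooKeeper zk) throws Exception {
        List<Op> ops = Arrays.asList(
                Op.create("/app/config", "v1".getBytes(), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT),
                Op.setData("/app/flag", "on".getBytes(), -1),  // -1 matches any node version
                Op.delete("/app/stale", -1));
        return zk.multi(ops);                                  // all succeed or none are applied
    }
}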

Examples of org.ethereum.vmtrace.Op

        }
    }

    public void saveOpTrace(){
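        // Snapshot the VM state for the current opcode: program counter, opcode, remaining
        // gas, contract storage, memory and stack, then append the entry to the program trace.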

        Op op = new Op();
        op.setPc(pc);

        op.setOp(ops[pc]);
        op.saveGas(getGas());

        ContractDetails contractDetails = this.result.getRepository().
                getContractDetails(this.programAddress.getLast20Bytes());
        op.saveStorageMap(contractDetails.getStorage());
        op.saveMemory(memory);
        op.saveStack(stack);

        programTrace.addOp(op);
    }

Examples of org.jboss.gwt.circuit.sample.calculator.Term.Op

                }

                StringBuilder message = new StringBuilder();
                Set<Op> keys = termsByOp.keySet();
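                // Build a comma-separated summary with one "NAME(count)" entry per operator.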
                for (Iterator<Op> iterator = keys.iterator(); iterator.hasNext(); ) {
                    Op key = iterator.next();
                    message.append(key.name()).append("(").append(termsByOp.get(key).size()).append(")");
                    if (iterator.hasNext()) {
                        message.append(", ");
                    }
                }
                System.out.printf("Operation stats:    %s\n", message);

Examples of org.odbms.OP

      }
    }

    for (Iterator<FieldInfo> i = queryNodes.iterator(); i.hasNext();) {
      FieldInfo fi = i.next();
      OP op = null;
      String val = fi.value.toString();
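      // Map shell/SQL-style wildcards to comparison operators: a value wrapped in '*' or '%'
      // on both ends becomes a CONTAINS match with the wildcards stripped.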
      if ((val.endsWith("*") || val.endsWith("%")) && (val.startsWith("*") || val.startsWith("%"))) {
        op = OP.CONTAINS;
        val = val.substring(1, val.length() - 1);
      } else if (val.endsWith("*") || val.endsWith("%")) {