Package: com.mongodb

Examples of com.mongodb.BasicDBList


        DocumentPojo doc = DocumentPojo.fromDb(docObj, DocumentPojo.class);
            _scriptEngine.put("old_document", _gson.toJson(doc));
            try {
              _securityManager.eval(_scriptEngine,JavaScriptUtils.initOnUpdateScript);
              Object returnVal = _securityManager.eval(_scriptEngine, onUpdateScript);
          BasicDBList outList = JavaScriptUtils.parseNativeJsObject(returnVal, _scriptEngine);                       
          f.addToMetadata("_PERSISTENT_", outList.toArray());
            }
            catch (Exception e) {
              // Extra step here...
              if (null != doc.getMetadata()) { // Copy persistent metadata across...
                Object[] persist = doc.getMetadata().get("_PERSISTENT_");
View Full Code Here


  ///////////////////////////////////////////////////////////////////////
  //
  // Utils: create objects out of more complex SQL arrays
 
  public static BasicDBList getComplexArray(String columnName, java.sql.Array a) throws IllegalArgumentException, SQLException {
    BasicDBList bsonArray = new BasicDBList();

    Object array = a.getArray();
    int length = Array.getLength(array);
    for (int i = 0; i < length; ++i) {
      Object o = Array.get(array, i);
      bsonArray.add(convertJdbcTypes(columnName, o));
    }
    a.free();
   
    return bsonArray;
  }//TOTEST 
View Full Code Here

    BasicDBObject query = new BasicDBObject(DocumentPojo._id_, share.getDocumentLocation().get_id()); // (same for all artifacts)
    String dbName = share.getDocumentLocation().getDatabase();
    String collectionName = share.getDocumentLocation().getCollection();
    BasicDBObject returnVal = (BasicDBObject) MongoDbManager.getCollection(dbName, collectionName).findOne(query);
    try {
      BasicDBList communities = null;
      boolean bCustomJob = dbName.equals("custommr"); // (a bit different)
      boolean bFoundOverlap = false;
      if (!bCustomJob) {
        ObjectId communityId = (ObjectId) returnVal.get(DocumentPojo.communityId_); // (same for other artifacts)
        bFoundOverlap = shareIdStrs.contains(communityId.toString());
View Full Code Here

        for (Entry<String, JsonElement> entry: metadata.entrySet()) {
          if (entry.getValue().isJsonArray()) {
            doc.addToMetadata(entry.getKey(), MongoDbUtil.encodeArray(entry.getValue().getAsJsonArray()).toArray());
          }
          else {
            BasicDBList dbl = new BasicDBList();
            dbl.add(MongoDbUtil.encodeUnknown(entry.getValue()));
            doc.addToMetadata(entry.getKey(), dbl);
          }
        }//TESTED       
      }
      return doc;
View Full Code Here

                error.append("Not allowed sub-elements of input called 'filter' (1)");
                return null;
              }
            }//TESTED (1_5b)
           
            BasicDBList subElements = (BasicDBList) inputOrFilter.get(subElType);
            if (null == subElements) {
              subElements = new BasicDBList();
              inputOrFilter.put(subElType, subElements);
            }
            BasicDBObject newEl = new BasicDBObject();
            subElements.add(newEl);
           
            // Store state:
            currSecondLevelBlockName = subElType;
            currSecondLevelBlock = newEl;
          }//TESTED (*)
 
View Full Code Here

       
        return calculateSplits_phase2(conf, confQuery, false, shardingPolicyNew, null);         
      }//TESTED
      else if (conf.getLimit() > 0) { // debug
        //Case 3: Ensure we have small sets of sources to search over
        BasicDBList collectionOfSplits = splitPrecalculations_oldShardSchemeOrDebug(confQuery, srcTagsQuery, conf.getMaxDocsPerSplit());
        final List<InputSplit> splits = new ArrayList<InputSplit>();
       
        boolean queryNonTrivial = isQueryNonTrivial(confQuery);
        if (!queryNonTrivial) {
          //Case 3a: query is trivial, so can just create splits directly from the split pre-calcs
          int toProcess = conf.getLimit();
          Iterator<Object> itSplit = collectionOfSplits.iterator();
          while ((toProcess > 0) && (itSplit.hasNext())) {
            BasicDBObject split = (BasicDBObject) itSplit.next();

            int docCount = (int)split.getLong(SourceHarvestStatusPojo.doccount_, 0L);
            int toGet = (docCount > toProcess) ? toProcess : docCount;
            BasicDBObject modQuery = convertQuery(confQuery, split.get(DocumentPojo.sourceKey_));
            if (null != modQuery) {
              splits.add(new InfiniteMongoInputSplit(conf.getInputURI(), conf.getInputKey(), modQuery, conf.getFields(), conf.getSort(), toGet, 0, conf.isNoTimeout()));
              toProcess -= docCount;
            }
          }//TESTED
        }
        else {
          // Case 3b: annoying, some extra query terms, gonna need to do it the hard way...
          int toProcess = conf.getLimit();
          Iterator<Object> itSplit = collectionOfSplits.iterator();
          DBCollection coll = InfiniteMongoConfigUtil.getCollection(conf.getInputURI());
          while ((toProcess > 0) && (itSplit.hasNext())) {
            BasicDBObject split = (BasicDBObject) itSplit.next();
           
            BasicDBObject modQuery = convertQuery(confQuery, split.get(DocumentPojo.sourceKey_));
            if (null != modQuery) {
              int docsCounted = (int) coll.getCount(modQuery, null, toProcess, 0);
              int toGet = (docsCounted > toProcess) ? toProcess : docsCounted;
              if (docsCounted > 0) {
                splits.add(new InfiniteMongoInputSplit(conf.getInputURI(), conf.getInputKey(), modQuery, conf.getFields(), conf.getSort(), toGet, 0, conf.isNoTimeout()));
                toProcess -= docsCounted;
              }
            }//TESTED
          }
        }//TESTED
       
        return splits;
      }
      else { // More complex cases:
       
        if (shardingPolicyNew) {
          // Case 4a: NEW SHARDING SCHEME
         
          // Always fetch the new sources, eg convert communityId to sourceKeys
          try {         
            splitPrecalculations_newShardScheme(confQuery, srcTagsQuery); // (modifies confQuery if returns true)       
            boolean queryNonTrivial = isQueryNonTrivial(confQuery);
           
            return calculateSplits_phase2(conf, confQuery, !queryNonTrivial, shardingPolicyNew, null);

              // (ie trivial query => always use chunks, bypass skip/limit test)
          }//TESTED (trivial + non-trivial)
          catch (Exception e) { // Didn't match any sources, no problem
            return new ArrayList<InputSplit>();
          }//TESTED
         
        }//TESTED
        else {

          BasicDBList collectionOfSplits = splitPrecalculations_oldShardSchemeOrDebug(confQuery, srcTagsQuery, conf.getMaxDocsPerSplit());
         
          if (null == collectionOfSplits) {
            // Case 4b: OLD SHARDING SCHEME can't get a partition by source keys, just back off to old code
            return calculateSplits_phase2(conf, confQuery, false, shardingPolicyNew, null);           
          }//TESTED (old code)
          else {
            conf.setMaxDocsPerSplit(2*conf.getMaxDocsPerSplit());
              // (because we stop creating splits when the exceed the size)
           
            // Case 4c: OLD SHARDING SCHEME, have a source key partition
            int nMaxCount = 1 + conf.getMaxDocsPerSplit()*conf.getMaxSplits();
            boolean queryNonTrivial = isQueryNonTrivial(confQuery);
            final List<InputSplit> splits = new ArrayList<InputSplit>();
           
            BasicDBObject savedQuery = confQuery;
           
            Iterator<Object> itSplit = collectionOfSplits.iterator();
            BasicDBList bigSplit = null;
            while (itSplit.hasNext()) {
              BasicDBObject split = (BasicDBObject) itSplit.next();
              int docCount = (int)split.getLong(SourceHarvestStatusPojo.doccount_, 0L);
              if (docCount < nMaxCount) { // small split, will use skip/limit
                BasicDBObject modQuery = convertQuery(savedQuery, split.get(DocumentPojo.sourceKey_));
                if (null != modQuery) {

                  final int SPLIT_THRESHOLD = 3;
                  // A few cases:
                  if ((docCount < (SPLIT_THRESHOLD*conf.getMaxDocsPerSplit())) || !queryNonTrivial) {
                    splits.addAll(calculateSplits_phase2(conf, modQuery, false, shardingPolicyNew, (Integer)docCount));
                  }//TESTED (based on limit, based on query)
                  else {
                    // My guess at the point at which you might as well as do the full query in the hope you're going
                    // to save some (empty) splits
                    splits.addAll(calculateSplits_phase2(conf, modQuery, false, shardingPolicyNew, null));
                  }//TESTED
                }//TESTED
              }
              else { // large split, combine all these guys into an array of source keys
                if (null == bigSplit) {
                  bigSplit = new BasicDBList();
                }
                bigSplit.add(split.get(DocumentPojo.sourceKey_));
                  // (guaranteed to be a single element)
              }
            }//(end loop over collections)
           
            if (null != bigSplit) {
             
              // If we have a big left over community then create a set of splits for that - always chunks if query trivial
              if (1 == bigSplit.size()) {
                confQuery.put(DocumentPojo.sourceKey_, bigSplit.iterator().next());               
              }
              else {
                confQuery.put(DocumentPojo.sourceKey_, new BasicDBObject(DbManager.in_, bigSplit));
              }
              splits.addAll(calculateSplits_phase2(conf, confQuery, !queryNonTrivial, shardingPolicyNew, null));
View Full Code Here

      return null;
    }
    else {
      //TreeMap<String, Long> sourceKeys = new TreeMap<String, Long>();
      // Build collections of objects of format { sourceKey: string or [], totalDocs }
      BasicDBList sourceKeyListCollection = new BasicDBList();
      BasicDBList sourceKeyList = null;
      int runningDocs = 0;
      int runningSources = 0;
      while (dbc.hasNext()) {
        BasicDBObject dbo = (BasicDBObject)dbc.next();
        String sourceKey = (String) dbo.get(SourcePojo.key_);
        if (null != sourceKey) {
          long docCount = 0L;
          try {
            BasicDBObject harvestStatus = (BasicDBObject) dbo.get(SourcePojo.harvest_);
            if (null != harvestStatus) {
              docCount = harvestStatus.getLong(SourceHarvestStatusPojo.doccount_, 0L);
            }
          }
          catch (Exception e) {}
         
          //DEBUG
          //System.out.println("SOURCE=" + sourceKey + " DOC_COUNT=" + docCount + " RUNNING=" + runningDocs +"," + runningSources + ": " + sourceKeyList);
         
          if (docCount > maxCountPerTask) { // source is large enough by itself
            // Create collection
            BasicDBObject collection = new BasicDBObject();
            collection.put(DocumentPojo.sourceKey_, sourceKey);
            collection.put(SourceHarvestStatusPojo.doccount_, docCount);
            sourceKeyListCollection.add(collection);
            // (leaving running* alone, can keep building that)
          }//TESTED (by eye, system community of demo cluster)
          else if ((runningDocs + docCount) > maxCountPerTask) { // have now got a large enough collection of sources
            if (null == sourceKeyList) {
              sourceKeyList = new BasicDBList();
            }
            sourceKeyList.add(sourceKey);
            // Create collection
            BasicDBObject collection = new BasicDBObject();
            collection.put(DocumentPojo.sourceKey_, sourceKeyList);
            collection.put(SourceHarvestStatusPojo.doccount_, runningDocs + docCount);
            sourceKeyListCollection.add(collection);     
            sourceKeyList = null;
            runningDocs = 0;
            runningSources = 0;
          }//TESTED (by eye, system community of demo cluster)
          else if (runningSources >= 15) { // have a limit on the number of sources per query, to keep the queries manageable
            sourceKeyList.add(sourceKey);
            // Create collection
            BasicDBObject collection = new BasicDBObject();
            collection.put(DocumentPojo.sourceKey_, sourceKeyList);
            collection.put(SourceHarvestStatusPojo.doccount_, runningDocs + docCount);
            sourceKeyListCollection.add(collection);     
            sourceKeyList = null;
            runningDocs = 0;
            runningSources = 0;           
          }//TESTED (by eye, system community of demo cluster)
          else { // (keep) build(ing) list
            if (null == sourceKeyList) {
              sourceKeyList = new BasicDBList();
            }
            sourceKeyList.add(sourceKey);
            runningDocs += docCount;
            runningSources++;           
          }//TESTED (by eye, system community of demo cluster)
        } //(end if has source key)
      }//(end loop over cursor)
View Full Code Here

      }//TESTED
    }//TESTED
      return null;
    }//TESTED
    public static BasicDBList encodeArray(JsonArray a) {
      BasicDBList dbl = new BasicDBList();
      for (JsonElement el: a) {
        dbl.add(encodeUnknown(el));
      }
      return dbl;     
    }//TESTED
View Full Code Here

  }//TESTED
 
  public static boolean enforceTypeNamingPolicy(Object je, int nDepth) {
   
    if (je instanceof BasicDBList) {
      BasicDBList ja = (BasicDBList)je;
      if (0 == ja.size()) {
        return false; // No idea, carry on
      }
      Object jaje = ja.iterator().next();
      return enforceTypeNamingPolicy(jaje, nDepth + 1); // keep going until you find primitive/object
    }
    else if (je instanceof BasicDBObject) {
      BasicDBObject jo = (BasicDBObject) je;
      // Nested variables:
View Full Code Here

    // 1. (doc_index field)
    doc.remove(DocumentPojo.index_);
    // 2. (source title)
    String tmp = doc.getString(DocumentPojo.source_);
    if (null != tmp) {
      BasicDBList array = new BasicDBList();
      array.add(tmp);
      doc.put(DocumentPojo.source_, array);
    }
    // 3. (source key)
    tmp = doc.getString(DocumentPojo.sourceKey_);
    if (null != tmp) {
      int nCommunityIndex = 0;
      if (-1 != (nCommunityIndex = tmp.indexOf('#')))  {
        tmp = tmp.substring(0, nCommunityIndex);
      }
      BasicDBList array = new BasicDBList();
      array.add(tmp);
      doc.put(DocumentPojo.sourceKey_, array);
    }
    // 4. (media type)
    tmp = doc.getString(DocumentPojo.mediaType_);
    if (null != tmp) {
      BasicDBList array = new BasicDBList();
      array.add(tmp);
      doc.put(DocumentPojo.mediaType_, array);
    }
   
  }//TESTED (see DOC_API1 in TestCode)
View Full Code Here

TOP

Related Classes of com.mongodb.BasicDBList

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc., now owned by Oracle Inc. Contact coftware#gmail.com.