Package it.unimi.dsi.fastutil.ints

Examples of it.unimi.dsi.fastutil.ints.IntArrayList
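IntArrayList is fastutil's resizable list of primitive int values, backed by a plain int[] so it avoids the boxing overhead of ArrayList<Integer>. As a quick orientation before the project excerpts, here is a minimal, self-contained sketch of the basic API (written for this page, not drawn from any of the excerpts below):

    import it.unimi.dsi.fastutil.ints.IntArrayList;
    import it.unimi.dsi.fastutil.ints.IntList;

    public class IntArrayListBasics {
        public static void main(String[] args) {
            // The constructor argument is an initial capacity for the backing int[]
            IntList list = new IntArrayList(16);
            list.add(3);
            list.add(1);
            list.add(2);
            int first = list.getInt(0);      // type-specific accessor, no boxing
            int[] copy = list.toIntArray();  // snapshot as a plain array
            System.out.println(first + ", " + copy.length + ", " + list.size());
        }
    }

In the first excerpt, an IntArrayList collects the positions of rows that are new distinct keys and not yet present in an index: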


        ImmutableList.Builder<PageAndPositions> builder = ImmutableList.builder();
        long nextDistinctId = 0;
        GroupByHash groupByHash = new GroupByHash(types, allChannels, 10_000);
        for (UpdateRequest request : requests) {
            IntList positions = new IntArrayList();

            int startPosition = request.getStartPosition();
            Block[] blocks = request.getBlocks();

            // Move through the positions while advancing the cursors in lockstep
            int positionCount = blocks[0].getPositionCount();
            for (int position = startPosition; position < positionCount; position++) {
                // We are reading ahead in the cursors, so we need to filter out any nulls since they cannot join
                if (!containsNullValue(position, blocks) && groupByHash.putIfAbsent(position, blocks) == nextDistinctId) {
                    nextDistinctId++;

                    // Only include the key if it is not already in the index
                    if (existingSnapshot.getJoinPosition(position, blocks) == UNLOADED_INDEX_KEY) {
                        positions.add(position);
                    }
                }
            }

            if (!positions.isEmpty()) {
                builder.add(new PageAndPositions(request, positions));
            }
        }

        pageAndPositions = builder.build();
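Here an IntArrayList created with an explicit initial capacity backs a small wrapper class: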


    /**
     * Constructor.
     *
     * @param capacity the initial capacity of the backing list
     */
    public BasicIntArrayList(int capacity) {
      list = new IntArrayList(capacity);
    }
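Note that the constructor argument is a capacity hint, not an initial size; the list starts empty either way. A tiny illustration (hypothetical class name):

    import it.unimi.dsi.fastutil.ints.IntArrayList;

    final class CapacityVsSize {
        public static void main(String[] args) {
            IntArrayList list = new IntArrayList(1024); // pre-sizes the backing array
            System.out.println(list.size());            // prints 0: still empty
            list.add(42);
            System.out.println(list.size());            // prints 1
        }
    }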
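The next excerpt initializes a list of neighbor vertex ids, once with a capacity hint and once without: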

    EdgeIterables.initialize(this, edges);
  }

  @Override
  public void initialize(int capacity) {
    neighbors = new IntArrayList(capacity);
  }

  @Override
  public void initialize() {
    neighbors = new IntArrayList();
  }
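The following excerpts reuse one IntArrayList as a scratch buffer across samples: it is cleared per iteration, pre-sized with ensureCapacity(), filled, and copied out with toIntArray():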

    private int[][] overlappingReadIndicesBySampleIndex(final GenomeLoc overlap) {
        if (overlap == null)
            return null;
        final int sampleCount = samples.sampleCount();
        final int[][] result = new int[sampleCount][];
        final IntArrayList buffer = new IntArrayList(200);
        final int referenceIndex = overlap.getContigIndex();
        final int overlapStart = overlap.getStart();
        final int overlapEnd = overlap.getStop();
        for (int s = 0; s < sampleCount; s++) {
            buffer.clear();
            final GATKSAMRecord[] sampleReads = readsBySampleIndex[s];
            final int sampleReadCount = sampleReads.length;
            buffer.ensureCapacity(sampleReadCount);
            for (int r = 0; r < sampleReadCount; r++)
                if (unclippedReadOverlapsRegion(sampleReads[r], referenceIndex, overlapStart, overlapEnd))
                    buffer.add(r);
            result[s] = buffer.toIntArray();
        }
        return result;
    }
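A condensed sketch of the same allocate-once, clear-per-iteration idiom; the IntPredicate `keep` stands in for whatever filter applies:

    import it.unimi.dsi.fastutil.ints.IntArrayList;
    import java.util.function.IntPredicate;

    final class BufferReuse {
        // Collects, for each group, the indices in [0, itemsPerGroup) accepted by `keep`
        static int[][] collect(int groups, int itemsPerGroup, IntPredicate keep) {
            final int[][] result = new int[groups][];
            final IntArrayList buffer = new IntArrayList(itemsPerGroup);
            for (int g = 0; g < groups; g++) {
                buffer.clear();                  // resets the size; the backing array survives
                for (int i = 0; i < itemsPerGroup; i++) {
                    if (keep.test(i)) {
                        buffer.add(i);
                    }
                }
                result[g] = buffer.toIntArray(); // copies out, so the buffer stays reusable
            }
            return result;
        }
    }

Because clear() only resets the logical size, the backing array is never reallocated once it has grown large enough.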
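The same pattern, collecting the indices of reads that no longer overlap a genomic location so they can be removed sample by sample: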

        final int locContig = location.getContigIndex();
        final int locStart = location.getStart();
        final int locEnd = location.getStop();

        final int alleleCount = alleles.alleleCount();
        final IntArrayList removeIndices = new IntArrayList(10);
        for (int s = 0; s < sampleCount; s++) {
            int readRemoveCount = 0;
            final GATKSAMRecord[] sampleReads = readsBySampleIndex[s];
            final int sampleReadCount = sampleReads.length;
            for (int r = 0; r < sampleReadCount; r++)
                if (!unclippedReadOverlapsRegion(sampleReads[r], locContig, locStart, locEnd))
                    removeIndices.add(r);
            removeSampleReads(s,removeIndices,alleleCount);
            removeIndices.clear();
        }
    }
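Again for read filtering, this time accumulating the indices of poorly modelled reads: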

        if (Double.isNaN(maximumErrorPerBase) || maximumErrorPerBase <= 0.0)
            throw new IllegalArgumentException("the maximum error per base must be a positive number");
        final int sampleCount = samples.sampleCount();

        final int alleleCount = alleles.alleleCount();
        final IntArrayList removeIndices = new IntArrayList(10);
        for (int s = 0; s < sampleCount; s++) {
            final GATKSAMRecord[] sampleReads = readsBySampleIndex[s];
            final int sampleReadCount = sampleReads.length;
            for (int r = 0; r < sampleReadCount; r++) {
                final GATKSAMRecord read = sampleReads[r];
                if (readIsPoorlyModelled(s,r,read, maximumErrorPerBase))
                    removeIndices.add(r);
            }
            removeSampleReads(s, removeIndices, alleleCount);
            removeIndices.clear();
        }
    }
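And once more for downsampling, where the reusable list may need to hold every read index of a sample, hence the ensureCapacity() call: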

     * @throws IllegalArgumentException if {@code perSampleDownsamplingFraction} is {@code null}.
     */
    public void contaminationDownsampling(final Map<String, Double> perSampleDownsamplingFraction) {

        final int sampleCount = samples.sampleCount();
        final IntArrayList readsToRemove = new IntArrayList(10); // blind estimate, can be improved?
        final int alleleCount = alleles.alleleCount();
        for (int s = 0; s < sampleCount; s++) {
            final String sample = samples.sampleAt(s);
            final Double fractionDouble = perSampleDownsamplingFraction.get(sample);
            if (fractionDouble == null)
                continue;
            final double fraction = fractionDouble;
            if (Double.isNaN(fraction) || fraction <= 0.0)
                continue;
            if (fraction >= 1.0) {
                final int sampleReadCount = readsBySampleIndex[s].length;
                readsToRemove.ensureCapacity(sampleReadCount);
                for (int r = 0; r < sampleReadCount; r++)
                    readsToRemove.add(r);
                removeSampleReads(s,readsToRemove,alleleCount);
                readsToRemove.clear();
            }
            else {
                final Map<A,List<GATKSAMRecord>> readsByBestAllelesMap = readsByBestAlleleMap(s);
                removeSampleReads(s,AlleleBiasedDownsamplingUtils.selectAlleleBiasedReads(readsByBestAllelesMap, fraction),alleleCount);
            }
        }
    }
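In the next excerpt, three IntArrayLists accumulate per-term statistics (minimum doc id, maximum doc id, document frequency) while walking a Lucene term dictionary, and are converted to plain int[] arrays once loading finishes: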

    int dictValueCount = getDictValueCount(reader, fieldName);
    BigSegmentedArray order = newInstance(dictValueCount, maxDoc);
    this.orderArray = order;

    IntArrayList minIDList = new IntArrayList();
    IntArrayList maxIDList = new IntArrayList();
    IntArrayList freqList = new IntArrayList();

    int length = maxDoc + 1;
    @SuppressWarnings("unchecked")
    TermValueList<T> list = listFactory == null ? (TermValueList<T>) new TermStringList()
        : listFactory.createTermList();
    int negativeValueCount = getNegativeValueCount(reader, field);

    int t = 1; // valid term id starts from 1
    list.add(null);
    minIDList.add(-1);
    maxIDList.add(-1);
    freqList.add(0);
    int totalFreq = 0;
    Terms terms = reader.terms(field);
    if (terms != null) {
      TermsEnum termsEnum = terms.iterator(null);
      BytesRef text;
      while ((text = termsEnum.next()) != null) {
        // store term text
        // we expect that there is at most one term per document
        if (t >= length) throw new RuntimeException("there are more terms than "
            + "documents in field \"" + field + "\", but it's impossible to sort on "
            + "tokenized fields");
        String strText = text.utf8ToString();
        list.add(strText);
        Term term = new Term(field, strText);
        DocsEnum docsEnum = reader.termDocsEnum(term);
        // freqList.add(termEnum.docFreq()); // doesn't take deleted docs into account
        int minID = -1;
        int maxID = -1;
        int docID = -1;
        int df = 0;
        int valId = (t - 1 < negativeValueCount) ? (negativeValueCount - t + 1) : t;
        while ((docID = docsEnum.nextDoc()) != DocsEnum.NO_MORE_DOCS) {
          df++;
          order.add(docID, valId);
          minID = docID;
          while (docsEnum.nextDoc() != DocsEnum.NO_MORE_DOCS) {
            docID = docsEnum.docID();
            df++;
            order.add(docID, valId);
          }
          maxID = docID;
        }
        freqList.add(df);
        totalFreq += df;
        minIDList.add(minID);
        maxIDList.add(maxID);
        t++;
      }
    }

    list.seal();
    this.valArray = list;
    this.freqs = freqList.toIntArray();
    this.minIDs = minIDList.toIntArray();
    this.maxIDs = maxIDList.toIntArray();

    int doc = 0;
    // find the first and last documents carrying no real term (term id 0)
    // and patch the statistics of the dummy entry at index 0 accordingly
    while (doc < maxDoc && order.get(doc) != 0) {
      ++doc;
    }
    if (doc < maxDoc) {
      this.minIDs[0] = doc;
      doc = maxDoc - 1;
      while (doc >= 0 && order.get(doc) != 0) {
        --doc;
      }
      this.maxIDs[0] = doc;
    }
    this.freqs[0] = reader.numDocs() - totalFreq;
  }
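Finally, string facet values are translated into their integer indices, buffered in an IntList and returned as a plain int[]: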

  private static int[] convertString(FacetDataCache<?> dataCache, String[] vals) {
    IntList list = new IntArrayList(vals.length);
    for (int i = 0; i < vals.length; ++i) {
      int index = dataCache.valArray.indexOf(vals[i]);
      if (index >= 0) {
        list.add(index);
      }
    }
    return list.toIntArray();
  }
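Since IntList also implements List<Integer>, a plain for-each loop over it boxes every element; the type-specific iterator avoids that. A minimal sketch:

    import it.unimi.dsi.fastutil.ints.IntArrayList;
    import it.unimi.dsi.fastutil.ints.IntList;
    import it.unimi.dsi.fastutil.ints.IntListIterator;

    final class PrimitiveIteration {
        static long sum(IntList list) {
            long total = 0;
            // nextInt() returns a primitive int, so no Integer objects are created
            for (IntListIterator it = list.iterator(); it.hasNext(); ) {
                total += it.nextInt();
            }
            return total;
        }

        public static void main(String[] args) {
            System.out.println(sum(new IntArrayList(new int[] { 1, 2, 3 }))); // 6
        }
    }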
