Package: de.sciss.eisenkraut.io

Usage examples of de.sciss.eisenkraut.io.AudioTrail


  throws IOException
  {
    this.server      = server;
    this.doc      = doc;

    final AudioTrail  at      = doc.getAudioTrail();
    final Runnable    runTrigger;
    final SynthDef[]  defs;
    OSCBundle      bndl;
   
    transport      = doc.getTransport();
    nw          = NodeWatcher.newFrom( server );
    numInputChannels  = at.getChannelNum();      // XXX sync?
    channelMaps      = at.getChannelMaps();
    sourceRate      = doc.timeline.getRate();    // XXX sync?
    serverRate      = server.getSampleRate();
   
    DISKBUF_SIZE    = (Math.max( 44100, (int) sourceRate ) + DISKBUF_PAD) << 1// buffer size in frames
    DISKBUF_SIZE_H    = DISKBUF_SIZE >> 1;
[… remainder of this constructor truncated in the original listing …]


    final SynthDef[]    defs;
    final Buffer[]      bufsDisk;
    final Synth[]      synthsBufRd;
    final Bus        busInternal, busPh;
    final Span        span        = doc.timeline.getSelectionSpan();
    final AudioTrail    at          = doc.getAudioTrail();
    final long        nrtPlayOffset    = span.start;
    final Span[]      bufSpans      = new Span[ 1 ];
    final Group        nrtGrpRoot, nrtGrpInput;
    final Synth        synthPhasor;
    final float        realRate;
    final float        interpolation;
    final double      nrtServerRate;
    final float        rate        = 1.0f;
    Server          nrtServer      = null;
    NRTFile          f          = null;
    int            argIdx        = 1;
    int            audioBusOffset, bufferOffset; //, controlBusOffset;
    OSCBundle        bndl;
    double          time        = 0.0;
    boolean          even;
    int            nrtClock;
    long          pos          = nrtPlayOffset;
   
    if( ct == null ) {
//      rom.replyFailed( rom, new IOException( "No routing context" ));
      try {
        rom.replyFailed( 1 );
      }
      catch( IOException e11 ) {
        OSCRoot.failed( rom, e11 );
      }
    }
 
    try {
      f        = NRTFile.openAsWrite( new File( rom.msg.getArg( argIdx ).toString() ));
      argIdx++;
      audioBusOffset  = ((Number) rom.msg.getArg( argIdx )).intValue();
      argIdx++;
//      controlBusOffset= ((Number) rom.msg.getArg( argIdx )).intValue();
      argIdx++;
      bufferOffset  = ((Number) rom.msg.getArg( argIdx )).intValue();
      argIdx++;
      nrtServerRate    = ((Number) rom.msg.getArg( argIdx )).doubleValue();
     
      nrtServer      = new Server( "nrt" );
     
      f.write( SuperColliderClient.getInstance().loadDefsMsg() );
     
      defs      = createInputDefs( ct.chanMaps ); // ct.numInputChannels
      if( defs != null ) {
        for( int i = 0; i < defs.length; i++ ) {
          f.write( defs[ i ].recvMsg() );
        }
      }

//      sourceRate      = doc.timeline.getRate();
//      serverRate      = server.getSampleRate();
      srcFactor      = sourceRate / nrtServerRate;
      realRate      = (float) (rate * srcFactor);
      interpolation    = realRate == 1.0f ? 1f : 3f;
   
      nrtGrpRoot        = Group.basicNew( nrtServer );
      f.write( nrtGrpRoot.addToHeadMsg( nrtServer.getDefaultGroup() ));
      nrtGrpInput      = Group.basicNew( nrtServer );
      f.write( nrtGrpInput.addToTailMsg( nrtGrpRoot ));

      synthsBufRd        = new Synth[ ct.numFiles ];
      busInternal        = new Bus( nrtServer, kAudioRate, audioBusOffset, ct.numInChans );
      audioBusOffset       += busInternal.getNumChannels();
      busPh        = new Bus( nrtServer, kAudioRate, audioBusOffset );
      audioBusOffset       += busPh.getNumChannels();
      bufsDisk        = new Buffer[ ct.numFiles ];
      for( int i = 0; i < ct.numFiles; i++ ) {
        bufsDisk[ i ]    = new Buffer( nrtServer, DISKBUF_SIZE, ct.chanMaps[ i ].length, bufferOffset++ );
        f.write( bufsDisk[ i ].allocMsg() );
      }

      for( int i = 0; i < ct.numFiles; i++ ) {
        synthsBufRd[ i = Synth.basicNew( "eisk-input" + ct.chanMaps[ i ].length, nrtServer );
      }
      synthPhasor  = Synth.basicNew( "eisk-phasor", nrtServer );

      for( nrtClock = 0, even = true;; nrtClock++, even = !even ) {
        if( even ) {
          pos = nrtClock * DISKBUF_SIZE_HM - DISKBUF_PAD + nrtPlayOffset;
        } else {
          pos = nrtClock * DISKBUF_SIZE_HM + nrtPlayOffset;
        }
        if( pos >= span.stop ) break;
        f.setTime( time );
//System.err.println( "clock = "+clock+"; pos = "+pos+"; time = "+time );
        bndl        = new OSCBundle( time );
//        if( pos >= DISKBUF_PAD ) {
        if( pos < 0 ) {
          for( int i = 0; i < bufsDisk.length; i++ ) {
            bndl.addPacket( bufsDisk[ i ].fillMsg( 0, DISKBUF_PAD * bufsDisk[ i ].getNumChannels(), 0.0f ));
          }
          pos += DISKBUF_PAD;
        }
//          bufSpans[ 0 ] = new Span( pos - DISKBUF_PAD, pos - DISKBUF_PAD + DISKBUF_SIZE_H );
        bufSpans[ 0 ] = new Span( pos, pos + DISKBUF_SIZE_H );
        at.addBufferReadMessages( bndl, bufSpans, bufsDisk, even ? 0 : DISKBUF_SIZE_H );
        f.write( bndl );
       
        if( nrtClock == 0 ) {
          for( int i = 0, off = 0; i < ct.numFiles; i++ ) {
            f.write( synthsBufRd[ i ].newMsg( nrtGrpInput, new String[] {
[… remainder of this handler truncated in the original listing …]

   */
  public boolean consumerBegin( RenderSource source )
  throws IOException
  {
//System.err.println( "consumerBegin" );
    final AudioTrail    at;
    final ConsumerContext  consc   = (ConsumerContext) source.context.getOption( KEY_CONSC );
    final Span        span  = source.context.getTimeSpan();
   
//    consc.edit    = new SyncCompoundSessionObjEdit(
//      this, doc.bird, Session.DOOR_ALL, context.getTracks(),
//      AudioTrack.OWNER_WAVE, null, null, consc.plugIn.getName() );
    consc.edit    = new BasicCompoundEdit( consc.plugIn.getName() );
   
//    consc.bc    = BlendingAction.createBlendContext(
//      AbstractApplication.getApplication().getUserPrefs().node( BlendingAction.DEFAULT_NODE ),
//      context.getSourceRate(), context.getTimeSpan().getLength() / 2, context.getTimeSpan().getLength() / 2 ); // XXXXXXX

    at        = consc.doc.getAudioTrail();
// XXX
//    consc.bs    = mte.beginOverwrite( context.getTimeSpan(), consc.bc, consc.edit );
    consc.as    = at.alloc( span );
    consc.progOff  = getProgression();
    consc.progWeight= (1.0f - consc.progOff) / span.getLength();
   
    return true;
  }
[… listing truncated here in the original source …]

  // Completes a render run: commits the rendered audio (and, if valid,
  // marker data) into the session's trails as one undoable compound edit.
  // NOTE(review): the method is truncated below in this listing.
  public boolean consumerFinish( RenderSource source )
  throws IOException
  {
//System.err.println( "consumerFinish " + java.awt.EventQueue.isDispatchThread() );
    final ConsumerContext    consc   = (ConsumerContext) source.context.getOption( KEY_CONSC );
    final AudioTrail      at    = consc.doc.getAudioTrail();

// UUU
//    if( (consc.bs != null) && (consc.edit != null) ) {
//      mte.finishWrite( consc.bs, consc.edit, pt, 0.9f, 0.1f );
    if( consc.edit != null ) {
     
      // drive the progress display to its final stop
      ProcessingThread.flushProgression();
      ProcessingThread.setNextProgStop( 1.0f );
     
//      if( consc.as != null ) {
      if( source.validAudio ) {
        // flush pending writes, then swap the rendered span into the audio
        // trail as a remove/insert/add sequence inside the compound edit
        consc.as.flush();
        at.editBegin( consc.edit );
        at.editRemove( this, consc.as.getSpan(), consc.edit );
        at.editInsert( this, consc.as.getSpan(), consc.edit );
        at.editAdd( this, consc.as, consc.edit );
//        if( !audioTrail.copyRangeFrom( (AudioTrail) srcTrail, copySpan, insertPos, mode, this, edit, trackMap2, bcPre, bcPost )) return CANCELLED;
        at.editEnd( consc.edit );
      }
      if( source.validMarkers ) {
        // replace the markers inside the rendered time span with the new ones
        doc.markers.editBegin( consc.edit );
        doc.markers.editClear( this, source.context.getTimeSpan(), consc.edit );
        doc.markers.editAddAll( this, source.markers.getAll( true ), consc.edit );
[… remainder of this method truncated in the original listing …]

  // Consumes one rendered block: selects which channels take part in
  // blending/restoration, reads the corresponding input frames, and (below,
  // truncated in this listing) applies the pre/post cross-fades.
  public boolean consumerRender( RenderSource source )
  throws IOException
  {
//System.err.println( "consumerRender" );
    final ConsumerContext    consc   = (ConsumerContext) source.context.getOption( KEY_CONSC );
    final AudioTrail      at    = consc.doc.getAudioTrail();
    final boolean        preFade, postFade;

// UUU
//    if( consc.bs == null ) {
    if( consc.as == null ) {
      // consumerBegin was never called — report and abort this render
      source.context.getHost().showMessage( JOptionPane.ERROR_MESSAGE,
        AbstractApplication.getApplication().getResourceString( "renderEarlyConsume" ));
      return false;
    }
//    mte.continueWrite( consc.bs, source.blockBuf, source.blockBufOff, source.blockBufLen );
   
    // does this block intersect the blend-in / blend-out regions?
    preFade    = source.blockSpan.overlaps( consc.blendPreSpan );
    postFade  = source.blockSpan.overlaps( consc.blendPostSpan );

    // outBuf is audioBlockBuf but with the unused
    // channels set to null, so they won't be faded
    if( preFade || postFade || consc.restoreUnused ) {
      for( int ch = 0; ch < source.numAudioChannels; ch++ ) {
        if( source.audioTrackMap[ ch ]) {
//          System.out.println( "Yes for chan " + ch );
          consc.outBuf[ ch ] = source.audioBlockBuf[ ch ];
        } else {
          consc.outBuf[ ch ] = null;
        }
      }
      // original (un-rendered) frames needed for fading / restoration
      at.readFrames( consc.inBuf, 0, source.blockSpan );
    }
   
    if( preFade ) {
//      System.out.println( "pre fade" );
//      at.readFrames( consc.inBuf, 0, source.blockSpan );
[… remainder of this method truncated in the original listing …]

  {
    // NOTE(review): interior of a rendering-thread body (the signature is
    // truncated in this listing). Streams the context's time span block by
    // block from the audio trail into the render source, either sequentially
    // or via a RandomAccessRequester.
    final RenderContext        rc          = (RenderContext) proc.getClientArg( "context" );
//    final List            tis          = (List) pt.getClientArg( "tis" );
    final ConsumerContext      consc        = (ConsumerContext) rc.getOption( KEY_CONSC );
    final RenderSource        source        = (RenderSource) proc.getClientArg( "source" );
    final AudioTrail        at          = consc.doc.getAudioTrail();

    final int            inTrnsLen      = ((Integer) proc.getClientArg( "inTrnsLen" )).intValue();
    final RandomAccessRequester    rar          = (RandomAccessRequester) proc.getClientArg( "rar" );
    final boolean          randomAccess    = rar != null;
    int                readLen, writeLen;
//    Span              span;
    long              readOffset, remainingRead;
//    String              className;
    boolean              consStarted      = false;
    boolean              consFinished    = false;

    // --- clipboard related ---
//    Span              clipSpan      = null;
//    long              clipShift      = 0;
   
    // --- resampling related ---
    final int            inOff        = 0;

    // --- init ---
   
    remainingRead = context.getTimeSpan().getLength();
    if( source.validAudio ) ProcessingThread.setNextProgStop( 0.9f ); // XXX arbitrary

//    inOff    = 0;
    readOffset  = context.getTimeSpan().getStart();

    try {
      // --- rendering loop ---
     
prodLp:    while( !ProcessingThread.shouldCancel() ) {
        if( randomAccess ) {
          // the requester decides which span to deliver next
          source.blockSpan = rar.getNextSpan();
          readLen       = (int) source.blockSpan.getLength();
        } else {
          // sequential: advance through the time span in transfer-sized blocks
          readLen       = (int) Math.min( inTrnsLen - inOff, remainingRead );
          source.blockSpan = new Span( readOffset, readOffset + readLen );
          // preparation for next loop iteration
          remainingRead   -= readLen;
          readOffset      += readLen;
        }
        if( readLen == 0 ) break prodLp;
        writeLen      = readLen;
        source.audioBlockBufLen  = writeLen;

        // XXX optimization possibilities here:
        // leave some channels null (both in readFrames
        // as well as in arraycopy) depending on some
        // kind of policy (maybe a new value of audioPolicy: POLICY_READONLY
        // and POLICY_BYPASS would become POLICY_IGNORE
        at.readFrames( consc.inBuf, inOff, source.blockSpan );
        // looks like a bit of overload but in future
        // versions, channel arrangement might be different than 1:1 from mte
        for( int ch = 0; ch < source.numAudioChannels; ch++ ) {
          System.arraycopy( consc.inBuf[ ch ], inOff, source.audioBlockBuf[ ch ], 0, writeLen );
        }
[… remainder of this loop and method truncated in the original listing …]

  // NOTE: does not add the document to a handler
  public static Session newEmpty( AudioFileDescr afd, boolean createTransport, boolean createOSC )
  throws IOException
  {
    final Session      doc      = new Session( afd, createOSC );
    final AudioTrail    at      = AudioTrail.newFrom( afd )// does _not_ throw an IOException

//    try {
      doc.setAudioTrail( null, at );
//      if( createOSC ) doc.createOSC();
      if( createTransport ) doc.createTransport();
[… remainder of this factory method truncated in the original listing …]

  throws IOException
  {
    // NOTE(review): fragment of a single-file session factory — the
    // signature is truncated in this listing. Opens the file, builds an
    // AudioTrail from it, and wraps both in a new Session; on failure the
    // partially built resources are released before rethrowing.
    final AudioFile      af  = AudioFile.openAsRead( path );
    final AudioFileDescr  afd  = af.getDescr();
    Session          doc  = null;
    AudioTrail        at  = null;
   
    try {
// System.err.println( "readMarkers" );
      af.readMarkers();
      at          = AudioTrail.newFrom( af );
      doc          = new Session( afd, createOSC );
      doc.setAudioTrail( null, at );
//      if( createOSC ) doc.createOSC();
      if( createTransport ) doc.createTransport();
//      if( createDecimated ) {
//        doc.createDecimatedWaveTrail();
//        doc.createDecimatedSonaTrail();
//      }
      return doc;
    }
    catch( IOException e1 ) {
//      if( doc != null ) {
//        doc.dispose();
//      } else
      // clean-up: disposing the trail also releases the file it wraps;
      // otherwise close the bare audio file ourselves
      if( at != null ) {
        at.dispose();
      } else {
        af.cleanUp();
      }
      throw e1;
    }
[… remainder of this factory method truncated in the original listing …]

  public static Session newFrom( File[] paths, boolean createTransport, boolean createOSC )
  throws IOException
  {
    final AudioFile[]    afs    = new AudioFile[ paths.length ];
    final AudioFileDescr[]  afds  = new AudioFileDescr[ paths.length ];
    AudioTrail        at    = null;
    Session          doc    = null;
 
    try {
      for( int i = 0; i < paths.length; i++ ) {
        afs[ i = AudioFile.openAsRead( paths[ i ]);
        afds[ i = afs[ i ].getDescr();
        if( i > 0 ) {
          if( (afds[ i ].length != afds[ 0 ].length) || (afds[ i ].rate != afds[ 0 ].rate) ||
            (afds[ i ].bitsPerSample != afds[ 0 ].bitsPerSample) || (afds[ i ].sampleFormat != afds[ 0 ].sampleFormat) ) {
       
            throw new IOException( getResourceString( "errHeadersNotMatching" ));
          }
        }
//        System.err.println( "readMarkers" );
        afs[ i ].readMarkers();
      }
      at          = AudioTrail.newFrom( afs );
      doc          = new Session( afds, createOSC );
      doc.setAudioTrail( null, at );
//      if( createOSC ) doc.createOSC();
      if( createTransport ) doc.createTransport();
//      if( createDecimated ) {
//        doc.createDecimatedWaveTrail();
//        doc.createDecimatedSonaTrail();
//      }
      return doc;
    }
    catch( IOException e1 ) {
//      if( doc != null ) {
//        doc.dispose();
//      } else
      if( at != null ) {
        at.dispose();
      } else {
        for( int i = 0; i < paths.length; i++ ) {
          if( afs[ i ] != null ) afs[ i ].cleanUp();
        }
      }
[… remainder of the catch block truncated in the original listing …]

      final int            numFiles  = clientAFDs.length;
      final Session          doc      = (Session) context.getClientArg( "doc" );
      final boolean          saveMarkers  = ((Boolean) context.getClientArg( "markers" )).booleanValue();
      final Span            span    = (Span) context.getClientArg( "span" );
      final int[]            channelMap  = (int[]) context.getClientArg( "chanMap" );
      final AudioTrail        audioTrail  = doc.getAudioTrail();
//      final File[]          tempFs    = new File[ numFiles ];
//      final boolean[]          renamed    = new boolean[ numFiles ];
      final AudioFile[]        afs      = new AudioFile[ numFiles ];
      AudioFileDescr          afdTemp;
      File              tempF;
     
      context.putClientArg( "afs", afs );

      if( saveMarkers ) {
        if( clientAFDs[ 0 ].isPropertySupported( AudioFileDescr.KEY_MARKERS )) {
          doc.markers.copyToAudioFile( clientAFDs[ 0 ], span )// XXX
        } else if( !doc.markers.isEmpty() ) {
          System.err.println( "WARNING: markers are not saved in this file format!!!" );
        }
      } else { // WARNING: we must clear KEY_MARKERS, it might contain copied data!
        clientAFDs[ 0 ].setProperty( AudioFileDescr.KEY_MARKERS, null );
      }
      for( int i = 0; i < numFiles; i++ ) {
        if( clientAFDs[ i ].file.exists() ) {
//            tempFs[ i ]      = File.createTempFile( "eis", null, afds[ i ].file.getParentFile() );
//            tempFs[ i ].delete();
          tempF        = File.createTempFile( "eis", null, clientAFDs[ i ].file.getParentFile() );
          afdTemp        = new AudioFileDescr( clientAFDs[ i ]);
//            afdTemp.file    = tempFs[ i ];
          afdTemp.file    = tempF;
//            renamed[ i ]    = true;
          afs[ i ]      = AudioFile.openAsWrite( afdTemp );
        } else {
          afs[ i ]      = AudioFile.openAsWrite( clientAFDs[ i ]);
        }
      }
     
      audioTrail.flatten( afs, span, channelMap );
      return DONE;
    } // run
[… remainder of this class truncated in the original listing …]

TOP

Related Classes of de.sciss.eisenkraut.io.AudioTrail

Copyright © 2018 www.massapi.com. All rights reserved.
All source code is the property of its respective owners. Java is a trademark of Sun Microsystems, Inc. and is owned by Oracle Inc. Contact: coftware@gmail.com.