if( (pos < 0) || (pos >= timelineLen) ) return;
final String chName = doc.audioTracks.get( ch ).getName();
final double seconds = pos / timelineRate;
final AudioTrail at;
final DecimatedWaveTrail dt;
final float[][] data;
final float[] frame;
float f1;
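// collect the cursor position for the first info line: frame index,
// whole minutes and fractional seconds, plus the channel name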
argsCsr[3] = chName;
argsCsr[0] = new Long( pos );
argsCsr[1] = new Integer( (int) (seconds / 60) );
argsCsr[2] = new Float( seconds % 60 );
csrInfo[0] = msgCsr1.format( argsCsr );
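// the remaining info lines depend on the decimation model under the cursor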
switch( info.model ) {
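// full resolution: read the single sample frame at the cursor and show its
// value both linearly and in decibels (Math.log times TWENTYDIVLOG10,
// i.e. 20 * log10 of the magnitude, assuming the constant is 20 / ln 10)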
case DecimatedTrail.MODEL_PCM:
	at = doc.getAudioTrail();
	data = new float[ at.getChannelNum() ][];
	data[ ch ] = new float[ 1 ];
	try {
		at.readFrames( data, 0, new Span( pos, pos + 1 ));
	}
	catch( IOException e1 ) { return; }
	f1 = data[ ch ][ 0 ];
	argsCsr[4] = new Float( f1 );
	argsCsr[5] = new Float( Math.log( Math.abs( f1 )) * TWENTYDIVLOG10 );
	csrInfo[1] = msgCsr2PCMFloat.format( argsCsr );
	if( csrInfoIsInt ) {
		argsCsr[6] = new Long( (long) (f1 * (1L << (csrInfoBits - 1))) );
		argsCsr[7] = new Integer( csrInfoBits );
		csrInfo[2] = msgCsr3PCMInt.format( argsCsr );
	} else {
		csrInfo[2] = "";
	}
	break;
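
// decimated view: read one peak/RMS frame at decimation level info.idx + 1
// (clamped to the last available level) and take the peak as the maximum of
// the positive peak and the negated negative peak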
case DecimatedTrail.MODEL_FULLWAVE_PEAKRMS:
	dt = doc.getDecimatedWaveTrail();
	if( dt == null ) return;
	frame = new float[ dt.getNumModelChannels() ];
	try {
		dt.readFrame( Math.min( dt.getNumDecimations() - 1, info.idx + 1 ), pos, ch, frame );
	}
	catch( IOException e1 ) { return; }
	f1 = Math.max( frame[ 0 ], -frame[ 1 ] ); // peak pos/neg
	argsCsr[4] = new Float( f1 );
	argsCsr[5] = new Float( Math.log( f1 ) * TWENTYDIVLOG10 );