package de.maramuse.soundcomp.control;
import java.util.List;
import java.util.Map;
import java.util.TreeMap;
import de.maramuse.soundcomp.parser.SoundCompText;
import de.maramuse.soundcomp.parser.StringIndexedNumber;
import de.maramuse.soundcomp.parser.TimeConsumer;
import de.maramuse.soundcomp.events.AbruptTempoChangeEvent;
import de.maramuse.soundcomp.events.ReturnToGlobalTiming;
import de.maramuse.soundcomp.events.TempoChangeEvent;
import de.maramuse.soundcomp.parser.SCParser.ParserVal;
import de.maramuse.soundcomp.parser.Process;
import de.maramuse.soundcomp.process.Scale;
import de.maramuse.soundcomp.util.AdvancerRegistry;
/*
* Copyright 2011 Jan Schmidt-Reinisch
*
* SoundComp - a sound processing library
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; in version 2.1
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*
*/
/**
 * A control logic that takes parse trees, processes them into timing information and DSP objects, and organizes
 * their execution.
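 *
 * A minimal usage sketch, assuming a SoundCompText parse tree has already been produced by the parser:
 *
 * <pre>
 * SoundCompText text=...; // parse tree obtained from the parser
 * Control control=new Control(text);
 * control.execute();
 * </pre>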
*/
public class Control {
// the complete parse tree
private SoundCompText txt;
  // the maximum amount of time by which events may be "negatively delayed" relative to their
  // relevant beat, i.e. scheduled ahead of it. In a more complete release, this should be
  // calculated from the parsed voice data.
double maximumNegativeEventDelay;
TempoChangeEvent globalTempo;
// the control logic controls the advancerRegistry for the DSP part
private AdvancerRegistry advancerRegistry=new AdvancerRegistry();
private Map<String, Process> processes=new TreeMap<String, Process>();
private Map<String, Scale> scales=new TreeMap<String, Scale>();
private Map<String, Voice> voices=new TreeMap<String, Voice>();
public Control(SoundCompText txt) {
this.txt=txt;
for(String s:txt.getVoiceList().getVoices().keySet()){
de.maramuse.soundcomp.parser.Voice v=txt.getVoiceList().getVoices().get(s);
Voice vv=new Voice(s);
vv.setpVoice(v);
voices.put(s,vv);
}
}
private void prepareProcessData() throws Exception {
for(ParserVal proc:txt.getProcessList().getList()){
if(!(proc instanceof Process))
throw new IllegalArgumentException("Member "+proc.getText()
+" of process list is not a process");
processes.put(proc.getText(), ((Process)proc).copyStructure());
// create the global process instance
}
}
private void prepareScaleData() {
if(txt.getScales()!=null){
for(de.maramuse.soundcomp.parser.Scale _scale:txt.getScales().values()){
Scale scale=new Scale();
// TODO: offer option to set a pitch offset to a scale
Integer pitch=0;
List<ParserVal> values=_scale.getValues();
int sz=values.size();
        for(int i=0; i<sz; i++){
ParserVal val=values.get(i);
if(!(val instanceof StringIndexedNumber)){
throw new IllegalArgumentException("illegal content in scale "+_scale.getName());
}
StringIndexedNumber sin=(StringIndexedNumber)val;
// TODO make index and value possible calculation results for more flexibility
// the constant term elimination will already allow some value calculation, though
String index=sin.getIndex().getText();
Double value=sin.getValue().asDouble();
          scale.put(index, value, pitch, pitch<sz-1 ? pitch+1 : null, pitch>0 ? pitch-1 : null);
          pitch++; // advance the pitch position along with the scale entry
}
scales.put(_scale.getName(), scale);
}
}
}
private void prepareVoiceData() {
// create the voice global process instances and connect them to the global process
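    // A commented sketch of the intended wiring (the lookup key getProcessName() and the
    // setter setVoiceProcess() are hypothetical, not existing API):
    // for(Voice v:voices.values()){
    //   Process p=processes.get(v.getpVoice().getProcessName());
    //   if(p!=null)
    //     v.setVoiceProcess(p.copyStructure());
    // }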
}
private void prepareTimingData() {
// search for the maximum "negative delay" because we need that to delay everything
// else in the opposite direction
    maximumNegativeEventDelay=0.1d; // temporary value until we have a real way of specifying or deriving it
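    // A possible derivation once voices carry their own negative delays (getNegativeDelay()
    // is a hypothetical accessor, not part of the current parser API):
    // for(Voice v:voices.values())
    //   maximumNegativeEventDelay=Math.max(maximumNegativeEventDelay, v.getNegativeDelay());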
globalTempo=new AbruptTempoChangeEvent(maximumNegativeEventDelay);
globalTempo.setStartBpm(txt.getGlobalParameters().getStartTempo());
globalTempo.setBeat(0);
}
private void processMelodies() {
// the maximumNegativeEventDelay must already be known
advancerRegistry.clear();
boolean finished;
for(Voice v:voices.values()){
v.setMelodyIndex(0);
}
String waitingVoice;
double globalBeat=0;
long iteration=0;
do{
// loop over each sample timeslice of the resulting piece of sound
double t=advancerRegistry.currentTime();
double oldGlobalBeat=globalBeat;
globalBeat=globalTempo.calculateBeatFromTimestamp(t);
if(Math.floor(oldGlobalBeat*4)!=Math.floor(globalBeat*4)){
System.out.println("global beat "+globalBeat+" at time "+t+", iteration "+iteration);
}
// in each sample, loop over all voices to see if we have to add or kill notes
// or schedule other voice-related events
for(Voice v:voices.values()){
// ignore the voice if it is waiting for a synchronization
if(v.getSynchronizeMark()==null){
// is the current note/pause of this voice over?
// depending on whether the voice is on global or private tempo,
// this needs to be calculated differently.
// if the voice has private tempo, the tempo and possible changes of it are known
// at the start of the pause/note that causes the wait, so we will have already
// calculated a timestamp for that incident.
// if the voice is on global tempo, the note/pause that causes the wait starts
// on a certain beat, and its length in beats is given. So we need to compare
// the target global beat time to the actual current global beat time.
// if during a waiting time the voice changes between global and local timing,
// we have a problem that we currently cannot solve. For the time being, this
          // cannot happen, but it may arise once we introduce voice threading;
// we will be forced to find a solution then.
boolean advance=false;
boolean nextScheduledEvent=false;
if(!v.isFollowGlobal()&&v.getNextEventTimestamp()<t)
nextScheduledEvent=true;
else if(v.isFollowGlobal()&&v.getNextEventGlobalBeat()<globalBeat)
nextScheduledEvent=true;
/* if(nextScheduledEvent||event)
System.out.println("analyzing voice "+v.getName()+" at timestamp "+t);
*/
if(nextScheduledEvent)do{
List<ParserVal> m=v.getpVoice().getMelody();
int melodyIndex=v.getMelodyIndex();
// only do anything if this voice has not yet reached its end
if(melodyIndex<m.size()){
ParserVal pv=m.get(melodyIndex);
v.setMelodyIndex(melodyIndex+1);
System.out.println(v.getpVoice().getName()+" retrieved a "+pv.getClass().getSimpleName()+" at timestamp "+t+", iteration "+iteration);
if(pv instanceof de.maramuse.soundcomp.parser.Note){
de.maramuse.soundcomp.parser.Note n=(de.maramuse.soundcomp.parser.Note)pv;
System.out.println("it is a "+n.getText());
                // instantiate and connect the process instance and schedule the delayed gate events.
                // Don't forget to consider the individual delay shifted by maximumNegativeEventDelay - a
                // voice without an own delay must still be delayed by maximumNegativeEventDelay here.
                // The process will be created, but its start must not take place before its delay is over.
                // Note that for private tempo voices we precalculate the birth, gate-on and gate-off times.
                // For global tempo voices we only precalculate the birth and gate-on events - the global
                // tempo may change while the note sounds, so gate-off is not really predictable.
                // The note death needs to be implicit from within the sound definition; no timestamp can
                // be guessed in advance.
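                // A commented sketch of the intended scheduling (scheduleGateOn/scheduleGateOff
                // and getProcessName() are hypothetical helpers, not existing API):
                // double birth=t+maximumNegativeEventDelay; // plus the voice's individual delay, once available
                // Process instance=processes.get(v.getpVoice().getProcessName()).copyStructure();
                // scheduleGateOn(instance, birth);
                // if(!v.isFollowGlobal()) // gate-off is only precalculable for private tempo voices
                //   scheduleGateOff(instance, birth+v.calculateDuration(v.getCurrentBeat(), n.getBeats()));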
}// NO "else", each note is also a TimeConsumer
if(pv instanceof de.maramuse.soundcomp.parser.TimeConsumer){
// this melody element is timing relevant. calculate the
// timestamp or beat of the next element.
double te=((TimeConsumer)pv).getBeats();
if(te==0||Double.isNaN(te))
te=v.getLastDurationBeats();
v.setLastDurationBeats(te);
if(v.isFollowGlobal()){
// voice follows global tempo, so we need to calculate beats as we cannot
// foresee how the tempo will change during the duration of the note
v.setNextEventGlobalBeat(te+v.getNextEventGlobalBeat());
System.out.println("it consumes "+te+" global beats up to global beat "+v.getNextEventGlobalBeat()+", private beat "+(v.getCurrentBeat()+te));
}else{
// voice has private tempo, we can already calculate end time - if there were
// tempo changes, we'd already have to know. Precalculated duration makes detection
// of the end of the duration easier in this case.
v.setNextEventTimestamp(t+v.calculateDuration(v.getCurrentBeat(), te));
System.out.println("it consumes "+te+" private beats, starting at private beat "+v.getCurrentBeat()+" up to timestamp "+v.getNextEventTimestamp());
}
v.setCurrentBeat(v.getCurrentBeat()+te);
advance=true;
}else if(pv instanceof de.maramuse.soundcomp.parser.Waitmark){
// synchronize to a global wait mark
de.maramuse.soundcomp.parser.Waitmark wm=(de.maramuse.soundcomp.parser.Waitmark)pv;
v.setSynchronizeMark(wm.getMarkName());
advance=true;
}else if(pv instanceof de.maramuse.soundcomp.parser.Setmark){
// restart the voices waiting for this mark
                // note that we have a slight incorrectness here:
                // the restarted voice may be one still to come in this outer loop,
                // in which case the restart has immediate effect; or it may already have
                // been iterated over, in which case the restart takes effect in the next timeslice,
// so we have a (0, +1 sample) timing jitter here.
// since we will usually have sample rates way above 20 kHz, the jitter
// introduced here is below 1/20 ms - I consider this tolerable. The jitter
// only influences the start and end of the notes, not the timing within
// them or even the signal. And it does not accumulate, every set/wait
// pair sets it anew.
String markname=((de.maramuse.soundcomp.parser.Setmark)pv).getMarkName();
for(Voice vv:voices.values()){
if(vv.getSynchronizeMark()!=null&&vv.getSynchronizeMark().equals(markname)){
// stop waiting
vv.setSynchronizeMark(null);
TempoChangeEvent tce=new AbruptTempoChangeEvent(t);
vv.setLastTimingEvent(tce);
vv.setNextEventTimestamp(t);
// TODO if the waiting voice is following global timing, it may now
// have a random beat offset to global - we need to calculate this.
if(vv.isFollowGlobal()){
                      vv.setBeatOffsetToGlobal(vv.getCurrentBeat()-globalBeat);// TODO calculate from last global timing event and current timestamp
vv.setNextEventGlobalBeat(globalBeat);
}
}
}
}else if(pv instanceof de.maramuse.soundcomp.parser.TimingEvent){
// accelerandi / ritardandi treated here
((de.maramuse.soundcomp.parser.TimingEvent)pv).setTempTimestamp(t
+maximumNegativeEventDelay);
TempoChangeEvent tce=((de.maramuse.soundcomp.parser.TimingEvent)pv).toTempoEvent();
if(v.isFollowGlobal()){
                  if(!(tce instanceof ReturnToGlobalTiming)){ // ignore if a redundant RTGT appears
                    if(Double.isNaN(tce.getStartTempo())){
                      // no start tempo given, take the current global tempo
                      System.out.println("changing global tempo");
                      tce.setBeat(globalBeat);
                      globalTempo=tce;
                    }
                  }
if(tce.isVoicelocal()){
// this voice now leaves global timing
System.out.println("leaving global tempo");
v.setLastTempoChange(tce);
v.setFollowGlobal(false);
}else{
// the global timing is about to change
}
}else{
if(Double.isNaN(tce.getStartTempo())){
// no start tempo given, take the current local tempo
tce.setStartTempo(v.getCurrentTempo(t));
}
v.setLastTempoChange(tce);
if(tce instanceof ReturnToGlobalTiming){
// calculate the beat offset of this voice compared to global beat
// then return
v.setFollowGlobal(true);
v.setBeatOffsetToGlobal(v.getCurrentBeat()-globalBeat);
v.setNextEventGlobalBeat(globalBeat);
System.out.println("returning to global tempo at global beat" + globalBeat +", private beat "+v.getCurrentBeat()+" with offset "+v.getBeatOffsetToGlobal());
// we will potentially have to care about notes that cross this
// timing domain change - when we start having threads and chords
}else{
// the private timing of this voice is changing
System.out.println("changing local tempo");
v.setLastTempoChange(tce);
}
}
}
/*
* else if(pv instanceof global parameter change){ }
*/
}else
advance=true;
}while(!advance);
}
}
      // determine if we have done everything.
      // NB: we may have the problem that we stop when we see the last note,
      // instead of when the last note is over. We will solve that later - we
      // must actually wait until the last note no longer produces any sound,
      // even through echo and reverberation effects, which probably cannot be
      // determined automatically. We can wait until the end of the actual note
      // itself by implicitly adding a pause at the end of each melody, but the
      // optional reverberation time must probably be given manually somehow.
finished=true;
waitingVoice=null;
for(Voice v:voices.values()){
List<ParserVal> m=v.getpVoice().getMelody();
if(v.getSynchronizeMark()!=null)
waitingVoice=v.getName();
if(v.getSynchronizeMark()==null&&v.getMelodyIndex()<m.size()){
finished=false;
break;
}
}
// if we ever go for realtime operation, this is the place where to synchronize
// to an external or constant internal clock
advancerRegistry.advanceAll();
iteration++;
}while(!finished);
if(waitingVoice!=null){
throw new RuntimeException("Voice "+waitingVoice+" has unresolved waiting mark");
}
}
private void prepare() throws Exception {
prepareProcessData();
prepareScaleData();
prepareTimingData(); // we should determine the maximum negative event delay here
prepareVoiceData();
}
public void execute() throws Exception {
// create control related scale, process, timing and voice data
prepare();
// create timed events from timing and voice data
processMelodies();
for(Voice v:voices.values()){
System.out.println(v.getDebugString());
}
}
}