BibTeX entries in bib/cnmat.bib

@INPROCEEDINGS{cnmat:sdif98,
  author        = {Matthew Wright and Amar Chaudhary and Adrian Freed and David Wessel and Xavier Rodet and Dominique Virolle and Rolf Woehrmann and Xavier Serra},
  title         = {{New Applications of the Sound Description Interchange Format}},
  booktitle     = {Proceedings of the International Computer Music Conference},
  year          = {1998},
}
@INPROCEEDINGS{cnmat:sdif98-short,
  author        = {M. Wright and others},
  title         = {{New Applications of the Sound Description Interchange Format}},
  booktitle     = {Proc. ICMC},
  year          = {1998},
}
@INPROCEEDINGS{cnmat:sdif99,
  author        = {Matthew Wright and Amar Chaudhary and Adrian Freed and Sami Khoury and David Wessel},
  title         = {{Audio Applications of the Sound Description Interchange Format Standard}},
  booktitle     = {AES 107th convention preprint},
  year          = {1999},
}
@INPROCEEDINGS{cnmat:sdif99-short,
  author        = {M. Wright and A. Chaudhary and A. Freed and S. Khoury and D. Wessel},
  title         = {{Audio Applications of the Sound Description Interchange Format Standard}},
  booktitle     = {AES 107th convention},
  year          = {1999},
}
@INPROCEEDINGS{cnmat:sdif99-sshort,
  author        = {M. Wright and others},
  title         = {{Audio Applications of the Sound Description Interchange Format Standard}},
  booktitle     = {AES 107th convention},
  year          = {1999},
}
@INPROCEEDINGS{cnmat:sdif-mpeg4,
  author        = {Matthew Wright and Eric D. Scheirer},
  title         = {{Cross-Coding SDIF into MPEG-4 Structured Audio}},
  booktitle     = {Proceedings of the International Computer Music Conference (ICMC)},
  year          = {1999},
  address       = {Beijing},
  month         = {October},
  url           = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/papers/saol+sdif/icmc99-saol+sdif.html},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/abstracts/sdif+mpeg4.html},
  bib-url       = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999},
  abstract      = {With the completion of the MPEG-4 international standard in October 1998, considerable industry and academic resources will be devoted to building implementations of the MPEG-4 Structured Audio tools. Among these tools is the Structured Audio Orchestra Language (``SAOL''), a general-purpose sound processing and synthesis language. The standardization of MPEG-4 and SAOL is an important development for the computer music community, because compositions written in SAOL will be able to be synthesized by any compliant MPEG-4 decoder. At the same time, the sound analysis and synthesis community has developed and embraced the Sound Description Interchange Format (``SDIF''), a general-purpose framework for representing various high-level sound descriptions such as sum-of-sinusoids, noise bands, time-domain samples, and formants. Many tools for composing and manipulating sound in the SDIF format have been created.
Composers, sound designers, and analysis/synthesis researchers can benefit from the combined strengths of MPEG-4 and SDIF by using the MPEG-4 Structured Audio decoder as an SDIF synthesizer. This allows the use of sophisticated SDIF tools to create musical works, while leveraging the anticipated wide penetration of MPEG-4 playback devices. Cross-coding SDIF into the Structured Audio format is an example of ``Generalized Audio Coding,'' a new paradigm in which an MPEG-4 Structured Audio decoder is used to flexibly understand and play sound stored in any format.
We cross-code SDIF into Structured Audio by writing a SAOL instrument for each type of SDIF sound representation and a translator that maps SDIF data into a Structured Audio score. Rather than use many notes to represent the frames of SDIF data, we use the ``streaming wavetable'' functions of SAOL to create instruments that dynamically interpret spectral, sinusoidal, or other constantly changing data. These SAOL instruments retrieve SDIF data from streaming wavetables via custom unit generators that can be reused to build SAOL synthesizers for other SDIF sound representations. We demonstrate the construction of several different SDIF object types within the Structured Audio framework; the resulting bitstreams are very compact and follow the MPEG-4 specification exactly. Any conforming MPEG-4 decoder can play them back and produce the sound desired by the composer. Our paper will discuss in depth the features of SAOL that make these sorts of instruments possible.
By building a link between the MPEG-4 community and the SDIF community, our work contributes to both: The MPEG-4 community benefits by receiving support for synthesis from a large and extensible collection of sound descriptions, each with unique properties of data compression and mutability. The SDIF community gets a stable SDIF synthesis platform that is likely to be supported on a variety of inexpensive, high-performance hardware platforms. MPEG-4 also provides the potential to integrate SDIF with other formats, e.g., streaming SDIF data synchronized with video and compressed speech. Finally, each standardization effort benefits from an expanded user base: SDIF users become MPEG-4 users without giving up their familiar tools, while MPEG-4 users outside the small community of sound analysis/synthesis researchers can discover SDIF and the high-level sound descriptions it supports.
We have made the cross-coding tools and SDIF object instruments freely available to the computer music community in order to promote the continuing interoperability of these important specifications.},
}
@INPROCEEDINGS{cnmat:sdif-mpeg4-short,
  author        = {M. Wright and E. Scheirer},
  title         = {{Cross-Coding SDIF into MPEG-4 Structured Audio}},
  booktitle     = {Proc. ICMC},
  year          = {1999},
  address       = {Beijing},
  url           = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/papers/saol+sdif/icmc99-saol+sdif.html},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/abstracts/sdif+mpeg4.html},
  bib-url       = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999},
}
@INPROCEEDINGS{cnmat:sdif-msp,
  author        = {Matthew Wright and Richard Dudas and Sami Khoury and Raymond Wang and David Zicarelli},
  title         = {{Supporting the Sound Description Interchange Format in the Max/MSP Environment}},
  booktitle     = {Proceedings of the International Computer Music Conference (ICMC)},
  year          = {1999},
  address       = {Beijing},
  month         = {October},
  url           = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/papers/msp+sdif/ICMC99-MSP+SDIF-short.html},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/abstracts/sdif+msp.html},
  bib-url       = {http://www.ircam.fr/equipes/repmus/RMPapers/},
  abstract      = {The Sound Description Interchange Format (``SDIF'') is an extensible, general-purpose framework for representing high-level sound descriptions such as sum-of-sinusoids, noise bands, time-domain samples, and formants, and is used in many interesting sound analysis and synthesis applications. SDIF data consists of time-tagged ``frames,'' each containing one or more 2D ``matrices''. For example, in an SDIF file representing additive synthesis data, the matrix rows represent individual sinusoids and the columns represent parameters such as frequency, amplitude, and phase.
Because of Max/MSP's many attractive features for developing real-time computer music applications, it makes a fine environment for developing applications that manipulate SDIF data. These features include active support and development, a large library of primitive computational objects, and a rich history and repertoire. Unfortunately, Max/MSP's limited language of data structures does not support the structure required by SDIF. Although it is straightforward to extend Max/MSP with an object to read SDIF, there is no Max/MSP data type that could be used to output SDIF data to the rest of a Max/MSP application. We circumvent these problems with a novel technique to manipulate SDIF data within Max/MSP.
We have created an object called ``SDIF-buffer'' that represents a collection of SDIF data in memory, analogous to MSP's ``buffer~'' object that represents audio samples in memory. This allows SDIF data to be represented with C data structures. Max/MSP has objects that provide various control structures to read data from a ``buffer~'' and output signals or events usable by other Max/MSP objects. Similarly, we have created a variety of ``SDIF selector'' objects that select a piece of SDIF data from an SDIF-buffer and shoehorn it into a standard Max/MSP data type. The simplest SDIF selector outputs the main matrix from the SDIF frame whose time tag is closest to a given input time. Arguments specify which columns should be output and whether each row should appear as an individual list or all the rows should be concatenated into a single list. More sophisticated SDIF selectors hide the discrete time sampling of SDIF frames, using interpolation along the time axis to synthesize SDIF data. This provides the abstraction of continuous time, with a virtual SDIF frame corresponding to any point along the time axis. We provide linear and a variety of polynomial interpolators.
This abstraction of continuously sampled SDIF data gives rise to sophisticated ways of moving through the time axis of an SDIF-buffer. We introduce the notion of a ``time machine'', a control structure for controlling position in an SDIF time axis in real time, and demonstrate time machines with musically useful features. ``SDIF mutator'' objects have been created that can manipulate data in an SDIF-buffer in response to Max messages. This allows us to write real-time sound analysis software to generate an SDIF model of an audio signal. We implement control structures such as transposition, filtering, and inharmonicity as normal Max/MSP patches that mutate a ``working'' SDIF-buffer; these are cascaded when they share the same SDIF-buffer. These control structures communicate via symbolic references to SDIF-buffers represented as normal Max messages. This system also supports network streaming of SDIF data.
As research continues towards more efficient and musically interesting streaming protocols, Max/MSP interfaces will be implemented in C as SDIF mutators that access a given SDIF buffer via a struct definition in the exposed SDIF-buffer header file. One promising approach is to begin transmission with a low-resolution representation and then fill it in with increasing detail. Time machines communicate with streaming interfaces via Max messages to request or predict ranges of time that will need to be available in the near future.},
}
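
The abstract above describes SDIF data as time-tagged frames, each holding one or more 2D matrices whose rows are individual sinusoids and whose columns are parameters such as frequency, amplitude, and phase. The following is a minimal C sketch of that layout under exactly those assumptions; the type and field names are hypothetical illustrations, not the declarations in CNMAT's actual SDIF or SDIF-buffer headers.

/* Hypothetical layout of a time-tagged frame of 2D matrices, as described
 * in the abstract above; not the CNMAT SDIF library's real declarations. */
typedef struct {
    int    rows;     /* e.g. one row per sinusoidal partial       */
    int    cols;     /* e.g. frequency, amplitude, phase          */
    float *data;     /* rows * cols values, stored row-major      */
} ExampleSDIFMatrix;

typedef struct {
    double             time;         /* time tag of this frame           */
    int                numMatrices;  /* one or more matrices per frame   */
    ExampleSDIFMatrix *matrices;
} ExampleSDIFFrame;

In the spirit of the abstract, a simple ``SDIF selector'' would scan an array of such frames for the one whose time field is closest to a requested time and output chosen columns of its main matrix as a Max list, while a time-interpolating selector would instead blend the matrices of the two surrounding frames.
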
@INPROCEEDINGS{cnmat:sdif-msp-short,
  author        = {M. Wright and R. Dudas and S. Khoury and R. Wang and D. Zicarelli},
  title         = {{Supporting the Sound Description Interchange Format in the Max/MSP Environment}},
  booktitle     = {Proc. ICMC},
  year          = {1999},
  address       = {Beijing},
  url           = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/papers/msp+sdif/ICMC99-MSP+SDIF-short.html},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999/abstracts/sdif+msp.html},
  bib-url       = {http://cnmat.CNMAT.Berkeley.EDU/ICMC1999},
}
@INPROCEEDINGS{cnmat:sdif-srl,
  author        = {Matthew Wright and Amar Chaudhary and Adrian Freed and Sami Khoury and Ali Momeni and Diemo Schwarz and David Wessel},
  title         = {{An XML-based SDIF Stream Relationships Language}},
  booktitle     = {Proceedings of the International Computer Music Conference},
  year          = {2000},
  address       = {Berlin},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC2000/abstracts/xml-sdif},
  bib-url       = {http://cnmat.CNMAT.Berkeley.EDU/ICMC2000/},
}
@INPROCEEDINGS{cnmat:sdif-srl-short,
  author        = {M. Wright and A. Chaudhary and A. Freed and S. Khoury and A. Momeni and D. Schwarz and D. Wessel},
  title         = {{An XML-based SDIF Stream Relationships Language}},
  booktitle     = {Proc. ICMC},
  year          = {2000},
  address       = {Berlin},
  abstract-url  = {http://cnmat.CNMAT.Berkeley.EDU/ICMC2000/abstracts/xml-sdif},
  bib-url       = {http://cnmat.CNMAT.Berkeley.EDU/ICMC2000/},
}
@INPROCEEDINGS{cnmat:osw2000-short,
  author        = {A. Chaudhary and A. Freed and M. Wright},
  title         = {{An Open Architecture for Real-time Music Software}},
  booktitle     = {Proc. ICMC},
  year          = {2000},
  address       = {Berlin},
}
