positional audio works now

Leon van Kammen 2023-10-18 22:07:20 +02:00
parent 5c2602d40b
commit ebde27ab88
5 changed files with 60 additions and 39 deletions

View file

@@ -64,5 +64,4 @@ XRWG.generate = (opts) => {
   // sort by n
   XRWG.sort( (a,b) => a.nodes.length - b.nodes.length )
   XRWG = XRWG.reverse() // the cleankey/get functions e.g. will persist
-  console.dir(XRWG)
 }
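Side note on the sort-then-reverse above: Array.prototype.sort() and reverse() both mutate in place and return the same array object, which is why helper functions attached to the XRWG array survive the reordering. A minimal sketch (the cleankey helper and its signature are assumptions, not taken from this repo):

// sketch: properties attached to an array survive in-place sort()/reverse()
let XRWG = [ {nodes:[1]}, {nodes:[1,2,3]}, {nodes:[1,2]} ]
XRWG.cleankey = (k) => k.replace(/^#/,'')           // hypothetical helper
XRWG.sort( (a,b) => a.nodes.length - b.nodes.length )
XRWG = XRWG.reverse()                               // same reference, biggest node-count first
console.log( XRWG[0].nodes.length )                 // 3
console.log( typeof XRWG.cleankey )                 // "function" — still attached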

View file

@@ -72,7 +72,6 @@ xrf.frag.updatePredefinedView = (opts) => {
   let id = v.string || v.fragment
   if( id == '#' ) return
   let match = xrf.XRWG.match(id)
-  console.dir({id,match,XRWG:xrf.XRWG})
   // erase previous lines
   xrf.focusLine.lines.map( (line) => scene.remove(line) )
   xrf.focusLine.points = []
@@ -94,12 +93,7 @@ xrf.frag.updatePredefinedView = (opts) => {
   // if this query was triggered by an src-value, let's filter it
   const isSRC = opts.embedded && opts.embedded.fragment == 'src'
-  if( isSRC ){ // spec : https://xrfragment.org/#src
-    console.log("filtering predefined view of src")
-    console.dir(frag)
-  }else{
-    console.log("updatePredefinedView")
-    console.dir(frag)
+  if( !isSRC ){ // spec : https://xrfragment.org/#src
   for ( let i in frag ) {
     let v = frag[i]
     if( v.is( xrf.XRF.PV_EXECUTE ) ){
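The v.is() guard above is typically a bitmask flag test. A hedged sketch of how a check like v.is(xrf.XRF.PV_EXECUTE) is commonly implemented — the flag value and the .is() body here are assumptions, not taken from this codebase:

// hypothetical sketch of a bitmask flag test like v.is(xrf.XRF.PV_EXECUTE)
const PV_EXECUTE = 1 << 4                           // assumed flag value
let v = { flags: PV_EXECUTE }
v.is = (flag) => (v.flags & flag) != 0
console.log( v.is(PV_EXECUTE) )                     // true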

View file

@@ -45,7 +45,6 @@ xrf.frag.src = function(v, opts){
   let mimetype = res.headers.get('Content-type')
   if( url.replace(/#.*/,'').match(/\.(gltf|glb)$/) ) mimetype = 'gltf'
   //if( url.match(/\.(fbx|stl|obj)$/) ) mimetype =
-  console.log("src mimetype: "+mimetype)
   opts = { ...opts, src, frag }
   return xrf.frag.src.type[ mimetype ] ? xrf.frag.src.type[ mimetype ](url,opts) : xrf.frag.src.type.unknown(url,opts)
 })
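The return line above dispatches on a handler table keyed by mimetype, which is what makes src-types pluggable: the audio file below registers loadAudio for every audio mimetype the same way. A sketch of registering a custom handler (the mimetype string and handler body are hypothetical):

// hypothetical handler registration for the xrf.frag.src.type dispatch table
xrf.frag.src.type['model/obj'] = (url, opts) => {
  // ...load the asset, then attach it to opts.mesh...
}
// anything unregistered falls through to xrf.frag.src.type.unknown(url,opts)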

View file

@@ -7,26 +7,49 @@
  */
 let loadAudio = (mimetype) => function(url,opts){
-  let {mesh} = opts
+  let {mesh,src,camera} = opts
   let {urlObj,dir,file,hash,ext} = xrf.parseUrl(url)
   let frag = xrf.URI.parse( url )
-  // global audio
-  if( mesh.position.x == 0 && mesh.position.y == 0 && mesh.position.z == 0 ){
-    let audio = window.document.createElement('audio')
-    // init fallback formats
-    let fallback = ['mp3','ogg','wav','weba','aac']
-    let addSource = (ext) => {
-      const src = window.document.createElement('source')
-      src.setAttribute("src", url)
-      src.setAttribute("type",mimetype)
-      audio.appendChild(src)
-    }
+  /* WebAudio: setup context via THREEjs */
+  if( !camera.listener ){
+    camera.listener = new THREE.AudioListener();
+    camera.add( camera.listener );
+  }
-    fallback
-    .filter( (e) => e != ext )
-    .map( addSource )
-    document.body.appendChild(audio)
-    xrf.audio.push(audio)
-  }
+  let listener = camera.listener
+  let isPositionalAudio = !(mesh.position.x == 0 && mesh.position.y == 0 && mesh.position.z == 0)
+  const audioLoader = new THREE.AudioLoader();
+  let sound = isPositionalAudio ? new THREE.PositionalAudio(listener) : new THREE.Audio(listener)
+  audioLoader.load( url.replace(/#.*/,''), function( buffer ) {
+    sound.setBuffer( buffer );
+    sound.setLoop(true);
+    sound.setVolume(0.5);
+    sound.playXRF = (t) => {
+      if( sound.isPlaying ) sound.stop()
+      let hardcodedLoop = frag.t != undefined
+      t = hardcodedLoop ? { ...frag.t, x: t.x} : t // override with hardcoded metadata except playstate (x)
+      if( t && t.x != 0 ){
+        // *TODO* https://stackoverflow.com/questions/12484052/how-can-i-reverse-playback-in-web-audio-api-but-keep-a-forward-version-as-well
+        sound.setPlaybackRate( Math.abs(t.x) ) // WebAudio does not support negative playback
+        // setting loop
+        sound.setLoop( t.z > 0 )
+        // apply embedded audio/video samplerate/fps or global mixer fps
+        let loopStart = hardcodedLoop ? t.y / buffer.sampleRate : t.y / xrf.model.mixer.loop.fps
+        let loopEnd = hardcodedLoop ? t.z / buffer.sampleRate : t.z / xrf.model.mixer.loop.fps
+        let timeStart = loopStart > 0 ? loopStart : xrf.model.mixer.time
+        if( t.y > 0 ) sound.setLoopStart( loopStart )
+        if( t.z > 0 ) sound.setLoopEnd( loopEnd )
+        if( t.x != 0 ){
+          sound.offset = loopStart > 0 ? loopStart : timeStart
+          sound.play()
+        }
+      }
+    }
+    mesh.add(sound)
+    xrf.audio.push(sound)
+  });
 }
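The playXRF() defined above is driven by an XR-fragment t value: t.x is the playstate/speed (0 stops playback; the sign is discarded because WebAudio cannot play backwards), t.y the loop start and t.z the loop stop (sample offsets when hardcoded in the URL, mixer frames otherwise). A hedged usage sketch mirroring the event handler registered below:

// sketch: (re)trigger every registered sound from a t-fragment
let t = { x: 1, y: 0, z: 0 }                 // play forward, no custom loop
xrf.audio.map( (a) => a.playXRF(t) )         // same call the 't' listener below makes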
@@ -39,4 +62,7 @@ let audioMimeTypes = [
 ]
 audioMimeTypes.map( (mimetype) => xrf.frag.src.type[ mimetype ] = loadAudio(mimetype) )
 // *TODO* https://www.svgrepo.com/svg/20195/play-button should trigger play?
+xrf.addEventListener('t', (opts) => {
+  let t = opts.frag.t
+  xrf.audio.map( (a) => a.playXRF(t) )
+})
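For reference, the underlying THREE.js pattern the diff above follows, as a self-contained sketch (asset URL and geometry are placeholders): one AudioListener parented to the camera, and a PositionalAudio parented to the mesh so panning and attenuation follow its world position.

import * as THREE from 'three'

const camera   = new THREE.PerspectiveCamera()
const listener = new THREE.AudioListener()
camera.add( listener )                        // one listener, attached to the camera

const mesh  = new THREE.Mesh( new THREE.BoxGeometry(), new THREE.MeshBasicMaterial() )
const sound = new THREE.PositionalAudio( listener )
mesh.add( sound )                             // sound now follows the mesh

new THREE.AudioLoader().load( 'speaker.ogg', (buffer) => {   // placeholder URL
  sound.setBuffer( buffer )
  sound.setRefDistance( 1 )                   // distance where attenuation starts
  sound.setLoop( true )
  sound.play()                                // requires a prior user gesture
})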

View file

@@ -8,15 +8,9 @@ xrf.frag.t = function(v, opts){
   let frames = model.animations[0].tracks[0].times.length
   let mixer = model.mixer
   mixer.loop = mixer.loop || {frameStart:0,frameStop:99999999,speed: 1}
-  let fps = frames / duration
+  mixer.loop.fps = frames / duration
-  mixer.loop.speed = v.x
-  mixer.loop.speedAbs = Math.abs(v.x)
-  mixer.loop.frameStart = v.y || mixer.loop.frameStart
-  mixer.loop.frameStop = v.z || mixer.loop.frameStop
-  // always recalculate time using frameStart/Stop
-  mixer.loop.timeStart = mixer.loop.frameStart / (fps * mixer.loop.speedAbs)
-  mixer.loop.timeStop = mixer.loop.frameStop / (fps * mixer.loop.speedAbs)
+  xrf.frag.t.calculateLoop( v, mixer.loop, mixer.loop.fps )
   // update speed
   mixer.timeScale = mixer.loop.speed
@@ -26,12 +20,9 @@ xrf.frag.t = function(v, opts){
     mixer.time = Math.abs(mixer.time)
     mixer.update(0) // (forgetting) this little buddy cost me lots of time :]
     // (re)trigger audio
-    xrf.audio.map( (a) => {
-      a.play()
-      a.currentTime = time
-    })
   }
   //if( v.x != 0 ) xrf.emit('play',v) *TODO* touchend/usergesture
   if( v.y > 0 || v.z > 0 ) updateTime( mixer.loop.timeStart )
// update loop when needed
@@ -49,3 +40,15 @@ xrf.frag.t = function(v, opts){
     mixer.update.patched = true
   }
 }
+xrf.frag.t.default = {x:1, y:0, z:0}
+xrf.frag.t.calculateLoop = (t,obj,fps) => {
+  obj.speed = t.x
+  obj.speedAbs = Math.abs(t.x)
+  obj.frameStart = t.y || obj.frameStart
+  obj.frameStop = t.z || obj.frameStop
+  // always recalculate time using frameStart/Stop
+  obj.timeStart = obj.frameStart / (fps * obj.speedAbs)
+  obj.timeStop = obj.frameStop / (fps * obj.speedAbs)
+}
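A worked example of the calculateLoop() helper added above, using its frame-to-time formula timeStart = frameStart / (fps * |speed|) — the numbers are made up:

// e.g. a 24fps mixer, playing at 2x, looping frames 24..72
let loop = { frameStart: 0, frameStop: 99999999 }
xrf.frag.t.calculateLoop( {x: 2, y: 24, z: 72}, loop, 24 )
console.log( loop.timeStart )  // 24 / (24 * 2) = 0.5
console.log( loop.timeStop )   // 72 / (24 * 2) = 1.5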