-#include "audio/SoundManager.hpp"
-
 #include "audio/alCheck.hpp"
+#include "audio/SoundManager.hpp"
+#include "loaders/LoaderSDT.hpp"

 extern "C" {
 #include <libavcodec/avcodec.h>
 #include <libavformat/avformat.h>
+#include <libavformat/avio.h>
 #include <libavutil/avutil.h>
 }

 // ab
@@ -16,6 +17,10 @@ extern "C" {
 #define av_frame_free avcodec_free_frame
 #endif

+#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(57,80,100)
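+// avio_context_free() is not available below this libavformat version; fall back to
+// av_freep(), which likewise frees the context and nulls the pointer.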
+#define avio_context_free av_freep
+#endif
+
 SoundManager::SoundManager() {
     initializeOpenAL();
     initializeAVCodec();
@@ -173,6 +178,243 @@ void SoundManager::SoundSource::loadFromFile(const rwfs::path& filePath) {
     avformat_close_input(&formatContext);
 }

+/// Structure describing the input handed to the custom AVIO read callback
+struct InputData {
+    uint8_t* ptr = nullptr;
+    size_t size{};  ///< size left in the buffer
+};
+
+/// Low-level read callback that copies data from the opaque handle (InputData) into buf.
+static int read_packet(void* opaque, uint8_t* buf, int buf_size) {
+    auto* input = reinterpret_cast<InputData*>(opaque);
+    buf_size = FFMIN(buf_size, input->size);
+    /* copy internal data to buf */
+    memcpy(buf, input->ptr, buf_size);
+    input->ptr += buf_size;
+    input->size -= buf_size;
+    return buf_size;
+}
+
+void SoundManager::SoundSource::loadSfx(const rwfs::path& path, const size_t& index, const bool asWave) {
+    // Allocate audio frame
+    AVFrame* frame = av_frame_alloc();
+    if (!frame) {
+        RW_ERROR("Error allocating the audio frame");
+        return;
+    }
+
+    /// Prepare a "custom" format context; the SDT loader supplies the raw asset data.
+    LoaderSDT sdt{};
+    sdt.load(path / "audio/sfx");
+
+    std::unique_ptr<char[]> raw_sound = sdt.loadToMemory(index, asWave);
+    if (!raw_sound) {
+        av_frame_free(&frame);
+        RW_ERROR("Error loading sound");
+        return;
+    }
+
+    /// Prepare the input descriptor
+    InputData input{};
+    input.size = sizeof(WaveHeader) + sdt.assetInfo.size;
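+    // Assumption: loadToMemory() returns the asset prefixed with a WaveHeader when
+    // asWave is set, so the readable size is the header plus the raw asset size.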
+
+    /// Allocate memory for the AVIO buffer; it is freed during cleanup at the end.
+    static constexpr size_t ioBufferSize = 4096;
+    auto ioBuffer = static_cast<uint8_t*>(av_malloc(ioBufferSize));
+
+    /// Point the input at the raw sound data, cast to the layout FFmpeg expects.
+    input.ptr = reinterpret_cast<uint8_t*>(raw_sound.get());
+
+    /// Finally, prepare our "custom" format context.
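+    // avio_alloc_context(buffer, buffer_size, write_flag, opaque, read_cb, write_cb, seek_cb):
+    // write_flag is 0 (read-only), read_packet() pulls bytes out of the InputData above,
+    // and no write or seek callbacks are provided.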
+    AVIOContext* avioContext = avio_alloc_context(ioBuffer, ioBufferSize, 0, &input, &read_packet, nullptr, nullptr);
+    AVFormatContext* formatContext = avformat_alloc_context();
+    formatContext->pb = avioContext;
+
+    if (avformat_open_input(&formatContext, "nothint", nullptr, nullptr) != 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        RW_ERROR("Error opening audio file (" << index << ")");
+        return;
+    }
+
+    if (avformat_find_stream_info(formatContext, nullptr) < 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Error finding audio stream info");
+        return;
+    }
+
+    // Find the audio stream
+    int streamIndex = av_find_best_stream(formatContext, AVMEDIA_TYPE_AUDIO, -1, -1, nullptr, 0);
+    if (streamIndex < 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Could not find any audio stream in the file");
+        return;
+    }
+
+    AVStream* audioStream = formatContext->streams[streamIndex];
+    AVCodec* codec = avcodec_find_decoder(audioStream->codecpar->codec_id);
+
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,5,0)
+    AVCodecContext* codecContext = audioStream->codec;
+    codecContext->codec = codec;
+
+    // Open the codec
+    if (avcodec_open2(codecContext, codecContext->codec, nullptr) != 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Couldn't open the audio codec context");
+        return;
+    }
+#else
+    // Initialize a codec context for the decoder.
+    AVCodecContext* codecContext = avcodec_alloc_context3(codec);
+    if (!codecContext) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Couldn't allocate a decoding context.");
+        return;
+    }
+
+    // Fill the codec context with the parameters of the codec used in the read file.
+    if (avcodec_parameters_to_context(codecContext, audioStream->codecpar) != 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        avcodec_close(codecContext);
+        avcodec_free_context(&codecContext);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Couldn't copy the stream parameters to the codec context");
+        return;
+    }
+
+    // Initialize the decoder.
+    if (avcodec_open2(codecContext, codec, nullptr) != 0) {
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        avcodec_close(codecContext);
+        avcodec_free_context(&codecContext);
+        avformat_close_input(&formatContext);
+        RW_ERROR("Couldn't open the audio codec context");
+        return;
+    }
+#endif
+
+    // Expose audio metadata
+    channels = static_cast<size_t>(codecContext->channels);
+    sampleRate = sdt.assetInfo.sampleRate;
+
+    // OpenAL only supports mono or stereo, so error on more than two channels
+    if (channels > 2) {
+        RW_ERROR("Audio has more than two channels");
+        av_free(formatContext->pb->buffer);
+        avio_context_free(&formatContext->pb);
+        av_frame_free(&frame);
+        avcodec_close(codecContext);
+        avformat_close_input(&formatContext);
+        return;
+    }
+
+    // Start reading audio packets
+    AVPacket readingPacket;
+    av_init_packet(&readingPacket);
+
+#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(57,37,100)
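+    // Older libavcodec: decode with avcodec_decode_audio4(); the send/receive API used
+    // in the #else branch below requires libavcodec >= 57.37.100.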
+
+    while (av_read_frame(formatContext, &readingPacket) == 0) {
+        if (readingPacket.stream_index == audioStream->index) {
+            AVPacket decodingPacket = readingPacket;
+
+            while (decodingPacket.size > 0) {
+                // Decode audio packet
+                int gotFrame = 0;
+                int len = avcodec_decode_audio4(codecContext, frame, &gotFrame, &decodingPacket);
+
+                if (len >= 0 && gotFrame) {
+                    // Write samples to audio buffer
+                    for (size_t i = 0; i < static_cast<size_t>(frame->nb_samples); i++) {
+                        // Interleave left/right channels
+                        for (size_t channel = 0; channel < channels; channel++) {
+                            int16_t sample = reinterpret_cast<int16_t*>(frame->data[channel])[i];
+                            data.push_back(sample);
+                        }
+                    }
+
+                    decodingPacket.size -= len;
+                    decodingPacket.data += len;
+                } else {
+                    decodingPacket.size = 0;
+                    decodingPacket.data = nullptr;
+                }
+            }
+        }
+        av_free_packet(&readingPacket);
+    }
+#else
+
+    while (av_read_frame(formatContext, &readingPacket) == 0) {
+        if (readingPacket.stream_index == audioStream->index) {
+            AVPacket decodingPacket = readingPacket;
+
+            int sendPacket = avcodec_send_packet(codecContext, &decodingPacket);
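+            // With the send/receive API one packet can yield zero or more frames, so
+            // drain avcodec_receive_frame() until it stops returning 0.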
+            int receiveFrame = 0;
+
+            while ((receiveFrame = avcodec_receive_frame(codecContext, frame)) == 0) {
+                // Decode audio packet
+                if (receiveFrame == 0 && sendPacket == 0) {
+                    // Write samples to audio buffer
+                    for (size_t i = 0; i < static_cast<size_t>(frame->nb_samples); i++) {
+                        // Interleave left/right channels
+                        for (size_t channel = 0; channel < channels; channel++) {
+                            int16_t sample = reinterpret_cast<int16_t*>(frame->data[channel])[i];
+                            data.push_back(sample);
+                        }
+                    }
+                }
+            }
+        }
+        av_packet_unref(&readingPacket);
+    }
+
+#endif
+
+    // Cleanup
+    /// Free all data used by the frame.
+    av_frame_free(&frame);
+
+    /// Close the context and free all data associated with it, but not the context itself.
+    avcodec_close(codecContext);
+
+    /// Free the context itself.
+    avcodec_free_context(&codecContext);
+
+    /// Free our custom AVIO buffer and context.
+    av_free(formatContext->pb->buffer);
+    avio_context_free(&formatContext->pb);
+
+    /// We are done here. Close the input.
+    avformat_close_input(&formatContext);
+}
+
 SoundManager::SoundBuffer::SoundBuffer() {
     alCheck(alGenSources(1, &source));
     alCheck(alGenBuffers(1, &buffer));
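
For reference, a minimal sketch of how the new loadSfx() path might be driven from calling code. The previewSfx() wrapper, the game data path, and the sfx index are illustrative assumptions, not part of this commit; only the loadSfx(path, index, asWave) signature and the data/channels/sampleRate members come from the code above.

// Sketch only: assumes the OpenRW headers touched by this commit are available
// and that SoundSource and its members are accessible from the caller.
#include "audio/SoundManager.hpp"

void previewSfx(const rwfs::path& gameDataPath) {
    SoundManager::SoundSource source;
    // loadSfx() opens <gameDataPath>/audio/sfx via LoaderSDT and decodes one entry
    // through the custom AVIO context set up above; the index and asWave flag here
    // are arbitrary example values.
    source.loadSfx(gameDataPath, 180, /*asWave=*/true);
    // On success the source holds interleaved 16-bit samples:
    //   source.data, source.channels, source.sampleRate
}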