Add planar audio support, improve test output

- Add planar audio support.  FFmpeg and libav use planar audio for many
  encoders, so support had to be added to libobs itself.  (A short usage
  sketch follows this list.)

- Improve/adjust the FFmpeg test output plugin.  The exports were somewhat
  messed up (making me rethink how exports should be done).  It is not yet
  fully functional: it handles video properly, but it still does not handle
  audio properly.

- Improve planar video code.  The planar video code was not accounting
  for row sizes on a per-plane basis; each plane now specifies its own
  row size, which also makes it more compatible with FFmpeg/libav.

- Fix a bug where callbacks weren't connected properly in the audio-io
  and video-io code.

- Implement a 'blogva' function to allow va_list usage with libobs
  logging.  (A second sketch, placed just before the per-file diffs,
  shows one way to use it.)
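
As a rough illustration of the new planar path (not part of the diff): a
caller now supplies one pointer per channel plane instead of a single
interleaved buffer.  A minimal sketch, assuming an audio_line_t obtained
from audio_output_createline() on an output opened with
AUDIO_FORMAT_FLOAT_PLANAR and SPEAKERS_STEREO:

#include <media-io/audio-io.h>

/* Hypothetical helper: hand one block of planar stereo float audio to
 * libobs; 'left' and 'right' are separate per-channel buffers. */
static void push_planar_stereo(audio_line_t line,
		const float *left, const float *right,
		uint32_t frames, uint64_t timestamp)
{
	struct audio_data data = {0};

	data.data[0]   = (const uint8_t *)left;  /* one plane per channel */
	data.data[1]   = (const uint8_t *)right;
	data.frames    = frames;
	data.timestamp = timestamp;
	data.volume    = 1.0f;

	audio_line_output(line, &data);
}
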
jp9000 2014-02-07 03:03:54 -07:00
parent 9a2b662935
commit 3d6d43225f
20 changed files with 707 additions and 275 deletions
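
A second minimal sketch (again an illustration, not part of the diff): the
new blogva() simply forwards a va_list to the libobs log handler, so a
plugin can wrap it in its own printf-style logger.  The include path and
wrapper name below are assumptions:

#include <stdarg.h>
#include <util/base.h>

/* Hypothetical wrapper: accept printf-style arguments and forward them
 * to libobs logging through the new blogva(). */
static void plugin_log(enum log_type type, const char *format, ...)
{
	va_list args;

	va_start(args, format);
	blogva(type, format, args);
	va_end(args);
}
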

View file

@ -89,7 +89,7 @@ static inline void required_extension_error(const char *extension)
{
}
static bool gl_init_extensions(struct gs_device* device)
static bool gl_init_extensions(struct gs_device* device)
{
if (!ogl_IsVersionGEQ(2, 1)) {
blog(LOG_ERROR, "obs-studio requires OpenGL version 2.1 or "

View file

@ -32,9 +32,9 @@ struct audio_line {
char *name;
struct audio_output *audio;
struct circlebuf buffer;
struct circlebuf buffers[MAX_AUDIO_PLANES];
pthread_mutex_t mutex;
DARRAY(uint8_t) volume_buffer;
DARRAY(uint8_t) volume_buffers[MAX_AUDIO_PLANES];
uint64_t base_timestamp;
uint64_t last_timestamp;
@ -48,8 +48,11 @@ struct audio_line {
static inline void audio_line_destroy_data(struct audio_line *line)
{
circlebuf_free(&line->buffer);
da_free(line->volume_buffer);
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++) {
circlebuf_free(&line->buffers[i]);
da_free(line->volume_buffers[i]);
}
pthread_mutex_destroy(&line->mutex);
bfree(line->name);
bfree(line);
@ -59,13 +62,12 @@ struct audio_output {
struct audio_output_info info;
size_t block_size;
size_t channels;
size_t planes;
pthread_t thread;
event_t stop_event;
DARRAY(uint8_t) pending_bytes;
DARRAY(uint8_t) mix_buffer;
DARRAY(uint8_t) mix_buffers[MAX_AUDIO_PLANES];
bool initialized;
@ -107,14 +109,16 @@ static inline size_t time_to_bytes(audio_t audio, uint64_t offset)
static inline void clear_excess_audio_data(struct audio_line *line,
uint64_t size)
{
if (size > line->buffer.size)
size = line->buffer.size;
for (size_t i = 0; i < line->audio->planes; i++) {
size_t clear_size = (size < line->buffers[i].size) ?
(size_t)size : line->buffers[i].size;
circlebuf_pop_front(&line->buffers[i], NULL, clear_size);
}
blog(LOG_WARNING, "Excess audio data for audio line '%s', somehow "
"audio data went back in time by %llu bytes",
line->name, size);
circlebuf_pop_front(&line->buffer, NULL, (size_t)size);
}
static inline uint64_t min_uint64(uint64_t a, uint64_t b)
@ -125,8 +129,8 @@ static inline uint64_t min_uint64(uint64_t a, uint64_t b)
static inline void mix_audio_line(struct audio_output *audio,
struct audio_line *line, size_t size, uint64_t timestamp)
{
/* TODO: this just overwrites, handle actual mixing */
if (!line->buffer.size) {
/* TODO: this just overwrites. handle actual mixing */
if (!line->buffers[0].size) {
if (!line->alive)
audio_output_removeline(audio, line);
return;
@ -139,17 +143,22 @@ static inline void mix_audio_line(struct audio_output *audio,
size -= time_offset;
size_t pop_size = (size_t)min_uint64(size, line->buffer.size);
circlebuf_pop_front(&line->buffer,
audio->mix_buffer.array + time_offset,
pop_size);
for (size_t i = 0; i < audio->planes; i++) {
size_t pop_size;
pop_size = (size_t)min_uint64(size, line->buffers[i].size);
circlebuf_pop_front(&line->buffers[i],
audio->mix_buffers[i].array + time_offset,
pop_size);
}
}
static inline void do_audio_output(struct audio_output *audio,
uint64_t timestamp, uint32_t frames)
{
struct audio_data data;
data.data = audio->mix_buffer.array;
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
data.data[i] = audio->mix_buffers[i].array;
data.frames = frames;
data.timestamp = timestamp;
data.volume = 1.0f;
@ -171,13 +180,15 @@ static void mix_and_output(struct audio_output *audio, uint64_t audio_time,
uint32_t frames = time_to_frames(audio, time_offset);
size_t bytes = frames * audio->block_size;
da_resize(audio->mix_buffer, bytes);
memset(audio->mix_buffer.array, 0, bytes);
for (size_t i = 0; i < audio->planes; i++) {
da_resize(audio->mix_buffers[i], bytes);
memset(audio->mix_buffers[i].array, 0, bytes);
}
while (line) {
struct audio_line *next = line->next;
if (line->buffer.size && line->base_timestamp < prev_time) {
if (line->buffers[0].size && line->base_timestamp < prev_time) {
clear_excess_audio_data(line,
prev_time - line->base_timestamp);
line->base_timestamp = prev_time;
@ -238,7 +249,7 @@ void audio_output_connect(audio_t audio,
{
pthread_mutex_lock(&audio->input_mutex);
if (audio_get_input_idx(audio, callback, param) != DARRAY_INVALID) {
if (audio_get_input_idx(audio, callback, param) == DARRAY_INVALID) {
struct audio_input input;
input.callback = callback;
input.param = param;
@ -282,6 +293,7 @@ int audio_output_open(audio_t *audio, struct audio_output_info *info)
{
struct audio_output *out;
pthread_mutexattr_t attr;
bool planar = is_audio_planar(info->format);
if (!valid_audio_params(info))
return AUDIO_OUTPUT_INVALIDPARAM;
@ -291,8 +303,9 @@ int audio_output_open(audio_t *audio, struct audio_output_info *info)
memcpy(&out->info, info, sizeof(struct audio_output_info));
pthread_mutex_init_value(&out->line_mutex);
out->channels = get_audio_channels(info->speakers);
out->block_size = out->channels *
out->channels = get_audio_channels(info->speakers);
out->planes = planar ? out->channels : 1;
out->block_size = (planar ? 1 : out->channels) *
get_audio_bytes_per_channel(info->format);
if (pthread_mutexattr_init(&attr) != 0)
@ -337,8 +350,9 @@ void audio_output_close(audio_t audio)
line = next;
}
da_free(audio->mix_buffer);
da_free(audio->pending_bytes);
for (size_t i = 0; i < MAX_AUDIO_PLANES; i++)
da_free(audio->mix_buffers[i]);
event_destroy(&audio->stop_event);
pthread_mutex_destroy(&audio->line_mutex);
bfree(audio);
@ -382,7 +396,7 @@ const struct audio_output_info *audio_output_getinfo(audio_t audio)
void audio_line_destroy(struct audio_line *line)
{
if (line) {
if (!line->buffer.size)
if (!line->buffers[0].size)
audio_output_removeline(line->audio, line);
else
line->alive = false;
@ -394,10 +408,21 @@ size_t audio_output_blocksize(audio_t audio)
return audio->block_size;
}
static inline void mul_vol_u8bit(struct audio_line *line, float volume,
size_t total_num)
size_t audio_output_planes(audio_t audio)
{
uint8_t *vals = line->volume_buffer.array;
return audio->planes;
}
size_t audio_output_channels(audio_t audio)
{
return audio->channels;
}
/* TODO: Optimization of volume multiplication functions */
static inline void mul_vol_u8bit(void *array, float volume, size_t total_num)
{
uint8_t *vals = array;
int16_t vol = (int16_t)(volume * 127.0f);
for (size_t i = 0; i < total_num; i++) {
@ -406,10 +431,9 @@ static inline void mul_vol_u8bit(struct audio_line *line, float volume,
}
}
static inline void mul_vol_16bit(struct audio_line *line, float volume,
size_t total_num)
static inline void mul_vol_16bit(void *array, float volume, size_t total_num)
{
uint16_t *vals = (uint16_t*)line->volume_buffer.array;
uint16_t *vals = array;
int32_t vol = (int32_t)(volume * 32767.0f);
for (size_t i = 0; i < total_num; i++)
@ -436,10 +460,9 @@ static inline void conv_float_to_24bit(float fval, uint8_t *vals)
vals[2] = (val >> 16) & 0xFF;
}
static inline void mul_vol_24bit(struct audio_line *line, float volume,
size_t total_num)
static inline void mul_vol_24bit(void *array, float volume, size_t total_num)
{
uint8_t *vals = line->volume_buffer.array;
uint8_t *vals = array;
for (size_t i = 0; i < total_num; i++) {
float val = conv_24bit_to_float(vals) * volume;
@ -448,10 +471,9 @@ static inline void mul_vol_24bit(struct audio_line *line, float volume,
}
}
static inline void mul_vol_32bit(struct audio_line *line, float volume,
size_t total_num)
static inline void mul_vol_32bit(void *array, float volume, size_t total_num)
{
int32_t *vals = (int32_t*)line->volume_buffer.array;
int32_t *vals = array;
for (size_t i = 0; i < total_num; i++) {
float val = (float)vals[i] / 2147483647.0f;
@ -459,10 +481,9 @@ static inline void mul_vol_32bit(struct audio_line *line, float volume,
}
}
static inline void mul_vol_float(struct audio_line *line, float volume,
size_t total_num)
static inline void mul_vol_float(void *array, float volume, size_t total_num)
{
float *vals = (float*)line->volume_buffer.array;
float *vals = array;
for (size_t i = 0; i < total_num; i++)
vals[i] *= volume;
@ -471,30 +492,42 @@ static inline void mul_vol_float(struct audio_line *line, float volume,
static void audio_line_place_data_pos(struct audio_line *line,
const struct audio_data *data, size_t position)
{
size_t total_num = data->frames * line->audio->channels;
bool planar = line->audio->planes > 1;
size_t total_num = data->frames * (planar ? 1 : line->audio->channels);
size_t total_size = data->frames * line->audio->block_size;
da_copy_array(line->volume_buffer, data->data, total_size);
for (size_t i = 0; i < line->audio->planes; i++) {
da_copy_array(line->volume_buffers[i], data->data[i],
total_size);
switch (line->audio->info.format) {
case AUDIO_FORMAT_U8BIT:
mul_vol_u8bit(line, data->volume, total_num);
break;
case AUDIO_FORMAT_16BIT:
mul_vol_16bit(line, data->volume, total_num);
break;
case AUDIO_FORMAT_32BIT:
mul_vol_32bit(line, data->volume, total_num);
break;
case AUDIO_FORMAT_FLOAT:
mul_vol_float(line, data->volume, total_num);
break;
case AUDIO_FORMAT_UNKNOWN:
break;
uint8_t *array = line->volume_buffers[i].array;
switch (line->audio->info.format) {
case AUDIO_FORMAT_U8BIT:
case AUDIO_FORMAT_U8BIT_PLANAR:
mul_vol_u8bit(array, data->volume, total_num);
break;
case AUDIO_FORMAT_16BIT:
case AUDIO_FORMAT_16BIT_PLANAR:
mul_vol_16bit(array, data->volume, total_num);
break;
case AUDIO_FORMAT_32BIT:
case AUDIO_FORMAT_32BIT_PLANAR:
mul_vol_32bit(array, data->volume, total_num);
break;
case AUDIO_FORMAT_FLOAT:
case AUDIO_FORMAT_FLOAT_PLANAR:
mul_vol_float(array, data->volume, total_num);
break;
case AUDIO_FORMAT_UNKNOWN:
blog(LOG_ERROR, "audio_line_place_data_pos: "
"Unknown format");
break;
}
circlebuf_place(&line->buffers[i], position,
line->volume_buffers[i].array, total_size);
}
circlebuf_place(&line->buffer, position, line->volume_buffer.array,
total_size);
}
static inline void audio_line_place_data(struct audio_line *line,
@ -513,7 +546,7 @@ void audio_line_output(audio_line_t line, const struct audio_data *data)
pthread_mutex_lock(&line->mutex);
if (!line->buffer.size) {
if (!line->buffers[0].size) {
line->base_timestamp = data->timestamp;
audio_line_place_data_pos(line, data, 0);

View file

@ -28,6 +28,8 @@ extern "C" {
* for the media.
*/
#define MAX_AUDIO_PLANES 8
struct audio_output;
struct audio_line;
typedef struct audio_output *audio_t;
@ -35,10 +37,16 @@ typedef struct audio_line *audio_line_t;
enum audio_format {
AUDIO_FORMAT_UNKNOWN,
AUDIO_FORMAT_U8BIT,
AUDIO_FORMAT_16BIT,
AUDIO_FORMAT_32BIT,
AUDIO_FORMAT_FLOAT,
AUDIO_FORMAT_U8BIT_PLANAR,
AUDIO_FORMAT_16BIT_PLANAR,
AUDIO_FORMAT_32BIT_PLANAR,
AUDIO_FORMAT_FLOAT_PLANAR,
};
enum speaker_layout {
@ -56,7 +64,7 @@ enum speaker_layout {
};
struct audio_data {
const void *data;
const uint8_t *data[MAX_AUDIO_PLANES];
uint32_t frames;
uint64_t timestamp;
float volume;
@ -99,16 +107,49 @@ static inline uint32_t get_audio_channels(enum speaker_layout speakers)
static inline size_t get_audio_bytes_per_channel(enum audio_format type)
{
switch (type) {
case AUDIO_FORMAT_U8BIT: return 1;
case AUDIO_FORMAT_16BIT: return 2;
case AUDIO_FORMAT_U8BIT:
case AUDIO_FORMAT_U8BIT_PLANAR:
return 1;
case AUDIO_FORMAT_16BIT:
case AUDIO_FORMAT_16BIT_PLANAR:
return 2;
case AUDIO_FORMAT_FLOAT:
case AUDIO_FORMAT_32BIT: return 4;
case AUDIO_FORMAT_UNKNOWN: return 0;
case AUDIO_FORMAT_FLOAT_PLANAR:
case AUDIO_FORMAT_32BIT:
case AUDIO_FORMAT_32BIT_PLANAR:
return 4;
case AUDIO_FORMAT_UNKNOWN:
return 0;
}
return 0;
}
static inline size_t is_audio_planar(enum audio_format type)
{
switch (type) {
case AUDIO_FORMAT_U8BIT:
case AUDIO_FORMAT_16BIT:
case AUDIO_FORMAT_32BIT:
case AUDIO_FORMAT_FLOAT:
return false;
case AUDIO_FORMAT_U8BIT_PLANAR:
case AUDIO_FORMAT_FLOAT_PLANAR:
case AUDIO_FORMAT_16BIT_PLANAR:
case AUDIO_FORMAT_32BIT_PLANAR:
return true;
case AUDIO_FORMAT_UNKNOWN:
return false;
}
return false;
}
static inline size_t get_audio_size(enum audio_format type,
enum speaker_layout speakers, uint32_t frames)
{
@ -133,6 +174,8 @@ EXPORT void audio_output_disconnect(audio_t video,
void *param);
EXPORT size_t audio_output_blocksize(audio_t audio);
EXPORT size_t audio_output_planes(audio_t audio);
EXPORT size_t audio_output_channels(audio_t audio);
EXPORT const struct audio_output_info *audio_output_getinfo(audio_t audio);
EXPORT audio_line_t audio_output_createline(audio_t audio, const char *name);

View file

@ -17,6 +17,7 @@
#include "../util/bmem.h"
#include "audio-resampler.h"
#include "audio-io.h"
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libswresample/swresample.h>
@ -29,22 +30,27 @@ struct audio_resampler {
uint64_t input_layout;
enum AVSampleFormat input_format;
uint8_t *output_buffer;
uint8_t *output_buffer[MAX_AUDIO_PLANES];
uint64_t output_layout;
enum AVSampleFormat output_format;
int output_size;
uint32_t output_ch;
uint32_t output_freq;
uint32_t output_planes;
};
static inline enum AVSampleFormat convert_audio_format(enum audio_format format)
{
switch (format) {
case AUDIO_FORMAT_UNKNOWN: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_U8BIT: return AV_SAMPLE_FMT_U8;
case AUDIO_FORMAT_16BIT: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_32BIT: return AV_SAMPLE_FMT_S32;
case AUDIO_FORMAT_FLOAT: return AV_SAMPLE_FMT_FLT;
case AUDIO_FORMAT_UNKNOWN: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_U8BIT: return AV_SAMPLE_FMT_U8;
case AUDIO_FORMAT_16BIT: return AV_SAMPLE_FMT_S16;
case AUDIO_FORMAT_32BIT: return AV_SAMPLE_FMT_S32;
case AUDIO_FORMAT_FLOAT: return AV_SAMPLE_FMT_FLT;
case AUDIO_FORMAT_U8BIT_PLANAR: return AV_SAMPLE_FMT_U8P;
case AUDIO_FORMAT_16BIT_PLANAR: return AV_SAMPLE_FMT_S16P;
case AUDIO_FORMAT_32BIT_PLANAR: return AV_SAMPLE_FMT_S32P;
case AUDIO_FORMAT_FLOAT_PLANAR: return AV_SAMPLE_FMT_FLTP;
}
/* shouldn't get here */
@ -77,16 +83,17 @@ audio_resampler_t audio_resampler_create(struct resample_info *dst,
struct audio_resampler *rs = bmalloc(sizeof(struct audio_resampler));
int errcode;
memset(rs, 0, sizeof(struct audio_resampler));
rs->opened = false;
rs->input_freq = src->samples_per_sec;
rs->input_layout = convert_speaker_layout(src->speakers);
rs->input_format = convert_audio_format(src->format);
rs->output_buffer = NULL;
rs->output_size = 0;
rs->output_ch = get_audio_channels(dst->speakers);
rs->output_freq = dst->samples_per_sec;
rs->output_layout = convert_speaker_layout(dst->speakers);
rs->output_format = convert_audio_format(dst->format);
rs->output_planes = is_audio_planar(dst->format) ? rs->output_ch : 1;
rs->context = swr_alloc_set_opts(NULL,
rs->output_layout, rs->output_format, dst->samples_per_sec,
@ -116,47 +123,50 @@ void audio_resampler_destroy(audio_resampler_t rs)
if (rs->context)
swr_free(&rs->context);
if (rs->output_buffer)
av_freep(&rs->output_buffer);
av_freep(&rs->output_buffer[0]);
bfree(rs);
}
}
bool audio_resampler_resample(audio_resampler_t rs,
void **output, uint32_t *out_frames,
const void *input, uint32_t in_frames,
uint64_t *timestamp_offset)
uint8_t *output[], uint32_t *out_frames, uint64_t *ts_offset,
const uint8_t *const input[], uint32_t in_frames)
{
struct SwrContext *context = rs->context;
int ret;
int64_t delay = swr_get_delay(context, rs->input_freq);
int estimated = (int)av_rescale_rnd(
delay + (int64_t)in_frames,
(int64_t)rs->output_freq, (int64_t)rs->input_freq,
AV_ROUND_UP);
*timestamp_offset = (uint64_t)swr_get_delay(context, 1000000000);
*ts_offset = (uint64_t)swr_get_delay(context, 1000000000);
/* resize the buffer if bigger */
if (estimated > rs->output_size) {
if (rs->output_buffer)
av_freep(&rs->output_buffer);
av_samples_alloc(&rs->output_buffer, NULL, rs->output_ch,
if (rs->output_buffer[0])
av_freep(&rs->output_buffer[0]);
av_samples_alloc(rs->output_buffer, NULL, rs->output_ch,
estimated, rs->output_format, 0);
rs->output_size = estimated;
}
ret = swr_convert(context,
&rs->output_buffer, rs->output_size,
(const uint8_t**)&input, in_frames);
rs->output_buffer, rs->output_size,
(const uint8_t**)input, in_frames);
if (ret < 0) {
blog(LOG_ERROR, "swr_convert failed: %d", ret);
return false;
}
*output = rs->output_buffer;
for (uint32_t i = 0; i < rs->output_planes; i++)
output[i] = rs->output_buffer[i];
*out_frames = (uint32_t)ret;
return true;
}

View file

@ -38,9 +38,8 @@ EXPORT audio_resampler_t audio_resampler_create(struct resample_info *dst,
EXPORT void audio_resampler_destroy(audio_resampler_t resampler);
EXPORT bool audio_resampler_resample(audio_resampler_t resampler,
void **output, uint32_t *out_frames,
const void *input, uint32_t in_frames,
uint64_t *timestamp_offset);
uint8_t *output[], uint32_t *out_frames, uint64_t *ts_offset,
const uint8_t *const input[], uint32_t in_frames);
#ifdef __cplusplus
}

View file

@ -85,34 +85,34 @@ static inline void pack_chroma_2plane(uint8_t *u_plane, uint8_t *v_plane,
*(uint16_t*)(v_plane+chroma_pos) = (uint16_t)(packed_vals>>16);
}
void compress_uyvx_to_i420(const void *input_v, uint32_t width, uint32_t height,
uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
void **output)
void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[])
{
const uint8_t *input = input_v;
uint8_t *lum_plane = output[0];
uint8_t *u_plane = output[1];
uint8_t *v_plane = output[2];
uint32_t chroma_pitch = width >> 1;
uint32_t y;
__m128i lum_mask = _mm_set1_epi32(0x0000FF00);
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * row_bytes;
uint32_t chroma_y_pos = (y>>1) * chroma_pitch;
uint32_t lum_y_pos = y * width;
uint32_t y_pos = y * in_row_bytes;
uint32_t chroma_y_pos = (y>>1) * out_row_bytes[1];
uint32_t lum_y_pos = y * out_row_bytes[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + width;
uint32_t lum_pos1 = lum_pos0 + out_row_bytes[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + row_bytes));
(const __m128i*)(img + in_row_bytes));
pack_lum(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask);
@ -123,10 +123,11 @@ void compress_uyvx_to_i420(const void *input_v, uint32_t width, uint32_t height,
}
}
static inline void _compress_uyvx_to_nv12(const uint8_t *input,
uint32_t width, uint32_t height, uint32_t pitch,
uint32_t start_y, uint32_t end_y, uint32_t row_bytes_out,
void **output)
void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[])
{
uint8_t *lum_plane = output[0];
uint8_t *chroma_plane = output[1];
@ -136,19 +137,19 @@ static inline void _compress_uyvx_to_nv12(const uint8_t *input,
__m128i uv_mask = _mm_set1_epi16(0x00FF);
for (y = start_y; y < end_y; y += 2) {
uint32_t y_pos = y * pitch;
uint32_t chroma_y_pos = (y>>1) * row_bytes_out;
uint32_t lum_y_pos = y * row_bytes_out;
uint32_t y_pos = y * in_row_bytes;
uint32_t chroma_y_pos = (y>>1) * out_row_bytes[1];
uint32_t lum_y_pos = y * out_row_bytes[0];
uint32_t x;
for (x = 0; x < width; x += 4) {
const uint8_t *img = input + y_pos + x*4;
uint32_t lum_pos0 = lum_y_pos + x;
uint32_t lum_pos1 = lum_pos0 + row_bytes_out;
uint32_t lum_pos1 = lum_pos0 + out_row_bytes[0];
__m128i line1 = _mm_load_si128((const __m128i*)img);
__m128i line2 = _mm_load_si128(
(const __m128i*)(img + pitch));
(const __m128i*)(img + in_row_bytes));
pack_lum(lum_plane, lum_pos0, lum_pos1,
line1, line2, lum_mask);
@ -158,48 +159,28 @@ static inline void _compress_uyvx_to_nv12(const uint8_t *input,
}
}
void compress_uyvx_to_nv12(const void *input, uint32_t width, uint32_t height,
uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
void **output)
void decompress_420(
const uint8_t *const input[], const uint32_t in_row_bytes[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes)
{
_compress_uyvx_to_nv12(input, width, height, row_bytes,
start_y, end_y, width, output);
}
void compress_uyvx_to_nv12_aligned(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, uint32_t row_bytes_out,
void **output)
{
_compress_uyvx_to_nv12(input, width, height, row_bytes,
start_y, end_y, row_bytes_out, output);
}
void decompress_420(const void *input_v, uint32_t width, uint32_t height,
uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
void *output_v)
{
uint8_t *output = output_v;
const uint8_t *input = input_v;
const uint8_t *input2 = input + width * height;
const uint8_t *input3 = input2 + width * height / 4;
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = width/2;
uint32_t height_d2 = end_y/2;
uint32_t y;
for (y = start_y_d2; y < height_d2; y++) {
const uint8_t *chroma0 = input2 + y * width_d2;
const uint8_t *chroma1 = input3 + y * width_d2;
const uint8_t *chroma0 = input[1] + y * in_row_bytes[1];
const uint8_t *chroma1 = input[2] + y * in_row_bytes[2];
register const uint8_t *lum0, *lum1;
register uint32_t *output0, *output1;
uint32_t x;
lum0 = input + y * 2*width;
lum0 = input[0] + y * 2*width;
lum1 = lum0 + width;
output0 = (uint32_t*)(output + y * 2*row_bytes);
output1 = (uint32_t*)((uint8_t*)output0 + row_bytes);
output0 = (uint32_t*)(output + y * 2 * out_row_bytes);
output1 = (uint32_t*)((uint8_t*)output0 + out_row_bytes);
for (x = 0; x < width_d2; x++) {
uint32_t out;
@ -214,29 +195,28 @@ void decompress_420(const void *input_v, uint32_t width, uint32_t height,
}
}
void decompress_nv12(const void *input_v, uint32_t width, uint32_t height,
uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
void *output_v)
void decompress_nv12(
const uint8_t *const input[], const uint32_t in_row_bytes[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes)
{
uint8_t *output = output_v;
const uint8_t *input = input_v;
const uint8_t *input2 = input + width * height;
uint32_t start_y_d2 = start_y/2;
uint32_t width_d2 = width/2;
uint32_t height_d2 = end_y/2;
uint32_t y;
for (y = start_y_d2; y < height_d2; y++) {
const uint16_t *chroma = (uint16_t*)(input2 + y * width);
const uint16_t *chroma;
register const uint8_t *lum0, *lum1;
register uint32_t *output0, *output1;
uint32_t x;
lum0 = input + y * 2*width;
lum1 = lum0 + width;
output0 = (uint32_t*)(output + y * 2*row_bytes);
output1 = (uint32_t*)((uint8_t*)output0 + row_bytes);
chroma = (const uint16_t*)(input[1] + y * in_row_bytes[1]);
lum0 = input[0] + y*2 * in_row_bytes[0];
lum1 = lum0 + in_row_bytes[0];
output0 = (uint32_t*)(output + y*2 * out_row_bytes);
output1 = (uint32_t*)((uint8_t*)output0 + out_row_bytes);
for (x = 0; x < width_d2; x++) {
uint32_t out = *(chroma++) << 8;
@ -250,15 +230,14 @@ void decompress_nv12(const void *input_v, uint32_t width, uint32_t height,
}
}
void decompress_422(const void *input_v, uint32_t width, uint32_t height,
uint32_t row_bytes, uint32_t start_y, uint32_t end_y,
void *output_v, bool leading_lum)
void decompress_422(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes,
bool leading_lum)
{
const uint8_t *input = input_v;
uint8_t *output = output_v;
uint32_t width_d2 = width >> 1;
uint32_t line_size = width * 2;
uint32_t width_d2 = width >> 1;
uint32_t y;
register const uint32_t *input32;
@ -267,9 +246,9 @@ void decompress_422(const void *input_v, uint32_t width, uint32_t height,
if (leading_lum) {
for (y = start_y; y < end_y; y++) {
input32 = (uint32_t*)(input + y*line_size);
input32 = (const uint32_t*)(input + y*in_row_bytes);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*row_bytes);
output32 = (uint32_t*)(output + y*out_row_bytes);
while(input32 < input32_end) {
register uint32_t dw = *input32;
@ -285,9 +264,9 @@ void decompress_422(const void *input_v, uint32_t width, uint32_t height,
}
} else {
for (y = start_y; y < end_y; y++) {
input32 = (uint32_t*)(input + y*line_size);
input32 = (const uint32_t*)(input + y*in_row_bytes);
input32_end = input32 + width_d2;
output32 = (uint32_t*)(output + y*row_bytes);
output32 = (uint32_t*)(output + y*out_row_bytes);
while (input32 < input32_end) {
register uint32_t dw = *input32;

View file

@ -23,32 +23,40 @@
extern "C" {
#endif
EXPORT void compress_uyvx_to_i420(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, void **output);
/*
* Functions for converting to and from packed 444 YUV
*/
EXPORT void compress_uyvx_to_nv12(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, void **output);
EXPORT void decompress_nv12(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, void *output);
EXPORT void decompress_420(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, void *output);
EXPORT void decompress_422(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
uint32_t start_y, uint32_t end_y, void *output,
bool leading_lum);
/* special case for quicksync */
EXPORT void compress_uyvx_to_nv12_aligned(const void *input,
uint32_t width, uint32_t height, uint32_t row_bytes,
EXPORT void compress_uyvx_to_i420(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint32_t row_bytes_out, void **output);
uint8_t *output[], const uint32_t out_row_bytes[]);
EXPORT void compress_uyvx_to_nv12(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output[], const uint32_t out_row_bytes[]);
EXPORT void decompress_nv12(
const uint8_t *const input[], const uint32_t in_row_bytes[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes);
EXPORT void decompress_420(
const uint8_t *const input[], const uint32_t in_row_bytes[],
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes);
EXPORT void decompress_422(
const uint8_t *input, uint32_t in_row_bytes,
uint32_t width, uint32_t height,
uint32_t start_y, uint32_t end_y,
uint8_t *output, uint32_t out_row_bytes,
bool leading_lum);
#ifdef __cplusplus
}

View file

@ -21,6 +21,7 @@
#include "../util/threading.h"
#include "../util/darray.h"
#include "format-conversion.h"
#include "video-io.h"
struct video_input {
@ -36,8 +37,10 @@ struct video_output {
pthread_mutex_t data_mutex;
event_t stop_event;
struct video_frame *cur_frame;
struct video_frame *next_frame;
struct video_frame cur_frame;
struct video_frame next_frame;
bool new_frame;
event_t update_event;
uint64_t frame_time;
volatile uint64_t cur_video_time;
@ -54,9 +57,9 @@ static inline void video_swapframes(struct video_output *video)
{
pthread_mutex_lock(&video->data_mutex);
if (video->next_frame) {
if (video->new_frame) {
video->cur_frame = video->next_frame;
video->next_frame = NULL;
video->new_frame = false;
}
pthread_mutex_unlock(&video->data_mutex);
@ -64,15 +67,36 @@ static inline void video_swapframes(struct video_output *video)
static inline void video_output_cur_frame(struct video_output *video)
{
if (!video->cur_frame)
size_t width = video->info.width;
size_t height = video->info.height;
if (!video->cur_frame.data[0])
return;
pthread_mutex_lock(&video->input_mutex);
/* TEST CODE */
/*static struct video_frame frame = {0};
if (!frame.data[0]) {
frame.data[0] = bmalloc(width * height);
frame.data[1] = bmalloc((width/2) * (height/2));
frame.data[2] = bmalloc((width/2) * (height/2));
frame.row_size[0] = width;
frame.row_size[1] = width/2;
frame.row_size[2] = width/2;
}
compress_uyvx_to_i420(
video->cur_frame.data[0], video->cur_frame.row_size[0],
width, height, 0, height,
(uint8_t**)frame.data, (uint32_t*)frame.row_size);*/
/* TODO: conversion */
for (size_t i = 0; i < video->inputs.num; i++) {
struct video_input *input = video->inputs.array+i;
input->callback(input->param, video->cur_frame);
input->callback(input->param, &video->cur_frame);//&frame);
}
pthread_mutex_unlock(&video->input_mutex);
@ -176,7 +200,7 @@ void video_output_connect(video_t video,
{
pthread_mutex_lock(&video->input_mutex);
if (video_get_input_idx(video, callback, param) != DARRAY_INVALID) {
if (video_get_input_idx(video, callback, param) == DARRAY_INVALID) {
struct video_input input;
input.callback = callback;
input.param = param;
@ -223,7 +247,8 @@ const struct video_output_info *video_output_getinfo(video_t video)
void video_output_frame(video_t video, struct video_frame *frame)
{
pthread_mutex_lock(&video->data_mutex);
video->next_frame = frame;
video->next_frame = *frame;
video->new_frame = true;
pthread_mutex_unlock(&video->data_mutex);
}

View file

@ -25,6 +25,8 @@ extern "C" {
/* Base video output component. Use this to create an video output track. */
#define MAX_VIDEO_PLANES 8
struct video_output;
typedef struct video_output *video_t;
@ -49,8 +51,8 @@ enum video_format {
};
struct video_frame {
const void *data;
uint32_t row_size; /* for RGB/BGR formats and UYVX */
const uint8_t *data[MAX_VIDEO_PLANES];
uint32_t row_size[MAX_VIDEO_PLANES];
uint64_t timestamp;
};

View file

@ -79,6 +79,11 @@ obs_output_t obs_output_create(const char *id, const char *name,
void obs_output_destroy(obs_output_t output)
{
if (output) {
if (output->callbacks.active) {
if (output->callbacks.active(output->data))
output->callbacks.stop(output->data);
}
pthread_mutex_lock(&obs->data.outputs_mutex);
da_erase_item(obs->data.outputs, &output);
pthread_mutex_unlock(&obs->data.outputs_mutex);

View file

@ -180,6 +180,84 @@ fail:
return NULL;
}
#define ALIGN_SIZE(size, align) \
size = (((size)+(align-1)) & (~(align-1)))
static void alloc_frame_data(struct source_frame *frame,
enum video_format format, uint32_t width, uint32_t height)
{
size_t size;
size_t offsets[MAX_VIDEO_PLANES];
memset(offsets, 0, sizeof(offsets));
switch (format) {
case VIDEO_FORMAT_NONE:
return;
case VIDEO_FORMAT_I420:
size = width * height;
ALIGN_SIZE(size, 32);
offsets[0] = size;
size += (width/2) * (height/2);
ALIGN_SIZE(size, 32);
offsets[1] = size;
size += (width/2) * (height/2);
ALIGN_SIZE(size, 32);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->data[2] = (uint8_t*)frame->data[0] + offsets[1];
frame->row_bytes[0] = width;
frame->row_bytes[1] = width/2;
frame->row_bytes[2] = width/2;
break;
case VIDEO_FORMAT_NV12:
size = width * height;
ALIGN_SIZE(size, 32);
offsets[0] = size;
size += (width/2) * (height/2) * 2;
ALIGN_SIZE(size, 32);
frame->data[0] = bmalloc(size);
frame->data[1] = (uint8_t*)frame->data[0] + offsets[0];
frame->row_bytes[0] = width;
frame->row_bytes[1] = width;
break;
case VIDEO_FORMAT_YVYU:
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
size = width * height * 2;
ALIGN_SIZE(size, 32);
frame->data[0] = bmalloc(size);
frame->row_bytes[0] = width*2;
break;
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
size = width * height * 4;
ALIGN_SIZE(size, 32);
frame->data[0] = bmalloc(size);
frame->row_bytes[0] = width*4;
break;
}
}
struct source_frame *source_frame_alloc(enum video_format format,
uint32_t width, uint32_t height)
{
struct source_frame *frame = bmalloc(sizeof(struct source_frame));
memset(frame, 0, sizeof(struct source_frame));
alloc_frame_data(frame, format, width, height);
frame->format = format;
frame->width = width;
frame->height = height;
return frame;
}
static void obs_source_destroy(obs_source_t source)
{
size_t i;
@ -202,7 +280,9 @@ static void obs_source_destroy(obs_source_t source)
if (source->data)
source->callbacks.destroy(source->data);
bfree(source->audio_data.data);
for (i = 0; i < MAX_AUDIO_PLANES; i++)
bfree(source->audio_data.data[i]);
audio_line_destroy(source->audio_line);
audio_resampler_destroy(source->resampler);
@ -454,7 +534,8 @@ static bool upload_frame(texture_t tex, const struct source_frame *frame)
enum convert_type type = get_convert_type(frame->format);
if (type == CONVERT_NONE) {
texture_setimage(tex, frame->data, frame->row_bytes, false);
texture_setimage(tex, frame->data[0], frame->row_bytes[0],
false);
return true;
}
@ -462,20 +543,24 @@ static bool upload_frame(texture_t tex, const struct source_frame *frame)
return false;
if (type == CONVERT_420)
decompress_420(frame->data, frame->width, frame->height,
frame->row_bytes, 0, frame->height, ptr);
decompress_420(frame->data, frame->row_bytes,
frame->width, frame->height, 0, frame->height,
ptr, row_bytes);
else if (type == CONVERT_NV12)
decompress_nv12(frame->data, frame->width, frame->height,
frame->row_bytes, 0, frame->height, ptr);
decompress_nv12(frame->data, frame->row_bytes,
frame->width, frame->height, 0, frame->height,
ptr, row_bytes);
else if (type == CONVERT_422_Y)
decompress_422(frame->data, frame->width, frame->height,
frame->row_bytes, 0, frame->height, ptr, true);
decompress_422(frame->data[0], frame->row_bytes[0],
frame->width, frame->height, 0, frame->height,
ptr, row_bytes, true);
else if (type == CONVERT_422_U)
decompress_422(frame->data, frame->width, frame->height,
frame->row_bytes, 0, frame->height, ptr, false);
decompress_422(frame->data[0], frame->row_bytes[0],
frame->width, frame->height, 0, frame->height,
ptr, row_bytes, false);
texture_unmap(tex);
return true;
@ -704,14 +789,68 @@ static inline struct source_frame *filter_async_video(obs_source_t source,
return in;
}
static inline void copy_frame_data_line(struct source_frame *dst,
const struct source_frame *src, uint32_t plane, uint32_t y)
{
uint32_t pos_src = y * src->row_bytes[plane];
uint32_t pos_dst = y * dst->row_bytes[plane];
uint32_t bytes = dst->row_bytes[plane] < src->row_bytes[plane] ?
dst->row_bytes[plane] : src->row_bytes[plane];
memcpy(dst->data[plane] + pos_dst, src->data[plane] + pos_src, bytes);
}
static inline void copy_frame_data_plane(struct source_frame *dst,
const struct source_frame *src, uint32_t plane, uint32_t lines)
{
if (dst->row_bytes[plane] != src->row_bytes[plane])
for (uint32_t y = 0; y < lines; y++)
copy_frame_data_line(dst, src, plane, y);
else
memcpy(dst->data[plane], src->data[plane],
dst->row_bytes[plane] * lines);
}
static void copy_frame_data(struct source_frame *dst,
const struct source_frame *src)
{
dst->flip = src->flip;
dst->timestamp = src->timestamp;
memcpy(dst->color_matrix, src->color_matrix, sizeof(float) * 16);
switch (dst->format) {
case VIDEO_FORMAT_I420:
copy_frame_data_plane(dst, src, 0, dst->height);
copy_frame_data_plane(dst, src, 1, dst->height/2);
copy_frame_data_plane(dst, src, 2, dst->height/2);
break;
case VIDEO_FORMAT_NV12:
copy_frame_data_plane(dst, src, 0, dst->height);
copy_frame_data_plane(dst, src, 1, dst->height/2);
break;
case VIDEO_FORMAT_YVYU:
case VIDEO_FORMAT_YUY2:
case VIDEO_FORMAT_UYVY:
case VIDEO_FORMAT_NONE:
case VIDEO_FORMAT_YUVX:
case VIDEO_FORMAT_UYVX:
case VIDEO_FORMAT_RGBA:
case VIDEO_FORMAT_BGRA:
case VIDEO_FORMAT_BGRX:
copy_frame_data_plane(dst, src, 0, dst->height);
}
}
static inline struct source_frame *cache_video(obs_source_t source,
const struct source_frame *frame)
{
/* TODO: use an actual cache */
struct source_frame *new_frame = bmalloc(sizeof(struct source_frame));
memcpy(new_frame, frame, sizeof(struct source_frame));
new_frame->data = bmalloc(frame->row_bytes * frame->height);
struct source_frame *new_frame = source_frame_alloc(frame->format,
frame->width, frame->height);
copy_frame_data(new_frame, frame);
return new_frame;
}
@ -780,21 +919,28 @@ static inline void reset_resampler(obs_source_t source,
}
static inline void copy_audio_data(obs_source_t source,
const void *data, uint32_t frames, uint64_t timestamp)
const void *const data[], uint32_t frames, uint64_t timestamp)
{
size_t planes = audio_output_planes(obs->audio.audio);
size_t blocksize = audio_output_blocksize(obs->audio.audio);
size_t size = (size_t)frames * blocksize;
/* ensure audio storage capacity */
if (source->audio_storage_size < size) {
bfree(source->audio_data.data);
source->audio_data.data = bmalloc(size);
source->audio_storage_size = size;
}
size_t size = (size_t)frames * blocksize;
bool resize = source->audio_storage_size < size;
source->audio_data.frames = frames;
source->audio_data.timestamp = timestamp;
memcpy(source->audio_data.data, data, size);
for (size_t i = 0; i < planes; i++) {
/* ensure audio storage capacity */
if (resize) {
bfree(source->audio_data.data[i]);
source->audio_data.data[i] = bmalloc(size);
}
memcpy(source->audio_data.data[i], data[i], size);
}
if (resize)
source->audio_storage_size = size;
}
/* resamples/remixes new audio to the designated main audio output format */
@ -809,12 +955,15 @@ static void process_audio(obs_source_t source, const struct source_audio *audio)
return;
if (source->resampler) {
void *output;
uint8_t *output[MAX_AUDIO_PLANES];
uint32_t frames;
uint64_t offset;
audio_resampler_resample(source->resampler, &output, &frames,
audio->data, audio->frames, &offset);
memset(output, 0, sizeof(output));
audio_resampler_resample(source->resampler,
output, &frames, &offset,
audio->data, audio->frames);
copy_audio_data(source, output, frames,
audio->timestamp - offset);
@ -843,7 +992,10 @@ void obs_source_output_audio(obs_source_t source,
* have a base for sync */
if (source->timing_set || (flags & SOURCE_ASYNC_VIDEO) == 0) {
struct audio_data data;
data.data = output->data;
for (int i = 0; i < MAX_AUDIO_PLANES; i++)
data.data[i] = output->data[i];
data.frames = output->frames;
data.timestamp = output->timestamp;
source_output_audio_line(source, &data);

View file

@ -231,9 +231,10 @@ static inline void output_video(struct obs_core_video *video, int cur_texture,
if (!video->textures_copied[prev_texture])
return;
memset(&frame, 0, sizeof(struct video_frame));
frame.timestamp = timestamp;
if (stagesurface_map(surface, &frame.data, &frame.row_size)) {
if (stagesurface_map(surface, &frame.data[0], &frame.row_size[0])) {
video->mapped_surface = surface;
video_output_frame(video->video, &frame);
}

View file

@ -96,13 +96,13 @@ struct obs_video_info {
};
struct filtered_audio {
void *data;
uint8_t *data[MAX_AUDIO_PLANES];
uint32_t frames;
uint64_t timestamp;
};
struct source_audio {
const void *data;
const uint8_t *data[MAX_AUDIO_PLANES];
uint32_t frames;
/* audio will be automatically resampled/upmixed/downmixed */
@ -115,10 +115,10 @@ struct source_audio {
};
struct source_frame {
void *data;
uint8_t *data[MAX_VIDEO_PLANES];
uint32_t row_bytes[MAX_VIDEO_PLANES];
uint32_t width;
uint32_t height;
uint32_t row_bytes;
uint64_t timestamp;
enum video_format format;
@ -126,12 +126,13 @@ struct source_frame {
bool flip;
};
EXPORT struct source_frame *source_frame_alloc(enum video_format format,
uint32_t width, uint32_t height);
static inline void source_frame_destroy(struct source_frame *frame)
{
if (frame) {
bfree(frame->data);
bfree(frame);
}
bfree(frame->data[0]);
bfree(frame);
}
enum packet_priority {

View file

@ -15,7 +15,6 @@
*/
#include <stdio.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdio.h>
@ -93,6 +92,11 @@ void bcrash(const char *format, ...)
va_end(args);
}
void blogva(enum log_type type, const char *format, va_list args)
{
log_handler(type, format, args);
}
void blog(enum log_type type, const char *format, ...)
{
va_list args;

View file

@ -16,7 +16,6 @@
#pragma once
#include <wctype.h>
#include <stdarg.h>
#include "c99defs.h"
@ -40,6 +39,8 @@ EXPORT void base_set_log_handler(
void (*handler)(enum log_type, const char *, va_list));
EXPORT void base_set_crash_handler(void (*handler)(const char *, va_list));
EXPORT void blogva(enum log_type type, const char *format, va_list args);
#ifndef _MSC_VER
#define PRINTFATTR(f, a) __attribute__((__format__(__printf__, f, a)))
#else

View file

@ -62,6 +62,11 @@ void OBSBasic::OBSInit()
/* TODO: this is a test */
obs_load_module("test-input");
/*obs_load_module("obs-ffmpeg");
obs_output_t output = obs_output_create("ffmpeg_output", "test",
NULL);
obs_output_start(output);*/
/* HACK: fixes a qt bug with native widgets with native repaint */
ui->previewContainer->repaint();
@ -302,7 +307,7 @@ bool OBSBasic::InitAudio()
struct audio_output_info ai;
ai.name = "test";
ai.samples_per_sec = 44100;
ai.format = AUDIO_FORMAT_16BIT;
ai.format = AUDIO_FORMAT_FLOAT_PLANAR;
ai.speakers = SPEAKERS_STEREO;
ai.buffer_ms = 700;

View file

@ -22,6 +22,9 @@
#define FILENAME_TODO "D:\\test.mp4"
#define SPS_TODO 44100
/* NOTE: much of this stuff is test stuff that was more or less copied from
* the muxing.c ffmpeg example */
static inline enum AVPixelFormat obs_to_ffmpeg_video_format(
enum video_format format)
{
@ -94,17 +97,22 @@ static bool open_video_codec(struct ffmpeg_data *data,
return false;
}
if (context->pix_fmt != AV_PIX_FMT_YUV420P) {
ret = avpicture_alloc(&data->src_picture, AV_PIX_FMT_YUV420P,
context->width, context->height);
if (ret < 0) {
blog(LOG_ERROR, "Failed to allocate src_picture: %s",
av_err2str(ret));
return false;
}
*((AVPicture*)data->vframe) = data->dst_picture;
return true;
}
static bool init_swscale(struct ffmpeg_data *data, AVCodecContext *context)
{
data->swscale = sws_getContext(
context->width, context->height, AV_PIX_FMT_YUV420P,
context->width, context->height, context->pix_fmt,
SWS_BICUBIC, NULL, NULL, NULL);
if (!data->swscale) {
blog(LOG_ERROR, "Could not initialize swscale");
return false;
}
*((AVPicture*)data->vframe) = data->dst_picture;
return true;
}
@ -117,6 +125,7 @@ static bool create_video_stream(struct ffmpeg_data *data)
blog(LOG_ERROR, "No active video");
return false;
}
if (!new_stream(data, &data->video, &data->vcodec,
data->output->oformat->video_codec))
return false;
@ -134,7 +143,14 @@ static bool create_video_stream(struct ffmpeg_data *data)
if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
context->flags |= CODEC_FLAG_GLOBAL_HEADER;
return open_video_codec(data, &ovi);
if (!open_video_codec(data, &ovi))
return false;
if (context->pix_fmt != AV_PIX_FMT_YUV420P)
if (!init_swscale(data, context))
return false;
return true;
}
static bool open_audio_codec(struct ffmpeg_data *data,
@ -149,6 +165,8 @@ static bool open_audio_codec(struct ffmpeg_data *data,
return false;
}
context->strict_std_compliance = -2;
ret = avcodec_open2(context, data->acodec, NULL);
if (ret < 0) {
blog(LOG_ERROR, "Failed to open audio codec: %s",
@ -168,14 +186,17 @@ static bool create_audio_stream(struct ffmpeg_data *data)
blog(LOG_ERROR, "No active audio");
return false;
}
if (!new_stream(data, &data->audio, &data->acodec,
data->output->oformat->audio_codec))
return false;
context = data->audio->codec;
context = data->audio->codec;
context->bit_rate = 128000;
context->channels = get_audio_channels(aoi.speakers);
context->sample_rate = aoi.samples_per_sec;
context->sample_fmt = data->acodec->sample_fmts ?
data->acodec->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
if (data->output->oformat->flags & AVFMT_GLOBALHEADER)
context->flags |= CODEC_FLAG_GLOBAL_HEADER;
@ -187,15 +208,13 @@ static inline bool init_streams(struct ffmpeg_data *data)
{
AVOutputFormat *format = data->output->oformat;
if (format->video_codec != AV_CODEC_ID_NONE) {
if (format->video_codec != AV_CODEC_ID_NONE)
if (!create_video_stream(data))
return false;
}
if (format->audio_codec != AV_CODEC_ID_NONE) {
if (format->audio_codec != AV_CODEC_ID_NONE)
if (!create_audio_stream(data))
return false;
}
return true;
}
@ -228,17 +247,14 @@ static inline bool open_output_file(struct ffmpeg_data *data)
static void close_video(struct ffmpeg_data *data)
{
avcodec_close(data->video->codec);
av_free(data->src_picture.data[0]);
av_free(data->dst_picture.data[0]);
avpicture_free(&data->dst_picture);
av_frame_free(&data->vframe);
}
static void close_audio(struct ffmpeg_data *data)
{
av_freep(&data->samples[0]);
avcodec_close(data->audio->codec);
av_free(data->samples[0]);
av_free(data->samples);
av_frame_free(&data->aframe);
}
@ -255,6 +271,8 @@ static void ffmpeg_data_free(struct ffmpeg_data *data)
avio_close(data->output->pb);
avformat_free_context(data->output);
memset(data, 0, sizeof(struct ffmpeg_data));
}
static bool ffmpeg_data_init(struct ffmpeg_data *data)
@ -265,7 +283,7 @@ static bool ffmpeg_data_init(struct ffmpeg_data *data)
/* TODO: settings */
avformat_alloc_output_context2(&data->output, NULL, NULL,
"D:\\test.mp4");
FILENAME_TODO);
if (!data->output) {
blog(LOG_ERROR, "Couldn't create avformat context");
goto fail;
@ -293,7 +311,12 @@ const char *ffmpeg_output_getname(const char *locale)
return "FFmpeg file output";
}
struct ffmpeg_output *ffmpeg_output_create(obs_data_t settings,
void test_callback(void *param, int bla, const char *format, va_list args)
{
blogva(LOG_INFO, format, args);
}
struct ffmpeg_output *ffmpeg_output_create(const char *settings,
obs_output_t output)
{
struct ffmpeg_output *data = bmalloc(sizeof(struct ffmpeg_output));
@ -301,27 +324,159 @@ struct ffmpeg_output *ffmpeg_output_create(obs_data_t settings,
data->output = output;
av_log_set_callback(test_callback);
return data;
}
void ffmpeg_output_destroy(struct ffmpeg_output *data)
{
if (data) {
ffmpeg_data_free(&data->ff_data);
if (data->active)
ffmpeg_data_free(&data->ff_data);
bfree(data);
}
}
void ffmpeg_output_update(struct ffmpeg_output *data, obs_data_t settings)
void ffmpeg_output_update(struct ffmpeg_output *data, const char *settings)
{
}
static inline int64_t rescale_ts(int64_t val, AVCodecContext *context,
AVStream *stream)
{
return av_rescale_q_rnd(val, context->time_base,
stream->time_base,
AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}
#define YUV420_PLANES 3
static inline void copy_data(AVPicture *pic, const struct video_frame *frame,
int height)
{
for (int plane = 0; plane < YUV420_PLANES; plane++) {
int frame_rowsize = (int)frame->row_size[plane];
int pic_rowsize = pic->linesize[plane];
int bytes = frame_rowsize < pic_rowsize ?
frame_rowsize : pic_rowsize;
int plane_height = plane == 0 ? height : height/2;
for (int y = 0; y < plane_height; y++) {
int pos_frame = y * frame_rowsize;
int pos_pic = y * pic_rowsize;
memcpy(pic->data[plane] + pos_pic,
frame->data[plane] + pos_frame,
bytes);
}
}
}
static void receive_video(void *param, const struct video_frame *frame)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
AVCodecContext *context = data->video->codec;
AVPacket packet = {0};
int ret, got_packet;
av_init_packet(&packet);
if (context->pix_fmt != AV_PIX_FMT_YUV420P)
sws_scale(data->swscale, frame->data, frame->row_size,
0, context->height, data->dst_picture.data,
data->dst_picture.linesize);
else
copy_data(&data->dst_picture, frame, context->height);
if (data->output->flags & AVFMT_RAWPICTURE) {
packet.flags |= AV_PKT_FLAG_KEY;
packet.stream_index = data->video->index;
packet.data = data->dst_picture.data[0];
packet.size = sizeof(AVPicture);
ret = av_interleaved_write_frame(data->output, &packet);
} else {
data->vframe->pts = data->total_frames;
ret = avcodec_encode_video2(context, &packet, data->vframe,
&got_packet);
if (ret < 0) {
blog(LOG_ERROR, "receive_video: Error encoding "
"video: %s", av_err2str(ret));
return;
}
if (!ret && got_packet && packet.size) {
packet.pts = rescale_ts(packet.pts, context,
data->video);
packet.dts = rescale_ts(packet.dts, context,
data->video);
packet.duration = (int)av_rescale_q(packet.duration,
context->time_base,
data->video->time_base);
ret = av_interleaved_write_frame(data->output, &packet);
} else {
ret = 0;
}
}
if (ret != 0) {
blog(LOG_ERROR, "receive_video: Error writing video: %s",
av_err2str(ret));
}
data->total_frames++;
}
static void receive_audio(void *param, const struct audio_data *frame)
{
struct ffmpeg_output *output = param;
struct ffmpeg_data *data = &output->ff_data;
AVCodecContext *context = data->audio->codec;
AVPacket packet = {0};
int channels = (int)audio_output_channels(obs_audio());
size_t planes = audio_output_planes(obs_audio());
int ret, got_packet;
data->aframe->nb_samples = frame->frames;
data->aframe->pts = av_rescale_q(data->total_samples,
(AVRational){1, context->sample_rate},
context->time_base);
if (!data->samples[0])
av_samples_alloc(data->samples, NULL, channels,
frame->frames, context->sample_fmt, 0);
for (size_t i = 0; i < planes; i++) {
/* TODO */
}
data->total_samples += frame->frames;
ret = avcodec_encode_audio2(context, &packet, data->aframe,
&got_packet);
if (ret < 0) {
blog(LOG_ERROR, "receive_audio: Error encoding audio: %s",
av_err2str(ret));
return;
}
if (!got_packet)
return;
packet.pts = rescale_ts(packet.pts, context, data->audio);
packet.dts = rescale_ts(packet.dts, context, data->audio);
packet.duration = (int)av_rescale_q(packet.duration, context->time_base,
data->audio->time_base);
packet.stream_index = data->audio->index;
ret = av_interleaved_write_frame(data->output, &packet);
if (ret != 0)
blog(LOG_ERROR, "receive_audio: Error writing audio: %s",
av_err2str(ret));
}
bool ffmpeg_output_start(struct ffmpeg_output *data)
@ -340,7 +495,7 @@ bool ffmpeg_output_start(struct ffmpeg_output *data)
struct audio_convert_info aci;
aci.samples_per_sec = SPS_TODO;
aci.format = AUDIO_FORMAT_16BIT;
aci.format = AUDIO_FORMAT_FLOAT;
aci.speakers = SPEAKERS_STEREO;
struct video_convert_info vci;
@ -349,8 +504,8 @@ bool ffmpeg_output_start(struct ffmpeg_output *data)
vci.height = 0;
vci.row_align = 1;
video_output_connect(video, &vci, receive_video, data);
audio_output_connect(audio, &aci, receive_audio, data);
//video_output_connect(video, &vci, receive_video, data);
//audio_output_connect(audio, &aci, receive_audio, data);
data->active = true;
return true;

View file

@ -18,6 +18,7 @@
#pragma once
#include <util/c99defs.h>
#include <media-io/audio-io.h>
#include <media-io/video-io.h>
#include <libavformat/avformat.h>
@ -31,12 +32,13 @@ struct ffmpeg_data {
AVFormatContext *output;
struct SwsContext *swscale;
AVFrame *vframe;
AVPicture src_picture;
AVPicture dst_picture;
AVFrame *vframe;
int total_frames;
uint8_t *samples[MAX_AUDIO_PLANES];
AVFrame *aframe;
uint8_t **samples;
int total_samples;
bool initialized;
};
@ -49,12 +51,12 @@ struct ffmpeg_output {
EXPORT const char *ffmpeg_output_getname(const char *locale);
EXPORT struct ffmpeg_output *ffmpeg_output_create(obs_data_t settings,
EXPORT struct ffmpeg_output *ffmpeg_output_create(const char *settings,
obs_output_t output);
EXPORT void ffmpeg_output_destroy(struct ffmpeg_output *data);
EXPORT void ffmpeg_output_update(struct ffmpeg_output *data,
obs_data_t settings);
const char *settings);
EXPORT bool ffmpeg_output_start(struct ffmpeg_output *data);
EXPORT void ffmpeg_output_stop(struct ffmpeg_output *data);

View file

@ -1,14 +1,21 @@
#include <string.h>
#include <util/c99defs.h>
#include <obs.h>
EXPORT const char *enum_outputs(size_t idx);
EXPORT bool enum_outputs(size_t idx, const char **name);
EXPORT uint32_t module_version(uint32_t in_version);
static const char *outputs[] = {"obs_ffmpeg"};
static const char *outputs[] = {"ffmpeg_output"};
const char *enum_outputs(size_t idx)
uint32_t module_version(uint32_t in_version)
{
return LIBOBS_API_VER;
}
bool enum_outputs(size_t idx, const char **name)
{
if (idx >= sizeof(outputs)/sizeof(const char*))
return NULL;
return false;
return outputs[idx];
*name = outputs[idx];
return true;
}

View file

@ -26,7 +26,7 @@ static void *sinewave_thread(void *pdata)
}
struct source_audio data;
data.data = bytes;
data.data[0] = bytes;
data.frames = 480;
data.speakers = SPEAKERS_MONO;
data.samples_per_sec = 48000;