do {
switch (state) {
case am_inactive:
- debug(2,"am_state: am_inactive");
+ debug(2, "am_state: am_inactive");
while (player_state != ps_active)
pthread_cond_wait(&activity_monitor_cv, &activity_monitor_mutex);
state = am_active;
- debug(2,"am_state: going active");
+ debug(2, "am_state: going active");
break;
case am_active:
// debug(1,"am_state: am_active");
static void stop(void);
static void flush(void);
int delay(long *the_delay);
-int get_frames_sent_for_output(__attribute__ ((unused)) uint64_t *elapsed_time, uint64_t *frames_sent_to_dac);
+int get_frames_sent_for_output(__attribute__((unused)) uint64_t *elapsed_time,
+ uint64_t *frames_sent_to_dac);
// int get_rate_information(uint64_t *elapsed_time, uint64_t *frames_played);
void *alsa_buffer_monitor_thread_code(void *arg);
.flush = &flush,
.delay = &delay,
.play = &play,
- .rate_info = &get_frames_sent_for_output, // will also include frames of silence sent to stop standby mode
-// .rate_info = NULL,
+ .rate_info = &get_frames_sent_for_output, // will also include frames of silence sent to stop
+ // standby mode
+ // .rate_info = NULL,
.mute = NULL, // a function will be provided if it can, and is allowed to,
// do hardware mute
.volume = NULL, // a function will be provided if it can do hardware volume
static int set_period_size_request, set_buffer_size_request;
static uint64_t frames_sent_for_playing;
-static int output_error_occurred; // set to true if an underrun or similar has occurred since last requested
+static int output_error_occurred; // set to true if an underrun or similar has occurred since last
+ // requested
static void help(void) {
printf(" -d output-device set the output device, default is \"default\".\n"
char errorstring[1024];
strerror_r(-ret, (char *)errorstring, sizeof(errorstring));
die("alsa: error %d (\"%s\") opening alsa device \"%s\".", ret, (char *)errorstring,
- alsa_out_dev);
+ alsa_out_dev);
}
return ret;
}
ret = snd_pcm_hw_params_set_access(alsa_handle, alsa_params, access);
if (ret < 0) {
die("audio_alsa: Access type not available for device \"%s\": %s", alsa_out_dev,
- snd_strerror(ret));
+ snd_strerror(ret));
return ret;
}
ret = snd_pcm_hw_params_set_channels(alsa_handle, alsa_params, 2);
if (ret < 0) {
die("audio_alsa: Channels count (2) not available for device \"%s\": %s", alsa_out_dev,
- snd_strerror(ret));
+ snd_strerror(ret));
return ret;
}
sps_format_description_string(config.output_format));
} else {
die("audio_alsa: Could not automatically set the output format for device \"%s\": %s",
- alsa_out_dev, snd_strerror(ret));
+ alsa_out_dev, snd_strerror(ret));
return ret;
}
}
config.output_rate; // this is the requested rate -- it'll be changed to the actual rate
ret = snd_pcm_hw_params_set_rate_near(alsa_handle, alsa_params, &actual_sample_rate, &dir);
if (ret < 0) {
- die("audio_alsa: The frame rate of %i frames per second is not available for playback: %s", config.output_rate,
- snd_strerror(ret));
+ die("audio_alsa: The frame rate of %i frames per second is not available for playback: %s",
+ config.output_rate, snd_strerror(ret));
return ret;
}
} else {
if (ret == 0) {
speed_found = 1;
if (actual_sample_rate != speeds[i])
- die("The output DAC can not be set to %d frames per second (fps). The nearest speed available is %d fps.", speeds[i], actual_sample_rate);
+ die("The output DAC can not be set to %d frames per second (fps). The nearest speed "
+ "available is %d fps.",
+ speeds[i], actual_sample_rate);
} else {
i++;
}
debug(1, "alsa: output speed chosen is %d.", config.output_rate);
} else {
die("audio_alsa: Could not automatically set the output rate for device \"%s\": %s",
- alsa_out_dev, snd_strerror(ret));
+ alsa_out_dev, snd_strerror(ret));
return ret;
}
}
ret = snd_pcm_hw_params(alsa_handle, alsa_params);
if (ret < 0) {
die("audio_alsa: Unable to set hw parameters for device \"%s\": %s.", alsa_out_dev,
- snd_strerror(ret));
+ snd_strerror(ret));
return ret;
}
break;
}
- if (snd_pcm_hw_params_get_rate_numden(alsa_params, &uval, &uval2) == 0)
+ if ((snd_pcm_hw_params_get_rate_numden(alsa_params, &uval, &uval2) == 0) && (uval2 != 0))
+ // watch for a divide by zero too!
debug(log_level, " precise (rational) rate = %.3f frames per second (i.e. %u/%u).", uval,
- uval2, ((double)uval) / uval2);
- else
+ uval2, ((double)uval) / uval2);
+ else
debug(log_level, " precise (rational) rate information unavailable.");
snd_pcm_hw_params_get_period_time(alsa_params, &uval, &dir);
warn("Invalid use_precision_timing option choice \"%s\". It should be "
"\"yes\", \"auto\" or \"no\". "
"It remains set to \"%s\".",
- config.use_precision_timing == YNA_NO
- ? "no"
- : config.use_precision_timing == YNA_AUTO ? "auto" : "yes");
+ config.use_precision_timing == YNA_NO ? "no"
+ : config.use_precision_timing == YNA_AUTO ? "auto"
+ : "yes");
}
}
debug(1, "alsa: disable_standby_mode is \"%s\".",
- config.disable_standby_mode == disable_standby_off
- ? "never"
- : config.disable_standby_mode == disable_standby_always ? "always" : "auto");
+ config.disable_standby_mode == disable_standby_off ? "never"
+ : config.disable_standby_mode == disable_standby_always ? "always"
+ : "auto");
debug(1, "alsa: disable_standby_mode_silence_threshold is %f seconds.",
config.disable_standby_mode_silence_threshold);
debug(1, "alsa: disable_standby_mode_silence_scan_interval is %f seconds.",
return ret;
}
-int get_frames_sent_for_output(__attribute__ ((unused)) uint64_t *elapsed_time, uint64_t *frames_sent_to_dac) {
+int get_frames_sent_for_output(__attribute__((unused)) uint64_t *elapsed_time,
+ uint64_t *frames_sent_to_dac) {
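+   // Report the total number of frames sent to the DAC and, via the return value, any output
+   // error (e.g. an underrun) recorded since the last call; the error indicator is reset on
+   // each call. ENODEV is returned if the device is not open.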
int ret = 0;
pthread_cleanup_debug_mutex_lock(&alsa_mutex, 10000, 0);
*frames_sent_to_dac = frames_sent_for_playing;
if (alsa_handle == NULL)
ret = ENODEV;
- else
+ else
ret = output_error_occurred; // will be zero unless an error occurred
- output_error_occurred = 0; // reset it.
+ output_error_occurred = 0; // reset it.
debug_mutex_unlock(&alsa_mutex, 0);
pthread_cleanup_pop(0);
- return ret;
+ return ret;
}
/*
stall_monitor_frame_count += samples;
frames_sent_for_playing += samples;
} else {
- output_error_occurred = -ret; // note than an output error has occurred
- if (ret == -EPIPE) { /* underrun */
+ output_error_occurred = -ret; // note that an output error has occurred
+ if (ret == -EPIPE) { /* underrun */
// It could be that the DAC was in the SND_PCM_STATE_XRUN state before
// sending the samples to be output. If so, it will still be in
#include <pipewire/pipewire.h>
#include <pipewire/stream.h>
-#include <spa/param/audio/layout.h>
#include <spa/param/audio/format-utils.h>
+#include <spa/param/audio/layout.h>
#include <spa/utils/result.h>
#include <math.h>
} data;
-static void on_core_info(__attribute__((unused)) void *userdata, const struct pw_core_info *info)
-{
- debug(1, "pw: remote %"PRIu32" is named \"%s\"", info->id, info->name);
+static void on_core_info(__attribute__((unused)) void *userdata, const struct pw_core_info *info) {
+ debug(1, "pw: remote %" PRIu32 " is named \"%s\"", info->id, info->name);
}
-static void on_core_error(__attribute__((unused)) void *userdata, uint32_t id, int seq, int res, const char *message)
-{
- warn("pw: remote error: id=%"PRIu32" seq:%d res:%d (%s): %s", id, seq, res, spa_strerror(res), message);
+static void on_core_error(__attribute__((unused)) void *userdata, uint32_t id, int seq, int res,
+ const char *message) {
+ warn("pw: remote error: id=%" PRIu32 " seq:%d res:%d (%s): %s", id, seq, res, spa_strerror(res),
+ message);
}
static const struct pw_core_events core_events = {
- PW_VERSION_CORE_EVENTS,
- .info = on_core_info,
- .error = on_core_error,
+ PW_VERSION_CORE_EVENTS,
+ .info = on_core_info,
+ .error = on_core_error,
};
-static void registry_event_global(__attribute__((unused)) void *userdata, uint32_t id,
- __attribute__((unused)) uint32_t permissions, const char *type, __attribute__((unused)) uint32_t version,
- const struct spa_dict *props)
-{
+static void registry_event_global(__attribute__((unused)) void *userdata, uint32_t id,
+ __attribute__((unused)) uint32_t permissions, const char *type,
+ __attribute__((unused)) uint32_t version,
+ const struct spa_dict *props) {
const struct spa_dict_item *item;
const char *name, *media_class;
if (!name || !media_class)
return;
- debug(1, "pw: registry: id=%"PRIu32" type=%s name=\"%s\" media_class=\"%s\"", id, type, name, media_class);
+ debug(1, "pw: registry: id=%" PRIu32 " type=%s name=\"%s\" media_class=\"%s\"", id, type, name,
+ media_class);
- spa_dict_for_each(item, props) {
- debug(1, "pw: \t\t%s = \"%s\"", item->key, item->value);
- }
+ spa_dict_for_each(item, props) { debug(1, "pw: \t\t%s = \"%s\"", item->key, item->value); }
}
}
-static void registry_event_global_remove(__attribute__((unused)) void *userdata, uint32_t id)
-{
- debug(1, "pw: registry: remove id=%"PRIu32"", id);
+static void registry_event_global_remove(__attribute__((unused)) void *userdata, uint32_t id) {
+ debug(1, "pw: registry: remove id=%" PRIu32 "", id);
}
static const struct pw_registry_events registry_events = {
- PW_VERSION_REGISTRY_EVENTS,
- .global = registry_event_global,
- .global_remove = registry_event_global_remove,
+ PW_VERSION_REGISTRY_EVENTS,
+ .global = registry_event_global,
+ .global_remove = registry_event_global_remove,
};
-static void on_state_changed(void *userdata, enum pw_stream_state old, enum pw_stream_state state, const char *error)
-{
+static void on_state_changed(void *userdata, enum pw_stream_state old, enum pw_stream_state state,
+ const char *error) {
struct pw_data *pipewire = userdata;
- debug(1, "pw: stream state changed %s -> %s", pw_stream_state_as_string(old), pw_stream_state_as_string(state));
+ debug(1, "pw: stream state changed %s -> %s", pw_stream_state_as_string(old),
+ pw_stream_state_as_string(state));
if (state == PW_STREAM_STATE_STREAMING)
- debug(1, "pw: stream node %"PRIu32"", pw_stream_get_node_id(pipewire->stream));
+ debug(1, "pw: stream node %" PRIu32 "", pw_stream_get_node_id(pipewire->stream));
if (state == PW_STREAM_STATE_ERROR)
- debug(1, "pw: stream node %"PRIu32" error: %s", pw_stream_get_node_id(pipewire->stream), error);
+ debug(1, "pw: stream node %" PRIu32 " error: %s", pw_stream_get_node_id(pipewire->stream),
+ error);
pw_thread_loop_signal(pipewire->mainloop, 0);
}
-static void on_process(void *userdata)
-{
+static void on_process(void *userdata) {
struct pw_data *pipewire = userdata;
pw_thread_loop_signal(pipewire->mainloop, 0);
}
-static void on_drained(void *userdata)
-{
+static void on_drained(void *userdata) {
struct pw_data *pipewire = userdata;
pw_stream_set_active(pipewire->stream, false);
}
static const struct pw_stream_events stream_events = {
- PW_VERSION_STREAM_EVENTS,
- .state_changed = on_state_changed,
- .process = on_process,
- .drained = on_drained,
+ PW_VERSION_STREAM_EVENTS,
+ .state_changed = on_state_changed,
+ .process = on_process,
+ .drained = on_drained,
};
static void deinit() {
}
if (data.registry) {
- pw_proxy_destroy((struct pw_proxy*)data.registry);
+ pw_proxy_destroy((struct pw_proxy *)data.registry);
data.registry = NULL;
}
}
}
-static int init(__attribute__((unused)) int argc, __attribute__((unused)) char** argv) {
+static int init(__attribute__((unused)) int argc, __attribute__((unused)) char **argv) {
struct pw_loop *loop;
- struct pw_properties* props;
+ struct pw_properties *props;
// set up default values first
config.audio_backend_buffer_desired_length = 0.35;
debug(1, "pw: compiled with libpipewire %s", pw_get_headers_version());
debug(1, "pw: linked with libpipewire: %s", pw_get_library_version());
- data.props = pw_properties_new(
- PW_KEY_MEDIA_TYPE, "Audio",
- PW_KEY_MEDIA_CATEGORY, "Playback",
- PW_KEY_MEDIA_ROLE, "Music",
- PW_KEY_APP_NAME, "shairport-sync",
- PW_KEY_NODE_NAME, "shairport-sync",
- NULL);
+ data.props = pw_properties_new(PW_KEY_MEDIA_TYPE, "Audio", PW_KEY_MEDIA_CATEGORY, "Playback",
+ PW_KEY_MEDIA_ROLE, "Music", PW_KEY_APP_NAME, "shairport-sync",
+ PW_KEY_NODE_NAME, "shairport-sync", NULL);
if (!data.props) {
deinit();
static enum spa_audio_format sps_format_to_spa_format(sps_format_t sps_format) {
switch (sps_format) {
- case SPS_FORMAT_S8:
- return SPA_AUDIO_FORMAT_S8;
- case SPS_FORMAT_U8:
- return SPA_AUDIO_FORMAT_U8;
- case SPS_FORMAT_S16:
- return SPA_AUDIO_FORMAT_S16;
- case SPS_FORMAT_S16_LE:
- return SPA_AUDIO_FORMAT_S16_LE;
- case SPS_FORMAT_S16_BE:
- return SPA_AUDIO_FORMAT_S16_BE;
- case SPS_FORMAT_S24:
- return SPA_AUDIO_FORMAT_S24_32;
- case SPS_FORMAT_S24_LE:
- return SPA_AUDIO_FORMAT_S24_32_LE;
- case SPS_FORMAT_S24_BE:
- return SPA_AUDIO_FORMAT_S24_32_BE;
- case SPS_FORMAT_S24_3LE:
- return SPA_AUDIO_FORMAT_S24_LE;
- case SPS_FORMAT_S24_3BE:
- return SPA_AUDIO_FORMAT_S24_BE;
- case SPS_FORMAT_S32:
- return SPA_AUDIO_FORMAT_S32;
- case SPS_FORMAT_S32_LE:
- return SPA_AUDIO_FORMAT_S32_LE;
- case SPS_FORMAT_S32_BE:
- return SPA_AUDIO_FORMAT_S32_BE;
-
- case SPS_FORMAT_UNKNOWN:
- case SPS_FORMAT_AUTO:
- case SPS_FORMAT_INVALID:
- default:
- return SPA_AUDIO_FORMAT_S16;
+ case SPS_FORMAT_S8:
+ return SPA_AUDIO_FORMAT_S8;
+ case SPS_FORMAT_U8:
+ return SPA_AUDIO_FORMAT_U8;
+ case SPS_FORMAT_S16:
+ return SPA_AUDIO_FORMAT_S16;
+ case SPS_FORMAT_S16_LE:
+ return SPA_AUDIO_FORMAT_S16_LE;
+ case SPS_FORMAT_S16_BE:
+ return SPA_AUDIO_FORMAT_S16_BE;
+ case SPS_FORMAT_S24:
+ return SPA_AUDIO_FORMAT_S24_32;
+ case SPS_FORMAT_S24_LE:
+ return SPA_AUDIO_FORMAT_S24_32_LE;
+ case SPS_FORMAT_S24_BE:
+ return SPA_AUDIO_FORMAT_S24_32_BE;
+ case SPS_FORMAT_S24_3LE:
+ return SPA_AUDIO_FORMAT_S24_LE;
+ case SPS_FORMAT_S24_3BE:
+ return SPA_AUDIO_FORMAT_S24_BE;
+ case SPS_FORMAT_S32:
+ return SPA_AUDIO_FORMAT_S32;
+ case SPS_FORMAT_S32_LE:
+ return SPA_AUDIO_FORMAT_S32_LE;
+ case SPS_FORMAT_S32_BE:
+ return SPA_AUDIO_FORMAT_S32_BE;
+
+ case SPS_FORMAT_UNKNOWN:
+ case SPS_FORMAT_AUTO:
+ case SPS_FORMAT_INVALID:
+ default:
+ return SPA_AUDIO_FORMAT_S16;
}
}
-static int spa_format_samplesize(enum spa_audio_format audio_format)
-{
- switch(audio_format) {
- case SPA_AUDIO_FORMAT_S8:
- case SPA_AUDIO_FORMAT_U8:
- return 1;
- case SPA_AUDIO_FORMAT_S16:
- return 2;
- case SPA_AUDIO_FORMAT_S24:
- return 3;
- case SPA_AUDIO_FORMAT_S24_32:
- case SPA_AUDIO_FORMAT_S32:
- return 4;
- default:
- die("pw: unhandled spa_audio_format: %d", audio_format);
- return -1;
+static int spa_format_samplesize(enum spa_audio_format audio_format) {
+ switch (audio_format) {
+ case SPA_AUDIO_FORMAT_S8:
+ case SPA_AUDIO_FORMAT_U8:
+ return 1;
+ case SPA_AUDIO_FORMAT_S16:
+ return 2;
+ case SPA_AUDIO_FORMAT_S24:
+ return 3;
+ case SPA_AUDIO_FORMAT_S24_32:
+ case SPA_AUDIO_FORMAT_S32:
+ return 4;
+ default:
+ die("pw: unhandled spa_audio_format: %d", audio_format);
+ return -1;
}
}
-static const char* spa_format_to_str(enum spa_audio_format audio_format)
-{
- switch(audio_format) {
- case SPA_AUDIO_FORMAT_U8:
- return "u8";
- case SPA_AUDIO_FORMAT_S8:
- return "s8";
- case SPA_AUDIO_FORMAT_S16:
- return "s16";
- case SPA_AUDIO_FORMAT_S24:
- case SPA_AUDIO_FORMAT_S24_32:
- return "s24";
- case SPA_AUDIO_FORMAT_S32:
- return "s32";
- default:
- die("pw: unhandled spa_audio_format: %d", audio_format);
- return "(invalid)";
+static const char *spa_format_to_str(enum spa_audio_format audio_format) {
+ switch (audio_format) {
+ case SPA_AUDIO_FORMAT_U8:
+ return "u8";
+ case SPA_AUDIO_FORMAT_S8:
+ return "s8";
+ case SPA_AUDIO_FORMAT_S16:
+ return "s16";
+ case SPA_AUDIO_FORMAT_S24:
+ case SPA_AUDIO_FORMAT_S24_32:
+ return "s24";
+ case SPA_AUDIO_FORMAT_S32:
+ return "s32";
+ default:
+ die("pw: unhandled spa_audio_format: %d", audio_format);
+ return "(invalid)";
}
}
debug(1, "pw: format: %s", spa_format_to_str(data.format));
debug(1, "pw: samplesize: %d", spa_format_samplesize(data.format));
debug(1, "pw: stride: %d", data.stride);
- debug(1, "pw: latency: %d samples (%.3fs)", nom, (double)nom/data.rate);
+ if (data.rate != 0)
+ debug(1, "pw: latency: %d samples (%.3fs)", nom, (double)nom / data.rate);
- info = SPA_AUDIO_INFO_RAW_INIT(
- .flags = SPA_AUDIO_FLAG_NONE,
- .format = data.format,
- .rate = data.rate,
- .channels = data.channels);
+ info = SPA_AUDIO_INFO_RAW_INIT(.flags = SPA_AUDIO_FLAG_NONE, .format = data.format,
+ .rate = data.rate, .channels = data.channels);
params[0] = spa_format_audio_raw_build(&pod_builder, SPA_PARAM_EnumFormat, &info);
die("pw: pw_stream_new() failed: %m");
}
- debug(1, "pw: connecting stream: target_id=%"PRIu32"", PW_ID_ANY);
+ debug(1, "pw: connecting stream: target_id=%" PRIu32 "", PW_ID_ANY);
pw_stream_add_listener(data.stream, &data.stream_listener, &stream_events, &data);
- ret = pw_stream_connect(data.stream,
- PW_DIRECTION_OUTPUT,
- PW_ID_ANY,
- PW_STREAM_FLAG_INACTIVE | PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS,
- params, 1);
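+   // Flags, as understood here: PW_STREAM_FLAG_INACTIVE creates the stream paused until
+   // pw_stream_set_active() is called, PW_STREAM_FLAG_AUTOCONNECT asks the session manager to
+   // pick a target node, and PW_STREAM_FLAG_MAP_BUFFERS maps buffer memory so the data
+   // pointers can be written to directly.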
+ ret = pw_stream_connect(
+ data.stream, PW_DIRECTION_OUTPUT, PW_ID_ANY,
+ PW_STREAM_FLAG_INACTIVE | PW_STREAM_FLAG_AUTOCONNECT | PW_STREAM_FLAG_MAP_BUFFERS, params, 1);
if (ret < 0) {
deinit();
debug(1, "pw: stream properties:");
pstate = NULL;
while ((key = pw_properties_iterate(props, &pstate)) != NULL &&
- (val = pw_properties_get(props, key)) != NULL) {
+ (val = pw_properties_get(props, key)) != NULL) {
debug(1, "pw: \t%s = \"%s\"", key, val);
}
}
- while(1) {
+ while (1) {
enum pw_stream_state stream_state = pw_stream_get_state(data.stream, NULL);
if (stream_state == PW_STREAM_STATE_PAUSED)
break;
return 0;
}
-audio_output audio_pw = {
- .name = "pw",
- .help = NULL,
- .init = &init,
- .deinit = &deinit,
- .prepare = NULL,
- .start = &start,
- .stop = &stop,
- .is_running = NULL,
- .flush = &flush,
- .delay = NULL,
- .play = &play,
- .volume = NULL,
- .parameters = NULL,
- .mute = NULL
-};
+audio_output audio_pw = {.name = "pw",
+ .help = NULL,
+ .init = &init,
+ .deinit = &deinit,
+ .prepare = NULL,
+ .start = &start,
+ .stop = &stop,
+ .is_running = NULL,
+ .flush = &flush,
+ .delay = NULL,
+ .play = &play,
+ .volume = NULL,
+ .parameters = NULL,
+ .mute = NULL};
char *name;
int (*mdns_register)(char *ap1name, char *ap2name, int port, char **txt_records,
char **secondary_txt_records);
- int (*mdns_update)(char **txt_records, char **secondary_txt_records);
+ int (*mdns_update)(char **txt_records, char **secondary_txt_records);
void (*mdns_unregister)(void);
void (*mdns_dacp_monitor_start)();
void (*mdns_dacp_monitor_set_id)(const char *);
conn->flush_rtp_timestamp = 0; // it seems this number has a special significance -- it seems to
// be used as a null operand, so we'll use it like that too
conn->fix_volume = 0x10000;
-
+
#ifdef CONFIG_AIRPLAY_2
conn->ap2_flush_requested = 0;
conn->ap2_flush_from_valid = 0;
player_watchdog_thread;
// buffers to delete on exit
- signed short *tbuf;
+ int32_t *tbuf;
int32_t *sbuf;
char *outbuf;
#include <string.h>
#include <sys/mman.h>
#ifdef COMPILE_FOR_FREEBSD
-#include <sys/socket.h>
#include <netinet/in.h>
+#include <sys/socket.h>
#endif
#define __STDC_FORMAT_MACROS
#include "common.h"
if (actual_clock_id != NULL)
*actual_clock_id = nqptp_data.master_clock_id;
if (time_of_sample != NULL)
- *time_of_sample = nqptp_data.local_time;
+ *time_of_sample = nqptp_data.local_time;
if (raw_offset != NULL)
*raw_offset = nqptp_data.local_to_master_time_offset;
if (mastership_start_time != NULL)
obfp += 2;
};
*obfp = 0;
-
-
+
+
// get raw timestamp information
// I think that a good way to understand these timestamps is that
// (1) the rtlt below is the timestamp of the frame that should be playing at the
// Thus, (3) the latency can be calculated by subtracting the second from the
// first.
// There must be more to it -- there's something missing.
-
+
// In addition, it seems that if the value of the short represented by the second
// pair of bytes in the packet is 7
// then an extra time lag is expected to be added, presumably by
// the AirPort Express.
-
+
// Best guess is that this delay is 11,025 frames.
-
+
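+   // For example (illustrative numbers only): if rt were 100000 and rtlt were 88975, the
+   // latency would be rt - rtlt = 11025 frames, i.e. 0.25 seconds at 44100 frames per second.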
uint32_t rtlt = nctohl(&packet[4]); // raw timestamp less latency
uint32_t rt = nctohl(&packet[16]); // raw timestamp
-
+
uint32_t fl = nctohs(&packet[2]); //
-
+
debug(1,"Sync Packet of %d bytes received: \"%s\", flags: %d, timestamps %u and %u,
giving a latency of %d frames.",plen,obf,fl,rt,rtlt,rt-rtlt);
//debug(1,"Monotonic timestamps are: %" PRId64 " and %" PRId64 "
rtsp_conn_info *conn = (rtsp_conn_info *)arg;
debug(2, "Connection %d: AP2 Control Receiver Cleanup.", conn->connection_number);
close(conn->ap2_control_socket);
- debug(2, "Connection %d: UDP control port %u closed.", conn->connection_number, conn->local_ap2_control_port);
+ debug(2, "Connection %d: UDP control port %u closed.", conn->connection_number,
+ conn->local_ap2_control_port);
conn->ap2_control_socket = 0;
conn->ap2_remote_control_socket_addr_length =
0; // indicates to the control receiver thread that the socket address need to be
debug(2, "Buffered Audio Receiver Cleanup Start.");
rtsp_conn_info *conn = (rtsp_conn_info *)arg;
close(conn->buffered_audio_socket);
- debug(2, "Connection %d: TCP Buffered Audio port closed: %u.", conn->connection_number, conn->local_buffered_audio_port);
+ debug(2, "Connection %d: TCP Buffered Audio port closed: %u.", conn->connection_number,
+ conn->local_buffered_audio_port);
conn->buffered_audio_socket = 0;
debug(2, "Buffered Audio Receiver Cleanup Done.");
}
// (does nothing if called twice during the course of one program execution)
// deprecated in ffmpeg 4.0 and later... but still needed in ffmpeg 3.6 / ubuntu 18
- #pragma GCC diagnostic push
- #pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
avcodec_register_all();
- #pragma GCC diagnostic pop
+#pragma GCC diagnostic pop
AVCodec *codec = avcodec_find_decoder(AV_CODEC_ID_AAC);
if (codec == NULL) {
ssize_t data_remaining;
uint32_t seq_no; // audio packet number
// uint32_t previous_seq_no = 0;
- int new_buffer_needed;
+ int new_buffer_needed = 0;
ssize_t nread;
int finished = 0;
debug_mutex_lock(&conns_lock, 1000000, 3);
int i;
for (i = 0; i < nconns; i++) {
- if ((conns[i] != NULL) && (conns[i]->running != 0) && (conns[i]->connection_number != except_this_one) &&
+ if ((conns[i] != NULL) && (conns[i]->running != 0) &&
+ (conns[i]->connection_number != except_this_one) &&
((stream_category == unspecified_stream_category) ||
(stream_category == conns[i]->airplay_stream_category))) {
pthread_cancel(conns[i]->thread);
}
}
for (i = 0; i < nconns; i++) {
- if ((conns[i] != NULL) && (conns[i]->running != 0) && (conns[i]->connection_number != except_this_one) &&
+ if ((conns[i] != NULL) && (conns[i]->running != 0) &&
+ (conns[i]->connection_number != except_this_one) &&
((stream_category == unspecified_stream_category) ||
(stream_category == conns[i]->airplay_stream_category))) {
pthread_join(conns[i]->thread, NULL);
char *string;
};
- struct response_t responses[] = {{200, "OK"}, {400, "Bad Request"}, {403, "Unauthorized"}, {501, "Not Implemented"}};
+ struct response_t responses[] = {
+ {200, "OK"}, {400, "Bad Request"}, {403, "Unauthorized"}, {501, "Not Implemented"}};
int found = 0;
char *respcode_text = "Unauthorized";
for (i = 0; i < sizeof(responses) / sizeof(struct response_t); i++) {
}
void handle_setrate(rtsp_conn_info *conn, rtsp_message *req, rtsp_message *resp) {
- debug(3, "Connection %d: SETRATE %s : Content-Length %d", conn->connection_number,
- req->path, req->contentlength);
+ debug(3, "Connection %d: SETRATE %s : Content-Length %d", conn->connection_number, req->path,
+ req->contentlength);
debug_log_rtsp_message(2, "SETRATE request -- unimplemented", req);
resp->respcode = 501; // Not Implemented
}
-void handle_unimplemented_ap1(__attribute((unused)) rtsp_conn_info *conn, rtsp_message *req, rtsp_message *resp) {
+void handle_unimplemented_ap1(__attribute((unused)) rtsp_conn_info *conn, rtsp_message *req,
+ rtsp_message *resp) {
debug_log_rtsp_message(1, "request not recognised for AirPlay 1 operation", req);
resp->respcode = 501;
}
req->contentlength);
resp->respcode = 500;
}
-void handle_post(__attribute((unused)) rtsp_conn_info *conn, __attribute((unused)) rtsp_message *req,
- __attribute((unused)) rtsp_message *resp) {
+void handle_post(__attribute((unused)) rtsp_conn_info *conn,
+ __attribute((unused)) rtsp_message *req,
+ __attribute((unused)) rtsp_message *resp) {
debug(1, "Connection %d: POST %s Content-Length %d", conn->connection_number, req->path,
req->contentlength);
resp->respcode = 500;
// this can be called more than once on the same connection --
// by the player itself but also by the play session being killed
if (conn->player_thread) {
- player_stop(conn); // this nulls the player_thread
+ player_stop(conn); // this nulls the player_thread
activity_monitor_signify_activity(0); // inactive, and should be after command_stop()
}
if (conn->session_key) {
if (conn->event_socket) {
close(conn->event_socket);
conn->event_socket = 0;
- debug(2, "Connection %d: closing TCP event port %u.", conn->connection_number, conn->local_event_port);
+ debug(2, "Connection %d: closing TCP event port %u.", conn->connection_number,
+ conn->local_event_port);
}
// if we are closing a PTP stream only, do this
}
}
-
void handle_teardown_2(rtsp_conn_info *conn, __attribute__((unused)) rtsp_message *req,
rtsp_message *resp) {
conn->connection_number, err);
}
- debug(2, "Connection %d: TCP PTP event port opened: %u.", conn->connection_number, conn->local_event_port);
+ debug(2, "Connection %d: TCP PTP event port opened: %u.", conn->connection_number,
+ conn->local_event_port);
if (conn->rtp_event_thread != NULL)
debug(1, "previous rtp_event_thread allocation not freed, it seems.");
conn->connection_number, err);
}
- debug(1, "Connection %d: TCP Remote Control event port opened: %u.", conn->connection_number, conn->local_event_port);
+ debug(1, "Connection %d: TCP Remote Control event port opened: %u.",
+ conn->connection_number, conn->local_event_port);
if (conn->rtp_event_thread != NULL)
debug(1, "previous rtp_event_thread allocation not freed, it seems.");
conn->rtp_event_thread = malloc(sizeof(pthread_t));
if (err) {
die("Error %d: could not find a UDP port to use as an ap2_control port", err);
}
- debug(2, "Connection %d: UDP control port opened: %u.", conn->connection_number, conn->local_ap2_control_port);
+ debug(2, "Connection %d: UDP control port opened: %u.", conn->connection_number,
+ conn->local_ap2_control_port);
pthread_create(&conn->rtp_ap2_control_thread, NULL, &rtp_ap2_control_receiver, (void *)conn);
if (err) {
die("Error %d: could not find a UDP port to use as a realtime_audio port", err);
}
- debug(2, "Connection %d: UDP realtime audio port opened: %u.", conn->connection_number, conn->local_realtime_audio_port);
+ debug(2, "Connection %d: UDP realtime audio port opened: %u.", conn->connection_number,
+ conn->local_realtime_audio_port);
pthread_create(&conn->rtp_realtime_audio_thread, NULL, &rtp_realtime_audio_receiver,
(void *)conn);
conn->connection_number, err);
}
- debug(2, "Connection %d: TCP Buffered Audio port opened: %u.", conn->connection_number, conn->local_buffered_audio_port);
+ debug(2, "Connection %d: TCP Buffered Audio port opened: %u.", conn->connection_number,
+ conn->local_buffered_audio_port);
// hack.
conn->max_frames_per_packet = 352; // number of audio frames per packet.
conn->initial_airplay_volume = volume;
conn->initial_airplay_volume_set = 1;
}
- } else if (strncmp(cp, "progress: ", strlen("progress: ")) == 0) { // this can be sent even when metadata is not solicited
+ } else if (strncmp(cp, "progress: ", strlen("progress: ")) ==
+ 0) { // this can be sent even when metadata is not solicited
#ifdef CONFIG_METADATA
char *progress = cp + strlen("progress: ");
send_ssnc_metadata('prgr', progress, strlen(progress), 1);
#endif
- } else
- {
+ } else {
debug(1, "Connection %d, unrecognised parameter: \"%s\" (%d)\n", conn->connection_number, cp,
strlen(cp));
}
// ("ssnc", "chnk", packet_ix, packet_counts, packet_tag, packet_type, chunked_data)
uint32_t chunk_ix = 0;
+ if (config.metadata_sockmsglength == 24)
+ die("A divide by zero almost occurred (config.metadata_sockmsglength = 24).");
uint32_t chunk_total = length / (config.metadata_sockmsglength - 24);
if (chunk_total * (config.metadata_sockmsglength - 24) < length) {
chunk_total++;
if (pthread_create(&metadata_thread, NULL, metadata_thread_function, NULL) != 0)
debug(1, "Failed to create metadata thread!");
- if (pthread_create(&metadata_multicast_thread, NULL, metadata_multicast_thread_function, NULL) != 0)
+ if (pthread_create(&metadata_multicast_thread, NULL, metadata_multicast_thread_function,
+ NULL) != 0)
debug(1, "Failed to create metadata multicast thread!");
}
#ifdef CONFIG_METADATA_HUB
close(conn->audio_socket);
}
-
-
if (conn->fd > 0) {
debug(3, "Connection %d terminating: closing fd %d.", conn->connection_number, conn->fd);
close(conn->fd);
}
}
if (method_selected == 0) {
- debug(1, "Connection %d: Unrecognised and unhandled rtsp request \"%s\". HTTP Response Code 501 (\"Not Implemented\") returned.",
+ debug(1,
+ "Connection %d: Unrecognised and unhandled rtsp request \"%s\". HTTP Response Code "
+ "501 (\"Not Implemented\") returned.",
conn->connection_number, req->method);
int y = req->contentlength;
*obfp = 0;
debug(1, "Content: \"%s\".", obf);
}
-
}
}
debug(debug_level, "Connection %d: RTSP Response:", conn->connection_number);
/* Version check should be the very first call because it
makes sure that important subsystems are initialized.
#define NEED_LIBGCRYPT_VERSION to the minimum required version. */
-
+
#define NEED_LIBGCRYPT_VERSION "1.5.4"
if (!gcry_check_version(NEED_LIBGCRYPT_VERSION)) {