MODNAME=mod_http_cache
mod_LTLIBRARIES = mod_http_cache.la
-mod_http_cache_la_SOURCES = mod_http_cache.c aws.c
+mod_http_cache_la_SOURCES = mod_http_cache.c common.c aws.c azure.c
mod_http_cache_la_CFLAGS = $(AM_CFLAGS)
mod_http_cache_la_CPPFLAGS = $(CURL_CFLAGS) $(AM_CPPFLAGS)
mod_http_cache_la_LIBADD = $(switch_builddir)/libfreeswitch.la
/* 160 bits / 8 bits per byte */
#define SHA1_LENGTH 20
-/**
- * @param url to check
- * @return true if this is an S3 url
- */
-int aws_s3_is_s3_url(const char *url, const char *base_domain)
-{
- if (!zstr(base_domain)) {
- char *base_domain_escaped;
- char regex[1024];
- int result;
- base_domain_escaped = switch_string_replace(base_domain, ".", "\\.");
- switch_snprintf(regex, 1024, "^https?://\\w[-\\w.]{1,61}\\w\\.%s/.*$", base_domain_escaped);
- result = !zstr(url) && switch_regex_match(url, regex) == SWITCH_STATUS_SUCCESS;
- switch_safe_free(base_domain_escaped);
- return result;
- }
- /* AWS bucket naming rules are complex... this match only supports virtual hosting of buckets */
- return !zstr(url) && switch_regex_match(url, "^https?://\\w[-\\w.]{1,61}\\w\\.s3([-\\w]+)?\\.amazonaws\\.com/.*$") == SWITCH_STATUS_SUCCESS;
-}
-
/**
* Create the string to sign for a AWS signature calculation
* @param verb (PUT/GET)
return signature;
}
-/**
- * Reverse string substring search
- */
-static char *my_strrstr(const char *haystack, const char *needle)
-{
- char *s;
- size_t needle_len;
- size_t haystack_len;
-
- if (zstr(haystack)) {
- return NULL;
- }
-
- if (zstr(needle)) {
- return (char *)haystack;
- }
-
- needle_len = strlen(needle);
- haystack_len = strlen(haystack);
- if (needle_len > haystack_len) {
- return NULL;
- }
-
- s = (char *)(haystack + haystack_len - needle_len);
- do {
- if (!strncmp(s, needle, needle_len)) {
- return s;
- }
- } while (s-- != haystack);
-
- return NULL;
-}
-
-/**
- * Parse bucket and object from URL
- * @param url to parse. This value is modified.
- * @param base_domain of URL (assumes s3.amazonaws.com if not specified)
- * @param bucket to store result in
- * @param bucket_length of result buffer
- */
-void aws_s3_parse_url(char *url, const char *base_domain, char **bucket, char **object)
-{
- char *bucket_start = NULL;
- char *bucket_end;
- char *object_start;
-
- *bucket = NULL;
- *object = NULL;
-
- if (!aws_s3_is_s3_url(url, base_domain)) {
- return;
- }
-
- /* expect: http(s)://bucket.foo-bar.s3.amazonaws.com/object */
- if (!strncasecmp(url, "https://", 8)) {
- bucket_start = url + 8;
- } else if (!strncasecmp(url, "http://", 7)) {
- bucket_start = url + 7;
- }
- if (zstr(bucket_start)) {
- /* invalid URL */
- return;
- }
-
- {
- char base_domain_match[1024];
- if (zstr(base_domain)) {
- base_domain = "s3";
- }
- switch_snprintf(base_domain_match, 1024, ".%s", base_domain);
- bucket_end = my_strrstr(bucket_start, base_domain_match);
- }
- if (!bucket_end) {
- /* invalid URL */
- return;
- }
- *bucket_end = '\0';
-
- object_start = strchr(bucket_end + 1, '/');
- if (!object_start) {
- /* invalid URL */
- return;
- }
- object_start++;
-
- if (zstr(bucket_start) || zstr(object_start)) {
- /* invalid URL */
- return;
- }
-
- *bucket = bucket_start;
- *object = object_start;
-}
-
/**
* Create a pre-signed URL for AWS S3
* @param verb (PUT/GET)
char *object;
/* create URL encoded signature */
- aws_s3_parse_url(url_dup, base_domain, &bucket, &object);
+ parse_url(url_dup, base_domain, "s3", &bucket, &object);
string_to_sign = aws_s3_string_to_sign(verb, bucket, object, content_type, content_md5, expires);
signature[0] = '\0';
aws_s3_signature(signature, S3_SIGNATURE_LENGTH_MAX, string_to_sign, aws_secret_access_key);
char *object;
/* create base64 encoded signature */
- aws_s3_parse_url(url_dup, base_domain, &bucket, &object);
+ parse_url(url_dup, base_domain, "s3", &bucket, &object);
string_to_sign = aws_s3_string_to_sign(verb, bucket, object, content_type, content_md5, date);
signature[0] = '\0';
aws_s3_signature(signature, S3_SIGNATURE_LENGTH_MAX, string_to_sign, aws_secret_access_key);
return switch_mprintf("AWS %s:%s", aws_access_key_id, signature);
}
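+/**
+ * Read the AWS S3 profile configuration
+ * @param xml the portion of the XML document containing this profile's <aws-s3> configuration
+ * @param profile to fill in with the S3 credentials, base domain and header callback
+ * @return SWITCH_STATUS_SUCCESS on success
+ */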
+switch_status_t aws_s3_config_profile(switch_xml_t xml, http_profile_t *profile) {
+ switch_status_t status = SWITCH_STATUS_SUCCESS;
+
+ profile->append_headers_ptr = aws_s3_append_headers;
+
+ switch_xml_t base_domain_xml = switch_xml_child(xml, "base-domain");
+ /* check if environment variables set the keys */
+ profile->aws_s3_access_key_id = getenv("AWS_ACCESS_KEY_ID");
+ profile->secret_access_key = getenv("AWS_SECRET_ACCESS_KEY");
+ if (!zstr(profile->aws_s3_access_key_id) && !zstr(profile->secret_access_key)) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Using AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables for s3 access on profile \"%s\"\n", profile->name);
+ profile->aws_s3_access_key_id = strdup(profile->aws_s3_access_key_id);
+ profile->secret_access_key = strdup(profile->secret_access_key);
+ } else {
+ /* use configuration for keys */
+ switch_xml_t id = switch_xml_child(xml, "access-key-id");
+ switch_xml_t secret = switch_xml_child(xml, "secret-access-key");
+
+ if (id && secret) {
+ profile->aws_s3_access_key_id = switch_strip_whitespace(switch_xml_txt(id));
+ profile->secret_access_key = switch_strip_whitespace(switch_xml_txt(secret));
+ if (zstr(profile->aws_s3_access_key_id) || zstr(profile->secret_access_key)) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "Missing Azure Blob credentials for profile \"%s\"\n", profile->name);
+ switch_safe_free(profile->aws_s3_access_key_id);
+ profile->aws_s3_access_key_id = NULL;
+ switch_safe_free(profile->secret_access_key);
+ profile->secret_access_key = NULL;
+ }
+ } else {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Missing key id or secret\n");
+ status = SWITCH_STATUS_FALSE;
+ }
+ }
+ if (base_domain_xml) {
+ profile->base_domain = switch_strip_whitespace(switch_xml_txt(base_domain_xml));
+ if (zstr(profile->base_domain)) {
+ switch_safe_free(profile->base_domain);
+ profile->base_domain = NULL;
+ }
+ }
+ return status;
+}
+
+/**
+ * Append Amazon S3 headers to the request
+ * @param profile with S3 credentials
+ * @param headers to add to. If NULL, new headers are created.
+ * @param verb (GET/PUT)
+ * @param content_length of the object (not used for S3)
+ * @param content_type of object (PUT only)
+ * @param url
+ * @param block_num (not used for S3)
+ * @param query_string (not used for S3)
+ * @return updated headers
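+ * Adds a "Date:" header and an "Authorization: AWS <access key id>:<signature>" header.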
+ */
+switch_curl_slist_t *aws_s3_append_headers(http_profile_t *profile, switch_curl_slist_t *headers,
+ const char *verb, unsigned int content_length, const char *content_type, const char *url, const unsigned int block_num, char **query_string)
+{
+ char date[256];
+ char header[1024];
+ char *authenticate;
+
+ /* Date: */
+ switch_rfc822_date(date, switch_time_now());
+ snprintf(header, 1024, "Date: %s", date);
+ headers = switch_curl_slist_append(headers, header);
+
+ /* Authorization: */
+ authenticate = aws_s3_authentication_create(verb, url, profile->base_domain, content_type, "", profile->aws_s3_access_key_id, profile->secret_access_key, date);
+ snprintf(header, 1024, "Authorization: %s", authenticate);
+ free(authenticate);
+ headers = switch_curl_slist_append(headers, header);
+
+ return headers;
+}
+
+
/* For Emacs:
* Local Variables:
* mode:c
#define AWS_H
#include <switch.h>
+#include <switch_curl.h>
+#include "common.h"
/* (SHA1_LENGTH * 1.37 base64 bytes per byte * 3 url-encoded bytes per byte) */
#define S3_SIGNATURE_LENGTH_MAX 83
-int aws_s3_is_s3_url(const char *url, const char *base_domain);
-void aws_s3_parse_url(char *url, const char *base_domain, char **bucket, char **object);
+switch_curl_slist_t *aws_s3_append_headers(http_profile_t *profile, switch_curl_slist_t *headers,
+ const char *verb, unsigned int content_length, const char *content_type, const char *url, const unsigned int block_num, char **query_string);
+switch_status_t aws_s3_config_profile(switch_xml_t xml, http_profile_t *profile);
+
+// the following functions are exposed only so that the unit tests still work
char *aws_s3_string_to_sign(const char *verb, const char *bucket, const char *object, const char *content_type, const char *content_md5, const char *date);
char *aws_s3_signature(char *signature, int signature_length, const char *string_to_sign, const char *aws_secret_access_key);
char *aws_s3_presigned_url_create(const char *verb, const char *url, const char *base_domain, const char *content_type, const char *content_md5, const char *aws_access_key_id, const char *aws_secret_access_key, const char *expires);
char *aws_s3_authentication_create(const char *verb, const char *url, const char *base_domain, const char *content_type, const char *content_md5, const char *aws_access_key_id, const char *aws_secret_access_key, const char *date);
+
#endif
/* For Emacs:
--- /dev/null
+/*
+ * azure.c for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ * Copyright (C) 2013-2014, Grasshopper
+ *
+ * Version: MPL 1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is aws.c for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ *
+ * The Initial Developer of the Original Code is Grasshopper
+ * Portions created by the Initial Developer are Copyright (C)
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Chris Rienzo <chris.rienzo@grasshopper.com>
+ * Richard Screene <richard.screene@thisisdrum.com>
+ *
+ * azure.c -- Some Azure Blob Service helper functions
+ *
+ */
+#include "azure.h"
+#include <switch.h>
+#include <switch_curl.h>
+
+#if defined(HAVE_OPENSSL)
+#include <openssl/hmac.h>
+#include <openssl/sha.h>
+#endif
+
+#define SHA256_LENGTH 32
+
+#define MS_VERSION "2015-12-11"
+
+#define BLOCK_STR_LENGTH 17
+#define BLOCK_ID_LENGTH 25
+
+struct curl_memory_read {
+ char *read_ptr;
+ size_t size_left;
+};
+typedef struct curl_memory_read curl_memory_read_t;
+
+/**
+ * Convert query string parameters into string to be appended to
+ * Azure authentication header
+ * @param query_string the query string to convert
+ * @return the canonicalised resources (must be freed)
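+ * Example (illustrative): "blockid=AAAA&comp=block" becomes "\nblockid:AAAA\ncomp:block".
+ * Parameters are emitted in input order, so callers must already pass them in the
+ * lexicographic order required by the Azure Shared Key signing scheme.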
+ */
+static char *canonicalise_query_string(const char *query_string) {
+ char *saveptr = NULL;
+ char out_str[1024] = "";
+ char *p = out_str;
+ char *query_string_dup = switch_safe_strdup(query_string);
+ char *in_str = (char *) query_string_dup;
+ char *kvp;
+
+ while ((kvp = strtok_r(in_str, "&", &saveptr)) != NULL) {
+ char *value = strchr(kvp, '=');
+ if (value) {
+ *value = '\0';
+ value ++;
+ p += switch_snprintf(p, &out_str[sizeof(out_str)] - p, "\n%s:%s", kvp, value);
+ } else {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "canonicalise_query_string - badly formatted query string parameter=%s\n", kvp);
+ }
+ in_str = NULL;
+ }
+
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "canonicalise_query_string - out_str=%s\n", out_str);
+ switch_safe_free(query_string_dup);
+ return strdup(out_str);
+}
+
+/**
+ * Create the string to sign for a Azure Blob Service signature calculation
+ * @param verb (PUT/GET)
+ * @param account account blob is stored in
+ * @param blob to access (filename.ext)
+ * @param content_length content length
+ * @param content_type optional content type
+ * @param content_md5 optional content MD5 checksum
+ * @param date header
+ * @param resources the canonicalised resources
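+ * Example (illustrative): a 4 byte PUT of audio/wav to account "myaccount", blob "dir/file.wav" produces
+ *   "PUT\n\n\n4\n\naudio/wav\n<date>\n\n\n\n\n\nx-ms-version:2015-12-11\n/myaccount/dir/file.wav"
+ *   followed by the canonicalised resources; the empty fields are Content-Encoding, Content-Language,
+ *   Content-MD5 and the conditional/Range headers.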
+ * @return the string_to_sign (must be freed)
+ */
+static char *azure_blob_string_to_sign(const char *verb, const char *account, const char *blob, unsigned int content_length, const char *content_type, const char *content_md5, const char *date, const char *resources)
+{
+ char *content_length_str = NULL;
+ char *string_to_sign;
+
+ if (content_length > 0) {
+ content_length_str = switch_mprintf("%u", content_length);
+ }
+
+ string_to_sign = switch_mprintf("%s\n\n\n%s\n%s\n%s\n%s\n\n\n\n\n\nx-ms-version:" MS_VERSION "\n/%s/%s%s",
+ verb, content_length_str ? content_length_str : "", content_md5 ? content_md5 : "", content_type ? content_type : "",
+ date, account, blob, resources);
+
+ switch_safe_free(content_length_str);
+ return string_to_sign;
+}
+
+/**
+ * Create the Azure Blob Service signature
+ * @param signature buffer to store the signature
+ * @param signature_length length of signature buffer
+ * @param string_to_sign
+ * @param secret_access_key secret access key
+ * @return the signature buffer or NULL if missing input
+ */
+static char *azure_blob_signature(char *signature, int signature_length, const char *string_to_sign, const char *secret_access_key)
+{
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "azure_blob_signature to '%s'\n", string_to_sign);
+
+#if defined(HAVE_OPENSSL)
+ unsigned int signature_raw_length = SHA256_LENGTH;
+ char signature_raw[SHA256_LENGTH];
+ signature_raw[0] = '\0';
+ if (!signature || signature_length <= 0) {
+ return NULL;
+ }
+ if (zstr(secret_access_key)) {
+ return NULL;
+ }
+ if (!string_to_sign) {
+ string_to_sign = "";
+ }
+
+ HMAC(EVP_sha256(),
+ secret_access_key, strlen(secret_access_key),
+ (const unsigned char *)string_to_sign,
+ strlen(string_to_sign),
+ (unsigned char *)signature_raw,
+ &signature_raw_length);
+
+ /* convert result to base64 */
+ switch_b64_encode((unsigned char *)signature_raw, signature_raw_length, (unsigned char *)signature, signature_length);
+
+#endif
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "azure_blob_signature result %s\n", signature);
+ return signature;
+}
+
+/**
+ * Create an authentication signature for Azure Blob Service
+ * @param verb (PUT/GET)
+ * @param url address (virtual-host-style)
+ * @param base_domain (optional - Azure Blob assumed if not specified)
+ * @param content_length content length
+ * @param content_type optional content type
+ * @param content_md5 optional content MD5 checksum
+ * @param key secret access key
+ * @param date header
+ * @param query_string extra parameters for the URL
+ * @return signature for Authorization header (must be freed)
+ */
+static char *azure_blob_authentication_create(const char *verb, const char *url, const char *base_domain, unsigned int content_length, const char *content_type, const char *content_md5, const char *key, const char *date, const char *query_string)
+{
+ char signature[AZURE_SIGNATURE_LENGTH_MAX] = "";
+ char *string_to_sign;
+ char *url_dup = strdup(url);
+ char *account;
+ char *blob;
+ char *resources;
+ char *result;
+
+ resources = canonicalise_query_string(query_string);
+
+ /* create base64 encoded signature */
+ parse_url(url_dup, base_domain, "blob", &account, &blob);
+ string_to_sign = azure_blob_string_to_sign(verb, account, blob, content_length, content_type, content_md5, date, resources);
+ azure_blob_signature(signature, AZURE_SIGNATURE_LENGTH_MAX, string_to_sign, key);
+
+ result = switch_mprintf("SharedKey %s:%s", account, signature);
+
+ free(string_to_sign);
+ free(url_dup);
+ free(resources);
+
+ return result;
+}
+
+/**
+ * Read callback for libcurl - reads data from memory. Same function signature as fread(3)
+ */
+static size_t curl_memory_read_callback(void *ptr, size_t size, size_t nmemb, void *userp)
+{
+ curl_memory_read_t *info = (curl_memory_read_t *) userp;
+ size_t bytes_requested = size * nmemb;
+
+ if (info->read_ptr == NULL) {
+ return 0;
+ } else if (bytes_requested <= info->size_left) {
+ memcpy(ptr, info->read_ptr, bytes_requested);
+ info->read_ptr += bytes_requested;
+ info->size_left -= bytes_requested;
+ return nmemb;
+ } else {
+ memcpy(ptr, info->read_ptr, info->size_left);
+ info->read_ptr = NULL;
+ size_t items = info->size_left / size;
+ info->size_left = 0;
+ return items;
+ }
+}
+
+/** Convert the block number to a base64 encoded string
+ * @param num the number to encode
+ * @return the base64 string (must be freed)
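+ * Example (illustrative): block 1 encodes "0000000000000001" as "MDAwMDAwMDAwMDAwMDAwMQ=="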
+ */
+static char *azure_blob_block_num_to_id(const unsigned int num) {
+ char num_str[BLOCK_STR_LENGTH];
+ int num_len;
+ char *out_str;
+
+ num_len = switch_snprintf(num_str, sizeof(num_str), "%016u", num);
+
+ switch_malloc(out_str, BLOCK_ID_LENGTH);
+
+ switch_b64_encode((unsigned char *) num_str, num_len, (unsigned char *) out_str, BLOCK_ID_LENGTH);
+
+ return out_str;
+}
+
+/**
+ * Send blocklist message once we have uploaded all of the blob blocks.
+ * @param profile with the Azure Blob credentials
+ * @param url the url to send the request to
+ * @param num_blocks the number of blocks that the file was sent in
+ * @return SWITCH_STATUS_SUCCESS on success
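+ * The request body is a "Put Block List" XML document, e.g. (illustrative):
+ *   <?xml version="1.0" encoding="utf-8"?>
+ *   <BlockList>
+ *     <Uncommitted>MDAwMDAwMDAwMDAwMDAwMQ==</Uncommitted>
+ *   </BlockList>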
+ */
+switch_status_t azure_blob_finalise_put(http_profile_t *profile, const char *url, const unsigned int num_blocks) {
+ switch_status_t status = SWITCH_STATUS_SUCCESS;
+
+ switch_curl_slist_t *headers = NULL;
+ CURL *curl_handle = NULL;
+ long httpRes = 0;
+ char xmlDoc[2048] = "<?xml version=\"1.0\" encoding=\"utf-8\"?>\n<BlockList>\n";
+ char *p = &xmlDoc[strlen(xmlDoc)];
+ char *query_string = NULL;
+ char *full_url = NULL;
+
+ for (int i = 1; i < num_blocks; i ++) {
+ char *block_id = azure_blob_block_num_to_id(i);
+ p += switch_snprintf(p, &xmlDoc[sizeof(xmlDoc)] - p, " <Uncommitted>%s</Uncommitted>\n", block_id);
+ switch_safe_free(block_id);
+ }
+ strncpy(p, "</BlockList>", &xmlDoc[sizeof(xmlDoc)] - p);
+
+ headers = switch_curl_slist_append(headers, "Content-Type: application/xml");
+ headers = azure_blob_append_headers(profile, headers, "PUT", strlen(xmlDoc), "application/xml", url, 0, &query_string);
+
+ if (query_string) {
+ full_url = switch_mprintf("%s?%s", url, query_string);
+ free(query_string);
+ } else {
+ switch_strdup(full_url, url);
+ }
+
+ curl_handle = switch_curl_easy_init();
+ if (!curl_handle) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "switch_curl_easy_init() failure\n");
+ status = SWITCH_STATUS_FALSE;
+ goto done;
+ }
+
+ switch_curl_easy_setopt(curl_handle, CURLOPT_PUT, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_NOSIGNAL, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_URL, full_url);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_FOLLOWLOCATION, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_MAXREDIRS, 10);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "freeswitch-http-cache/1.0");
+
+ curl_memory_read_t upload_info = { xmlDoc, strlen(xmlDoc) };
+ switch_curl_easy_setopt(curl_handle, CURLOPT_READFUNCTION, curl_memory_read_callback);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_READDATA, &upload_info);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_INFILESIZE_LARGE, (curl_off_t) strlen(xmlDoc));
+
+ //NB. we ignore connect_timeout, ssl_verifypeer, ssl_cacert, ssl_verifyhost cache options
+
+ switch_curl_easy_perform(curl_handle);
+ switch_curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &httpRes);
+ switch_curl_easy_cleanup(curl_handle);
+
+ if (httpRes == 200 || httpRes == 201 || httpRes == 202 || httpRes == 204) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_DEBUG, "final saved to %s\n", url);
+ } else {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_ERROR, "Received HTTP error %ld trying to save %s\n", httpRes, url);
+ status = SWITCH_STATUS_GENERR;
+ }
+
+ done:
+ switch_safe_free(full_url);
+
+ if (headers) {
+ switch_curl_slist_free_all(headers);
+ }
+
+ return status;
+}
+
+/**
+ * Append the specific Azure Blob Service headers
+ * @param profile with the Azure Blob credentials
+ * @param headers the list of headers to append to
+ * @param verb (PUT/GET)
+ * @param content_length content length
+ * @param content_type optional content type
+ * @param url the url to send the request to
+ * @param block_num the number of the block being uploaded (0 for GET or the block list commit)
+ * @param query_string returned query string (must be freed); may be NULL if not required
+ * @return list of headers (must be freed)
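+ * For PUT requests the generated query string is "blockid=<base64 block id>&comp=block" when
+ * block_num > 0, or "comp=blocklist" for the final block list commit.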
+ */
+
+switch_curl_slist_t *azure_blob_append_headers(http_profile_t *profile, switch_curl_slist_t *headers,
+ const char *verb, unsigned int content_length, const char *content_type, const char *url, const unsigned int block_num, char **query_string)
+{
+ char date[256];
+ char header[1024];
+ char *authenticate;
+ char *my_query_string = NULL;
+
+ if (!strcmp(verb, "PUT")) {
+ if (block_num > 0) {
+ char *block_id = azure_blob_block_num_to_id(block_num);
+ my_query_string = switch_mprintf("blockid=%s&comp=block", block_id);
+ switch_safe_free(block_id);
+ } else {
+ switch_strdup(my_query_string, "comp=blocklist");
+ }
+ }
+
+ /* Date: */
+ switch_rfc822_date(date, switch_time_now());
+ switch_snprintf(header, sizeof(header), "Date: %s", date);
+ headers = switch_curl_slist_append(headers, header);
+
+ headers = switch_curl_slist_append(headers, "x-ms-version: " MS_VERSION);
+
+ /* Authorization: */
+ authenticate = azure_blob_authentication_create(verb, url, profile->base_domain, content_length,
+ content_type, "", profile->secret_access_key, date, my_query_string);
+ switch_snprintf(header, sizeof(header), "Authorization: %s", authenticate);
+ free(authenticate);
+ headers = switch_curl_slist_append(headers, header);
+
+ if (query_string) {
+ *query_string = my_query_string;
+ } else {
+ switch_safe_free(my_query_string);
+ }
+
+ return headers;
+}
+
+/**
+ * Read the Azure Blob Service profile
+ * @param xml the portion of the XML document containing this profile's <azure-blob> configuration
+ * @param profile to fill in with the secret access key, base domain, bytes per block and callbacks
+ * @return SWITCH_STATUS_SUCCESS on success
+ */
+switch_status_t azure_blob_config_profile(switch_xml_t xml, http_profile_t *profile) {
+ switch_status_t status = SWITCH_STATUS_SUCCESS;
+
+ char *key = NULL;
+ profile->append_headers_ptr = azure_blob_append_headers;
+ profile->finalise_put_ptr = azure_blob_finalise_put;
+
+ switch_xml_t base_domain_xml = switch_xml_child(xml, "base-domain");
+ /* check if environment variables set the keys */
+ key = getenv("AZURE_STORAGE_ACCESS_KEY");
+ if (!zstr(key)) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Using AZURE_STORAGE_ACCESS_KEY environment variables for Azure access on profile \"%s\"\n", profile->name);
+ key = switch_safe_strdup(key);
+ } else {
+ /* use configuration for keys */
+ switch_xml_t secret = switch_xml_child(xml, "secret-access-key");
+
+ if (secret) {
+ key = switch_strip_whitespace(switch_xml_txt(secret));
+ } else {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Missing key secret\n");
+ }
+ }
+
+ if (zstr(key)) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "Missing Azure Blob credentials for profile \"%s\"\n", profile->name);
+ status = SWITCH_STATUS_FALSE;
+ } else {
+ /* decode the base64-encoded access key into its raw bytes */
+ switch_malloc(profile->secret_access_key, AZURE_SIGNATURE_LENGTH_MAX);
+ switch_b64_decode((char *) key, profile->secret_access_key, AZURE_SIGNATURE_LENGTH_MAX);
+ }
+ switch_safe_free(key);
+
+ profile->bytes_per_block = 4000000; /* 4 MB block size */
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Set number of bytes per block to %zu\n", profile->bytes_per_block);
+
+ if (base_domain_xml) {
+ profile->base_domain = switch_strip_whitespace(switch_xml_txt(base_domain_xml));
+ if (zstr(profile->base_domain)) {
+ switch_safe_free(profile->base_domain);
+ }
+ }
+ return status;
+}
+
+/* For Emacs:
+ * Local Variables:
+ * mode:c
+ * indent-tabs-mode:t
+ * tab-width:4
+ * c-basic-offset:4
+ * End:
+ * For VIM:
+ * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet
+ */
--- /dev/null
+/*
+ * azure.h for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ * Copyright (C) 2013-2014, Grasshopper
+ *
+ * Version: MPL 1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is aws.h for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ *
+ * The Initial Developer of the Original Code is Grasshopper
+ * Portions created by the Initial Developer are Copyright (C)
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Chris Rienzo <chris.rienzo@grasshopper.com>
+ * Richard Screene <richard.screene@thisisdrum.com>
+ *
+ * azure.h - Some Azure Blob Service helper functions
+ *
+ */
+#ifndef AZURE_H
+#define AZURE_H
+
+#include <switch.h>
+#include <switch_curl.h>
+#include "common.h"
+
+#define AZURE_SIGNATURE_LENGTH_MAX 256
+
+switch_curl_slist_t *azure_blob_append_headers(http_profile_t *profile, switch_curl_slist_t *headers,
+ const char *verb, unsigned int content_length, const char *content_type, const char *url, const unsigned int block_num, char **query_string);
+switch_status_t azure_blob_finalise_put(http_profile_t *profile, const char *url, const unsigned int num_blocks);
+switch_status_t azure_blob_config_profile(switch_xml_t xml, http_profile_t *profile);
+
+#endif
+
+/* For Emacs:
+ * Local Variables:
+ * mode:c
+ * indent-tabs-mode:t
+ * tab-width:4
+ * c-basic-offset:4
+ * End:
+ * For VIM:
+ * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet
+ */
--- /dev/null
+/*
+ * common.c for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ * Copyright (C) 2013-2014, Grasshopper
+ *
+ * Version: MPL 1.1
+ *
+ * The contents of this file are subject to the Mozilla Public License Version
+ * 1.1 (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ * http://www.mozilla.org/MPL/
+ *
+ * Software distributed under the License is distributed on an "AS IS" basis,
+ * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
+ * for the specific language governing rights and limitations under the
+ * License.
+ *
+ * The Original Code is common.c for FreeSWITCH Modular Media Switching Software Library / Soft-Switch Application
+ *
+ * The Initial Developer of the Original Code is Grasshopper
+ * Portions created by the Initial Developer are Copyright (C)
+ * the Initial Developer. All Rights Reserved.
+ *
+ * Contributor(s):
+ * Chris Rienzo <chris.rienzo@grasshopper.com>
+ *
+ * common.c - Functions common to the store provider
+ *
+ */
+
+
+#include <switch.h>
+
+/**
+* Reverse string substring search
+*/
+static char *my_strrstr(const char *haystack, const char *needle)
+{
+ char *s;
+ size_t needle_len;
+ size_t haystack_len;
+
+ if (zstr(haystack)) {
+ return NULL;
+ }
+
+ if (zstr(needle)) {
+ return (char *)haystack;
+ }
+
+ needle_len = strlen(needle);
+ haystack_len = strlen(haystack);
+ if (needle_len > haystack_len) {
+ return NULL;
+ }
+
+ s = (char *)(haystack + haystack_len - needle_len);
+ do {
+ if (!strncmp(s, needle, needle_len)) {
+ return s;
+ }
+ } while (s-- != haystack);
+
+ return NULL;
+}
+
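+/**
+ * Parse the bucket (or storage account) and object from a virtual-host-style URL
+ * @param url to parse. This value is modified.
+ * @param base_domain of the URL (optional)
+ * @param default_base_domain assumed when base_domain is not given ("s3" for AWS, "blob" for Azure)
+ * @param bucket to store the bucket/account name in
+ * @param object to store the object path in
+ * Example (illustrative): "http://quotes.s3.amazonaws.com/nelson" yields bucket "quotes" and object "nelson".
+ */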
+void parse_url(char *url, const char *base_domain, const char *default_base_domain, char **bucket, char **object)
+{
+ char *bucket_start = NULL;
+ char *bucket_end;
+ char *object_start;
+
+ *bucket = NULL;
+ *object = NULL;
+
+ if (zstr(url)) {
+ return;
+ }
+
+ /* expect: http(s)://bucket.foo-bar.s3.amazonaws.com/object */
+ if (!strncasecmp(url, "https://", 8)) {
+ bucket_start = url + 8;
+ } else if (!strncasecmp(url, "http://", 7)) {
+ bucket_start = url + 7;
+ }
+ if (zstr(bucket_start)) {
+ /* invalid URL */
+ return;
+ }
+
+ {
+ char base_domain_match[1024];
+ if (zstr(base_domain)) {
+ base_domain = default_base_domain;
+ }
+ switch_snprintf(base_domain_match, 1024, ".%s", base_domain);
+ bucket_end = my_strrstr(bucket_start, base_domain_match);
+ }
+ if (!bucket_end) {
+ /* invalid URL */
+ return;
+ }
+ *bucket_end = '\0';
+
+ object_start = strchr(bucket_end + 1, '/');
+ if (!object_start) {
+ /* invalid URL */
+ return;
+ }
+ object_start++;
+
+ if (zstr(bucket_start) || zstr(object_start)) {
+ /* invalid URL */
+ return;
+ }
+
+ // strip any query string from the end of the URL
+ char *p = strchr(object_start, '?');
+ if (p) {
+ *p = '\0';
+ }
+
+ *bucket = bucket_start;
+ *object = object_start;
+}
+
+
+
+/* For Emacs:
+ * Local Variables:
+ * mode:c
+ * indent-tabs-mode:t
+ * tab-width:4
+ * c-basic-offset:4
+ * End:
+ * For VIM:
+ * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet
+ */
--- /dev/null
+#ifndef COMMON_H
+#define COMMON_H
+
+#include <switch.h>
+
+/**
+ * An http profile. Defines optional credentials
+ * for access to Amazon S3 and Azure Blob Service
+ */
+struct http_profile {
+ const char *name;
+ char *aws_s3_access_key_id;
+ char *secret_access_key;
+ char *base_domain;
+ switch_size_t bytes_per_block;
+
+ // function to be called to add the profile specific headers to the GET/PUT requests
+ switch_curl_slist_t *(*append_headers_ptr)(struct http_profile *profile, switch_curl_slist_t *headers,
+ const char *verb, unsigned int content_length, const char *content_type, const char *url, const unsigned int block_num, char **query_string);
+ // function to be called to perform the profile-specific actions at the end of the PUT operation
+ switch_status_t (*finalise_put_ptr)(struct http_profile *profile, const char *url, const unsigned int num_blocks);
+};
+typedef struct http_profile http_profile_t;
+
+
+void parse_url(char *url, const char *base_domain, const char *default_base_domain, char **bucket, char **object);
+
+#endif
+
+/* For Emacs:
+ * Local Variables:
+ * mode:c
+ * indent-tabs-mode:t
+ * tab-width:4
+ * c-basic-offset:4
+ * End:
+ * For VIM:
+ * vim:set softtabstop=4 shiftwidth=4 tabstop=4 noet
+ */
<domain name="bucket2.s3.amazonaws.com"/>
</domains>
</profile>
+
+ <profile name="blob">
+ <azure-blob>
+ <!-- base64-encoded storage account access key, can override with AZURE_STORAGE_ACCESS_KEY environment variable -->
+ <secret-access-key>kOOY4Y/sqZU9bsLjmN+9McVwTry+UIn1Owt4Zs/2S2FQT0eAWLKskZ0V6/gGFqCAKVvwXoGjqUn7PNbVjhZiNA==</secret-access-key>
+ </azure-blob>
+ <domains>
+ <domain name="account.blob.core.windows.net"/>
+ </domains>
+ </profile>
</profiles>
</configuration>
#include <switch.h>
#include <switch_curl.h>
#include "aws.h"
+#include "azure.h"
#include <stdlib.h>
typedef struct url_cache url_cache_t;
-/**
- * An http profile. Defines optional credentials
- * for access to Amazon S3.
- */
-struct http_profile {
- const char *name;
- const char *aws_s3_access_key_id;
- const char *aws_s3_secret_access_key;
- const char *aws_s3_base_domain;
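+/**
+ * State for the libcurl read callback: the file being uploaded and the
+ * number of bytes remaining in the current block
+ */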
+struct block_info {
+ FILE *f;
+ size_t bytes_to_read;
};
-typedef struct http_profile http_profile_t;
+typedef struct block_info block_info_t;
/**
* status if the cache entry
static void url_cache_clear(url_cache_t *cache, switch_core_session_t *session);
static http_profile_t *url_cache_http_profile_find(url_cache_t *cache, const char *name);
static http_profile_t *url_cache_http_profile_find_by_fqdn(url_cache_t *cache, const char *url);
-static http_profile_t *url_cache_http_profile_add(url_cache_t *cache, const char *name, const char *aws_s3_access_key_id, const char *aws_s3_secret_access_key, const char *aws_s3_base_domain);
-
-static switch_curl_slist_t *append_aws_s3_headers(switch_curl_slist_t *headers, http_profile_t *profile, const char *verb, const char *content_type, const char *url);
/**
}
}
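+/**
+ * libcurl read callback that reads at most block_info->bytes_to_read bytes of the
+ * current block from the open file. Same signature as fread(3).
+ */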
+static size_t read_callback(void *ptr, size_t size, size_t nmemb, void *userp)
+{
+ block_info_t *block_info = (block_info_t *) userp;
+
+ if (size * nmemb <= block_info->bytes_to_read) {
+ block_info->bytes_to_read -= size * nmemb;
+ return fread(ptr, size, nmemb, block_info->f);
+ } else {
+ size_t remaining = block_info->bytes_to_read;
+ block_info->bytes_to_read = 0;
+ return fread(ptr, 1, remaining, block_info->f);
+ }
+}
+
/**
* Put a file to the URL
* @param cache the cache
long httpRes = 0;
struct stat file_info = {0};
FILE *file_to_put = NULL;
- int fd;
+
+ switch_size_t sent_bytes = 0;
+ switch_size_t bytes_per_block;
+ unsigned int block_num = 1;
/* guess what type of mime content this is going to be */
if ((ext = strrchr(filename, '.'))) {
}
}
- buf = switch_mprintf("Content-Type: %s", mime_type);
-
/* find profile for domain */
if (!profile) {
profile = url_cache_http_profile_find_by_fqdn(cache, url);
}
- headers = switch_curl_slist_append(headers, buf);
- headers = append_aws_s3_headers(headers, profile, "PUT", mime_type, url);
-
/* open file and get the file size */
switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_DEBUG, "opening %s for upload to %s\n", filename, url);
- fd = open(filename, O_RDONLY);
- if (fd == -1) {
- switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "open() error: %s\n", strerror(errno));
- status = SWITCH_STATUS_FALSE;
- goto done;
- }
- if (fstat(fd, &file_info) == -1) {
- switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "fstat() error: %s\n", strerror(errno));
- }
- close(fd);
- /* libcurl requires FILE* */
- file_to_put = fopen(filename, "rb");
+ file_to_put = fopen(filename, "rb");
if (!file_to_put) {
switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "fopen() error: %s\n", strerror(errno));
- status = SWITCH_STATUS_FALSE;
- goto done;
+ return SWITCH_STATUS_FALSE;
}
- curl_handle = switch_curl_easy_init();
- if (!curl_handle) {
- switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "switch_curl_easy_init() failure\n");
- status = SWITCH_STATUS_FALSE;
- goto done;
+ if (fstat(fileno(file_to_put), &file_info) == -1) {
+ switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "fstat() error: %s\n", strerror(errno));
+ fclose(file_to_put);
+ return SWITCH_STATUS_FALSE;
}
- switch_curl_easy_setopt(curl_handle, CURLOPT_UPLOAD, 1);
- switch_curl_easy_setopt(curl_handle, CURLOPT_PUT, 1);
- switch_curl_easy_setopt(curl_handle, CURLOPT_NOSIGNAL, 1);
- switch_curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers);
- switch_curl_easy_setopt(curl_handle, CURLOPT_URL, url);
- switch_curl_easy_setopt(curl_handle, CURLOPT_READDATA, file_to_put);
- switch_curl_easy_setopt(curl_handle, CURLOPT_INFILESIZE_LARGE, (curl_off_t)file_info.st_size);
- switch_curl_easy_setopt(curl_handle, CURLOPT_FOLLOWLOCATION, 1);
- switch_curl_easy_setopt(curl_handle, CURLOPT_MAXREDIRS, 10);
- switch_curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "freeswitch-http-cache/1.0");
- if (cache->connect_timeout > 0) {
- switch_curl_easy_setopt(curl_handle, CURLOPT_CONNECTTIMEOUT, cache->connect_timeout);
- }
- if (!cache->ssl_verifypeer) {
- switch_curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 0L);
- } else {
- /* this is the file with all the trusted certificate authorities */
- if (!zstr(cache->ssl_cacert)) {
- switch_curl_easy_setopt(curl_handle, CURLOPT_CAINFO, cache->ssl_cacert);
+
+ buf = switch_mprintf("Content-Type: %s", mime_type);
+
+ bytes_per_block = profile && profile->bytes_per_block ? profile->bytes_per_block : file_info.st_size;
+
+ // For Azure, the whole file is re-uploaded on error; we could instead retry only the
+ // blocks that failed. The blocks could also be uploaded in parallel.
+
+ while (sent_bytes < file_info.st_size) {
+ switch_size_t content_length = file_info.st_size - sent_bytes < bytes_per_block ? file_info.st_size - sent_bytes : bytes_per_block;
+ // make a copy of the URL so we can add the query string to it
+ char *query_string = NULL;
+ char *full_url = NULL;
+
+ headers = switch_curl_slist_append(NULL, buf);
+ if (profile && profile->append_headers_ptr) {
+ profile->append_headers_ptr(profile, headers, "PUT", content_length, mime_type, url, block_num, &query_string);
}
- /* verify that the host name matches the cert */
- if (!cache->ssl_verifyhost) {
- switch_curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYHOST, 0L);
+
+ if (query_string) {
+ full_url = switch_mprintf("%s?%s", url, query_string);
+ free(query_string);
+ } else {
+ switch_strdup(full_url, url);
+ }
+
+ // seek to the correct position in the file
+ if (fseek(file_to_put, sent_bytes, SEEK_SET) != 0) {
+ switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Failed to seek file - errno=%d\n", errno);
+ status = SWITCH_STATUS_FALSE;
+ goto done;
+ }
+
+ curl_handle = switch_curl_easy_init();
+ if (!curl_handle) {
+ switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "switch_curl_easy_init() failure\n");
+ status = SWITCH_STATUS_FALSE;
+ goto done;
+ }
+ switch_curl_easy_setopt(curl_handle, CURLOPT_UPLOAD, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_PUT, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_NOSIGNAL, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_HTTPHEADER, headers);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_URL, full_url);
+
+ /* we want to use our own read function so we can send a portion of the file */
+ switch_curl_easy_setopt(curl_handle, CURLOPT_READFUNCTION, read_callback);
+ block_info_t block_info = { file_to_put, content_length };
+ switch_curl_easy_setopt(curl_handle, CURLOPT_READDATA, &block_info);
+
+ switch_curl_easy_setopt(curl_handle, CURLOPT_INFILESIZE_LARGE, (curl_off_t) content_length);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_FOLLOWLOCATION, 1);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_MAXREDIRS, 10);
+ switch_curl_easy_setopt(curl_handle, CURLOPT_USERAGENT, "freeswitch-http-cache/1.0");
+ if (cache->connect_timeout > 0) {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_CONNECTTIMEOUT, cache->connect_timeout);
+ }
+ if (!cache->ssl_verifypeer) {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYPEER, 0L);
+ } else {
+ /* this is the file with all the trusted certificate authorities */
+ if (!zstr(cache->ssl_cacert)) {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_CAINFO, cache->ssl_cacert);
+ }
+ /* verify that the host name matches the cert */
+ if (!cache->ssl_verifyhost) {
+ switch_curl_easy_setopt(curl_handle, CURLOPT_SSL_VERIFYHOST, 0L);
+ }
+ }
+ switch_curl_easy_perform(curl_handle);
+ switch_curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &httpRes);
+ switch_curl_easy_cleanup(curl_handle);
+
+ if (httpRes == 200 || httpRes == 201 || httpRes == 202 || httpRes == 204) {
+ switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_DEBUG, "%s saved to %s\n", filename, full_url);
+ } else {
+ switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "Received HTTP error %ld trying to save %s to %s\n", httpRes, filename, url);
+ status = SWITCH_STATUS_GENERR;
}
- }
- switch_curl_easy_perform(curl_handle);
- switch_curl_easy_getinfo(curl_handle, CURLINFO_RESPONSE_CODE, &httpRes);
- switch_curl_easy_cleanup(curl_handle);
- if (httpRes == 200 || httpRes == 201 || httpRes == 202 || httpRes == 204) {
- switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_DEBUG, "%s saved to %s\n", filename, url);
+ done:
+ switch_safe_free(full_url);
+
+ if (headers) {
+ switch_curl_slist_free_all(headers);
+ }
+
+ if (status == SWITCH_STATUS_SUCCESS) {
+ sent_bytes += content_length;
+ block_num ++;
+ } else {
+ // there's no point doing the rest of the blocks if one failed
+ break;
+ }
+ } //while
+
+ fclose(file_to_put);
+
+ if (status == SWITCH_STATUS_SUCCESS) {
if (cache_local_file) {
cached_url_t *u = NULL;
/* save to cache */
}
url_cache_unlock(cache, session);
}
- } else {
- switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_ERROR, "Received HTTP error %ld trying to save %s to %s\n", httpRes, filename, url);
- status = SWITCH_STATUS_GENERR;
- }
-
-done:
- if (file_to_put) {
- fclose(file_to_put);
- }
- if (headers) {
- switch_curl_slist_free_all(headers);
+ if (profile && profile->finalise_put_ptr) {
+ profile->finalise_put_ptr(profile, url, block_num);
+ }
}
switch_safe_free(buf);
return NULL;
}
-/**
- * Add a profile to the cache
- */
-static http_profile_t *url_cache_http_profile_add(url_cache_t *cache, const char *name, const char *aws_s3_access_key_id, const char *aws_s3_secret_access_key, const char *aws_s3_base_domain)
-{
- http_profile_t *profile = switch_core_alloc(cache->pool, sizeof(*profile));
- profile->name = switch_core_strdup(cache->pool, name);
- if (aws_s3_access_key_id) {
- profile->aws_s3_access_key_id = switch_core_strdup(cache->pool, aws_s3_access_key_id);
- }
- if (aws_s3_secret_access_key) {
- profile->aws_s3_secret_access_key = switch_core_strdup(cache->pool, aws_s3_secret_access_key);
- }
- if (aws_s3_base_domain) {
- profile->aws_s3_base_domain = switch_core_strdup(cache->pool, aws_s3_base_domain);
- }
-
- switch_core_hash_insert(cache->profiles, profile->name, profile);
- return profile;
-}
-
/**
* Find file extension at end of URL.
* @param url to search
switch_safe_free(url);
}
-/**
- * Append Amazon S3 headers to request if necessary
- * @param headers to add to. If NULL, new headers are created.
- * @param profile with S3 credentials
- * @param content_type of object (PUT only)
- * @param verb (GET/PUT)
- * @param url
- * @return updated headers
- */
-static switch_curl_slist_t *append_aws_s3_headers(switch_curl_slist_t *headers, http_profile_t *profile, const char *verb, const char *content_type, const char *url)
-{
- /* check if Amazon headers are needed */
- if (profile && profile->aws_s3_access_key_id && aws_s3_is_s3_url(url, profile->aws_s3_base_domain)) {
- char date[256];
- char header[1024];
- char *authenticate;
-
- /* Date: */
- switch_rfc822_date(date, switch_time_now());
- snprintf(header, 1024, "Date: %s", date);
- headers = switch_curl_slist_append(headers, header);
-
- /* Authorization: */
- authenticate = aws_s3_authentication_create(verb, url, profile->aws_s3_base_domain, content_type, "", profile->aws_s3_access_key_id, profile->aws_s3_secret_access_key, date);
- snprintf(header, 1024, "Authorization: %s", authenticate);
- free(authenticate);
- headers = switch_curl_slist_append(headers, header);
- }
- return headers;
-}
-
/**
* Fetch a file via HTTP
* @param cache the cache
profile = url_cache_http_profile_find_by_fqdn(cache, url->url);
}
- /* add optional AWS S3 headers if necessary */
- headers = append_aws_s3_headers(headers, profile, "GET", "", url->url);
+ if (profile && profile->append_headers_ptr) {
+ headers = profile->append_headers_ptr(profile, headers, "GET", 0, "", url->url, 0, NULL);
+ }
curl_handle = switch_curl_easy_init();
switch_log_printf(SWITCH_CHANNEL_SESSION_LOG(session), SWITCH_LOG_DEBUG, "opening %s for URL cache\n", get_data.url->filename);
for (profile = switch_xml_child(profiles, "profile"); profile; profile = profile->next) {
const char *name = switch_xml_attr_soft(profile, "name");
if (!zstr(name)) {
- http_profile_t *profile_obj;
+ http_profile_t *profile_obj = switch_core_alloc(cache->pool, sizeof(*profile_obj));
+ switch_xml_t profile_xml;
switch_xml_t domains;
- switch_xml_t s3 = switch_xml_child(profile, "aws-s3");
- char *access_key_id = NULL;
- char *secret_access_key = NULL;
- char *base_domain = NULL;
- if (s3) {
- switch_xml_t base_domain_xml = switch_xml_child(s3, "base-domain");
-
- /* check if environment variables set the keys */
- access_key_id = getenv("AWS_ACCESS_KEY_ID");
- secret_access_key = getenv("AWS_SECRET_ACCESS_KEY");
- if (!zstr(access_key_id) && !zstr(secret_access_key)) {
- switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Using AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY environment variables for s3 access on profile \"%s\"\n", name);
- access_key_id = strdup(access_key_id);
- secret_access_key = strdup(secret_access_key);
- } else {
- /* use configuration for keys */
- switch_xml_t id = switch_xml_child(s3, "access-key-id");
- switch_xml_t secret = switch_xml_child(s3, "secret-access-key");
- access_key_id = NULL;
- secret_access_key = NULL;
-
- if (id && secret) {
- access_key_id = switch_strip_whitespace(switch_xml_txt(id));
- secret_access_key = switch_strip_whitespace(switch_xml_txt(secret));
- if (zstr(access_key_id) || zstr(secret_access_key)) {
- switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_WARNING, "Missing aws s3 credentials for profile \"%s\"\n", name);
- switch_safe_free(access_key_id);
- access_key_id = NULL;
- switch_safe_free(secret_access_key);
- secret_access_key = NULL;
- }
- } else {
- switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Missing key id or secret\n");
- continue;
- }
+
+ switch_strdup(profile_obj->name, name);
+ profile_obj->aws_s3_access_key_id = NULL;
+ profile_obj->secret_access_key = NULL;
+ profile_obj->base_domain = NULL;
+ profile_obj->bytes_per_block = 0;
+ profile_obj->append_headers_ptr = NULL;
+ profile_obj->finalise_put_ptr = NULL;
+
+ profile_xml = switch_xml_child(profile, "aws-s3");
+ if (profile_xml) {
+ if (aws_s3_config_profile(profile_xml, profile_obj) == SWITCH_STATUS_FALSE) {
+ continue;
}
- if (base_domain_xml) {
- base_domain = switch_strip_whitespace(switch_xml_txt(base_domain_xml));
- if (zstr(base_domain)) {
- switch_safe_free(base_domain);
- base_domain = NULL;
+ } else {
+ profile_xml = switch_xml_child(profile, "azure-blob");
+ if (profile_xml) {
+ if (azure_blob_config_profile(profile_xml, profile_obj) == SWITCH_STATUS_FALSE) {
+ continue;
}
}
}
+
switch_log_printf(SWITCH_CHANNEL_LOG, SWITCH_LOG_INFO, "Adding profile \"%s\" to cache\n", name);
- profile_obj = url_cache_http_profile_add(cache, name, access_key_id, secret_access_key, base_domain);
- switch_safe_free(access_key_id);
- switch_safe_free(secret_access_key);
- switch_safe_free(base_domain);
+ switch_core_hash_insert(cache->profiles, profile_obj->name, profile_obj);
domains = switch_xml_child(profile, "domains");
if (domains) {
BASE=../../../../..
LOCAL_CFLAGS += -I../ -I./
-LOCAL_OBJS= main.o ../aws.o
+LOCAL_OBJS= main.o ../aws.o ../common.o
LOCAL_SOURCES= main.c
include $(BASE)/build/modmake.rules
ASSERT_STRING_EQUALS("jZNO", aws_s3_signature(signature, 5, "PUT\nc8fdb181845a4ca6b8fec737b3581d76\ntext/html\nThu, 17 Nov 2005 18:49:58 GMT\nx-amz-magic:abracadabra\nx-amz-meta-author:foo@bar.com\n/quotes/nelson", "OtxrzxIsfpFjA7SwPzILwy8Bw21TLhquhboDYROV"));
}
-/**
- * Test amazon URL detection
- */
-static void test_check_url(void)
-{
- ASSERT_TRUE(aws_s3_is_s3_url("http://bucket.s3-us-west-1.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket.s3-us-west-1.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("http://bucket.s3.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("http://bucket.s3.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("http://bucket.s3.amazonaws.com/object", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("http://red.bucket.s3.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket.s3.amazonaws.com/object.ext", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket.s3.amazonaws.com/object", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket.s3.amazonaws.com/recordings/1240fwjf8we.mp3", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket.s3.amazonaws.com/en/us/8000/1232345.mp3", NULL));
- ASSERT_TRUE(aws_s3_is_s3_url("https://bucket_with_underscore.s3.amazonaws.com/en/us/8000/1232345.mp3", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("bucket.s3.amazonaws.com/object.ext", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("https://s3.amazonaws.com/bucket/object", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("http://s3.amazonaws.com/bucket/object", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("http://google.com/", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("http://phono.com/audio/troporocks.mp3", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("", NULL));
- ASSERT_FALSE(aws_s3_is_s3_url(NULL, NULL));
- ASSERT_FALSE(aws_s3_is_s3_url("https://example.com/bucket/object", "example.com"));
- ASSERT_TRUE(aws_s3_is_s3_url("http://bucket.example.com/object", "example.com"));
- ASSERT_FALSE(aws_s3_is_s3_url("", "example.com"));
- ASSERT_FALSE(aws_s3_is_s3_url(NULL, "example.com"));
-}
-
/**
* Test bucket/object extraction from URL
*/
{
char *bucket;
char *object;
- aws_s3_parse_url(strdup("http://quotes.s3.amazonaws.com/nelson"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes.s3.amazonaws.com/nelson"), NULL, "s3", &bucket, &object);
ASSERT_STRING_EQUALS("quotes", bucket);
ASSERT_STRING_EQUALS("nelson", object);
- aws_s3_parse_url(strdup("https://quotes.s3.amazonaws.com/nelson.mp3"), NULL, &bucket, &object);
+ parse_url(strdup("https://quotes.s3.amazonaws.com/nelson.mp3"), NULL, "s3", &bucket, &object);
ASSERT_STRING_EQUALS("quotes", bucket);
ASSERT_STRING_EQUALS("nelson.mp3", object);
- aws_s3_parse_url(strdup("http://s3.amazonaws.com/quotes/nelson"), NULL, &bucket, &object);
+ parse_url(strdup("http://s3.amazonaws.com/quotes/nelson"), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup("http://quotes/quotes/nelson"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes/quotes/nelson"), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup("http://quotes.s3.amazonaws.com/"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes.s3.amazonaws.com/"), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup("http://quotes.s3.amazonaws.com"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes.s3.amazonaws.com"), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup("http://quotes"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes"), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup(""), NULL, &bucket, &object);
+ parse_url(strdup(""), NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(NULL, NULL, &bucket, &object);
+ parse_url(NULL, NULL, "s3", &bucket, &object);
ASSERT_NULL(bucket);
ASSERT_NULL(object);
- aws_s3_parse_url(strdup("http://bucket.s3.amazonaws.com/voicemails/recording.wav"), NULL, &bucket, &object);
+ parse_url(strdup("http://bucket.s3.amazonaws.com/voicemails/recording.wav"), NULL, "s3", &bucket, &object);
ASSERT_STRING_EQUALS("bucket", bucket);
ASSERT_STRING_EQUALS("voicemails/recording.wav", object);
- aws_s3_parse_url(strdup("https://my-bucket-with-dash.s3-us-west-2.amazonaws.com/greeting/file/1002/Lumino.mp3"), NULL, &bucket, &object);
+ parse_url(strdup("https://my-bucket-with-dash.s3-us-west-2.amazonaws.com/greeting/file/1002/Lumino.mp3"), NULL, "s3", &bucket, &object);
ASSERT_STRING_EQUALS("my-bucket-with-dash", bucket);
ASSERT_STRING_EQUALS("greeting/file/1002/Lumino.mp3", object);
- aws_s3_parse_url(strdup("http://quotes.s3.foo.bar.s3.amazonaws.com/greeting/file/1002/Lumino.mp3"), NULL, &bucket, &object);
+ parse_url(strdup("http://quotes.s3.foo.bar.s3.amazonaws.com/greeting/file/1002/Lumino.mp3"), NULL, "s3", &bucket, &object);
ASSERT_STRING_EQUALS("quotes.s3.foo.bar", bucket);
ASSERT_STRING_EQUALS("greeting/file/1002/Lumino.mp3", object);
- aws_s3_parse_url(strdup("http://quotes.s3.foo.bar.example.com/greeting/file/1002/Lumino.mp3"), "example.com", &bucket, &object);
+ parse_url(strdup("http://quotes.s3.foo.bar.example.com/greeting/file/1002/Lumino.mp3"), "example.com", "s3", &bucket, &object);
ASSERT_STRING_EQUALS("quotes.s3.foo.bar", bucket);
ASSERT_STRING_EQUALS("greeting/file/1002/Lumino.mp3", object);
}
TEST_INIT
TEST(test_string_to_sign);
TEST(test_signature);
- TEST(test_check_url);
TEST(test_parse_url);
TEST(test_authorization_header);
TEST(test_presigned_url);