return NGHTTP2_ERR_CALLBACK_FAILURE;
result = Curl_xfer_write_resp(data_s, (char *)mem, len, FALSE);
- if(result && result != CURLE_AGAIN)
- return NGHTTP2_ERR_CALLBACK_FAILURE;
+ if(result && result != CURLE_AGAIN) {
+ nghttp2_submit_rst_stream(ctx->h2, 0, stream->id,
+ NGHTTP2_ERR_CALLBACK_FAILURE);
+ return 0;
+ }
nghttp2_session_consume(ctx->h2, stream_id, len);
stream->nrcvd_data += (curl_off_t)len;
char filename[128];
FILE *out;
curl_off_t recv_size;
+ curl_off_t fail_at;
curl_off_t pause_at;
+ curl_off_t abort_at;
int started;
int paused;
int resumed;
void *userdata)
{
struct transfer *t = userdata;
+ size_t blen = (nitems * buflen);
size_t nwritten;
+ fprintf(stderr, "[t-%d] RECV %ld bytes, total=%ld, pause_at=%ld\n",
+ t->idx, (long)blen, (long)t->recv_size, (long)t->pause_at);
if(!t->resumed &&
t->recv_size < t->pause_at &&
- ((t->recv_size + (curl_off_t)(nitems * buflen)) >= t->pause_at)) {
+ ((t->recv_size + (curl_off_t)blen) >= t->pause_at)) {
fprintf(stderr, "[t-%d] PAUSE\n", t->idx);
t->paused = 1;
return CURL_WRITEFUNC_PAUSE;
}
nwritten = fwrite(buf, nitems, buflen, t->out);
- if(nwritten < buflen) {
+ if(nwritten < blen) {
fprintf(stderr, "[t-%d] write failure\n", t->idx);
return 0;
}
t->recv_size += (curl_off_t)nwritten;
+ if(t->fail_at > 0 && t->recv_size >= t->fail_at) {
+ fprintf(stderr, "[t-%d] FAIL by write callback at %ld bytes\n",
+ t->idx, (long)t->recv_size);
+ return CURL_WRITEFUNC_ERROR;
+ }
+
return (size_t)nwritten;
}
-static int setup(CURL *hnd, const char *url, struct transfer *t)
+static int my_progress_cb(void *userdata,
+ curl_off_t dltotal, curl_off_t dlnow,
+ curl_off_t ultotal, curl_off_t ulnow)
+{
+ struct transfer *t = userdata;
+ (void)ultotal;
+ (void)ulnow;
+ (void)dltotal;
+ if(t->abort_at > 0 && dlnow >= t->abort_at) {
+ fprintf(stderr, "[t-%d] ABORT by progress_cb at %ld bytes\n",
+ t->idx, (long)dlnow);
+ return 1;
+ }
+ return 0;
+}
+
+static int setup(CURL *hnd, const char *url, struct transfer *t,
+ int http_version)
{
curl_easy_setopt(hnd, CURLOPT_URL, url);
- curl_easy_setopt(hnd, CURLOPT_HTTP_VERSION, CURL_HTTP_VERSION_2_0);
+ curl_easy_setopt(hnd, CURLOPT_HTTP_VERSION, http_version);
curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYPEER, 0L);
curl_easy_setopt(hnd, CURLOPT_SSL_VERIFYHOST, 0L);
-
+ curl_easy_setopt(hnd, CURLOPT_BUFFERSIZE, (long)(128 * 1024));
curl_easy_setopt(hnd, CURLOPT_WRITEFUNCTION, my_write_cb);
curl_easy_setopt(hnd, CURLOPT_WRITEDATA, t);
+ curl_easy_setopt(hnd, CURLOPT_NOPROGRESS, 0L);
+ curl_easy_setopt(hnd, CURLOPT_XFERINFOFUNCTION, my_progress_cb);
+ curl_easy_setopt(hnd, CURLOPT_XFERINFODATA, t);
/* please be verbose */
if(verbose) {
" download a url with following options:\n"
" -m number max parallel downloads\n"
" -n number total downloads\n"
+ " -A number abort transfer after `number` response bytes\n"
+ " -F number fail writing response after `number` response bytes\n"
" -P number pause transfer after `number` response bytes\n"
+ " -V http_version (http/1.1, h2, h3) http version to use\n"
);
}
size_t i, n, max_parallel = 1;
size_t active_transfers;
size_t pause_offset = 0;
+ size_t abort_offset = 0;
+ size_t fail_offset = 0;
int abort_paused = 0;
struct transfer *t;
+ int http_version = CURL_HTTP_VERSION_2_0;
int ch;
- while((ch = getopt(argc, argv, "ahm:n:P:")) != -1) {
+ while((ch = getopt(argc, argv, "ahm:n:A:F:P:V:")) != -1) {
switch(ch) {
case 'h':
usage(NULL);
case 'n':
transfer_count = (size_t)strtol(optarg, NULL, 10);
break;
+ case 'A':
+ abort_offset = (size_t)strtol(optarg, NULL, 10);
+ break;
+ case 'F':
+ fail_offset = (size_t)strtol(optarg, NULL, 10);
+ break;
case 'P':
pause_offset = (size_t)strtol(optarg, NULL, 10);
break;
+ case 'V': {
+ if(!strcmp("http/1.1", optarg))
+ http_version = CURL_HTTP_VERSION_1_1;
+ else if(!strcmp("h2", optarg))
+ http_version = CURL_HTTP_VERSION_2_0;
+ else if(!strcmp("h3", optarg))
+ http_version = CURL_HTTP_VERSION_3ONLY;
+ else {
+ usage("invalid http version");
+ return 1;
+ }
+ break;
+ }
default:
usage("invalid option");
return 1;
for(i = 0; i < transfer_count; ++i) {
t = &transfers[i];
t->idx = (int)i;
- t->pause_at = (curl_off_t)(pause_offset * i);
+ t->abort_at = (curl_off_t)abort_offset;
+ t->fail_at = (curl_off_t)fail_offset;
+ t->pause_at = (curl_off_t)pause_offset;
}
n = (max_parallel < transfer_count)? max_parallel : transfer_count;
for(i = 0; i < n; ++i) {
t = &transfers[i];
t->easy = curl_easy_init();
- if(!t->easy || setup(t->easy, url, t)) {
+ if(!t->easy || setup(t->easy, url, t, http_version)) {
fprintf(stderr, "[t-%d] FAILED setup\n", (int)i);
return 1;
}
m = curl_multi_info_read(multi_handle, &msgq);
if(m && (m->msg == CURLMSG_DONE)) {
CURL *e = m->easy_handle;
- active_transfers--;
+ --active_transfers;
curl_multi_remove_handle(multi_handle, e);
t = get_transfer_for_easy(e);
if(t) {
t->done = 1;
+ fprintf(stderr, "[t-%d] FINISHED\n", t->idx);
}
- else
+ else {
curl_easy_cleanup(e);
+ fprintf(stderr, "unknown FINISHED???\n");
+ }
}
- else {
- /* nothing happening, maintenance */
- if(abort_paused) {
- /* abort paused transfers */
- for(i = 0; i < transfer_count; ++i) {
- t = &transfers[i];
- if(!t->done && t->paused && t->easy) {
- curl_multi_remove_handle(multi_handle, t->easy);
- t->done = 1;
- active_transfers--;
- fprintf(stderr, "[t-%d] ABORTED\n", t->idx);
- }
+
+
+ /* nothing happening, maintenance */
+ if(abort_paused) {
+ /* abort paused transfers */
+ for(i = 0; i < transfer_count; ++i) {
+ t = &transfers[i];
+ if(!t->done && t->paused && t->easy) {
+ curl_multi_remove_handle(multi_handle, t->easy);
+ t->done = 1;
+ active_transfers--;
+ fprintf(stderr, "[t-%d] ABORTED\n", t->idx);
}
}
- else {
- /* resume one paused transfer */
- for(i = 0; i < transfer_count; ++i) {
- t = &transfers[i];
- if(!t->done && t->paused) {
- t->resumed = 1;
- t->paused = 0;
- curl_easy_pause(t->easy, CURLPAUSE_CONT);
- fprintf(stderr, "[t-%d] RESUMED\n", t->idx);
- break;
- }
+ }
+ else {
+ /* resume one paused transfer */
+ for(i = 0; i < transfer_count; ++i) {
+ t = &transfers[i];
+ if(!t->done && t->paused) {
+ t->resumed = 1;
+ t->paused = 0;
+ curl_easy_pause(t->easy, CURLPAUSE_CONT);
+ fprintf(stderr, "[t-%d] RESUMED\n", t->idx);
+ break;
}
}
+ }
- while(active_transfers < max_parallel) {
- for(i = 0; i < transfer_count; ++i) {
- t = &transfers[i];
- if(!t->started) {
- t->easy = curl_easy_init();
- if(!t->easy || setup(t->easy, url, t)) {
- fprintf(stderr, "[t-%d] FAILEED setup\n", (int)i);
- return 1;
- }
- curl_multi_add_handle(multi_handle, t->easy);
- t->started = 1;
- ++active_transfers;
- fprintf(stderr, "[t-%d] STARTED\n", t->idx);
- break;
+ while(active_transfers < max_parallel) {
+ for(i = 0; i < transfer_count; ++i) {
+ t = &transfers[i];
+ if(!t->started) {
+ t->easy = curl_easy_init();
+ if(!t->easy || setup(t->easy, url, t, http_version)) {
+ fprintf(stderr, "[t-%d] FAILED setup\n", (int)i);
+ return 1;
}
- }
- /* all started */
- if(i == transfer_count)
+ curl_multi_add_handle(multi_handle, t->easy);
+ t->started = 1;
+ ++active_transfers;
+ fprintf(stderr, "[t-%d] STARTED\n", t->idx);
break;
+ }
}
+ /* all started */
+ if(i == transfer_count)
+ break;
}
} while(m);
# download via lib client, 1 at a time, pause/resume at different offsets
@pytest.mark.parametrize("pause_offset", [0, 10*1024, 100*1023, 640000])
- def test_02_21_h2_lib_serial(self, env: Env, httpd, nghttpx, pause_offset, repeat):
- count = 10
+ @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+ def test_02_21_lib_serial(self, env: Env, httpd, nghttpx, proto, pause_offset, repeat):
+ count = 2 if proto == 'http/1.1' else 10
docname = 'data-10m'
url = f'https://localhost:{env.https_port}/{docname}'
client = LocalClient(name='h2-download', env=env)
if not client.exists():
pytest.skip(f'example client not built: {client.name}')
r = client.run(args=[
- '-n', f'{count}', '-P', f'{pause_offset}', url
+ '-n', f'{count}', '-P', f'{pause_offset}', '-V', proto, url
])
r.check_exit_code(0)
srcfile = os.path.join(httpd.docs_dir, docname)
# download via lib client, several at a time, pause/resume
@pytest.mark.parametrize("pause_offset", [100*1023])
- def test_02_22_h2_lib_parallel_resume(self, env: Env, httpd, nghttpx, pause_offset, repeat):
- count = 10
+ @pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
+ def test_02_22_lib_parallel_resume(self, env: Env, httpd, nghttpx, proto, pause_offset, repeat):
+ count = 2 if proto == 'http/1.1' else 10
max_parallel = 5
docname = 'data-10m'
url = f'https://localhost:{env.https_port}/{docname}'
pytest.skip(f'example client not built: {client.name}')
r = client.run(args=[
'-n', f'{count}', '-m', f'{max_parallel}',
- '-P', f'{pause_offset}', url
+ '-P', f'{pause_offset}', '-V', proto, url
])
r.check_exit_code(0)
srcfile = os.path.join(httpd.docs_dir, docname)
self.check_downloads(client, srcfile, count)
# download, several at a time, pause them all and then abort while paused
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
def test_02_23a_lib_abort_paused(self, env: Env, httpd, nghttpx, proto, repeat):
    # h2 multiplexes many streams; http/1.1 gets a smaller configuration
    if proto == 'h2':
        count, max_parallel, pause_offset = 200, 100, 64 * 1024
    else:
        count, max_parallel, pause_offset = 10, 5, 12 * 1024
    docname = 'data-1m'
    url = f'https://localhost:{env.https_port}/{docname}'
    client = LocalClient(name='h2-download', env=env)
    if not client.exists():
        pytest.skip(f'example client not built: {client.name}')
    # -a: abort paused transfers, -P: pause after `pause_offset` bytes
    run_args = [
        '-n', f'{count}', '-m', f'{max_parallel}', '-a',
        '-P', f'{pause_offset}', '-V', proto, url
    ]
    r = client.run(args=run_args)
    r.check_exit_code(0)
    srcfile = os.path.join(httpd.docs_dir, docname)
    # downloads should be there, but not necessarily complete
    self.check_downloads(client, srcfile, count, complete=False)
+
# download, several at a time, abort via progress callback after n bytes
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
def test_02_23b_lib_abort_offset(self, env: Env, httpd, nghttpx, proto, repeat):
    # h2 multiplexes many streams; http/1.1 gets a smaller configuration
    if proto == 'h2':
        count, max_parallel, abort_offset = 200, 100, 64 * 1024
    else:
        count, max_parallel, abort_offset = 10, 5, 12 * 1024
    docname = 'data-1m'
    url = f'https://localhost:{env.https_port}/{docname}'
    client = LocalClient(name='h2-download', env=env)
    if not client.exists():
        pytest.skip(f'example client not built: {client.name}')
    # -A: have the client's progress callback abort after `abort_offset` bytes
    run_args = [
        '-n', f'{count}', '-m', f'{max_parallel}', '-a',
        '-A', f'{abort_offset}', '-V', proto, url
    ]
    r = client.run(args=run_args)
    r.check_exit_code(0)
    srcfile = os.path.join(httpd.docs_dir, docname)
    # downloads should be there, but not necessarily complete
    self.check_downloads(client, srcfile, count, complete=False)
+
# download, several at a time, fail writing the response after n bytes
# (the comment previously said "abort", but this test uses -F, which makes
# the client's write callback return an error, not the abort path)
@pytest.mark.parametrize("proto", ['http/1.1', 'h2'])
def test_02_23c_lib_fail_offset(self, env: Env, httpd, nghttpx, proto, repeat):
    # h2 multiplexes many streams; http/1.1 gets a smaller configuration
    if proto == 'h2':
        count = 200
        max_parallel = 100
        fail_offset = 64 * 1024
    else:
        count = 10
        max_parallel = 5
        fail_offset = 12 * 1024
    docname = 'data-1m'
    url = f'https://localhost:{env.https_port}/{docname}'
    client = LocalClient(name='h2-download', env=env)
    if not client.exists():
        pytest.skip(f'example client not built: {client.name}')
    # -F: write callback returns CURL_WRITEFUNC_ERROR after `fail_offset` bytes
    r = client.run(args=[
        '-n', f'{count}', '-m', f'{max_parallel}', '-a',
        '-F', f'{fail_offset}', '-V', proto, url
    ])
    r.check_exit_code(0)
    srcfile = os.path.join(httpd.docs_dir, docname)
tofile=dfile,
n=1))
assert False, f'download {dfile} differs:\n{diff}'
+
# download via lib client, 1 at a time, pause/resume at different offsets
# NOTE(review): this is near-identical to test_02_21_lib_serial and the name
# still says "h2" although it is parametrized over all protocols -- confirm
# the duplicate is intentional
@pytest.mark.parametrize("pause_offset", [0, 10*1024, 100*1023, 640000])
@pytest.mark.parametrize("proto", ['http/1.1', 'h2', 'h3'])
def test_02_29_h2_lib_serial(self, env: Env, httpd, nghttpx, proto, pause_offset, repeat):
    # http/1.1 transfers run serially anyway; keep that variant short
    count = 2 if proto == 'http/1.1' else 10
    docname = 'data-10m'
    url = f'https://localhost:{env.https_port}/{docname}'
    client = LocalClient(name='h2-download', env=env)
    if not client.exists():
        pytest.skip(f'example client not built: {client.name}')
    run_args = ['-n', f'{count}', '-P', f'{pause_offset}', '-V', proto, url]
    r = client.run(args=run_args)
    r.check_exit_code(0)
    srcfile = os.path.join(httpd.docs_dir, docname)
    self.check_downloads(client, srcfile, count)
+