From: Jeremy Allison
Date: Thu, 9 Nov 2017 20:48:15 +0000 (-0800)
Subject: s3: smbd: kernel oplocks. Replace retry_open() with setup_kernel_oplock_poll_open().
X-Git-Tag: samba-4.6.10~3
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=c64f58eda991c7d8fa16a0d921682b8e35717d66;p=thirdparty%2Fsamba.git

s3: smbd: kernel oplocks. Replace retry_open() with setup_kernel_oplock_poll_open().

If an O_NONBLOCK open fails with EWOULDBLOCK, this change makes smbd
retry the open every second until either the timeout expires or the
open succeeds.

If we're opening a file that has a kernel lease set by a non-smbd
process, this is the best we can do. Prior to this, smbd would block
on the second open of such a leased file (not using O_NONBLOCK), which
freezes active clients.

Regression test to follow.

BUG: https://bugzilla.samba.org/show_bug.cgi?id=13121

Signed-off-by: Jeremy Allison
Reviewed-by: Ralph Boehme

(cherry picked from commit 47c13fc10a2c9709e9511b2ffcf0e1004497887d)
---

diff --git a/source3/smbd/open.c b/source3/smbd/open.c
index fadc2ad86a2..5542b5586de 100644
--- a/source3/smbd/open.c
+++ b/source3/smbd/open.c
@@ -2368,19 +2368,40 @@ static void defer_open_done(struct tevent_req *req)
 }
 
 /**
- * Reschedule an open for immediate execution
+ * Actually attempt the kernel oplock polling open.
+ */
+
+static void kernel_oplock_poll_open_timer(struct tevent_context *ev,
+					  struct tevent_timer *te,
+					  struct timeval current_time,
+					  void *private_data)
+{
+	bool ok;
+	struct smb_request *req = (struct smb_request *)private_data;
+
+	ok = schedule_deferred_open_message_smb(req->xconn, req->mid);
+	if (!ok) {
+		exit_server("schedule_deferred_open_message_smb failed");
+	}
+	DBG_DEBUG("kernel_oplock_poll_open_timer fired. Retrying open!\n");
+}
+
+/**
+ * Reschedule an open for 1 second from now, if not timed out.
 **/
 
-static void retry_open(struct timeval request_time,
+static void setup_kernel_oplock_poll_open(struct timeval request_time,
 		       struct smb_request *req,
 		       struct file_id id)
 {
-	struct deferred_open_record *open_rec = NULL;
+	bool ok;
+	struct deferred_open_record *open_rec = NULL;
+	/* Maximum wait time. */
+	struct timeval timeout = timeval_set(OPLOCK_BREAK_TIMEOUT*2, 0);
 
-	DBG_DEBUG("request time [%s] mid [%" PRIu64 "] file_id [%s]\n",
-		  timeval_string(talloc_tos(), &request_time, false),
-		  req->mid,
-		  file_id_string_tos(&id));
+	if (request_timed_out(request_time, timeout)) {
+		return;
+	}
 
 	open_rec = deferred_open_record_create(false, false, id);
 	if (open_rec == NULL) {
@@ -2389,17 +2410,30 @@ static void retry_open(struct timeval request_time,
 
 	ok = push_deferred_open_message_smb(req,
 					    request_time,
-					    timeval_set(0, 0),
+					    timeout,
 					    id,
 					    open_rec);
 	if (!ok) {
 		exit_server("push_deferred_open_message_smb failed");
 	}
 
-	ok = schedule_deferred_open_message_smb(req->xconn, req->mid);
-	if (!ok) {
-		exit_server("schedule_deferred_open_message_smb failed");
+	/*
+	 * As this timer event is owned by req, it will
+	 * disappear if req is talloc_freed.
+	 */
+	open_rec->te = tevent_add_timer(req->sconn->ev_ctx,
+					req,
+					timeval_current_ofs(1, 0),
+					kernel_oplock_poll_open_timer,
+					req);
+	if (open_rec->te == NULL) {
+		exit_server("tevent_add_timer failed");
 	}
+
+	DBG_DEBUG("poll request time [%s] mid [%" PRIu64 "] file_id [%s]\n",
+		  timeval_string(talloc_tos(), &request_time, false),
+		  req->mid,
+		  file_id_string_tos(&id));
 }
 
 /****************************************************************************
@@ -3118,20 +3152,18 @@ static NTSTATUS open_file_ntcreate(connection_struct *conn,
 		flags2 &= ~(O_CREAT|O_TRUNC);
 	}
 
-	if (first_open_attempt && lp_kernel_oplocks(SNUM(conn))) {
+	if (lp_kernel_oplocks(SNUM(conn))) {
 		/*
 		 * With kernel oplocks the open breaking an oplock
 		 * blocks until the oplock holder has given up the
-		 * oplock or closed the file. We prevent this by first
+		 * oplock or closed the file. We prevent this by always
 		 * trying to open the file with O_NONBLOCK (see "man
-		 * fcntl" on Linux). For the second try, triggered by
-		 * an oplock break response, we do not need this
-		 * anymore.
+		 * fcntl" on Linux).
 		 *
-		 * This is true under the assumption that only Samba
-		 * requests kernel oplocks. Once someone else like
-		 * NFSv4 starts to use that API, we will have to
-		 * modify this by communicating with the NFSv4 server.
+		 * If a process that doesn't use the smbd open files
+		 * database or communication methods holds a kernel
+		 * oplock we must periodically poll for available open
+		 * using O_NONBLOCK.
 		 */
 		flags2 |= O_NONBLOCK;
 	}
@@ -3210,9 +3242,16 @@ static NTSTATUS open_file_ntcreate(connection_struct *conn,
 		lck = get_existing_share_mode_lock(talloc_tos(),
 						   fsp->file_id);
 		if (lck == NULL) {
-			retry_open(request_time, req, fsp->file_id);
-			DEBUG(10, ("No share mode lock found after "
-				   "EWOULDBLOCK, retrying sync\n"));
+			/*
+			 * No oplock from Samba around. Set up a poll every 1
+			 * second to retry a non-blocking open until the time
+			 * expires.
+			 */
+			setup_kernel_oplock_poll_open(request_time,
+						      req,
+						      fsp->file_id);
+			DBG_DEBUG("No Samba oplock around after EWOULDBLOCK. "
+				  "Retrying with poll\n");
 			return NT_STATUS_SHARING_VIOLATION;
 		}
 
@@ -3233,14 +3272,15 @@ static NTSTATUS open_file_ntcreate(connection_struct *conn,
 		}
 
 		/*
-		 * No oplock from Samba around. Immediately retry with
-		 * a blocking open.
+		 * No oplock from Samba around. Set up a poll every 1
+		 * second to retry a non-blocking open until the time
+		 * expires.
 		 */
-		retry_open(request_time, req, fsp->file_id);
+		setup_kernel_oplock_poll_open(request_time, req, fsp->file_id);
 
 		TALLOC_FREE(lck);
-		DEBUG(10, ("No Samba oplock around after EWOULDBLOCK. "
-			   "Retrying sync\n"));
+		DBG_DEBUG("No Samba oplock around after EWOULDBLOCK. "
+			  "Retrying with poll\n");
 		return NT_STATUS_SHARING_VIOLATION;
 	}
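
For readers unfamiliar with kernel leases, the standalone sketch below
(not Samba code) illustrates the behaviour the patch relies on. Per
"man fcntl" on Linux, an open() that would break a lease held by another
process fails immediately with EWOULDBLOCK when O_NONBLOCK is given, so
the opener can poll instead of blocking. smbd schedules the retries
asynchronously with tevent_add_timer() as in the patch above; the
synchronous loop, the poll_open() name and the timeout handling here are
only illustrative assumptions.

/*
 * Sketch only: retry an O_NONBLOCK open once per second until it either
 * succeeds, fails with a real error, or the timeout expires.
 */
#include <errno.h>
#include <fcntl.h>
#include <unistd.h>

int poll_open(const char *path, int flags, unsigned int timeout_secs)
{
	unsigned int waited = 0;

	for (;;) {
		int fd = open(path, flags | O_NONBLOCK);
		if (fd != -1) {
			return fd;		/* lease released, open succeeded */
		}
		if (errno != EWOULDBLOCK && errno != EAGAIN) {
			return -1;		/* genuine error, give up */
		}
		if (waited >= timeout_secs) {
			errno = EWOULDBLOCK;	/* still leased when time ran out */
			return -1;
		}
		sleep(1);			/* poll again in one second */
		waited++;
	}
}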