Fix a deadlock that can happen if two clients happen to open and byte-range-lock
two different files whose records in locking.tdb and brlock.tdb happen to sit on
the same hashchain.
The deadlock was introduced by commit
680c7907325b433856ac1dd916ab63e671fbe4ab. Before, we used share_mode_do_locked()
in do_lock() which meant we acquired a chainlock on locking.tdb before getting a
chainlock on brlock.tdb via brl_get_locks_for_locking(), so the TDB chainlock
order invariant was always upheld.
The following race between specific client requests led to the deadlock.
Client A) issues a byte-range-lock request on a file:
A1) glock locking.tdb (via _share_mode_do_locked_vfs_allowed())
A2) chainlock brlock.tdb (via brl_lock())
A3) attempt to chainlock locking.tdb (via share_mode_g_lock_dump())
[1]
Client B) opens a different (!) file:
B1) glock and chainlock locking.tdb (via _share_mode_entry_prepare_lock())
B2) attempt to chainlock brlock.tdb (via file_has_brlocks())
[2]
The glock from A1 is per record and hence doesn't synchronize with the glock
from B1 as it is for a different file and hence a different record, subsequently
A2 and A3 violate the lock order constraint.
To avoid the chainlock lock order violation in the second client we modify the
br-lock code to not take the brlock.tdb chainlock from step A2 via
br_get_locks() for the whole time we process the request. Instead we just fetch
the br-locks via br_get_locks_readonly(), so when running into
contend_level2_oplocks_begin_default() to check for leases and looking into
locking.tdb we don't hold a brlock.tdb chainlock.
Or in simpler terms, we only ever take at most one low-level TDB chainlock at a
time:
Byte-range-lock code calls share_mode_do_locked_brl(..., cb_fn, ...):
1) chainlock locking.tdb
2) glock locking.tdb (via share_mode_do_locked_vfs_allowed())
3) chainunlock locking.tdb
4) share_mode_do_locked_brl_fn() -> brl_get_locks_readonly_parse():
a) chainlock brlock.tdb
b) parse record and store in-memory copy
c) chainunlock brlock.tdb
5) run cb_fn()
6) chainlock brlock.tdb:
a) br_lck->record = dbwrap_fetch_locked(brlock_db, ...)
b) store modified br_lck from 5) via byte_range_lock_flush()
7) chainunlock brlock.tdb
8) chainlock locking.tdb
9) gunlock locking.tdb
10) chainunlock locking.tdb
All access to brlock.tdb is synchronized correctly via glocks on the locking.tdb
record of the file (step 2)), so operations still appear atomic to clients.
As a result of using share_mode_do_locked_brl(), the functions do_[un]lock() ->
brl_[un]lock() now loop over the same br_lck object in memory, avoiding
repeatedly fetching and storing the locks per loop.
[1]
Full SBT:
#0 0x00007fffa0cecbb0 in __pthread_mutex_lock_full () from /lib64/glibc-hwcaps/power9/libpthread-2.28.so
#1 0x00007fffa0a73cf8 in chain_mutex_lock (m=<optimized out>, m@entry=0x7fff9ae071b0, waitflag=<optimized out>, waitflag@entry=true) at ../../lib/tdb/common/mutex.c:182
#2 0x00007fffa0a7432c in tdb_mutex_lock (tdb=0x1543ba120, rw=<optimized out>, off=<optimized out>, len=<optimized out>, waitflag=<optimized out>, pret=0x7fffd7df3858) at ../../lib/tdb/common/mutex.c:234
#3 0x00007fffa0a6812c in fcntl_lock (waitflag=<optimized out>, len=1, off=376608, rw=0, tdb=0x1543ba120) at ../../lib/tdb/common/lock.c:200
#4 tdb_brlock (tdb=0x1543ba120, rw_type=<optimized out>, offset=<optimized out>, len=1, flags=<optimized out>) at ../../lib/tdb/common/lock.c:200
#5 0x00007fffa0a68af8 in tdb_nest_lock (flags=<optimized out>, ltype=0, offset=<optimized out>, tdb=0x1543ba120) at ../../lib/tdb/common/lock.c:390
#6 tdb_nest_lock (tdb=0x1543ba120, offset=<optimized out>, ltype=<optimized out>, flags=<optimized out>) at ../../lib/tdb/common/lock.c:336
#7 0x00007fffa0a69088 in tdb_lock_list (tdb=0x1543ba120, list=<optimized out>, ltype=<optimized out>, waitflag=<optimized out>) at ../../lib/tdb/common/lock.c:482
#8 0x00007fffa0a69198 in tdb_lock (tdb=0x1543ba120, list=<optimized out>, ltype=<optimized out>) at ../../lib/tdb/common/lock.c:500
#9 0x00007fffa0a64b50 in tdb_find_lock_hash (tdb=<optimized out>, tdb@entry=0x1543ba120, key=..., hash=<optimized out>, locktype=<optimized out>, locktype@entry=0, rec=<optimized out>, rec@entry=0x7fffd7df3ab0) at ../../lib/tdb/common/tdb.c:165
#10 0x00007fffa0a64ed0 in tdb_parse_record (tdb=0x1543ba120, key=..., parser=0x7fffa0e74470 <db_ctdb_ltdb_parser>, private_data=0x7fffd7df3b18) at ../../lib/tdb/common/tdb.c:329
#11 0x00007fffa0e74cbc in db_ctdb_ltdb_parse (db=<optimized out>, private_data=0x7fffd7df3b70, parser=0x7fffa0e76470 <db_ctdb_parse_record_parser_nonpersistent>, key=...) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:170
#12 db_ctdb_try_parse_local_record (ctx=ctx@entry=0x1543d4580, key=..., state=state@entry=0x7fffd7df3b70) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:1385
#13 0x00007fffa0e76024 in db_ctdb_parse_record (db=<optimized out>, key=..., parser=0x7fffa1313910 <dbwrap_watched_parse_record_parser>, private_data=0x7fffd7df3c08) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:1425
#14 0x00007fffa0884760 in dbwrap_parse_record (db=<optimized out>, key=..., parser=<optimized out>, private_data=<optimized out>) at ../../lib/dbwrap/dbwrap.c:454
#15 0x00007fffa1313ab4 in dbwrap_watched_parse_record (db=0x1543a7160, key=..., parser=0x7fffa13187d0 <g_lock_dump_fn>, private_data=0x7fffd7df3ce8) at ../../source3/lib/dbwrap/dbwrap_watch.c:783
#16 0x00007fffa0884760 in dbwrap_parse_record (db=<optimized out>, key=..., parser=<optimized out>, private_data=<optimized out>) at ../../lib/dbwrap/dbwrap.c:454
#17 0x00007fffa131c004 in g_lock_dump (ctx=<error reading variable: value has been optimized out>, key=..., fn=0x7fffa14f3d70 <fsp_update_share_mode_flags_fn>, private_data=0x7fffd7df3dd8) at ../../source3/lib/g_lock.c:1653
#18 0x00007fffa14f434c in share_mode_g_lock_dump (key=..., fn=0x7fffa14f3d70 <fsp_update_share_mode_flags_fn>, private_data=0x7fffd7df3dd8) at ../../source3/locking/share_mode_lock.c:96
#19 0x00007fffa14f8d44 in fsp_update_share_mode_flags (fsp=0x15433c550) at ../../source3/locking/share_mode_lock.c:1181
#20 file_has_read_lease (fsp=0x15433c550) at ../../source3/locking/share_mode_lock.c:1207
#21 0x00007fffa15ccc98 in contend_level2_oplocks_begin_default (type=<optimized out>, fsp=0x15433c550) at ../../source3/smbd/smb2_oplock.c:1282
#22 smbd_contend_level2_oplocks_begin (fsp=0x15433c550, type=<optimized out>) at ../../source3/smbd/smb2_oplock.c:1338
#23 0x00007fffa0dd0b54 in contend_level2_oplocks_begin (fsp=<optimized out>, type=<optimized out>) at ../../source3/lib/smbd_shim.c:72
#24 0x00007fffa14ecfd0 in brl_lock_windows_default (br_lck=0x154421330, plock=0x7fffd7df4250) at ../../source3/locking/brlock.c:457
#25 0x00007fffa150b70c in vfswrap_brl_lock_windows (handle=<optimized out>, br_lck=<optimized out>, plock=<optimized out>) at ../../source3/modules/vfs_default.c:3424
#26 0x00007fffa1561910 in smb_vfs_call_brl_lock_windows (handle=<optimized out>, br_lck=<optimized out>, plock=<optimized out>) at ../../source3/smbd/vfs.c:2686
#27 0x00007fff9c0a7350 in smb_time_audit_brl_lock_windows (handle=<optimized out>, br_lck=0x154421330, plock=0x7fffd7df4250) at ../../source3/modules/vfs_time_audit.c:1740
#28 0x00007fffa1561910 in smb_vfs_call_brl_lock_windows (handle=<optimized out>, br_lck=<optimized out>, plock=<optimized out>) at ../../source3/smbd/vfs.c:2686
#29 0x00007fffa14ed410 in brl_lock (br_lck=0x154421330, smblctx=
3102281601, pid=..., start=0, size=
18446744073709551615, lock_type=<optimized out>, lock_flav=WINDOWS_LOCK, blocker_pid=0x7fffd7df4540, psmblctx=0x7fffd7df4558) at ../../source3/locking/brlock.c:1004
#30 0x00007fffa14e7b18 in do_lock_fn (lck=<optimized out>, private_data=0x7fffd7df4508) at ../../source3/locking/locking.c:271
#31 0x00007fffa14fcd94 in _share_mode_do_locked_vfs_allowed (id=..., fn=0x7fffa14e7a60 <do_lock_fn>, private_data=0x7fffd7df4508, location=<optimized out>) at ../../source3/locking/share_mode_lock.c:2927
#32 0x00007fffa14e918c in do_lock (fsp=0x15433c550, req_mem_ctx=<optimized out>, req_guid=<optimized out>, smblctx=<optimized out>, count=
18446744073709551615, offset=0, lock_type=<optimized out>, lock_flav=<optimized out>, pblocker_pid=0x7fffd7df46f0,
psmblctx=0x7fffd7df46d8) at ../../source3/locking/locking.c:335
#33 0x00007fffa155381c in smbd_do_locks_try (fsp=0x15433c550, num_locks=<optimized out>, locks=0x1543bc310, blocker_idx=0x7fffd7df46d6, blocking_pid=0x7fffd7df46f0, blocking_smblctx=0x7fffd7df46d8) at ../../source3/smbd/blocking.c:46
#34 0x00007fffa159dc90 in smbd_smb2_lock_try (req=req@entry=0x1543bc080) at ../../source3/smbd/smb2_lock.c:590
#35 0x00007fffa159ee8c in smbd_smb2_lock_send (in_locks=<optimized out>, in_lock_count=1, in_lock_sequence=<optimized out>, fsp=0x15433c550, smb2req=0x1543532e0, ev=0x154328120, mem_ctx=0x1543532e0) at ../../source3/smbd/smb2_lock.c:488
#36 smbd_smb2_request_process_lock (req=0x1543532e0) at ../../source3/smbd/smb2_lock.c:150
#37 0x00007fffa158a368 in smbd_smb2_request_dispatch (req=0x1543532e0) at ../../source3/smbd/smb2_server.c:3515
#38 0x00007fffa158c540 in smbd_smb2_io_handler (fde_flags=<optimized out>, xconn=0x154313f30) at ../../source3/smbd/smb2_server.c:5112
#39 smbd_smb2_connection_handler (ev=<optimized out>, fde=<optimized out>, flags=<optimized out>, private_data=<optimized out>) at ../../source3/smbd/smb2_server.c:5150
#40 0x00007fffa1198b2c in tevent_common_invoke_fd_handler (fde=0x1543670f0, flags=<optimized out>, removed=0x0) at ../../lib/tevent/tevent_fd.c:158
#41 0x00007fffa11a2b9c in epoll_event_loop (tvalp=0x7fffd7df4b28, epoll_ev=0x1543b4e80) at ../../lib/tevent/tevent_epoll.c:730
#42 epoll_event_loop_once (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent_epoll.c:946
#43 0x00007fffa11a0090 in std_event_loop_once (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent_standard.c:110
#44 0x00007fffa119744c in _tevent_loop_once (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent.c:823
#45 0x00007fffa1197884 in tevent_common_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:950
#46 0x00007fffa119ffc0 in std_event_loop_wait (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent_standard.c:141
#47 0x00007fffa1197978 in _tevent_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:971
#48 0x00007fffa15737fc in smbd_process (ev_ctx=0x154328120, msg_ctx=<optimized out>, sock_fd=<optimized out>, interactive=<optimized out>) at ../../source3/smbd/smb2_process.c:2158
#49 0x000000011db5c554 in smbd_accept_connection (ev=0x154328120, fde=<optimized out>, flags=<optimized out>, private_data=<optimized out>) at ../../source3/smbd/server.c:1150
#50 0x00007fffa1198b2c in tevent_common_invoke_fd_handler (fde=0x1543ac2d0, flags=<optimized out>, removed=0x0) at ../../lib/tevent/tevent_fd.c:158
#51 0x00007fffa11a2b9c in epoll_event_loop (tvalp=0x7fffd7df4f98, epoll_ev=0x154328350) at ../../lib/tevent/tevent_epoll.c:730
#52 epoll_event_loop_once (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent_epoll.c:946
#53 0x00007fffa11a0090 in std_event_loop_once (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent_standard.c:110
#54 0x00007fffa119744c in _tevent_loop_once (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent.c:823
#55 0x00007fffa1197884 in tevent_common_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:950
#56 0x00007fffa119ffc0 in std_event_loop_wait (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent_standard.c:141
#57 0x00007fffa1197978 in _tevent_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:971
#58 0x000000011db58c54 in smbd_parent_loop (parent=<optimized out>, ev_ctx=0x154328120) at ../../source3/smbd/server.c:1499
#59 main (argc=<optimized out>, argv=<optimized out>) at ../../source3/smbd/server.c:2258
[2]
Full SBT:
#0 0x00007fffa0cecbb0 in __pthread_mutex_lock_full () from /lib64/glibc-hwcaps/power9/libpthread-2.28.so
#1 0x00007fffa0a73cf8 in chain_mutex_lock (m=<optimized out>, m@entry=0x7fff9b3a71b0, waitflag=<optimized out>, waitflag@entry=true) at ../../lib/tdb/common/mutex.c:182
#2 0x00007fffa0a7432c in tdb_mutex_lock (tdb=0x1543c6900, rw=<optimized out>, off=<optimized out>, len=<optimized out>, waitflag=<optimized out>, pret=0x7fffd7df2e28) at ../../lib/tdb/common/mutex.c:234
#3 0x00007fffa0a6812c in fcntl_lock (waitflag=<optimized out>, len=1, off=376608, rw=0, tdb=0x1543c6900) at ../../lib/tdb/common/lock.c:200
#4 tdb_brlock (tdb=0x1543c6900, rw_type=<optimized out>, offset=<optimized out>, len=1, flags=<optimized out>) at ../../lib/tdb/common/lock.c:200
#5 0x00007fffa0a68af8 in tdb_nest_lock (flags=<optimized out>, ltype=0, offset=<optimized out>, tdb=0x1543c6900) at ../../lib/tdb/common/lock.c:390
#6 tdb_nest_lock (tdb=0x1543c6900, offset=<optimized out>, ltype=<optimized out>, flags=<optimized out>) at ../../lib/tdb/common/lock.c:336
#7 0x00007fffa0a69088 in tdb_lock_list (tdb=0x1543c6900, list=<optimized out>, ltype=<optimized out>, waitflag=<optimized out>) at ../../lib/tdb/common/lock.c:482
#8 0x00007fffa0a69198 in tdb_lock (tdb=0x1543c6900, list=<optimized out>, ltype=<optimized out>) at ../../lib/tdb/common/lock.c:500
#9 0x00007fffa0a64b50 in tdb_find_lock_hash (tdb=<optimized out>, tdb@entry=0x1543c6900, key=..., hash=<optimized out>, locktype=<optimized out>, locktype@entry=0, rec=<optimized out>, rec@entry=0x7fffd7df3080) at ../../lib/tdb/common/tdb.c:165
#10 0x00007fffa0a64ed0 in tdb_parse_record (tdb=0x1543c6900, key=..., parser=0x7fffa0e74470 <db_ctdb_ltdb_parser>, private_data=0x7fffd7df30e8) at ../../lib/tdb/common/tdb.c:329
#11 0x00007fffa0e74cbc in db_ctdb_ltdb_parse (db=<optimized out>, private_data=0x7fffd7df3140, parser=0x7fffa0e76470 <db_ctdb_parse_record_parser_nonpersistent>, key=...) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:170
#12 db_ctdb_try_parse_local_record (ctx=ctx@entry=0x154328fc0, key=..., state=state@entry=0x7fffd7df3140) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:1385
#13 0x00007fffa0e76024 in db_ctdb_parse_record (db=<optimized out>, key=..., parser=0x7fffa14ec820 <brl_get_locks_readonly_parser>, private_data=0x7fffd7df3218) at ../../source3/lib/dbwrap/dbwrap_ctdb.c:1425
#14 0x00007fffa0884760 in dbwrap_parse_record (db=<optimized out>, key=..., parser=<optimized out>, private_data=<optimized out>) at ../../lib/dbwrap/dbwrap.c:454
#15 0x00007fffa14ef5bc in brl_get_locks_readonly (fsp=0x1543d01e0) at ../../source3/locking/brlock.c:1884
#16 0x00007fffa1546968 in file_has_brlocks (fsp=0x1543d01e0) at ../../source3/smbd/open.c:2232
#17 delay_for_oplock (pgranted=<synthetic pointer>, poplock_type=<synthetic pointer>, first_open_attempt=<optimized out>, create_disposition=1, have_sharing_violation=false, lck=0x7fffd7df3ce8, lease=0x0, oplock_request=0, fsp=0x1543d01e0) at ../../source3/smbd/open.c:2749
#18 handle_share_mode_lease (pgranted=<synthetic pointer>, poplock_type=<synthetic pointer>, first_open_attempt=<optimized out>, lease=0x0, oplock_request=0, share_access=7, access_mask=131201, create_disposition=1, lck=0x7fffd7df3ce8, fsp=0x1543d01e0) at ../../source3/smbd/open.c:2865
#19 check_and_store_share_mode (first_open_attempt=<optimized out>, lease=0x0, oplock_request=0, share_access=7, access_mask=131201, create_disposition=1, lck=0x7fffd7df3ce8, req=0x154414800, fsp=0x1543d01e0) at ../../source3/smbd/open.c:3333
#20 open_ntcreate_lock_add_entry (lck=0x7fffd7df3ce8, keep_locked=0x7fffd7df3ad0, private_data=0x7fffd7df3cc8) at ../../source3/smbd/open.c:3688
#21 0x00007fffa14f6248 in share_mode_entry_prepare_lock_fn (glck=0x7fffd7df35b8, cb_private=0x7fffd7df3a88) at ../../source3/locking/share_mode_lock.c:2978
#22 0x00007fffa1317680 in g_lock_lock_cb_run_and_store (cb_state=cb_state@entry=0x7fffd7df35b8) at ../../source3/lib/g_lock.c:597
#23 0x00007fffa1319df8 in g_lock_lock_simple_fn (rec=0x7fffd7df3798, value=..., private_data=0x7fffd7df39a0) at ../../source3/lib/g_lock.c:1212
#24 0x00007fffa13160e0 in dbwrap_watched_do_locked_fn (backend_rec=<optimized out>, backend_value=..., private_data=0x7fffd7df3768) at ../../source3/lib/dbwrap/dbwrap_watch.c:458
#25 0x00007fffa0884e48 in dbwrap_do_locked (db=<optimized out>, key=..., fn=0x7fffa1316080 <dbwrap_watched_do_locked_fn>, private_data=0x7fffd7df3768) at ../../lib/dbwrap/dbwrap.c:602
#26 0x00007fffa1315274 in dbwrap_watched_do_locked (db=0x1543a7160, key=..., fn=0x7fffa1319ca0 <g_lock_lock_simple_fn>, private_data=0x7fffd7df39a0) at ../../source3/lib/dbwrap/dbwrap_watch.c:480
#27 0x00007fffa0884d60 in dbwrap_do_locked (db=<optimized out>, key=..., fn=<optimized out>, private_data=<optimized out>) at ../../lib/dbwrap/dbwrap.c:582
#28 0x00007fffa131b458 in g_lock_lock (ctx=0x1543cc630, key=..., type=<optimized out>, timeout=..., cb_fn=0x7fffa14f6190 <share_mode_entry_prepare_lock_fn>, cb_private=0x7fffd7df3a88) at ../../source3/lib/g_lock.c:1267
#29 0x00007fffa14fd060 in _share_mode_entry_prepare_lock (prepare_state=0x7fffd7df3cc8, id=..., servicepath=<optimized out>, smb_fname=<optimized out>, old_write_time=<optimized out>, fn=<optimized out>, private_data=0x7fffd7df3cc8, location=0x7fffa165b880 "../../source3/smbd/open.c:4292") at ../../source3/locking/share_mode_lock.c:3033
#30 0x00007fffa15491e0 in open_file_ntcreate (conn=conn@entry=0x154382050, req=req@entry=0x154414800, access_mask=<optimized out>, access_mask@entry=131201, share_access=share_access@entry=7, create_disposition=create_disposition@entry=1, create_options=create_options@entry=0, new_dos_attributes=<optimized out>, new_dos_attributes@entry=128, oplock_request=oplock_request@entry=0, lease=<optimized out>, lease@entry=0x0, private_flags=<optimized out>, private_flags@entry=0, parent_dir_fname=<optimized out>, smb_fname_atname=<optimized out>, pinfo=<optimized out>, pinfo@entry=0x7fffd7df3f1c, fsp=<optimized out>, fsp@entry=0x1543d01e0) at ../../source3/smbd/open.c:4286
#31 0x00007fffa154b94c in create_file_unixpath (conn=conn@entry=0x154382050, req=req@entry=0x154414800, dirfsp=dirfsp@entry=0x15439a7f0, smb_fname=smb_fname@entry=0x154416300, access_mask=access_mask@entry=131201, share_access=share_access@entry=7, create_disposition=create_disposition@entry=1, create_options=create_options@entry=0, file_attributes=file_attributes@entry=128, oplock_request=<optimized out>, oplock_request@entry=0, lease=<optimized out>, lease@entry=0x0, allocation_size=allocation_size@entry=0, private_flags=private_flags@entry=0, sd=sd@entry=0x0, ea_list=ea_list@entry=0x0, result=result@entry=0x7fffd7df4168, pinfo=pinfo@entry=0x7fffd7df4160) at ../../source3/smbd/open.c:6290
#32 0x00007fffa154dfac in create_file_default (conn=0x154382050, req=0x154414800, dirfsp=0x15439a7f0, smb_fname=0x154416300, access_mask=<optimized out>, share_access=<optimized out>, create_disposition=<optimized out>, create_options=<optimized out>, file_attributes=128, oplock_request=0, lease=0x0, allocation_size=0, private_flags=0, sd=0x0, ea_list=0x0, result=0x1544144e8, pinfo=0x1544144fc, in_context_blobs=0x7fffd7df4798, out_context_blobs=0x154414710) at ../../source3/smbd/open.c:6609
#33 0x00007fffa150972c in vfswrap_create_file (handle=<optimized out>, req=<optimized out>, dirfsp=<optimized out>, smb_fname=<optimized out>, access_mask=<optimized out>, share_access=<optimized out>, create_disposition=<optimized out>, create_options=<optimized out>, file_attributes=128, oplock_request=0, lease=0x0, allocation_size=0, private_flags=0, sd=0x0, ea_list=0x0, result=0x1544144e8, pinfo=0x1544144fc, in_context_blobs=0x7fffd7df4798, out_context_blobs=0x154414710) at ../../source3/modules/vfs_default.c:776
#34 0x00007fffa1559cbc in smb_vfs_call_create_file (handle=<optimized out>, req=<optimized out>, dirfsp=<optimized out>, smb_fname=<optimized out>, access_mask=<optimized out>, share_access=<optimized out>, create_disposition=<optimized out>, create_options=<optimized out>, file_attributes=128, oplock_request=0, lease=0x0, allocation_size=0, private_flags=0, sd=0x0, ea_list=0x0, result=0x1544144e8, pinfo=0x1544144fc, in_context_blobs=0x7fffd7df4798, out_context_blobs=0x154414710) at ../../source3/smbd/vfs.c:1560
#35 0x00007fff9c0a9ec4 in smb_time_audit_create_file (handle=0x154426820, req=0x154414800, dirfsp=0x15439a7f0, fname=0x154416300, access_mask=<optimized out>, share_access=<optimized out>, create_disposition=<optimized out>, create_options=<optimized out>, file_attributes=128, oplock_request=0, lease=0x0, allocation_size=0, private_flags=0, sd=0x0, ea_list=0x0, result_fsp=0x1544144e8, pinfo=0x1544144fc, in_context_blobs=0x7fffd7df4798, out_context_blobs=0x154414710) at ../../source3/modules/vfs_time_audit.c:634
#36 0x00007fffa1559cbc in smb_vfs_call_create_file (handle=<optimized out>, req=<optimized out>, dirfsp=<optimized out>, smb_fname=<optimized out>, access_mask=<optimized out>, share_access=<optimized out>, create_disposition=<optimized out>, create_options=<optimized out>, file_attributes=128, oplock_request=0, lease=0x0, allocation_size=0, private_flags=0, sd=0x0, ea_list=0x0, result=0x1544144e8, pinfo=0x1544144fc, in_context_blobs=0x7fffd7df4798, out_context_blobs=0x154414710) at ../../source3/smbd/vfs.c:1560
#37 0x00007fffa1597aa8 in smbd_smb2_create_send (in_context_blobs=..., in_name=0x154413ca0, in_create_options=<optimized out>, in_create_disposition=<optimized out>, in_share_access=<optimized out>, in_file_attributes=<optimized out>, in_desired_access=<optimized out>, in_impersonation_level=<optimized out>, in_oplock_level=<optimized out>, smb2req=0x154413770, ev=0x154328120, mem_ctx=0x154413770) at ../../source3/smbd/smb2_create.c:1115
#38 smbd_smb2_request_process_create (smb2req=0x154413770) at ../../source3/smbd/smb2_create.c:291
#39 0x00007fffa158a628 in smbd_smb2_request_dispatch (req=0x154413770) at ../../source3/smbd/smb2_server.c:3485
#40 0x00007fffa158c540 in smbd_smb2_io_handler (fde_flags=<optimized out>, xconn=0x154313f30) at ../../source3/smbd/smb2_server.c:5112
#41 smbd_smb2_connection_handler (ev=<optimized out>, fde=<optimized out>, flags=<optimized out>, private_data=<optimized out>) at ../../source3/smbd/smb2_server.c:5150
#42 0x00007fffa1198b2c in tevent_common_invoke_fd_handler (fde=0x15435add0, flags=<optimized out>, removed=0x0) at ../../lib/tevent/tevent_fd.c:158
#43 0x00007fffa11a2b9c in epoll_event_loop (tvalp=0x7fffd7df4b28, epoll_ev=0x1543b4e80) at ../../lib/tevent/tevent_epoll.c:730
#44 epoll_event_loop_once (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent_epoll.c:946
#45 0x00007fffa11a0090 in std_event_loop_once (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent_standard.c:110
#46 0x00007fffa119744c in _tevent_loop_once (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent.c:823
#47 0x00007fffa1197884 in tevent_common_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:950
#48 0x00007fffa119ffc0 in std_event_loop_wait (ev=0x154328120, location=0x7fffa1668db8 "../../source3/smbd/smb2_process.c:2158") at ../../lib/tevent/tevent_standard.c:141
#49 0x00007fffa1197978 in _tevent_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:971
#50 0x00007fffa15737fc in smbd_process (ev_ctx=0x154328120, msg_ctx=<optimized out>, sock_fd=<optimized out>, interactive=<optimized out>) at ../../source3/smbd/smb2_process.c:2158
#51 0x000000011db5c554 in smbd_accept_connection (ev=0x154328120, fde=<optimized out>, flags=<optimized out>, private_data=<optimized out>) at ../../source3/smbd/server.c:1150
#52 0x00007fffa1198b2c in tevent_common_invoke_fd_handler (fde=0x1543ac2d0, flags=<optimized out>, removed=0x0) at ../../lib/tevent/tevent_fd.c:158
#53 0x00007fffa11a2b9c in epoll_event_loop (tvalp=0x7fffd7df4f98, epoll_ev=0x154328350) at ../../lib/tevent/tevent_epoll.c:730
#54 epoll_event_loop_once (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent_epoll.c:946
#55 0x00007fffa11a0090 in std_event_loop_once (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent_standard.c:110
#56 0x00007fffa119744c in _tevent_loop_once (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent.c:823
#57 0x00007fffa1197884 in tevent_common_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:950
#58 0x00007fffa119ffc0 in std_event_loop_wait (ev=0x154328120, location=0x11db60b50 "../../source3/smbd/server.c:1499") at ../../lib/tevent/tevent_standard.c:141
#59 0x00007fffa1197978 in _tevent_loop_wait (ev=<optimized out>, location=<optimized out>) at ../../lib/tevent/tevent.c:971
#60 0x000000011db58c54 in smbd_parent_loop (parent=<optimized out>, ev_ctx=0x154328120) at ../../source3/smbd/server.c:1499
#61 main (argc=<optimized out>, argv=<optimized out>) at ../../source3/smbd/server.c:2258
BUG: https://bugzilla.samba.org/show_bug.cgi?id=15767
Pair-Programmed-With: Stefan Metzmacher <metze@samba.org>
Signed-off-by: Ralph Boehme <slow@samba.org>
Signed-off-by: Stefan Metzmacher <metze@samba.org>
(cherry picked from commit
2eef298ff4c5baf15c7d29c65fb021dbed5b0a93)
Utility function called by locking requests.
****************************************************************************/
-struct do_lock_state {
- struct files_struct *fsp;
- TALLOC_CTX *req_mem_ctx;
- const struct GUID *req_guid;
- uint64_t smblctx;
- uint64_t count;
- uint64_t offset;
- enum brl_type lock_type;
- enum brl_flavour lock_flav;
-
- struct server_id blocker_pid;
- uint64_t blocker_smblctx;
- NTSTATUS status;
-};
-
-static void do_lock_fn(
- struct share_mode_lock *lck,
- void *private_data)
-{
- struct do_lock_state *state = private_data;
- struct byte_range_lock *br_lck = NULL;
-
- br_lck = brl_get_locks_for_locking(talloc_tos(),
- state->fsp,
- state->req_mem_ctx,
- state->req_guid);
- if (br_lck == NULL) {
- state->status = NT_STATUS_NO_MEMORY;
- return;
- }
-
- state->status = brl_lock(
- br_lck,
- state->smblctx,
- messaging_server_id(state->fsp->conn->sconn->msg_ctx),
- state->offset,
- state->count,
- state->lock_type,
- state->lock_flav,
- &state->blocker_pid,
- &state->blocker_smblctx);
-
- TALLOC_FREE(br_lck);
-}
-
-NTSTATUS do_lock(files_struct *fsp,
+NTSTATUS do_lock(struct byte_range_lock *br_lck,
TALLOC_CTX *req_mem_ctx,
const struct GUID *req_guid,
uint64_t smblctx,
struct server_id *pblocker_pid,
uint64_t *psmblctx)
{
- struct do_lock_state state = {
- .fsp = fsp,
- .req_mem_ctx = req_mem_ctx,
- .req_guid = req_guid,
- .smblctx = smblctx,
- .count = count,
- .offset = offset,
- .lock_type = lock_type,
- .lock_flav = lock_flav,
- };
+ files_struct *fsp = brl_fsp(br_lck);
+ struct server_id blocker_pid;
+ uint64_t blocker_smblctx;
NTSTATUS status;
- /* silently return ok on print files as we don't do locking there */
- if (fsp->print_file) {
- return NT_STATUS_OK;
- }
+ SMB_ASSERT(req_mem_ctx != NULL);
+ SMB_ASSERT(req_guid != NULL);
if (!fsp->fsp_flags.can_lock) {
if (fsp->fsp_flags.is_directory) {
fsp_fnum_dbg(fsp),
fsp_str_dbg(fsp));
- status = share_mode_do_locked_vfs_allowed(fsp->file_id,
- do_lock_fn,
- &state);
- if (!NT_STATUS_IS_OK(status)) {
- DBG_DEBUG("share_mode_do_locked returned %s\n",
- nt_errstr(status));
- return status;
- }
- if (!NT_STATUS_IS_OK(state.status)) {
- DBG_DEBUG("do_lock_fn returned %s\n",
- nt_errstr(state.status));
+ brl_req_set(br_lck, req_mem_ctx, req_guid);
+ status = brl_lock(br_lck,
+ smblctx,
+ messaging_server_id(fsp->conn->sconn->msg_ctx),
+ offset,
+ count,
+ lock_type,
+ lock_flav,
+ &blocker_pid,
+ &blocker_smblctx);
+ brl_req_set(br_lck, NULL, NULL);
+ if (!NT_STATUS_IS_OK(status)) {
+ DBG_DEBUG("brl_lock failed: %s\n", nt_errstr(status));
if (psmblctx != NULL) {
- *psmblctx = state.blocker_smblctx;
+ *psmblctx = blocker_smblctx;
}
if (pblocker_pid != NULL) {
- *pblocker_pid = state.blocker_pid;
+ *pblocker_pid = blocker_pid;
}
- return state.status;
- }
+ return status;
+ }
increment_current_lock_count(fsp, lock_flav);
Utility function called by unlocking requests.
****************************************************************************/
-NTSTATUS do_unlock(files_struct *fsp,
+NTSTATUS do_unlock(struct byte_range_lock *br_lck,
uint64_t smblctx,
uint64_t count,
uint64_t offset,
enum brl_flavour lock_flav)
{
+ files_struct *fsp = brl_fsp(br_lck);
bool ok = False;
- struct byte_range_lock *br_lck = NULL;
if (!fsp->fsp_flags.can_lock) {
return fsp->fsp_flags.is_directory ?
fsp_fnum_dbg(fsp),
fsp_str_dbg(fsp));
- br_lck = brl_get_locks(talloc_tos(), fsp);
- if (!br_lck) {
- return NT_STATUS_NO_MEMORY;
- }
-
ok = brl_unlock(br_lck,
smblctx,
messaging_server_id(fsp->conn->sconn->msg_ctx),
count,
lock_flav);
- TALLOC_FREE(br_lck);
-
if (!ok) {
DEBUG(10,("do_unlock: returning ERRlock.\n" ));
return NT_STATUS_RANGE_NOT_LOCKED;
uint64_t *poffset,
enum brl_type *plock_type,
enum brl_flavour lock_flav);
-NTSTATUS do_lock(files_struct *fsp,
+NTSTATUS do_lock(struct byte_range_lock *br_lck,
TALLOC_CTX *req_mem_ctx,
const struct GUID *req_guid,
uint64_t smblctx,
enum brl_flavour lock_flav,
struct server_id *pblocker_pid,
uint64_t *psmblctx);
-NTSTATUS do_unlock(files_struct *fsp,
+NTSTATUS do_unlock(struct byte_range_lock *br_lck,
uint64_t smblctx,
uint64_t count,
uint64_t offset,
return false;
}
-static NTSTATUS fruit_check_access(vfs_handle_struct *handle,
- files_struct *fsp,
- uint32_t access_mask,
- uint32_t share_mode)
+struct check_access_state {
+ NTSTATUS status;
+ files_struct *fsp;
+ uint32_t access_mask;
+ uint32_t share_mode;
+};
+
+static void fruit_check_access(struct share_mode_lock *lck,
+ struct byte_range_lock *br_lck,
+ void *private_data)
{
+ struct check_access_state *state = private_data;
+ files_struct *fsp = state->fsp;
+ uint32_t access_mask = state->access_mask;
+ uint32_t share_mode = state->share_mode;
NTSTATUS status = NT_STATUS_OK;
off_t off;
bool share_for_read = (share_mode & FILE_SHARE_READ);
/* FIXME: hardcoded data fork, add resource fork */
enum apple_fork fork_type = APPLE_FORK_DATA;
+ /*
+ * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
+ * br_lck has to be there!
+ */
+ SMB_ASSERT(br_lck != NULL);
+
+ state->status = NT_STATUS_OK;
+
DBG_DEBUG("%s, am: %s/%s, sm: 0x%x\n",
fsp_str_dbg(fsp),
access_mask & FILE_READ_DATA ? "READ" :"-",
share_mode);
if (fsp_get_io_fd(fsp) == -1) {
- return NT_STATUS_OK;
+ return;
}
/* Read NetATalk opens and deny modes on the file. */
/* If there are any conflicts - sharing violation. */
if ((access_mask & FILE_READ_DATA) &&
netatalk_already_open_with_deny_read) {
- return NT_STATUS_SHARING_VIOLATION;
+ state->status = NT_STATUS_SHARING_VIOLATION;
+ return;
}
if (!share_for_read &&
netatalk_already_open_for_reading) {
- return NT_STATUS_SHARING_VIOLATION;
+ state->status = NT_STATUS_SHARING_VIOLATION;
+ return;
}
if ((access_mask & FILE_WRITE_DATA) &&
netatalk_already_open_with_deny_write) {
- return NT_STATUS_SHARING_VIOLATION;
+ state->status = NT_STATUS_SHARING_VIOLATION;
+ return;
}
if (!share_for_write &&
netatalk_already_open_for_writing) {
- return NT_STATUS_SHARING_VIOLATION;
+ state->status = NT_STATUS_SHARING_VIOLATION;
+ return;
}
if (!(access_mask & FILE_READ_DATA)) {
* Nothing we can do here, we need read access
* to set locks.
*/
- return NT_STATUS_OK;
+ return;
}
/* Set NetAtalk locks matching our access */
if (access_mask & FILE_READ_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_READ_DATA);
req_guid.time_hi_and_version = __LINE__;
+
status = do_lock(
- fsp,
+ br_lck,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
POSIX_LOCK,
NULL,
NULL);
-
if (!NT_STATUS_IS_OK(status)) {
- return status;
+ state->status = status;
+ return;
}
}
if (!share_for_read) {
off = denymode_to_netatalk_brl(fork_type, DENY_READ);
req_guid.time_hi_and_version = __LINE__;
+
status = do_lock(
- fsp,
+ br_lck,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
POSIX_LOCK,
NULL,
NULL);
-
if (!NT_STATUS_IS_OK(status)) {
- return status;
+ state->status = status;
+ return;
}
}
if (access_mask & FILE_WRITE_DATA) {
off = access_to_netatalk_brl(fork_type, FILE_WRITE_DATA);
req_guid.time_hi_and_version = __LINE__;
+
status = do_lock(
- fsp,
+ br_lck,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
POSIX_LOCK,
NULL,
NULL);
-
if (!NT_STATUS_IS_OK(status)) {
- return status;
+ state->status = status;
+ return;
}
}
if (!share_for_write) {
off = denymode_to_netatalk_brl(fork_type, DENY_WRITE);
req_guid.time_hi_and_version = __LINE__;
+
status = do_lock(
- fsp,
+ br_lck,
talloc_tos(),
&req_guid,
fsp->op->global->open_persistent_id,
POSIX_LOCK,
NULL,
NULL);
-
if (!NT_STATUS_IS_OK(status)) {
- return status;
+ state->status = status;
+ return;
}
}
-
- return NT_STATUS_OK;
}
static NTSTATUS check_aapl(vfs_handle_struct *handle,
}
if ((config->locking == FRUIT_LOCKING_NETATALK) &&
+ lp_locking(fsp->conn->params) &&
+ fsp->fsp_flags.can_lock &&
(fsp->op != NULL) &&
!fsp->fsp_flags.is_pathref)
{
- status = fruit_check_access(
- handle, *result,
- access_mask,
- share_access);
+ struct check_access_state state = (struct check_access_state) {
+ .fsp = fsp,
+ .access_mask = access_mask,
+ .share_mode = share_access,
+ };
+
+ status = share_mode_do_locked_brl(fsp,
+ fruit_check_access,
+ &state);
if (!NT_STATUS_IS_OK(status)) {
goto fail;
}
+ if (!NT_STATUS_IS_OK(state.status)) {
+ status = state.status;
+ goto fail;
+ }
}
return status;
#undef DBGC_CLASS
#define DBGC_CLASS DBGC_LOCKING
-NTSTATUS smbd_do_locks_try(
- struct files_struct *fsp,
- uint16_t num_locks,
- struct smbd_lock_element *locks,
- uint16_t *blocker_idx,
- struct server_id *blocking_pid,
- uint64_t *blocking_smblctx)
+NTSTATUS smbd_do_locks_try(struct byte_range_lock *br_lck,
+ struct smbd_do_locks_state *state)
{
- NTSTATUS status = NT_STATUS_OK;
+ bool unlock_ok;
uint16_t i;
+ NTSTATUS status = NT_STATUS_OK;
- for (i=0; i<num_locks; i++) {
- struct smbd_lock_element *e = &locks[i];
+ for (i = 0; i < state->num_locks; i++) {
+ struct smbd_lock_element *e = &state->locks[i];
status = do_lock(
- fsp,
- locks, /* req_mem_ctx */
+ br_lck,
+ state->locks, /* req_mem_ctx */
&e->req_guid,
e->smblctx,
e->count,
e->offset,
e->brltype,
e->lock_flav,
- blocking_pid,
- blocking_smblctx);
+ &state->blocking_pid,
+ &state->blocking_smblctx);
if (!NT_STATUS_IS_OK(status)) {
break;
}
return NT_STATUS_OK;
}
- *blocker_idx = i;
+ state->blocker_idx = i;
+ unlock_ok = true;
/*
* Undo the locks we successfully got
*/
for (i = i-1; i != UINT16_MAX; i--) {
- struct smbd_lock_element *e = &locks[i];
- do_unlock(fsp,
- e->smblctx,
- e->count,
- e->offset,
- e->lock_flav);
+ struct smbd_lock_element *e = &state->locks[i];
+ NTSTATUS ulstatus;
+
+ ulstatus = do_unlock(br_lck,
+ e->smblctx,
+ e->count,
+ e->offset,
+ e->lock_flav);
+ if (!NT_STATUS_IS_OK(ulstatus)) {
+ DBG_DEBUG("Failed to undo lock flavour %s lock "
+ "type %s start=%"PRIu64" len=%"PRIu64" "
+ "requested for file [%s]\n",
+ lock_flav_name(e->lock_flav),
+ lock_type_name(e->brltype),
+ e->offset,
+ e->count,
+ fsp_str_dbg(brl_fsp(br_lck)));
+ unlock_ok = false;
+ }
+ }
+ if (unlock_ok) {
+ brl_set_modified(br_lck, false);
}
return status;
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq);
static void smbd_smb1_blocked_locks_cleanup(
struct tevent_req *req, enum tevent_req_state req_state);
-static NTSTATUS smbd_smb1_do_locks_check(
- struct files_struct *fsp,
- uint16_t num_locks,
- struct smbd_lock_element *locks,
- uint16_t *blocker_idx,
- struct server_id *blocking_pid,
- uint64_t *blocking_smblctx);
static void smbd_smb1_do_locks_setup_timeout(
struct smbd_smb1_do_locks_state *state,
return NT_STATUS_OK;
}
-static NTSTATUS smbd_smb1_do_locks_check(
- struct files_struct *fsp,
- uint16_t num_locks,
- struct smbd_lock_element *locks,
- uint16_t *blocker_idx,
- struct server_id *blocking_pid,
- uint64_t *blocking_smblctx)
+static void smbd_smb1_do_locks_try_fn(struct share_mode_lock *lck,
+ struct byte_range_lock *br_lck,
+ void *private_data)
{
+ struct tevent_req *req = talloc_get_type_abort(
+ private_data, struct tevent_req);
+ struct smbd_smb1_do_locks_state *state = tevent_req_data(
+ req, struct smbd_smb1_do_locks_state);
+ struct smbd_do_locks_state brl_state;
+ struct files_struct *fsp = state->fsp;
struct tevent_req **blocked = fsp->blocked_smb1_lock_reqs;
size_t num_blocked = talloc_array_length(blocked);
- NTSTATUS status;
+ struct timeval endtime = { 0 };
+ struct tevent_req *subreq = NULL;
size_t bi;
+ NTSTATUS status;
+ bool ok;
+ bool expired;
+
+ /*
+ * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
+ * br_lck has to be there!
+ */
+ SMB_ASSERT(br_lck != NULL);
+
+ brl_state = (struct smbd_do_locks_state) {
+ .num_locks = state->num_locks,
+ .locks = state->locks,
+ };
/*
* We check the pending/blocked requests
tevent_req_data(blocked[bi],
struct smbd_smb1_do_locks_state);
- if (blocked_state->locks == locks) {
- SMB_ASSERT(blocked_state->num_locks == num_locks);
+ if (blocked_state->locks == state->locks) {
+ SMB_ASSERT(blocked_state->num_locks == state->num_locks);
/*
* We found ourself...
status = smbd_smb1_do_locks_check_blocked(
blocked_state->num_locks,
blocked_state->locks,
- num_locks,
- locks,
- blocker_idx,
- blocking_smblctx);
+ state->num_locks,
+ state->locks,
+ &brl_state.blocker_idx,
+ &brl_state.blocking_smblctx);
if (!NT_STATUS_IS_OK(status)) {
- *blocking_pid = messaging_server_id(
- fsp->conn->sconn->msg_ctx);
- return status;
+ brl_state.blocking_pid = messaging_server_id(
+ fsp->conn->sconn->msg_ctx);
+ goto check_retry;
}
}
- status = smbd_do_locks_try(
- fsp,
- num_locks,
- locks,
- blocker_idx,
- blocking_pid,
- blocking_smblctx);
- if (!NT_STATUS_IS_OK(status)) {
- return status;
- }
-
- return NT_STATUS_OK;
-}
-
-static void smbd_smb1_do_locks_try(struct tevent_req *req)
-{
- struct smbd_smb1_do_locks_state *state = tevent_req_data(
- req, struct smbd_smb1_do_locks_state);
- struct files_struct *fsp = state->fsp;
- struct share_mode_lock *lck;
- struct timeval endtime = { 0 };
- struct server_id blocking_pid = { 0 };
- uint64_t blocking_smblctx = 0;
- struct tevent_req *subreq = NULL;
- NTSTATUS status;
- bool ok;
- bool expired;
-
- lck = get_existing_share_mode_lock(state, fsp->file_id);
- if (tevent_req_nomem(lck, req)) {
- DBG_DEBUG("Could not get share mode lock\n");
- return;
- }
-
- status = smbd_smb1_do_locks_check(
- fsp,
- state->num_locks,
- state->locks,
- &state->blocker,
- &blocking_pid,
- &blocking_smblctx);
+ status = smbd_do_locks_try(br_lck, &brl_state);
if (NT_STATUS_IS_OK(status)) {
goto done;
}
+
+ state->blocker = brl_state.blocker_idx;
+
if (NT_STATUS_EQUAL(status, NT_STATUS_RETRY)) {
/*
* We got NT_STATUS_RETRY,
* locking.tdb may cause retries.
*/
- if (blocking_smblctx != UINT64_MAX) {
- SMB_ASSERT(blocking_smblctx == 0);
+ if (brl_state.blocking_smblctx != UINT64_MAX) {
+ SMB_ASSERT(brl_state.blocking_smblctx == 0);
goto setup_retry;
}
endtime = timeval_current_ofs_msec(state->retry_msecs);
goto setup_retry;
}
+
+check_retry:
if (!ERROR_WAS_LOCK_DENIED(status)) {
goto done;
}
smbd_smb1_do_locks_setup_timeout(state, &state->locks[state->blocker]);
DBG_DEBUG("timeout=%"PRIu32", blocking_smblctx=%"PRIu64"\n",
state->timeout,
- blocking_smblctx);
+ brl_state.blocking_smblctx);
/*
* The client specified timeout expired
endtime = state->endtime;
- if (blocking_smblctx == UINT64_MAX) {
+ if (brl_state.blocking_smblctx == UINT64_MAX) {
struct timeval tmp;
smbd_smb1_do_locks_update_polling_msecs(state);
setup_retry:
subreq = share_mode_watch_send(
- state, state->ev, &state->fsp->file_id, blocking_pid);
+ state, state->ev, &state->fsp->file_id, brl_state.blocking_pid);
if (tevent_req_nomem(subreq, req)) {
+ status = NT_STATUS_NO_MEMORY;
goto done;
}
- TALLOC_FREE(lck);
tevent_req_set_callback(subreq, smbd_smb1_do_locks_retry, req);
if (timeval_is_zero(&endtime)) {
}
return;
done:
- TALLOC_FREE(lck);
smbd_smb1_brl_finish_by_req(req, status);
}
+static void smbd_smb1_do_locks_try(struct tevent_req *req)
+{
+ struct smbd_smb1_do_locks_state *state = tevent_req_data(
+ req, struct smbd_smb1_do_locks_state);
+ NTSTATUS status;
+
+ if (!state->fsp->fsp_flags.can_lock) {
+ if (state->fsp->fsp_flags.is_directory) {
+ return smbd_smb1_brl_finish_by_req(req,
+ NT_STATUS_INVALID_DEVICE_REQUEST);
+ }
+ return smbd_smb1_brl_finish_by_req(req,
+ NT_STATUS_INVALID_HANDLE);
+ }
+
+ if (!lp_locking(state->fsp->conn->params)) {
+ return smbd_smb1_brl_finish_by_req(req, NT_STATUS_OK);
+ }
+
+ status = share_mode_do_locked_brl(state->fsp,
+ smbd_smb1_do_locks_try_fn,
+ req);
+ if (!NT_STATUS_IS_OK(status)) {
+ smbd_smb1_brl_finish_by_req(req, status);
+ return;
+ }
+ return;
+}
+
static void smbd_smb1_do_locks_retry(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
/* The following definitions come from smbd/blocking.c */
-NTSTATUS smbd_do_locks_try(
- struct files_struct *fsp,
- uint16_t num_locks,
- struct smbd_lock_element *locks,
- uint16_t *blocker_idx,
- struct server_id *blocking_pid,
- uint64_t *blocking_smblctx);
+struct smbd_do_locks_state {
+ uint16_t num_locks;
+ struct smbd_lock_element *locks;
+ NTSTATUS status;
+ uint16_t blocker_idx;
+ struct server_id blocking_pid;
+ uint64_t blocking_smblctx;
+};
+
+NTSTATUS smbd_do_locks_try(struct byte_range_lock *br_lck,
+ struct smbd_do_locks_state *state);
+
struct tevent_req *smbd_smb1_do_locks_send(
TALLOC_CTX *mem_ctx,
struct tevent_context *ev,
state->polling_msecs += v_min;
}
-static void smbd_smb2_lock_try(struct tevent_req *req)
+static void smbd_do_locks_try_fn(struct share_mode_lock *lck,
+ struct byte_range_lock *br_lck,
+ void *private_data)
{
+ struct tevent_req *req = talloc_get_type_abort(
+ private_data, struct tevent_req);
struct smbd_smb2_lock_state *state = tevent_req_data(
req, struct smbd_smb2_lock_state);
- struct share_mode_lock *lck = NULL;
- uint16_t blocker_idx;
- struct server_id blocking_pid = { 0 };
- uint64_t blocking_smblctx;
- NTSTATUS status;
+ struct smbd_do_locks_state brl_state;
struct tevent_req *subreq = NULL;
struct timeval endtime = { 0 };
+ NTSTATUS status;
- lck = get_existing_share_mode_lock(
- talloc_tos(), state->fsp->file_id);
- if (tevent_req_nomem(lck, req)) {
- return;
- }
+ /*
+ * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
+ * br_lck has to be there!
+ */
+ SMB_ASSERT(br_lck != NULL);
- status = smbd_do_locks_try(
- state->fsp,
- state->lock_count,
- state->locks,
- &blocker_idx,
- &blocking_pid,
- &blocking_smblctx);
+ brl_state = (struct smbd_do_locks_state) {
+ .num_locks = state->lock_count,
+ .locks = state->locks,
+ };
+
+ status = smbd_do_locks_try(br_lck, &brl_state);
if (NT_STATUS_IS_OK(status)) {
- TALLOC_FREE(lck);
tevent_req_done(req);
return;
}
* locking.tdb may cause retries.
*/
- if (blocking_smblctx != UINT64_MAX) {
- SMB_ASSERT(blocking_smblctx == 0);
+ if (brl_state.blocking_smblctx != UINT64_MAX) {
+ SMB_ASSERT(brl_state.blocking_smblctx == 0);
goto setup_retry;
}
status = NT_STATUS_LOCK_NOT_GRANTED;
}
if (!NT_STATUS_EQUAL(status, NT_STATUS_LOCK_NOT_GRANTED)) {
- TALLOC_FREE(lck);
tevent_req_nterror(req, status);
return;
}
state->retry_msecs = 0;
if (!state->blocking) {
- TALLOC_FREE(lck);
tevent_req_nterror(req, status);
return;
}
- if (blocking_smblctx == UINT64_MAX) {
+ if (brl_state.blocking_smblctx == UINT64_MAX) {
smbd_smb2_lock_update_polling_msecs(state);
DBG_DEBUG("Blocked on a posix lock. Retry in %"PRIu32" msecs\n",
DBG_DEBUG("Watching share mode lock\n");
subreq = share_mode_watch_send(
- state, state->ev, &state->fsp->file_id, blocking_pid);
- TALLOC_FREE(lck);
+ state, state->ev, &state->fsp->file_id, brl_state.blocking_pid);
if (tevent_req_nomem(subreq, req)) {
return;
}
}
}
+static void smbd_smb2_lock_try(struct tevent_req *req)
+{
+ struct smbd_smb2_lock_state *state = tevent_req_data(
+ req, struct smbd_smb2_lock_state);
+ NTSTATUS status;
+
+ if (!state->fsp->fsp_flags.can_lock) {
+ if (state->fsp->fsp_flags.is_directory) {
+ tevent_req_nterror(req,
+ NT_STATUS_INVALID_DEVICE_REQUEST);
+ return;
+ }
+ tevent_req_nterror(req, NT_STATUS_INVALID_HANDLE);
+ return;
+ }
+
+ if (!lp_locking(state->fsp->conn->params)) {
+ return tevent_req_done(req);
+ }
+
+ status = share_mode_do_locked_brl(state->fsp,
+ smbd_do_locks_try_fn,
+ req);
+ if (!NT_STATUS_IS_OK(status)) {
+ tevent_req_nterror(req, status);
+ return;
+ }
+}
+
static void smbd_smb2_lock_retry(struct tevent_req *subreq)
{
struct tevent_req *req = tevent_req_callback_data(
return offset;
}
-struct smbd_do_unlocking_state {
- struct files_struct *fsp;
- uint16_t num_ulocks;
- struct smbd_lock_element *ulocks;
- NTSTATUS status;
-};
-
-static void smbd_do_unlocking_fn(
- struct share_mode_lock *lck,
- void *private_data)
+static void smbd_do_unlocking_fn(struct share_mode_lock *lck,
+ struct byte_range_lock *br_lck,
+ void *private_data)
{
- struct smbd_do_unlocking_state *state = private_data;
- struct files_struct *fsp = state->fsp;
+ struct smbd_do_locks_state *state = private_data;
+ struct files_struct *fsp = brl_fsp(br_lck);
uint16_t i;
- for (i = 0; i < state->num_ulocks; i++) {
- struct smbd_lock_element *e = &state->ulocks[i];
+ /*
+ * The caller has checked fsp->fsp_flags.can_lock and lp_locking so
+ * br_lck has to be there!
+ */
+ SMB_ASSERT(br_lck != NULL);
+
+ for (i = 0; i < state->num_locks; i++) {
+ struct smbd_lock_element *e = &state->locks[i];
DBG_DEBUG("unlock start=%"PRIu64", len=%"PRIu64" for "
"pid %"PRIu64", file %s\n",
}
state->status = do_unlock(
- fsp, e->smblctx, e->count, e->offset, e->lock_flav);
+ br_lck, e->smblctx, e->count, e->offset, e->lock_flav);
DBG_DEBUG("do_unlock returned %s\n",
nt_errstr(state->status));
uint16_t num_ulocks,
struct smbd_lock_element *ulocks)
{
- struct smbd_do_unlocking_state state = {
- .fsp = fsp,
- .num_ulocks = num_ulocks,
- .ulocks = ulocks,
+ struct smbd_do_locks_state state = {
+ .num_locks = num_ulocks,
+ .locks = ulocks,
};
NTSTATUS status;
DBG_NOTICE("%s num_ulocks=%"PRIu16"\n", fsp_fnum_dbg(fsp), num_ulocks);
- status = share_mode_do_locked_vfs_allowed(
- fsp->file_id, smbd_do_unlocking_fn, &state);
+ if (!fsp->fsp_flags.can_lock) {
+ if (fsp->fsp_flags.is_directory) {
+ return NT_STATUS_INVALID_DEVICE_REQUEST;
+ }
+ return NT_STATUS_INVALID_HANDLE;
+ }
+
+ if (!lp_locking(fsp->conn->params)) {
+ return NT_STATUS_OK;
+ }
+ status = share_mode_do_locked_brl(fsp,
+ smbd_do_unlocking_fn,
+ &state);
if (!NT_STATUS_IS_OK(status)) {
- DBG_DEBUG("share_mode_do_locked_vfs_allowed failed: %s\n",
+ DBG_DEBUG("share_mode_do_locked_brl failed: %s\n",
nt_errstr(status));
return status;
}