// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2016 Namjae Jeon <namjae.jeon@protocolfreedom.org>
 * Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/mutex.h>
#include <linux/freezer.h>
#include <linux/module.h>

#include "server.h"
#include "buffer_pool.h"
#include "smb_common.h"
#include "mgmt/ksmbd_ida.h"
#include "connection.h"
#include "transport_tcp.h"
#include "transport_rdma.h"

static DEFINE_MUTEX(init_lock);

static struct ksmbd_conn_ops default_conn_ops;

static LIST_HEAD(conn_list);
static DEFINE_RWLOCK(conn_list_lock);

/**
 * ksmbd_conn_free() - free resources of the connection instance
 *
 * @conn: connection instance to be cleaned up
 *
 * During the thread termination, the corresponding conn instance
 * resources (sock/memory) are released and finally the conn object is freed.
 */
void ksmbd_conn_free(struct ksmbd_conn *conn)
{
	write_lock(&conn_list_lock);
	list_del(&conn->conns_list);
	write_unlock(&conn_list_lock);

	ksmbd_free_request(conn->request_buf);
	ksmbd_ida_free(conn->async_ida);
	kfree(conn->preauth_info);
	kfree(conn);
}

/**
 * ksmbd_conn_alloc() - initialize a new connection instance
 *
 * Return: ksmbd_conn struct on success, otherwise NULL
 */
struct ksmbd_conn *ksmbd_conn_alloc(void)
{
	struct ksmbd_conn *conn;

	conn = kzalloc(sizeof(struct ksmbd_conn), GFP_KERNEL);
	if (!conn)
		return NULL;

	conn->need_neg = true;
	conn->status = KSMBD_SESS_NEW;
	conn->local_nls = load_nls("utf8");
	if (!conn->local_nls)
		conn->local_nls = load_nls_default();
	atomic_set(&conn->req_running, 0);
	atomic_set(&conn->r_count, 0);
	init_waitqueue_head(&conn->req_running_q);
	INIT_LIST_HEAD(&conn->conns_list);
	INIT_LIST_HEAD(&conn->sessions);
	INIT_LIST_HEAD(&conn->requests);
	INIT_LIST_HEAD(&conn->async_requests);
	spin_lock_init(&conn->request_lock);
	spin_lock_init(&conn->credits_lock);
	conn->async_ida = ksmbd_ida_alloc();

	write_lock(&conn_list_lock);
	list_add(&conn->conns_list, &conn_list);
	write_unlock(&conn_list_lock);
	return conn;
}

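/**
 * ksmbd_conn_lookup_dialect() - check for an existing connection from the same client
 * @c: connection instance whose ClientGUID is looked up
 *
 * Return: true if another connection on the global list carries the same
 * ClientGUID, otherwise false
 */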
bool ksmbd_conn_lookup_dialect(struct ksmbd_conn *c)
{
	struct ksmbd_conn *t;
	bool ret = false;

	read_lock(&conn_list_lock);
	list_for_each_entry(t, &conn_list, conns_list) {
		if (memcmp(t->ClientGUID, c->ClientGUID, SMB2_CLIENT_GUID_SIZE))
			continue;

		ret = true;
		break;
	}
	read_unlock(&conn_list_lock);
	return ret;
}

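/**
 * ksmbd_conn_enqueue_request() - add a request to the connection's request list
 * @work: smb work containing the request to be queued
 *
 * Every command except SMB2_CANCEL is linked onto conn->requests and bumps
 * the running-request counter so the connection can be drained later.
 */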
void ksmbd_conn_enqueue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct list_head *requests_queue = NULL;

	if (conn->ops->get_cmd_val(work) != SMB2_CANCEL_HE) {
		requests_queue = &conn->requests;
		work->syncronous = true;
	}

	if (requests_queue) {
		atomic_inc(&conn->req_running);
		spin_lock(&conn->request_lock);
		list_add_tail(&work->request_entry, requests_queue);
		spin_unlock(&conn->request_lock);
	}
}

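/**
 * ksmbd_conn_try_dequeue_request() - try to unlink a request from the connection
 * @work: smb work to be removed from the request lists
 *
 * The entry is only dropped when no multi-part response is still pending;
 * waiters on req_running_q are woken up in any case.
 *
 * Return: 0 if the request was dequeued (or was never queued), 1 otherwise
 */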
int ksmbd_conn_try_dequeue_request(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	int ret = 1;

	if (list_empty(&work->request_entry) &&
	    list_empty(&work->async_request_entry))
		return 0;

	atomic_dec(&conn->req_running);
	spin_lock(&conn->request_lock);
	if (!work->multiRsp) {
		list_del_init(&work->request_entry);
		if (work->syncronous == false)
			list_del_init(&work->async_request_entry);
		ret = 0;
	}
	spin_unlock(&conn->request_lock);

	wake_up_all(&conn->req_running_q);
	return ret;
}

static void ksmbd_conn_lock(struct ksmbd_conn *conn)
{
	mutex_lock(&conn->srv_mutex);
}

static void ksmbd_conn_unlock(struct ksmbd_conn *conn)
{
	mutex_unlock(&conn->srv_mutex);
}

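/**
 * ksmbd_conn_wait_idle() - wait until at most one request is in flight
 * @conn: connection instance
 */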
void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
{
	wait_event(conn->req_running_q, atomic_read(&conn->req_running) < 2);
}

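/**
 * ksmbd_conn_write() - send the response for an smb work over the transport
 * @work: smb work containing the response buffer(s)
 *
 * The response header, an optional transform header and an optional auxiliary
 * payload are gathered into a kvec array and handed to the transport's
 * writev() op while holding the connection mutex.
 *
 * Return: 0 on success, otherwise a negative error code
 */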
int ksmbd_conn_write(struct ksmbd_work *work)
{
	struct ksmbd_conn *conn = work->conn;
	struct smb_hdr *rsp_hdr = work->response_buf;
	size_t len = 0;
	int sent;
	struct kvec iov[3];
	int iov_idx = 0;

	ksmbd_conn_try_dequeue_request(work);
	if (!rsp_hdr) {
		ksmbd_err("NULL response header\n");
		return -EINVAL;
	}

	if (work->tr_buf) {
		iov[iov_idx] = (struct kvec) { work->tr_buf,
				sizeof(struct smb2_transform_hdr) };
		len += iov[iov_idx++].iov_len;
	}

	if (work->aux_payload_sz) {
		iov[iov_idx] = (struct kvec) { rsp_hdr, work->resp_hdr_sz };
		len += iov[iov_idx++].iov_len;
		iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
		len += iov[iov_idx++].iov_len;
	} else {
		if (work->tr_buf)
			iov[iov_idx].iov_len = work->resp_hdr_sz;
		else
			iov[iov_idx].iov_len = get_rfc1002_len(rsp_hdr) + 4;
		iov[iov_idx].iov_base = rsp_hdr;
		len += iov[iov_idx++].iov_len;
	}

	ksmbd_conn_lock(conn);
	sent = conn->transport->ops->writev(conn->transport, &iov[0],
					    iov_idx, len,
					    work->need_invalidate_rkey,
					    work->remote_key);
	ksmbd_conn_unlock(conn);

	if (sent < 0) {
		ksmbd_err("Failed to send message: %d\n", sent);
		return sent;
	}

	return 0;
}

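/**
 * ksmbd_conn_rdma_read() - read data from the client buffer via RDMA
 * @conn: connection instance
 * @buf: local buffer to read into
 * @buflen: local buffer length
 * @remote_key: RDMA key of the remote buffer
 * @remote_offset: offset within the remote buffer
 * @remote_len: length of the remote buffer
 *
 * Return: -EINVAL if the transport provides no rdma_read op, otherwise the
 * result of the transport's rdma_read
 */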
int ksmbd_conn_rdma_read(struct ksmbd_conn *conn,
			 void *buf, unsigned int buflen,
			 u32 remote_key, u64 remote_offset,
			 u32 remote_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_read)
		ret = conn->transport->ops->rdma_read(conn->transport,
						      buf, buflen,
						      remote_key, remote_offset,
						      remote_len);
	return ret;
}

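/**
 * ksmbd_conn_rdma_write() - write data to the client buffer via RDMA
 * @conn: connection instance
 * @buf: local buffer to write from
 * @buflen: local buffer length
 * @remote_key: RDMA key of the remote buffer
 * @remote_offset: offset within the remote buffer
 * @remote_len: length of the remote buffer
 *
 * Return: -EINVAL if the transport provides no rdma_write op, otherwise the
 * result of the transport's rdma_write
 */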
int ksmbd_conn_rdma_write(struct ksmbd_conn *conn,
			  void *buf, unsigned int buflen,
			  u32 remote_key, u64 remote_offset,
			  u32 remote_len)
{
	int ret = -EINVAL;

	if (conn->transport->ops->rdma_write)
		ret = conn->transport->ops->rdma_write(conn->transport,
						       buf, buflen,
						       remote_key, remote_offset,
						       remote_len);
	return ret;
}

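/**
 * ksmbd_conn_alive() - check whether the connection should keep running
 * @conn: connection instance
 *
 * Return: false if the server is shutting down, the session is exiting,
 * the handler thread was asked to stop, or the connection has been idle
 * longer than the configured deadtime with no open files; true otherwise
 */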
bool ksmbd_conn_alive(struct ksmbd_conn *conn)
{
	if (!ksmbd_server_running())
		return false;

	if (conn->status == KSMBD_SESS_EXITING)
		return false;

	if (kthread_should_stop())
		return false;

	if (atomic_read(&conn->stats.open_files_count) > 0)
		return true;

	/*
	 * Stop the current session if the time since the last client request
	 * exceeds the user-configured deadtime and the open file count is
	 * zero.
	 */
	if (server_conf.deadtime > 0 &&
	    time_after(jiffies, conn->last_active + server_conf.deadtime)) {
		ksmbd_debug(CONN, "No response from client in %lu minutes\n",
			    server_conf.deadtime / SMB_ECHO_INTERVAL);
		return false;
	}
	return true;
}

/**
 * ksmbd_conn_handler_loop() - session thread that receives new SMB requests
 * @p: connection instance
 *
 * One thread each per connection
 *
 * Return: 0 on success
 */
int ksmbd_conn_handler_loop(void *p)
{
	struct ksmbd_conn *conn = (struct ksmbd_conn *)p;
	struct ksmbd_transport *t = conn->transport;
	unsigned int pdu_size;
	char hdr_buf[4] = {0,};
	int size;

	mutex_init(&conn->srv_mutex);
	__module_get(THIS_MODULE);

	if (t->ops->prepare && t->ops->prepare(t))
		goto out;

	conn->last_active = jiffies;
	while (ksmbd_conn_alive(conn)) {
		if (try_to_freeze())
			continue;

		ksmbd_free_request(conn->request_buf);
		conn->request_buf = NULL;

		size = t->ops->read(t, hdr_buf, sizeof(hdr_buf));
		if (size != sizeof(hdr_buf))
			break;

		pdu_size = get_rfc1002_len(hdr_buf);
		ksmbd_debug(CONN, "RFC1002 header %u bytes\n", pdu_size);

		/* make sure we have enough to get to SMB header end */
		if (!ksmbd_pdu_size_has_room(pdu_size)) {
			ksmbd_debug(CONN, "SMB request too short (%u bytes)\n",
				    pdu_size);
			continue;
		}

		/* 4 for rfc1002 length field */
		size = pdu_size + 4;
		conn->request_buf = ksmbd_alloc_request(size);
		if (!conn->request_buf)
			continue;

		memcpy(conn->request_buf, hdr_buf, sizeof(hdr_buf));
		if (!ksmbd_smb_request(conn))
			break;

		/*
		 * We already read 4 bytes to find out PDU size, now
		 * read in PDU
		 */
		size = t->ops->read(t, conn->request_buf + 4, pdu_size);
		if (size < 0) {
			ksmbd_err("sock_read failed: %d\n", size);
			break;
		}

		if (size != pdu_size) {
			ksmbd_err("PDU error. Read: %d, Expected: %d\n",
				  size,
				  pdu_size);
			continue;
		}

		if (!default_conn_ops.process_fn) {
			ksmbd_err("No connection request callback\n");
			break;
		}

		if (default_conn_ops.process_fn(conn)) {
			ksmbd_err("Cannot handle request\n");
			break;
		}
	}

out:
	/* Wait till all references to the server object are dropped */
	while (atomic_read(&conn->r_count) > 0)
		schedule_timeout(HZ);

	unload_nls(conn->local_nls);
	if (default_conn_ops.terminate_fn)
		default_conn_ops.terminate_fn(conn);
	t->ops->disconnect(t);
	module_put(THIS_MODULE);
	return 0;
}

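/**
 * ksmbd_conn_init_server_callbacks() - register the server request callbacks
 * @ops: callback table providing process_fn and terminate_fn
 */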
void ksmbd_conn_init_server_callbacks(struct ksmbd_conn_ops *ops)
{
	default_conn_ops.process_fn = ops->process_fn;
	default_conn_ops.terminate_fn = ops->terminate_fn;
}

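/**
 * ksmbd_conn_transport_init() - bring up the TCP and RDMA transport layers
 *
 * Return: 0 on success, otherwise the error code from the failing transport
 */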
int ksmbd_conn_transport_init(void)
{
	int ret;

	mutex_lock(&init_lock);
	ret = ksmbd_tcp_init();
	if (ret) {
		pr_err("Failed to init TCP subsystem: %d\n", ret);
		goto out;
	}

	ret = ksmbd_rdma_init();
	if (ret) {
		pr_err("Failed to init RDMA subsystem: %d\n", ret);
		goto out;
	}
out:
	mutex_unlock(&init_lock);
	return ret;
}

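/**
 * stop_sessions() - mark every connection as exiting and wait for handlers
 *
 * Walks the global connection list, flags each connection as
 * KSMBD_SESS_EXITING and polls until the list is empty.
 */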
static void stop_sessions(void)
{
	struct ksmbd_conn *conn;

again:
	read_lock(&conn_list_lock);
	list_for_each_entry(conn, &conn_list, conns_list) {
		struct task_struct *task;

		task = conn->transport->handler;
		if (task)
			ksmbd_debug(CONN, "Stop session handler %s/%d\n",
				    task->comm,
				    task_pid_nr(task));
		conn->status = KSMBD_SESS_EXITING;
	}
	read_unlock(&conn_list_lock);

	if (!list_empty(&conn_list)) {
		schedule_timeout_interruptible(HZ / 10); /* 100ms */
		goto again;
	}
}

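/**
 * ksmbd_conn_transport_destroy() - shut down the transport layers and sessions
 *
 * Tears down the TCP and RDMA transports and then stops all remaining
 * session handler threads.
 */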
void ksmbd_conn_transport_destroy(void)
{
	mutex_lock(&init_lock);
	ksmbd_tcp_destroy();
	ksmbd_rdma_destroy();
	stop_sessions();
	mutex_unlock(&init_lock);
}