]> git.ipfire.org Git - thirdparty/squid.git/blob - src/DiskIO/DiskThreads/async_io.cc
SourceFormat Enforcement
[thirdparty/squid.git] / src / DiskIO / DiskThreads / async_io.cc
1 /*
2 * Copyright (C) 1996-2016 The Squid Software Foundation and contributors
3 *
4 * Squid software is distributed under GPLv2+ license and includes
5 * contributions from numerous individuals and organizations.
6 * Please see the COPYING and CONTRIBUTORS files for details.
7 */
8
9 /* DEBUG: section 32 Asynchronous Disk I/O */
10
11 #include "squid.h"
12 #include "DiskThreads.h"
13 #include "DiskThreadsIOStrategy.h"
14 #include "fde.h"
15 #include "Generic.h"
16 #include "Store.h"
17
18 /*
19 * squidaio_ctrl_t uses explicit alloc()/freeOne() allocators
20 * XXX: convert to MEMPROXY_CLASS() API
21 */
22 #include "mem/Pool.h"
23
/// per-operation-type event counters; each aio*() entry point below
/// increments its *_start (or cancel) member on every request
AIOCounts squidaio_counts;

/// queue node for a pending unlink request
/// NOTE(review): not referenced anywhere in this file -- confirm it is
/// used elsewhere before removing
typedef struct squidaio_unlinkq_t {
    char *path;

    struct squidaio_unlinkq_t *next;
} squidaio_unlinkq_t;

/// all in-flight async I/O control blocks; scanned by aioCancel() and
/// appended to by every request-issuing function below
dlink_list used_list;
33
34 void
35 aioOpen(const char *path, int oflag, mode_t mode, AIOCB * callback, void *callback_data)
36 {
37 squidaio_ctrl_t *ctrlp;
38
39 assert(DiskThreadsIOStrategy::Instance.initialised);
40 ++squidaio_counts.open_start;
41 ctrlp = new squidaio_ctrl_t;
42 ctrlp->fd = -2;
43 ctrlp->done_handler = callback;
44 ctrlp->done_handler_data = cbdataReference(callback_data);
45 ctrlp->operation = _AIO_OPEN;
46 ctrlp->result.data = ctrlp;
47 squidaio_open(path, oflag, mode, &ctrlp->result);
48 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
49 return;
50 }
51
52 void
53 aioClose(int fd)
54 {
55 squidaio_ctrl_t *ctrlp;
56
57 assert(DiskThreadsIOStrategy::Instance.initialised);
58 ++squidaio_counts.close_start;
59 aioCancel(fd);
60 ctrlp = new squidaio_ctrl_t;
61 ctrlp->fd = fd;
62 ctrlp->done_handler = NULL;
63 ctrlp->done_handler_data = NULL;
64 ctrlp->operation = _AIO_CLOSE;
65 ctrlp->result.data = ctrlp;
66 squidaio_close(fd, &ctrlp->result);
67 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
68 return;
69 }
70
/*
 * Cancel every request queued for file descriptor 'fd': tell the worker
 * layer to abort each matching operation, fire its completion callback
 * (if any) with sentinel values, release associated buffers, and remove
 * the control block from used_list.
 */
void
aioCancel(int fd)
{
    squidaio_ctrl_t *ctrlp;
    dlink_node *m, *next;

    assert(DiskThreadsIOStrategy::Instance.initialised);
    ++squidaio_counts.cancel;

    /* walk the entire list: several requests may target the same fd */
    for (m = used_list.head; m; m = next) {
        next = m->next; /* saved first -- the current node may be deleted below */
        ctrlp = (squidaio_ctrl_t *)m->data;

        if (ctrlp->fd != fd)
            continue;

        squidaio_cancel(&ctrlp->result);

        if (ctrlp->done_handler) {
            AIOCB *callback = ctrlp->done_handler;
            void *cbdata;
            ctrlp->done_handler = NULL; /* guarantee the handler fires at most once */
            debugs(32, DBG_IMPORTANT, "this be aioCancel. Danger ahead!");

            /* notify the owner only if its cbdata is still valid;
             * -2/-2 appear to mark a cancelled request (same sentinel as
             * the unset fd above) -- NOTE(review): confirm AIOCB contract */
            if (cbdataReferenceValidDone(ctrlp->done_handler_data, &cbdata))
                callback(fd, cbdata, NULL, -2, -2);

            /* free data if requested to aioWrite() */
            if (ctrlp->free_func)
                ctrlp->free_func(ctrlp->bufp);

            /* free temporary read buffer allocated by aioRead() */
            if (ctrlp->operation == _AIO_READ)
                squidaio_xfree(ctrlp->bufp, ctrlp->len);
        }

        dlinkDelete(m, &used_list);
        delete ctrlp;
    }
}
111
112 void
113 aioWrite(int fd, off_t offset, char *bufp, size_t len, AIOCB * callback, void *callback_data, FREE * free_func)
114 {
115 squidaio_ctrl_t *ctrlp;
116 int seekmode;
117
118 assert(DiskThreadsIOStrategy::Instance.initialised);
119 ++squidaio_counts.write_start;
120 ctrlp = new squidaio_ctrl_t;
121 ctrlp->fd = fd;
122 ctrlp->done_handler = callback;
123 ctrlp->done_handler_data = cbdataReference(callback_data);
124 ctrlp->operation = _AIO_WRITE;
125 ctrlp->bufp = bufp;
126 ctrlp->free_func = free_func;
127
128 if (offset >= 0)
129 seekmode = SEEK_SET;
130 else {
131 seekmode = SEEK_END;
132 offset = 0;
133 }
134
135 ctrlp->result.data = ctrlp;
136 squidaio_write(fd, bufp, len, offset, seekmode, &ctrlp->result);
137 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
138 } /* aioWrite */
139
140 void
141 aioRead(int fd, off_t offset, size_t len, AIOCB * callback, void *callback_data)
142 {
143 squidaio_ctrl_t *ctrlp;
144 int seekmode;
145
146 assert(DiskThreadsIOStrategy::Instance.initialised);
147 ++squidaio_counts.read_start;
148 ctrlp = new squidaio_ctrl_t;
149 ctrlp->fd = fd;
150 ctrlp->done_handler = callback;
151 ctrlp->done_handler_data = cbdataReference(callback_data);
152 ctrlp->operation = _AIO_READ;
153 ctrlp->len = len;
154 ctrlp->bufp = (char *)squidaio_xmalloc(len);
155
156 if (offset >= 0)
157 seekmode = SEEK_SET;
158 else {
159 seekmode = SEEK_CUR;
160 offset = 0;
161 }
162
163 ctrlp->result.data = ctrlp;
164 squidaio_read(fd, ctrlp->bufp, len, offset, seekmode, &ctrlp->result);
165 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
166 return;
167 } /* aioRead */
168
169 void
170
171 aioStat(char *path, struct stat *sb, AIOCB * callback, void *callback_data)
172 {
173 squidaio_ctrl_t *ctrlp;
174
175 assert(DiskThreadsIOStrategy::Instance.initialised);
176 ++squidaio_counts.stat_start;
177 ctrlp = new squidaio_ctrl_t;
178 ctrlp->fd = -2;
179 ctrlp->done_handler = callback;
180 ctrlp->done_handler_data = cbdataReference(callback_data);
181 ctrlp->operation = _AIO_STAT;
182 ctrlp->result.data = ctrlp;
183 squidaio_stat(path, sb, &ctrlp->result);
184 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
185 return;
186 } /* aioStat */
187
188 void
189 aioUnlink(const char *path, AIOCB * callback, void *callback_data)
190 {
191 squidaio_ctrl_t *ctrlp;
192 assert(DiskThreadsIOStrategy::Instance.initialised);
193 ++squidaio_counts.unlink_start;
194 ctrlp = new squidaio_ctrl_t;
195 ctrlp->fd = -2;
196 ctrlp->done_handler = callback;
197 ctrlp->done_handler_data = cbdataReference(callback_data);
198 ctrlp->operation = _AIO_UNLINK;
199 ctrlp->result.data = ctrlp;
200 squidaio_unlink(path, &ctrlp->result);
201 dlinkAdd(ctrlp, &ctrlp->node, &used_list);
202 } /* aioUnlink */
203
/// Number of live squidaio_ctrl_t objects, per the class allocator's
/// UseCount() -- i.e. how many async I/O requests are outstanding.
int
aioQueueSize(void)
{
    return squidaio_ctrl_t::UseCount();
}
209