// Extraction residue from the git web-viewer page header (kept for provenance):
// git.ipfire.org Git - thirdparty/squid.git/blob - src/fs/rock/RockIoState.cc
// DEBUG: section 79    Disk IO Routines
8 #include "DiskIO/DiskIOModule.h"
9 #include "DiskIO/DiskIOStrategy.h"
10 #include "DiskIO/WriteRequest.h"
11 #include "fs/rock/RockIoState.h"
12 #include "fs/rock/RockIoRequests.h"
13 #include "fs/rock/RockSwapDir.h"
16 Rock::IoState::IoState(SwapDir
*dir
,
18 StoreIOState::STFNCB
*cbFile
,
19 StoreIOState::STIOCB
*cbIo
,
26 // swap_filen, swap_dirn, diskOffset, and payloadEnd are set by the caller
27 slotSize
= dir
->maxObjectSize();
28 file_callback
= cbFile
;
30 callback_data
= cbdataReference(data
);
31 ++store_open_disk_fd
; // TODO: use a dedicated counter?
32 //theFile is set by SwapDir because it depends on DiskIOStrategy
35 Rock::IoState::~IoState()
39 cbdataReferenceDone(callback_data
);
44 Rock::IoState::file(const RefCount
<DiskFile
> &aFile
)
47 assert(aFile
!= NULL
);
52 Rock::IoState::read_(char *buf
, size_t len
, off_t coreOff
, STRCB
*cb
, void *data
)
54 assert(theFile
!= NULL
);
58 // we skip our cell header; it is only read when building the map
59 const int64_t cellOffset
= sizeof(DbCellHeader
) +
60 static_cast<int64_t>(coreOff
);
61 assert(cellOffset
<= payloadEnd
);
63 // Core specifies buffer length, but we must not exceed stored entry size
64 if (cellOffset
+ (int64_t)len
> payloadEnd
)
65 len
= payloadEnd
- cellOffset
;
67 assert(read
.callback
== NULL
);
68 assert(read
.callback_data
== NULL
);
70 read
.callback_data
= cbdataReference(data
);
72 theFile
->read(new ReadRequest(
73 ::ReadRequest(buf
, diskOffset
+ cellOffset
, len
), this));
76 // We only buffer data here; we actually write when close() is called.
77 // We buffer, in part, to avoid forcing OS to _read_ old unwritten portions
78 // of the slot when the write does not end at the page or sector boundary.
80 Rock::IoState::write(char const *buf
, size_t size
, off_t coreOff
, FREE
*dtor
)
82 // TODO: move to create?
84 assert(theBuf
.isNull());
85 assert(payloadEnd
<= slotSize
);
86 theBuf
.init(min(payloadEnd
, slotSize
), slotSize
);
87 // start with our header; TODO: consider making it a trailer
89 assert(static_cast<int64_t>(sizeof(header
)) <= payloadEnd
);
90 header
.payloadSize
= payloadEnd
- sizeof(header
);
91 theBuf
.append(reinterpret_cast<const char*>(&header
), sizeof(header
));
93 // Core uses -1 offset as "append". Sigh.
94 assert(coreOff
== -1);
95 assert(!theBuf
.isNull());
98 theBuf
.append(buf
, size
);
99 offset_
+= size
; // so that Core thinks we wrote it
102 (dtor
)(const_cast<char*>(buf
)); // cast due to a broken API?
105 // write what was buffered during write() calls
107 Rock::IoState::startWriting()
109 assert(theFile
!= NULL
);
110 assert(!theBuf
.isNull());
112 // TODO: if DiskIO module is mmap-based, we should be writing whole pages
113 // to avoid triggering read-page;new_head+old_tail;write-page overheads
115 debugs(79, 5, HERE
<< swap_filen
<< " at " << diskOffset
<< '+' <<
116 theBuf
.contentSize());
118 assert(theBuf
.contentSize() <= slotSize
);
119 // theFile->write may call writeCompleted immediatelly
120 theFile
->write(new WriteRequest(::WriteRequest(theBuf
.content(),
121 diskOffset
, theBuf
.contentSize(), theBuf
.freeFunc()), this));
126 Rock::IoState::finishedWriting(const int errFlag
)
128 // we incremented offset_ while accumulating data in write()
133 Rock::IoState::close(int how
)
135 debugs(79, 3, HERE
<< swap_filen
<< " accumulated: " << offset_
<<
137 if (how
== wroteAll
&& !theBuf
.isNull())
140 callBack(how
== writerGone
? DISK_ERROR
: 0); // TODO: add DISK_CALLER_GONE
143 /// close callback (STIOCB) dialer: breaks dependencies and
144 /// counts IOState concurrency level
145 class StoreIOStateCb
: public CallDialer
148 StoreIOStateCb(StoreIOState::STIOCB
*cb
, void *data
, int err
, const Rock::IoState::Pointer
&anSio
):
155 callback_data
= cbdataReference(data
);
158 StoreIOStateCb(const StoreIOStateCb
&cb
):
164 callback
= cb
.callback
;
165 callback_data
= cbdataReference(cb
.callback_data
);
168 virtual ~StoreIOStateCb() {
169 cbdataReferenceDone(callback_data
); // may be nil already
172 void dial(AsyncCall
&call
) {
174 if (cbdataReferenceValidDone(callback_data
, &cbd
) && callback
)
175 callback(cbd
, errflag
, sio
.getRaw());
178 bool canDial(AsyncCall
&call
) const {
179 return cbdataReferenceValid(callback_data
) && callback
;
182 virtual void print(std::ostream
&os
) const {
183 os
<< '(' << callback_data
<< ", err=" << errflag
<< ')';
187 StoreIOStateCb
&operator =(const StoreIOStateCb
&cb
); // not defined
189 StoreIOState::STIOCB
*callback
;
192 Rock::IoState::Pointer sio
;
196 Rock::IoState::callBack(int errflag
)
198 debugs(79,3, HERE
<< "errflag=" << errflag
);
201 AsyncCall::Pointer call
= asyncCall(79,3, "SomeIoStateCloseCb",
202 StoreIOStateCb(callback
, callback_data
, errflag
, this));
203 ScheduleCallHere(call
);
206 cbdataReferenceDone(callback_data
);