PRINT("wrap_sys_shmat ( %ld, %#lx, %ld )",ARG1,ARG2,ARG3);
PRE_REG_READ3(long, "shmat",
int, shmid, const void *, shmaddr, int, shmflg);
+ /* Round the attach address down to a VKI_SHMLBA boundary if the
+ client requested rounding. See #222545. This is necessary only
+ on arm-linux because VKI_SHMLBA is 4 * VKI_PAGE_SIZE; on all
+ other linux targets it is the same as the page size. */
+ if (ARG3 & VKI_SHM_RND)
+ ARG2 = VG_ROUNDDN(ARG2, VKI_SHMLBA);
arg2tmp = ML_(generic_PRE_sys_shmat)(tid, ARG1,ARG2,ARG3);
if (arg2tmp == 0)
SET_STATUS_Failure( VKI_EINVAL );
UWord tmp;
Bool ok;
if (arg1 == 0) {
+ /* arm-linux only: work around the fact that
+ VG_(am_get_advisory_client_simple) produces something that is
+ VKI_PAGE_SIZE aligned, whereas what we want is something
+ VKI_SHMLBA aligned, and VKI_SHMLBA >= VKI_PAGE_SIZE. Hence
+ increase the request size by VKI_SHMLBA - VKI_PAGE_SIZE and
+ then round the result up to the next VKI_SHMLBA boundary.
+ See bug 222545 comment 15. So far, arm-linux is the only
+ platform where this is known to be necessary. */
+ vg_assert(VKI_SHMLBA >= VKI_PAGE_SIZE);
+ if (VKI_SHMLBA > VKI_PAGE_SIZE) {
+ segmentSize += VKI_SHMLBA - VKI_PAGE_SIZE;
+ }
tmp = VG_(am_get_advisory_client_simple)(0, segmentSize, &ok);
- if (ok)
- arg1 = tmp;
+ if (ok) {
+ if (VKI_SHMLBA > VKI_PAGE_SIZE) {
+ arg1 = VG_ROUNDUP(tmp, VKI_SHMLBA);
+ } else {
+ arg1 = tmp;
+ }
+ }
}
else if (!ML_(valid_client_addr)(arg1, segmentSize, tid, "shmat"))
arg1 = 0;
#define VKI_SHMGET 23
#define VKI_SHMCTL 24
+#define VKI_SHMLBA (4 * VKI_PAGE_SIZE)
+
//----------------------------------------------------------------------
// From linux-2.6.8.1/include/asm-i386/shmbuf.h
};
#define VKI_SHM_RDONLY 010000 /* read-only access */
+#define VKI_SHM_RND 020000 /* round attach address to SHMLBA boundary */
#define VKI_SHM_STAT 13
#define VKI_SHM_INFO 14