  err = __io_map ((mach_port_t) fd, &memobj_rd, &memobj_wr);
  if (err)
    return __hurd_fail (err), MAP_FAILED;
- if (memobj_wr != MACH_PORT_NULL)
+ if (MACH_PORT_VALID (memobj_wr))
    __mach_port_deallocate (__mach_task_self (), memobj_wr);
}
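For reference, the new condition rejects MACH_PORT_DEAD as well as MACH_PORT_NULL before the name is handed to __mach_port_deallocate. A minimal sketch of the macro, assuming the usual definition from Mach's <mach/port.h> (GNU Mach spells it essentially this way):

    /* Sketch of the <mach/port.h> definition: a port name counts as
       valid only if it is neither the null name nor the dead name.  */
    #define MACH_PORT_VALID(name) \
      (((name) != MACH_PORT_NULL) && ((name) != MACH_PORT_DEAD))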
    if (wobj == robj)
      max_vmprot |= VM_PROT_WRITE;
    memobj = robj;
-   if (wobj != MACH_PORT_NULL)
+   if (MACH_PORT_VALID (wobj))
      __mach_port_deallocate (__mach_task_self (), wobj);
    break;
  case PROT_WRITE:
    if (robj == wobj)
      max_vmprot |= VM_PROT_READ|VM_PROT_EXECUTE;
    memobj = wobj;
-   if (robj != MACH_PORT_NULL)
+   if (MACH_PORT_VALID (robj))
      __mach_port_deallocate (__mach_task_self (), robj);
    break;
  case PROT_READ|PROT_WRITE:
                  copy ? VM_INHERIT_COPY : VM_INHERIT_SHARE);
    }
- if (memobj != MACH_PORT_NULL)
+ if (MACH_PORT_VALID (memobj))
    __mach_port_deallocate (__mach_task_self (), memobj);
  if (err == KERN_PROTECTION_FAILURE)