#if defined(VMKERNEL) || defined(VMKBOOT)
#define INVALID_MPN64 ((MPN64)(uintptr_t)INVALID_MPN)
+#else
+/*
+ * We are zero-extending the 32-bit MPN into a 64-bit MPN container.
+ * This should change when we actually get a host supporting a 64-bit
+ * memory/address space.
+ */
+
+#define INVALID_MPN64 ((MPN64)INVALID_MPN)
+#define RESERVED_MPN64 ((MPN64)RESERVED_MPN)
+#define MEMREF_MPN64 ((MPN64)MEMREF_MPN)
+#define RELEASED_MPN64 ((MPN64)RELEASED_MPN)
#endif
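
Since the non-VMKERNEL branch widens the 32-bit sentinels by a plain cast, the zero
extension is what keeps comparisons against the 64-bit names working. A minimal
standalone sketch of that property, using placeholder typedefs and a placeholder
INVALID_MPN value (the real definitions come from the MPN headers, not this patch):

#include <assert.h>
#include <stdint.h>

typedef uint32_t MPN;    /* assumption: 32-bit MPN on hosted platforms */
typedef uint64_t MPN64;

#define INVALID_MPN   ((MPN)0xFFFFFFFFu)   /* placeholder value */
#define INVALID_MPN64 ((MPN64)INVALID_MPN) /* zero-extended, as above */

int main(void)
{
   MPN narrow = INVALID_MPN;
   MPN64 wide = (MPN64)narrow;  /* high 32 bits are zero, not sign bits */

   assert(wide == INVALID_MPN64);  /* sentinel survives widening */
   assert((wide >> 32) == 0);      /* no sign extension occurred */
   return 0;
}
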
/*
* holding a spinlock --hpreg
*
* Results:
- * INVALID_MPN on failure
- * mpn on success
+ * INVALID_MPN64 on failure
+ * mpn on success
*
* Side effects:
* None
*-----------------------------------------------------------------------------
*/
-static INLINE MPN
+static INLINE MPN64
PgtblPte2MPN(pte_t *pte) // IN
{
+ MPN64 mpn;
if (pte_present(*pte) == 0) {
- return INVALID_MPN;
+ return INVALID_MPN64;
}
- return pte_pfn(*pte);
+ mpn = pte_pfn(*pte);
+ /* Clamp PFNs at or above the sentinel range to INVALID_MPN64. */
+ if (mpn >= INVALID_MPN64) {
+ return INVALID_MPN64;
+ }
+ return mpn;
}
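
The new clamp means callers never see a PFN at or above the sentinel range and only
have to test one value. A standalone sketch of that contract, again with a
placeholder sentinel (assumption: the real constant is defined elsewhere):

#include <assert.h>
#include <stdint.h>

typedef uint64_t MPN64;
#define INVALID_MPN64 ((MPN64)0xFFFFFFFFull)   /* placeholder sentinel */

/* Mirrors the check in PgtblPte2MPN: out-of-range PFNs collapse to the
 * invalid sentinel instead of leaking to the caller. */
static MPN64
ClampMPN(MPN64 pfn)
{
   return (pfn >= INVALID_MPN64) ? INVALID_MPN64 : pfn;
}

int main(void)
{
   assert(ClampMPN(0x1000) == 0x1000);                   /* normal PFN */
   assert(ClampMPN(INVALID_MPN64) == INVALID_MPN64);
   assert(ClampMPN(INVALID_MPN64 + 1) == INVALID_MPN64); /* clamped */
   return 0;
}
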
*
* Retrieve MPN for a given va.
*
- * Caller must call pte_unmap if valid pte returned. The mm->page_table_lock
+ * Caller must call pte_unmap if valid pte returned. The mm->page_table_lock
* must be held, so this function is not allowed to schedule() --hpreg
*
* Results:
- * INVALID_MPN on failure
- * mpn on success
+ * INVALID_MPN64 on failure
+ * mpn on success
*
* Side effects:
* None
*-----------------------------------------------------------------------------
*/
-static INLINE MPN
+static INLINE MPN64
PgtblVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a process
VA addr) // IN: Address in the virtual address space of that process
{
pte_t *pte = PgtblVa2PTELocked(mm, addr);
if (pte != NULL) {
- MPN mpn = PgtblPte2MPN(pte);
+ MPN64 mpn = PgtblPte2MPN(pte);
pte_unmap(pte);
return mpn;
}
- return INVALID_MPN;
-}
+ return INVALID_MPN64;
+}
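
Since PgtblVa2MPNLocked assumes mm->page_table_lock is already held, a caller
that does not yet hold it must take it around the call. A hypothetical caller
sketch (not part of the patch), written against the plain 2.6-era lock rather
than the compat_get_page_table_lock wrapper that PgtblKVa2MPN uses further down:

static MPN64
ExampleVa2MPN(struct mm_struct *mm, // IN: hypothetical caller's mm
              VA addr)              // IN: address to translate
{
   MPN64 mpn;

   spin_lock(&mm->page_table_lock);   /* required by PgtblVa2MPNLocked */
   mpn = PgtblVa2MPNLocked(mm, addr);
   spin_unlock(&mm->page_table_lock);

   return mpn;                        /* INVALID_MPN64 on failure */
}
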
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
* must be held, so this function is not allowed to schedule() --hpreg
*
* Results:
- * INVALID_MPN on failure
- * mpn on success
+ * INVALID_MPN64 on failure
+ * mpn on success
*
* Side effects:
* None
*-----------------------------------------------------------------------------
*/
-static INLINE MPN
+static INLINE MPN64
PgtblKVa2MPNLocked(struct mm_struct *mm, // IN: Mm structure of a caller
VA addr) // IN: Address in the virtual address space of the caller
{
pte_t *pte = PgtblPGD2PTELocked(compat_pgd_offset_k(mm, addr), addr);
if (pte != NULL) {
- MPN mpn = PgtblPte2MPN(pte);
+ MPN64 mpn = PgtblPte2MPN(pte);
pte_unmap(pte);
return mpn;
}
- return INVALID_MPN;
+ return INVALID_MPN64;
}
#endif
*-----------------------------------------------------------------------------
*/
-static INLINE int
+static INLINE MPN64
PgtblVa2MPN(VA addr) // IN
{
struct mm_struct *mm;
- MPN mpn;
+ MPN64 mpn;
/* current->mm is NULL for kernel threads, so use active_mm. */
mm = current->active_mm;
*-----------------------------------------------------------------------------
*/
-static INLINE int
+static INLINE MPN64
PgtblKVa2MPN(VA addr) // IN
{
struct mm_struct *mm = current->active_mm;
- MPN mpn;
+ MPN64 mpn;
if (compat_get_page_table_lock(mm)) {
spin_lock(compat_get_page_table_lock(mm));