--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] vfs: introduce path_permission()
+
+ 2.6.27 eliminated the nameidata parameter from permission() and replaced
+ several call sites with inode_permission(). This keeps the path
+ information required by AppArmor from reaching the security module.
+
+ The following patch factors the permission-checking part of
+ inode_permission() out into __inode_permission() and adds a
+ path_permission() function that takes a struct path instead of a struct
+ inode, calling security_path_permission() instead of
+ security_inode_permission().
+
+ All call sites that had access to a struct path in 2.6.26 -- whether
+ directly, or via a struct file or nameidata -- and actually used it are
+ converted to path_permission().
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+---
+ fs/inotify_user.c | 2 +-
+ fs/namei.c | 32 ++++++++++++++++++++++++--------
+ fs/open.c | 10 +++++-----
+ include/linux/fs.h | 5 +++++
+ 4 files changed, 35 insertions(+), 14 deletions(-)
+
+--- a/fs/inotify_user.c
++++ b/fs/inotify_user.c
+@@ -372,7 +372,7 @@ static int find_inode(const char __user
+ if (error)
+ return error;
+ /* you can only watch an inode if you have read permissions on it */
+- error = inode_permission(path->dentry->d_inode, MAY_READ);
++ error = path_permission(path, MAY_READ);
+ if (error)
+ path_put(path);
+ return error;
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -227,7 +227,7 @@ int generic_permission(struct inode *ino
+ return -EACCES;
+ }
+
+-int inode_permission(struct inode *inode, int mask)
++static int __inode_permission(struct inode *inode, int mask)
+ {
+ int retval;
+ int submask = mask;
+@@ -273,7 +273,12 @@ int inode_permission(struct inode *inode
+ if (retval)
+ return retval;
+
+- retval = devcgroup_inode_permission(inode, mask);
++ return devcgroup_inode_permission(inode, mask);
++}
++
++int inode_permission(struct inode *inode, int mask)
++{
++ int retval = __inode_permission(inode, mask);
+ if (retval)
+ return retval;
+
+@@ -281,6 +286,15 @@ int inode_permission(struct inode *inode
+ mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND));
+ }
+
++int path_permission(struct path *path, int mask)
++{
++ int retval = __inode_permission(path->dentry->d_inode, mask);
++ if (retval)
++ return retval;
++ return security_path_permission(path,
++ mask & (MAY_READ|MAY_WRITE|MAY_EXEC|MAY_APPEND));
++}
++
+ /**
+ * vfs_permission - check for access rights to a given path
+ * @nd: lookup result that describes the path
+@@ -293,7 +307,7 @@ int inode_permission(struct inode *inode
+ */
+ int vfs_permission(struct nameidata *nd, int mask)
+ {
+- return inode_permission(nd->path.dentry->d_inode, mask);
++ return path_permission(&nd->path, mask);
+ }
+
+ /**
+@@ -310,7 +324,7 @@ int vfs_permission(struct nameidata *nd,
+ */
+ int file_permission(struct file *file, int mask)
+ {
+- return inode_permission(file->f_path.dentry->d_inode, mask);
++ return path_permission(&file->f_path, mask);
+ }
+
+ /*
+@@ -452,8 +466,9 @@ static struct dentry * cached_lookup(str
+ * short-cut DAC fails, then call permission() to do more
+ * complete permission check.
+ */
+-static int exec_permission_lite(struct inode *inode)
++static int exec_permission_lite(struct path *path)
+ {
++ struct inode *inode = path->dentry->d_inode;
+ umode_t mode = inode->i_mode;
+
+ if (inode->i_op && inode->i_op->permission)
+@@ -478,7 +493,7 @@ static int exec_permission_lite(struct i
+
+ return -EACCES;
+ ok:
+- return security_inode_permission(inode, MAY_EXEC);
++ return security_path_permission(path, MAY_EXEC);
+ }
+
+ /*
+@@ -875,7 +890,7 @@ static int __link_path_walk(const char *
+ unsigned int c;
+
+ nd->flags |= LOOKUP_CONTINUE;
+- err = exec_permission_lite(inode);
++ err = exec_permission_lite(&nd->path);
+ if (err == -EAGAIN)
+ err = vfs_permission(nd, MAY_EXEC);
+ if (err)
+@@ -1250,7 +1265,7 @@ static struct dentry *lookup_hash(struct
+ {
+ int err;
+
+- err = inode_permission(nd->path.dentry->d_inode, MAY_EXEC);
++ err = path_permission(&nd->path, MAY_EXEC);
+ if (err)
+ return ERR_PTR(err);
+ return __lookup_hash(&nd->last, nd->path.dentry, nd);
+@@ -2907,6 +2922,7 @@ EXPORT_SYMBOL(page_symlink_inode_operati
+ EXPORT_SYMBOL(path_lookup);
+ EXPORT_SYMBOL(vfs_path_lookup);
+ EXPORT_SYMBOL(inode_permission);
++EXPORT_SYMBOL(path_permission);
+ EXPORT_SYMBOL(vfs_permission);
+ EXPORT_SYMBOL(file_permission);
+ EXPORT_SYMBOL(unlock_rename);
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -248,7 +248,7 @@ static long do_sys_truncate(const char _
+ if (error)
+ goto dput_and_out;
+
+- error = inode_permission(inode, MAY_WRITE);
++ error = path_permission(&path, MAY_WRITE);
+ if (error)
+ goto mnt_drop_write_and_out;
+
+@@ -493,7 +493,7 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
+ goto out_path_release;
+ }
+
+- res = inode_permission(inode, mode | MAY_ACCESS);
++ res = path_permission(&path, mode | MAY_ACCESS);
+ /* SuS v2 requires we report a read only fs too */
+ if (res || !(mode & S_IWOTH) || special_file(inode->i_mode))
+ goto out_path_release;
+@@ -536,7 +536,7 @@ SYSCALL_DEFINE1(chdir, const char __user
+ if (error)
+ goto out;
+
+- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
++ error = path_permission(&path, MAY_EXEC | MAY_ACCESS);
+ if (error)
+ goto dput_and_out;
+
+@@ -565,7 +565,7 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
+ if (!S_ISDIR(inode->i_mode))
+ goto out_putf;
+
+- error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
++ error = path_permission(&file->f_path, MAY_EXEC | MAY_ACCESS);
+ if (!error)
+ set_fs_pwd(current->fs, &file->f_path);
+ out_putf:
+@@ -583,7 +583,7 @@ SYSCALL_DEFINE1(chroot, const char __use
+ if (error)
+ goto out;
+
+- error = inode_permission(path.dentry->d_inode, MAY_EXEC | MAY_ACCESS);
++ error = path_permission(&path, MAY_EXEC | MAY_ACCESS);
+ if (error)
+ goto dput_and_out;
+
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1201,6 +1201,11 @@ extern void dentry_unhash(struct dentry
+ extern int file_permission(struct file *, int);
+
+ /*
++ * VFS path helper functions.
++ */
++extern int path_permission(struct path *, int);
++
++/*
+ * File types
+ *
+ * NOTE! These match bits 12..15 of stat.st_mode
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: Patch AppArmor for 2.6.25 kernel
+
+Add 64-bit capability support to AppArmor.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ security/apparmor/module_interface.c | 22 ++++++++++++++++++----
+ 1 file changed, 18 insertions(+), 4 deletions(-)
+
+--- a/security/apparmor/module_interface.c
++++ b/security/apparmor/module_interface.c
+@@ -395,15 +395,29 @@ static struct aa_profile *aa_unpack_prof
+ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
+ goto fail;
+
+- if (!aa_is_u32(e, &(profile->capabilities), NULL))
++ if (!aa_is_u32(e, &(profile->capabilities.cap[0]), NULL))
+ goto fail;
+- if (!aa_is_u32(e, &(profile->audit_caps), NULL))
++ if (!aa_is_u32(e, &(profile->audit_caps.cap[0]), NULL))
+ goto fail;
+- if (!aa_is_u32(e, &(profile->quiet_caps), NULL))
++ if (!aa_is_u32(e, &(profile->quiet_caps.cap[0]), NULL))
+ goto fail;
+- if (!aa_is_u32(e, &(profile->set_caps), NULL))
++ if (!aa_is_u32(e, &(profile->set_caps.cap[0]), NULL))
+ goto fail;
+
++ if (aa_is_nameX(e, AA_STRUCT, "caps64")) {
++ /* optional upper half of 64 bit caps */
++ if (!aa_is_u32(e, &(profile->capabilities.cap[1]), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->audit_caps.cap[1]), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->quiet_caps.cap[1]), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->set_caps.cap[1]), NULL))
++ goto fail;
++ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
++ goto fail;
++ }
++
+ if (!aa_unpack_rlimits(e, profile))
+ goto fail;
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Export audit subsystem for use by modules
+
+Update kernel audit range comments to show AppArmor's registered range of
+1500-1599. This range used to be reserved for LSPP, but LSPP uses the
+SE Linux range, so the range was given to AppArmor.
+Add the necessary export symbols for audit subsystem routines.
+Change audit_log_vformat to be externally visible (analogous to vprintf).
+The patch is not in mainline -- pending AppArmor code submission to lkml.
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ include/linux/audit.h | 12 +++++++++++-
+ kernel/audit.c | 6 ++++--
+ 2 files changed, 15 insertions(+), 3 deletions(-)
+
+--- a/include/linux/audit.h
++++ b/include/linux/audit.h
+@@ -33,7 +33,7 @@
+ * 1200 - 1299 messages internal to the audit daemon
+ * 1300 - 1399 audit event messages
+ * 1400 - 1499 SE Linux use
+- * 1500 - 1599 kernel LSPP events
++ * 1500 - 1599 AppArmor use
+ * 1600 - 1699 kernel crypto events
+ * 1700 - 1799 kernel anomaly records
+ * 1800 - 1999 future kernel use (maybe integrity labels and related events)
+@@ -119,6 +119,13 @@
+ #define AUDIT_MAC_UNLBL_STCADD 1416 /* NetLabel: add a static label */
+ #define AUDIT_MAC_UNLBL_STCDEL 1417 /* NetLabel: del a static label */
+
++#define AUDIT_APPARMOR_AUDIT 1501 /* AppArmor audited grants */
++#define AUDIT_APPARMOR_ALLOWED 1502 /* Allowed Access for learning */
++#define AUDIT_APPARMOR_DENIED 1503 /* Denied access */
++#define AUDIT_APPARMOR_HINT 1504 /* Process Tracking information */
++#define AUDIT_APPARMOR_STATUS 1505 /* Changes in config */
++#define AUDIT_APPARMOR_ERROR 1506 /* Internal AppArmor Errors */
++
+ #define AUDIT_FIRST_KERN_ANOM_MSG 1700
+ #define AUDIT_LAST_KERN_ANOM_MSG 1799
+ #define AUDIT_ANOM_PROMISCUOUS 1700 /* Device changed promiscuous mode */
+@@ -545,6 +552,9 @@ extern void audit_log(struct audit_
+ __attribute__((format(printf,4,5)));
+
+ extern struct audit_buffer *audit_log_start(struct audit_context *ctx, gfp_t gfp_mask, int type);
++extern void audit_log_vformat(struct audit_buffer *ab,
++ const char *fmt, va_list args)
++ __attribute__((format(printf,2,0)));
+ extern void audit_log_format(struct audit_buffer *ab,
+ const char *fmt, ...)
+ __attribute__((format(printf,2,3)));
+--- a/kernel/audit.c
++++ b/kernel/audit.c
+@@ -1231,8 +1231,7 @@ static inline int audit_expand(struct au
+ * will be called a second time. Currently, we assume that a printk
+ * can't format message larger than 1024 bytes, so we don't either.
+ */
+-static void audit_log_vformat(struct audit_buffer *ab, const char *fmt,
+- va_list args)
++void audit_log_vformat(struct audit_buffer *ab, const char *fmt, va_list args)
+ {
+ int len, avail;
+ struct sk_buff *skb;
+@@ -1506,3 +1505,6 @@ EXPORT_SYMBOL(audit_log_start);
+ EXPORT_SYMBOL(audit_log_end);
+ EXPORT_SYMBOL(audit_log_format);
+ EXPORT_SYMBOL(audit_log);
++EXPORT_SYMBOL_GPL(audit_log_vformat);
++EXPORT_SYMBOL_GPL(audit_log_untrustedstring);
++EXPORT_SYMBOL_GPL(audit_log_d_path);
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: Add AppArmor LSM to security/Makefile
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ security/Kconfig | 1 +
+ security/Makefile | 3 ++-
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -97,6 +97,7 @@ config SECURITY_ROOTPLUG
+
+ source security/selinux/Kconfig
+ source security/smack/Kconfig
++source security/apparmor/Kconfig
+
+ endmenu
+
+--- a/security/Makefile
++++ b/security/Makefile
+@@ -14,5 +14,6 @@ obj-$(CONFIG_SECURITY) += security.o c
+ # Must precede capability.o in order to stack properly.
+ obj-$(CONFIG_SECURITY_SELINUX) += selinux/built-in.o
+ obj-$(CONFIG_SECURITY_SMACK) += smack/built-in.o
+-obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
++obj-$(CONFIG_SECURITY_APPARMOR) += commoncap.o apparmor/
++obj-$(CONFIG_SECURITY_ROOTPLUG) += root_plug.o
+ obj-$(CONFIG_CGROUP_DEVICE) += device_cgroup.o
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: Module and LSM hooks
+
+Module parameters, LSM hooks, initialization and teardown.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ security/apparmor/lsm.c | 895 ++++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 895 insertions(+)
+
+--- /dev/null
++++ b/security/apparmor/lsm.c
+@@ -0,0 +1,895 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor LSM interface
++ */
++
++#include <linux/security.h>
++#include <linux/module.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/mount.h>
++#include <linux/namei.h>
++#include <linux/ctype.h>
++#include <linux/sysctl.h>
++#include <linux/audit.h>
++
++#include "apparmor.h"
++#include "inline.h"
++
++/* Flag indicating whether initialization completed */
++int apparmor_initialized = 0;
++
++static int param_set_aabool(const char *val, struct kernel_param *kp);
++static int param_get_aabool(char *buffer, struct kernel_param *kp);
++#define param_check_aabool(name, p) __param_check(name, p, int)
++
++static int param_set_aauint(const char *val, struct kernel_param *kp);
++static int param_get_aauint(char *buffer, struct kernel_param *kp);
++#define param_check_aauint(name, p) __param_check(name, p, int)
++
++/* Flag values, also controllable via /sys/module/apparmor/parameters
++ * We define special parameter types as we want to do additional mediation.
++ *
++ * Complain mode -- in complain mode access failures result in auditing only
++ * and the task is allowed access. Audit events are processed by userspace
++ * to generate policy. Default is 'enforce' (0).
++ * The value is also toggleable per profile, and the per-profile value is
++ * used when the global value is enforce.
++ */
++int apparmor_complain = 0;
++module_param_named(complain, apparmor_complain, aabool, S_IRUSR | S_IWUSR);
++MODULE_PARM_DESC(apparmor_complain, "Toggle AppArmor complain mode");
++
++/* Debug mode */
++int apparmor_debug = 0;
++module_param_named(debug, apparmor_debug, aabool, S_IRUSR | S_IWUSR);
++MODULE_PARM_DESC(apparmor_debug, "Toggle AppArmor debug mode");
++
++/* Audit mode */
++int apparmor_audit = 0;
++module_param_named(audit, apparmor_audit, aabool, S_IRUSR | S_IWUSR);
++MODULE_PARM_DESC(apparmor_audit, "Toggle AppArmor audit mode");
++
++/* Syscall logging mode */
++int apparmor_logsyscall = 0;
++module_param_named(logsyscall, apparmor_logsyscall, aabool, S_IRUSR | S_IWUSR);
++MODULE_PARM_DESC(apparmor_logsyscall, "Toggle AppArmor logsyscall mode");
++
++/* Maximum pathname length before accesses will start getting rejected */
++unsigned int apparmor_path_max = 2 * PATH_MAX;
++module_param_named(path_max, apparmor_path_max, aauint, S_IRUSR | S_IWUSR);
++MODULE_PARM_DESC(apparmor_path_max, "Maximum pathname length allowed");
++
++/* Boot time disable flag */
++#ifdef CONFIG_SECURITY_APPARMOR_DISABLE
++#define AA_ENABLED_PERMS 0600
++#else
++#define AA_ENABLED_PERMS 0400
++#endif
++static int param_set_aa_enabled(const char *val, struct kernel_param *kp);
++unsigned int apparmor_enabled = CONFIG_SECURITY_APPARMOR_BOOTPARAM_VALUE;
++module_param_call(enabled, param_set_aa_enabled, param_get_aauint,
++ &apparmor_enabled, AA_ENABLED_PERMS);
++MODULE_PARM_DESC(apparmor_enabled, "Enable/Disable AppArmor on boot");
++
++static int __init apparmor_enabled_setup(char *str)
++{
++ apparmor_enabled = simple_strtol(str, NULL, 0);
++ return 1;
++}
++__setup("apparmor=", apparmor_enabled_setup);
++
++static int param_set_aabool(const char *val, struct kernel_param *kp)
++{
++ if (aa_task_context(current))
++ return -EPERM;
++ return param_set_bool(val, kp);
++}
++
++static int param_get_aabool(char *buffer, struct kernel_param *kp)
++{
++ if (aa_task_context(current))
++ return -EPERM;
++ return param_get_bool(buffer, kp);
++}
++
++static int param_set_aauint(const char *val, struct kernel_param *kp)
++{
++ if (aa_task_context(current))
++ return -EPERM;
++ return param_set_uint(val, kp);
++}
++
++static int param_get_aauint(char *buffer, struct kernel_param *kp)
++{
++ if (aa_task_context(current))
++ return -EPERM;
++ return param_get_uint(buffer, kp);
++}
++
++/* allow run time disabling of apparmor */
++static int param_set_aa_enabled(const char *val, struct kernel_param *kp)
++{
++ char *endp;
++ unsigned long l;
++
++ if (!apparmor_initialized) {
++ apparmor_enabled = 0;
++ return 0;
++ }
++
++ if (aa_task_context(current))
++ return -EPERM;
++
++ if (!apparmor_enabled)
++ return -EINVAL;
++
++ if (!val)
++ return -EINVAL;
++
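++ /* only "0" is accepted: AppArmor can be disabled at run time,
++ * but not re-enabled */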
++ l = simple_strtoul(val, &endp, 0);
++ if (endp == val || l != 0)
++ return -EINVAL;
++
++ apparmor_enabled = 0;
++ apparmor_disable();
++ return 0;
++}
++
++static int aa_reject_syscall(struct task_struct *task, gfp_t flags,
++ const char *name)
++{
++ struct aa_profile *profile = aa_get_profile(task);
++ int error = 0;
++
++ if (profile) {
++ error = aa_audit_syscallreject(profile, flags, name);
++ aa_put_profile(profile);
++ }
++
++ return error;
++}
++
++static int apparmor_ptrace(struct task_struct *parent,
++ struct task_struct *child, unsigned int mode)
++{
++ struct aa_task_context *cxt;
++ int error = 0;
++
++ /*
++ * parent can ptrace child when
++ * - parent is unconfined, or
++ * - parent and child are in the same namespace, and one of:
++ * - parent is in complain mode
++ * - parent and child are confined by the same profile
++ * - parent's profile has CAP_SYS_PTRACE
++ */
++
++ rcu_read_lock();
++ cxt = aa_task_context(parent);
++ if (cxt) {
++ if (parent->nsproxy != child->nsproxy) {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "ptrace";
++ sa.gfp_mask = GFP_ATOMIC;
++ sa.parent = parent->pid;
++ sa.task = child->pid;
++ sa.info = "different namespaces";
++ aa_audit_reject(cxt->profile, &sa);
++ error = -EPERM;
++ } else {
++ struct aa_task_context *child_cxt =
++ aa_task_context(child);
++
++ error = aa_may_ptrace(cxt, child_cxt ?
++ child_cxt->profile : NULL);
++ if (PROFILE_COMPLAIN(cxt->profile)) {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "ptrace";
++ sa.gfp_mask = GFP_ATOMIC;
++ sa.parent = parent->pid;
++ sa.task = child->pid;
++ aa_audit_hint(cxt->profile, &sa);
++ }
++ }
++ }
++ rcu_read_unlock();
++
++ return error;
++}
++
++static int apparmor_capable(struct task_struct *task, int cap)
++{
++ int error;
++ struct aa_task_context *cxt;
++
++ /* cap_capable returns 0 on success, else -EPERM */
++ error = cap_capable(task, cap);
++
++ rcu_read_lock();
++ cxt = aa_task_context(task);
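++ /* mediate when cap_capable() granted the capability, or when
++ * the profile explicitly raises it in set_caps */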
++ if (cxt && (!error || cap_raised(cxt->profile->set_caps, cap)))
++ error = aa_capability(cxt, cap);
++ rcu_read_unlock();
++
++ return error;
++}
++
++static int apparmor_sysctl(struct ctl_table *table, int op)
++{
++ struct aa_profile *profile = aa_get_profile(current);
++ int error = 0;
++
++ if (profile) {
++ char *buffer, *name;
++ int mask;
++
++ mask = 0;
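++ /* the sysctl op encodes the requested access mode-style:
++ * 4 = read, 2 = write */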
++ if (op & 4)
++ mask |= MAY_READ;
++ if (op & 2)
++ mask |= MAY_WRITE;
++
++ error = -ENOMEM;
++ buffer = (char*)__get_free_page(GFP_KERNEL);
++ if (!buffer)
++ goto out;
++ name = sysctl_pathname(table, buffer, PAGE_SIZE);
++ if (name && name - buffer >= 5) {
++ name -= 5;
++ memcpy(name, "/proc", 5);
++ error = aa_perm_path(profile, "sysctl", name, mask, 0);
++ }
++ free_page((unsigned long)buffer);
++ }
++
++out:
++ aa_put_profile(profile);
++ return error;
++}
++
++static int apparmor_bprm_set_security(struct linux_binprm *bprm)
++{
++ /* handle capability bits with setuid, etc */
++ cap_bprm_set_security(bprm);
++ /* already set based on script name */
++ if (bprm->sh_bang)
++ return 0;
++ return aa_register(bprm);
++}
++
++static int apparmor_bprm_secureexec(struct linux_binprm *bprm)
++{
++ int ret = cap_bprm_secureexec(bprm);
++
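++ /* bprm->security carries the AA_SECURE_EXEC_NEEDED flag in its
++ * low bits */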
++ if (!ret && (unsigned long)bprm->security & AA_SECURE_EXEC_NEEDED) {
++ AA_DEBUG("%s: secureexec required for %s\n",
++ __FUNCTION__, bprm->filename);
++ ret = 1;
++ }
++
++ return ret;
++}
++
++static int apparmor_sb_mount(char *dev_name, struct path *path, char *type,
++ unsigned long flags, void *data)
++{
++ return aa_reject_syscall(current, GFP_KERNEL, "mount");
++}
++
++static int apparmor_umount(struct vfsmount *mnt, int flags)
++{
++ return aa_reject_syscall(current, GFP_KERNEL, "umount");
++}
++
++static int apparmor_inode_mkdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mask)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ if (!mnt || !mediated_filesystem(dir))
++ goto out;
++
++ profile = aa_get_profile(current);
++
++ if (profile)
++ error = aa_perm_dir(profile, "inode_mkdir", dentry, mnt,
++ MAY_WRITE);
++
++ aa_put_profile(profile);
++
++out:
++ return error;
++}
++
++static int apparmor_inode_rmdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ if (!mnt || !mediated_filesystem(dir))
++ goto out;
++
++ profile = aa_get_profile(current);
++
++ if (profile)
++ error = aa_perm_dir(profile, "inode_rmdir", dentry, mnt,
++ MAY_WRITE);
++
++ aa_put_profile(profile);
++
++out:
++ return error;
++}
++
++static int aa_permission(const char *operation, struct inode *inode,
++ struct dentry *dentry, struct vfsmount *mnt,
++ int mask, int check)
++{
++ int error = 0;
++
++ if (mnt && mediated_filesystem(inode)) {
++ struct aa_profile *profile;
++
++ profile = aa_get_profile(current);
++ if (profile)
++ error = aa_perm(profile, operation, dentry, mnt, mask,
++ check);
++ aa_put_profile(profile);
++ }
++ return error;
++}
++
++static inline int aa_mask_permissions(int mask)
++{
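++ /* for append requests, drop MAY_WRITE so append-only profile
++ * rules suffice */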
++ if (mask & MAY_APPEND)
++ mask &= (MAY_READ | MAY_APPEND | MAY_EXEC);
++ else
++ mask &= (MAY_READ | MAY_WRITE | MAY_EXEC);
++ return mask;
++}
++
++static int apparmor_inode_create(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mask)
++{
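++ /* file creation is checked with MAY_APPEND rather than MAY_WRITE */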
++ return aa_permission("inode_create", dir, dentry, mnt, MAY_APPEND, 0);
++}
++
++static int apparmor_inode_link(struct dentry *old_dentry,
++ struct vfsmount *old_mnt, struct inode *dir,
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
++{
++ int error = 0;
++ struct aa_profile *profile;
++
++ if (!old_mnt || !new_mnt || !mediated_filesystem(dir))
++ goto out;
++
++ profile = aa_get_profile(current);
++
++ if (profile)
++ error = aa_link(profile, new_dentry, new_mnt,
++ old_dentry, old_mnt);
++
++ aa_put_profile(profile);
++
++out:
++ return error;
++}
++
++static int apparmor_inode_unlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
++{
++ int check = 0;
++
++ if (S_ISDIR(dentry->d_inode->i_mode))
++ check |= AA_CHECK_DIR;
++ return aa_permission("inode_unlink", dir, dentry, mnt, MAY_WRITE,
++ check);
++}
++
++static int apparmor_inode_symlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, const char *old_name)
++{
++ return aa_permission("inode_symlink", dir, dentry, mnt, MAY_WRITE, 0);
++}
++
++static int apparmor_inode_mknod(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode, dev_t dev)
++{
++ return aa_permission("inode_mknod", dir, dentry, mnt, MAY_WRITE, 0);
++}
++
++static int apparmor_inode_rename(struct inode *old_dir,
++ struct dentry *old_dentry,
++ struct vfsmount *old_mnt,
++ struct inode *new_dir,
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ if ((!old_mnt && !new_mnt) || !mediated_filesystem(old_dir))
++ goto out;
++
++ profile = aa_get_profile(current);
++
++ if (profile) {
++ struct inode *inode = old_dentry->d_inode;
++ int check = 0;
++
++ if (inode && S_ISDIR(inode->i_mode))
++ check |= AA_CHECK_DIR;
++ if (old_mnt)
++ error = aa_perm(profile, "inode_rename", old_dentry,
++ old_mnt, MAY_READ | MAY_WRITE, check);
++
++ if (!error && new_mnt) {
++ error = aa_perm(profile, "inode_rename", new_dentry,
++ new_mnt, MAY_WRITE, check);
++ }
++ }
++
++ aa_put_profile(profile);
++
++out:
++ return error;
++}
++
++static int apparmor_inode_permission(struct inode *inode, int mask,
++ struct nameidata *nd)
++{
++ int check = 0;
++
++ if (!nd || nd->flags & (LOOKUP_PARENT | LOOKUP_CONTINUE))
++ return 0;
++ mask = aa_mask_permissions(mask);
++ if (S_ISDIR(inode->i_mode)) {
++ check |= AA_CHECK_DIR;
++ /* allow traverse accesses to directories */
++ mask &= ~MAY_EXEC;
++ }
++ return aa_permission("inode_permission", inode, nd->dentry, nd->mnt,
++ mask, check);
++}
++
++static int apparmor_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *iattr)
++{
++ int error = 0;
++
++ if (!mnt)
++ goto out;
++
++ if (mediated_filesystem(dentry->d_inode)) {
++ struct aa_profile *profile;
++
++ profile = aa_get_profile(current);
++ /*
++ * Mediate any attempt to change attributes of a file
++ * (chmod, chown, chgrp, etc)
++ */
++ if (profile)
++ error = aa_attr(profile, dentry, mnt, iattr);
++
++ aa_put_profile(profile);
++ }
++
++out:
++ return error;
++}
++
++static int aa_xattr_permission(struct dentry *dentry, struct vfsmount *mnt,
++ const char *operation, int mask,
++ struct file *file)
++{
++ int error = 0;
++
++ if (mnt && mediated_filesystem(dentry->d_inode)) {
++ struct aa_profile *profile = aa_get_profile(current);
++ int check = file ? AA_CHECK_FD : 0;
++
++ if (profile)
++ error = aa_perm_xattr(profile, operation, dentry, mnt,
++ mask, check);
++ aa_put_profile(profile);
++ }
++
++ return error;
++}
++
++static int apparmor_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags, struct file *file)
++{
++ int error = cap_inode_setxattr(dentry, mnt, name, value, size, flags,
++ file);
++
++ if (!error)
++ error = aa_xattr_permission(dentry, mnt, "xattr set",
++ MAY_WRITE, file);
++ return error;
++}
++
++static int apparmor_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, struct file *file)
++{
++ return aa_xattr_permission(dentry, mnt, "xattr get", MAY_READ, file);
++}
++
++static int apparmor_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct file *file)
++{
++ return aa_xattr_permission(dentry, mnt, "xattr list", MAY_READ, file);
++}
++
++static int apparmor_inode_removexattr(struct dentry *dentry,
++ struct vfsmount *mnt, const char *name,
++ struct file *file)
++{
++ return aa_xattr_permission(dentry, mnt, "xattr remove", MAY_WRITE,
++ file);
++}
++
++static int aa_file_permission(const char *op, struct file *file, int mask)
++{
++ struct aa_profile *profile;
++ struct aa_profile *file_profile = (struct aa_profile*)file->f_security;
++ int error = 0;
++
++ if (!file_profile)
++ goto out;
++
++ /*
++ * If this file was opened under a different profile, we
++ * revalidate the access against the current profile.
++ */
++ profile = aa_get_profile(current);
++ if (profile && (file_profile != profile || mask & AA_MAY_LOCK)) {
++ struct dentry *dentry = file->f_dentry;
++ struct vfsmount *mnt = file->f_vfsmnt;
++ struct inode *inode = dentry->d_inode;
++ int check = AA_CHECK_FD;
++
++ /*
++ * FIXME: We should remember which profiles we revalidated
++ * against.
++ */
++ if (S_ISDIR(inode->i_mode))
++ check |= AA_CHECK_DIR;
++ error = aa_permission(op, inode, dentry, mnt, mask, check);
++ }
++ aa_put_profile(profile);
++
++out:
++ return error;
++}
++
++static int apparmor_file_permission(struct file *file, int mask)
++{
++ return aa_file_permission("file_permission", file,
++ aa_mask_permissions(mask));
++}
++
++static inline int apparmor_file_lock(struct file *file, unsigned int cmd)
++{
++ int mask = AA_MAY_LOCK;
++ if (cmd == F_WRLCK)
++ mask |= MAY_WRITE;
++ return aa_file_permission("file_lock", file, mask);
++}
++
++static int apparmor_file_alloc_security(struct file *file)
++{
++ struct aa_profile *profile;
++
++ profile = aa_get_profile(current);
++ if (profile)
++ file->f_security = profile;
++
++ return 0;
++}
++
++static void apparmor_file_free_security(struct file *file)
++{
++ struct aa_profile *file_profile = (struct aa_profile*)file->f_security;
++
++ aa_put_profile(file_profile);
++}
++
++static inline int aa_mmap(struct file *file, const char *operation,
++ unsigned long prot, unsigned long flags)
++{
++ struct dentry *dentry;
++ int mask = 0;
++
++ if (!file || !file->f_security)
++ return 0;
++
++ if (prot & PROT_READ)
++ mask |= MAY_READ;
++ /* Private mappings don't require write perms since they don't
++ * write back to the files */
++ if ((prot & PROT_WRITE) && !(flags & MAP_PRIVATE))
++ mask |= MAY_WRITE;
++ if (prot & PROT_EXEC)
++ mask |= AA_EXEC_MMAP;
++
++ dentry = file->f_dentry;
++ return aa_permission(operation, dentry->d_inode, dentry,
++ file->f_vfsmnt, mask, AA_CHECK_FD);
++}
++
++static int apparmor_file_mmap(struct file *file, unsigned long reqprot,
++ unsigned long prot, unsigned long flags,
++ unsigned long addr, unsigned long addr_only)
++{
++ if ((addr < mmap_min_addr) && !capable(CAP_SYS_RAWIO)) {
++ struct aa_profile *profile = aa_get_profile(current);
++ /* future control check here; for now both confined and
++ * unconfined tasks are refused low mappings */
++ aa_put_profile(profile);
++ return -EACCES;
++ }
++
++ return aa_mmap(file, "file_mmap", prot, flags);
++}
++
++static int apparmor_file_mprotect(struct vm_area_struct *vma,
++ unsigned long reqprot, unsigned long prot)
++{
++ return aa_mmap(vma->vm_file, "file_mprotect", prot,
++ !(vma->vm_flags & VM_SHARED) ? MAP_PRIVATE : 0);
++}
++
++static int apparmor_task_alloc_security(struct task_struct *task)
++{
++ return aa_clone(task);
++}
++
++/*
++ * Called from IRQ context from RCU callback.
++ */
++static void apparmor_task_free_security(struct task_struct *task)
++{
++ aa_release(task);
++}
++
++static int apparmor_getprocattr(struct task_struct *task, char *name,
++ char **value)
++{
++ unsigned len;
++ int error;
++ struct aa_profile *profile;
++
++ /* AppArmor only supports the "current" process attribute */
++ if (strcmp(name, "current") != 0)
++ return -EINVAL;
++
++ /* must be task querying itself or admin */
++ if (current != task && !capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ profile = aa_get_profile(task);
++ error = aa_getprocattr(profile, value, &len);
++ aa_put_profile(profile);
++ if (!error)
++ error = len;
++
++ return error;
++}
++
++static int apparmor_setprocattr(struct task_struct *task, char *name,
++ void *value, size_t size)
++{
++ char *command, *args;
++ int error;
++
++ if (strcmp(name, "current") != 0 || size == 0 || size >= PAGE_SIZE)
++ return -EINVAL;
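++ /* commands have the form "<command> <args>" */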
++ args = value;
++ args[size] = '\0';
++ args = strstrip(args);
++ command = strsep(&args, " ");
++ if (!args)
++ return -EINVAL;
++ while (isspace(*args))
++ args++;
++ if (!*args)
++ return -EINVAL;
++
++ if (strcmp(command, "changehat") == 0) {
++ if (current != task)
++ return -EACCES;
++ error = aa_setprocattr_changehat(args);
++ } else if (strcmp(command, "changeprofile") == 0) {
++ if (current != task)
++ return -EACCES;
++ error = aa_setprocattr_changeprofile(args);
++ } else if (strcmp(command, "setprofile") == 0) {
++ struct aa_profile *profile;
++
++ /* Only an unconfined process with admin capabilities
++ * may change the profile of another task.
++ */
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EACCES;
++
++ profile = aa_get_profile(current);
++ if (profile) {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "profile_set";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.task = task->pid;
++ sa.info = "from confined process";
++ aa_audit_reject(profile, &sa);
++ aa_put_profile(profile);
++ return -EACCES;
++ }
++ error = aa_setprocattr_setprofile(task, args);
++ } else {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "setprocattr";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.info = "invalid command";
++ sa.name = command;
++ sa.task = task->pid;
++ aa_audit_reject(NULL, &sa);
++ return -EINVAL;
++ }
++
++ if (!error)
++ error = size;
++ return error;
++}
++
++struct security_operations apparmor_ops = {
++ .ptrace = apparmor_ptrace,
++ .capget = cap_capget,
++ .capset_check = cap_capset_check,
++ .capset_set = cap_capset_set,
++ .sysctl = apparmor_sysctl,
++ .capable = apparmor_capable,
++ .syslog = cap_syslog,
++
++ .netlink_send = cap_netlink_send,
++ .netlink_recv = cap_netlink_recv,
++
++ .bprm_apply_creds = cap_bprm_apply_creds,
++ .bprm_set_security = apparmor_bprm_set_security,
++ .bprm_secureexec = apparmor_bprm_secureexec,
++
++ .sb_mount = apparmor_sb_mount,
++ .sb_umount = apparmor_umount,
++
++ .inode_mkdir = apparmor_inode_mkdir,
++ .inode_rmdir = apparmor_inode_rmdir,
++ .inode_create = apparmor_inode_create,
++ .inode_link = apparmor_inode_link,
++ .inode_unlink = apparmor_inode_unlink,
++ .inode_symlink = apparmor_inode_symlink,
++ .inode_mknod = apparmor_inode_mknod,
++ .inode_rename = apparmor_inode_rename,
++ .inode_permission = apparmor_inode_permission,
++ .inode_setattr = apparmor_inode_setattr,
++ .inode_setxattr = apparmor_inode_setxattr,
++ .inode_getxattr = apparmor_inode_getxattr,
++ .inode_listxattr = apparmor_inode_listxattr,
++ .inode_removexattr = apparmor_inode_removexattr,
++ .file_permission = apparmor_file_permission,
++ .file_alloc_security = apparmor_file_alloc_security,
++ .file_free_security = apparmor_file_free_security,
++ .file_mmap = apparmor_file_mmap,
++ .file_mprotect = apparmor_file_mprotect,
++ .file_lock = apparmor_file_lock,
++
++ .task_alloc_security = apparmor_task_alloc_security,
++ .task_free_security = apparmor_task_free_security,
++ .task_post_setuid = cap_task_post_setuid,
++ .task_reparent_to_init = cap_task_reparent_to_init,
++
++ .getprocattr = apparmor_getprocattr,
++ .setprocattr = apparmor_setprocattr,
++};
++
++void info_message(const char *str)
++{
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.gfp_mask = GFP_KERNEL;
++ sa.info = str;
++ printk(KERN_INFO "AppArmor: %s\n", str);
++ if (audit_enabled)
++ aa_audit_message(NULL, &sa, AUDIT_APPARMOR_STATUS);
++}
++
++static int __init apparmor_init(void)
++{
++ int error;
++
++ if (!apparmor_enabled) {
++ info_message("AppArmor disabled by boottime parameter\n");
++ return 0;
++ }
++
++ if ((error = create_apparmorfs())) {
++ AA_ERROR("Unable to activate AppArmor filesystem\n");
++ goto createfs_out;
++ }
++
++ if ((error = alloc_default_namespace())) {
++ AA_ERROR("Unable to allocate default profile namespace\n");
++ goto alloc_out;
++ }
++
++ if ((error = register_security(&apparmor_ops))) {
++ AA_ERROR("Unable to register AppArmor\n");
++ goto register_security_out;
++ }
++
++ /* Report that AppArmor successfully initialized */
++ apparmor_initialized = 1;
++ if (apparmor_complain)
++ info_message("AppArmor initialized: complainmode enabled");
++ else
++ info_message("AppArmor initialized");
++
++ return error;
++
++register_security_out:
++ free_default_namespace();
++
++alloc_out:
++ destroy_apparmorfs();
++
++createfs_out:
++ return error;
++}
++
++security_initcall(apparmor_init);
++
++void apparmor_disable(void)
++{
++ /* Remove and release all the profiles on the profile list. */
++ mutex_lock(&aa_interface_lock);
++ aa_profile_ns_list_release();
++
++ /* FIXME: cleanup profiles references on files */
++ free_default_namespace();
++
++ /*
++ * Delay for an rcu cycle to make sure that all active task
++ * context readers have finished, and all profiles have been
++ * freed by their rcu callbacks.
++ */
++ synchronize_rcu();
++
++ destroy_apparmorfs();
++ mutex_unlock(&aa_interface_lock);
++
++ apparmor_initialized = 0;
++
++ info_message("AppArmor protection removed");
++}
++
++MODULE_DESCRIPTION("AppArmor process confinement");
++MODULE_AUTHOR("Novell/Immunix, http://bugs.opensuse.org");
++MODULE_LICENSE("GPL");
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: Main Part
+
+The underlying functions by which the AppArmor LSM hooks are implemented.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ security/apparmor/main.c | 1478 +++++++++++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 1478 insertions(+)
+
+--- /dev/null
++++ b/security/apparmor/main.c
+@@ -0,0 +1,1478 @@
++/*
++ * Copyright (C) 2002-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor Core
++ */
++
++#include <linux/security.h>
++#include <linux/namei.h>
++#include <linux/audit.h>
++#include <linux/mount.h>
++#include <linux/ptrace.h>
++
++#include "apparmor.h"
++
++#include "inline.h"
++
++/*
++ * Table of capability names: we generate it from capabilities.h.
++ */
++static const char *capability_names[] = {
++#include "capability_names.h"
++};
++
++struct aa_namespace *default_namespace;
++
++static int aa_inode_mode(struct inode *inode)
++{
++ /* if the inode doesn't exist the user is creating it */
++ if (!inode || current->fsuid == inode->i_uid)
++ return AA_USER_SHIFT;
++ return AA_OTHER_SHIFT;
++}
++
++int alloc_default_namespace(void)
++{
++ struct aa_namespace *ns;
++ char *name = kstrdup("default", GFP_KERNEL);
++ if (!name)
++ return -ENOMEM;
++ ns = alloc_aa_namespace(name);
++ if (!ns) {
++ kfree(name);
++ return -ENOMEM;
++ }
++
++ write_lock(&profile_ns_list_lock);
++ default_namespace = ns;
++ aa_get_namespace(ns);
++ list_add(&ns->list, &profile_ns_list);
++ write_unlock(&profile_ns_list_lock);
++
++ return 0;
++}
++
++void free_default_namespace(void)
++{
++ write_lock(&profile_ns_list_lock);
++ list_del_init(&default_namespace->list);
++ write_unlock(&profile_ns_list_lock);
++ aa_put_namespace(default_namespace);
++ default_namespace = NULL;
++}
++
++static void aa_audit_file_sub_mask(struct audit_buffer *ab, char *buffer,
++ int mask)
++{
++ const char unsafex[] = "upcn";
++ const char safex[] = "UPCN";
++ char *m = buffer;
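++ /* build a compact permission string such as "mrwix" into buffer */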
++
++ if (mask & AA_EXEC_MMAP)
++ *m++ = 'm';
++ if (mask & MAY_READ)
++ *m++ = 'r';
++ if (mask & MAY_WRITE)
++ *m++ = 'w';
++ else if (mask & MAY_APPEND)
++ *m++ = 'a';
++ if (mask & MAY_EXEC) {
++ int index = AA_EXEC_INDEX(mask);
++ /* all indexes > 4 are also named transitions */
++ if (index > 4)
++ index = 4;
++ if (index > 0) {
++ if (mask & AA_EXEC_UNSAFE)
++ *m++ = unsafex[index - 1];
++ else
++ *m++ = safex[index - 1];
++ }
++ if (mask & AA_EXEC_INHERIT)
++ *m++ = 'i';
++ *m++ = 'x';
++ }
++ if (mask & AA_MAY_LINK)
++ *m++ = 'l';
++ if (mask & AA_MAY_LOCK)
++ *m++ = 'k';
++ *m++ = '\0';
++}
++
++static void aa_audit_file_mask(struct audit_buffer *ab, const char *name,
++ int mask)
++{
++ char user[10], other[10];
++
++ aa_audit_file_sub_mask(ab, user,
++ (mask & AA_USER_PERMS) >> AA_USER_SHIFT);
++ aa_audit_file_sub_mask(ab, other,
++ (mask & AA_OTHER_PERMS) >> AA_OTHER_SHIFT);
++
++ audit_log_format(ab, " %s=\"%s::%s\"", name, user, other);
++}
++
++/**
++ * aa_audit_base - Log an audit event to the audit subsystem
++ * @profile: profile to check against
++ * @sa: audit event
++ * @audit_cxt: audit context to log message to
++ * @type: audit event number
++ */
++static int aa_audit_base(struct aa_profile *profile, struct aa_audit *sa,
++ struct audit_context *audit_cxt, int type)
++{
++ struct audit_buffer *ab = NULL;
++
++ ab = audit_log_start(audit_cxt, sa->gfp_mask, type);
++
++ if (!ab) {
++ AA_ERROR("Unable to log event (%d) to audit subsys\n",
++ type);
++ /* don't fail operations in complain mode even if logging
++ * fails */
++ return type == AUDIT_APPARMOR_ALLOWED ? 0 : -ENOMEM;
++ }
++
++ if (sa->operation)
++ audit_log_format(ab, "operation=\"%s\"", sa->operation);
++
++ if (sa->info) {
++ audit_log_format(ab, " info=\"%s\"", sa->info);
++ if (sa->error_code)
++ audit_log_format(ab, " error=%d", sa->error_code);
++ }
++
++ if (sa->request_mask)
++ aa_audit_file_mask(ab, "requested_mask", sa->request_mask);
++
++ if (sa->denied_mask)
++ aa_audit_file_mask(ab, "denied_mask", sa->denied_mask);
++
++ if (sa->request_mask)
++ audit_log_format(ab, " fsuid=%d", current->fsuid);
++
++ if (sa->iattr) {
++ struct iattr *iattr = sa->iattr;
++
++ audit_log_format(ab, " attribute=\"%s%s%s%s%s%s%s\"",
++ iattr->ia_valid & ATTR_MODE ? "mode," : "",
++ iattr->ia_valid & ATTR_UID ? "uid," : "",
++ iattr->ia_valid & ATTR_GID ? "gid," : "",
++ iattr->ia_valid & ATTR_SIZE ? "size," : "",
++ iattr->ia_valid & (ATTR_ATIME | ATTR_ATIME_SET) ?
++ "atime," : "",
++ iattr->ia_valid & (ATTR_MTIME | ATTR_MTIME_SET) ?
++ "mtime," : "",
++ iattr->ia_valid & ATTR_CTIME ? "ctime," : "");
++ }
++
++ if (sa->task)
++ audit_log_format(ab, " task=%d", sa->task);
++
++ if (sa->parent)
++ audit_log_format(ab, " parent=%d", sa->parent);
++
++ if (sa->name) {
++ audit_log_format(ab, " name=");
++ audit_log_untrustedstring(ab, sa->name);
++ }
++
++ if (sa->name2) {
++ audit_log_format(ab, " name2=");
++ audit_log_untrustedstring(ab, sa->name2);
++ }
++
++ audit_log_format(ab, " pid=%d", current->pid);
++
++ if (profile) {
++ audit_log_format(ab, " profile=");
++ audit_log_untrustedstring(ab, profile->name);
++
++ if (profile->ns != default_namespace) {
++ audit_log_format(ab, " namespace=");
++ audit_log_untrustedstring(ab, profile->ns->name);
++ }
++ }
++
++ audit_log_end(ab);
++
++ return type == AUDIT_APPARMOR_ALLOWED ? 0 : sa->error_code;
++}
++
++/**
++ * aa_audit_syscallreject - Log a syscall rejection to the audit subsystem
++ * @profile: profile to check against
++ * @gfp: memory allocation flags
++ * @msg: string describing syscall being rejected
++ */
++int aa_audit_syscallreject(struct aa_profile *profile, gfp_t gfp,
++ const char *msg)
++{
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "syscall";
++ sa.name = msg;
++ sa.gfp_mask = gfp;
++ sa.error_code = -EPERM;
++
++ return aa_audit_base(profile, &sa, current->audit_context,
++ AUDIT_APPARMOR_DENIED);
++}
++
++int aa_audit_message(struct aa_profile *profile, struct aa_audit *sa,
++ int type)
++{
++ struct audit_context *audit_cxt;
++
++ audit_cxt = apparmor_logsyscall ? current->audit_context : NULL;
++ return aa_audit_base(profile, sa, audit_cxt, type);
++}
++
++void aa_audit_hint(struct aa_profile *profile, struct aa_audit *sa)
++{
++ aa_audit_message(profile, sa, AUDIT_APPARMOR_HINT);
++}
++
++void aa_audit_status(struct aa_profile *profile, struct aa_audit *sa)
++{
++ aa_audit_message(profile, sa, AUDIT_APPARMOR_STATUS);
++}
++
++int aa_audit_reject(struct aa_profile *profile, struct aa_audit *sa)
++{
++ return aa_audit_message(profile, sa, AUDIT_APPARMOR_DENIED);
++}
++
++/**
++ * aa_audit - Log an audit event to the audit subsystem
++ * @profile: profile to check against
++ * @sa: audit event
++ */
++int aa_audit(struct aa_profile *profile, struct aa_audit *sa)
++{
++ int type = AUDIT_APPARMOR_DENIED;
++ struct audit_context *audit_cxt;
++
++ if (likely(!sa->error_code))
++ type = AUDIT_APPARMOR_AUDIT;
++ else if (PROFILE_COMPLAIN(profile))
++ type = AUDIT_APPARMOR_ALLOWED;
++
++ audit_cxt = apparmor_logsyscall ? current->audit_context : NULL;
++ return aa_audit_base(profile, sa, audit_cxt, type);
++}
++
++static int aa_audit_file(struct aa_profile *profile, struct aa_audit *sa)
++{
++ if (likely(!sa->error_code)) {
++ int mask = sa->audit_mask & AUDIT_FILE_MASK;
++
++ if (unlikely(PROFILE_AUDIT(profile)))
++ mask |= AUDIT_FILE_MASK;
++
++ if (likely(!(sa->request_mask & mask)))
++ return 0;
++
++ /* mask off perms that are not being force audited */
++ sa->request_mask &= mask | ALL_AA_EXEC_TYPE;
++ } else {
++ int mask = AUDIT_QUIET_MASK(sa->audit_mask);
++
++ if (!(sa->denied_mask & ~mask))
++ return sa->error_code;
++
++ /* mask off perms whose denial is being silenced */
++ sa->denied_mask &= (~mask) | ALL_AA_EXEC_TYPE;
++ }
++
++ return aa_audit(profile, sa);
++}
++
++static int aa_audit_caps(struct aa_profile *profile, struct aa_audit *sa,
++ int cap)
++{
++ if (likely(!sa->error_code)) {
++ if (likely(!PROFILE_AUDIT(profile) &&
++ !cap_raised(profile->audit_caps, cap)))
++ return 0;
++ }
++
++ /* quieting of capabilities is handled by the caps_logged cache */
++ return aa_audit(profile, sa);
++}
++
++/**
++ * aa_file_denied - check for @mask access on a file
++ * @profile: profile to check against
++ * @name: pathname of file
++ * @mask: permission mask requested for file
++ * @audit_mask: return audit mask for the match
++ *
++ * Return %0 on success, or else the permissions in @mask that the
++ * profile denies.
++ */
++static int aa_file_denied(struct aa_profile *profile, const char *name,
++ int mask, int *audit_mask)
++{
++ return (mask & ~aa_match(profile->file_rules, name, audit_mask));
++}
++
++/**
++ * aa_link_denied - check for permission to link a file
++ * @profile: profile to check against
++ * @link: pathname of link being created
++ * @target: pathname of target to be linked to
++ * @target_mode: UGO shift for target inode
++ * @request_mask: the permissions subset valid only if link succeeds
++ * @audit_mask: return the audit_mask for the link permission
++ * Return %0 on success, or else the permissions that the profile denies.
++ */
++static int aa_link_denied(struct aa_profile *profile, const char *link,
++ const char *target, int target_mode,
++ int *request_mask, int *audit_mask)
++{
++ unsigned int state;
++ int l_mode, t_mode, l_x, t_x, denied_mask = 0;
++ int link_mask = AA_MAY_LINK << target_mode;
++
++ *request_mask = link_mask;
++
++ l_mode = aa_match_state(profile->file_rules, DFA_START, link, &state);
++
++ if (l_mode & link_mask) {
++ int mode;
++ /* test to see if target can be paired with link */
++ state = aa_dfa_null_transition(profile->file_rules, state);
++ mode = aa_match_state(profile->file_rules, state, target,
++ &state);
++
++ if (!(mode & link_mask))
++ denied_mask |= link_mask;
++
++ *audit_mask = dfa_audit_mask(profile->file_rules, state);
++
++ /* return if link subset test is not required */
++ if (!(mode & (AA_LINK_SUBSET_TEST << target_mode)))
++ return denied_mask;
++ }
++
++ /* Do the link perm subset test, requiring that the permissions on
++ * the link are a subset of the permissions on the target.
++ * If a subset test is required, the perms for the link are tested
++ * against the user::other of the target's 'r', 'w', 'x', 'a', 'k',
++ * and 'm' permissions.
++ *
++ * If the link has 'x', all of the execute flags must match exactly.
++ */
++ denied_mask |= ~l_mode & link_mask;
++
++ t_mode = aa_match(profile->file_rules, target, NULL);
++
++ l_x = l_mode & (ALL_AA_EXEC_TYPE | AA_EXEC_BITS);
++ t_x = t_mode & (ALL_AA_EXEC_TYPE | AA_EXEC_BITS);
++
++ /* For actual subset test ignore valid-profile-transition flags,
++ * and link bits
++ */
++ l_mode &= AA_FILE_PERMS & ~AA_LINK_BITS;
++ t_mode &= AA_FILE_PERMS & ~AA_LINK_BITS;
++
++ *request_mask = l_mode | link_mask;
++
++ if (l_mode) {
++ int x = l_x | (t_x & ALL_AA_EXEC_UNSAFE);
++ denied_mask |= l_mode & ~t_mode;
++ /* mask off x modes not used by link */
++
++ /* handle exec subset
++ * - link safe exec issubset of unsafe exec
++ * - no link x perm is subset of target having x perm
++ */
++ if ((l_mode & AA_USER_EXEC) &&
++ (x & AA_USER_EXEC_TYPE) != (t_x & AA_USER_EXEC_TYPE))
++ denied_mask = AA_USER_EXEC | (l_x & AA_USER_EXEC_TYPE);
++ if ((l_mode & AA_OTHER_EXEC) &&
++ (x & AA_OTHER_EXEC_TYPE) != (t_x & AA_OTHER_EXEC_TYPE))
++ denied_mask = AA_OTHER_EXEC | (l_x & AA_OTHER_EXEC_TYPE);
++ }
++
++ return denied_mask;
++}
++
++/**
++ * aa_get_name - compute the pathname of a file
++ * @dentry: dentry of the file
++ * @mnt: vfsmount of the file
++ * @buffer: buffer that aa_get_name() allocated
++ * @check: AA_CHECK_DIR is set if the file is a directory
++ *
++ * Returns a pointer to the beginning of the pathname (which usually differs
++ * from the beginning of the buffer), or an error code.
++ *
++ * We need @check to indicate whether the file is a directory or not because
++ * the file may not yet exist, and so we cannot check the inode's file type.
++ */
++static char *aa_get_name(struct dentry *dentry, struct vfsmount *mnt,
++ char **buffer, int check)
++{
++ char *name;
++ int is_dir, size = 256;
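++ /* reserve one byte so a trailing '/' can be appended for
++ * directories; double the buffer until the path fits */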
++
++ is_dir = (check & AA_CHECK_DIR) ? 1 : 0;
++
++ for (;;) {
++ char *buf = kmalloc(size, GFP_KERNEL);
++ if (!buf)
++ return ERR_PTR(-ENOMEM);
++
++ name = d_namespace_path(dentry, mnt, buf, size - is_dir);
++ if (!IS_ERR(name)) {
++ if (name[0] != '/') {
++ /*
++ * This dentry is not connected to the
++ * namespace root -- reject access.
++ */
++ kfree(buf);
++ return ERR_PTR(-ENOENT);
++ }
++ if (is_dir && name[1] != '\0') {
++ /*
++ * Append "/" to the pathname. The root
++ * directory is a special case; it already
++ * ends in slash.
++ */
++ buf[size - 2] = '/';
++ buf[size - 1] = '\0';
++ }
++
++ *buffer = buf;
++ return name;
++ }
++ if (PTR_ERR(name) != -ENAMETOOLONG)
++ return name;
++
++ kfree(buf);
++ size <<= 1;
++ if (size > apparmor_path_max)
++ return ERR_PTR(-ENAMETOOLONG);
++ }
++}
++
++static char *new_compound_name(const char *n1, const char *n2)
++{
++ char *name = kmalloc(strlen(n1) + strlen(n2) + 3, GFP_KERNEL);
++ if (name)
++ sprintf(name, "%s//%s", n1, n2);
++ return name;
++}
++
++static inline void aa_put_name_buffer(char *buffer)
++{
++ kfree(buffer);
++}
++
++/**
++ * aa_perm_dentry - check if @profile allows @mask for a file
++ * @profile: profile to check against
++ * @dentry: dentry of the file
++ * @mnt: vfsmount of the file
++ * @sa: audit context
++ * @mask: requested profile permissions
++ * @check: kind of check to perform
++ *
++ * Returns 0 upon success, or else an error code.
++ *
++ * @check indicates the file type, and whether the file was accessed through
++ * an open file descriptor (AA_CHECK_FD) or not.
++ */
++static int aa_perm_dentry(struct aa_profile *profile, struct dentry *dentry,
++ struct vfsmount *mnt, struct aa_audit *sa, int check)
++{
++ int error;
++ char *buffer = NULL;
++
++ sa->name = aa_get_name(dentry, mnt, &buffer, check);
++ sa->request_mask <<= aa_inode_mode(dentry->d_inode);
++ if (IS_ERR(sa->name)) {
++ /*
++ * deleted files are given a pass on permission checks when
++ * accessed through a file descriptor.
++ */
++ if (PTR_ERR(sa->name) == -ENOENT && (check & AA_CHECK_FD))
++ sa->denied_mask = 0;
++ else {
++ sa->denied_mask = sa->request_mask;
++ sa->error_code = PTR_ERR(sa->name);
++ if (sa->error_code == -ENOENT)
++ sa->info = "Failed name resolution - object not a valid entry";
++ else if (sa->error_code == -ENAMETOOLONG)
++ sa->info = "Failed name resolution - name too long";
++ else
++ sa->info = "Failed name resolution";
++ }
++ sa->name = NULL;
++ } else
++ sa->denied_mask = aa_file_denied(profile, sa->name,
++ sa->request_mask,
++ &sa->audit_mask);
++
++ if (!sa->denied_mask)
++ sa->error_code = 0;
++
++ error = aa_audit_file(profile, sa);
++ aa_put_name_buffer(buffer);
++
++ return error;
++}
++
++/**
++ * aa_attr - check if attribute change is allowed
++ * @profile: profile to check against
++ * @dentry: dentry of the file to check
++ * @mnt: vfsmount of the file to check
++ * @iattr: attribute changes requested
++ */
++int aa_attr(struct aa_profile *profile, struct dentry *dentry,
++ struct vfsmount *mnt, struct iattr *iattr)
++{
++ struct inode *inode = dentry->d_inode;
++ int error, check;
++ struct aa_audit sa;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "setattr";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.iattr = iattr;
++ sa.request_mask = MAY_WRITE;
++ sa.error_code = -EACCES;
++
++ check = 0;
++ if (inode && S_ISDIR(inode->i_mode))
++ check |= AA_CHECK_DIR;
++ if (iattr->ia_valid & ATTR_FILE)
++ check |= AA_CHECK_FD;
++
++ error = aa_perm_dentry(profile, dentry, mnt, &sa, check);
++
++ return error;
++}
++
++/**
++ * aa_perm_xattr - check if xattr attribute change is allowed
++ * @profile: profile to check against
++ * @dentry: dentry of the file to check
++ * @mnt: vfsmount of the file to check
++ * @operation: xattr operation being done
++ * @mask: access mode requested
++ * @check: kind of check to perform
++ */
++int aa_perm_xattr(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt, int mask,
++ int check)
++{
++ struct inode *inode = dentry->d_inode;
++ int error;
++ struct aa_audit sa;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.request_mask = mask;
++ sa.error_code = -EACCES;
++
++ if (inode && S_ISDIR(inode->i_mode))
++ check |= AA_CHECK_DIR;
++
++ error = aa_perm_dentry(profile, dentry, mnt, &sa, check);
++
++ return error;
++}
++
++/**
++ * aa_perm - basic apparmor permissions check
++ * @profile: profile to check against
++ * @dentry: dentry of the file to check
++ * @mnt: vfsmount of the file to check
++ * @mask: access mode requested
++ * @check: kind of check to perform
++ *
++ * Determine if access @mask for the file is authorized by @profile.
++ * Returns 0 on success, or else an error code.
++ */
++int aa_perm(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt, int mask, int check)
++{
++ struct aa_audit sa;
++ int error = 0;
++
++ if (mask == 0)
++ goto out;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.request_mask = mask;
++ sa.error_code = -EACCES;
++
++ error = aa_perm_dentry(profile, dentry, mnt, &sa, check);
++
++out:
++ return error;
++}
++
++/**
++ * aa_perm_dir
++ * @profile: profile to check against
++ * @dentry: dentry of directory to check
++ * @mnt: vfsmount of directory to check
++ * @operation: directory operation being performed
++ * @mask: access mode requested
++ *
++ * Determine if directory operation (make/remove) for dentry is authorized
++ * by @profile.
++ * Returns 0 on success, or else an error code.
++ */
++int aa_perm_dir(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt, int mask)
++{
++ struct aa_audit sa;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.request_mask = mask;
++ sa.error_code = -EACCES;
++
++ return aa_perm_dentry(profile, dentry, mnt, &sa, AA_CHECK_DIR);
++}
++
++int aa_perm_path(struct aa_profile *profile, const char *operation,
++ const char *name, int mask, uid_t uid)
++{
++ struct aa_audit sa;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.request_mask = mask;
++ sa.name = name;
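++ /* shift the request into the user or other half of the permission mask */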
++ if (current->fsuid == uid)
++ sa.request_mask = mask << AA_USER_SHIFT;
++ else
++ sa.request_mask = mask << AA_OTHER_SHIFT;
++
++ sa.denied_mask = aa_file_denied(profile, name, sa.request_mask,
++ &sa.audit_mask) ;
++ sa.error_code = sa.denied_mask ? -EACCES : 0;
++
++ return aa_audit_file(profile, &sa);
++}
++
++/**
++ * aa_capability - test permission to use capability
++ * @cxt: aa_task_context with profile to check against
++ * @cap: capability to be tested
++ *
++ * Look up capability in profile capability set.
++ * Returns 0 on success, or else an error code.
++ */
++int aa_capability(struct aa_task_context *cxt, int cap)
++{
++ int error = cap_raised(cxt->profile->capabilities, cap) ? 0 : -EPERM;
++ struct aa_audit sa;
++
++ /* test if cap has already been logged */
++ if (cap_raised(cxt->caps_logged, cap)) {
++ if (PROFILE_COMPLAIN(cxt->profile))
++ error = 0;
++ return error;
++ } else
++ /* don't worry about rcu replacement of the cxt here.
++ * caps_logged is a cache to reduce the occurrence of
++ * duplicate messages in the log. The worst that can
++ * happen is that duplicate capability messages show up
++ * in the audit log
++ */
++ cap_raise(cxt->caps_logged, cap);
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "capable";
++ sa.gfp_mask = GFP_ATOMIC;
++ sa.name = capability_names[cap];
++ sa.error_code = error;
++
++ error = aa_audit_caps(cxt->profile, &sa, cap);
++
++ return error;
++}
++
++/* must be used inside rcu_read_lock or task_lock */
++int aa_may_ptrace(struct aa_task_context *cxt, struct aa_profile *tracee)
++{
++ if (!cxt || cxt->profile == tracee)
++ return 0;
++ return aa_capability(cxt, CAP_SYS_PTRACE);
++}
++
++/**
++ * aa_link - hard link check
++ * @profile: profile to check against
++ * @link: dentry of link being created
++ * @link_mnt: vfsmount of link being created
++ * @target: dentry of link target
++ * @target_mnt: vfsmount of link target
++ *
++ * Returns 0 on success, or else an error code.
++ */
++int aa_link(struct aa_profile *profile,
++ struct dentry *link, struct vfsmount *link_mnt,
++ struct dentry *target, struct vfsmount *target_mnt)
++{
++ int error;
++ struct aa_audit sa;
++ char *buffer = NULL, *buffer2 = NULL;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "inode_link";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.name = aa_get_name(link, link_mnt, &buffer, 0);
++ sa.name2 = aa_get_name(target, target_mnt, &buffer2, 0);
++
++ if (IS_ERR(sa.name)) {
++ sa.error_code = PTR_ERR(sa.name);
++ sa.name = NULL;
++ }
++ if (IS_ERR(sa.name2)) {
++ sa.error_code = PTR_ERR(sa.name2);
++ sa.name2 = NULL;
++ }
++
++ if (sa.name && sa.name2) {
++ sa.denied_mask = aa_link_denied(profile, sa.name, sa.name2,
++ aa_inode_mode(target->d_inode),
++ &sa.request_mask,
++ &sa.audit_mask);
++ sa.error_code = sa.denied_mask ? -EACCES : 0;
++ }
++
++ error = aa_audit_file(profile, &sa);
++
++ aa_put_name_buffer(buffer);
++ aa_put_name_buffer(buffer2);
++
++ return error;
++}
++
++/*******************************
++ * Global task related functions
++ *******************************/
++
++/**
++ * aa_clone - initialize the task context for a new task
++ * @child: task that is being created
++ *
++ * Returns 0 on success, or else an error code.
++ */
++int aa_clone(struct task_struct *child)
++{
++ struct aa_task_context *cxt, *child_cxt;
++ struct aa_profile *profile;
++
++ if (!aa_task_context(current))
++ return 0;
++ child_cxt = aa_alloc_task_context(GFP_KERNEL);
++ if (!child_cxt)
++ return -ENOMEM;
++
++repeat:
++ profile = aa_get_profile(current);
++ if (profile) {
++ lock_profile(profile);
++ cxt = aa_task_context(current);
++ if (unlikely(profile->isstale || !cxt ||
++ cxt->profile != profile)) {
++ /*
++ * Race with profile replacement or removal, or with
++ * task context removal.
++ */
++ unlock_profile(profile);
++ aa_put_profile(profile);
++ goto repeat;
++ }
++
++ /* No need to grab the child's task lock here. */
++ aa_change_task_context(child, child_cxt, profile,
++ cxt->cookie, cxt->previous_profile);
++ unlock_profile(profile);
++
++ if (APPARMOR_COMPLAIN(child_cxt) &&
++ profile == profile->ns->null_complain_profile) {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "clone";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.task = child->pid;
++ aa_audit_hint(profile, &sa);
++ }
++ aa_put_profile(profile);
++ } else
++ aa_free_task_context(child_cxt);
++
++ return 0;
++}
++
++static struct aa_profile *
++aa_register_find(struct aa_profile *profile, const char *ns_name,
++ const char *name, int mandatory, int complain,
++ struct aa_audit *sa)
++{
++ struct aa_namespace *ns;
++ struct aa_profile *new_profile;
++ int ns_ref = 0;
++
++ if (profile)
++ ns = profile->ns;
++ else
++ ns = default_namespace;
++
++ if (ns_name) {
++ /* locate the profile namespace */
++ ns = aa_find_namespace(ns_name);
++ if (!ns) {
++ if (mandatory) {
++ sa->info = "profile namespace not found";
++ sa->denied_mask = sa->request_mask;
++ sa->error_code = -ENOENT;
++ return ERR_PTR(-ENOENT);
++ } else {
++ return NULL;
++ }
++ }
++ ns_ref++;
++ }
++
++ /* Locate new profile */
++ new_profile = aa_find_profile(ns, name);
++
++ if (new_profile) {
++ AA_DEBUG("%s: setting profile %s\n",
++ __FUNCTION__, new_profile->name);
++ } else if (mandatory && profile) {
++ sa->info = "mandatory profile missing";
++ sa->denied_mask = sa->request_mask; /* shifted MAY_EXEC */
++ if (complain) {
++ aa_audit_hint(profile, sa);
++ new_profile =
++ aa_dup_profile(profile->ns->null_complain_profile);
++ } else {
++ sa->error_code = -EACCES;
++ if (ns_ref)
++ aa_put_namespace(ns);
++ return ERR_PTR(-EACCES);
++ }
++ } else {
++ /* Only way we can get into this code is if task
++ * is unconfined, pix, nix.
++ */
++ AA_DEBUG("%s: No profile found for exec image '%s'\n",
++ __FUNCTION__, name);
++ }
++ if (ns_ref)
++ aa_put_namespace(ns);
++ return new_profile;
++}
++
++static struct aa_profile *
++aa_x_to_profile(struct aa_profile *profile, const char *filename, int xmode,
++ struct aa_audit *sa, char **child)
++{
++ struct aa_profile *new_profile = NULL;
++ int ix = xmode & AA_EXEC_INHERIT;
++ int complain = PROFILE_COMPLAIN(profile);
++ int index;
++
++ *child = NULL;
++ switch (xmode & AA_EXEC_MODIFIERS) {
++ case 0:
++ /* only valid with ix flag */
++ ix = 1;
++ break;
++ case AA_EXEC_UNCONFINED:
++ /* only valid without ix flag */
++ ix = 0;
++ break;
++ case AA_EXEC_PROFILE:
++ new_profile = aa_register_find(profile, NULL, filename, !ix,
++ complain, sa);
++ break;
++ case AA_EXEC_CHILD:
++ *child = new_compound_name(profile->name, filename);
++ sa->name2 = *child;
++ if (!*child) {
++ sa->info = "Failed name resolution - exec failed";
++ sa->error_code = -ENOMEM;
++ new_profile = ERR_PTR(-ENOMEM);
++ } else {
++ new_profile = aa_register_find(profile, NULL, *child,
++ !ix, complain, sa);
++ }
++ break;
++ default:
++ /* all other indexes are named transitions */
++ index = AA_EXEC_INDEX(xmode);
++ if (index - 4 > profile->exec_table_size) {
++ sa->info = "invalid named transition - exec failed";
++ sa->error_code = -EACCES;
++ new_profile = ERR_PTR(-EACCES);
++ } else {
++ char *ns_name = NULL;
++ char *name = profile->exec_table[index - 4];
++ if (*name == ':') {
++ ns_name = name + 1;
++ name = ns_name + strlen(ns_name) + 1;
++ }
++ sa->name2 = name;
++ sa->name3 = ns_name;
++ new_profile =
++ aa_register_find(profile, ns_name, name,
++ !ix, complain, sa);
++ }
++ }
++ if (IS_ERR(new_profile))
++ /* all these failures must be audited - no quieting */
++ return ERR_PTR(aa_audit_reject(profile, sa));
++ return new_profile;
++}
++
++/**
++ * aa_register - register a new program
++ * @bprm: binprm of program being registered
++ *
++ * Try to register a new program during execve(). This should give the
++ * new program a valid aa_task_context if confined.
++ */
++int aa_register(struct linux_binprm *bprm)
++{
++ const char *filename;
++ char *buffer = NULL, *child = NULL;
++ struct file *filp = bprm->file;
++ struct aa_profile *profile, *old_profile, *new_profile = NULL;
++ int exec_mode, complain = 0, shift;
++ struct aa_audit sa;
++
++ AA_DEBUG("%s\n", __FUNCTION__);
++
++ profile = aa_get_profile(current);
++
++ shift = aa_inode_mode(filp->f_dentry->d_inode);
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "exec";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.request_mask = MAY_EXEC << shift;
++
++ filename = aa_get_name(filp->f_dentry, filp->f_vfsmnt, &buffer, 0);
++ if (IS_ERR(filename)) {
++ if (profile) {
++ sa.info = "Failed name resolution - exec failed";
++ sa.error_code = PTR_ERR(filename);
++ aa_audit_file(profile, &sa);
++ return sa.error_code;
++ } else
++ return 0;
++ }
++ sa.name = filename;
++
++ exec_mode = AA_EXEC_UNSAFE << shift;
++
++repeat:
++ if (profile) {
++ complain = PROFILE_COMPLAIN(profile);
++
++ /* Confined task, determine what mode inherit, unconfined or
++ * mandatory to load new profile
++ */
++ exec_mode = aa_match(profile->file_rules, filename,
++ &sa.audit_mask);
++
++ if (exec_mode & sa.request_mask) {
++ int xm = exec_mode >> shift;
++ new_profile = aa_x_to_profile(profile, filename,
++ xm, &sa, &child);
++
++ if (!new_profile && (xm & AA_EXEC_INHERIT))
++ /* (p|c|n|)ix - don't change profile */
++ goto cleanup;
++ /* error case caught below */
++
++ } else if (sa.request_mask & AUDIT_QUIET_MASK(sa.audit_mask)) {
++ /* quiet failed exit */
++ new_profile = ERR_PTR(-EACCES);
++ } else if (complain) {
++ /* There was no entry in calling profile
++ * describing mode to execute image in.
++ * Drop into null-profile (disabling secure exec).
++ */
++ new_profile =
++ aa_dup_profile(profile->ns->null_complain_profile);
++ exec_mode |= AA_EXEC_UNSAFE << shift;
++ } else {
++ sa.denied_mask = sa.request_mask;
++ sa.error_code = -EACCES;
++ new_profile = ERR_PTR(aa_audit_file(profile, &sa));
++ }
++ } else {
++ /* Unconfined task, load profile if it exists */
++ new_profile = aa_register_find(NULL, NULL, filename, 0, 0, &sa);
++ if (new_profile == NULL)
++ goto cleanup;
++ }
++
++ if (IS_ERR(new_profile))
++ goto cleanup;
++
++ old_profile = __aa_replace_profile(current, new_profile);
++ if (IS_ERR(old_profile)) {
++ aa_put_profile(new_profile);
++ aa_put_profile(profile);
++ if (PTR_ERR(old_profile) == -ESTALE) {
++ profile = aa_get_profile(current);
++ goto repeat;
++ }
++ if (PTR_ERR(old_profile) == -EPERM) {
++ sa.denied_mask = sa.request_mask;
++ sa.info = "unable to set profile due to ptrace";
++ sa.task = current->parent->pid;
++ aa_audit_reject(profile, &sa);
++ }
++ new_profile = old_profile;
++ goto cleanup;
++ }
++ aa_put_profile(old_profile);
++ aa_put_profile(profile);
++
++ /* Handle confined exec.
++ * Can be at this point for the following reasons:
++ * 1. unconfined switching to confined
++ * 2. confined switching to different confinement
++ * 3. confined switching to unconfined
++ *
++ * Cases 2 and 3 are marked as requiring secure exec
++ * (unless policy specified "unsafe exec")
++ */
++ if (!(exec_mode & (AA_EXEC_UNSAFE << shift))) {
++ unsigned long bprm_flags;
++
++ bprm_flags = AA_SECURE_EXEC_NEEDED;
++ bprm->security = (void *)
++ ((unsigned long)bprm->security | bprm_flags);
++ }
++
++ if (complain && new_profile &&
++ new_profile == new_profile->ns->null_complain_profile) {
++ sa.request_mask = 0;
++ sa.name = NULL;
++ sa.info = "set profile";
++ aa_audit_hint(new_profile, &sa);
++ }
++
++cleanup:
++ aa_put_name_buffer(child);
++ aa_put_name_buffer(buffer);
++ if (IS_ERR(new_profile))
++ return PTR_ERR(new_profile);
++ aa_put_profile(new_profile);
++ return 0;
++}
++
++/**
++ * aa_release - release a task context
++ * @task: task being released
++ *
++ * This is called after a task has exited and the parent has reaped it.
++ */
++void aa_release(struct task_struct *task)
++{
++ struct aa_task_context *cxt;
++ struct aa_profile *profile;
++ /*
++ * While the task context is still on a profile's task context
++ * list, another process could replace the profile under us,
++ * leaving us with a locked profile that is no longer attached
++ * to this task. So after locking the profile, we check that
++ * the profile is still attached. The profile lock is
++ * sufficient to prevent the replacement race so we do not lock
++ * the task.
++ *
++ * Use lock subtyping to avoid lockdep reporting a false
++ * possible irq inversion between the task_lock and profile_lock.
++ *
++ * We also avoid taking the task_lock here because lockdep
++ * would report another false {softirq-on-W} potential irq_lock
++ * inversion.
++ *
++ * If the task does not have a profile attached we are safe;
++ * nothing can race with us at this point.
++ */
++
++repeat:
++ profile = aa_get_profile(task);
++ if (profile) {
++ lock_profile_nested(profile, aa_lock_task_release);
++ cxt = aa_task_context(task);
++ if (unlikely(!cxt || cxt->profile != profile)) {
++ unlock_profile(profile);
++ aa_put_profile(profile);
++ goto repeat;
++ }
++ aa_change_task_context(task, NULL, NULL, 0, NULL);
++ unlock_profile(profile);
++ aa_put_profile(profile);
++ }
++}
++
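++/**
++ * do_change_profile - switch the current task to another profile
++ * @expected: profile the task is expected to currently be confined by
++ * @ns: namespace to look the new profile up in
++ * @name: name of the profile to change to
++ * @cookie: magic cookie used to validate returning from a hat
++ * @restore: true if returning to a previously saved profile
++ * @hat: true if the target profile must be a hat
++ * @sa: audit structure partially filled in by the caller
++ *
++ * Returns 0 on success, -ESTALE if the task's confinement changed
++ * underneath us, or another error code.
++ */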
++static int do_change_profile(struct aa_profile *expected,
++ struct aa_namespace *ns, const char *name,
++ u64 cookie, int restore, int hat,
++ struct aa_audit *sa)
++{
++ struct aa_profile *new_profile = NULL, *old_profile = NULL,
++ *previous_profile = NULL;
++ struct aa_task_context *new_cxt, *cxt;
++ int error = 0;
++
++ sa->name = name;
++
++ new_cxt = aa_alloc_task_context(GFP_KERNEL);
++ if (!new_cxt)
++ return -ENOMEM;
++
++ new_profile = aa_find_profile(ns, name);
++ if (!new_profile && !restore) {
++ if (!PROFILE_COMPLAIN(expected)) {
++ aa_free_task_context(new_cxt);
++ return -ENOENT;
++ }
++ new_profile = aa_dup_profile(ns->null_complain_profile);
++ } else if (new_profile && hat && !PROFILE_IS_HAT(new_profile)) {
++ aa_free_task_context(new_cxt);
++ aa_put_profile(new_profile);
++ return error;
++ }
++
++ cxt = lock_task_and_profiles(current, new_profile);
++ if (!cxt) {
++ error = -EPERM;
++ goto out;
++ }
++ old_profile = cxt->profile;
++
++ if (cxt->profile != expected || (new_profile && new_profile->isstale)) {
++ error = -ESTALE;
++ goto out;
++ }
++
++ if (cxt->previous_profile) {
++ if (cxt->cookie != cookie) {
++ error = -EACCES;
++ sa->info = "killing process";
++ aa_audit_reject(cxt->profile, sa);
++ /* terminate process */
++ (void)send_sig_info(SIGKILL, NULL, current);
++ goto out;
++ }
++
++ if (!restore)
++ previous_profile = cxt->previous_profile;
++ } else
++ previous_profile = cxt->profile;
++
++ if ((current->ptrace & PT_PTRACED) && aa_may_ptrace(cxt, new_profile)) {
++ error = -EACCES;
++ goto out;
++ }
++
++ if (new_profile == ns->null_complain_profile)
++ aa_audit_hint(cxt->profile, sa);
++
++ if (APPARMOR_AUDIT(cxt))
++ aa_audit_message(cxt->profile, sa, AUDIT_APPARMOR_AUDIT);
++
++ if (!restore && cookie)
++ aa_change_task_context(current, new_cxt, new_profile, cookie,
++ previous_profile);
++ else
++ /* either return to previous_profile, or a permanent change */
++ aa_change_task_context(current, new_cxt, new_profile, 0, NULL);
++
++out:
++ if (aa_task_context(current) != new_cxt)
++ aa_free_task_context(new_cxt);
++ task_unlock(current);
++ unlock_both_profiles(old_profile, new_profile);
++ aa_put_profile(new_profile);
++ return error;
++}
++
++/**
++ * aa_change_profile - perform a one-way profile transition
++ * @ns_name: name of the profile namespace to change to
++ * @name: name of profile to change to
++ *
++ * Change to new profile @name. Unlike with hats, there is no way
++ * to change back.
++ *
++ * Returns %0 on success, error otherwise.
++ */
++int aa_change_profile(const char *ns_name, const char *name)
++{
++ struct aa_task_context *cxt;
++ struct aa_profile *profile = NULL;
++ struct aa_namespace *ns = NULL;
++ struct aa_audit sa;
++ unsigned int state;
++ int error = -EINVAL;
++
++ if (!name)
++ return -EINVAL;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.gfp_mask = GFP_ATOMIC;
++ sa.operation = "change_profile";
++
++repeat:
++ task_lock(current);
++ cxt = aa_task_context(current);
++ if (cxt)
++ profile = aa_dup_profile(cxt->profile);
++ task_unlock(current);
++
++ if (ns_name)
++ ns = aa_find_namespace(ns_name);
++ else if (profile)
++ ns = aa_get_namespace(profile->ns);
++ else
++ ns = aa_get_namespace(default_namespace);
++
++ if (!ns) {
++ aa_put_profile(profile);
++ return -ENOENT;
++ }
++
++ if (!profile || PROFILE_COMPLAIN(profile) ||
++ (ns == profile->ns &&
++ (aa_match(profile->file_rules, name, NULL) & AA_CHANGE_PROFILE)))
++ error = do_change_profile(profile, ns, name, 0, 0, 0, &sa);
++ else {
++ /* check for a rule with a namespace prepended */
++ aa_match_state(profile->file_rules, DFA_START, ns->name,
++ &state);
++ state = aa_dfa_null_transition(profile->file_rules, state);
++ if ((aa_match_state(profile->file_rules, state, name, NULL) &
++ AA_CHANGE_PROFILE))
++ error = do_change_profile(profile, ns, name, 0, 0, 0,
++ &sa);
++ else
++ /* no permission to transition to profile @name */
++ error = -EACCES;
++ }
++
++ aa_put_namespace(ns);
++ aa_put_profile(profile);
++ if (error == -ESTALE)
++ goto repeat;
++
++ return error;
++}
++
++/**
++ * aa_change_hat - change hat to/from subprofile
++ * @hat_name: hat to change to
++ * @cookie: magic value to validate the hat change
++ *
++ * Change to new @hat_name, and store the @cookie in the current task
++ * context. If the new @hat_name is %NULL and the @cookie matches that
++ * stored in the current task context and is not 0, return to the top level
++ * profile.
++ * Returns %0 on success, error otherwise.
++ */
++int aa_change_hat(const char *hat_name, u64 cookie)
++{
++ struct aa_task_context *cxt;
++ struct aa_profile *profile, *previous_profile;
++ struct aa_audit sa;
++ int error = 0;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.gfp_mask = GFP_ATOMIC;
++ sa.operation = "change_hat";
++
++repeat:
++ task_lock(current);
++ cxt = aa_task_context(current);
++ if (!cxt) {
++ task_unlock(current);
++ return -EPERM;
++ }
++ profile = aa_dup_profile(cxt->profile);
++ previous_profile = aa_dup_profile(cxt->previous_profile);
++ task_unlock(current);
++
++ if (hat_name) {
++ char *name, *profile_name;
++
++ if (previous_profile)
++ profile_name = previous_profile->name;
++ else
++ profile_name = profile->name;
++
++ name = new_compound_name(profile_name, hat_name);
++ if (!name) {
++ error = -ENOMEM;
++ goto out;
++ }
++ error = do_change_profile(profile, profile->ns, name, cookie,
++ 0, 1, &sa);
++ aa_put_name_buffer(name);
++ } else if (previous_profile)
++ error = do_change_profile(profile, profile->ns,
++ previous_profile->name, cookie, 1, 0,
++ &sa);
++ /* else ignore restores when there is no saved profile */
++
++out:
++ aa_put_profile(previous_profile);
++ aa_put_profile(profile);
++ if (error == -ESTALE)
++ goto repeat;
++
++ return error;
++}
++
++/**
++ * __aa_replace_profile - replace a task's profile
++ * @task: task to switch the profile of
++ * @profile: profile to switch to
++ *
++ * Returns a handle to the previous profile upon success, or else an
++ * error code.
++ */
++struct aa_profile *__aa_replace_profile(struct task_struct *task,
++ struct aa_profile *profile)
++{
++ struct aa_task_context *cxt, *new_cxt = NULL;
++ struct aa_profile *old_profile = NULL;
++
++ if (profile) {
++ new_cxt = aa_alloc_task_context(GFP_KERNEL);
++ if (!new_cxt)
++ return ERR_PTR(-ENOMEM);
++ }
++
++ cxt = lock_task_and_profiles(task, profile);
++ if (unlikely(profile && profile->isstale)) {
++ task_unlock(task);
++ unlock_both_profiles(profile, cxt ? cxt->profile : NULL);
++ aa_free_task_context(new_cxt);
++ return ERR_PTR(-ESTALE);
++ }
++
++ if ((current->ptrace & PT_PTRACED) && aa_may_ptrace(cxt, profile)) {
++ task_unlock(task);
++ unlock_both_profiles(profile, cxt ? cxt->profile : NULL);
++ aa_free_task_context(new_cxt);
++ return ERR_PTR(-EPERM);
++ }
++
++ if (cxt)
++ old_profile = aa_dup_profile(cxt->profile);
++ aa_change_task_context(task, new_cxt, profile, 0, NULL);
++
++ task_unlock(task);
++ unlock_both_profiles(profile, old_profile);
++ return old_profile;
++}
++
++/**
++ * lock_task_and_profiles - lock the task and confining profiles and @profile
++ * @task: task to lock
++ * @profile: extra profile to lock in addition to the current profile
++ *
++ * Handle the spinning on locking to make sure the task context and
++ * profile are consistent once all locks are acquired.
++ *
++ * Returns the aa_task_context currently confining the task. The task lock
++ * will be held whether or not the task is confined.
++ */
++struct aa_task_context *
++lock_task_and_profiles(struct task_struct *task, struct aa_profile *profile)
++{
++ struct aa_task_context *cxt;
++ struct aa_profile *old_profile = NULL;
++
++ rcu_read_lock();
++repeat:
++ cxt = aa_task_context(task);
++ if (cxt)
++ old_profile = cxt->profile;
++
++ lock_both_profiles(profile, old_profile);
++ task_lock(task);
++
++ /* check for race with profile transition, replacement or removal */
++ if (unlikely(cxt != aa_task_context(task))) {
++ task_unlock(task);
++ unlock_both_profiles(profile, old_profile);
++ old_profile = NULL;
++ goto repeat;
++ }
++ rcu_read_unlock();
++ return cxt;
++}
++
++static void free_aa_task_context_rcu_callback(struct rcu_head *head)
++{
++ struct aa_task_context *cxt;
++
++ cxt = container_of(head, struct aa_task_context, rcu);
++ aa_free_task_context(cxt);
++}
++
++/**
++ * aa_change_task_context - switch a task to use a new context and profile
++ * @task: task that is having its task context changed
++ * @new_cxt: new task context to use after the switch
++ * @profile: new profile to use after the switch
++ * @cookie: magic value to switch to
++ * @previous_profile: profile the task can return to
++ */
++void aa_change_task_context(struct task_struct *task,
++ struct aa_task_context *new_cxt,
++ struct aa_profile *profile, u64 cookie,
++ struct aa_profile *previous_profile)
++{
++ struct aa_task_context *old_cxt = aa_task_context(task);
++
++ if (old_cxt) {
++ list_del_init(&old_cxt->list);
++ call_rcu(&old_cxt->rcu, free_aa_task_context_rcu_callback);
++ }
++ if (new_cxt) {
++ /* Set the caps_logged cache to the quiet_caps mask.
++ * This has the effect of quieting caps that are not
++ * supposed to be logged.
++ */
++ new_cxt->caps_logged = profile->quiet_caps;
++ new_cxt->cookie = cookie;
++ new_cxt->task = task;
++ new_cxt->profile = aa_dup_profile(profile);
++ new_cxt->previous_profile = aa_dup_profile(previous_profile);
++ list_move(&new_cxt->list, &profile->task_contexts);
++ }
++ rcu_assign_pointer(task->security, new_cxt);
++}
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: all the rest
+
+All the things that didn't nicely fit in a category on their own: kbuild
+code, declarations and inline functions, /sys/kernel/security/apparmor
+filesystem for controlling apparmor from user space, profile list
+functions, locking documentation, /proc/$pid/task/$tid/attr/current
+access.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ security/apparmor/Kconfig | 42 ++++
+ security/apparmor/Makefile | 13 +
+ security/apparmor/apparmor.h | 371 +++++++++++++++++++++++++++++++++++++++++
+ security/apparmor/apparmorfs.c | 281 +++++++++++++++++++++++++++++++
+ security/apparmor/inline.h | 250 +++++++++++++++++++++++++++
+ security/apparmor/list.c | 174 +++++++++++++++++++
+ security/apparmor/locking.txt | 68 +++++++
+ security/apparmor/procattr.c | 195 +++++++++++++++++++++
+ 8 files changed, 1394 insertions(+)
+
+--- /dev/null
++++ b/security/apparmor/Kconfig
+@@ -0,0 +1,42 @@
++config SECURITY_APPARMOR
++ bool "AppArmor support"
++ depends on SECURITY
++ select AUDIT
++ help
++ This enables the AppArmor security module.
++ Required userspace tools (if they are not included in your
++ distribution) and further information may be found at
++ <http://forge.novell.com/modules/xfmod/project/?apparmor>
++
++ If you are unsure how to answer this question, answer N.
++
++config SECURITY_APPARMOR_BOOTPARAM_VALUE
++ int "AppArmor boot parameter default value"
++ depends on SECURITY_APPARMOR
++ range 0 1
++ default 1
++ help
++ This option sets the default value for the kernel parameter
++ 'apparmor', which allows AppArmor to be enabled or disabled
++ at boot. If this option is set to 0 (zero), the AppArmor
++ kernel parameter will default to 0, disabling AppArmor at
++ bootup. If this option is set to 1 (one), the AppArmor
++ kernel parameter will default to 1, enabling AppArmor at
++ bootup.
++
++ If you are unsure how to answer this question, answer 1.
++
++config SECURITY_APPARMOR_DISABLE
++ bool "AppArmor runtime disable"
++ depends on SECURITY_APPARMOR
++ default n
++ help
++ This option enables writing to an apparmorfs node 'disable', which
++ allows AppArmor to be disabled at runtime prior to the policy load.
++ AppArmor will then remain disabled until the next boot.
++ This option is similar to the apparmor.enabled=0 boot parameter,
++ but is to support runtime disabling of AppArmor, e.g. from
++ /sbin/init, for portability across platforms where boot
++ parameters are difficult to employ.
++
++ If you are unsure how to answer this question, answer N.
+--- /dev/null
++++ b/security/apparmor/Makefile
+@@ -0,0 +1,13 @@
++# Makefile for AppArmor Linux Security Module
++#
++obj-$(CONFIG_SECURITY_APPARMOR) += apparmor.o
++
++apparmor-y := main.o list.o procattr.o lsm.o apparmorfs.o \
++ module_interface.o match.o
++
++quiet_cmd_make-caps = GEN $@
++cmd_make-caps = sed -n -e "/CAP_FS_MASK/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z > $@
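++# e.g. '#define CAP_CHOWN 0' in capability.h becomes '[0] = "chown",'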
++
++$(obj)/main.o : $(obj)/capability_names.h
++$(obj)/capability_names.h : $(srctree)/include/linux/capability.h
++ $(call cmd,make-caps)
+--- /dev/null
++++ b/security/apparmor/apparmor.h
+@@ -0,0 +1,371 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor internal prototypes
++ */
++
++#ifndef __APPARMOR_H
++#define __APPARMOR_H
++
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/binfmts.h>
++#include <linux/rcupdate.h>
++
++/*
++ * We use MAY_READ, MAY_WRITE, MAY_EXEC, MAY_APPEND and the following flags
++ * for profile permissions
++ */
++#define AA_MAY_LINK 0x0010
++#define AA_MAY_LOCK 0x0020
++#define AA_EXEC_MMAP 0x0040
++#define AA_MAY_MOUNT 0x0080 /* no direct audit mapping */
++#define AA_EXEC_UNSAFE 0x0100
++#define AA_EXEC_INHERIT 0x0200
++#define AA_EXEC_MOD_0 0x0400
++#define AA_EXEC_MOD_1 0x0800
++#define AA_EXEC_MOD_2 0x1000
++#define AA_EXEC_MOD_3 0x2000
++
++#define AA_BASE_PERMS (MAY_READ | MAY_WRITE | MAY_EXEC | \
++ MAY_APPEND | AA_MAY_LINK | \
++ AA_MAY_LOCK | AA_EXEC_MMAP | \
++ AA_MAY_MOUNT | AA_EXEC_UNSAFE | \
++ AA_EXEC_INHERIT | AA_EXEC_MOD_0 | \
++ AA_EXEC_MOD_1 | AA_EXEC_MOD_2 | \
++ AA_EXEC_MOD_3)
++
++#define AA_EXEC_MODIFIERS (AA_EXEC_MOD_0 | AA_EXEC_MOD_1 | \
++ AA_EXEC_MOD_2 | AA_EXEC_MOD_3)
++
++#define AA_EXEC_TYPE (AA_EXEC_UNSAFE | AA_EXEC_INHERIT | \
++ AA_EXEC_MODIFIERS)
++
++#define AA_EXEC_UNCONFINED AA_EXEC_MOD_0
++#define AA_EXEC_PROFILE AA_EXEC_MOD_1
++#define AA_EXEC_CHILD (AA_EXEC_MOD_0 | AA_EXEC_MOD_1)
++/* remaining exec modes are index into profile name table */
++#define AA_EXEC_INDEX(mode) (((mode) & AA_EXEC_MODIFIERS) >> 10)
++
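++/*
++ * Each permission set appears twice in a DFA accept entry: shifted by
++ * AA_USER_SHIFT when the task's fsuid matches the file owner, and by
++ * AA_OTHER_SHIFT otherwise (see aa_perm_path() in main.c).
++ */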
++#define AA_USER_SHIFT 0
++#define AA_OTHER_SHIFT 14
++
++#define AA_USER_PERMS (AA_BASE_PERMS << AA_USER_SHIFT)
++#define AA_OTHER_PERMS (AA_BASE_PERMS << AA_OTHER_SHIFT)
++
++#define AA_FILE_PERMS (AA_USER_PERMS | AA_OTHER_PERMS)
++
++#define AA_LINK_BITS ((AA_MAY_LINK << AA_USER_SHIFT) | \
++ (AA_MAY_LINK << AA_OTHER_SHIFT))
++
++#define AA_USER_EXEC (MAY_EXEC << AA_USER_SHIFT)
++#define AA_OTHER_EXEC (MAY_EXEC << AA_OTHER_SHIFT)
++
++#define AA_USER_EXEC_TYPE (AA_EXEC_TYPE << AA_USER_SHIFT)
++#define AA_OTHER_EXEC_TYPE (AA_EXEC_TYPE << AA_OTHER_SHIFT)
++
++#define AA_EXEC_BITS (AA_USER_EXEC | AA_OTHER_EXEC)
++
++#define ALL_AA_EXEC_UNSAFE ((AA_EXEC_UNSAFE << AA_USER_SHIFT) | \
++ (AA_EXEC_UNSAFE << AA_OTHER_SHIFT))
++
++#define ALL_AA_EXEC_TYPE (AA_USER_EXEC_TYPE | AA_OTHER_EXEC_TYPE)
++
++/* overloaded permissions for link pairs */
++#define AA_LINK_SUBSET_TEST 0x0020
++
++#define AA_USER_PTRACE 0x10000000
++#define AA_OTHER_PTRACE 0x20000000
++#define AA_PTRACE_PERMS (AA_USER_PTRACE | AA_OTHER_PTRACE)
++
++/* shared permissions that are not duplicated in user::other */
++#define AA_CHANGE_HAT 0x40000000
++#define AA_CHANGE_PROFILE 0x80000000
++
++#define AA_SHARED_PERMS (AA_CHANGE_HAT | AA_CHANGE_PROFILE)
++
++#define AA_VALID_PERM_MASK (AA_FILE_PERMS | AA_PTRACE_PERMS | \
++ AA_SHARED_PERMS)
++
++/* audit bits for the second accept field */
++#define AUDIT_FILE_MASK 0x1fc07f
++#define AUDIT_QUIET_MASK(mask) (((mask) >> 7) & AUDIT_FILE_MASK)
++#define AA_VALID_PERM2_MASK 0x0fffffff
++
++#define AA_SECURE_EXEC_NEEDED 1
++
++/* Control parameters (0 or 1), settable through module/boot flags or
++ * via /sys/kernel/security/apparmor/control */
++extern int apparmor_complain;
++extern int apparmor_debug;
++extern int apparmor_audit;
++extern int apparmor_logsyscall;
++extern unsigned int apparmor_path_max;
++
++#define PROFILE_COMPLAIN(_profile) \
++ (apparmor_complain == 1 || ((_profile) && (_profile)->flags.complain))
++
++#define APPARMOR_COMPLAIN(_cxt) \
++ (apparmor_complain == 1 || \
++ ((_cxt) && (_cxt)->profile && (_cxt)->profile->flags.complain))
++
++#define PROFILE_AUDIT(_profile) \
++ (apparmor_audit == 1 || ((_profile) && (_profile)->flags.audit))
++
++#define APPARMOR_AUDIT(_cxt) \
++ (apparmor_audit == 1 || \
++ ((_cxt) && (_cxt)->profile && (_cxt)->profile->flags.audit))
++
++#define PROFILE_IS_HAT(_profile) \
++ ((_profile) && (_profile)->flags.hat)
++
++/*
++ * DEBUG remains global (no per profile flag) since it is mostly used in sysctl
++ * which is not related to profile accesses.
++ */
++
++#define AA_DEBUG(fmt, args...) \
++ do { \
++ if (apparmor_debug) \
++ printk(KERN_DEBUG "AppArmor: " fmt, ##args); \
++ } while (0)
++
++#define AA_ERROR(fmt, args...) printk(KERN_ERR "AppArmor: " fmt, ##args)
++
++struct aa_profile;
++
++/* struct aa_namespace - namespace for a set of profiles
++ * @name: the name of the namespace
++ * @list: list the namespace is on
++ * @profiles: list of profile in the namespace
++ * @profile_count: the number of profiles in the namespace
++ * @null_complain_profile: special profile used for learning in this namespace
++ * @count: reference count on the namespace
++ * @lock: lock for adding/removing profile to the namespace
++ */
++struct aa_namespace {
++ char *name;
++ struct list_head list;
++ struct list_head profiles;
++ int profile_count;
++ struct aa_profile *null_complain_profile;
++
++ struct kref count;
++ rwlock_t lock;
++};
++
++/* struct aa_profile - basic confinement data
++ * @name: the profiles name
++ * @list: list this profile is on
++ * @ns: namespace the profile is in
++ * @file_rules: dfa containing the profiles file rules
++ * @flags: flags controlling profile behavior
++ * @isstale: flag indicating if profile is stale
++ * @set_caps: capabilities that are being set
++ * @capabilities: capabilities mask
++ * @audit_caps: caps that are to be audited
++ * @quiet_caps: caps that should not be audited
++ * @count: reference count of the profile
++ * @task_contexts: list of tasks confined by profile
++ * @lock: lock for the task_contexts list
++ *
++ * The AppArmor profile contains the basic confinement data. Each profile
++ * has a name, and all non-stale profiles are in a profile namespace.
++ *
++ * The task_contexts list and the isstale flag are protected by the
++ * profile lock.
++ *
++ * If a task context is moved between two profiles, we first need to grab
++ * both profile locks. lock_both_profiles() does that in a deadlock-safe
++ * way.
++ */
++struct aa_profile {
++ char *name;
++ struct list_head list;
++ struct aa_namespace *ns;
++
++ int exec_table_size;
++ char **exec_table;
++ struct aa_dfa *file_rules;
++ struct {
++ int hat;
++ int complain;
++ int audit;
++ } flags;
++ int isstale;
++
++ kernel_cap_t set_caps;
++ kernel_cap_t capabilities;
++ kernel_cap_t audit_caps;
++ kernel_cap_t quiet_caps;
++
++ struct kref count;
++ struct list_head task_contexts;
++ spinlock_t lock;
++ unsigned long int_flags;
++};
++
++extern struct list_head profile_ns_list;
++extern rwlock_t profile_ns_list_lock;
++extern struct mutex aa_interface_lock;
++
++/**
++ * struct aa_task_context - primary label for confined tasks
++ * @profile: the current profile
++ * @previous_profile: profile the task may return to
++ * @cookie: magic value the task must know for returning to @previous_profile
++ * @list: list this aa_task_context is on
++ * @task: task that the aa_task_context confines
++ * @rcu: rcu head used when freeing the aa_task_context
++ * @caps_logged: caps that have previously generated log entries
++ *
++ * Contains the task's current profile (which could change due to
++ * change_hat). Plus the hat_magic needed during change_hat.
++ */
++struct aa_task_context {
++ struct aa_profile *profile;
++ struct aa_profile *previous_profile;
++ u64 cookie;
++ struct list_head list;
++ struct task_struct *task;
++ struct rcu_head rcu;
++ kernel_cap_t caps_logged;
++};
++
++extern struct aa_namespace *default_namespace;
++
++/* aa_audit - AppArmor auditing structure
++ * Structure is populated by access control code and passed to aa_audit which
++ * provides for a single point of logging.
++ */
++
++struct aa_audit {
++ const char *operation;
++ gfp_t gfp_mask;
++ const char *info;
++ const char *name;
++ const char *name2;
++ const char *name3;
++ int request_mask, denied_mask, audit_mask;
++ struct iattr *iattr;
++ pid_t task, parent;
++ int error_code;
++};
++
++/* Flags for the permission check functions */
++#define AA_CHECK_FD 1 /* coming from a file descriptor */
++#define AA_CHECK_DIR 2 /* file type is directory */
++
++/* lock subtypes so lockdep does not raise false dependencies */
++enum aa_lock_class {
++ aa_lock_normal,
++ aa_lock_nested,
++ aa_lock_task_release
++};
++
++/* main.c */
++extern int alloc_default_namespace(void);
++extern void free_default_namespace(void);
++extern int aa_audit_message(struct aa_profile *profile, struct aa_audit *sa,
++ int type);
++void aa_audit_hint(struct aa_profile *profile, struct aa_audit *sa);
++void aa_audit_status(struct aa_profile *profile, struct aa_audit *sa);
++int aa_audit_reject(struct aa_profile *profile, struct aa_audit *sa);
++extern int aa_audit_syscallreject(struct aa_profile *profile, gfp_t gfp,
++ const char *);
++extern int aa_audit(struct aa_profile *profile, struct aa_audit *);
++
++extern int aa_attr(struct aa_profile *profile, struct dentry *dentry,
++ struct vfsmount *mnt, struct iattr *iattr);
++extern int aa_perm_xattr(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt,
++ int mask, int check);
++extern int aa_capability(struct aa_task_context *cxt, int cap);
++extern int aa_perm(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt, int mask,
++ int check);
++extern int aa_perm_dir(struct aa_profile *profile, const char *operation,
++ struct dentry *dentry, struct vfsmount *mnt,
++ int mask);
++extern int aa_perm_path(struct aa_profile *, const char *operation,
++ const char *name, int mask, uid_t uid);
++extern int aa_link(struct aa_profile *profile,
++ struct dentry *link, struct vfsmount *link_mnt,
++ struct dentry *target, struct vfsmount *target_mnt);
++extern int aa_clone(struct task_struct *task);
++extern int aa_register(struct linux_binprm *bprm);
++extern void aa_release(struct task_struct *task);
++extern int aa_change_hat(const char *id, u64 hat_magic);
++extern int aa_change_profile(const char *ns_name, const char *name);
++extern struct aa_profile *__aa_replace_profile(struct task_struct *task,
++ struct aa_profile *profile);
++extern struct aa_task_context *lock_task_and_profiles(struct task_struct *task,
++ struct aa_profile *profile);
++extern void unlock_task_and_profiles(struct task_struct *task,
++ struct aa_task_context *cxt,
++ struct aa_profile *profile);
++extern void aa_change_task_context(struct task_struct *task,
++ struct aa_task_context *new_cxt,
++ struct aa_profile *profile, u64 cookie,
++ struct aa_profile *previous_profile);
++extern int aa_may_ptrace(struct aa_task_context *cxt,
++ struct aa_profile *tracee);
++
++/* lsm.c */
++extern int apparmor_initialized;
++extern void info_message(const char *str);
++extern void apparmor_disable(void);
++
++/* list.c */
++extern struct aa_namespace *__aa_find_namespace(const char *name,
++ struct list_head *list);
++extern struct aa_profile *__aa_find_profile(const char *name,
++ struct list_head *list);
++extern void aa_profile_ns_list_release(void);
++
++/* module_interface.c */
++extern ssize_t aa_add_profile(void *, size_t);
++extern ssize_t aa_replace_profile(void *, size_t);
++extern ssize_t aa_remove_profile(char *, size_t);
++extern struct aa_namespace *alloc_aa_namespace(char *name);
++extern void free_aa_namespace(struct aa_namespace *ns);
++extern void free_aa_namespace_kref(struct kref *kref);
++extern struct aa_profile *alloc_aa_profile(void);
++extern void free_aa_profile(struct aa_profile *profile);
++extern void free_aa_profile_kref(struct kref *kref);
++extern void aa_unconfine_tasks(struct aa_profile *profile);
++
++/* procattr.c */
++extern int aa_getprocattr(struct aa_profile *profile, char **string,
++ unsigned *len);
++extern int aa_setprocattr_changehat(char *args);
++extern int aa_setprocattr_changeprofile(char *args);
++extern int aa_setprocattr_setprofile(struct task_struct *task, char *args);
++
++/* apparmorfs.c */
++extern int create_apparmorfs(void);
++extern void destroy_apparmorfs(void);
++
++/* match.c */
++extern struct aa_dfa *aa_match_alloc(void);
++extern void aa_match_free(struct aa_dfa *dfa);
++extern int unpack_dfa(struct aa_dfa *dfa, void *blob, size_t size);
++extern int verify_dfa(struct aa_dfa *dfa);
++extern unsigned int aa_dfa_match(struct aa_dfa *dfa, const char *str, int *);
++extern unsigned int aa_dfa_next_state(struct aa_dfa *dfa, unsigned int start,
++ const char *str);
++extern unsigned int aa_match_state(struct aa_dfa *dfa, unsigned int start,
++ const char *str, unsigned int *final);
++extern unsigned int aa_dfa_null_transition(struct aa_dfa *dfa,
++ unsigned int start);
++
++#endif /* __APPARMOR_H */
+--- /dev/null
++++ b/security/apparmor/apparmorfs.c
+@@ -0,0 +1,281 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor filesystem (part of securityfs)
++ */
++
++#include <linux/security.h>
++#include <linux/vmalloc.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/namei.h>
++
++#include "apparmor.h"
++#include "inline.h"
++
++static char *aa_simple_write_to_buffer(const char __user *userbuf,
++ size_t alloc_size, size_t copy_size,
++ loff_t *pos, const char *operation)
++{
++ struct aa_profile *profile;
++ char *data;
++
++ if (*pos != 0) {
++ /* only writes from pos 0, that is complete writes */
++ data = ERR_PTR(-ESPIPE);
++ goto out;
++ }
++
++ /*
++ * Don't allow confined processes to load/replace/remove profiles.
++ * No sane person would add rules allowing this to a profile
++ * but we enforce the restriction anyway.
++ */
++ profile = aa_get_profile(current);
++ if (profile) {
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.error_code = -EACCES;
++ data = ERR_PTR(aa_audit_reject(profile, &sa));
++ aa_put_profile(profile);
++ goto out;
++ }
++
++ data = vmalloc(alloc_size);
++ if (data == NULL) {
++ data = ERR_PTR(-ENOMEM);
++ goto out;
++ }
++
++ if (copy_from_user(data, userbuf, copy_size)) {
++ vfree(data);
++ data = ERR_PTR(-EFAULT);
++ goto out;
++ }
++
++out:
++ return data;
++}
++
++/* apparmor/profiles */
++extern struct seq_operations apparmorfs_profiles_op;
++
++static int aa_profiles_open(struct inode *inode, struct file *file)
++{
++ return seq_open(file, &apparmorfs_profiles_op);
++}
++
++
++static int aa_profiles_release(struct inode *inode, struct file *file)
++{
++ return seq_release(inode, file);
++}
++
++static struct file_operations apparmorfs_profiles_fops = {
++ .open = aa_profiles_open,
++ .read = seq_read,
++ .llseek = seq_lseek,
++ .release = aa_profiles_release,
++};
++
++/* apparmor/matching */
++static ssize_t aa_matching_read(struct file *file, char __user *buf,
++ size_t size, loff_t *ppos)
++{
++ const char *matching = "pattern=aadfa audit perms=rwxamlk/ user::other";
++
++ return simple_read_from_buffer(buf, size, ppos, matching,
++ strlen(matching));
++}
++
++static struct file_operations apparmorfs_matching_fops = {
++ .read = aa_matching_read,
++};
++
++/* apparmor/features */
++static ssize_t aa_features_read(struct file *file, char __user *buf,
++ size_t size, loff_t *ppos)
++{
++ const char *features = "file=3.0 capability=2.0 network=1.0 "
++ "change_hat=1.5 change_profile=1.0 "
++ "aanamespaces=1.0";
++
++ return simple_read_from_buffer(buf, size, ppos, features,
++ strlen(features));
++}
++
++static struct file_operations apparmorfs_features_fops = {
++ .read = aa_features_read,
++};
++
++/* apparmor/.load */
++static ssize_t aa_profile_load(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ char *data;
++ ssize_t error;
++
++ data = aa_simple_write_to_buffer(buf, size, size, pos, "profile_load");
++
++ error = PTR_ERR(data);
++ if (!IS_ERR(data)) {
++ error = aa_add_profile(data, size);
++ vfree(data);
++ }
++
++ return error;
++}
++
++
++static struct file_operations apparmorfs_profile_load = {
++ .write = aa_profile_load
++};
++
++/* apparmor/.replace */
++static ssize_t aa_profile_replace(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ char *data;
++ ssize_t error;
++
++ data = aa_simple_write_to_buffer(buf, size, size, pos,
++ "profile_replace");
++
++ error = PTR_ERR(data);
++ if (!IS_ERR(data)) {
++ error = aa_replace_profile(data, size);
++ vfree(data);
++ }
++
++ return error;
++}
++
++
++static struct file_operations apparmorfs_profile_replace = {
++ .write = aa_profile_replace
++};
++
++/* apparmor/.remove */
++static ssize_t aa_profile_remove(struct file *f, const char __user *buf,
++ size_t size, loff_t *pos)
++{
++ char *data;
++ ssize_t error;
++
++ /*
++ * aa_remove_profile needs a null-terminated string, so one extra
++ * byte is allocated and the copied data is null-terminated.
++ */
++ data = aa_simple_write_to_buffer(buf, size + 1, size, pos,
++ "profile_remove");
++
++ error = PTR_ERR(data);
++ if (!IS_ERR(data)) {
++ data[size] = 0;
++ error = aa_remove_profile(data, size);
++ vfree(data);
++ }
++
++ return error;
++}
++
++static struct file_operations apparmorfs_profile_remove = {
++ .write = aa_profile_remove
++};
++
++static struct dentry *apparmor_dentry;
++
++static void aafs_remove(const char *name)
++{
++ struct dentry *dentry;
++
++ dentry = lookup_one_len(name, apparmor_dentry, strlen(name));
++ if (!IS_ERR(dentry)) {
++ securityfs_remove(dentry);
++ dput(dentry);
++ }
++}
++
++static int aafs_create(const char *name, int mask, struct file_operations *fops)
++{
++ struct dentry *dentry;
++
++ dentry = securityfs_create_file(name, S_IFREG | mask, apparmor_dentry,
++ NULL, fops);
++
++ return IS_ERR(dentry) ? PTR_ERR(dentry) : 0;
++}
++
++void destroy_apparmorfs(void)
++{
++ if (apparmor_dentry) {
++ aafs_remove(".remove");
++ aafs_remove(".replace");
++ aafs_remove(".load");
++ aafs_remove("matching");
++ aafs_remove("features");
++ aafs_remove("profiles");
++ securityfs_remove(apparmor_dentry);
++ apparmor_dentry = NULL;
++ }
++}
++
++int create_apparmorfs(void)
++{
++ int error;
++
++ if (!apparmor_initialized)
++ return 0;
++
++ if (apparmor_dentry) {
++ AA_ERROR("%s: AppArmor securityfs already exists\n",
++ __FUNCTION__);
++ return -EEXIST;
++ }
++
++ apparmor_dentry = securityfs_create_dir("apparmor", NULL);
++ if (IS_ERR(apparmor_dentry)) {
++ error = PTR_ERR(apparmor_dentry);
++ apparmor_dentry = NULL;
++ goto error;
++ }
++ error = aafs_create("profiles", 0440, &apparmorfs_profiles_fops);
++ if (error)
++ goto error;
++ error = aafs_create("matching", 0444, &apparmorfs_matching_fops);
++ if (error)
++ goto error;
++ error = aafs_create("features", 0444, &apparmorfs_features_fops);
++ if (error)
++ goto error;
++ error = aafs_create(".load", 0640, &apparmorfs_profile_load);
++ if (error)
++ goto error;
++ error = aafs_create(".replace", 0640, &apparmorfs_profile_replace);
++ if (error)
++ goto error;
++ error = aafs_create(".remove", 0640, &apparmorfs_profile_remove);
++ if (error)
++ goto error;
++
++ /* Report that AppArmor fs is enabled */
++ info_message("AppArmor Filesystem Enabled");
++ return 0;
++
++error:
++ destroy_apparmorfs();
++ AA_ERROR("Error creating AppArmor securityfs\n");
++ apparmor_disable();
++ return error;
++}
++
++fs_initcall(create_apparmorfs);
++
+--- /dev/null
++++ b/security/apparmor/inline.h
+@@ -0,0 +1,250 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ */
++
++#ifndef __INLINE_H
++#define __INLINE_H
++
++#include <linux/sched.h>
++
++#include "match.h"
++
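++/* kernel-internal filesystems (those marked MS_NOUSER) are not mediated */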
++static inline int mediated_filesystem(struct inode *inode)
++{
++ return !(inode->i_sb->s_flags & MS_NOUSER);
++}
++
++static inline struct aa_task_context *aa_task_context(struct task_struct *task)
++{
++ return (struct aa_task_context *) rcu_dereference(task->security);
++}
++
++static inline struct aa_namespace *aa_get_namespace(struct aa_namespace *ns)
++{
++ if (ns)
++ kref_get(&(ns->count));
++
++ return ns;
++}
++
++static inline void aa_put_namespace(struct aa_namespace *ns)
++{
++ if (ns)
++ kref_put(&ns->count, free_aa_namespace_kref);
++}
++
++
++static inline struct aa_namespace *aa_find_namespace(const char *name)
++{
++ struct aa_namespace *ns = NULL;
++
++ read_lock(&profile_ns_list_lock);
++ ns = aa_get_namespace(__aa_find_namespace(name, &profile_ns_list));
++ read_unlock(&profile_ns_list_lock);
++
++ return ns;
++}
++
++/**
++ * aa_dup_profile - increment refcount on profile @p
++ * @p: profile
++ */
++static inline struct aa_profile *aa_dup_profile(struct aa_profile *p)
++{
++ if (p)
++ kref_get(&(p->count));
++
++ return p;
++}
++
++/**
++ * aa_put_profile - decrement refcount on profile @p
++ * @p: profile
++ */
++static inline void aa_put_profile(struct aa_profile *p)
++{
++ if (p)
++ kref_put(&p->count, free_aa_profile_kref);
++}
++
++static inline struct aa_profile *aa_get_profile(struct task_struct *task)
++{
++ struct aa_task_context *cxt;
++ struct aa_profile *profile = NULL;
++
++ rcu_read_lock();
++ cxt = aa_task_context(task);
++ if (cxt) {
++ profile = cxt->profile;
++ aa_dup_profile(profile);
++ }
++ rcu_read_unlock();
++
++ return profile;
++}
++
++static inline struct aa_profile *aa_find_profile(struct aa_namespace *ns,
++ const char *name)
++{
++ struct aa_profile *profile = NULL;
++
++ read_lock(&ns->lock);
++ profile = aa_dup_profile(__aa_find_profile(name, &ns->profiles));
++ read_unlock(&ns->lock);
++
++ return profile;
++}
++
++static inline struct aa_task_context *aa_alloc_task_context(gfp_t flags)
++{
++ struct aa_task_context *cxt;
++
++ cxt = kzalloc(sizeof(*cxt), flags);
++ if (cxt) {
++ INIT_LIST_HEAD(&cxt->list);
++ INIT_RCU_HEAD(&cxt->rcu);
++ }
++
++ return cxt;
++}
++
++static inline void aa_free_task_context(struct aa_task_context *cxt)
++{
++ if (cxt) {
++ aa_put_profile(cxt->profile);
++ aa_put_profile(cxt->previous_profile);
++ kfree(cxt);
++ }
++}
++
++/**
++ * lock_profile - lock a profile
++ * @profile: the profile to lock
++ *
++ * While the profile is locked, local interrupts are disabled. This also
++ * gives us RCU reader safety.
++ */
++static inline void lock_profile_nested(struct aa_profile *profile,
++ enum aa_lock_class lock_class)
++{
++ /*
++ * Lock the profile.
++ *
++ * Need to disable interrupts here because this lock is used in
++ * the task_free_security hook, which may run in RCU context.
++ */
++ if (profile)
++ spin_lock_irqsave_nested(&profile->lock, profile->int_flags,
++ lock_class);
++}
++
++static inline void lock_profile(struct aa_profile *profile)
++{
++ lock_profile_nested(profile, aa_lock_normal);
++}
++
++/**
++ * unlock_profile - unlock a profile
++ * @profile: the profile to unlock
++ */
++static inline void unlock_profile(struct aa_profile *profile)
++{
++ /* Unlock the profile. */
++ if (profile)
++ spin_unlock_irqrestore(&profile->lock, profile->int_flags);
++}
++
++/**
++ * lock_both_profiles - lock two profiles in a deadlock-free way
++ * @profile1: profile to lock (may be NULL)
++ * @profile2: profile to lock (may be NULL)
++ *
++ * The order in which profiles are passed into lock_both_profiles() /
++ * unlock_both_profiles() does not matter.
++ * While the profile is locked, local interrupts are disabled. This also
++ * gives us RCU reader safety.
++ */
++static inline void lock_both_profiles(struct aa_profile *profile1,
++ struct aa_profile *profile2)
++{
++ /*
++ * Lock the two profiles.
++ *
++ * We need to disable interrupts because the profile locks are
++ * used in the task_free_security hook, which may run in RCU
++ * context.
++ *
++ * Do not nest spin_lock_irqsave()/spin_unlock_irqresore():
++ * interrupts only need to be turned off once.
++ */
++ if (!profile1 || profile1 == profile2) {
++ if (profile2)
++ spin_lock_irqsave_nested(&profile2->lock,
++ profile2->int_flags,
++ aa_lock_normal);
++ } else if (profile1 > profile2) {
++ /* profile1 cannot be NULL here. */
++ spin_lock_irqsave_nested(&profile1->lock, profile1->int_flags,
++ aa_lock_normal);
++ if (profile2)
++ spin_lock_nested(&profile2->lock, aa_lock_nested);
++
++ } else {
++ /* profile2 cannot be NULL here. */
++ spin_lock_irqsave_nested(&profile2->lock, profile2->int_flags,
++ aa_lock_normal);
++ spin_lock_nested(&profile1->lock, aa_lock_nested);
++ }
++}
++
++/**
++ * unlock_both_profiles - unlock two profiles in a deadlock-free way
++ * @profile1: profile to unlock (may be NULL)
++ * @profile2: profile to unlock (may be NULL)
++ *
++ * The order in which profiles are passed into lock_both_profiles() /
++ * unlock_both_profiles() does not matter.
++ * While the profile is locked, local interrupts are disabled. This also
++ * gives us RCU reader safety.
++ */
++static inline void unlock_both_profiles(struct aa_profile *profile1,
++ struct aa_profile *profile2)
++{
++ /* Unlock the two profiles. */
++ if (!profile1 || profile1 == profile2) {
++ if (profile2)
++ spin_unlock_irqrestore(&profile2->lock,
++ profile2->int_flags);
++ } else if (profile1 > profile2) {
++ /* profile1 cannot be NULL here. */
++ if (profile2)
++ spin_unlock(&profile2->lock);
++ spin_unlock_irqrestore(&profile1->lock, profile1->int_flags);
++ } else {
++ /* profile2 cannot be NULL here. */
++ spin_unlock(&profile1->lock);
++ spin_unlock_irqrestore(&profile2->lock, profile2->int_flags);
++ }
++}
++
++static inline unsigned int aa_match(struct aa_dfa *dfa, const char *pathname,
++ int *audit_mask)
++{
++ if (dfa)
++ return aa_dfa_match(dfa, pathname, audit_mask);
++ if (audit_mask)
++ *audit_mask = 0;
++ return 0;
++}
++
++static inline int dfa_audit_mask(struct aa_dfa *dfa, unsigned int state)
++{
++ return ACCEPT_TABLE2(dfa)[state];
++}
++
++#endif /* __INLINE_H__ */
+--- /dev/null
++++ b/security/apparmor/list.c
+@@ -0,0 +1,174 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor Profile List Management
++ */
++
++#include <linux/seq_file.h>
++#include "apparmor.h"
++#include "inline.h"
++
++/* list of profile namespaces and lock */
++LIST_HEAD(profile_ns_list);
++rwlock_t profile_ns_list_lock = RW_LOCK_UNLOCKED;
++
++/**
++ * __aa_find_namespace - look up a profile namespace on the namespace list
++ * @name: name of namespace to find
++ * @head: list to search
++ *
++ * Returns a pointer to the namespace on the list, or NULL if no namespace
++ * called @name exists. The caller must hold the profile_ns_list_lock.
++ */
++struct aa_namespace *__aa_find_namespace(const char *name,
++ struct list_head *head)
++{
++ struct aa_namespace *ns;
++
++ list_for_each_entry(ns, head, list) {
++ if (!strcmp(ns->name, name))
++ return ns;
++ }
++
++ return NULL;
++}
++
++/**
++ * __aa_find_profile - look up a profile on the profile list
++ * @name: name of profile to find
++ * @head: list to search
++ *
++ * Returns a pointer to the profile on the list, or NULL if no profile
++ * called @name exists. The caller must hold the profile_list_lock.
++ */
++struct aa_profile *__aa_find_profile(const char *name, struct list_head *head)
++{
++ struct aa_profile *profile;
++
++ list_for_each_entry(profile, head, list) {
++ if (!strcmp(profile->name, name))
++ return profile;
++ }
++
++ return NULL;
++}
++
++static void aa_profile_list_release(struct list_head *head)
++{
++ struct aa_profile *profile, *tmp;
++ list_for_each_entry_safe(profile, tmp, head, list) {
++ /* Remove the profile from each task context it is on. */
++ lock_profile(profile);
++ profile->isstale = 1;
++ aa_unconfine_tasks(profile);
++ list_del_init(&profile->list);
++ unlock_profile(profile);
++ aa_put_profile(profile);
++ }
++}
++
++/**
++ * aa_profilelist_release - Remove all profiles from profile_list
++ */
++void aa_profile_ns_list_release(void)
++{
++ struct aa_namespace *ns, *tmp;
++
++ /* Remove and release all the profiles on namespace profile lists. */
++ write_lock(&profile_ns_list_lock);
++ list_for_each_entry_safe(ns, tmp, &profile_ns_list, list) {
++ write_lock(&ns->lock);
++ aa_profile_list_release(&ns->profiles);
++ list_del_init(&ns->list);
++ write_unlock(&ns->lock);
++ aa_put_namespace(ns);
++ }
++ write_unlock(&profile_ns_list_lock);
++}
++
++
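++/*
++ * next_profile - step to the next profile across all namespaces
++ *
++ * Walks the remaining profiles of the current namespace; when they are
++ * exhausted, drops that namespace's lock and read-locks the next
++ * namespace on profile_ns_list. Returns NULL, with no namespace lock
++ * held, after the last profile. Caller holds profile_ns_list_lock.
++ */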
++static struct aa_profile *next_profile(struct aa_profile *profile)
++{
++ struct aa_profile *next = profile;
++ struct aa_namespace *ns;
++
++ list_for_each_entry_continue(next, &profile->ns->profiles, list)
++ return next;
++
++ ns = profile->ns;
++ read_unlock(&ns->lock);
++ list_for_each_entry_continue(ns, &profile_ns_list, list) {
++ read_lock(&ns->lock);
++ list_for_each_entry(profile, &ns->profiles, list)
++ return profile;
++ read_unlock(&ns->lock);
++ }
++ return NULL;
++}
++
++static void *p_start(struct seq_file *f, loff_t *pos)
++{
++ struct aa_namespace *ns;
++ loff_t l = *pos;
++
++ read_lock(&profile_ns_list_lock);
++ if (!list_empty(&profile_ns_list)) {
++ struct aa_profile *profile = NULL;
++ ns = list_first_entry(&profile_ns_list, typeof(*ns), list);
++ read_lock(&ns->lock);
++ if (!list_empty(&ns->profiles))
++ profile = list_first_entry(&ns->profiles,
++ typeof(*profile), list);
++ else
++ read_unlock(&ns->lock);
++ for ( ; profile && l > 0; l--)
++ profile = next_profile(profile);
++ return profile;
++ }
++ return NULL;
++}
++
++static void *p_next(struct seq_file *f, void *p, loff_t *pos)
++{
++ struct aa_profile *profile = (struct aa_profile *) p;
++
++ (*pos)++;
++ profile = next_profile(profile);
++
++ return profile;
++}
++
++static void p_stop(struct seq_file *f, void *p)
++{
++ struct aa_profile *profile = (struct aa_profile *) p;
++
++ if (profile)
++ read_unlock(&profile->ns->lock);
++ read_unlock(&profile_ns_list_lock);
++}
++
++static int seq_show_profile(struct seq_file *f, void *p)
++{
++ struct aa_profile *profile = (struct aa_profile *)p;
++
++ if (profile->ns == default_namespace)
++ seq_printf(f, "%s (%s)\n", profile->name,
++ PROFILE_COMPLAIN(profile) ? "complain" : "enforce");
++ else
++ seq_printf(f, ":%s:%s (%s)\n", profile->ns->name, profile->name,
++ PROFILE_COMPLAIN(profile) ? "complain" : "enforce");
++ return 0;
++}
++
++/* Used in apparmorfs.c */
++struct seq_operations apparmorfs_profiles_op = {
++ .start = p_start,
++ .next = p_next,
++ .stop = p_stop,
++ .show = seq_show_profile,
++};
+--- /dev/null
++++ b/security/apparmor/locking.txt
+@@ -0,0 +1,68 @@
++Locking in AppArmor
++===================
++
++Lock hierarchy:
++
++ aa_interface_lock
++ profile_list_lock
++ aa_profile->lock
++ task_lock()
++
++
++Which lock protects what?
++
++ /-----------------------+-------------------------------\
++ | Variable | Lock |
++ >-----------------------+-------------------------------<
++ | profile_list | profile_list_lock |
++ +-----------------------+-------------------------------+
++ | aa_profile | (reference count) |
++ +-----------------------+-------------------------------+
++ | aa_profile-> | aa_profile->lock |
++ | isstale, | |
++ | task_contexts | |
++ +-----------------------+-------------------------------+
++ | task_struct->security | read: RCU |
++ | | write: task_lock() |
++ +-----------------------+-------------------------------+
++ | aa_profile->sub | handle on the profile (list |
++ | | is never modified) |
++ \-----------------------+-------------------------------/
++
++(Obviously, the list_heads embedded in data structures are always
++protected with the lock that also protects the list.)
++
++When moving a task context from one profile to another, we grab both
++profile locks with lock_both_profiles(). This ensures that both locks
++are always taken in the same order, and so we won't deadlock.
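++
++For example, a profile transition roughly follows this pattern (a
++simplified sketch of __aa_replace_profile() in main.c):
++
++    cxt = lock_task_and_profiles(task, new_profile);
++    /* ... check for staleness and ptrace restrictions ... */
++    old_profile = aa_dup_profile(cxt->profile);
++    aa_change_task_context(task, new_cxt, new_profile, 0, NULL);
++    task_unlock(task);
++    unlock_both_profiles(new_profile, old_profile);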
++
++Since task_struct->security is RCU protected, the aa_task_context it
++references is only guaranteed to exist for the RCU grace period. Where
++aa_task_context->profile is needed in blocking operations, the
++profile's reference count is incremented and the profile reference
++is used.
++
++Profiles on profile_list are never stale: when a profile becomes stale,
++it is removed from profile_list at the same time (under profile_list_lock
++and aa_profile->lock).
++
++The aa_interface_lock is taken whenever user-space modifies the profile
++list, and can sleep. This ensures that profile loading/replacement/removal
++won't race with itself. We release the profile_list_lock as soon as
++possible to avoid stalling exec during profile loading/replacement/removal.
++
++AppArmor uses lock subtyping to avoid false positives from lockdep. The
++profile lock is often taken nested, but it is guaranteed to be taken in
++a deadlock-safe order on two distinct locks, so this is safe.
++
++A third lock type (aa_lock_task_release) is given to the profile lock
++when it is taken in soft irq context during task release (aa_release).
++This avoids a false positive between the task lock and the profile
++lock. In task context the profile lock wraps the task lock with irqs
++off, but the kernel takes the task lock with irqs enabled. This cannot
++result in a deadlock, because a deadlock would require the kernel to
++take dead task A's lock (irqs on) while the rcu callback hook freeing
++dead task A is running and AppArmor is changing the profile on dead
++task A. The kernel should not be taking a dead task's task_lock at
++the same time the task is being freed by task rcu cleanup; otherwise
++the task would not be out of its quiescent period.
+--- /dev/null
++++ b/security/apparmor/procattr.c
+@@ -0,0 +1,195 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor /proc/pid/attr handling
++ */
++
++#include "apparmor.h"
++#include "inline.h"
++
++int aa_getprocattr(struct aa_profile *profile, char **string, unsigned *len)
++{
++ char *str;
++
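++ /* output format (illustrative): "/bin/ping (enforce)\n" for the
++ * default namespace, ":ns:/bin/ping (enforce)\n" otherwise */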
++ if (profile) {
++ const char *mode_str = PROFILE_COMPLAIN(profile) ?
++ " (complain)" : " (enforce)";
++ int mode_len, name_len, ns_len = 0;
++
++ mode_len = strlen(mode_str);
++ name_len = strlen(profile->name);
++ if (profile->ns != default_namespace)
++ ns_len = strlen(profile->ns->name) + 2;
++ *len = mode_len + ns_len + name_len + 1;
++ str = kmalloc(*len, GFP_ATOMIC);
++ if (!str)
++ return -ENOMEM;
++
++ if (ns_len) {
++ *str++ = ':';
++ memcpy(str, profile->ns->name, ns_len - 2);
++ str += ns_len - 2;
++ *str++ = ':';
++ }
++ memcpy(str, profile->name, name_len);
++ str += name_len;
++ memcpy(str, mode_str, mode_len);
++ str += mode_len;
++ *str++ = '\n';
++ str -= *len;
++ } else {
++ const char *unconfined_str = "unconfined\n";
++
++ *len = strlen(unconfined_str);
++ str = kmalloc(*len, GFP_ATOMIC);
++ if (!str)
++ return -ENOMEM;
++
++ memcpy(str, unconfined_str, *len);
++ }
++ *string = str;
++
++ return 0;
++}
++
++static char *split_token_from_name(const char *op, char *args, u64 *cookie)
++{
++ char *name;
++
++ *cookie = simple_strtoull(args, &name, 16);
++ if ((name == args) || *name != '^') {
++ AA_ERROR("%s: Invalid input '%s'", op, args);
++ return ERR_PTR(-EINVAL);
++ }
++
++ name++; /* skip ^ */
++ if (!*name)
++ name = NULL;
++ return name;
++}
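++/*
++ * Illustrative input parsed by the helper above (values made up):
++ * args "deadbeef^myhat" yields cookie 0xdeadbeef and name "myhat";
++ * "deadbeef^" yields a NULL name, i.e. a change_hat back out of the
++ * current hat.
++ */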
++
++int aa_setprocattr_changehat(char *args)
++{
++ char *hat;
++ u64 cookie;
++
++ hat = split_token_from_name("change_hat", args, &cookie);
++ if (IS_ERR(hat))
++ return PTR_ERR(hat);
++
++ if (!hat && !cookie) {
++ AA_ERROR("change_hat: Invalid input, NULL hat and NULL magic");
++ return -EINVAL;
++ }
++
++ AA_DEBUG("%s: Magic 0x%llx Hat '%s'\n",
++ __FUNCTION__, cookie, hat ? hat : "NULL");
++
++ return aa_change_hat(hat, cookie);
++}
++
++int aa_setprocattr_changeprofile(char *args)
++{
++ char *name = args, *ns_name = NULL;
++
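++ /* an optional leading ":<ns>:" selects a profile namespace, e.g.
++ * (illustrative) ":foo:/bin/ping" -> namespace "foo", profile
++ * "/bin/ping"; a bare name uses the default namespace */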
++ if (name[0] == ':') {
++ char *split = strchr(&name[1], ':');
++ if (split) {
++ *split = 0;
++ ns_name = &name[1];
++ name = split + 1;
++ }
++ }
++
++ return aa_change_profile(ns_name, name);
++}
++
++int aa_setprocattr_setprofile(struct task_struct *task, char *args)
++{
++ struct aa_profile *old_profile, *new_profile;
++ struct aa_namespace *ns;
++ struct aa_audit sa;
++ char *name, *ns_name = NULL;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "profile_set";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.task = task->pid;
++
++ AA_DEBUG("%s: current %d\n",
++ __FUNCTION__, current->pid);
++
++ name = args;
++ if (args[0] != '/') {
++ char *split = strchr(args, ':');
++ if (split) {
++ *split = 0;
++ ns_name = args;
++ name = split + 1;
++ }
++ }
++ if (ns_name)
++ ns = aa_find_namespace(ns_name);
++ else
++ ns = aa_get_namespace(default_namespace);
++ if (!ns) {
++ sa.name = ns_name;
++ sa.info = "unknown namespace";
++ aa_audit_reject(NULL, &sa);
++ aa_put_namespace(ns);
++ return -EINVAL;
++ }
++
++repeat:
++ if (strcmp(name, "unconfined") == 0)
++ new_profile = NULL;
++ else {
++ new_profile = aa_find_profile(ns, name);
++ if (!new_profile) {
++ sa.name = ns_name;
++ sa.name2 = name;
++ sa.info = "unknown profile";
++ aa_audit_reject(NULL, &sa);
++ aa_put_namespace(ns);
++ return -EINVAL;
++ }
++ }
++
++ old_profile = __aa_replace_profile(task, new_profile);
++ if (IS_ERR(old_profile)) {
++ int error;
++
++ aa_put_profile(new_profile);
++ error = PTR_ERR(old_profile);
++ if (error == -ESTALE)
++ goto repeat;
++ aa_put_namespace(ns);
++ return error;
++ }
++
++ if (new_profile) {
++ sa.name = ns_name;
++ sa.name2 = name;
++ sa.name3 = old_profile ? old_profile->name :
++ "unconfined";
++ aa_audit_status(NULL, &sa);
++ } else {
++ if (old_profile) {
++ sa.name = "unconfined";
++ sa.name2 = old_profile->name;
++ aa_audit_status(NULL, &sa);
++ } else {
++ sa.info = "task is unconfined";
++ aa_audit_status(NULL, &sa);
++ }
++ }
++ aa_put_namespace(ns);
++ aa_put_profile(old_profile);
++ aa_put_profile(new_profile);
++ return 0;
++}
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: Profile loading and manipulation, pathname matching
+
+Pathname matching, transition table loading, profile loading and
+manipulation.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ security/apparmor/match.c | 364 ++++++++++++++
+ security/apparmor/match.h | 87 +++
+ security/apparmor/module_interface.c | 875 +++++++++++++++++++++++++++++++++++
+ 3 files changed, 1326 insertions(+)
+
+--- /dev/null
++++ b/security/apparmor/match.c
+@@ -0,0 +1,364 @@
++/*
++ * Copyright (C) 2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * Regular expression transition table matching
++ */
++
++#include <linux/kernel.h>
++#include <linux/slab.h>
++#include <linux/errno.h>
++#include "apparmor.h"
++#include "match.h"
++#include "inline.h"
++
++static struct table_header *unpack_table(void *blob, size_t bsize)
++{
++ struct table_header *table = NULL;
++ struct table_header th;
++ size_t tsize;
++
++ if (bsize < sizeof(struct table_header))
++ goto out;
++
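++ /* on-disk header layout (big endian): u16 td_id at +0, u16 td_flags
++ * at +2, u32 td_hilen at +4 (unused here), u32 td_lolen at +8;
++ * see struct table_header in match.h */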
++ th.td_id = be16_to_cpu(*(u16 *) (blob));
++ th.td_flags = be16_to_cpu(*(u16 *) (blob + 2));
++ th.td_lolen = be32_to_cpu(*(u32 *) (blob + 8));
++ blob += sizeof(struct table_header);
++
++ if (!(th.td_flags == YYTD_DATA16 || th.td_flags == YYTD_DATA32 ||
++ th.td_flags == YYTD_DATA8))
++ goto out;
++
++ tsize = table_size(th.td_lolen, th.td_flags);
++ if (bsize < tsize)
++ goto out;
++
++ table = kmalloc(tsize, GFP_KERNEL);
++ if (table) {
++ *table = th;
++ if (th.td_flags == YYTD_DATA8)
++ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
++ u8, byte_to_byte);
++ else if (th.td_flags == YYTD_DATA16)
++ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
++ u16, be16_to_cpu);
++ else
++ UNPACK_ARRAY(table->td_data, blob, th.td_lolen,
++ u32, be32_to_cpu);
++ }
++
++out:
++ return table;
++}
++
++int unpack_dfa(struct aa_dfa *dfa, void *blob, size_t size)
++{
++ int hsize, i;
++ int error = -ENOMEM;
++
++ /* get dfa table set header */
++ if (size < sizeof(struct table_set_header))
++ goto fail;
++
++ if (ntohl(*(u32 *)blob) != YYTH_MAGIC)
++ goto fail;
++
++ hsize = ntohl(*(u32 *)(blob + 4));
++ if (size < hsize)
++ goto fail;
++
++ blob += hsize;
++ size -= hsize;
++
++ error = -EPROTO;
++ while (size > 0) {
++ struct table_header *table;
++ table = unpack_table(blob, size);
++ if (!table)
++ goto fail;
++
++ switch(table->td_id) {
++ case YYTD_ID_ACCEPT:
++ case YYTD_ID_ACCEPT2:
++ case YYTD_ID_BASE:
++ dfa->tables[table->td_id - 1] = table;
++ if (table->td_flags != YYTD_DATA32)
++ goto fail;
++ break;
++ case YYTD_ID_DEF:
++ case YYTD_ID_NXT:
++ case YYTD_ID_CHK:
++ dfa->tables[table->td_id - 1] = table;
++ if (table->td_flags != YYTD_DATA16)
++ goto fail;
++ break;
++ case YYTD_ID_EC:
++ dfa->tables[table->td_id - 1] = table;
++ if (table->td_flags != YYTD_DATA8)
++ goto fail;
++ break;
++ default:
++ kfree(table);
++ goto fail;
++ }
++
++ blob += table_size(table->td_lolen, table->td_flags);
++ size -= table_size(table->td_lolen, table->td_flags);
++ }
++
++ return 0;
++
++fail:
++ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++) {
++ if (dfa->tables[i]) {
++ kfree(dfa->tables[i]);
++ dfa->tables[i] = NULL;
++ }
++ }
++ return error;
++}
++
++/**
++ * verify_dfa - verify that all the transitions and states in the dfa tables
++ * are in bounds.
++ * @dfa: dfa to test
++ *
++ * assumes dfa has gone through the verification done by unpacking
++ */
++int verify_dfa(struct aa_dfa *dfa)
++{
++ size_t i, state_count, trans_count;
++ int error = -EPROTO;
++
++ /* check that required tables exist */
++ if (!(dfa->tables[YYTD_ID_ACCEPT - 1] &&
++ dfa->tables[YYTD_ID_ACCEPT2 - 1] &&
++ dfa->tables[YYTD_ID_DEF - 1] &&
++ dfa->tables[YYTD_ID_BASE - 1] &&
++ dfa->tables[YYTD_ID_NXT - 1] &&
++ dfa->tables[YYTD_ID_CHK - 1]))
++ goto out;
++
++ /* accept.size == default.size == base.size */
++ state_count = dfa->tables[YYTD_ID_BASE - 1]->td_lolen;
++ if (!(state_count == dfa->tables[YYTD_ID_DEF - 1]->td_lolen &&
++ state_count == dfa->tables[YYTD_ID_ACCEPT - 1]->td_lolen &&
++ state_count == dfa->tables[YYTD_ID_ACCEPT2 - 1]->td_lolen))
++ goto out;
++
++ /* next.size == chk.size */
++ trans_count = dfa->tables[YYTD_ID_NXT - 1]->td_lolen;
++ if (trans_count != dfa->tables[YYTD_ID_CHK - 1]->td_lolen)
++ goto out;
++
++ /* if there is an equivalence class table its size must be 256 */
++ if (dfa->tables[YYTD_ID_EC - 1] &&
++ dfa->tables[YYTD_ID_EC - 1]->td_lolen != 256)
++ goto out;
++
++ for (i = 0; i < state_count; i++) {
++ if (DEFAULT_TABLE(dfa)[i] >= state_count)
++ goto out;
++ if (BASE_TABLE(dfa)[i] >= trans_count + 256)
++ goto out;
++ }
++
++ for (i = 0; i < trans_count ; i++) {
++ if (NEXT_TABLE(dfa)[i] >= state_count)
++ goto out;
++ if (CHECK_TABLE(dfa)[i] >= state_count)
++ goto out;
++ }
++
++ /* verify accept permissions */
++ for (i = 0; i < state_count; i++) {
++ int mode = ACCEPT_TABLE(dfa)[i];
++
++ if (mode & ~AA_VALID_PERM_MASK)
++ goto out;
++ if (ACCEPT_TABLE2(dfa)[i] & ~AA_VALID_PERM2_MASK)
++ goto out;
++
++ /* if any exec modifier is set MAY_EXEC must be set */
++ if ((mode & AA_USER_EXEC_TYPE) && !(mode & AA_USER_EXEC))
++ goto out;
++ if ((mode & AA_OTHER_EXEC_TYPE) && !(mode & AA_OTHER_EXEC))
++ goto out;
++ }
++
++ error = 0;
++out:
++ return error;
++}
++
++struct aa_dfa *aa_match_alloc(void)
++{
++ return kzalloc(sizeof(struct aa_dfa), GFP_KERNEL);
++}
++
++void aa_match_free(struct aa_dfa *dfa)
++{
++ if (dfa) {
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(dfa->tables); i++)
++ kfree(dfa->tables[i]);
++ }
++ kfree(dfa);
++}
++
++/**
++ * aa_dfa_next_state_len - traverse @dfa to find state @str stops at
++ * @dfa: the dfa to match @str against
++ * @start: the state of the dfa to start matching in
++ * @str: the string of bytes to match against the dfa
++ * @len: length of the string of bytes to match
++ *
++ * aa_dfa_next_state_len will match @str against the dfa and return the
++ * state it finished matching in. The final state can be used to look up
++ * the accepting label, or as the start state of a continuing match.
++ *
++ * aa_dfa_next_state could be implemented using this function by doing
++ * return aa_dfa_next_state_len(dfa, start, str, strlen(str));
++ * but that would require traversing the string twice and be slightly
++ * slower.
++ */
++unsigned int aa_dfa_next_state_len(struct aa_dfa *dfa, unsigned int start,
++ const char *str, int len)
++{
++ u16 *def = DEFAULT_TABLE(dfa);
++ u32 *base = BASE_TABLE(dfa);
++ u16 *next = NEXT_TABLE(dfa);
++ u16 *check = CHECK_TABLE(dfa);
++ unsigned int state = start, pos;
++
++ if (state == 0)
++ return 0;
++
++ /* current state is <state>, matching character *str */
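++ /* comb-compressed transition lookup: the row for <state> starts at
++ * base[state]; check[] records which state owns each slot, so a
++ * mismatch falls back to the state's default transition */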
++ if (dfa->tables[YYTD_ID_EC - 1]) {
++ u8 *equiv = EQUIV_TABLE(dfa);
++ for (; len; len--) {
++ pos = base[state] + equiv[(u8)*str++];
++ if (check[pos] == state)
++ state = next[pos];
++ else
++ state = def[state];
++ }
++ } else {
++ for (; len; len--) {
++ pos = base[state] + (u8)*str++;
++ if (check[pos] == state)
++ state = next[pos];
++ else
++ state = def[state];
++ }
++ }
++ return state;
++}
++
++/**
++ * aa_dfa_next_state - traverse @dfa to find state @str stops at
++ * @dfa: the dfa to match @str against
++ * @start: the state of the dfa to start matching in
++ * @str: the null terminated string of bytes to match against the dfa
++ *
++ * aa_dfa_next_state will match @str against the dfa and return the state it
++ * finished matching in. The final state can be used to look up the accepting
++ * label, or as the start state of a continuing match.
++ */
++unsigned int aa_dfa_next_state(struct aa_dfa *dfa, unsigned int start,
++ const char *str)
++{
++ u16 *def = DEFAULT_TABLE(dfa);
++ u32 *base = BASE_TABLE(dfa);
++ u16 *next = NEXT_TABLE(dfa);
++ u16 *check = CHECK_TABLE(dfa);
++ unsigned int state = start, pos;
++
++ if (state == 0)
++ return 0;
++
++ /* current state is <state>, matching character *str */
++ if (dfa->tables[YYTD_ID_EC - 1]) {
++ u8 *equiv = EQUIV_TABLE(dfa);
++ while (*str) {
++ pos = base[state] + equiv[(u8)*str++];
++ if (check[pos] == state)
++ state = next[pos];
++ else
++ state = def[state];
++ }
++ } else {
++ while (*str) {
++ pos = base[state] + (u8)*str++;
++ if (check[pos] == state)
++ state = next[pos];
++ else
++ state = def[state];
++ }
++ }
++ return state;
++}
++
++/**
++ * aa_dfa_null_transition - step to next state after null character
++ * @dfa: the dfa to match against
++ * @start: the state of the dfa to start matching in
++ *
++ * aa_dfa_null_transition transitions to the next state after a null
++ * character, which is not used in standard matching and is only
++ * used to separate pairs of strings during a match.
++ */
++unsigned int aa_dfa_null_transition(struct aa_dfa *dfa, unsigned int start)
++{
++ return aa_dfa_next_state_len(dfa, start, "", 1);
++}
++
++/**
++ * aa_dfa_match - find accept perm for @str in @dfa
++ * @dfa: the dfa to match @str against
++ * @str: the string to match against the dfa
++ * @audit_mask: the audit_mask for the final state
++ *
++ * aa_dfa_match will match @str and return the accept perms for the
++ * final state.
++ */
++unsigned int aa_dfa_match(struct aa_dfa *dfa, const char *str, int *audit_mask)
++{
++ int state = aa_dfa_next_state(dfa, DFA_START, str);
++ if (audit_mask)
++ *audit_mask = dfa_audit_mask(dfa, state);
++ return ACCEPT_TABLE(dfa)[state];
++}
++
++/**
++ * aa_match_state - find accept perm and state for @str in @dfa
++ * @dfa: the dfa to match @str against
++ * @start: the state to start the match from
++ * @str: the string to match against the dfa
++ * @final: the state that the match finished in
++ *
++ * aa_match_state will match @str and return the accept perms, and the
++ * @final state the match occurred in.
++ */
++unsigned int aa_match_state(struct aa_dfa *dfa, unsigned int start,
++ const char *str, unsigned int *final)
++{
++ unsigned int state;
++ if (dfa) {
++ state = aa_dfa_next_state(dfa, start, str);
++ if (final)
++ *final = state;
++ return ACCEPT_TABLE(dfa)[state];
++ }
++ if (final)
++ *final = 0;
++ return 0;
++}
++
+--- /dev/null
++++ b/security/apparmor/match.h
+@@ -0,0 +1,87 @@
++/*
++ * Copyright (C) 2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor submodule (match) prototypes
++ */
++
++#ifndef __MATCH_H
++#define __MATCH_H
++
++#define DFA_START 1
++
++/**
++ * The format used for transition tables is based on the GNU flex table
++ * file format (--tables-file option; see Table File Format in the flex
++ * info pages and the flex sources for documentation). The magic number
++ * used in the header is 0x1B5E783D instead of 0xF13C57B1 though, because
++ * the YY_ID_CHK (check) and YY_ID_DEF (default) tables are used
++ * slightly differently (see the apparmor-parser package).
++ */
++
++#define YYTH_MAGIC 0x1B5E783D
++
++struct table_set_header {
++ u32 th_magic; /* YYTH_MAGIC */
++ u32 th_hsize;
++ u32 th_ssize;
++ u16 th_flags;
++ char th_version[];
++};
++
++#define YYTD_ID_ACCEPT 1
++#define YYTD_ID_BASE 2
++#define YYTD_ID_CHK 3
++#define YYTD_ID_DEF 4
++#define YYTD_ID_EC 5
++#define YYTD_ID_META 6
++#define YYTD_ID_ACCEPT2 7
++#define YYTD_ID_NXT 8
++
++
++#define YYTD_DATA8 1
++#define YYTD_DATA16 2
++#define YYTD_DATA32 4
++
++struct table_header {
++ u16 td_id;
++ u16 td_flags;
++ u32 td_hilen;
++ u32 td_lolen;
++ char td_data[];
++};
++
++#define DEFAULT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_DEF - 1]->td_data))
++#define BASE_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_BASE - 1]->td_data))
++#define NEXT_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_NXT - 1]->td_data))
++#define CHECK_TABLE(DFA) ((u16 *)((DFA)->tables[YYTD_ID_CHK - 1]->td_data))
++#define EQUIV_TABLE(DFA) ((u8 *)((DFA)->tables[YYTD_ID_EC - 1]->td_data))
++#define ACCEPT_TABLE(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT - 1]->td_data))
++#define ACCEPT_TABLE2(DFA) ((u32 *)((DFA)->tables[YYTD_ID_ACCEPT2 -1]->td_data))
++
++struct aa_dfa {
++ struct table_header *tables[YYTD_ID_NXT];
++};
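++
++/*
++ * tables[] is indexed by the on-disk table id minus one, e.g.
++ * tables[YYTD_ID_DEF - 1] holds the default table; see the *_TABLE()
++ * accessors above.
++ */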
++
++#define byte_to_byte(X) (X)
++
++#define UNPACK_ARRAY(TABLE, BLOB, LEN, TYPE, NTOHX) \
++ do { \
++ typeof(LEN) __i; \
++ TYPE *__t = (TYPE *) TABLE; \
++ TYPE *__b = (TYPE *) BLOB; \
++ for (__i = 0; __i < LEN; __i++) { \
++ __t[__i] = NTOHX(__b[__i]); \
++ } \
++ } while (0)
++
++static inline size_t table_size(size_t len, size_t el_size)
++{
++ return ALIGN(sizeof(struct table_header) + len * el_size, 8);
++}
++
++#endif /* __MATCH_H */
+--- /dev/null
++++ b/security/apparmor/module_interface.c
+@@ -0,0 +1,875 @@
++/*
++ * Copyright (C) 1998-2007 Novell/SUSE
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ * AppArmor userspace policy interface
++ */
++
++#include <asm/unaligned.h>
++
++#include "apparmor.h"
++#include "inline.h"
++
++/*
++ * This mutex is used to synchronize profile adds, replacements, and
++ * removals: we only allow one of these operations at a time.
++ * We do not use the profile list lock here in order to avoid blocking
++ * exec during those operations. (Exec involves a profile list lookup
++ * for named-profile transitions.)
++ */
++DEFINE_MUTEX(aa_interface_lock);
++
++/*
++ * The AppArmor interface treats data as a type byte followed by the
++ * actual data. The interface has the notion of a named entry
++ * which has a name (AA_NAME typecode followed by name string) followed
++ * by the entry's typecode and data. Named types allow for optional
++ * elements and extensions to be added and tested for without breaking
++ * backward compatibility.
++ */
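++
++/*
++ * Illustrative encoding of a named u32 element (not normative; see the
++ * unpack helpers below): AA_NAME, u16 little endian length, the name
++ * string including its trailing NUL, then AA_U32 and 4 bytes of little
++ * endian data.
++ */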
++
++enum aa_code {
++ AA_U8,
++ AA_U16,
++ AA_U32,
++ AA_U64,
++ AA_NAME, /* same as string except it is the item's name */
++ AA_STRING,
++ AA_BLOB,
++ AA_STRUCT,
++ AA_STRUCTEND,
++ AA_LIST,
++ AA_LISTEND,
++ AA_ARRAY,
++ AA_ARRAYEND,
++};
++
++/*
++ * aa_ext is the read head for the buffer containing the serialized
++ * profile. The data is copied into a kernel buffer in apparmorfs and
++ * then handed off to the unpack routines.
++ */
++struct aa_ext {
++ void *start;
++ void *end;
++ void *pos; /* pointer to current position in the buffer */
++ u32 version;
++ char *ns_name;
++};
++
++static inline int aa_inbounds(struct aa_ext *e, size_t size)
++{
++ return (size <= e->end - e->pos);
++}
++
++/**
++ * aa_is_u16_chunk - test and do bounds checking for a u16 size based chunk
++ * @e: serialized data read head
++ * @chunk: start address for chunk of data
++ *
++ * Returns the size of the chunk found, with the read head left at the end
++ * of the chunk, or 0 on failure with the read head unchanged.
++ */
++static size_t aa_is_u16_chunk(struct aa_ext *e, char **chunk)
++{
++ void *pos = e->pos;
++ size_t size = 0;
++
++ if (!aa_inbounds(e, sizeof(u16)))
++ goto fail;
++ size = le16_to_cpu(get_unaligned((u16 *)e->pos));
++ e->pos += sizeof(u16);
++ if (!aa_inbounds(e, size))
++ goto fail;
++ *chunk = e->pos;
++ e->pos += size;
++ return size;
++
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static inline int aa_is_X(struct aa_ext *e, enum aa_code code)
++{
++ if (!aa_inbounds(e, 1))
++ return 0;
++ if (*(u8 *) e->pos != code)
++ return 0;
++ e->pos++;
++ return 1;
++}
++
++/**
++ * aa_is_nameX - check if the next element is of type X with a name of @name
++ * @e: serialized data extent information
++ * @code: type code
++ * @name: name to match to the serialized element.
++ *
++ * check that the next serialized data element is of type X and has a tag
++ * name @name. If @name is specified then there must be a matching
++ * name element in the stream. If @name is NULL any name element will be
++ * skipped and only the typecode will be tested.
++ * Returns 1 on success (both type code and name tests match) with the
++ * read head advanced past the headers.
++ * Returns 0 if either match fails; the read head does not move.
++ */
++static int aa_is_nameX(struct aa_ext *e, enum aa_code code, const char *name)
++{
++ void *pos = e->pos;
++ /*
++ * Check for presence of a tag name, and if present its size.
++ * The AA_NAME tag's value is a u16 length-prefixed chunk.
++ */
++ if (aa_is_X(e, AA_NAME)) {
++ char *tag;
++ size_t size = aa_is_u16_chunk(e, &tag);
++ /* if a name is specified it must match. otherwise skip tag */
++ if (name && (!size || strcmp(name, tag)))
++ goto fail;
++ } else if (name) {
++ /* if a name is specified and there is no name tag fail */
++ goto fail;
++ }
++
++ /* now check if type code matches */
++ if (aa_is_X(e, code))
++ return 1;
++
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static int aa_is_u16(struct aa_ext *e, u16 *data, const char *name)
++{
++ void *pos = e->pos;
++ if (aa_is_nameX(e, AA_U16, name)) {
++ if (!aa_inbounds(e, sizeof(u16)))
++ goto fail;
++ if (data)
++ *data = le16_to_cpu(get_unaligned((u16 *)e->pos));
++ e->pos += sizeof(u16);
++ return 1;
++ }
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static int aa_is_u32(struct aa_ext *e, u32 *data, const char *name)
++{
++ void *pos = e->pos;
++ if (aa_is_nameX(e, AA_U32, name)) {
++ if (!aa_inbounds(e, sizeof(u32)))
++ goto fail;
++ if (data)
++ *data = le32_to_cpu(get_unaligned((u32 *)e->pos));
++ e->pos += sizeof(u32);
++ return 1;
++ }
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static size_t aa_is_array(struct aa_ext *e, const char *name)
++{
++ void *pos = e->pos;
++ if (aa_is_nameX(e, AA_ARRAY, name)) {
++ int size;
++ if (!aa_inbounds(e, sizeof(u16)))
++ goto fail;
++ size = (int) le16_to_cpu(get_unaligned((u16 *)e->pos));
++ e->pos += sizeof(u16);
++ return size;
++ }
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static size_t aa_is_blob(struct aa_ext *e, char **blob, const char *name)
++{
++ void *pos = e->pos;
++ if (aa_is_nameX(e, AA_BLOB, name)) {
++ u32 size;
++ if (!aa_inbounds(e, sizeof(u32)))
++ goto fail;
++ size = le32_to_cpu(get_unaligned((u32 *)e->pos));
++ e->pos += sizeof(u32);
++ if (aa_inbounds(e, (size_t) size)) {
++ *blob = e->pos;
++ e->pos += size;
++ return size;
++ }
++ }
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++static int aa_is_dynstring(struct aa_ext *e, char **string, const char *name)
++{
++ char *src_str;
++ size_t size = 0;
++ void *pos = e->pos;
++ *string = NULL;
++ if (aa_is_nameX(e, AA_STRING, name) &&
++ (size = aa_is_u16_chunk(e, &src_str))) {
++ char *str;
++ if (!(str = kmalloc(size, GFP_KERNEL)))
++ goto fail;
++ memcpy(str, src_str, size);
++ *string = str;
++ }
++
++ return size;
++
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++/**
++ * aa_unpack_dfa - unpack a file rule dfa
++ * @e: serialized data extent information
++ *
++ * returns dfa or ERR_PTR
++ */
++static struct aa_dfa *aa_unpack_dfa(struct aa_ext *e)
++{
++ char *blob = NULL;
++ size_t size;
++ int error = 0;
++ struct aa_dfa *dfa = NULL;
++
++ size = aa_is_blob(e, &blob, "aadfa");
++ if (size) {
++ dfa = aa_match_alloc();
++ if (dfa) {
++ /*
++ * The dfa is aligned within the blob to 8 bytes
++ * from the beginning of the stream.
++ */
++ size_t sz = blob - (char *) e->start;
++ size_t pad = ALIGN(sz, 8) - sz;
++ error = unpack_dfa(dfa, blob + pad, size - pad);
++ if (!error)
++ error = verify_dfa(dfa);
++ } else {
++ error = -ENOMEM;
++ }
++
++ if (error) {
++ aa_match_free(dfa);
++ dfa = ERR_PTR(error);
++ }
++ }
++
++ return dfa;
++}
++
++static int aa_unpack_exec_table(struct aa_ext *e, struct aa_profile *profile)
++{
++ void *pos = e->pos;
++
++ /* exec table is optional */
++ if (aa_is_nameX(e, AA_STRUCT, "xtable")) {
++ int i, size;
++
++ size = aa_is_array(e, NULL);
++ /* currently 4 exec bits and entries 0-3 are reserved iupcx */
++ if (size > 16 - 4)
++ goto fail;
++ profile->exec_table = kzalloc(sizeof(char *) * size,
++ GFP_KERNEL);
++ if (!profile->exec_table)
++ goto fail;
++
++ for (i = 0; i < size; i++) {
++ char *tmp;
++ if (!aa_is_dynstring(e, &tmp, NULL))
++ goto fail;
++ /* note: strings beginning with a : have an embedded
++ * \0 separating the profile ns name from the profile
++ * name */
++ profile->exec_table[i] = tmp;
++ }
++ if (!aa_is_nameX(e, AA_ARRAYEND, NULL))
++ goto fail;
++ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
++ goto fail;
++ profile->exec_table_size = size;
++ }
++ return 1;
++
++fail:
++ e->pos = pos;
++ return 0;
++}
++
++/**
++ * aa_unpack_profile - unpack a serialized profile
++ * @e: serialized data extent information
++ * @sa: audit struct for the operation
++ */
++static struct aa_profile *aa_unpack_profile(struct aa_ext *e,
++ struct aa_audit *sa)
++{
++ struct aa_profile *profile = NULL;
++
++ int error = -EPROTO;
++
++ profile = alloc_aa_profile();
++ if (!profile)
++ return ERR_PTR(-ENOMEM);
++
++ /* check that we have the right struct being passed */
++ if (!aa_is_nameX(e, AA_STRUCT, "profile"))
++ goto fail;
++ if (!aa_is_dynstring(e, &profile->name, NULL))
++ goto fail;
++
++ /* per profile debug flags (complain, audit) */
++ if (!aa_is_nameX(e, AA_STRUCT, "flags"))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->flags.hat), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->flags.complain), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->flags.audit), NULL))
++ goto fail;
++ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
++ goto fail;
++
++ if (!aa_is_u32(e, &(profile->capabilities), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->audit_caps), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->quiet_caps), NULL))
++ goto fail;
++ if (!aa_is_u32(e, &(profile->set_caps), NULL))
++ goto fail;
++
++ /* get file rules */
++ profile->file_rules = aa_unpack_dfa(e);
++ if (IS_ERR(profile->file_rules)) {
++ error = PTR_ERR(profile->file_rules);
++ profile->file_rules = NULL;
++ goto fail;
++ }
++
++ if (!aa_unpack_exec_table(e, profile))
++ goto fail;
++
++ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
++ goto fail;
++
++ return profile;
++
++fail:
++ sa->name = profile && profile->name ? profile->name : "unknown";
++ if (!sa->info)
++ sa->info = "failed to unpack profile";
++ aa_audit_status(NULL, sa);
++
++ if (profile)
++ free_aa_profile(profile);
++
++ return ERR_PTR(error);
++}
++
++/**
++ * aa_verify_header - unpack serialized stream header
++ * @e: serialized data read head
++ * @sa: audit struct for the operation the header is being verified for
++ *
++ * Returns 0 if the header is good, error otherwise.
++ */
++static int aa_verify_header(struct aa_ext *e, struct aa_audit *sa)
++{
++ /* get the interface version */
++ if (!aa_is_u32(e, &e->version, "version")) {
++ sa->info = "invalid profile format";
++ aa_audit_status(NULL, sa);
++ return -EPROTONOSUPPORT;
++ }
++
++ /* check that the interface version is currently supported */
++ if (e->version != 5) {
++ sa->info = "unsupported interface version";
++ aa_audit_status(NULL, sa);
++ return -EPROTONOSUPPORT;
++ }
++
++ /* read the namespace if present */
++ if (!aa_is_dynstring(e, &e->ns_name, "namespace")) {
++ e->ns_name = NULL;
++ }
++
++ return 0;
++}
++
++/**
++ * aa_add_profile - Unpack and add a new profile to the profile list
++ * @data: serialized data stream
++ * @size: size of the serialized data stream
++ */
++ssize_t aa_add_profile(void *data, size_t size)
++{
++ struct aa_profile *profile = NULL;
++ struct aa_namespace *ns = NULL;
++ struct aa_ext e = {
++ .start = data,
++ .end = data + size,
++ .pos = data,
++ .ns_name = NULL
++ };
++ ssize_t error;
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "profile_load";
++ sa.gfp_mask = GFP_KERNEL;
++
++ error = aa_verify_header(&e, &sa);
++ if (error)
++ return error;
++
++ profile = aa_unpack_profile(&e, &sa);
++ if (IS_ERR(profile))
++ return PTR_ERR(profile);
++
++ mutex_lock(&aa_interface_lock);
++ write_lock(&profile_ns_list_lock);
++ if (e.ns_name)
++ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
++ else
++ ns = default_namespace;
++ if (!ns) {
++ struct aa_namespace *new_ns;
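++ /* drop the list lock so the allocation can sleep, then retake
++ * it and recheck in case the namespace appeared while the lock
++ * was not held */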
++ write_unlock(&profile_ns_list_lock);
++ new_ns = alloc_aa_namespace(e.ns_name);
++ if (!new_ns) {
++ mutex_unlock(&aa_interface_lock);
++ return -ENOMEM;
++ }
++ write_lock(&profile_ns_list_lock);
++ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
++ if (!ns) {
++ list_add(&new_ns->list, &profile_ns_list);
++ ns = new_ns;
++ } else
++ free_aa_namespace(new_ns);
++ }
++
++ write_lock(&ns->lock);
++ if (__aa_find_profile(profile->name, &ns->profiles)) {
++ /* A profile with this name exists already. */
++ write_unlock(&ns->lock);
++ write_unlock(&profile_ns_list_lock);
++ sa.name = profile->name;
++ sa.name2 = ns->name;
++ sa.info = "failed: profile already loaded";
++ aa_audit_status(NULL, &sa);
++ mutex_unlock(&aa_interface_lock);
++ aa_put_profile(profile);
++ return -EEXIST;
++ }
++ profile->ns = aa_get_namespace(ns);
++ ns->profile_count++;
++ list_add(&profile->list, &ns->profiles);
++ write_unlock(&ns->lock);
++ write_unlock(&profile_ns_list_lock);
++
++ sa.name = profile->name;
++ sa.name2 = ns->name;
++ aa_audit_status(NULL, &sa);
++ mutex_unlock(&aa_interface_lock);
++ return size;
++}
++
++/**
++ * task_replace - replace a task's profile
++ * @task: task to replace profile on
++ * @new_cxt: new aa_task_context to do replacement with
++ * @new_profile: new profile
++ */
++static inline void task_replace(struct task_struct *task,
++ struct aa_task_context *new_cxt,
++ struct aa_profile *new_profile)
++{
++ struct aa_task_context *cxt = aa_task_context(task);
++
++ AA_DEBUG("%s: replacing profile for task %d "
++ "profile=%s (%p)\n",
++ __FUNCTION__,
++ cxt->task->pid,
++ cxt->profile->name, cxt->profile);
++
++ aa_change_task_context(task, new_cxt, new_profile, cxt->cookie,
++ cxt->previous_profile);
++}
++
++/**
++ * aa_replace_profile - replace a profile on the profile list
++ * @udata: serialized data stream
++ * @size: size of the serialized data stream
++ *
++ * unpack and replace a profile on the profile list, updating any
++ * aa_task_context that uses the old profile. If the profile does not
++ * exist on the profile list it is added. Returns @size on success or
++ * a negative error.
++ */
++ssize_t aa_replace_profile(void *udata, size_t size)
++{
++ struct aa_profile *old_profile, *new_profile;
++ struct aa_namespace *ns;
++ struct aa_task_context *new_cxt;
++ struct aa_ext e = {
++ .start = udata,
++ .end = udata + size,
++ .pos = udata,
++ .ns_name = NULL
++ };
++ ssize_t error;
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "profile_replace";
++ sa.gfp_mask = GFP_KERNEL;
++
++ error = aa_verify_header(&e, &sa);
++ if (error)
++ return error;
++
++ new_profile = aa_unpack_profile(&e, &sa);
++ if (IS_ERR(new_profile))
++ return PTR_ERR(new_profile);
++
++ mutex_lock(&aa_interface_lock);
++ write_lock(&profile_ns_list_lock);
++ if (e.ns_name)
++ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
++ else
++ ns = default_namespace;
++ if (!ns) {
++ struct aa_namespace *new_ns;
++ write_unlock(&profile_ns_list_lock);
++ new_ns = alloc_aa_namespace(e.ns_name);
++ if (!new_ns) {
++ mutex_unlock(&aa_interface_lock);
++ return -ENOMEM;
++ }
++ write_lock(&profile_ns_list_lock);
++ ns = __aa_find_namespace(e.ns_name, &profile_ns_list);
++ if (!ns) {
++ list_add(&new_ns->list, &profile_ns_list);
++ ns = new_ns;
++ } else
++ free_aa_namespace(new_ns);
++ }
++
++ write_lock(&ns->lock);
++ old_profile = __aa_find_profile(new_profile->name, &ns->profiles);
++ if (old_profile) {
++ lock_profile(old_profile);
++ old_profile->isstale = 1;
++ list_del_init(&old_profile->list);
++ unlock_profile(old_profile);
++ ns->profile_count--;
++ }
++ new_profile->ns = aa_get_namespace(ns);
++ ns->profile_count++;
++ /* note: we don't need an extra ref count to keep new_profile
++ * alive as it is protected by the interface mutex */
++ list_add(&new_profile->list, &ns->profiles);
++ write_unlock(&ns->lock);
++ write_unlock(&profile_ns_list_lock);
++
++ if (!old_profile) {
++ sa.operation = "profile_load";
++ goto out;
++ }
++ /*
++ * Replacement needs to allocate a new aa_task_context for each
++ * task confined by old_profile. To do this the profile locks
++ * are only held when the actual switch is done per task. While
++ * looping to allocate a new aa_task_context the old_task list
++ * may get shorter if tasks exit/change their profile but will
++ * not get longer as new task will not use old_profile detecting
++ * that is stale.
++ */
++ do {
++ new_cxt = aa_alloc_task_context(GFP_KERNEL | __GFP_NOFAIL);
++
++ lock_both_profiles(old_profile, new_profile);
++ if (!list_empty(&old_profile->task_contexts)) {
++ struct task_struct *task =
++ list_entry(old_profile->task_contexts.next,
++ struct aa_task_context, list)->task;
++ task_lock(task);
++ task_replace(task, new_cxt, new_profile);
++ task_unlock(task);
++ new_cxt = NULL;
++ }
++ unlock_both_profiles(old_profile, new_profile);
++ } while (!new_cxt);
++ aa_free_task_context(new_cxt);
++ aa_put_profile(old_profile);
++
++out:
++ sa.name = new_profile->name;
++ sa.name2 = ns->name;
++ aa_audit_status(NULL, &sa);
++ mutex_unlock(&aa_interface_lock);
++ return size;
++}
++
++/**
++ * aa_remove_profile - remove a profile from the system
++ * @name: name of the profile to remove
++ * @size: size of the name
++ *
++ * remove a profile from the profile list and all aa_task_context references
++ * to said profile.
++ */
++ssize_t aa_remove_profile(char *name, size_t size)
++{
++ struct aa_namespace *ns;
++ struct aa_profile *profile;
++ struct aa_audit sa;
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "profile_remove";
++ sa.gfp_mask = GFP_KERNEL;
++
++ mutex_lock(&aa_interface_lock);
++ write_lock(&profile_ns_list_lock);
++
++ if (name[0] == ':') {
++ char *split = strchr(name + 1, ':');
++ if (!split)
++ goto noent;
++ *split = 0;
++ ns = __aa_find_namespace(name + 1, &profile_ns_list);
++ name = split + 1;
++ } else {
++ ns = default_namespace;
++ }
++
++ if (!ns)
++ goto noent;
++ sa.name2 = ns->name;
++ write_lock(&ns->lock);
++ profile = __aa_find_profile(name, &ns->profiles);
++ if (!profile) {
++ write_unlock(&ns->lock);
++ goto noent;
++ }
++ sa.name = profile->name;
++
++ /* Remove the profile from each task context it is on. */
++ lock_profile(profile);
++ profile->isstale = 1;
++ aa_unconfine_tasks(profile);
++ list_del_init(&profile->list);
++ ns->profile_count--;
++ unlock_profile(profile);
++ /* Release the profile itself. */
++ write_unlock(&ns->lock);
++ /* check to see if the namespace has become stale */
++ if (ns != default_namespace && ns->profile_count == 0) {
++ list_del_init(&ns->list);
++ aa_put_namespace(ns);
++ }
++ write_unlock(&profile_ns_list_lock);
++
++ aa_audit_status(NULL, &sa);
++ mutex_unlock(&aa_interface_lock);
++ aa_put_profile(profile);
++
++ return size;
++
++noent:
++ write_unlock(&profile_ns_list_lock);
++ sa.info = "failed: profile does not exist";
++ aa_audit_status(NULL, &sa);
++ mutex_unlock(&aa_interface_lock);
++ return -ENOENT;
++}
++
++/**
++ * free_aa_namespace_kref - free aa_namespace by kref (see aa_put_namespace)
++ * @kref: kref callback for freeing of a namespace
++ */
++void free_aa_namespace_kref(struct kref *kref)
++{
++ struct aa_namespace *ns = container_of(kref, struct aa_namespace, count);
++
++ free_aa_namespace(ns);
++}
++
++/**
++ * alloc_aa_namespace - allocate, initialize and return a new namespace
++ * @name: a preallocated name
++ * Returns NULL on failure.
++ */
++struct aa_namespace *alloc_aa_namespace(char *name)
++{
++ struct aa_namespace *ns;
++
++ ns = kzalloc(sizeof(*ns), GFP_KERNEL);
++ AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
++ if (ns) {
++ ns->name = name;
++ INIT_LIST_HEAD(&ns->list);
++ INIT_LIST_HEAD(&ns->profiles);
++ kref_init(&ns->count);
++ rwlock_init(&ns->lock);
++
++ ns->null_complain_profile = alloc_aa_profile();
++ if (!ns->null_complain_profile) {
++ if (!name)
++ kfree(ns->name);
++ kfree(ns);
++ return NULL;
++ }
++ ns->null_complain_profile->name =
++ kstrdup("null-complain-profile", GFP_KERNEL);
++ if (!ns->null_complain_profile->name) {
++ free_aa_profile(ns->null_complain_profile);
++ if (!name)
++ kfree(ns->name);
++ kfree(ns);
++ return NULL;
++ }
++ ns->null_complain_profile->flags.complain = 1;
++ /* null_complain_profile doesn't contribute to ns ref count */
++ ns->null_complain_profile->ns = ns;
++ }
++ return ns;
++}
++
++/**
++ * free_aa_namespace - free a profile namespace
++ * @namespace: the namespace to free
++ *
++ * Free a namespace. All references to the namespace must have been put.
++ * If the namespace was referenced by a profile confining a task,
++ * free_aa_namespace will be called indirectly (through free_aa_profile)
++ * from an rcu callback routine, so we must not sleep here.
++ */
++void free_aa_namespace(struct aa_namespace *ns)
++{
++ AA_DEBUG("%s(%p)\n", __FUNCTION__, ns);
++
++ if (!ns)
++ return;
++
++ /* namespace still contains profiles -- invalid */
++ if (!list_empty(&ns->profiles)) {
++ AA_ERROR("%s: internal error, "
++ "namespace '%s' still contains profiles\n",
++ __FUNCTION__,
++ ns->name);
++ BUG();
++ }
++ if (!list_empty(&ns->list)) {
++ AA_ERROR("%s: internal error, "
++ "namespace '%s' still on list\n",
++ __FUNCTION__,
++ ns->name);
++ BUG();
++ }
++ /* null_complain_profile doesn't contribute to ns ref counting */
++ ns->null_complain_profile->ns = NULL;
++ aa_put_profile(ns->null_complain_profile);
++ kfree(ns->name);
++ kfree(ns);
++}
++
++/**
++ * free_aa_profile_kref - free aa_profile by kref (called by aa_put_profile)
++ * @kref: kref callback for freeing of a profile
++ */
++void free_aa_profile_kref(struct kref *kref)
++{
++ struct aa_profile *p = container_of(kref, struct aa_profile, count);
++
++ free_aa_profile(p);
++}
++
++/**
++ * alloc_aa_profile - allocate, initialize and return a new profile
++ * Returns NULL on failure.
++ */
++struct aa_profile *alloc_aa_profile(void)
++{
++ struct aa_profile *profile;
++
++ profile = kzalloc(sizeof(*profile), GFP_KERNEL);
++ AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
++ if (profile) {
++ INIT_LIST_HEAD(&profile->list);
++ kref_init(&profile->count);
++ INIT_LIST_HEAD(&profile->task_contexts);
++ spin_lock_init(&profile->lock);
++ }
++ return profile;
++}
++
++/**
++ * free_aa_profile - free a profile
++ * @profile: the profile to free
++ *
++ * Free a profile, its hats and null_profile. All references to the profile,
++ * its hats and null_profile must have been put.
++ *
++ * If the profile was referenced from a task context, free_aa_profile() will
++ * be called from an rcu callback routine, so we must not sleep here.
++ */
++void free_aa_profile(struct aa_profile *profile)
++{
++ AA_DEBUG("%s(%p)\n", __FUNCTION__, profile);
++
++ if (!profile)
++ return;
++
++ /* profile is still on profile namespace list -- invalid */
++ if (!list_empty(&profile->list)) {
++ AA_ERROR("%s: internal error, "
++ "profile '%s' still on global list\n",
++ __FUNCTION__,
++ profile->name);
++ BUG();
++ }
++ aa_put_namespace(profile->ns);
++
++ aa_match_free(profile->file_rules);
++
++ if (profile->name) {
++ AA_DEBUG("%s: %s\n", __FUNCTION__, profile->name);
++ kfree(profile->name);
++ }
++
++ kfree(profile);
++}
++
++/**
++ * aa_unconfine_tasks - remove tasks on a profile's task context list
++ * @profile: profile to remove tasks from
++ *
++ * Assumes that @profile lock is held.
++ */
++void aa_unconfine_tasks(struct aa_profile *profile)
++{
++ while (!list_empty(&profile->task_contexts)) {
++ struct task_struct *task =
++ list_entry(profile->task_contexts.next,
++ struct aa_task_context, list)->task;
++ task_lock(task);
++ aa_change_task_context(task, NULL, NULL, 0, NULL);
++ task_unlock(task);
++ }
++}
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: Simplified network controls for AppArmor
+
+Simple network control determining which network families a confined
+application has access to.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ security/apparmor/Makefile | 7 +
+ security/apparmor/apparmor.h | 9 ++
+ security/apparmor/lsm.c | 129 ++++++++++++++++++++++++++++++++++-
+ security/apparmor/main.c | 107 ++++++++++++++++++++++++++++-
+ security/apparmor/module_interface.c | 26 ++++++-
+ 5 files changed, 271 insertions(+), 7 deletions(-)
+
+--- a/security/apparmor/Makefile
++++ b/security/apparmor/Makefile
+@@ -8,6 +8,11 @@ apparmor-y := main.o list.o procattr.o l
+ quiet_cmd_make-caps = GEN $@
+ cmd_make-caps = sed -n -e "/CAP_FS_MASK/d" -e "s/^\#define[ \\t]\\+CAP_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z > $@
+
+-$(obj)/main.o : $(obj)/capability_names.h
++quiet_cmd_make-af = GEN $@
++cmd_make-af = sed -n -e "/AF_MAX/d" -e "/AF_LOCAL/d" -e "s/^\#define[ \\t]\\+AF_\\([A-Z0-9_]\\+\\)[ \\t]\\+\\([0-9]\\+\\)\\(.*\\)\$$/[\\2] = \"\\1\",/p" $< | tr A-Z a-z > $@
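++# e.g. (illustrative) '#define AF_INET 2' becomes '[2] = "inet",'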
++
++$(obj)/main.o : $(obj)/capability_names.h $(obj)/af_names.h
+ $(obj)/capability_names.h : $(srctree)/include/linux/capability.h
+ $(call cmd,make-caps)
++$(obj)/af_names.h : $(srctree)/include/linux/socket.h
++ $(call cmd,make-af)
+--- a/security/apparmor/apparmor.h
++++ b/security/apparmor/apparmor.h
+@@ -16,6 +16,8 @@
+ #include <linux/fs.h>
+ #include <linux/binfmts.h>
+ #include <linux/rcupdate.h>
++#include <linux/socket.h>
++#include <net/sock.h>
+
+ /*
+ * We use MAY_READ, MAY_WRITE, MAY_EXEC, MAY_APPEND and the following flags
+@@ -212,6 +214,9 @@ struct aa_profile {
+ struct list_head task_contexts;
+ spinlock_t lock;
+ unsigned long int_flags;
++ u16 network_families[AF_MAX];
++ u16 audit_network[AF_MAX];
++ u16 quiet_network[AF_MAX];
+ };
+
+ extern struct list_head profile_ns_list;
+@@ -258,6 +263,7 @@ struct aa_audit {
+ int request_mask, denied_mask, audit_mask;
+ struct iattr *iattr;
+ pid_t task, parent;
++ int family, type, protocol;
+ int error_code;
+ };
+
+@@ -319,6 +325,9 @@ extern void aa_change_task_context(struc
+ struct aa_profile *previous_profile);
+ extern int aa_may_ptrace(struct aa_task_context *cxt,
+ struct aa_profile *tracee);
++extern int aa_net_perm(struct aa_profile *profile, char *operation,
++ int family, int type, int protocol);
++extern int aa_revalidate_sk(struct sock *sk, char *operation);
+
+ /* lsm.c */
+ extern int apparmor_initialized;
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -18,6 +18,7 @@
+ #include <linux/ctype.h>
+ #include <linux/sysctl.h>
+ #include <linux/audit.h>
++#include <net/sock.h>
+
+ #include "apparmor.h"
+ #include "inline.h"
+@@ -680,6 +681,117 @@ static void apparmor_task_free_security(
+ aa_release(task);
+ }
+
++static int apparmor_socket_create(int family, int type, int protocol, int kern)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ if (kern)
++ return 0;
++
++ profile = aa_get_profile(current);
++ if (profile)
++ error = aa_net_perm(profile, "socket_create", family,
++ type, protocol);
++ aa_put_profile(profile);
++
++ return error;
++}
++
++static int apparmor_socket_post_create(struct socket *sock, int family,
++ int type, int protocol, int kern)
++{
++ struct sock *sk = sock->sk;
++
++ if (kern)
++ return 0;
++
++ return aa_revalidate_sk(sk, "socket_post_create");
++}
++
++static int apparmor_socket_bind(struct socket *sock,
++ struct sockaddr *address, int addrlen)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_bind");
++}
++
++static int apparmor_socket_connect(struct socket *sock,
++ struct sockaddr *address, int addrlen)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_connect");
++}
++
++static int apparmor_socket_listen(struct socket *sock, int backlog)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_listen");
++}
++
++static int apparmor_socket_accept(struct socket *sock, struct socket *newsock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_accept");
++}
++
++static int apparmor_socket_sendmsg(struct socket *sock,
++ struct msghdr *msg, int size)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_sendmsg");
++}
++
++static int apparmor_socket_recvmsg(struct socket *sock,
++ struct msghdr *msg, int size, int flags)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_recvmsg");
++}
++
++static int apparmor_socket_getsockname(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_getsockname");
++}
++
++static int apparmor_socket_getpeername(struct socket *sock)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_getpeername");
++}
++
++static int apparmor_socket_getsockopt(struct socket *sock, int level,
++ int optname)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_getsockopt");
++}
++
++static int apparmor_socket_setsockopt(struct socket *sock, int level,
++ int optname)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_setsockopt");
++}
++
++static int apparmor_socket_shutdown(struct socket *sock, int how)
++{
++ struct sock *sk = sock->sk;
++
++ return aa_revalidate_sk(sk, "socket_shutdown");
++}
++
+ static int apparmor_getprocattr(struct task_struct *task, char *name,
+ char **value)
+ {
+@@ -780,9 +892,6 @@ struct security_operations apparmor_ops
+ .capable = apparmor_capable,
+ .syslog = cap_syslog,
+
+- .netlink_send = cap_netlink_send,
+- .netlink_recv = cap_netlink_recv,
+-
+ .bprm_apply_creds = cap_bprm_apply_creds,
+ .bprm_set_security = apparmor_bprm_set_security,
+ .bprm_secureexec = apparmor_bprm_secureexec,
+@@ -820,6 +929,20 @@ struct security_operations apparmor_ops
+
+ .getprocattr = apparmor_getprocattr,
+ .setprocattr = apparmor_setprocattr,
++
++ .socket_create = apparmor_socket_create,
++ .socket_post_create = apparmor_socket_post_create,
++ .socket_bind = apparmor_socket_bind,
++ .socket_connect = apparmor_socket_connect,
++ .socket_listen = apparmor_socket_listen,
++ .socket_accept = apparmor_socket_accept,
++ .socket_sendmsg = apparmor_socket_sendmsg,
++ .socket_recvmsg = apparmor_socket_recvmsg,
++ .socket_getsockname = apparmor_socket_getsockname,
++ .socket_getpeername = apparmor_socket_getpeername,
++ .socket_getsockopt = apparmor_socket_getsockopt,
++ .socket_setsockopt = apparmor_socket_setsockopt,
++ .socket_shutdown = apparmor_socket_shutdown,
+ };
+
+ void info_message(const char *str)
+--- a/security/apparmor/main.c
++++ b/security/apparmor/main.c
+@@ -14,6 +14,9 @@
+ #include <linux/audit.h>
+ #include <linux/mount.h>
+ #include <linux/ptrace.h>
++#include <linux/socket.h>
++#include <linux/net.h>
++#include <net/sock.h>
+
+ #include "apparmor.h"
+
+@@ -116,6 +119,24 @@ static void aa_audit_file_mask(struct au
+ audit_log_format(ab, " %s=\"%s::%s\"", name, user, other);
+ }
+
++static const char *address_families[] = {
++#include "af_names.h"
++};
++
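++/* indexed by SOCK_* value; "unknown" placeholders keep indices aligned */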
++static const char *sock_types[] = {
++ "unknown(0)",
++ "stream",
++ "dgram",
++ "raw",
++ "rdm",
++ "seqpacket",
++ "dccp",
++ "unknown(7)",
++ "unknown(8)",
++ "unknown(9)",
++ "packet",
++};
++
+ /**
+ * aa_audit - Log an audit event to the audit subsystem
+ * @profile: profile to check against
+@@ -187,7 +208,25 @@ static int aa_audit_base(struct aa_profi
+ audit_log_untrustedstring(ab, sa->name2);
+ }
+
+- audit_log_format(ab, " pid=%d", current->pid);
++ if (sa->family || sa->type) {
++ if (address_families[sa->family])
++ audit_log_format(ab, " family=\"%s\"",
++ address_families[sa->family]);
++ else
++ audit_log_format(ab, " family=\"unknown(%d)\"",
++ sa->family);
++
++ if (sock_types[sa->type])
++ audit_log_format(ab, " sock_type=\"%s\"",
++ sock_types[sa->type]);
++ else
++ audit_log_format(ab, " sock_type=\"unknown(%d)\"",
++ sa->type);
++
++ audit_log_format(ab, " protocol=%d", sa->protocol);
++ }
++
++ audit_log_format(ab, " pid=%d", current->pid);
+
+ if (profile) {
+ audit_log_format(ab, " profile=");
+@@ -767,6 +806,72 @@ int aa_link(struct aa_profile *profile,
+
+ return error;
+ }
++
++int aa_net_perm(struct aa_profile *profile, char *operation,
++ int family, int type, int protocol)
++{
++ struct aa_audit sa;
++ int error = 0;
++ u16 family_mask, audit_mask, quiet_mask;
++
++ if ((family < 0) || (family >= AF_MAX))
++ return -EINVAL;
++
++ if ((type < 0) || (type >= SOCK_MAX))
++ return -EINVAL;
++
++ /* unix domain and netlink sockets are handled by ipc */
++ if (family == AF_UNIX || family == AF_NETLINK)
++ return 0;
++
++ family_mask = profile->network_families[family];
++ audit_mask = profile->audit_network[family];
++ quiet_mask = profile->quiet_network[family];
++
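++ /* the family entry is a bitmask over socket types, e.g.
++ * (illustrative) bit SOCK_STREAM set in network_families[AF_INET]
++ * permits stream sockets in the inet family */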
++ error = (family_mask & (1 << type)) ? 0 : -EACCES;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = operation;
++ sa.gfp_mask = GFP_KERNEL;
++ sa.family = family;
++ sa.type = type;
++ sa.protocol = protocol;
++ sa.error_code = error;
++
++ if (likely(!error)) {
++ if (!PROFILE_AUDIT(profile) && !(family_mask & audit_mask))
++ return 0;
++ } else if (!((1 << type) & ~quiet_mask)) {
++ return error;
++ }
++
++ error = aa_audit(profile, &sa);
++
++ return error;
++}
++
++int aa_revalidate_sk(struct sock *sk, char *operation)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ /* this is some debugging code to flush out the network hooks
++ * that are called in interrupt context */
++ if (in_interrupt()) {
++ printk(KERN_DEBUG "AppArmor Debug: Hook being called from interrupt context\n");
++ dump_stack();
++ return 0;
++ }
++
++ profile = aa_get_profile(current);
++ if (profile)
++ error = aa_net_perm(profile, operation,
++ sk->sk_family, sk->sk_type,
++ sk->sk_protocol);
++ aa_put_profile(profile);
++
++ return error;
++}
+
+ /*******************************
+ * Global task related functions
+--- a/security/apparmor/module_interface.c
++++ b/security/apparmor/module_interface.c
+@@ -321,8 +321,8 @@ static struct aa_profile *aa_unpack_prof
+ struct aa_audit *sa)
+ {
+ struct aa_profile *profile = NULL;
+-
+- int error = -EPROTO;
++ size_t size = 0;
++ int i, error = -EPROTO;
+
+ profile = alloc_aa_profile();
+ if (!profile)
+@@ -355,6 +355,28 @@ static struct aa_profile *aa_unpack_prof
+ if (!aa_is_u32(e, &(profile->set_caps), NULL))
+ goto fail;
+
++ size = aa_is_array(e, "net_allowed_af");
++ if (size) {
++ if (size > AF_MAX)
++ goto fail;
++
++ for (i = 0; i < size; i++) {
++ if (!aa_is_u16(e, &profile->network_families[i], NULL))
++ goto fail;
++ if (!aa_is_u16(e, &profile->audit_network[i], NULL))
++ goto fail;
++ if (!aa_is_u16(e, &profile->quiet_network[i], NULL))
++ goto fail;
++ }
++ if (!aa_is_nameX(e, AA_ARRAYEND, NULL))
++ goto fail;
++ /* allow unix domain and netlink sockets; they are handled
++ * by IPC
++ */
++ }
++ profile->network_families[AF_UNIX] = 0xffff;
++ profile->network_families[AF_NETLINK] = 0xffff;
++
+ /* get file rules */
+ profile->file_rules = aa_unpack_dfa(e);
+ if (IS_ERR(profile->file_rules)) {
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: apparmor: use new ptrace security_operations
+
+ This patch implements the new ptrace security_operations members.
+
+ ->ptrace was changed to ->ptrace_may_access and ->ptrace_traceme.
+
+ The apparmor versions are really just wrappers for the old function.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ security/apparmor/lsm.c | 17 +++++++++++++++--
+ 1 file changed, 15 insertions(+), 2 deletions(-)
+
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -158,7 +158,7 @@ static int aa_reject_syscall(struct task
+ }
+
+ static int apparmor_ptrace(struct task_struct *parent,
+- struct task_struct *child, unsigned int mode)
++ struct task_struct *child)
+ {
+ struct aa_task_context *cxt;
+ int error = 0;
+@@ -207,6 +207,18 @@ static int apparmor_ptrace(struct task_s
+ return error;
+ }
+
++static int apparmor_ptrace_may_access(struct task_struct *child,
++ unsigned int mode)
++{
++ return apparmor_ptrace(current, child);
++}
++
++
++static int apparmor_ptrace_traceme(struct task_struct *parent)
++{
++ return apparmor_ptrace(parent, current);
++}
++
+ static int apparmor_capable(struct task_struct *task, int cap)
+ {
+ int error;
+@@ -899,7 +911,8 @@ static int apparmor_task_setrlimit(unsig
+ }
+
+ struct security_operations apparmor_ops = {
+- .ptrace = apparmor_ptrace,
++ .ptrace_may_access = apparmor_ptrace_may_access,
++ .ptrace_traceme = apparmor_ptrace_traceme,
+ .capget = cap_capget,
+ .capset_check = cap_capset_check,
+ .capset_set = cap_capset_set,
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: per profile controls for system rlimits
+
+Provide control of rlimits on a per profile basis. Each profile provides
+a per limit control and corresponding hard limit value, such that when a
+profile becomes attached to a task it sets the task's limits to be <= to
+the profile's specified limits. Note: the profile limit value will not
+raise a task's limit if it is already less than the profile mandates.
+For example (illustrative values), a profile hard limit of 1024 open
+files lowers a task's hard limit of 4096 to 1024 on attach, but leaves
+a task already limited to 512 untouched.
+
+In addition to setting a task's limits, the ability to set limits on
+a confined task is controlled. AppArmor only controls the raising
+of a task's limits: tasks with CAP_SYS_RESOURCE can have their hard
+limits raised up to the value specified by the profile. AppArmor does
+not prevent a task from lowering its hard limits, nor does it provide
+additional control over soft limits.
+
+AppArmor only controls the limits specified in a profile, so that
+any limit not specified is free to be modified subject to standard
+Linux limitations.
+
+---
+ security/apparmor/apparmor.h | 23 ++++++
+ security/apparmor/apparmorfs.c | 2
+ security/apparmor/lsm.c | 16 ++++
+ security/apparmor/main.c | 132 +++++++++++++++++++++++++++++++----
+ security/apparmor/module_interface.c | 56 ++++++++++++++
+ 5 files changed, 215 insertions(+), 14 deletions(-)
+
+--- a/security/apparmor/apparmor.h
++++ b/security/apparmor/apparmor.h
+@@ -16,6 +16,7 @@
+ #include <linux/fs.h>
+ #include <linux/binfmts.h>
+ #include <linux/rcupdate.h>
++#include <linux/resource.h>
+ #include <linux/socket.h>
+ #include <net/sock.h>
+
+@@ -139,6 +140,18 @@ extern unsigned int apparmor_path_max;
+
+ #define AA_ERROR(fmt, args...) printk(KERN_ERR "AppArmor: " fmt, ##args)
+
++/* struct aa_rlimit - rlimit settings for the profile
++ * @mask: which hard limits to set
++ * @limits: rlimit values that override task limits
++ *
++ * AppArmor rlimits are used to set confined task rlimits. Only the
++ * limits specified in @mask will be controlled by AppArmor.
++ */
++struct aa_rlimit {
++ unsigned int mask;
++ struct rlimit limits[RLIM_NLIMITS];
++};
++
+ struct aa_profile;
+
+ /* struct aa_namespace - namespace for a set of profiles
+@@ -173,6 +186,8 @@ struct aa_namespace {
+ * @audit_caps: caps that are to be audited
+ * @quiet_caps: caps that should not be audited
+ * @capabilities: capabilities granted by the process
++ * @rlimits: rlimits for the profile
++ * @task_count: how many tasks the profile is attached to
+ * @count: reference count of the profile
+ * @task_contexts: list of tasks confined by profile
+ * @lock: lock for the task_contexts list
+@@ -210,6 +225,9 @@ struct aa_profile {
+ kernel_cap_t audit_caps;
+ kernel_cap_t quiet_caps;
+
++ struct aa_rlimit rlimits;
++ unsigned int task_count;
++
+ struct kref count;
+ struct list_head task_contexts;
+ spinlock_t lock;
+@@ -261,6 +279,7 @@ struct aa_audit {
+ const char *name2;
+ const char *name3;
+ int request_mask, denied_mask, audit_mask;
++ int rlimit;
+ struct iattr *iattr;
+ pid_t task, parent;
+ int family, type, protocol;
+@@ -328,6 +347,10 @@ extern int aa_may_ptrace(struct aa_task_
+ extern int aa_net_perm(struct aa_profile *profile, char *operation,
+ int family, int type, int protocol);
+ extern int aa_revalidate_sk(struct sock *sk, char *operation);
++extern int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
++ struct rlimit *new_rlim);
++extern void aa_set_rlimits(struct task_struct *task, struct aa_profile *profile);
++
+
+ /* lsm.c */
+ extern int apparmor_initialized;
+--- a/security/apparmor/apparmorfs.c
++++ b/security/apparmor/apparmorfs.c
+@@ -106,7 +106,7 @@ static ssize_t aa_features_read(struct f
+ {
+ const char *features = "file=3.0 capability=2.0 network=1.0 "
+ "change_hat=1.5 change_profile=1.0 "
+- "aanamespaces=1.0";
++ "aanamespaces=1.0 rlimit=1.0";
+
+ return simple_read_from_buffer(buf, size, ppos, features,
+ strlen(features));
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -883,6 +883,21 @@ static int apparmor_setprocattr(struct t
+ return error;
+ }
+
++static int apparmor_task_setrlimit(unsigned int resource,
++ struct rlimit *new_rlim)
++{
++ struct aa_profile *profile;
++ int error = 0;
++
++ profile = aa_get_profile(current);
++ if (profile) {
++ error = aa_task_setrlimit(profile, resource, new_rlim);
++ }
++ aa_put_profile(profile);
++
++ return error;
++}
++
+ struct security_operations apparmor_ops = {
+ .ptrace = apparmor_ptrace,
+ .capget = cap_capget,
+@@ -926,6 +941,7 @@ struct security_operations apparmor_ops
+ .task_free_security = apparmor_task_free_security,
+ .task_post_setuid = cap_task_post_setuid,
+ .task_reparent_to_init = cap_task_reparent_to_init,
++ .task_setrlimit = apparmor_task_setrlimit,
+
+ .getprocattr = apparmor_getprocattr,
+ .setprocattr = apparmor_setprocattr,
+--- a/security/apparmor/main.c
++++ b/security/apparmor/main.c
+@@ -177,6 +177,9 @@ static int aa_audit_base(struct aa_profi
+ if (sa->request_mask)
+ audit_log_format(ab, " fsuid=%d", current->fsuid);
+
++ if (sa->rlimit)
++ audit_log_format(ab, " rlimit=%d", sa->rlimit - 1);
++
+ if (sa->iattr) {
+ struct iattr *iattr = sa->iattr;
+
+@@ -872,6 +875,79 @@ int aa_revalidate_sk(struct sock *sk, ch
+
+ return error;
+ }
++/**
++ * aa_task_setrlimit - test permission to set an rlimit
++ * @profile - profile confining the task
++ * @resource - the resource being set
++ * @new_rlim - the new resource limit
++ *
++ * Control raising the processes hard limit.
++ */
++int aa_task_setrlimit(struct aa_profile *profile, unsigned int resource,
++ struct rlimit *new_rlim)
++{
++ struct aa_audit sa;
++ int error = 0;
++
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "setrlimit";
++ sa.gfp_mask = GFP_KERNEL;
++ sa.rlimit = resource + 1;
++
++ if (profile->rlimits.mask & (1 << resource) &&
++ new_rlim->rlim_max > profile->rlimits.limits[resource].rlim_max) {
++ sa.error_code = -EACCES;
++
++ error = aa_audit(profile, &sa);
++ }
++
++ return error;
++}
++
++static int aa_rlimit_nproc(struct aa_profile *profile) {
++ if (profile && (profile->rlimits.mask & (1 << RLIMIT_NPROC)) &&
++ profile->task_count >= profile->rlimits.limits[RLIMIT_NPROC].rlim_max)
++ return -EAGAIN;
++ return 0;
++}
++
++void aa_set_rlimits(struct task_struct *task, struct aa_profile *profile)
++{
++ int i, mask;
++
++ if (!profile)
++ return;
++
++ if (!profile->rlimits.mask)
++ return;
++
++ task_lock(task->group_leader);
++ mask = 1;
++ for (i = 0; i < RLIM_NLIMITS; i++, mask <<= 1) {
++ struct rlimit new_rlim, *old_rlim;
++
++ /* check to see if NPROC which is per profile and handled
++ * in clone/exec or whether this is a limit to be set
++ * can't set cpu limit either right now
++ */
++ if (i == RLIMIT_NPROC || i == RLIMIT_CPU)
++ continue;
++
++ old_rlim = task->signal->rlim + i;
++ new_rlim = *old_rlim;
++
++ if (mask & profile->rlimits.mask &&
++ profile->rlimits.limits[i].rlim_max < new_rlim.rlim_max) {
++ new_rlim.rlim_max = profile->rlimits.limits[i].rlim_max;
++ /* soft limit should not exceed hard limit */
++ if (new_rlim.rlim_cur > new_rlim.rlim_max)
++ new_rlim.rlim_cur = new_rlim.rlim_max;
++ }
++
++ *old_rlim = new_rlim;
++ }
++ task_unlock(task->group_leader);
++}
+
+ /*******************************
+ * Global task related functions
+@@ -885,6 +961,7 @@ int aa_revalidate_sk(struct sock *sk, ch
+ */
+ int aa_clone(struct task_struct *child)
+ {
++ struct aa_audit sa;
+ struct aa_task_context *cxt, *child_cxt;
+ struct aa_profile *profile;
+
+@@ -894,6 +971,11 @@ int aa_clone(struct task_struct *child)
+ if (!child_cxt)
+ return -ENOMEM;
+
++ memset(&sa, 0, sizeof(sa));
++ sa.operation = "clone";
++ sa.task = child->pid;
++ sa.gfp_mask = GFP_KERNEL;
++
+ repeat:
+ profile = aa_get_profile(current);
+ if (profile) {
+@@ -910,18 +992,22 @@ repeat:
+ goto repeat;
+ }
+
++ if (aa_rlimit_nproc(profile)) {
++ sa.info = "rlimit nproc limit exceeded";
++ unlock_profile(profile);
++ aa_audit_reject(profile, &sa);
++ aa_put_profile(profile);
++ return -EAGAIN;
++ }
++
+ /* No need to grab the child's task lock here. */
+ aa_change_task_context(child, child_cxt, profile,
+ cxt->cookie, cxt->previous_profile);
++
+ unlock_profile(profile);
+
+ if (APPARMOR_COMPLAIN(child_cxt) &&
+ profile == profile->ns->null_complain_profile) {
+- struct aa_audit sa;
+- memset(&sa, 0, sizeof(sa));
+- sa.operation = "clone";
+- sa.gfp_mask = GFP_KERNEL;
+- sa.task = child->pid;
+ aa_audit_hint(profile, &sa);
+ }
+ aa_put_profile(profile);
+@@ -1156,6 +1242,10 @@ repeat:
+ sa.task = current->parent->pid;
+ aa_audit_reject(profile, &sa);
+ }
++ if (PTR_ERR(old_profile) == -EAGAIN) {
++ sa.info = "rlimit nproc limit exceeded";
++ aa_audit_reject(profile, &sa);
++ }
+ new_profile = old_profile;
+ goto cleanup;
+ }
+@@ -1303,6 +1393,12 @@ static int do_change_profile(struct aa_p
+ goto out;
+ }
+
++ if ((error = aa_rlimit_nproc(new_profile))) {
++ sa->info = "rlimit nproc limit exceeded";
++ aa_audit_reject(cxt->profile, sa);
++ goto out;
++ }
++
+ if (new_profile == ns->null_complain_profile)
+ aa_audit_hint(cxt->profile, sa);
+
+@@ -1481,17 +1577,18 @@ struct aa_profile *__aa_replace_profile(
+
+ cxt = lock_task_and_profiles(task, profile);
+ if (unlikely(profile && profile->isstale)) {
+- task_unlock(task);
+- unlock_both_profiles(profile, cxt ? cxt->profile : NULL);
+- aa_free_task_context(new_cxt);
+- return ERR_PTR(-ESTALE);
++ old_profile = ERR_PTR(-ESTALE);
++ goto error;
+ }
+
+ if ((current->ptrace & PT_PTRACED) && aa_may_ptrace(cxt, profile)) {
+- task_unlock(task);
+- unlock_both_profiles(profile, cxt ? cxt->profile : NULL);
+- aa_free_task_context(new_cxt);
+- return ERR_PTR(-EPERM);
++ old_profile = ERR_PTR(-EPERM);
++ goto error;
++ }
++
++ if (aa_rlimit_nproc(profile)) {
++ old_profile = ERR_PTR(-EAGAIN);
++ goto error;
+ }
+
+ if (cxt)
+@@ -1499,8 +1596,15 @@ struct aa_profile *__aa_replace_profile(
+ aa_change_task_context(task, new_cxt, profile, 0, NULL);
+
+ task_unlock(task);
++ aa_set_rlimits(task, profile);
+ unlock_both_profiles(profile, old_profile);
+ return old_profile;
++
++error:
++ task_unlock(task);
++ unlock_both_profiles(profile, cxt ? cxt->profile : NULL);
++ aa_free_task_context(new_cxt);
++ return old_profile;
+ }
+
+ /**
+@@ -1565,6 +1669,7 @@ void aa_change_task_context(struct task_
+
+ if (old_cxt) {
+ list_del_init(&old_cxt->list);
++ old_cxt->profile->task_count--;
+ call_rcu(&old_cxt->rcu, free_aa_task_context_rcu_callback);
+ }
+ if (new_cxt) {
+@@ -1576,6 +1681,7 @@ void aa_change_task_context(struct task_
+ new_cxt->cookie = cookie;
+ new_cxt->task = task;
+ new_cxt->profile = aa_dup_profile(profile);
++ profile->task_count++;
+ new_cxt->previous_profile = aa_dup_profile(previous_profile);
+ list_move(&new_cxt->list, &profile->task_contexts);
+ }
+--- a/security/apparmor/module_interface.c
++++ b/security/apparmor/module_interface.c
+@@ -177,6 +177,22 @@ fail:
+ return 0;
+ }
+
++static int aa_is_u64(struct aa_ext *e, u64 *data, const char *name)
++{
++ void *pos = e->pos;
++ if (aa_is_nameX(e, AA_U64, name)) {
++ if (!aa_inbounds(e, sizeof(u64)))
++ goto fail;
++ if (data)
++ *data = le64_to_cpu(get_unaligned((u64 *)e->pos));
++ e->pos += sizeof(u64);
++ return 1;
++ }
++fail:
++ e->pos = pos;
++ return 0;
++}
++
+ static size_t aa_is_array(struct aa_ext *e, const char *name)
+ {
+ void *pos = e->pos;
+@@ -312,6 +328,39 @@ fail:
+ return 0;
+ }
+
++int aa_unpack_rlimits(struct aa_ext *e, struct aa_profile *profile)
++{
++ void *pos = e->pos;
++
++ /* rlimits are optional */
++ if (aa_is_nameX(e, AA_STRUCT, "rlimits")) {
++ int i, size;
++ u32 tmp = 0;
++ if (!aa_is_u32(e, &tmp, NULL))
++ goto fail;
++ profile->rlimits.mask = tmp;
++
++ size = aa_is_array(e, NULL);
++ if (size > RLIM_NLIMITS)
++ goto fail;
++ for (i = 0; i < size; i++) {
++ u64 tmp = 0;
++ if (!aa_is_u64(e, &tmp, NULL))
++ goto fail;
++ profile->rlimits.limits[i].rlim_max = tmp;
++ }
++ if (!aa_is_nameX(e, AA_ARRAYEND, NULL))
++ goto fail;
++ if (!aa_is_nameX(e, AA_STRUCTEND, NULL))
++ goto fail;
++ }
++ return 1;
++
++fail:
++ e->pos = pos;
++ return 0;
++}
++
+ /**
+ * aa_unpack_profile - unpack a serialized profile
+ * @e: serialized data extent information
+@@ -355,6 +404,9 @@ static struct aa_profile *aa_unpack_prof
+ if (!aa_is_u32(e, &(profile->set_caps), NULL))
+ goto fail;
+
++ if (!aa_unpack_rlimits(e, profile))
++ goto fail;
++
+ size = aa_is_array(e, "net_allowed_af");
+ if (size) {
+ if (size > AF_MAX)
+@@ -614,6 +666,8 @@ ssize_t aa_replace_profile(void *udata,
+ sa.operation = "profile_load";
+ goto out;
+ }
++ /* do not fail replacement based on the profile's NPROC rlimit */
++
+ /*
+ * Replacement needs to allocate a new aa_task_context for each
+ * task confined by old_profile. To do this the profile locks
+@@ -634,6 +688,7 @@ ssize_t aa_replace_profile(void *udata,
+ task_lock(task);
+ task_replace(task, new_cxt, new_profile);
+ task_unlock(task);
++ aa_set_rlimits(task, new_profile);
+ new_cxt = NULL;
+ }
+ unlock_both_profiles(old_profile, new_profile);
+@@ -656,6 +711,7 @@ out:
+ *
+ * remove a profile from the profile list and all aa_task_context references
+ * to said profile.
+ * NOTE: removing confinement does not restore rlimits to pre-confinement values
+ */
+ ssize_t aa_remove_profile(char *name, size_t size)
+ {
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Add d_namespace_path() to compute namespace relative pathnames
+
+In AppArmor, we are interested in pathnames relative to the namespace root.
+This is the same as d_path(), except for the root at which the search
+ends. Add a function for computing the namespace-relative path.
+
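+A minimal usage sketch (hypothetical caller; the struct path pointer and
+the do_something_with() consumer are illustrative only):
+
+	char *buf = (char *)__get_free_page(GFP_KERNEL);
+	char *name;
+
+	if (!buf)
+		return -ENOMEM;
+	name = d_namespace_path(path->dentry, path->mnt, buf, PAGE_SIZE);
+	if (!IS_ERR(name))
+		do_something_with(name);	/* name points into buf */
+	free_page((unsigned long)buf);
+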
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namespace.c | 30 ++++++++++++++++++++++++++++++
+ include/linux/mount.h | 2 ++
+ 2 files changed, 32 insertions(+)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2357,3 +2357,33 @@ void put_mnt_ns(struct mnt_namespace *ns
+ kfree(ns);
+ }
+ EXPORT_SYMBOL(put_mnt_ns);
++
++char *d_namespace_path(struct dentry *dentry, struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ struct path root, tmp, ns_root = { };
++ struct path path = { .mnt = vfsmnt, .dentry = dentry };
++ char *res;
++
++ read_lock(&current->fs->lock);
++ root = current->fs->root;
++ path_get(&current->fs->root);
++ read_unlock(&current->fs->lock);
++ spin_lock(&vfsmount_lock);
++ if (root.mnt)
++ ns_root.mnt = mntget(root.mnt->mnt_ns->root);
++ if (ns_root.mnt)
++ ns_root.dentry = dget(ns_root.mnt->mnt_root);
++ spin_unlock(&vfsmount_lock);
++ tmp = ns_root;
++ res = __d_path(&path, &tmp, buf, buflen,
++ D_PATH_FAIL_DELETED | D_PATH_DISCONNECT);
++ path_put(&root);
++ path_put(&ns_root);
++
++ /* Prevent empty path for lazily unmounted filesystems. */
++ if (!IS_ERR(res) && *res == '\0')
++ *--res = '.';
++ return res;
++}
++EXPORT_SYMBOL(d_namespace_path);
+--- a/include/linux/mount.h
++++ b/include/linux/mount.h
+@@ -137,4 +137,6 @@ extern void mark_mounts_for_expiry(struc
+ extern spinlock_t vfsmount_lock;
+ extern dev_t name_to_dev_t(char *name);
+
++extern char *d_namespace_path(struct dentry *, struct vfsmount *, char *, int);
++
+ #endif /* _LINUX_MOUNT_H */
--- /dev/null
+From: Miklos Szeredi <mszeredi@suse.cz>
+Subject: fix oops in d_namespace_path
+Patch-mainline: no
+References: bnc#433504
+
+d_namespace_path() uses current->fs->root to get the current
+namespace. If the root is detached, root.mnt->mnt_ns will be NULL,
+causing an oops. Fix by checking for this before dereferencing mnt_ns.
+
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+---
+ fs/namespace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/namespace.c
++++ b/fs/namespace.c
+@@ -2370,7 +2370,7 @@ char *d_namespace_path(struct dentry *de
+ path_get(&current->fs->root);
+ read_unlock(&current->fs->lock);
+ spin_lock(&vfsmount_lock);
+- if (root.mnt)
++ if (root.mnt && root.mnt->mnt_ns)
+ ns_root.mnt = mntget(root.mnt->mnt_ns->root);
+ if (ns_root.mnt)
+ ns_root.dentry = dget(ns_root.mnt->mnt_root);
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Switch to vfs_permission() in do_path_lookup()
+
+Switch from file_permission() to vfs_permission() in do_path_lookup():
+this avoids calling permission() with a NULL nameidata here.
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1085,24 +1085,21 @@ static int do_path_lookup(int dfd, const
+ path_get(&fs->pwd);
+ read_unlock(&fs->lock);
+ } else {
+- struct dentry *dentry;
+-
+ file = fget_light(dfd, &fput_needed);
+ retval = -EBADF;
+ if (!file)
+ goto out_fail;
+
+- dentry = file->f_path.dentry;
++ nd->path = file->f_path;
+
+ retval = -ENOTDIR;
+- if (!S_ISDIR(dentry->d_inode->i_mode))
++ if (!S_ISDIR(nd->path.dentry->d_inode->i_mode))
+ goto fput_fail;
+
+ retval = file_permission(file, MAY_EXEC);
+ if (retval)
+ goto fput_fail;
+
+- nd->path = file->f_path;
+ path_get(&file->f_path);
+
+ fput_light(file, fput_needed);
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Enable LSM hooks to distinguish operations on file descriptors from operations on pathnames
+
+Struct iattr already contains ia_file since commit cc4e69de from
+Miklos (which is related to commit befc649c). Use it to pass the
+struct file down to the setattr hooks. This allows LSMs to distinguish
+operations on file descriptors from operations on paths.
+
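+A sketch of what this enables in an LSM setattr hook (hypothetical
+hook; mediate_file() and mediate_path() are placeholders):
+
+	static int example_inode_setattr(struct dentry *dentry,
+					 struct vfsmount *mnt,
+					 struct iattr *attr)
+	{
+		if (attr->ia_valid & ATTR_FILE)
+			/* fchmod()/fchown(): mediate the open file */
+			return mediate_file(attr->ia_file);
+		/* chmod()/chown() etc.: mediate the pathname */
+		return mediate_path(dentry, mnt);
+	}
+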
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Cc: Miklos Szeredi <mszeredi@suse.cz>
+
+---
+ fs/nfsd/vfs.c | 12 +++++++-----
+ fs/open.c | 5 ++++-
+ 2 files changed, 11 insertions(+), 6 deletions(-)
+
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -425,7 +425,7 @@ static ssize_t nfsd_getxattr(struct dent
+ {
+ ssize_t buflen;
+
+- buflen = vfs_getxattr(dentry, mnt, key, NULL, 0);
++ buflen = vfs_getxattr(dentry, mnt, key, NULL, 0, NULL);
+ if (buflen <= 0)
+ return buflen;
+
+@@ -433,7 +433,7 @@ static ssize_t nfsd_getxattr(struct dent
+ if (!*buf)
+ return -ENOMEM;
+
+- return vfs_getxattr(dentry, mnt, key, *buf, buflen);
++ return vfs_getxattr(dentry, mnt, key, *buf, buflen, NULL);
+ }
+ #endif
+
+@@ -459,7 +459,7 @@ set_nfsv4_acl_one(struct dentry *dentry,
+ goto out;
+ }
+
+- error = vfs_setxattr(dentry, mnt, key, buf, len, 0);
++ error = vfs_setxattr(dentry, mnt, key, buf, len, 0, NULL);
+ out:
+ kfree(buf);
+ return error;
+@@ -2133,12 +2133,14 @@ nfsd_set_posix_acl(struct svc_fh *fhp, i
+ if (error)
+ goto getout;
+ if (size)
+- error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size,0);
++ error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size, 0,
++ NULL);
+ else {
+ if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
+ error = 0;
+ else {
+- error = vfs_removexattr(fhp->fh_dentry, mnt, name);
++ error = vfs_removexattr(fhp->fh_dentry, mnt, name,
++ NULL);
+ if (error == -ENODATA)
+ error = 0;
+ }
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -623,7 +623,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+- newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
++ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME | ATTR_FILE;
+ err = fnotify_change(dentry, file->f_path.mnt, &newattrs, file);
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(file->f_path.mnt);
+@@ -686,6 +686,9 @@ static int chown_common(struct dentry *
+ if (!S_ISDIR(inode->i_mode))
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
++ if (file)
++ newattrs.ia_valid |= ATTR_FILE;
++
+ mutex_lock(&inode->i_mutex);
+ error = fnotify_change(dentry, mnt, &newattrs, file);
+ mutex_unlock(&inode->i_mutex);
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: fix enforcement of deny rules in complain mode
+Patch-mainline: no
+References: bnc#426159
+
+Fix enforcement of deny rules so that they are not enforced in complain
+mode. This is necessary so that application behavior is not changed by
+the presence of the deny rule.
+
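+For example, with an illustrative profile fragment such as:
+
+	deny /etc/shadow r,
+
+enforce mode rejects the read, while complain mode, with this fix,
+permits it so the application behaves as if the rule were absent.
+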
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ security/apparmor/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/apparmor/main.c
++++ b/security/apparmor/main.c
+@@ -325,7 +325,7 @@ static int aa_audit_file(struct aa_profi
+ } else {
+ int mask = AUDIT_QUIET_MASK(sa->audit_mask);
+
+- if (!(sa->denied_mask & ~mask))
++ if (!(sa->denied_mask & ~mask) && !PROFILE_COMPLAIN(profile))
+ return sa->error_code;
+
+ /* mask off perms whose denial is being silenced */
--- /dev/null
+From: John Johansen <jrjohansen@verizon.net>
+Subject: [PATCH] AppArmor: Fix leak of filename for deleted files
+
+ This patch fixes a memory leak where the name buffer doesn't get
+ freed when a file has been deleted.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+---
+ security/apparmor/main.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/security/apparmor/main.c
++++ b/security/apparmor/main.c
+@@ -500,10 +500,10 @@ static char *aa_get_name(struct dentry *
+ *buffer = buf;
+ return name;
+ }
++ kfree(buf);
+ if (PTR_ERR(name) != -ENAMETOOLONG)
+ return name;
+
+- kfree(buf);
+ size <<= 1;
+ if (size > apparmor_path_max)
+ return ERR_PTR(-ENAMETOOLONG);
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: fix recognition of security= boot parameter
+Patch-mainline: no
+References: bnc#442668
+
+Fix AppArmor to respect the kernel boot parameter security=, so that if a
+different LSM is chosen, AppArmor does not try to register its LSM hooks.
+
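+For example (illustrative), with CONFIG_SECURITY_DEFAULT="apparmor"
+set in the kernel configuration, booting with:
+
+	security=selinux
+
+on the kernel command line overrides the default, and AppArmor will
+skip registering its hooks.
+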
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ security/Kconfig | 9 +++++++++
+ security/apparmor/lsm.c | 5 +++--
+ security/security.c | 2 +-
+ 3 files changed, 13 insertions(+), 3 deletions(-)
+
+--- a/security/Kconfig
++++ b/security/Kconfig
+@@ -51,6 +51,15 @@ config SECURITY
+
+ If you are unsure how to answer this question, answer N.
+
++config SECURITY_DEFAULT
++ string "Default security module"
++ depends on SECURITY
++ default ""
++ help
++ This determines the security module used if the security=
++ boot parameter is not provided. If a security module is not
++ specified, the first module to register will be used.
++
+ config SECURITY_NETWORK
+ bool "Socket and Networking Security Hooks"
+ depends on SECURITY
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -911,6 +911,7 @@ static int apparmor_task_setrlimit(unsig
+ }
+
+ struct security_operations apparmor_ops = {
++ .name = "apparmor",
+ .ptrace_may_access = apparmor_ptrace_may_access,
+ .ptrace_traceme = apparmor_ptrace_traceme,
+ .capget = cap_capget,
+@@ -989,8 +990,8 @@ static int __init apparmor_init(void)
+ {
+ int error;
+
+- if (!apparmor_enabled) {
+- info_message("AppArmor disabled by boottime parameter\n");
++ if (!apparmor_enabled || !security_module_enable(&apparmor_ops)) {
++ info_message("AppArmor disabled by boot time parameter\n");
+ return 0;
+ }
+
+--- a/security/security.c
++++ b/security/security.c
+@@ -18,7 +18,7 @@
+ #include <linux/security.h>
+
+ /* Boot-time LSM user choice */
+-static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1];
++static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = CONFIG_SECURITY_DEFAULT;
+
+ /* things that live in capability.c */
+ extern struct security_operations default_security_ops;
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: Call the LSM hook before unhashing the dentry in vfs_rmdir()
+
+If we unhash the dentry before calling the security_inode_rmdir hook,
+we cannot compute the file's pathname in the hook anymore. AppArmor,
+however, needs to know the filename in order to decide whether a file
+may be deleted.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/namei.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2177,6 +2177,10 @@ int vfs_rmdir(struct inode *dir, struct
+ if (!dir->i_op || !dir->i_op->rmdir)
+ return -EPERM;
+
++ error = security_inode_rmdir(dir, dentry, mnt);
++ if (error)
++ return error;
++
+ DQUOT_INIT(dir);
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+@@ -2184,12 +2188,9 @@ int vfs_rmdir(struct inode *dir, struct
+ if (d_mountpoint(dentry))
+ error = -EBUSY;
+ else {
+- error = security_inode_rmdir(dir, dentry, mnt);
+- if (!error) {
+- error = dir->i_op->rmdir(dir, dentry);
+- if (!error)
+- dentry->d_inode->i_flags |= S_DEAD;
+- }
++ error = dir->i_op->rmdir(dir, dentry);
++ if (!error)
++ dentry->d_inode->i_flags |= S_DEAD;
+ }
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ if (!error) {
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: fix log messages to enable tools profile learning
+Patch-mainline: no
+References: bnc#447564
+
+The allocation of the child pid is done after the LSM clone hook, which
+breaks the AppArmor tools' fork tracking for profile learning. Output
+the parent pid with each log message so the tools can handle fork
+tracking.
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ security/apparmor/lsm.c | 28 ----------------------------
+ security/apparmor/main.c | 10 +++++-----
+ security/apparmor/module_interface.c | 2 +-
+ 3 files changed, 6 insertions(+), 34 deletions(-)
+
+--- a/security/apparmor/lsm.c
++++ b/security/apparmor/lsm.c
+@@ -143,20 +143,6 @@ static int param_set_aa_enabled(const ch
+ return 0;
+ }
+
+-static int aa_reject_syscall(struct task_struct *task, gfp_t flags,
+- const char *name)
+-{
+- struct aa_profile *profile = aa_get_profile(task);
+- int error = 0;
+-
+- if (profile) {
+- error = aa_audit_syscallreject(profile, flags, name);
+- aa_put_profile(profile);
+- }
+-
+- return error;
+-}
+-
+ static int apparmor_ptrace(struct task_struct *parent,
+ struct task_struct *child)
+ {
+@@ -292,17 +278,6 @@ static int apparmor_bprm_secureexec(stru
+ return ret;
+ }
+
+-static int apparmor_sb_mount(char *dev_name, struct path *path, char *type,
+- unsigned long flags, void *data)
+-{
+- return aa_reject_syscall(current, GFP_KERNEL, "mount");
+-}
+-
+-static int apparmor_umount(struct vfsmount *mnt, int flags)
+-{
+- return aa_reject_syscall(current, GFP_KERNEL, "umount");
+-}
+-
+ static int apparmor_inode_mkdir(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mask)
+ {
+@@ -925,9 +900,6 @@ struct security_operations apparmor_ops
+ .bprm_set_security = apparmor_bprm_set_security,
+ .bprm_secureexec = apparmor_bprm_secureexec,
+
+- .sb_mount = apparmor_sb_mount,
+- .sb_umount = apparmor_umount,
+-
+ .inode_mkdir = apparmor_inode_mkdir,
+ .inode_rmdir = apparmor_inode_rmdir,
+ .inode_create = apparmor_inode_create,
+--- a/security/apparmor/main.c
++++ b/security/apparmor/main.c
+@@ -229,9 +229,13 @@ static int aa_audit_base(struct aa_profi
+ audit_log_format(ab, " protocol=%d", sa->protocol);
+ }
+
+- audit_log_format(ab, " pid=%d", current->pid);
++ audit_log_format(ab, " pid=%d", current->pid);
+
+ if (profile) {
++ if (!sa->parent)
++ audit_log_format(ab, " parent=%d",
++ current->real_parent->pid);
++
+ audit_log_format(ab, " profile=");
+ audit_log_untrustedstring(ab, profile->name);
+
+@@ -1006,10 +1010,6 @@ repeat:
+
+ unlock_profile(profile);
+
+- if (APPARMOR_COMPLAIN(child_cxt) &&
+- profile == profile->ns->null_complain_profile) {
+- aa_audit_hint(profile, &sa);
+- }
+ aa_put_profile(profile);
+ } else
+ aa_free_task_context(child_cxt);
+--- a/security/apparmor/module_interface.c
++++ b/security/apparmor/module_interface.c
+@@ -126,7 +126,7 @@ static int aa_is_nameX(struct aa_ext *e,
+ * AA_NAME tag value is a u16.
+ */
+ if (aa_is_X(e, AA_NAME)) {
+- char *tag;
++ char *tag = NULL;
+ size_t size = aa_is_u16_chunk(e, &tag);
+ /* if a name is specified it must match. otherwise skip tag */
+ if (name && (!size || strcmp(name, tag)))
--- /dev/null
+From: John Johansen <jjohansen@suse.de>
+Subject: AppArmor: reintroduce ATTR_FILE
+
+The fsetattr patch removed ATTR_FILE, but AppArmor needs it to
+distinguish file-based writes.
+
+Note: Now that LSMs must be static, it would be better to add a file
+pointer argument to security_operations->inode_setattr() instead. Then
+move the fs.h chunk to patches.apparmor/fsetattr-restore-ia_file. -jeffm
+
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/open.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -208,6 +208,9 @@ int do_truncate(struct dentry *dentry, s
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | time_attrs;
+
++ if (filp)
++ newattrs.ia_valid |= ATTR_FILE;
++
+ /* Remove suid/sgid on truncate too */
+ newattrs.ia_valid |= should_remove_suid(dentry);
+
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] vfs: restore ia_file for compatibility with external modules
+References: bnc#381259
+
+ patches.apparmor/fsetattr.diff eliminated ia_file and ATTR_FILE in favor
+ of providing a ->fsetattr call that takes a file pointer. Until that
+ patch is accepted into mainline, this patch provides backward
+ compatibility for external file system modules.
+
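+ A sketch of the external-module pattern this preserves (examplefs_*
+ names are hypothetical):
+
+	static int examplefs_setattr(struct dentry *dentry, struct iattr *attr)
+	{
+		/* Check the flag, never the pointer, as the fs.h comment
+		 * requires; ia_file is only valid with ATTR_FILE set. */
+		if (attr->ia_valid & ATTR_FILE)
+			examplefs_note_writer(attr->ia_file);
+
+		return inode_setattr(dentry->d_inode, attr);
+	}
+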
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+---
+ fs/attr.c | 13 ++++++++++++-
+ include/linux/fs.h | 11 +++++++++++
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -168,8 +168,19 @@ int fnotify_change(struct dentry *dentry
+ if (!error) {
+ if (file && file->f_op && file->f_op->fsetattr)
+ error = file->f_op->fsetattr(file, attr);
+- else
++ else {
++ /* External file systems still expect to be
++ * passed a file pointer via ia_file and
++ * have it announced via ATTR_FILE. This
++ * just makes it so they don't need to
++ * change their API just for us. External
++ * callers will have set these themselves. */
++ if (file) {
++ attr->ia_valid |= ATTR_FILE;
++ attr->ia_file = file;
++ }
+ error = inode->i_op->setattr(dentry, attr);
++ }
+ }
+ } else {
+ error = inode_change_ok(inode, attr);
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -367,6 +367,17 @@ struct iattr {
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+ struct timespec ia_ctime;
++
++ /*
++ * Not an attribute, but an auxiliary info for filesystems wanting to
++ * implement an ftruncate() like method. NOTE: filesystem should
++ * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
++ *
++ * NOTE: With patches.apparmor/fsetattr.diff applied, this is
++ * for compatibility with external file system modules only. There
++ * should not be any in-kernel users left.
++ */
++ struct file *ia_file;
+ };
+
+ /*
--- /dev/null
+Subject: VFS: new fsetattr() file operation
+
+From: Miklos Szeredi <mszeredi@suse.cz>
+
+Add a new file operation: f_op->fsetattr(), that is invoked by
+ftruncate, fchmod, fchown and utimensat. Fall back to i_op->setattr()
+if it is not defined.
+
+For the reasons why we need this, see patch adding fgetattr().
+
+ftruncate() already passed the open file to the filesystem via the
+ia_file member of struct iattr. However, it is cleaner to have a
+separate file operation for this, so remove ia_file and ATTR_FILE, and
+convert the existing users: fuse and AFS.
+
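+A filesystem opts in by setting the new file operation (sketch
+mirroring the AFS and fuse conversions below; examplefs_* names are
+hypothetical):
+
+	static int examplefs_fsetattr(struct file *file, struct iattr *attr)
+	{
+		/* file-based variant; may use file->private_data etc. */
+		return examplefs_do_setattr(file->f_path.dentry, attr, file);
+	}
+
+	const struct file_operations examplefs_file_ops = {
+		.fsetattr = examplefs_fsetattr,
+		/* without .fsetattr, the VFS falls back to i_op->setattr() */
+	};
+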
+Signed-off-by: Miklos Szeredi <mszeredi@suse.cz>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/afs/dir.c | 1 +
+ fs/afs/file.c | 1 +
+ fs/afs/inode.c | 19 +++++++++++++++----
+ fs/afs/internal.h | 1 +
+ fs/attr.c | 19 +++++++++++++++----
+ fs/fuse/dir.c | 20 +++++++++-----------
+ fs/fuse/file.c | 7 +++++++
+ fs/fuse/fuse_i.h | 4 ++++
+ fs/open.c | 20 ++++++++------------
+ fs/utimes.c | 9 +++++----
+ include/linux/fs.h | 9 ++-------
+ 11 files changed, 68 insertions(+), 42 deletions(-)
+
+--- a/fs/afs/dir.c
++++ b/fs/afs/dir.c
+@@ -45,6 +45,7 @@ const struct file_operations afs_dir_fil
+ .release = afs_release,
+ .readdir = afs_readdir,
+ .lock = afs_lock,
++ .fsetattr = afs_fsetattr,
+ };
+
+ const struct inode_operations afs_dir_inode_operations = {
+--- a/fs/afs/file.c
++++ b/fs/afs/file.c
+@@ -36,6 +36,7 @@ const struct file_operations afs_file_op
+ .fsync = afs_fsync,
+ .lock = afs_lock,
+ .flock = afs_flock,
++ .fsetattr = afs_fsetattr,
+ };
+
+ const struct inode_operations afs_file_inode_operations = {
+--- a/fs/afs/inode.c
++++ b/fs/afs/inode.c
+@@ -358,7 +358,8 @@ void afs_clear_inode(struct inode *inode
+ /*
+ * set the attributes of an inode
+ */
+-int afs_setattr(struct dentry *dentry, struct iattr *attr)
++static int afs_do_setattr(struct dentry *dentry, struct iattr *attr,
++ struct file *file)
+ {
+ struct afs_vnode *vnode = AFS_FS_I(dentry->d_inode);
+ struct key *key;
+@@ -380,8 +381,8 @@ int afs_setattr(struct dentry *dentry, s
+ afs_writeback_all(vnode);
+ }
+
+- if (attr->ia_valid & ATTR_FILE) {
+- key = attr->ia_file->private_data;
++ if (file) {
++ key = file->private_data;
+ } else {
+ key = afs_request_key(vnode->volume->cell);
+ if (IS_ERR(key)) {
+@@ -391,10 +392,20 @@ int afs_setattr(struct dentry *dentry, s
+ }
+
+ ret = afs_vnode_setattr(vnode, key, attr);
+- if (!(attr->ia_valid & ATTR_FILE))
++ if (!file)
+ key_put(key);
+
+ error:
+ _leave(" = %d", ret);
+ return ret;
+ }
++
++int afs_setattr(struct dentry *dentry, struct iattr *attr)
++{
++ return afs_do_setattr(dentry, attr, NULL);
++}
++
++int afs_fsetattr(struct file *file, struct iattr *attr)
++{
++ return afs_do_setattr(file->f_path.dentry, attr, file);
++}
+--- a/fs/afs/internal.h
++++ b/fs/afs/internal.h
+@@ -548,6 +548,7 @@ extern void afs_zap_data(struct afs_vnod
+ extern int afs_validate(struct afs_vnode *, struct key *);
+ extern int afs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+ extern int afs_setattr(struct dentry *, struct iattr *);
++extern int afs_fsetattr(struct file *, struct iattr *);
+ extern void afs_clear_inode(struct inode *);
+
+ /*
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -100,8 +100,8 @@ int inode_setattr(struct inode * inode,
+ }
+ EXPORT_SYMBOL(inode_setattr);
+
+-int notify_change(struct dentry *dentry, struct vfsmount *mnt,
+- struct iattr *attr)
++int fnotify_change(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr, struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ mode_t mode = inode->i_mode;
+@@ -165,8 +165,12 @@ int notify_change(struct dentry *dentry,
+
+ if (inode->i_op && inode->i_op->setattr) {
+ error = security_inode_setattr(dentry, mnt, attr);
+- if (!error)
+- error = inode->i_op->setattr(dentry, attr);
++ if (!error) {
++ if (file && file->f_op && file->f_op->fsetattr)
++ error = file->f_op->fsetattr(file, attr);
++ else
++ error = inode->i_op->setattr(dentry, attr);
++ }
+ } else {
+ error = inode_change_ok(inode, attr);
+ if (!error)
+@@ -188,5 +192,12 @@ int notify_change(struct dentry *dentry,
+
+ return error;
+ }
++EXPORT_SYMBOL_GPL(fnotify_change);
++
++int notify_change(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr)
++{
++ return fnotify_change(dentry, mnt, attr, NULL);
++}
+
+ EXPORT_SYMBOL(notify_change);
+--- a/fs/fuse/dir.c
++++ b/fs/fuse/dir.c
+@@ -1105,21 +1105,22 @@ static int fuse_dir_fsync(struct file *f
+ return file ? fuse_fsync_common(file, de, datasync, 1) : 0;
+ }
+
+-static bool update_mtime(unsigned ivalid)
++static bool update_mtime(unsigned ivalid, bool have_file)
+ {
+ /* Always update if mtime is explicitly set */
+ if (ivalid & ATTR_MTIME_SET)
+ return true;
+
+ /* If it's an open(O_TRUNC) or an ftruncate(), don't update */
+- if ((ivalid & ATTR_SIZE) && (ivalid & (ATTR_OPEN | ATTR_FILE)))
++ if ((ivalid & ATTR_SIZE) && ((ivalid & ATTR_OPEN) || have_file))
+ return false;
+
+ /* In all other cases update */
+ return true;
+ }
+
+-static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg)
++static void iattr_to_fattr(struct iattr *iattr, struct fuse_setattr_in *arg,
++ bool have_file)
+ {
+ unsigned ivalid = iattr->ia_valid;
+
+@@ -1138,7 +1139,7 @@ static void iattr_to_fattr(struct iattr
+ if (!(ivalid & ATTR_ATIME_SET))
+ arg->valid |= FATTR_ATIME_NOW;
+ }
+- if ((ivalid & ATTR_MTIME) && update_mtime(ivalid)) {
++ if ((ivalid & ATTR_MTIME) && update_mtime(ivalid, have_file)) {
+ arg->valid |= FATTR_MTIME;
+ arg->mtime = iattr->ia_mtime.tv_sec;
+ arg->mtimensec = iattr->ia_mtime.tv_nsec;
+@@ -1199,8 +1200,8 @@ void fuse_release_nowrite(struct inode *
+ * vmtruncate() doesn't allow for this case, so do the rlimit checking
+ * and the actual truncation by hand.
+ */
+-static int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
+- struct file *file)
++int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
++ struct file *file)
+ {
+ struct inode *inode = entry->d_inode;
+ struct fuse_conn *fc = get_fuse_conn(inode);
+@@ -1244,7 +1245,7 @@ static int fuse_do_setattr(struct dentry
+
+ memset(&inarg, 0, sizeof(inarg));
+ memset(&outarg, 0, sizeof(outarg));
+- iattr_to_fattr(attr, &inarg);
++ iattr_to_fattr(attr, &inarg, file != NULL);
+ if (file) {
+ struct fuse_file *ff = file->private_data;
+ inarg.valid |= FATTR_FH;
+@@ -1314,10 +1315,7 @@ error:
+
+ static int fuse_setattr(struct dentry *entry, struct iattr *attr)
+ {
+- if (attr->ia_valid & ATTR_FILE)
+- return fuse_do_setattr(entry, attr, attr->ia_file);
+- else
+- return fuse_do_setattr(entry, attr, NULL);
++ return fuse_do_setattr(entry, attr, NULL);
+ }
+
+ static int fuse_getattr(struct vfsmount *mnt, struct dentry *entry,
+--- a/fs/fuse/file.c
++++ b/fs/fuse/file.c
+@@ -1467,6 +1467,11 @@ static loff_t fuse_file_llseek(struct fi
+ return retval;
+ }
+
++static int fuse_fsetattr(struct file *file, struct iattr *attr)
++{
++ return fuse_do_setattr(file->f_path.dentry, attr, file);
++}
++
+ static const struct file_operations fuse_file_operations = {
+ .llseek = fuse_file_llseek,
+ .read = do_sync_read,
+@@ -1480,6 +1485,7 @@ static const struct file_operations fuse
+ .fsync = fuse_fsync,
+ .lock = fuse_file_lock,
+ .flock = fuse_file_flock,
++ .fsetattr = fuse_fsetattr,
+ .splice_read = generic_file_splice_read,
+ };
+
+@@ -1493,6 +1499,7 @@ static const struct file_operations fuse
+ .fsync = fuse_fsync,
+ .lock = fuse_file_lock,
+ .flock = fuse_file_flock,
++ .fsetattr = fuse_fsetattr,
+ /* no mmap and splice_read */
+ };
+
+--- a/fs/fuse/fuse_i.h
++++ b/fs/fuse/fuse_i.h
+@@ -551,6 +551,10 @@ void fuse_truncate(struct address_space
+ */
+ int fuse_dev_init(void);
+
++
++int fuse_do_setattr(struct dentry *entry, struct iattr *attr,
++ struct file *file);
++
+ /**
+ * Cleanup the client device
+ */
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -207,16 +207,12 @@ int do_truncate(struct dentry *dentry, s
+
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | time_attrs;
+- if (filp) {
+- newattrs.ia_file = filp;
+- newattrs.ia_valid |= ATTR_FILE;
+- }
+
+ /* Remove suid/sgid on truncate too */
+ newattrs.ia_valid |= should_remove_suid(dentry);
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+- err = notify_change(dentry, mnt, &newattrs);
++ err = fnotify_change(dentry, mnt, &newattrs, filp);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ return err;
+ }
+@@ -625,7 +621,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
+ mode = inode->i_mode;
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+- err = notify_change(dentry, file->f_path.mnt, &newattrs);
++ err = fnotify_change(dentry, file->f_path.mnt, &newattrs, file);
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(file->f_path.mnt);
+ out_putf:
+@@ -669,7 +665,7 @@ SYSCALL_DEFINE2(chmod, const char __user
+ }
+
+ static int chown_common(struct dentry * dentry, struct vfsmount *mnt,
+- uid_t user, gid_t group)
++ uid_t user, gid_t group, struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -688,7 +684,7 @@ static int chown_common(struct dentry *
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ mutex_lock(&inode->i_mutex);
+- error = notify_change(dentry, mnt, &newattrs);
++ error = fnotify_change(dentry, mnt, &newattrs, file);
+ mutex_unlock(&inode->i_mutex);
+
+ return error;
+@@ -705,7 +701,7 @@ SYSCALL_DEFINE3(chown, const char __user
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, path.mnt, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group, NULL);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -730,7 +726,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, path.mnt, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group, NULL);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -749,7 +745,7 @@ SYSCALL_DEFINE3(lchown, const char __use
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, path.mnt, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group, NULL);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -772,7 +768,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
+ goto out_fput;
+ dentry = file->f_path.dentry;
+ audit_inode(NULL, dentry);
+- error = chown_common(dentry, file->f_path.mnt, user, group);
++ error = chown_common(dentry, file->f_path.mnt, user, group, file);
+ mnt_drop_write(file->f_path.mnt);
+ out_fput:
+ fput(file);
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -48,7 +48,8 @@ static bool nsec_valid(long nsec)
+ return nsec >= 0 && nsec <= 999999999;
+ }
+
+-static int utimes_common(struct path *path, struct timespec *times)
++static int utimes_common(struct path *path, struct timespec *times,
++ struct file *f)
+ {
+ int error;
+ struct iattr newattrs;
+@@ -102,7 +103,7 @@ static int utimes_common(struct path *pa
+ }
+ }
+ mutex_lock(&inode->i_mutex);
+- error = notify_change(path->dentry, path->mnt, &newattrs);
++ error = fnotify_change(path->dentry, path->mnt, &newattrs, f);
+ mutex_unlock(&inode->i_mutex);
+
+ mnt_drop_write_and_out:
+@@ -149,7 +150,7 @@ long do_utimes(int dfd, char __user *fil
+ if (!file)
+ goto out;
+
+- error = utimes_common(&file->f_path, times);
++ error = utimes_common(&file->f_path, times, file);
+ fput(file);
+ } else {
+ struct path path;
+@@ -162,7 +163,7 @@ long do_utimes(int dfd, char __user *fil
+ if (error)
+ goto out;
+
+- error = utimes_common(&path, times);
++ error = utimes_common(&path, times, NULL);
+ path_put(&path);
+ }
+
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -367,13 +367,6 @@ struct iattr {
+ struct timespec ia_atime;
+ struct timespec ia_mtime;
+ struct timespec ia_ctime;
+-
+- /*
+- * Not an attribute, but an auxilary info for filesystems wanting to
+- * implement an ftruncate() like method. NOTE: filesystem should
+- * check for (ia_valid & ATTR_FILE), and not for (ia_file != NULL).
+- */
+- struct file *ia_file;
+ };
+
+ /*
+@@ -1280,6 +1273,7 @@ struct file_operations {
+ #define HAVE_FOP_OPEN_EXEC
+ int (*open_exec) (struct inode *);
+ int (*setlease)(struct file *, long, struct file_lock **);
++ int (*fsetattr)(struct file *, struct iattr *);
+ };
+
+ struct inode_operations {
+@@ -1799,6 +1793,7 @@ extern int do_remount_sb(struct super_bl
+ extern sector_t bmap(struct inode *, sector_t);
+ #endif
+ extern int notify_change(struct dentry *, struct vfsmount *, struct iattr *);
++extern int fnotify_change(struct dentry *, struct vfsmount *, struct iattr *, struct file *);
+ extern int inode_permission(struct inode *, int);
+ extern int generic_permission(struct inode *, int,
+ int (*check_acl)(struct inode *, int));
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Pass struct path down to remove_suid and children
+
+Required by a later patch that adds a struct vfsmount parameter to
+notify_change().
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+
+ mm/filemap.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1826,12 +1826,12 @@ int should_remove_suid(struct dentry *de
+ }
+ EXPORT_SYMBOL(should_remove_suid);
+
+-static int __remove_suid(struct dentry *dentry, int kill)
++static int __remove_suid(struct path *path, int kill)
+ {
+ struct iattr newattrs;
+
+ newattrs.ia_valid = ATTR_FORCE | kill;
+- return notify_change(dentry, &newattrs);
++ return notify_change(path->dentry, &newattrs);
+ }
+
+ int file_remove_suid(struct file *file)
+@@ -1846,7 +1846,7 @@ int file_remove_suid(struct file *file)
+ if (killpriv)
+ error = security_inode_killpriv(dentry);
+ if (!error && killsuid)
+- error = __remove_suid(dentry, killsuid);
++ error = __remove_suid(&file->f_path, killsuid);
+
+ return error;
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_create LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
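+With the vfsmount available, a path-based LSM can resolve the pathname
+in the hook (sketch; d_namespace_path() is added earlier in this
+series, example_mediate_create() is a placeholder, and a real hook must
+cope with @mnt being NULL):
+
+	static int example_inode_create(struct inode *dir,
+					struct dentry *dentry,
+					struct vfsmount *mnt, int mode)
+	{
+		char *buf = (char *)__get_free_page(GFP_KERNEL);
+		char *name;
+		int error = 0;
+
+		if (!buf)
+			return -ENOMEM;
+		name = d_namespace_path(dentry, mnt, buf, PAGE_SIZE);
+		if (!IS_ERR(name))
+			error = example_mediate_create(name, mode);
+		free_page((unsigned long)buf);
+		return error;
+	}
+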
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 2 +-
+ include/linux/security.h | 9 ++++++---
+ security/capability.c | 2 +-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 3 ++-
+ 5 files changed, 13 insertions(+), 8 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1543,7 +1543,7 @@ int vfs_create(struct inode *dir, struct
+ return -EACCES; /* shouldn't it be ENOSYS? */
+ mode &= S_IALLUGO;
+ mode |= S_IFREG;
+- error = security_inode_create(dir, dentry, mode);
++ error = security_inode_create(dir, dentry, nd ? nd->path.mnt : NULL, mode);
+ if (error)
+ return error;
+ DQUOT_INIT(dir);
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -337,6 +337,7 @@ static inline void security_free_mnt_opt
+ * Check permission to create a regular file.
+ * @dir contains inode structure of the parent of the new file.
+ * @dentry contains the dentry structure for the file to be created.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * @mode contains the file mode of the file to be created.
+ * Return 0 if permission is granted.
+ * @inode_link:
+@@ -1354,8 +1355,8 @@ struct security_operations {
+ void (*inode_free_security) (struct inode *inode);
+ int (*inode_init_security) (struct inode *inode, struct inode *dir,
+ char **name, void **value, size_t *len);
+- int (*inode_create) (struct inode *dir,
+- struct dentry *dentry, int mode);
++ int (*inode_create) (struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode);
+ int (*inode_link) (struct dentry *old_dentry,
+ struct inode *dir, struct dentry *new_dentry);
+ int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
+@@ -1622,7 +1623,8 @@ int security_inode_alloc(struct inode *i
+ void security_inode_free(struct inode *inode);
+ int security_inode_init_security(struct inode *inode, struct inode *dir,
+ char **name, void **value, size_t *len);
+-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode);
++int security_inode_create(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode);
+ int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+ struct dentry *new_dentry);
+ int security_inode_unlink(struct inode *dir, struct dentry *dentry);
+@@ -1968,6 +1970,7 @@ static inline int security_inode_init_se
+
+ static inline int security_inode_create(struct inode *dir,
+ struct dentry *dentry,
++ struct vfsmount *mnt,
+ int mode)
+ {
+ return 0;
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -155,7 +155,7 @@ static int cap_inode_init_security(struc
+ }
+
+ static int cap_inode_create(struct inode *inode, struct dentry *dentry,
+- int mask)
++ struct vfsmount *mnt, int mask)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -355,11 +355,12 @@ int security_inode_init_security(struct
+ }
+ EXPORT_SYMBOL(security_inode_init_security);
+
+-int security_inode_create(struct inode *dir, struct dentry *dentry, int mode)
++int security_inode_create(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode)
+ {
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+- return security_ops->inode_create(dir, dentry, mode);
++ return security_ops->inode_create(dir, dentry, mnt, mode);
+ }
+
+ int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2566,7 +2566,8 @@ static int selinux_inode_init_security(s
+ return 0;
+ }
+
+-static int selinux_inode_create(struct inode *dir, struct dentry *dentry, int mask)
++static int selinux_inode_create(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mask)
+ {
+ return may_create(dir, dentry, SECCLASS_FILE);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_getxattr LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 2 +-
+ include/linux/security.h | 11 +++++++----
+ security/capability.c | 3 ++-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 3 ++-
+ security/smack/smack_lsm.c | 4 +++-
+ 6 files changed, 18 insertions(+), 10 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -141,7 +141,7 @@ vfs_getxattr(struct dentry *dentry, stru
+ if (error)
+ return error;
+
+- error = security_inode_getxattr(dentry, name);
++ error = security_inode_getxattr(dentry, mnt, name);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -446,7 +446,7 @@ static inline void security_free_mnt_opt
+ * @value identified by @name for @dentry and @mnt.
+ * @inode_getxattr:
+ * Check permission before obtaining the extended attributes
+- * identified by @name for @dentry.
++ * identified by @name for @dentry and @mnt.
+ * Return 0 if permission is granted.
+ * @inode_listxattr:
+ * Check permission before obtaining the list of extended attribute
+@@ -1400,7 +1400,8 @@ struct security_operations {
+ struct vfsmount *mnt,
+ const char *name, const void *value,
+ size_t size, int flags);
+- int (*inode_getxattr) (struct dentry *dentry, const char *name);
++ int (*inode_getxattr) (struct dentry *dentry, struct vfsmount *mnt,
++ const char *name);
+ int (*inode_listxattr) (struct dentry *dentry);
+ int (*inode_removexattr) (struct dentry *dentry, const char *name);
+ int (*inode_need_killpriv) (struct dentry *dentry);
+@@ -1676,7 +1677,8 @@ int security_inode_setxattr(struct dentr
+ void security_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value,
+ size_t size, int flags);
+-int security_inode_getxattr(struct dentry *dentry, const char *name);
++int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name);
+ int security_inode_listxattr(struct dentry *dentry);
+ int security_inode_removexattr(struct dentry *dentry, const char *name);
+ int security_inode_need_killpriv(struct dentry *dentry);
+@@ -2113,7 +2115,8 @@ static inline void security_inode_post_s
+ { }
+
+ static inline int security_inode_getxattr(struct dentry *dentry,
+- const char *name)
++ struct vfsmount *mnt,
++ const char *name)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -241,7 +241,8 @@ static void cap_inode_post_setxattr(stru
+ {
+ }
+
+-static int cap_inode_getxattr(struct dentry *dentry, const char *name)
++static int cap_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -488,11 +488,12 @@ void security_inode_post_setxattr(struct
+ flags);
+ }
+
+-int security_inode_getxattr(struct dentry *dentry, const char *name)
++int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_getxattr(dentry, name);
++ return security_ops->inode_getxattr(dentry, mnt, name);
+ }
+
+ int security_inode_listxattr(struct dentry *dentry)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2796,7 +2796,8 @@ static void selinux_inode_post_setxattr(
+ return;
+ }
+
+-static int selinux_inode_getxattr(struct dentry *dentry, const char *name)
++static int selinux_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -673,11 +673,13 @@ static void smack_inode_post_setxattr(st
+ /*
+ * smack_inode_getxattr - Smack check on getxattr
+ * @dentry: the object
++ * @mnt: unused
+ * @name: unused
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+-static int smack_inode_getxattr(struct dentry *dentry, const char *name)
++static int smack_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass the struct vfsmounts to the inode_link LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 3 ++-
+ include/linux/security.h | 18 ++++++++++++------
+ security/capability.c | 5 +++--
+ security/security.c | 8 +++++---
+ security/selinux/hooks.c | 9 +++++++--
+ security/smack/smack_lsm.c | 5 +++--
+ 6 files changed, 32 insertions(+), 16 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2437,7 +2437,8 @@ int vfs_link(struct dentry *old_dentry,
+ if (S_ISDIR(inode->i_mode))
+ return -EPERM;
+
+- error = security_inode_link(old_dentry, dir, new_dentry);
++ error = security_inode_link(old_dentry, old_mnt, dir, new_dentry,
++ new_mnt);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -343,8 +343,10 @@ static inline void security_free_mnt_opt
+ * @inode_link:
+ * Check permission before creating a new hard link to a file.
+ * @old_dentry contains the dentry structure for an existing link to the file.
++ * @old_mnt is the vfsmount corresponding to @old_dentry (may be NULL).
+ * @dir contains the inode structure of the parent directory of the new link.
+ * @new_dentry contains the dentry structure for the new link.
++ * @new_mnt is the vfsmount corresponding to @new_dentry (may be NULL).
+ * Return 0 if permission is granted.
+ * @inode_unlink:
+ * Check the permission to remove a hard link to a file.
+@@ -1362,8 +1364,9 @@ struct security_operations {
+ char **name, void **value, size_t *len);
+ int (*inode_create) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+- int (*inode_link) (struct dentry *old_dentry,
+- struct inode *dir, struct dentry *new_dentry);
++ int (*inode_link) (struct dentry *old_dentry, struct vfsmount *old_mnt,
++ struct inode *dir, struct dentry *new_dentry,
++ struct vfsmount *new_mnt);
+ int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
+ int (*inode_symlink) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, const char *old_name);
+@@ -1632,8 +1635,9 @@ int security_inode_init_security(struct
+ char **name, void **value, size_t *len);
+ int security_inode_create(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+-int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *new_dentry);
++int security_inode_link(struct dentry *old_dentry, struct vfsmount *old_mnt,
++ struct inode *dir, struct dentry *new_dentry,
++ struct vfsmount *new_mnt);
+ int security_inode_unlink(struct inode *dir, struct dentry *dentry);
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, const char *old_name);
+@@ -1987,8 +1991,10 @@ static inline int security_inode_create(
+ }
+
+ static inline int security_inode_link(struct dentry *old_dentry,
+- struct inode *dir,
+- struct dentry *new_dentry)
++ struct vfsmount *old_mnt,
++ struct inode *dir,
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -160,8 +160,9 @@ static int cap_inode_create(struct inode
+ return 0;
+ }
+
+-static int cap_inode_link(struct dentry *old_dentry, struct inode *inode,
+- struct dentry *new_dentry)
++static int cap_inode_link(struct dentry *old_dentry, struct vfsmount *old_mnt,
++ struct inode *inode,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -363,12 +363,14 @@ int security_inode_create(struct inode *
+ return security_ops->inode_create(dir, dentry, mnt, mode);
+ }
+
+-int security_inode_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *new_dentry)
++int security_inode_link(struct dentry *old_dentry, struct vfsmount *old_mnt,
++ struct inode *dir, struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ if (unlikely(IS_PRIVATE(old_dentry->d_inode)))
+ return 0;
+- return security_ops->inode_link(old_dentry, dir, new_dentry);
++ return security_ops->inode_link(old_dentry, old_mnt, dir,
++ new_dentry, new_mnt);
+ }
+
+ int security_inode_unlink(struct inode *dir, struct dentry *dentry)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2572,11 +2572,16 @@ static int selinux_inode_create(struct i
+ return may_create(dir, dentry, SECCLASS_FILE);
+ }
+
+-static int selinux_inode_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
++static int selinux_inode_link(struct dentry *old_dentry,
++ struct vfsmount *old_mnt,
++ struct inode *dir,
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ int rc;
+
+- rc = secondary_ops->inode_link(old_dentry, dir, new_dentry);
++ rc = secondary_ops->inode_link(old_dentry, old_mnt, dir, new_dentry,
++ new_mnt);
+ if (rc)
+ return rc;
+ return may_link(dir, old_dentry, MAY_LINK);
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -432,8 +432,9 @@ static int smack_inode_init_security(str
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+-static int smack_inode_link(struct dentry *old_dentry, struct inode *dir,
+- struct dentry *new_dentry)
++static int smack_inode_link(struct dentry *old_dentry, struct vfsmount *old_mnt,
++ struct inode *dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ int rc;
+ char *isp;
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_listxattr LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 2 +-
+ include/linux/security.h | 9 +++++----
+ security/capability.c | 2 +-
+ security/security.c | 4 ++--
+ security/selinux/hooks.c | 2 +-
+ 5 files changed, 10 insertions(+), 9 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -174,7 +174,7 @@ vfs_listxattr(struct dentry *dentry, str
+ struct inode *inode = dentry->d_inode;
+ ssize_t error;
+
+- error = security_inode_listxattr(dentry);
++ error = security_inode_listxattr(dentry, mnt);
+ if (error)
+ return error;
+ error = -EOPNOTSUPP;
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -450,7 +450,7 @@ static inline void security_free_mnt_opt
+ * Return 0 if permission is granted.
+ * @inode_listxattr:
+ * Check permission before obtaining the list of extended attribute
+- * names for @dentry.
++ * names for @dentry and @mnt.
+ * Return 0 if permission is granted.
+ * @inode_removexattr:
+ * Check permission before removing the extended attribute
+@@ -1402,7 +1402,7 @@ struct security_operations {
+ size_t size, int flags);
+ int (*inode_getxattr) (struct dentry *dentry, struct vfsmount *mnt,
+ const char *name);
+- int (*inode_listxattr) (struct dentry *dentry);
++ int (*inode_listxattr) (struct dentry *dentry, struct vfsmount *mnt);
+ int (*inode_removexattr) (struct dentry *dentry, const char *name);
+ int (*inode_need_killpriv) (struct dentry *dentry);
+ int (*inode_killpriv) (struct dentry *dentry);
+@@ -1679,7 +1679,7 @@ void security_inode_post_setxattr(struct
+ size_t size, int flags);
+ int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name);
+-int security_inode_listxattr(struct dentry *dentry);
++int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt);
+ int security_inode_removexattr(struct dentry *dentry, const char *name);
+ int security_inode_need_killpriv(struct dentry *dentry);
+ int security_inode_killpriv(struct dentry *dentry);
+@@ -2121,7 +2121,8 @@ static inline int security_inode_getxatt
+ return 0;
+ }
+
+-static inline int security_inode_listxattr(struct dentry *dentry)
++static inline int security_inode_listxattr(struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -247,7 +247,7 @@ static int cap_inode_getxattr(struct den
+ return 0;
+ }
+
+-static int cap_inode_listxattr(struct dentry *dentry)
++static int cap_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -496,11 +496,11 @@ int security_inode_getxattr(struct dentr
+ return security_ops->inode_getxattr(dentry, mnt, name);
+ }
+
+-int security_inode_listxattr(struct dentry *dentry)
++int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_listxattr(dentry);
++ return security_ops->inode_listxattr(dentry, mnt);
+ }
+
+ int security_inode_removexattr(struct dentry *dentry, const char *name)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2802,7 +2802,7 @@ static int selinux_inode_getxattr(struct
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
+
+-static int selinux_inode_listxattr(struct dentry *dentry)
++static int selinux_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_mkdir LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 2 +-
+ include/linux/security.h | 8 ++++++--
+ security/capability.c | 2 +-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 3 ++-
+ 5 files changed, 13 insertions(+), 7 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2089,7 +2089,7 @@ int vfs_mkdir(struct inode *dir, struct
+ return -EPERM;
+
+ mode &= (S_IRWXUGO|S_ISVTX);
+- error = security_inode_mkdir(dir, dentry, mode);
++ error = security_inode_mkdir(dir, dentry, mnt, mode);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -362,6 +362,7 @@ static inline void security_free_mnt_opt
+ * associated with inode strcture @dir.
+ * @dir containst the inode structure of parent of the directory to be created.
+ * @dentry contains the dentry structure of new directory.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * @mode contains the mode of new directory.
+ * Return 0 if permission is granted.
+ * @inode_rmdir:
+@@ -1363,7 +1364,8 @@ struct security_operations {
+ int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
+ int (*inode_symlink) (struct inode *dir,
+ struct dentry *dentry, const char *old_name);
+- int (*inode_mkdir) (struct inode *dir, struct dentry *dentry, int mode);
++ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode);
+ int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
+ int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
+ int mode, dev_t dev);
+@@ -1632,7 +1634,8 @@ int security_inode_link(struct dentry *o
+ int security_inode_unlink(struct inode *dir, struct dentry *dentry);
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+ const char *old_name);
+-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode);
++int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode);
+ int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
+ int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+@@ -2001,6 +2004,7 @@ static inline int security_inode_symlink
+
+ static inline int security_inode_mkdir(struct inode *dir,
+ struct dentry *dentry,
++ struct vfsmount *mnt,
+ int mode)
+ {
+ return 0;
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -178,7 +178,7 @@ static int cap_inode_symlink(struct inod
+ }
+
+ static int cap_inode_mkdir(struct inode *inode, struct dentry *dentry,
+- int mask)
++ struct vfsmount *mnt, int mask)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -386,11 +386,12 @@ int security_inode_symlink(struct inode
+ return security_ops->inode_symlink(dir, dentry, old_name);
+ }
+
+-int security_inode_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode)
+ {
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+- return security_ops->inode_mkdir(dir, dentry, mode);
++ return security_ops->inode_mkdir(dir, dentry, mnt, mode);
+ }
+
+ int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2597,7 +2597,8 @@ static int selinux_inode_symlink(struct
+ return may_create(dir, dentry, SECCLASS_LNK_FILE);
+ }
+
+-static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry, int mask)
++static int selinux_inode_mkdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mask)
+ {
+ return may_create(dir, dentry, SECCLASS_DIR);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_mknod LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 6 +++---
+ include/linux/security.h | 7 +++++--
+ security/capability.c | 2 +-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 5 +++--
+ 5 files changed, 15 insertions(+), 10 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1994,7 +1994,7 @@ int vfs_mknod(struct inode *dir, struct
+ if (error)
+ return error;
+
+- error = security_inode_mknod(dir, dentry, mode, dev);
++ error = security_inode_mknod(dir, dentry, mnt, mode, dev);
+ if (error)
+ return error;
+
+@@ -2056,11 +2056,11 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ break;
+ case S_IFCHR: case S_IFBLK:
+ error = vfs_mknod(nd.path.dentry->d_inode, dentry,
+- nd.path, mode, new_decode_dev(dev));
++ nd.path.mnt, mode, new_decode_dev(dev));
+ break;
+ case S_IFIFO: case S_IFSOCK:
+ error = vfs_mknod(nd.path.dentry->d_inode, dentry,
+- nd.path, mode, 0);
++ nd.path.mnt, mode, 0);
+ break;
+ }
+ mnt_drop_write(nd.path.mnt);
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -377,6 +377,7 @@ static inline void security_free_mnt_opt
+ * and not this hook.
+ * @dir contains the inode structure of parent of the new file.
+ * @dentry contains the dentry structure of the new file.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * @mode contains the mode of the new file.
+ * @dev contains the device number.
+ * Return 0 if permission is granted.
+@@ -1368,7 +1369,7 @@ struct security_operations {
+ struct vfsmount *mnt, int mode);
+ int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
+ int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
+- int mode, dev_t dev);
++ struct vfsmount *mnt, int mode, dev_t dev);
+ int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+ int (*inode_readlink) (struct dentry *dentry);
+@@ -1637,7 +1638,8 @@ int security_inode_symlink(struct inode
+ int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+ int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
+-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev);
++int security_inode_mknod(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode, dev_t dev);
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+ int security_inode_readlink(struct dentry *dentry);
+@@ -2018,6 +2020,7 @@ static inline int security_inode_rmdir(s
+
+ static inline int security_inode_mknod(struct inode *dir,
+ struct dentry *dentry,
++ struct vfsmount *mnt,
+ int mode, dev_t dev)
+ {
+ return 0;
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -189,7 +189,7 @@ static int cap_inode_rmdir(struct inode
+ }
+
+ static int cap_inode_mknod(struct inode *inode, struct dentry *dentry,
+- int mode, dev_t dev)
++ struct vfsmount *mnt, int mode, dev_t dev)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -401,11 +401,12 @@ int security_inode_rmdir(struct inode *d
+ return security_ops->inode_rmdir(dir, dentry);
+ }
+
+-int security_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
++int security_inode_mknod(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode, dev_t dev)
+ {
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+- return security_ops->inode_mknod(dir, dentry, mode, dev);
++ return security_ops->inode_mknod(dir, dentry, mnt, mode, dev);
+ }
+
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2608,11 +2608,12 @@ static int selinux_inode_rmdir(struct in
+ return may_link(dir, dentry, MAY_RMDIR);
+ }
+
+-static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
++static int selinux_inode_mknod(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, int mode, dev_t dev)
+ {
+ int rc;
+
+- rc = secondary_ops->inode_mknod(dir, dentry, mode, dev);
++ rc = secondary_ops->inode_mknod(dir, dentry, mnt, mode, dev);
+ if (rc)
+ return rc;
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_readlink LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/stat.c | 2 +-
+ include/linux/security.h | 8 +++++---
+ security/capability.c | 2 +-
+ security/security.c | 4 ++--
+ security/selinux/hooks.c | 2 +-
+ 5 files changed, 10 insertions(+), 8 deletions(-)
+
+--- a/fs/stat.c
++++ b/fs/stat.c
+@@ -308,7 +308,7 @@ SYSCALL_DEFINE4(readlinkat, int, dfd, co
+
+ error = -EINVAL;
+ if (inode->i_op && inode->i_op->readlink) {
+- error = security_inode_readlink(path.dentry);
++ error = security_inode_readlink(path.dentry, path.mnt);
+ if (!error) {
+ touch_atime(path.mnt, path.dentry);
+ error = inode->i_op->readlink(path.dentry,
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -392,6 +392,7 @@ static inline void security_free_mnt_opt
+ * @inode_readlink:
+ * Check the permission to read the symbolic link.
+ * @dentry contains the dentry structure for the file link.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * Return 0 if permission is granted.
+ * @inode_follow_link:
+ * Check permission to follow a symbolic link when looking up a pathname.
+@@ -1373,7 +1374,7 @@ struct security_operations {
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+- int (*inode_readlink) (struct dentry *dentry);
++ int (*inode_readlink) (struct dentry *dentry, struct vfsmount *mnt);
+ int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_permission) (struct inode *inode, int mask);
+ int (*inode_setattr) (struct dentry *dentry, struct vfsmount *mnt,
+@@ -1643,7 +1644,7 @@ int security_inode_mknod(struct inode *d
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry);
+-int security_inode_readlink(struct dentry *dentry);
++int security_inode_readlink(struct dentry *dentry, struct vfsmount *mnt);
+ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+ int security_inode_permission(struct inode *inode, int mask);
+ int security_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
+@@ -2036,7 +2037,8 @@ static inline int security_inode_rename(
+ return 0;
+ }
+
+-static inline int security_inode_readlink(struct dentry *dentry)
++static inline int security_inode_readlink(struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -200,7 +200,7 @@ static int cap_inode_rename(struct inode
+ return 0;
+ }
+
+-static int cap_inode_readlink(struct dentry *dentry)
++static int cap_inode_readlink(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -419,11 +419,11 @@ int security_inode_rename(struct inode *
+ new_dir, new_dentry);
+ }
+
+-int security_inode_readlink(struct dentry *dentry)
++int security_inode_readlink(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_readlink(dentry);
++ return security_ops->inode_readlink(dentry, mnt);
+ }
+
+ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2627,7 +2627,7 @@ static int selinux_inode_rename(struct i
+ return may_rename(old_inode, old_dentry, new_inode, new_dentry);
+ }
+
+-static int selinux_inode_readlink(struct dentry *dentry)
++static int selinux_inode_readlink(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ return dentry_has_perm(current, NULL, dentry, FILE__READ);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_removexattr LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 2 +-
+ include/linux/security.h | 14 +++++++++-----
+ security/commoncap.c | 3 ++-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 3 ++-
+ security/smack/smack_lsm.c | 6 ++++--
+ 6 files changed, 21 insertions(+), 12 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -202,7 +202,7 @@ vfs_removexattr(struct dentry *dentry, s
+ if (error)
+ return error;
+
+- error = security_inode_removexattr(dentry, name);
++ error = security_inode_removexattr(dentry, mnt, name);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -57,7 +57,8 @@ extern int cap_bprm_secureexec(struct li
+ extern int cap_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value, size_t size,
+ int flags);
+-extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
++extern int cap_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name);
+ extern int cap_inode_need_killpriv(struct dentry *dentry);
+ extern int cap_inode_killpriv(struct dentry *dentry);
+ extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
+@@ -1403,7 +1404,8 @@ struct security_operations {
+ int (*inode_getxattr) (struct dentry *dentry, struct vfsmount *mnt,
+ const char *name);
+ int (*inode_listxattr) (struct dentry *dentry, struct vfsmount *mnt);
+- int (*inode_removexattr) (struct dentry *dentry, const char *name);
++ int (*inode_removexattr) (struct dentry *dentry, struct vfsmount *mnt,
++ const char *name);
+ int (*inode_need_killpriv) (struct dentry *dentry);
+ int (*inode_killpriv) (struct dentry *dentry);
+ int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
+@@ -1680,7 +1682,8 @@ void security_inode_post_setxattr(struct
+ int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name);
+ int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt);
+-int security_inode_removexattr(struct dentry *dentry, const char *name);
++int security_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name);
+ int security_inode_need_killpriv(struct dentry *dentry);
+ int security_inode_killpriv(struct dentry *dentry);
+ int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
+@@ -2128,9 +2131,10 @@ static inline int security_inode_listxat
+ }
+
+ static inline int security_inode_removexattr(struct dentry *dentry,
+- const char *name)
++ struct vfsmount *mnt,
++ const char *name)
+ {
+- return cap_inode_removexattr(dentry, name);
++ return cap_inode_removexattr(dentry, mnt, name);
+ }
+
+ static inline int security_inode_need_killpriv(struct dentry *dentry)
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -429,7 +429,8 @@ int cap_inode_setxattr(struct dentry *de
+ return 0;
+ }
+
+-int cap_inode_removexattr(struct dentry *dentry, const char *name)
++int cap_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ if (!strcmp(name, XATTR_NAME_CAPS)) {
+ if (!capable(CAP_SETFCAP))
+--- a/security/security.c
++++ b/security/security.c
+@@ -503,11 +503,12 @@ int security_inode_listxattr(struct dent
+ return security_ops->inode_listxattr(dentry, mnt);
+ }
+
+-int security_inode_removexattr(struct dentry *dentry, const char *name)
++int security_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_removexattr(dentry, name);
++ return security_ops->inode_removexattr(dentry, mnt, name);
+ }
+
+ int security_inode_need_killpriv(struct dentry *dentry)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2807,7 +2807,8 @@ static int selinux_inode_listxattr(struc
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
+
+-static int selinux_inode_removexattr(struct dentry *dentry, const char *name)
++static int selinux_inode_removexattr(struct dentry *dentry,
++ struct vfsmount *mnt, const char *name)
+ {
+ if (strcmp(name, XATTR_NAME_SELINUX))
+ return selinux_inode_setotherxattr(dentry, name);
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -687,13 +687,15 @@ static int smack_inode_getxattr(struct d
+ /*
+ * smack_inode_removexattr - Smack check on removexattr
+ * @dentry: the object
++ * @mnt: unused
+ * @name: name of the attribute
+ *
+ * Removing the Smack attribute requires CAP_MAC_ADMIN
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+-static int smack_inode_removexattr(struct dentry *dentry, const char *name)
++static int smack_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name)
+ {
+ int rc = 0;
+
+@@ -703,7 +705,7 @@ static int smack_inode_removexattr(struc
+ if (!capable(CAP_MAC_ADMIN))
+ rc = -EPERM;
+ } else
+- rc = cap_inode_removexattr(dentry, name);
++ rc = cap_inode_removexattr(dentry, mnt, name);
+
+ if (rc == 0)
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_rename LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 6 ++++--
+ include/linux/security.h | 13 ++++++++++---
+ security/capability.c | 3 ++-
+ security/security.c | 7 ++++---
+ security/selinux/hooks.c | 8 ++++++--
+ security/smack/smack_lsm.c | 6 +++++-
+ 6 files changed, 31 insertions(+), 12 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2563,7 +2563,8 @@ static int vfs_rename_dir(struct inode *
+ return error;
+ }
+
+- error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
++ error = security_inode_rename(old_dir, old_dentry, old_mnt,
++ new_dir, new_dentry, new_mnt);
+ if (error)
+ return error;
+
+@@ -2597,7 +2598,8 @@ static int vfs_rename_other(struct inode
+ struct inode *target;
+ int error;
+
+- error = security_inode_rename(old_dir, old_dentry, new_dir, new_dentry);
++ error = security_inode_rename(old_dir, old_dentry, old_mnt,
++ new_dir, new_dentry, new_mnt);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -390,8 +390,10 @@ static inline void security_free_mnt_opt
+ * Check for permission to rename a file or directory.
+ * @old_dir contains the inode structure for parent of the old link.
+ * @old_dentry contains the dentry structure of the old link.
++ * @old_mnt is the vfsmount corresponding to @old_dentry (may be NULL).
+ * @new_dir contains the inode structure for parent of the new link.
+ * @new_dentry contains the dentry structure of the new link.
++ * @new_mnt is the vfsmount corresponding to @new_dentry (may be NULL).
+ * Return 0 if permission is granted.
+ * @inode_readlink:
+ * Check the permission to read the symbolic link.
+@@ -1380,7 +1382,9 @@ struct security_operations {
+ int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry);
++ struct vfsmount *old_mnt,
++ struct inode *new_dir, struct dentry *new_dentry,
++ struct vfsmount *new_mnt);
+ int (*inode_readlink) (struct dentry *dentry, struct vfsmount *mnt);
+ int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_permission) (struct inode *inode, int mask);
+@@ -1653,7 +1657,8 @@ int security_inode_rmdir(struct inode *d
+ int security_inode_mknod(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry);
++ struct vfsmount *old_mnt, struct inode *new_dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt);
+ int security_inode_readlink(struct dentry *dentry, struct vfsmount *mnt);
+ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+ int security_inode_permission(struct inode *inode, int mask);
+@@ -2045,8 +2050,10 @@ static inline int security_inode_mknod(s
+
+ static inline int security_inode_rename(struct inode *old_dir,
+ struct dentry *old_dentry,
++ struct vfsmount *old_mnt,
+ struct inode *new_dir,
+- struct dentry *new_dentry)
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -198,7 +198,8 @@ static int cap_inode_mknod(struct inode
+ }
+
+ static int cap_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
+- struct inode *new_inode, struct dentry *new_dentry)
++ struct vfsmount *old_mnt, struct inode *new_inode,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -414,13 +414,14 @@ int security_inode_mknod(struct inode *d
+ }
+
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
++ struct vfsmount *old_mnt, struct inode *new_dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ if (unlikely(IS_PRIVATE(old_dentry->d_inode) ||
+ (new_dentry->d_inode && IS_PRIVATE(new_dentry->d_inode))))
+ return 0;
+- return security_ops->inode_rename(old_dir, old_dentry,
+- new_dir, new_dentry);
++ return security_ops->inode_rename(old_dir, old_dentry, old_mnt,
++ new_dir, new_dentry, new_mnt);
+ }
+
+ int security_inode_readlink(struct dentry *dentry, struct vfsmount *mnt)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2628,8 +2628,12 @@ static int selinux_inode_mknod(struct in
+ return may_create(dir, dentry, inode_mode_to_security_class(mode));
+ }
+
+-static int selinux_inode_rename(struct inode *old_inode, struct dentry *old_dentry,
+- struct inode *new_inode, struct dentry *new_dentry)
++static int selinux_inode_rename(struct inode *old_inode,
++ struct dentry *old_dentry,
++ struct vfsmount *old_mnt,
++ struct inode *new_inode,
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ return may_rename(old_inode, old_dentry, new_inode, new_dentry);
+ }
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -509,8 +509,10 @@ static int smack_inode_rmdir(struct inod
+ * smack_inode_rename - Smack check on rename
+ * @old_inode: the old directory
+ * @old_dentry: unused
++ * @old_mnt: unused
+ * @new_inode: the new directory
+ * @new_dentry: unused
++ * @new_mnt: unused
+ *
+ * Read and write access is required on both the old and
+ * new directories.
+@@ -519,8 +521,10 @@ static int smack_inode_rmdir(struct inod
+ */
+ static int smack_inode_rename(struct inode *old_inode,
+ struct dentry *old_dentry,
++ struct vfsmount *old_mnt,
+ struct inode *new_inode,
+- struct dentry *new_dentry)
++ struct dentry *new_dentry,
++ struct vfsmount *new_mnt)
+ {
+ int rc;
+ char *isp;
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_rmdir LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 2 +-
+ include/linux/security.h | 10 +++++++---
+ security/capability.c | 3 ++-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 3 ++-
+ security/smack/smack_lsm.c | 4 +++-
+ 6 files changed, 18 insertions(+), 9 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2184,7 +2184,7 @@ int vfs_rmdir(struct inode *dir, struct
+ if (d_mountpoint(dentry))
+ error = -EBUSY;
+ else {
+- error = security_inode_rmdir(dir, dentry);
++ error = security_inode_rmdir(dir, dentry, mnt);
+ if (!error) {
+ error = dir->i_op->rmdir(dir, dentry);
+ if (!error)
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -372,6 +372,7 @@ static inline void security_free_mnt_opt
+ * Check the permission to remove a directory.
+ * @dir contains the inode structure of parent of the directory to be removed.
+ * @dentry contains the dentry structure of directory to be removed.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * Return 0 if permission is granted.
+ * @inode_mknod:
+ * Check permissions when creating a special file (or a socket or a fifo
+@@ -1372,7 +1373,8 @@ struct security_operations {
+ struct vfsmount *mnt, const char *old_name);
+ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+- int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
++ int (*inode_rmdir) (struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt);
+ int (*inode_mknod) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
+@@ -1643,7 +1645,8 @@ int security_inode_symlink(struct inode
+ struct vfsmount *mnt, const char *old_name);
+ int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+-int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
++int security_inode_rmdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt);
+ int security_inode_mknod(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode, dev_t dev);
+ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
+@@ -2022,7 +2025,8 @@ static inline int security_inode_mkdir(s
+ }
+
+ static inline int security_inode_rmdir(struct inode *dir,
+- struct dentry *dentry)
++ struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -184,7 +184,8 @@ static int cap_inode_mkdir(struct inode
+ return 0;
+ }
+
+-static int cap_inode_rmdir(struct inode *inode, struct dentry *dentry)
++static int cap_inode_rmdir(struct inode *inode, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -396,11 +396,12 @@ int security_inode_mkdir(struct inode *d
+ return security_ops->inode_mkdir(dir, dentry, mnt, mode);
+ }
+
+-int security_inode_rmdir(struct inode *dir, struct dentry *dentry)
++int security_inode_rmdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_rmdir(dir, dentry);
++ return security_ops->inode_rmdir(dir, dentry, mnt);
+ }
+
+ int security_inode_mknod(struct inode *dir, struct dentry *dentry,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2609,7 +2609,8 @@ static int selinux_inode_mkdir(struct in
+ return may_create(dir, dentry, SECCLASS_DIR);
+ }
+
+-static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry)
++static int selinux_inode_rmdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return may_link(dir, dentry, MAY_RMDIR);
+ }
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -480,11 +480,13 @@ static int smack_inode_unlink(struct ino
+ * smack_inode_rmdir - Smack check on directory deletion
+ * @dir: containing directory object
+ * @dentry: directory to unlink
++ * @mnt: vfsmount of directory to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the directory, error code otherwise
+ */
+-static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry)
++static int smack_inode_rmdir(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ int rc;
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_setattr LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
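+For illustration only (the example_* names are hypothetical): the hook
+now sees the vfsmount alongside the iattr, so a path-based module can
+key its checks on what is being changed:
+
+        /* assumes kernel context: linux/fs.h for struct iattr and ATTR_* */
+        /* reusing the example_check_by_pathname() sketch shown earlier */
+        static int example_check_by_pathname(struct dentry *dentry,
+                                             struct vfsmount *mnt);
+
+        static int example_inode_setattr(struct dentry *dentry,
+                                         struct vfsmount *mnt,
+                                         struct iattr *attr)
+        {
+                /* chmod/chown: mediate by the pathname of dentry/mnt */
+                if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
+                        return example_check_by_pathname(dentry, mnt);
+                return 0;
+        }
+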
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/attr.c | 4 ++--
+ fs/fat/file.c | 2 +-
+ include/linux/security.h | 10 +++++++---
+ security/capability.c | 3 ++-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 5 +++--
+ security/smack/smack_lsm.c | 3 ++-
+ 7 files changed, 20 insertions(+), 12 deletions(-)
+
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -164,13 +164,13 @@ int notify_change(struct dentry *dentry,
+ down_write(&dentry->d_inode->i_alloc_sem);
+
+ if (inode->i_op && inode->i_op->setattr) {
+- error = security_inode_setattr(dentry, attr);
++ error = security_inode_setattr(dentry, mnt, attr);
+ if (!error)
+ error = inode->i_op->setattr(dentry, attr);
+ } else {
+ error = inode_change_ok(inode, attr);
+ if (!error)
+- error = security_inode_setattr(dentry, attr);
++ error = security_inode_setattr(dentry, mnt, attr);
+ if (!error) {
+ if ((ia_valid & ATTR_UID && attr->ia_uid != inode->i_uid) ||
+ (ia_valid & ATTR_GID && attr->ia_gid != inode->i_gid))
+--- a/fs/fat/file.c
++++ b/fs/fat/file.c
+@@ -98,7 +98,7 @@ int fat_generic_ioctl(struct inode *inod
+ * out the RO attribute for checking by the security
+ * module, just because it maps to a file mode.
+ */
+- err = security_inode_setattr(filp->f_path.dentry, &ia);
++ err = security_inode_setattr(filp->f_path.dentry, filp->f_path.mnt, &ia);
+ if (err)
+ goto up;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -412,6 +412,7 @@ static inline void security_free_mnt_opt
+ * file attributes change (such as when a file is truncated, chown/chmod
+ * operations, transferring disk quotas, etc).
+ * @dentry contains the dentry structure for the file.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * @attr is the iattr structure containing the new file attributes.
+ * Return 0 if permission is granted.
+ * @inode_getattr:
+@@ -1371,7 +1372,8 @@ struct security_operations {
+ int (*inode_readlink) (struct dentry *dentry);
+ int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+ int (*inode_permission) (struct inode *inode, int mask);
+- int (*inode_setattr) (struct dentry *dentry, struct iattr *attr);
++ int (*inode_setattr) (struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr);
+ int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+ void (*inode_delete) (struct inode *inode);
+ int (*inode_setxattr) (struct dentry *dentry, const char *name,
+@@ -1638,7 +1640,8 @@ int security_inode_rename(struct inode *
+ int security_inode_readlink(struct dentry *dentry);
+ int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+ int security_inode_permission(struct inode *inode, int mask);
+-int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
++int security_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr);
+ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+ void security_inode_delete(struct inode *inode);
+ int security_inode_setxattr(struct dentry *dentry, const char *name,
+@@ -2041,7 +2044,8 @@ static inline int security_inode_permiss
+ }
+
+ static inline int security_inode_setattr(struct dentry *dentry,
+- struct iattr *attr)
++ struct vfsmount *mnt,
++ struct iattr *attr)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -216,7 +216,8 @@ static int cap_inode_permission(struct i
+ return 0;
+ }
+
+-static int cap_inode_setattr(struct dentry *dentry, struct iattr *iattr)
++static int cap_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *iattr)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -438,11 +438,12 @@ int security_inode_permission(struct ino
+ return security_ops->inode_permission(inode, mask);
+ }
+
+-int security_inode_setattr(struct dentry *dentry, struct iattr *attr)
++int security_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_setattr(dentry, attr);
++ return security_ops->inode_setattr(dentry, mnt, attr);
+ }
+ EXPORT_SYMBOL_GPL(security_inode_setattr);
+
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2656,11 +2656,12 @@ static int selinux_inode_permission(stru
+ open_file_mask_to_av(inode->i_mode, mask), NULL);
+ }
+
+-static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr)
++static int selinux_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *iattr)
+ {
+ int rc;
+
+- rc = secondary_ops->inode_setattr(dentry, iattr);
++ rc = secondary_ops->inode_setattr(dentry, mnt, iattr);
+ if (rc)
+ return rc;
+
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -559,7 +559,8 @@ static int smack_inode_permission(struct
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+-static int smack_inode_setattr(struct dentry *dentry, struct iattr *iattr)
++static int smack_inode_setattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *iattr)
+ {
+ /*
+ * Need to allow for clearing the setuid bit.
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_setxattr LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 4 ++--
+ include/linux/security.h | 41 ++++++++++++++++++++++++++---------------
+ security/capability.c | 3 ++-
+ security/commoncap.c | 5 +++--
+ security/security.c | 16 ++++++++++------
+ security/selinux/hooks.c | 8 +++++---
+ security/smack/smack_lsm.c | 12 ++++++++----
+ 7 files changed, 56 insertions(+), 33 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -78,7 +78,7 @@ vfs_setxattr(struct dentry *dentry, stru
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+- error = security_inode_setxattr(dentry, name, value, size, flags);
++ error = security_inode_setxattr(dentry, mnt, name, value, size, flags);
+ if (error)
+ goto out;
+ error = -EOPNOTSUPP;
+@@ -86,7 +86,7 @@ vfs_setxattr(struct dentry *dentry, stru
+ error = inode->i_op->setxattr(dentry, name, value, size, flags);
+ if (!error) {
+ fsnotify_xattr(dentry);
+- security_inode_post_setxattr(dentry, name, value,
++ security_inode_post_setxattr(dentry, mnt, name, value,
+ size, flags);
+ }
+ } else if (!strncmp(name, XATTR_SECURITY_PREFIX,
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -54,8 +54,9 @@ extern void cap_capset_set(struct task_s
+ extern int cap_bprm_set_security(struct linux_binprm *bprm);
+ extern void cap_bprm_apply_creds(struct linux_binprm *bprm, int unsafe);
+ extern int cap_bprm_secureexec(struct linux_binprm *bprm);
+-extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags);
++extern int cap_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value, size_t size,
++ int flags);
+ extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
+ extern int cap_inode_need_killpriv(struct dentry *dentry);
+ extern int cap_inode_killpriv(struct dentry *dentry);
+@@ -438,11 +439,11 @@ static inline void security_free_mnt_opt
+ * inode.
+ * @inode_setxattr:
+ * Check permission before setting the extended attributes
+- * @value identified by @name for @dentry.
++ * @value identified by @name for @dentry and @mnt.
+ * Return 0 if permission is granted.
+ * @inode_post_setxattr:
+ * Update inode security field after successful setxattr operation.
+- * @value identified by @name for @dentry.
++ * @value identified by @name for @dentry and @mnt.
+ * @inode_getxattr:
+ * Check permission before obtaining the extended attributes
+ * identified by @name for @dentry.
+@@ -1392,10 +1393,13 @@ struct security_operations {
+ struct iattr *attr);
+ int (*inode_getattr) (struct vfsmount *mnt, struct dentry *dentry);
+ void (*inode_delete) (struct inode *inode);
+- int (*inode_setxattr) (struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags);
+- void (*inode_post_setxattr) (struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags);
++ int (*inode_setxattr) (struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value, size_t size,
++ int flags);
++ void (*inode_post_setxattr) (struct dentry *dentry,
++ struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags);
+ int (*inode_getxattr) (struct dentry *dentry, const char *name);
+ int (*inode_listxattr) (struct dentry *dentry);
+ int (*inode_removexattr) (struct dentry *dentry, const char *name);
+@@ -1666,10 +1670,12 @@ int security_inode_setattr(struct dentry
+ struct iattr *attr);
+ int security_inode_getattr(struct vfsmount *mnt, struct dentry *dentry);
+ void security_inode_delete(struct inode *inode);
+-int security_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags);
+-void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags);
++int security_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags);
++void security_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags);
+ int security_inode_getxattr(struct dentry *dentry, const char *name);
+ int security_inode_listxattr(struct dentry *dentry);
+ int security_inode_removexattr(struct dentry *dentry, const char *name);
+@@ -2092,13 +2098,18 @@ static inline void security_inode_delete
+ { }
+
+ static inline int security_inode_setxattr(struct dentry *dentry,
+- const char *name, const void *value, size_t size, int flags)
++ struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags)
+ {
+- return cap_inode_setxattr(dentry, name, value, size, flags);
++ return cap_inode_setxattr(dentry, mnt, name, value, size, flags);
+ }
+
+ static inline void security_inode_post_setxattr(struct dentry *dentry,
+- const char *name, const void *value, size_t size, int flags)
++ struct vfsmount *mnt,
++ const char *name,
++ const void *value,
++ size_t size, int flags)
+ { }
+
+ static inline int security_inode_getxattr(struct dentry *dentry,
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -235,7 +235,8 @@ static void cap_inode_delete(struct inod
+ {
+ }
+
+-static void cap_inode_post_setxattr(struct dentry *dentry, const char *name,
++static void cap_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name,
+ const void *value, size_t size, int flags)
+ {
+ }
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -414,8 +414,9 @@ int cap_bprm_secureexec (struct linux_bi
+ current->egid != current->gid);
+ }
+
+-int cap_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags)
++int cap_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value, size_t size,
++ int flags)
+ {
+ if (!strcmp(name, XATTR_NAME_CAPS)) {
+ if (!capable(CAP_SETFCAP))
+--- a/security/security.c
++++ b/security/security.c
+@@ -468,20 +468,24 @@ void security_inode_delete(struct inode
+ security_ops->inode_delete(inode);
+ }
+
+-int security_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags)
++int security_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value, size_t size,
++ int flags)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_setxattr(dentry, name, value, size, flags);
++ return security_ops->inode_setxattr(dentry, mnt, name, value, size,
++ flags);
+ }
+
+-void security_inode_post_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags)
++void security_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return;
+- security_ops->inode_post_setxattr(dentry, name, value, size, flags);
++ security_ops->inode_post_setxattr(dentry, mnt, name, value, size,
++ flags);
+ }
+
+ int security_inode_getxattr(struct dentry *dentry, const char *name)
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2713,8 +2713,9 @@ static int selinux_inode_setotherxattr(s
+ return dentry_has_perm(current, NULL, dentry, FILE__SETATTR);
+ }
+
+-static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags)
++static int selinux_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags)
+ {
+ struct task_security_struct *tsec = current->security;
+ struct inode *inode = dentry->d_inode;
+@@ -2768,7 +2769,8 @@ static int selinux_inode_setxattr(struct
+ &ad);
+ }
+
+-static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
++static void selinux_inode_post_setxattr(struct dentry *dentry,
++ struct vfsmount *mnt, const char *name,
+ const void *value, size_t size,
+ int flags)
+ {
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -595,6 +595,7 @@ static int smack_inode_getattr(struct vf
+ /**
+ * smack_inode_setxattr - Smack check for setting xattrs
+ * @dentry: the object
++ * @mnt: unused
+ * @name: name of the attribute
+ * @value: unused
+ * @size: unused
+@@ -604,8 +605,9 @@ static int smack_inode_getattr(struct vf
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+-static int smack_inode_setxattr(struct dentry *dentry, const char *name,
+- const void *value, size_t size, int flags)
++static int smack_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char *name, const void *value,
++ size_t size, int flags)
+ {
+ int rc = 0;
+
+@@ -617,7 +619,7 @@ static int smack_inode_setxattr(struct d
+ if (size == 0)
+ rc = -EINVAL;
+ } else
+- rc = cap_inode_setxattr(dentry, name, value, size, flags);
++ rc = cap_inode_setxattr(dentry, mnt, name, value, size, flags);
+
+ if (rc == 0)
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+@@ -628,6 +630,7 @@ static int smack_inode_setxattr(struct d
+ /**
+ * smack_inode_post_setxattr - Apply the Smack update approved above
+ * @dentry: object
++ * @mnt: unused
+ * @name: attribute name
+ * @value: attribute value
+ * @size: attribute size
+@@ -636,7 +639,8 @@ static int smack_inode_setxattr(struct d
+ * Set the pointer in the inode blob to the entry found
+ * in the master label list.
+ */
+-static void smack_inode_post_setxattr(struct dentry *dentry, const char *name,
++static void smack_inode_post_setxattr(struct dentry *dentry,
++ struct vfsmount *mnt, const char *name,
+ const void *value, size_t size, int flags)
+ {
+ struct inode_smack *isp;
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_symlink LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 2 +-
+ include/linux/security.h | 8 +++++---
+ security/capability.c | 2 +-
+ security/security.c | 4 ++--
+ security/selinux/hooks.c | 3 ++-
+ 5 files changed, 11 insertions(+), 8 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2358,7 +2358,7 @@ int vfs_symlink(struct inode *dir, struc
+ if (!dir->i_op || !dir->i_op->symlink)
+ return -EPERM;
+
+- error = security_inode_symlink(dir, dentry, oldname);
++ error = security_inode_symlink(dir, dentry, mnt, oldname);
+ if (error)
+ return error;
+
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -355,6 +355,7 @@ static inline void security_free_mnt_opt
+ * Check the permission to create a symbolic link to a file.
+ * @dir contains the inode structure of parent directory of the symbolic link.
+ * @dentry contains the dentry structure of the symbolic link.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * @old_name contains the pathname of file.
+ * Return 0 if permission is granted.
+ * @inode_mkdir:
+@@ -1363,8 +1364,8 @@ struct security_operations {
+ int (*inode_link) (struct dentry *old_dentry,
+ struct inode *dir, struct dentry *new_dentry);
+ int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
+- int (*inode_symlink) (struct inode *dir,
+- struct dentry *dentry, const char *old_name);
++ int (*inode_symlink) (struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, const char *old_name);
+ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+ int (*inode_rmdir) (struct inode *dir, struct dentry *dentry);
+@@ -1634,7 +1635,7 @@ int security_inode_link(struct dentry *o
+ struct dentry *new_dentry);
+ int security_inode_unlink(struct inode *dir, struct dentry *dentry);
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+- const char *old_name);
++ struct vfsmount *mnt, const char *old_name);
+ int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, int mode);
+ int security_inode_rmdir(struct inode *dir, struct dentry *dentry);
+@@ -1999,6 +2000,7 @@ static inline int security_inode_unlink(
+
+ static inline int security_inode_symlink(struct inode *dir,
+ struct dentry *dentry,
++ struct vfsmount *mnt,
+ const char *old_name)
+ {
+ return 0;
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -172,7 +172,7 @@ static int cap_inode_unlink(struct inode
+ }
+
+ static int cap_inode_symlink(struct inode *inode, struct dentry *dentry,
+- const char *name)
++ struct vfsmount *mnt, const char *name)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -379,11 +379,11 @@ int security_inode_unlink(struct inode *
+ }
+
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+- const char *old_name)
++ struct vfsmount *mnt, const char *old_name)
+ {
+ if (unlikely(IS_PRIVATE(dir)))
+ return 0;
+- return security_ops->inode_symlink(dir, dentry, old_name);
++ return security_ops->inode_symlink(dir, dentry, mnt, old_name);
+ }
+
+ int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2592,7 +2592,8 @@ static int selinux_inode_unlink(struct i
+ return may_link(dir, dentry, MAY_UNLINK);
+ }
+
+-static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry, const char *name)
++static int selinux_inode_symlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt, const char *name)
+ {
+ return may_create(dir, dentry, SECCLASS_LNK_FILE);
+ }
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Pass struct vfsmount to the inode_unlink LSM hook
+
+This is needed for computing pathnames in the AppArmor LSM.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/namei.c | 2 +-
+ include/linux/security.h | 10 +++++++---
+ security/capability.c | 3 ++-
+ security/security.c | 5 +++--
+ security/selinux/hooks.c | 5 +++--
+ security/smack/smack_lsm.c | 4 +++-
+ 6 files changed, 19 insertions(+), 10 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2264,7 +2264,7 @@ int vfs_unlink(struct inode *dir, struct
+ if (d_mountpoint(dentry))
+ error = -EBUSY;
+ else {
+- error = security_inode_unlink(dir, dentry);
++ error = security_inode_unlink(dir, dentry, mnt);
+ if (!error)
+ error = dir->i_op->unlink(dir, dentry);
+ }
+--- a/include/linux/security.h
++++ b/include/linux/security.h
+@@ -352,6 +352,7 @@ static inline void security_free_mnt_opt
+ * Check the permission to remove a hard link to a file.
+ * @dir contains the inode structure of parent directory of the file.
+ * @dentry contains the dentry structure for file to be unlinked.
++ * @mnt is the vfsmount corresponding to @dentry (may be NULL).
+ * Return 0 if permission is granted.
+ * @inode_symlink:
+ * Check the permission to create a symbolic link to a file.
+@@ -1368,7 +1369,8 @@ struct security_operations {
+ int (*inode_link) (struct dentry *old_dentry, struct vfsmount *old_mnt,
+ struct inode *dir, struct dentry *new_dentry,
+ struct vfsmount *new_mnt);
+- int (*inode_unlink) (struct inode *dir, struct dentry *dentry);
++ int (*inode_unlink) (struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt);
+ int (*inode_symlink) (struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, const char *old_name);
+ int (*inode_mkdir) (struct inode *dir, struct dentry *dentry,
+@@ -1640,7 +1642,8 @@ int security_inode_create(struct inode *
+ int security_inode_link(struct dentry *old_dentry, struct vfsmount *old_mnt,
+ struct inode *dir, struct dentry *new_dentry,
+ struct vfsmount *new_mnt);
+-int security_inode_unlink(struct inode *dir, struct dentry *dentry);
++int security_inode_unlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt);
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+ struct vfsmount *mnt, const char *old_name);
+ int security_inode_mkdir(struct inode *dir, struct dentry *dentry,
+@@ -2003,7 +2006,8 @@ static inline int security_inode_link(st
+ }
+
+ static inline int security_inode_unlink(struct inode *dir,
+- struct dentry *dentry)
++ struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/capability.c
++++ b/security/capability.c
+@@ -167,7 +167,8 @@ static int cap_inode_link(struct dentry
+ return 0;
+ }
+
+-static int cap_inode_unlink(struct inode *inode, struct dentry *dentry)
++static int cap_inode_unlink(struct inode *inode, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ return 0;
+ }
+--- a/security/security.c
++++ b/security/security.c
+@@ -373,11 +373,12 @@ int security_inode_link(struct dentry *o
+ new_dentry, new_mnt);
+ }
+
+-int security_inode_unlink(struct inode *dir, struct dentry *dentry)
++int security_inode_unlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_unlink(dir, dentry);
++ return security_ops->inode_unlink(dir, dentry, mnt);
+ }
+
+ int security_inode_symlink(struct inode *dir, struct dentry *dentry,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2587,11 +2587,12 @@ static int selinux_inode_link(struct den
+ return may_link(dir, old_dentry, MAY_LINK);
+ }
+
+-static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
++static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ int rc;
+
+- rc = secondary_ops->inode_unlink(dir, dentry);
++ rc = secondary_ops->inode_unlink(dir, dentry, mnt);
+ if (rc)
+ return rc;
+ return may_link(dir, dentry, MAY_UNLINK);
+--- a/security/smack/smack_lsm.c
++++ b/security/smack/smack_lsm.c
+@@ -454,11 +454,13 @@ static int smack_inode_link(struct dentr
+ * smack_inode_unlink - Smack check on inode deletion
+ * @dir: containing directory object
+ * @dentry: file to unlink
++ * @mnt: vfsmount of file to unlink
+ *
+ * Returns 0 if current can write the containing directory
+ * and the object, error code otherwise
+ */
+-static int smack_inode_unlink(struct inode *dir, struct dentry *dentry)
++static int smack_inode_unlink(struct inode *dir, struct dentry *dentry,
++ struct vfsmount *mnt)
+ {
+ struct inode *ip = dentry->d_inode;
+ int rc;
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Pass struct file down the inode_*xattr security LSM hooks
+
+This also allows LSMs to distinguish between file-descriptor and
+path-based access for the xattr operations. (The other relevant
+operations are covered by the setattr hook.)
+
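+For instance, a module can now tell fsetxattr(2) (called with a
+non-NULL file) apart from setxattr(2)/lsetxattr(2) (file == NULL), and
+likewise for the get/list/remove variants.  A sketch with hypothetical
+example_* helpers:
+
+        static int example_revalidate_fd(struct file *file, const char *name);
+        static int example_check_path_access(struct dentry *dentry,
+                                             struct vfsmount *mnt,
+                                             const char *name);
+
+        static int example_inode_getxattr(struct dentry *dentry,
+                                          struct vfsmount *mnt,
+                                          const char *name, struct file *file)
+        {
+                if (file)       /* access through an open file descriptor */
+                        return example_revalidate_fd(file, name);
+                /* access by pathname */
+                return example_check_path_access(dentry, mnt, name);
+        }
+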
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 59 +++++++++++++++++++++++----------------------
+ include/linux/security.h | 38 ++++++++++++++++------------
+ include/linux/xattr.h | 9 +++---
+ security/capability.c | 5 ++-
+ security/commoncap.c | 4 +--
+ security/security.c | 17 ++++++------
+ security/selinux/hooks.c | 10 ++++---
+ security/smack/smack_lsm.c | 14 ++++++----
+ 8 files changed, 87 insertions(+), 69 deletions(-)
+
+Index: linux-2.6.27/fs/xattr.c
+===================================================================
+--- linux-2.6.27.orig/fs/xattr.c
++++ linux-2.6.27/fs/xattr.c
+@@ -68,7 +68,7 @@ xattr_permission(struct inode *inode, co
+
+ int
+ vfs_setxattr(struct dentry *dentry, struct vfsmount *mnt, const char *name,
+- const void *value, size_t size, int flags)
++ const void *value, size_t size, int flags, struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -78,7 +78,7 @@ vfs_setxattr(struct dentry *dentry, stru
+ return error;
+
+ mutex_lock(&inode->i_mutex);
+- error = security_inode_setxattr(dentry, mnt, name, value, size, flags);
++ error = security_inode_setxattr(dentry, mnt, name, value, size, flags, file);
+ if (error)
+ goto out;
+ error = -EOPNOTSUPP;
+@@ -132,7 +132,7 @@ EXPORT_SYMBOL_GPL(xattr_getsecurity);
+
+ ssize_t
+ vfs_getxattr(struct dentry *dentry, struct vfsmount *mnt, const char *name,
+- void *value, size_t size)
++ void *value, size_t size, struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -141,7 +141,7 @@ vfs_getxattr(struct dentry *dentry, stru
+ if (error)
+ return error;
+
+- error = security_inode_getxattr(dentry, mnt, name);
++ error = security_inode_getxattr(dentry, mnt, name, file);
+ if (error)
+ return error;
+
+@@ -169,12 +169,12 @@ EXPORT_SYMBOL_GPL(vfs_getxattr);
+
+ ssize_t
+ vfs_listxattr(struct dentry *dentry, struct vfsmount *mnt, char *list,
+- size_t size)
++ size_t size, struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ ssize_t error;
+
+- error = security_inode_listxattr(dentry, mnt);
++ error = security_inode_listxattr(dentry, mnt, file);
+ if (error)
+ return error;
+ error = -EOPNOTSUPP;
+@@ -190,7 +190,8 @@ vfs_listxattr(struct dentry *dentry, str
+ EXPORT_SYMBOL_GPL(vfs_listxattr);
+
+ int
+-vfs_removexattr(struct dentry *dentry, struct vfsmount *mnt, const char *name)
++vfs_removexattr(struct dentry *dentry, struct vfsmount *mnt, const char *name,
++ struct file *file)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -202,7 +203,7 @@ vfs_removexattr(struct dentry *dentry, s
+ if (error)
+ return error;
+
+- error = security_inode_removexattr(dentry, mnt, name);
++ error = security_inode_removexattr(dentry, mnt, name, file);
+ if (error)
+ return error;
+
+@@ -222,7 +223,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+ */
+ static long
+ setxattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name,
+- const void __user *value, size_t size, int flags)
++ const void __user *value, size_t size, int flags, struct file *file)
+ {
+ int error;
+ void *kvalue = NULL;
+@@ -249,7 +250,7 @@ setxattr(struct dentry *dentry, struct v
+ }
+ }
+
+- error = vfs_setxattr(dentry, mnt, kname, kvalue, size, flags);
++ error = vfs_setxattr(dentry, mnt, kname, kvalue, size, flags, file);
+ kfree(kvalue);
+ return error;
+ }
+@@ -266,7 +267,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, path.mnt, name, value, size, flags);
++ error = setxattr(path.dentry, path.mnt, name, value, size, flags, NULL);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -285,7 +286,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, path.mnt, name, value, size, flags);
++ error = setxattr(path.dentry, path.mnt, name, value, size, flags, NULL);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -306,7 +307,8 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ audit_inode(NULL, dentry);
+ error = mnt_want_write_file(f->f_path.mnt, f);
+ if (!error) {
+- error = setxattr(dentry, f->f_vfsmnt, name, value, size, flags);
++ error = setxattr(dentry, f->f_vfsmnt, name, value, size, flags,
++ f);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+@@ -318,7 +320,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ */
+ static ssize_t
+ getxattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name,
+- void __user *value, size_t size)
++ void __user *value, size_t size, struct file *file)
+ {
+ ssize_t error;
+ void *kvalue = NULL;
+@@ -338,7 +340,7 @@ getxattr(struct dentry *dentry, struct v
+ return -ENOMEM;
+ }
+
+- error = vfs_getxattr(dentry, mnt, kname, kvalue, size);
++ error = vfs_getxattr(dentry, mnt, kname, kvalue, size, file);
+ if (error > 0) {
+ if (size && copy_to_user(value, kvalue, error))
+ error = -EFAULT;
+@@ -360,7 +362,7 @@ SYSCALL_DEFINE4(getxattr, const char __u
+ error = user_path(pathname, &path);
+ if (error)
+ return error;
+- error = getxattr(path.dentry, path.mnt, name, value, size);
++ error = getxattr(path.dentry, path.mnt, name, value, size, NULL);
+ path_put(&path);
+ return error;
+ }
+@@ -374,7 +376,7 @@ SYSCALL_DEFINE4(lgetxattr, const char __
+ error = user_lpath(pathname, &path);
+ if (error)
+ return error;
+- error = getxattr(path.dentry, path.mnt, name, value, size);
++ error = getxattr(path.dentry, path.mnt, name, value, size, NULL);
+ path_put(&path);
+ return error;
+ }
+@@ -389,7 +391,7 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, cons
+ if (!f)
+ return error;
+ audit_inode(NULL, f->f_path.dentry);
+- error = getxattr(f->f_path.dentry, f->f_path.mnt, name, value, size);
++ error = getxattr(f->f_path.dentry, f->f_path.mnt, name, value, size, f);
+ fput(f);
+ return error;
+ }
+@@ -399,7 +401,7 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, cons
+ */
+ static ssize_t
+ listxattr(struct dentry *dentry, struct vfsmount *mnt, char __user *list,
+- size_t size)
++ size_t size, struct file *file)
+ {
+ ssize_t error;
+ char *klist = NULL;
+@@ -412,7 +414,7 @@ listxattr(struct dentry *dentry, struct
+ return -ENOMEM;
+ }
+
+- error = vfs_listxattr(dentry, mnt, klist, size);
++ error = vfs_listxattr(dentry, mnt, klist, size, file);
+ if (error > 0) {
+ if (size && copy_to_user(list, klist, error))
+ error = -EFAULT;
+@@ -434,7 +436,7 @@ SYSCALL_DEFINE3(listxattr, const char __
+ error = user_path(pathname, &path);
+ if (error)
+ return error;
+- error = listxattr(path.dentry, path.mnt, list, size);
++ error = listxattr(path.dentry, path.mnt, list, size, NULL);
+ path_put(&path);
+ return error;
+ }
+@@ -448,7 +450,7 @@ SYSCALL_DEFINE3(llistxattr, const char _
+ error = user_lpath(pathname, &path);
+ if (error)
+ return error;
+- error = listxattr(path.dentry, path.mnt, list, size);
++ error = listxattr(path.dentry, path.mnt, list, size, NULL);
+ path_put(&path);
+ return error;
+ }
+@@ -462,7 +464,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, cha
+ if (!f)
+ return error;
+ audit_inode(NULL, f->f_path.dentry);
+- error = listxattr(f->f_path.dentry, f->f_path.mnt, list, size);
++ error = listxattr(f->f_path.dentry, f->f_path.mnt, list, size, f);
+ fput(f);
+ return error;
+ }
+@@ -471,7 +473,8 @@ SYSCALL_DEFINE3(flistxattr, int, fd, cha
+ * Extended attribute REMOVE operations
+ */
+ static long
+-removexattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name)
++removexattr(struct dentry *dentry, struct vfsmount *mnt,
++ const char __user *name, struct file *file)
+ {
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+@@ -482,7 +485,7 @@ removexattr(struct dentry *dentry, struc
+ if (error < 0)
+ return error;
+
+- return vfs_removexattr(dentry, mnt, kname);
++ return vfs_removexattr(dentry, mnt, kname, file);
+ }
+
+ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+@@ -496,7 +499,7 @@ SYSCALL_DEFINE2(removexattr, const char
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, path.mnt, name);
++ error = removexattr(path.dentry, path.mnt, name, NULL);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -514,7 +517,7 @@ SYSCALL_DEFINE2(lremovexattr, const char
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, path.mnt, name);
++ error = removexattr(path.dentry, path.mnt, name, NULL);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -534,7 +537,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, c
+ audit_inode(NULL, dentry);
+ error = mnt_want_write_file(f->f_path.mnt, f);
+ if (!error) {
+- error = removexattr(dentry, f->f_path.mnt, name);
++ error = removexattr(dentry, f->f_path.mnt, name, f);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+Index: linux-2.6.27/include/linux/security.h
+===================================================================
+--- linux-2.6.27.orig/include/linux/security.h
++++ linux-2.6.27/include/linux/security.h
+@@ -56,9 +56,9 @@ extern void cap_bprm_apply_creds(struct
+ extern int cap_bprm_secureexec(struct linux_binprm *bprm);
+ extern int cap_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value, size_t size,
+- int flags);
++ int flags, struct file *file);
+ extern int cap_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name);
++ const char *name, struct file *file);
+ extern int cap_inode_need_killpriv(struct dentry *dentry);
+ extern int cap_inode_killpriv(struct dentry *dentry);
+ extern int cap_task_post_setuid(uid_t old_ruid, uid_t old_euid, uid_t old_suid, int flags);
+@@ -1396,16 +1396,17 @@ struct security_operations {
+ void (*inode_delete) (struct inode *inode);
+ int (*inode_setxattr) (struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value, size_t size,
+- int flags);
++ int flags, struct file *file);
+ void (*inode_post_setxattr) (struct dentry *dentry,
+ struct vfsmount *mnt,
+ const char *name, const void *value,
+ size_t size, int flags);
+ int (*inode_getxattr) (struct dentry *dentry, struct vfsmount *mnt,
+- const char *name);
+- int (*inode_listxattr) (struct dentry *dentry, struct vfsmount *mnt);
++ const char *name, struct file *file);
++ int (*inode_listxattr) (struct dentry *dentry, struct vfsmount *mnt,
++ struct file *file);
+ int (*inode_removexattr) (struct dentry *dentry, struct vfsmount *mnt,
+- const char *name);
++ const char *name, struct file *file);
+ int (*inode_need_killpriv) (struct dentry *dentry);
+ int (*inode_killpriv) (struct dentry *dentry);
+ int (*inode_getsecurity) (const struct inode *inode, const char *name, void **buffer, bool alloc);
+@@ -1675,15 +1676,16 @@ int security_inode_getattr(struct vfsmou
+ void security_inode_delete(struct inode *inode);
+ int security_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value,
+- size_t size, int flags);
++ size_t size, int flags, struct file *file);
+ void security_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value,
+ size_t size, int flags);
+ int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name);
+-int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt);
++ const char *name, struct file *file);
++int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct file *file);
+ int security_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name);
++ const char *name, struct file *file);
+ int security_inode_need_killpriv(struct dentry *dentry);
+ int security_inode_killpriv(struct dentry *dentry);
+ int security_inode_getsecurity(const struct inode *inode, const char *name, void **buffer, bool alloc);
+@@ -2105,9 +2107,10 @@ static inline void security_inode_delete
+ static inline int security_inode_setxattr(struct dentry *dentry,
+ struct vfsmount *mnt,
+ const char *name, const void *value,
+- size_t size, int flags)
++ size_t size, int flags,
++ struct file *file)
+ {
+- return cap_inode_setxattr(dentry, mnt, name, value, size, flags);
++ return cap_inode_setxattr(dentry, mnt, name, value, size, flags, file);
+ }
+
+ static inline void security_inode_post_setxattr(struct dentry *dentry,
+@@ -2119,22 +2122,25 @@ static inline void security_inode_post_s
+
+ static inline int security_inode_getxattr(struct dentry *dentry,
+ struct vfsmount *mnt,
+- const char *name)
++ const char *name,
++ struct file *file)
+ {
+ return 0;
+ }
+
+ static inline int security_inode_listxattr(struct dentry *dentry,
+- struct vfsmount *mnt)
++ struct vfsmount *mnt,
++ struct file *file)
+ {
+ return 0;
+ }
+
+ static inline int security_inode_removexattr(struct dentry *dentry,
+ struct vfsmount *mnt,
+- const char *name)
++ const char *name,
++ struct file *file)
+ {
+- return cap_inode_removexattr(dentry, mnt, name);
++ return cap_inode_removexattr(dentry, mnt, name, file);
+ }
+
+ static inline int security_inode_need_killpriv(struct dentry *dentry)
+Index: linux-2.6.27/include/linux/xattr.h
+===================================================================
+--- linux-2.6.27.orig/include/linux/xattr.h
++++ linux-2.6.27/include/linux/xattr.h
+@@ -17,6 +17,7 @@
+
+ #include <linux/types.h>
+ #include <linux/mount.h>
++#include <linux/fs.h>
+
+ /* Namespaces */
+ #define XATTR_OS2_PREFIX "os2."
+@@ -48,10 +49,10 @@ struct xattr_handler {
+ };
+
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+-ssize_t vfs_getxattr(struct dentry *, struct vfsmount *, const char *, void *, size_t);
+-ssize_t vfs_listxattr(struct dentry *d, struct vfsmount *, char *list, size_t size);
+-int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int);
+-int vfs_removexattr(struct dentry *, struct vfsmount *mnt, const char *);
++ssize_t vfs_getxattr(struct dentry *, struct vfsmount *, const char *, void *, size_t, struct file *file);
++ssize_t vfs_listxattr(struct dentry *d, struct vfsmount *, char *list, size_t size, struct file *file);
++int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int, struct file *file);
++int vfs_removexattr(struct dentry *, struct vfsmount *mnt, const char *, struct file *file);
+
+ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+Index: linux-2.6.27/security/capability.c
+===================================================================
+--- linux-2.6.27.orig/security/capability.c
++++ linux-2.6.27/security/capability.c
+@@ -242,12 +242,13 @@ static void cap_inode_post_setxattr(stru
+ }
+
+ static int cap_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *f)
+ {
+ return 0;
+ }
+
+-static int cap_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
++static int cap_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct file *f)
+ {
+ return 0;
+ }
+Index: linux-2.6.27/security/commoncap.c
+===================================================================
+--- linux-2.6.27.orig/security/commoncap.c
++++ linux-2.6.27/security/commoncap.c
+@@ -416,7 +416,7 @@ int cap_bprm_secureexec (struct linux_bi
+
+ int cap_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value, size_t size,
+- int flags)
++ int flags, struct file *file)
+ {
+ if (!strcmp(name, XATTR_NAME_CAPS)) {
+ if (!capable(CAP_SETFCAP))
+@@ -430,7 +430,7 @@ int cap_inode_setxattr(struct dentry *de
+ }
+
+ int cap_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ if (!strcmp(name, XATTR_NAME_CAPS)) {
+ if (!capable(CAP_SETFCAP))
+Index: linux-2.6.27/security/security.c
+===================================================================
+--- linux-2.6.27.orig/security/security.c
++++ linux-2.6.27/security/security.c
+@@ -470,12 +470,12 @@ void security_inode_delete(struct inode
+
+ int security_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value, size_t size,
+- int flags)
++ int flags, struct file *file)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+ return security_ops->inode_setxattr(dentry, mnt, name, value, size,
+- flags);
++ flags, file);
+ }
+
+ void security_inode_post_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+@@ -489,26 +489,27 @@ void security_inode_post_setxattr(struct
+ }
+
+ int security_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_getxattr(dentry, mnt, name);
++ return security_ops->inode_getxattr(dentry, mnt, name, file);
+ }
+
+-int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
++int security_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct file *file)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_listxattr(dentry, mnt);
++ return security_ops->inode_listxattr(dentry, mnt, file);
+ }
+
+ int security_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ if (unlikely(IS_PRIVATE(dentry->d_inode)))
+ return 0;
+- return security_ops->inode_removexattr(dentry, mnt, name);
++ return security_ops->inode_removexattr(dentry, mnt, name, file);
+ }
+
+ int security_inode_need_killpriv(struct dentry *dentry)
+Index: linux-2.6.27/security/selinux/hooks.c
+===================================================================
+--- linux-2.6.27.orig/security/selinux/hooks.c
++++ linux-2.6.27/security/selinux/hooks.c
+@@ -2715,7 +2715,7 @@ static int selinux_inode_setotherxattr(s
+
+ static int selinux_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value,
+- size_t size, int flags)
++ size_t size, int flags, struct file *file)
+ {
+ struct task_security_struct *tsec = current->security;
+ struct inode *inode = dentry->d_inode;
+@@ -2797,18 +2797,20 @@ static void selinux_inode_post_setxattr(
+ }
+
+ static int selinux_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
+
+-static int selinux_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt)
++static int selinux_inode_listxattr(struct dentry *dentry, struct vfsmount *mnt,
++ struct file *file)
+ {
+ return dentry_has_perm(current, NULL, dentry, FILE__GETATTR);
+ }
+
+ static int selinux_inode_removexattr(struct dentry *dentry,
+- struct vfsmount *mnt, const char *name)
++ struct vfsmount *mnt, const char *name,
++ struct file *file)
+ {
+ if (strcmp(name, XATTR_NAME_SELINUX))
+ return selinux_inode_setotherxattr(dentry, name);
+Index: linux-2.6.27/security/smack/smack_lsm.c
+===================================================================
+--- linux-2.6.27.orig/security/smack/smack_lsm.c
++++ linux-2.6.27/security/smack/smack_lsm.c
+@@ -600,6 +600,7 @@ static int smack_inode_getattr(struct vf
+ * @value: unused
+ * @size: unused
+ * @flags: unused
++ * @file: unused
+ *
+ * This protects the Smack attribute explicitly.
+ *
+@@ -607,7 +608,7 @@ static int smack_inode_getattr(struct vf
+ */
+ static int smack_inode_setxattr(struct dentry *dentry, struct vfsmount *mnt,
+ const char *name, const void *value,
+- size_t size, int flags)
++ size_t size, int flags, struct file *file)
+ {
+ int rc = 0;
+
+@@ -619,7 +620,8 @@ static int smack_inode_setxattr(struct d
+ if (size == 0)
+ rc = -EINVAL;
+ } else
+- rc = cap_inode_setxattr(dentry, mnt, name, value, size, flags);
++ rc = cap_inode_setxattr(dentry, mnt, name, value, size, flags,
++ file);
+
+ if (rc == 0)
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
+@@ -675,11 +677,12 @@ static void smack_inode_post_setxattr(st
+ * @dentry: the object
+ * @mnt: unused
+ * @name: unused
++ * @file: unused
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+ static int smack_inode_getxattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ return smk_curacc(smk_of_inode(dentry->d_inode), MAY_READ);
+ }
+@@ -689,13 +692,14 @@ static int smack_inode_getxattr(struct d
+ * @dentry: the object
+ * @mnt: unused
+ * @name: name of the attribute
++ * @file: unused
+ *
+ * Removing the Smack attribute requires CAP_MAC_ADMIN
+ *
+ * Returns 0 if access is permitted, an error code otherwise
+ */
+ static int smack_inode_removexattr(struct dentry *dentry, struct vfsmount *mnt,
+- const char *name)
++ const char *name, struct file *file)
+ {
+ int rc = 0;
+
+@@ -705,7 +709,7 @@ static int smack_inode_removexattr(struc
+ if (!capable(CAP_MAC_ADMIN))
+ rc = -EPERM;
+ } else
+- rc = cap_inode_removexattr(dentry, mnt, name);
++ rc = cap_inode_removexattr(dentry, mnt, name, file);
+
+ if (rc == 0)
+ rc = smk_curacc(smk_of_inode(dentry->d_inode), MAY_WRITE);
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Factor out sysctl pathname code
+
+Convert the selinux sysctl pathname computation code into a standalone
+function.
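+
+A minimal sketch of a caller (do_something_with() is a placeholder; the
+path is built from the end of the buffer, so the returned pointer
+normally differs from the start of the buffer):
+
+	char *buffer = (char *)__get_free_page(GFP_KERNEL);
+	char *path;
+
+	if (buffer) {
+		path = sysctl_pathname(table, buffer, PAGE_SIZE);
+		if (path)
+			do_something_with(path); /* e.g. "/sys/kernel/shmmax" */
+		free_page((unsigned long)buffer);
+	}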
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Reviewed-by: James Morris <jmorris@namei.org>
+
+---
+ include/linux/sysctl.h | 2 ++
+ kernel/sysctl.c | 27 +++++++++++++++++++++++++++
+ security/selinux/hooks.c | 34 +++++-----------------------------
+ 3 files changed, 34 insertions(+), 29 deletions(-)
+
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -996,6 +996,8 @@ extern int proc_doulongvec_minmax(struct
+ extern int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int,
+ struct file *, void __user *, size_t *, loff_t *);
+
++extern char *sysctl_pathname(ctl_table *, char *, int);
++
+ extern int do_sysctl (int __user *name, int nlen,
+ void __user *oldval, size_t __user *oldlenp,
+ void __user *newval, size_t newlen);
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -1552,6 +1552,33 @@ void register_sysctl_root(struct ctl_tab
+ spin_unlock(&sysctl_lock);
+ }
+
++char *sysctl_pathname(struct ctl_table *table, char *buffer, int buflen)
++{
++ if (buflen < 1)
++ return NULL;
++ buffer += --buflen;
++ *buffer = '\0';
++
++ while (table) {
++ int namelen = strlen(table->procname);
++
++ if (buflen < namelen + 1)
++ return NULL;
++ buflen -= namelen + 1;
++ buffer -= namelen;
++ memcpy(buffer, table->procname, namelen);
++ *--buffer = '/';
++ table = table->parent;
++ }
++ if (buflen < 4)
++ return NULL;
++ buffer -= 4;
++ memcpy(buffer, "/sys", 4);
++
++ return buffer;
++}
++EXPORT_SYMBOL_GPL(sysctl_pathname);
++
+ #ifdef CONFIG_SYSCTL_SYSCALL
+ /* Perform the actual read/write of a sysctl table entry. */
+ static int do_sysctl_strategy(struct ctl_table_root *root,
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -1811,40 +1811,16 @@ static int selinux_capable(struct task_s
+
+ static int selinux_sysctl_get_sid(ctl_table *table, u16 tclass, u32 *sid)
+ {
+- int buflen, rc;
+- char *buffer, *path, *end;
++ char *buffer, *path;
++ int rc = -ENOMEM;
+
+- rc = -ENOMEM;
+ buffer = (char *)__get_free_page(GFP_KERNEL);
+ if (!buffer)
+ goto out;
+
+- buflen = PAGE_SIZE;
+- end = buffer+buflen;
+- *--end = '\0';
+- buflen--;
+- path = end-1;
+- *path = '/';
+- while (table) {
+- const char *name = table->procname;
+- size_t namelen = strlen(name);
+- buflen -= namelen + 1;
+- if (buflen < 0)
+- goto out_free;
+- end -= namelen;
+- memcpy(end, name, namelen);
+- *--end = '/';
+- path = end;
+- table = table->parent;
+- }
+- buflen -= 4;
+- if (buflen < 0)
+- goto out_free;
+- end -= 4;
+- memcpy(end, "/sys", 4);
+- path = end;
+- rc = security_genfs_sid("proc", path, tclass, sid);
+-out_free:
++ path = sysctl_pathname(table, buffer, PAGE_SIZE);
++ if (path)
++ rc = security_genfs_sid("proc", path, tclass, sid);
+ free_page((unsigned long)buffer);
+ out:
+ return rc;
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Fix __d_path() for lazy unmounts and make it unambiguous
+
+First, when __d_path() hits a lazily unmounted mount point, it tries to prepend
+the name of the lazily unmounted dentry to the path name. It gets this wrong,
+and also overwrites the slash that separates the name from the following
+pathname component. This patch fixes that; if a process was in directory
+/foo/bar and /foo got lazily unmounted, the old result was ``foobar'' (note the
+missing slash), while the new result with this patch is ``foo/bar''.
+
+Second, it isn't always possible to tell from the __d_path() result whether the
+specified root and rootmnt (i.e., the chroot) were reached. AppArmor, at least,
+needs an unambiguous result, so we make sure that paths only start with a
+slash if they lead all the way up to that root.
+
+We also add a @flags argument with a D_PATH_FAIL_DELETED flag, which lets us
+get rid of some of the mess in sys_getcwd().
+
+This patch leaves getcwd() and d_path() as they were before for everything
+except for lazily unmounted directories; for them, it reports ``/foo/bar''
+instead of ``foobar'' in the example described above.
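+
+A minimal sketch of a caller using the new flag (this mirrors the
+sys_getcwd() conversion below; error handling abbreviated):
+
+	char *page = (char *)__get_free_page(GFP_USER);
+	struct path tmp = root;
+	char *cwd;
+
+	cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE, D_PATH_FAIL_DELETED);
+	if (IS_ERR(cwd))	/* -ENOENT if pwd has been deleted */
+		error = PTR_ERR(cwd);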
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+Acked-by: Alan Cox <alan@lxorguk.ukuu.org.uk>
+
+[ Moved dcache_lock outside vfsmount_lock to fix lock order (bnc#490902) ]
+Signed-off-by: Nick Piggin <npiggin@suse.de>
+
+---
+ fs/dcache.c | 126 +++++++++++++++++++++++++++----------------------
+ fs/seq_file.c | 4 -
+ include/linux/dcache.h | 5 +
+ 3 files changed, 75 insertions(+), 60 deletions(-)
+
+Index: linux-2.6.27/fs/dcache.c
+===================================================================
+--- linux-2.6.27.orig/fs/dcache.c
++++ linux-2.6.27/fs/dcache.c
+@@ -1898,44 +1898,46 @@ static int prepend_name(char **buffer, i
+ * @root: root vfsmnt/dentry (may be modified by this function)
+ * @buffer: buffer to return value in
+ * @buflen: buffer length
++ * @flags: flags controlling behavior of d_path
+ *
+- * Convert a dentry into an ASCII path name. If the entry has been deleted
+- * the string " (deleted)" is appended. Note that this is ambiguous.
+- *
+- * Returns the buffer or an error code if the path was too long.
+- *
+- * "buflen" should be positive. Caller holds the dcache_lock.
++ * Convert a dentry into an ASCII path name. If the entry has been deleted,
++ * then if @flags has D_PATH_FAIL_DELETED set, ERR_PTR(-ENOENT) is returned.
++ * Otherwise, the string " (deleted)" is appended. Note that this is ambiguous.
+ *
+ * If path is not reachable from the supplied root, then the value of
+- * root is changed (without modifying refcounts).
++ * root is changed (without modifying refcounts). The path returned in this
++ * case will be relative (i.e., it will not start with a slash).
++ *
++ * Returns the buffer or an error code if the path was too long.
+ */
+ char *__d_path(const struct path *path, struct path *root,
+- char *buffer, int buflen)
++ char *buffer, int buflen, int flags)
+ {
+ struct dentry *dentry = path->dentry;
+ struct vfsmount *vfsmnt = path->mnt;
+- char *end = buffer + buflen;
+- char *retval;
++ const unsigned char *name;
++ int namelen;
++
++ buffer += buflen;
++ prepend(&buffer, &buflen, "\0", 1);
+
++ spin_lock(&dcache_lock);
+ spin_lock(&vfsmount_lock);
+- prepend(&end, &buflen, "\0", 1);
+- if (!IS_ROOT(dentry) && d_unhashed(dentry) &&
+- (prepend(&end, &buflen, " (deleted)", 10) != 0))
++ if (!IS_ROOT(dentry) && d_unhashed(dentry)) {
++ if (flags & D_PATH_FAIL_DELETED) {
++ buffer = ERR_PTR(-ENOENT);
++ goto out;
++ }
++ if (prepend(&buffer, &buflen, " (deleted)", 10) != 0)
+ goto Elong;
+-
++ }
+ if (buflen < 1)
+ goto Elong;
+- /* Get '/' right */
+- retval = end-1;
+- *retval = '/';
+
+- for (;;) {
++ while (dentry != root->dentry || vfsmnt != root->mnt) {
+ struct dentry * parent;
+
+- if (dentry == root->dentry && vfsmnt == root->mnt)
+- break;
+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
+- /* Global root? */
+ if (vfsmnt->mnt_parent == vfsmnt) {
+ goto global_root;
+ }
+@@ -1945,27 +1947,51 @@ char *__d_path(const struct path *path,
+ }
+ parent = dentry->d_parent;
+ prefetch(parent);
+- if ((prepend_name(&end, &buflen, &dentry->d_name) != 0) ||
+- (prepend(&end, &buflen, "/", 1) != 0))
++ if ((prepend_name(&buffer, &buflen, &dentry->d_name) != 0) ||
++ (prepend(&buffer, &buflen, "/", 1) != 0))
+ goto Elong;
+- retval = end;
+ dentry = parent;
+ }
++ /* Get '/' right. */
++ if (*buffer != '/' && prepend(&buffer, &buflen, "/", 1))
++ goto Elong;
+
+ out:
+ spin_unlock(&vfsmount_lock);
+- return retval;
++ spin_unlock(&dcache_lock);
++ return buffer;
+
+ global_root:
+- retval += 1; /* hit the slash */
+- if (prepend_name(&retval, &buflen, &dentry->d_name) != 0)
++ /*
++ * We went past the (vfsmount, dentry) we were looking for and have
++ * either hit a root dentry, a lazily unmounted dentry, an
++ * unconnected dentry, or the file is on a pseudo filesystem.
++ */
++ namelen = dentry->d_name.len;
++ name = dentry->d_name.name;
++
++ /*
++ * If this is a root dentry, then overwrite the slash. This
++ * will also DTRT with pseudo filesystems which have root
++ * dentries named "foo:".
++ */
++ if (IS_ROOT(dentry) && *buffer == '/') {
++ buffer++;
++ buflen++;
++ }
++ if ((flags & D_PATH_DISCONNECT) && *name == '/') {
++ /* Make sure we won't return a pathname starting with '/' */
++ name++;
++ namelen--;
++ }
++ if (prepend(&buffer, &buflen, name, namelen))
+ goto Elong;
+ root->mnt = vfsmnt;
+ root->dentry = dentry;
+ goto out;
+
+ Elong:
+- retval = ERR_PTR(-ENAMETOOLONG);
++ buffer = ERR_PTR(-ENAMETOOLONG);
+ goto out;
+ }
+
+@@ -2002,10 +2028,8 @@ char *d_path(const struct path *path, ch
+ root = current->fs->root;
+ path_get(&root);
+ read_unlock(&current->fs->lock);
+- spin_lock(&dcache_lock);
+ tmp = root;
+- res = __d_path(path, &tmp, buf, buflen);
+- spin_unlock(&dcache_lock);
++ res = __d_path(path, &tmp, buf, buflen, 0);
+ path_put(&root);
+ return res;
+ }
+@@ -2088,9 +2112,9 @@ Elong:
+ */
+ SYSCALL_DEFINE2(getcwd, char __user *, buf, unsigned long, size)
+ {
+- int error;
+- struct path pwd, root;
+- char *page = (char *) __get_free_page(GFP_USER);
++ int error, len;
++ struct path pwd, root, tmp;
++ char *page = (char *) __get_free_page(GFP_USER), *cwd;
+
+ if (!page)
+ return -ENOMEM;
+@@ -2102,30 +2126,20 @@ SYSCALL_DEFINE2(getcwd, char __user *, b
+ path_get(&root);
+ read_unlock(&current->fs->lock);
+
+- error = -ENOENT;
+- /* Has the current directory has been unlinked? */
+- spin_lock(&dcache_lock);
+- if (IS_ROOT(pwd.dentry) || !d_unhashed(pwd.dentry)) {
+- unsigned long len;
+- struct path tmp = root;
+- char * cwd;
+-
+- cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE);
+- spin_unlock(&dcache_lock);
+-
++ tmp = root;
++ cwd = __d_path(&pwd, &tmp, page, PAGE_SIZE, D_PATH_FAIL_DELETED);
++ if (IS_ERR(cwd)) {
+ error = PTR_ERR(cwd);
+- if (IS_ERR(cwd))
+- goto out;
++ goto out;
++ }
+
+- error = -ERANGE;
+- len = PAGE_SIZE + page - cwd;
+- if (len <= size) {
+- error = len;
+- if (copy_to_user(buf, cwd, len))
+- error = -EFAULT;
+- }
+- } else
+- spin_unlock(&dcache_lock);
++ error = -ERANGE;
++ len = PAGE_SIZE + page - cwd;
++ if (len <= size) {
++ error = len;
++ if (copy_to_user(buf, cwd, len))
++ error = -EFAULT;
++ }
+
+ out:
+ path_put(&pwd);
+Index: linux-2.6.27/fs/seq_file.c
+===================================================================
+--- linux-2.6.27.orig/fs/seq_file.c
++++ linux-2.6.27/fs/seq_file.c
+@@ -441,9 +441,7 @@ int seq_path_root(struct seq_file *m, st
+ char *s = m->buf + m->count;
+ char *p;
+
+- spin_lock(&dcache_lock);
+- p = __d_path(path, root, s, m->size - m->count);
+- spin_unlock(&dcache_lock);
++ p = __d_path(path, root, s, m->size - m->count, 0);
+ err = PTR_ERR(p);
+ if (!IS_ERR(p)) {
+ s = mangle_path(s, p, esc);
+Index: linux-2.6.27/include/linux/dcache.h
+===================================================================
+--- linux-2.6.27.orig/include/linux/dcache.h
++++ linux-2.6.27/include/linux/dcache.h
+@@ -299,9 +299,12 @@ extern int d_validate(struct dentry *, s
+ /*
+ * helper function for dentry_operations.d_dname() members
+ */
++#define D_PATH_FAIL_DELETED 1
++#define D_PATH_DISCONNECT 2
+ extern char *dynamic_dname(struct dentry *, char *, int, const char *, ...);
+
+-extern char *__d_path(const struct path *path, struct path *root, char *, int);
++extern char *__d_path(const struct path *path, struct path *root, char *, int,
++ int);
+ extern char *d_path(const struct path *, char *, int);
+ extern char *dentry_path(struct dentry *, char *, int);
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_getxattr()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
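+
+Callers now pass the vfsmount the dentry was reached through, e.g. (as
+in the converted getxattr() syscall below):
+
+	error = vfs_getxattr(path.dentry, path.mnt, name, value, size);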
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/nfsd/nfs4xdr.c | 2 +-
+ fs/nfsd/vfs.c | 21 ++++++++++++---------
+ fs/xattr.c | 15 ++++++++-------
+ include/linux/nfsd/nfsd.h | 3 ++-
+ include/linux/xattr.h | 2 +-
+ 5 files changed, 24 insertions(+), 19 deletions(-)
+
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -1446,7 +1446,7 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
+ }
+ if (bmval0 & (FATTR4_WORD0_ACL | FATTR4_WORD0_ACLSUPPORT
+ | FATTR4_WORD0_SUPPORTED_ATTRS)) {
+- err = nfsd4_get_nfs4_acl(rqstp, dentry, &acl);
++ err = nfsd4_get_nfs4_acl(rqstp, dentry, exp->ex_path.mnt, &acl);
+ aclsupport = (err == 0);
+ if (bmval0 & FATTR4_WORD0_ACL) {
+ if (err == -EOPNOTSUPP)
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -420,11 +420,12 @@ out_nfserr:
+ #if defined(CONFIG_NFSD_V2_ACL) || \
+ defined(CONFIG_NFSD_V3_ACL) || \
+ defined(CONFIG_NFSD_V4)
+-static ssize_t nfsd_getxattr(struct dentry *dentry, char *key, void **buf)
++static ssize_t nfsd_getxattr(struct dentry *dentry, struct vfsmount *mnt,
++ char *key, void **buf)
+ {
+ ssize_t buflen;
+
+- buflen = vfs_getxattr(dentry, key, NULL, 0);
++ buflen = vfs_getxattr(dentry, mnt, key, NULL, 0);
+ if (buflen <= 0)
+ return buflen;
+
+@@ -432,7 +433,7 @@ static ssize_t nfsd_getxattr(struct dent
+ if (!*buf)
+ return -ENOMEM;
+
+- return vfs_getxattr(dentry, key, *buf, buflen);
++ return vfs_getxattr(dentry, mnt, key, *buf, buflen);
+ }
+ #endif
+
+@@ -513,13 +514,13 @@ out_nfserr:
+ }
+
+ static struct posix_acl *
+-_get_posix_acl(struct dentry *dentry, char *key)
++_get_posix_acl(struct dentry *dentry, struct vfsmount *mnt, char *key)
+ {
+ void *buf = NULL;
+ struct posix_acl *pacl = NULL;
+ int buflen;
+
+- buflen = nfsd_getxattr(dentry, key, &buf);
++ buflen = nfsd_getxattr(dentry, mnt, key, &buf);
+ if (!buflen)
+ buflen = -ENODATA;
+ if (buflen <= 0)
+@@ -531,14 +532,15 @@ _get_posix_acl(struct dentry *dentry, ch
+ }
+
+ int
+-nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry, struct nfs4_acl **acl)
++nfsd4_get_nfs4_acl(struct svc_rqst *rqstp, struct dentry *dentry,
++ struct vfsmount *mnt, struct nfs4_acl **acl)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error = 0;
+ struct posix_acl *pacl = NULL, *dpacl = NULL;
+ unsigned int flags = 0;
+
+- pacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_ACCESS);
++ pacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_ACCESS);
+ if (IS_ERR(pacl) && PTR_ERR(pacl) == -ENODATA)
+ pacl = posix_acl_from_mode(inode->i_mode, GFP_KERNEL);
+ if (IS_ERR(pacl)) {
+@@ -548,7 +550,7 @@ nfsd4_get_nfs4_acl(struct svc_rqst *rqst
+ }
+
+ if (S_ISDIR(inode->i_mode)) {
+- dpacl = _get_posix_acl(dentry, POSIX_ACL_XATTR_DEFAULT);
++ dpacl = _get_posix_acl(dentry, mnt, POSIX_ACL_XATTR_DEFAULT);
+ if (IS_ERR(dpacl) && PTR_ERR(dpacl) == -ENODATA)
+ dpacl = NULL;
+ else if (IS_ERR(dpacl)) {
+@@ -2080,7 +2082,8 @@ nfsd_get_posix_acl(struct svc_fh *fhp, i
+ return ERR_PTR(-EOPNOTSUPP);
+ }
+
+- size = nfsd_getxattr(fhp->fh_dentry, name, &value);
++ size = nfsd_getxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt, name,
++ &value);
+ if (size < 0)
+ return ERR_PTR(size);
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -131,7 +131,8 @@ out_noalloc:
+ EXPORT_SYMBOL_GPL(xattr_getsecurity);
+
+ ssize_t
+-vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
++vfs_getxattr(struct dentry *dentry, struct vfsmount *mnt, const char *name,
++ void *value, size_t size)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -314,8 +315,8 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ * Extended attribute GET operations
+ */
+ static ssize_t
+-getxattr(struct dentry *d, const char __user *name, void __user *value,
+- size_t size)
++getxattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name,
++ void __user *value, size_t size)
+ {
+ ssize_t error;
+ void *kvalue = NULL;
+@@ -335,7 +336,7 @@ getxattr(struct dentry *d, const char __
+ return -ENOMEM;
+ }
+
+- error = vfs_getxattr(d, kname, kvalue, size);
++ error = vfs_getxattr(dentry, mnt, kname, kvalue, size);
+ if (error > 0) {
+ if (size && copy_to_user(value, kvalue, error))
+ error = -EFAULT;
+@@ -357,7 +358,7 @@ SYSCALL_DEFINE4(getxattr, const char __u
+ error = user_path(pathname, &path);
+ if (error)
+ return error;
+- error = getxattr(path.dentry, name, value, size);
++ error = getxattr(path.dentry, path.mnt, name, value, size);
+ path_put(&path);
+ return error;
+ }
+@@ -371,7 +372,7 @@ SYSCALL_DEFINE4(lgetxattr, const char __
+ error = user_lpath(pathname, &path);
+ if (error)
+ return error;
+- error = getxattr(path.dentry, name, value, size);
++ error = getxattr(path.dentry, path.mnt, name, value, size);
+ path_put(&path);
+ return error;
+ }
+@@ -386,7 +387,7 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, cons
+ if (!f)
+ return error;
+ audit_inode(NULL, f->f_path.dentry);
+- error = getxattr(f->f_path.dentry, name, value, size);
++ error = getxattr(f->f_path.dentry, f->f_path.mnt, name, value, size);
+ fput(f);
+ return error;
+ }
+--- a/include/linux/nfsd/nfsd.h
++++ b/include/linux/nfsd/nfsd.h
+@@ -85,7 +85,8 @@ __be32 nfsd_setattr(struct svc_rqst *,
+ #ifdef CONFIG_NFSD_V4
+ __be32 nfsd4_set_nfs4_acl(struct svc_rqst *, struct svc_fh *,
+ struct nfs4_acl *);
+-int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *, struct nfs4_acl **);
++int nfsd4_get_nfs4_acl(struct svc_rqst *, struct dentry *,
++ struct vfsmount *mnt, struct nfs4_acl **);
+ #endif /* CONFIG_NFSD_V4 */
+ __be32 nfsd_create(struct svc_rqst *, struct svc_fh *,
+ char *name, int len, struct iattr *attrs,
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -48,7 +48,7 @@ struct xattr_handler {
+ };
+
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+-ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
++ssize_t vfs_getxattr(struct dentry *, struct vfsmount *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+ int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int);
+ int vfs_removexattr(struct dentry *, const char *);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add struct vfsmount parameters to vfs_link()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
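+
+Note that vfs_link() gains two vfsmount parameters, one for the existing
+object and one for the new link, e.g. (as in the converted linkat()
+below):
+
+	error = vfs_link(old_path.dentry, old_path.mnt,
+			 nd.path.dentry->d_inode, new_dentry, nd.path.mnt);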
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 9 +++++++--
+ fs/namei.c | 6 ++++--
+ fs/nfsd/vfs.c | 3 ++-
+ include/linux/fs.h | 2 +-
+ 4 files changed, 14 insertions(+), 6 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -403,19 +403,24 @@ static int ecryptfs_link(struct dentry *
+ struct dentry *new_dentry)
+ {
+ struct dentry *lower_old_dentry;
++ struct vfsmount *lower_old_mnt;
+ struct dentry *lower_new_dentry;
++ struct vfsmount *lower_new_mnt;
+ struct dentry *lower_dir_dentry;
+ u64 file_size_save;
+ int rc;
+
+ file_size_save = i_size_read(old_dentry->d_inode);
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
++ lower_old_mnt = ecryptfs_dentry_to_lower_mnt(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
++ lower_new_mnt = ecryptfs_dentry_to_lower_mnt(new_dentry);
+ dget(lower_old_dentry);
+ dget(lower_new_dentry);
+ lower_dir_dentry = lock_parent(lower_new_dentry);
+- rc = vfs_link(lower_old_dentry, lower_dir_dentry->d_inode,
+- lower_new_dentry);
++ rc = vfs_link(lower_old_dentry, lower_old_mnt,
++ lower_dir_dentry->d_inode, lower_new_dentry,
++ lower_new_mnt);
+ if (rc || !lower_new_dentry->d_inode)
+ goto out_lock;
+ rc = ecryptfs_interpose(lower_new_dentry, new_dentry, dir->i_sb, 0);
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2412,7 +2412,7 @@ SYSCALL_DEFINE2(symlink, const char __us
+ return sys_symlinkat(oldname, AT_FDCWD, newname);
+ }
+
+-int vfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *new_dentry)
++int vfs_link(struct dentry *old_dentry, struct vfsmount *old_mnt, struct inode *dir, struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ struct inode *inode = old_dentry->d_inode;
+ int error;
+@@ -2490,7 +2490,9 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+- error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
++ error = vfs_link(old_path.dentry, old_path.mnt,
++ nd.path.dentry->d_inode,
++ new_dentry, nd.path.mnt);
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+ dput(new_dentry);
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1650,7 +1650,8 @@ nfsd_link(struct svc_rqst *rqstp, struct
+ err = nfserrno(host_err);
+ goto out_dput;
+ }
+- host_err = vfs_link(dold, dirp, dnew);
++ host_err = vfs_link(dold, tfhp->fh_export->ex_path.mnt, dirp,
++ dnew, ffhp->fh_export->ex_path.mnt);
+ if (!host_err) {
+ if (EX_ISSYNC(ffhp->fh_export)) {
+ err = nfserrno(nfsd_sync_dir(ddir));
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1181,7 +1181,7 @@ extern int vfs_create(struct inode *, st
+ extern int vfs_mkdir(struct inode *, struct dentry *, struct vfsmount *, int);
+ extern int vfs_mknod(struct inode *, struct dentry *, struct vfsmount *, int, dev_t);
+ extern int vfs_symlink(struct inode *, struct dentry *, struct vfsmount *, const char *);
+-extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
++extern int vfs_link(struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_rmdir(struct inode *, struct dentry *);
+ extern int vfs_unlink(struct inode *, struct dentry *);
+ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_listxattr()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/xattr.c | 25 ++++++++++++++-----------
+ include/linux/xattr.h | 2 +-
+ 2 files changed, 15 insertions(+), 12 deletions(-)
+
+--- a/fs/xattr.c
++++ b/fs/xattr.c
+@@ -168,18 +168,20 @@ nolsm:
+ EXPORT_SYMBOL_GPL(vfs_getxattr);
+
+ ssize_t
+-vfs_listxattr(struct dentry *d, char *list, size_t size)
++vfs_listxattr(struct dentry *dentry, struct vfsmount *mnt, char *list,
++ size_t size)
+ {
++ struct inode *inode = dentry->d_inode;
+ ssize_t error;
+
+- error = security_inode_listxattr(d);
++ error = security_inode_listxattr(dentry);
+ if (error)
+ return error;
+ error = -EOPNOTSUPP;
+- if (d->d_inode->i_op && d->d_inode->i_op->listxattr) {
+- error = d->d_inode->i_op->listxattr(d, list, size);
+- } else {
+- error = security_inode_listsecurity(d->d_inode, list, size);
++ if (inode->i_op && inode->i_op->listxattr)
++ error = inode->i_op->listxattr(dentry, list, size);
++ else {
++ error = security_inode_listsecurity(inode, list, size);
+ if (size && error > size)
+ error = -ERANGE;
+ }
+@@ -396,7 +398,8 @@ SYSCALL_DEFINE4(fgetxattr, int, fd, cons
+ * Extended attribute LIST operations
+ */
+ static ssize_t
+-listxattr(struct dentry *d, char __user *list, size_t size)
++listxattr(struct dentry *dentry, struct vfsmount *mnt, char __user *list,
++ size_t size)
+ {
+ ssize_t error;
+ char *klist = NULL;
+@@ -409,7 +412,7 @@ listxattr(struct dentry *d, char __user
+ return -ENOMEM;
+ }
+
+- error = vfs_listxattr(d, klist, size);
++ error = vfs_listxattr(dentry, mnt, klist, size);
+ if (error > 0) {
+ if (size && copy_to_user(list, klist, error))
+ error = -EFAULT;
+@@ -431,7 +434,7 @@ SYSCALL_DEFINE3(listxattr, const char __
+ error = user_path(pathname, &path);
+ if (error)
+ return error;
+- error = listxattr(path.dentry, list, size);
++ error = listxattr(path.dentry, path.mnt, list, size);
+ path_put(&path);
+ return error;
+ }
+@@ -445,7 +448,7 @@ SYSCALL_DEFINE3(llistxattr, const char _
+ error = user_lpath(pathname, &path);
+ if (error)
+ return error;
+- error = listxattr(path.dentry, list, size);
++ error = listxattr(path.dentry, path.mnt, list, size);
+ path_put(&path);
+ return error;
+ }
+@@ -459,7 +462,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, cha
+ if (!f)
+ return error;
+ audit_inode(NULL, f->f_path.dentry);
+- error = listxattr(f->f_path.dentry, list, size);
++ error = listxattr(f->f_path.dentry, f->f_path.mnt, list, size);
+ fput(f);
+ return error;
+ }
+--- a/include/linux/xattr.h
++++ b/include/linux/xattr.h
+@@ -49,7 +49,7 @@ struct xattr_handler {
+
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+ ssize_t vfs_getxattr(struct dentry *, struct vfsmount *, const char *, void *, size_t);
+-ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
++ssize_t vfs_listxattr(struct dentry *d, struct vfsmount *, char *list, size_t size);
+ int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int);
+ int vfs_removexattr(struct dentry *, const char *);
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add struct vfsmount parameter to vfs_mkdir()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 5 ++++-
+ fs/namei.c | 5 +++--
+ fs/nfsd/nfs4recover.c | 3 ++-
+ fs/nfsd/vfs.c | 8 +++++---
+ include/linux/fs.h | 2 +-
+ kernel/cgroup.c | 2 +-
+ 6 files changed, 16 insertions(+), 9 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -501,11 +501,14 @@ static int ecryptfs_mkdir(struct inode *
+ {
+ int rc;
+ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
+ struct dentry *lower_dir_dentry;
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, mode);
++ rc = vfs_mkdir(lower_dir_dentry->d_inode, lower_dentry, lower_mnt,
++ mode);
+ if (rc || !lower_dentry->d_inode)
+ goto out;
+ rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2077,7 +2077,8 @@ SYSCALL_DEFINE3(mknod, const char __user
+ return sys_mknodat(AT_FDCWD, filename, mode, dev);
+ }
+
+-int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++int vfs_mkdir(struct inode *dir, struct dentry *dentry, struct vfsmount *mnt,
++ int mode)
+ {
+ int error = may_create(dir, dentry, 1);
+
+@@ -2120,7 +2121,7 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+- error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
++ error = vfs_mkdir(nd.path.dentry->d_inode, dentry, nd.path.mnt, mode);
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+ dput(dentry);
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -158,7 +158,8 @@ nfsd4_create_clid_dir(struct nfs4_client
+ status = mnt_want_write(rec_dir.path.mnt);
+ if (status)
+ goto out_put;
+- status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry, S_IRWXU);
++ status = vfs_mkdir(rec_dir.path.dentry->d_inode, dentry,
++ rec_dir.path.mnt, S_IRWXU);
+ mnt_drop_write(rec_dir.path.mnt);
+ out_put:
+ dput(dentry);
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1215,6 +1215,7 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ int type, dev_t rdev, struct svc_fh *resfhp)
+ {
+ struct dentry *dentry, *dchild = NULL;
++ struct svc_export *exp;
+ struct inode *dirp;
+ __be32 err;
+ __be32 err2;
+@@ -1232,6 +1233,7 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ goto out;
+
+ dentry = fhp->fh_dentry;
++ exp = fhp->fh_export;
+ dirp = dentry->d_inode;
+
+ err = nfserr_notdir;
+@@ -1248,7 +1250,7 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ host_err = PTR_ERR(dchild);
+ if (IS_ERR(dchild))
+ goto out_nfserr;
+- err = fh_compose(resfhp, fhp->fh_export, dchild, fhp);
++ err = fh_compose(resfhp, exp, dchild, fhp);
+ if (err)
+ goto out;
+ } else {
+@@ -1298,7 +1300,7 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ host_err = vfs_create(dirp, dchild, iap->ia_mode, NULL);
+ break;
+ case S_IFDIR:
+- host_err = vfs_mkdir(dirp, dchild, iap->ia_mode);
++ host_err = vfs_mkdir(dirp, dchild, exp->ex_path.mnt, iap->ia_mode);
+ break;
+ case S_IFCHR:
+ case S_IFBLK:
+@@ -1312,7 +1314,7 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ goto out_nfserr;
+ }
+
+- if (EX_ISSYNC(fhp->fh_export)) {
++ if (EX_ISSYNC(exp)) {
+ err = nfserrno(nfsd_sync_dir(dentry));
+ write_inode_now(dchild->d_inode, 1);
+ }
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1178,7 +1178,7 @@ extern void unlock_super(struct super_bl
+ */
+ extern int vfs_permission(struct nameidata *, int);
+ extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+-extern int vfs_mkdir(struct inode *, struct dentry *, int);
++extern int vfs_mkdir(struct inode *, struct dentry *, struct vfsmount *, int);
+ extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
+ extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+ extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -2911,7 +2911,7 @@ int cgroup_clone(struct task_struct *tsk
+ }
+
+ /* Create the cgroup directory, which also creates the cgroup */
+- ret = vfs_mkdir(inode, dentry, S_IFDIR | 0755);
++ ret = vfs_mkdir(inode, dentry, NULL, S_IFDIR | 0755);
+ child = __d_cgrp(dentry);
+ dput(dentry);
+ if (ret) {
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_mknod()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 5 ++++-
+ fs/namei.c | 10 ++++++----
+ fs/nfsd/vfs.c | 3 ++-
+ include/linux/fs.h | 2 +-
+ net/unix/af_unix.c | 3 ++-
+ 5 files changed, 15 insertions(+), 8 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -552,11 +552,14 @@ ecryptfs_mknod(struct inode *dir, struct
+ {
+ int rc;
+ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
+ struct dentry *lower_dir_dentry;
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, mode, dev);
++ rc = vfs_mknod(lower_dir_dentry->d_inode, lower_dentry, lower_mnt, mode,
++ dev);
+ if (rc || !lower_dentry->d_inode)
+ goto out;
+ rc = ecryptfs_interpose(lower_dentry, dentry, dir->i_sb, 0);
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1976,7 +1976,8 @@ fail:
+ }
+ EXPORT_SYMBOL_GPL(lookup_create);
+
+-int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
++int vfs_mknod(struct inode *dir, struct dentry *dentry, struct vfsmount *mnt,
++ int mode, dev_t dev)
+ {
+ int error = may_create(dir, dentry, 0);
+
+@@ -2054,11 +2055,12 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
+ error = vfs_create(nd.path.dentry->d_inode,dentry,mode,&nd);
+ break;
+ case S_IFCHR: case S_IFBLK:
+- error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,
+- new_decode_dev(dev));
++ error = vfs_mknod(nd.path.dentry->d_inode, dentry,
++ nd.path.mnt, mode, new_decode_dev(dev));
+ break;
+ case S_IFIFO: case S_IFSOCK:
+- error = vfs_mknod(nd.path.dentry->d_inode,dentry,mode,0);
++ error = vfs_mknod(nd.path.dentry->d_inode, dentry,
++ nd.path.mnt, mode, 0);
+ break;
+ }
+ mnt_drop_write(nd.path.mnt);
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1306,7 +1306,8 @@ nfsd_create(struct svc_rqst *rqstp, stru
+ case S_IFBLK:
+ case S_IFIFO:
+ case S_IFSOCK:
+- host_err = vfs_mknod(dirp, dchild, iap->ia_mode, rdev);
++ host_err = vfs_mknod(dirp, dchild, exp->ex_path.mnt,
++ iap->ia_mode, rdev);
+ break;
+ }
+ if (host_err < 0) {
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1179,7 +1179,7 @@ extern void unlock_super(struct super_bl
+ extern int vfs_permission(struct nameidata *, int);
+ extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+ extern int vfs_mkdir(struct inode *, struct dentry *, struct vfsmount *, int);
+-extern int vfs_mknod(struct inode *, struct dentry *, int, dev_t);
++extern int vfs_mknod(struct inode *, struct dentry *, struct vfsmount *, int, dev_t);
+ extern int vfs_symlink(struct inode *, struct dentry *, const char *);
+ extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+ extern int vfs_rmdir(struct inode *, struct dentry *);
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -827,7 +827,8 @@ static int unix_bind(struct socket *sock
+ err = mnt_want_write(nd.path.mnt);
+ if (err)
+ goto out_mknod_dput;
+- err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0);
++ err = vfs_mknod(nd.path.dentry->d_inode, dentry, nd.path.mnt,
++ mode, 0);
+ mnt_drop_write(nd.path.mnt);
+ if (err)
+ goto out_mknod_dput;
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a vfsmount parameter to notify_change()
+
+The vfsmount parameter must be set appropriately for files visible
+outside the kernel. Files that are only used in a filesystem (e.g.,
+reiserfs xattr files) will have a NULL vfsmount.
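+
+A sketch of the two cases (internal_dentry is a placeholder for a
+dentry that is private to the filesystem):
+
+	struct iattr newattrs;
+
+	newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+	newattrs.ia_size = 0;
+
+	/* file visible outside the kernel: pass the vfsmount */
+	error = notify_change(path.dentry, path.mnt, &newattrs);
+
+	/* filesystem-internal file: no vfsmount to pass */
+	error = notify_change(internal_dentry, NULL, &newattrs);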
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/attr.c | 3 ++-
+ fs/ecryptfs/inode.c | 4 +++-
+ fs/exec.c | 3 ++-
+ fs/hpfs/namei.c | 2 +-
+ fs/namei.c | 2 +-
+ fs/nfsd/vfs.c | 8 ++++----
+ fs/open.c | 28 +++++++++++++++-------------
+ fs/utimes.c | 2 +-
+ include/linux/fs.h | 6 +++---
+ mm/filemap.c | 2 +-
+ 10 files changed, 33 insertions(+), 27 deletions(-)
+
+--- a/fs/attr.c
++++ b/fs/attr.c
+@@ -100,7 +100,8 @@ int inode_setattr(struct inode * inode,
+ }
+ EXPORT_SYMBOL(inode_setattr);
+
+-int notify_change(struct dentry * dentry, struct iattr * attr)
++int notify_change(struct dentry *dentry, struct vfsmount *mnt,
++ struct iattr *attr)
+ {
+ struct inode *inode = dentry->d_inode;
+ mode_t mode = inode->i_mode;
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -850,6 +850,7 @@ static int ecryptfs_setattr(struct dentr
+ {
+ int rc = 0;
+ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
+ struct inode *inode;
+ struct inode *lower_inode;
+ struct ecryptfs_crypt_stat *crypt_stat;
+@@ -860,6 +861,7 @@ static int ecryptfs_setattr(struct dentr
+ inode = dentry->d_inode;
+ lower_inode = ecryptfs_inode_to_lower(inode);
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ mutex_lock(&crypt_stat->cs_mutex);
+ if (S_ISDIR(dentry->d_inode->i_mode))
+ crypt_stat->flags &= ~(ECRYPTFS_ENCRYPTED);
+@@ -911,7 +913,7 @@ static int ecryptfs_setattr(struct dentr
+ ia->ia_valid &= ~ATTR_MODE;
+
+ mutex_lock(&lower_dentry->d_inode->i_mutex);
+- rc = notify_change(lower_dentry, ia);
++ rc = notify_change(lower_dentry, lower_mnt, ia);
+ mutex_unlock(&lower_dentry->d_inode->i_mutex);
+ out:
+ fsstack_copy_attr_all(inode, lower_inode, NULL);
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1841,7 +1841,8 @@ int do_coredump(long signr, int exit_cod
+ goto close_fail;
+ if (!file->f_op->write)
+ goto close_fail;
+- if (!ispipe && do_truncate(file->f_path.dentry, 0, 0, file) != 0)
++ if (!ispipe &&
++ do_truncate(file->f_path.dentry, file->f_path.mnt, 0, 0, file) != 0)
+ goto close_fail;
+
+ retval = binfmt->core_dump(signr, regs, file, core_limit);
+--- a/fs/hpfs/namei.c
++++ b/fs/hpfs/namei.c
+@@ -426,7 +426,7 @@ again:
+ /*printk("HPFS: truncating file before delete.\n");*/
+ newattrs.ia_size = 0;
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+- err = notify_change(dentry, &newattrs);
++ err = notify_change(dentry, NULL, &newattrs);
+ put_write_access(inode);
+ if (!err)
+ goto again;
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1619,7 +1619,7 @@ int may_open(struct nameidata *nd, int a
+ if (!error) {
+ DQUOT_INIT(inode);
+
+- error = do_truncate(dentry, 0,
++ error = do_truncate(dentry, nd->path.mnt, 0,
+ ATTR_MTIME|ATTR_CTIME|ATTR_OPEN,
+ NULL);
+ }
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -397,7 +397,7 @@ nfsd_setattr(struct svc_rqst *rqstp, str
+ err = nfserr_notsync;
+ if (!check_guard || guardtime == inode->i_ctime.tv_sec) {
+ fh_lock(fhp);
+- host_err = notify_change(dentry, iap);
++ host_err = notify_change(dentry, fhp->fh_export->ex_path.mnt, iap);
+ /* to get NFSERR_JUKEBOX on the wire, need -ETIMEDOUT */
+ if (host_err == -EAGAIN)
+ host_err = -ETIMEDOUT;
+@@ -964,13 +964,13 @@ out:
+ return err;
+ }
+
+-static void kill_suid(struct dentry *dentry)
++static void kill_suid(struct dentry *dentry, struct vfsmount *mnt)
+ {
+ struct iattr ia;
+ ia.ia_valid = ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+- notify_change(dentry, &ia);
++ notify_change(dentry, mnt, &ia);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ }
+
+@@ -1033,7 +1033,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
+
+ /* clear setuid/setgid flag after write */
+ if (host_err >= 0 && (inode->i_mode & (S_ISUID | S_ISGID)))
+- kill_suid(dentry);
++ kill_suid(dentry, exp->ex_path.mnt);
+
+ if (host_err >= 0 && stable) {
+ static ino_t last_ino;
+--- a/fs/open.c
++++ b/fs/open.c
+@@ -195,8 +195,8 @@ out:
+ return error;
+ }
+
+-int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
+- struct file *filp)
++int do_truncate(struct dentry *dentry, struct vfsmount *mnt, loff_t length,
++ unsigned int time_attrs, struct file *filp)
+ {
+ int err;
+ struct iattr newattrs;
+@@ -216,7 +216,7 @@ int do_truncate(struct dentry *dentry, l
+ newattrs.ia_valid |= should_remove_suid(dentry);
+
+ mutex_lock(&dentry->d_inode->i_mutex);
+- err = notify_change(dentry, &newattrs);
++ err = notify_change(dentry, mnt, &newattrs);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ return err;
+ }
+@@ -272,7 +272,7 @@ static long do_sys_truncate(const char _
+ error = locks_verify_truncate(inode, NULL, length);
+ if (!error) {
+ DQUOT_INIT(inode);
+- error = do_truncate(path.dentry, length, 0, NULL);
++ error = do_truncate(path.dentry, path.mnt, length, 0, NULL);
+ }
+
+ put_write_and_out:
+@@ -327,7 +327,8 @@ static long do_sys_ftruncate(unsigned in
+
+ error = locks_verify_truncate(inode, file, length);
+ if (!error)
+- error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, file);
++ error = do_truncate(dentry, file->f_path.mnt, length,
++ ATTR_MTIME|ATTR_CTIME, file);
+ out_putf:
+ fput(file);
+ out:
+@@ -624,7 +625,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
+ mode = inode->i_mode;
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+- err = notify_change(dentry, &newattrs);
++ err = notify_change(dentry, file->f_path.mnt, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(file->f_path.mnt);
+ out_putf:
+@@ -653,7 +654,7 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
+ mode = inode->i_mode;
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+- error = notify_change(path.dentry, &newattrs);
++ error = notify_change(path.dentry, path.mnt, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+ mnt_drop_write(path.mnt);
+ dput_and_out:
+@@ -667,7 +668,8 @@ SYSCALL_DEFINE2(chmod, const char __user
+ return sys_fchmodat(AT_FDCWD, filename, mode);
+ }
+
+-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
++static int chown_common(struct dentry * dentry, struct vfsmount *mnt,
++ uid_t user, gid_t group)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -686,7 +688,7 @@ static int chown_common(struct dentry *
+ newattrs.ia_valid |=
+ ATTR_KILL_SUID | ATTR_KILL_SGID | ATTR_KILL_PRIV;
+ mutex_lock(&inode->i_mutex);
+- error = notify_change(dentry, &newattrs);
++ error = notify_change(dentry, mnt, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+
+ return error;
+@@ -703,7 +705,7 @@ SYSCALL_DEFINE3(chown, const char __user
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -728,7 +730,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -747,7 +749,7 @@ SYSCALL_DEFINE3(lchown, const char __use
+ error = mnt_want_write(path.mnt);
+ if (error)
+ goto out_release;
+- error = chown_common(path.dentry, user, group);
++ error = chown_common(path.dentry, path.mnt, user, group);
+ mnt_drop_write(path.mnt);
+ out_release:
+ path_put(&path);
+@@ -770,7 +772,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
+ goto out_fput;
+ dentry = file->f_path.dentry;
+ audit_inode(NULL, dentry);
+- error = chown_common(dentry, user, group);
++ error = chown_common(dentry, file->f_path.mnt, user, group);
+ mnt_drop_write(file->f_path.mnt);
+ out_fput:
+ fput(file);
+--- a/fs/utimes.c
++++ b/fs/utimes.c
+@@ -102,7 +102,7 @@ static int utimes_common(struct path *pa
+ }
+ }
+ mutex_lock(&inode->i_mutex);
+- error = notify_change(path->dentry, &newattrs);
++ error = notify_change(path->dentry, path->mnt, &newattrs);
+ mutex_unlock(&inode->i_mutex);
+
+ mnt_drop_write_and_out:
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1636,8 +1636,8 @@ static inline int break_lease(struct ino
+
+ /* fs/open.c */
+
+-extern int do_truncate(struct dentry *, loff_t start, unsigned int time_attrs,
+- struct file *filp);
++extern int do_truncate(struct dentry *, struct vfsmount *, loff_t start,
++ unsigned int time_attrs, struct file *filp);
+ extern long do_sys_open(int dfd, const char __user *filename, int flags,
+ int mode);
+ extern struct file *filp_open(const char *, int, int);
+@@ -1798,7 +1798,7 @@ extern int do_remount_sb(struct super_bl
+ #ifdef CONFIG_BLOCK
+ extern sector_t bmap(struct inode *, sector_t);
+ #endif
+-extern int notify_change(struct dentry *, struct iattr *);
++extern int notify_change(struct dentry *, struct vfsmount *, struct iattr *);
+ extern int inode_permission(struct inode *, int);
+ extern int generic_permission(struct inode *, int,
+ int (*check_acl)(struct inode *, int));
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -1831,7 +1831,7 @@ static int __remove_suid(struct path *pa
+ struct iattr newattrs;
+
+ newattrs.ia_valid = ATTR_FORCE | kill;
+- return notify_change(path->dentry, &newattrs);
++ return notify_change(path->dentry, path->mnt, &newattrs);
+ }
+
+ int file_remove_suid(struct file *file)
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_removexattr()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
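+
+For illustration only (not part of this patch): a path-based LSM could use
+the extra argument roughly as sketched below. The hook name and the policy
+check are hypothetical; only the struct path composition and d_path() are
+real kernel APIs.
+
+	#include <linux/fs.h>
+	#include <linux/mm.h>
+	#include <linux/err.h>
+
+	static int example_path_check(struct dentry *dentry, struct vfsmount *mnt)
+	{
+		struct path path = { .mnt = mnt, .dentry = dentry };
+		char *buf, *name;
+		int error = 0;
+
+		buf = (char *)__get_free_page(GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+
+		/* compute the full pathname; impossible from the dentry alone */
+		name = d_path(&path, buf, PAGE_SIZE);
+		if (IS_ERR(name))
+			error = PTR_ERR(name);
+		/* else: match 'name' against the loaded policy here */
+
+		free_page((unsigned long)buf);
+		return error;
+	}
+
+The same applies to the other vfs helpers in this series that gain a
+vfsmount parameter.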
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/nfsd/vfs.c | 11 ++++++-----
+ fs/xattr.c | 12 ++++++------
+ include/linux/xattr.h | 2 +-
+ 3 files changed, 13 insertions(+), 12 deletions(-)
+
+Index: linux-2.6.27/fs/nfsd/vfs.c
+===================================================================
+--- linux-2.6.27.orig/fs/nfsd/vfs.c
++++ linux-2.6.27/fs/nfsd/vfs.c
+@@ -2095,6 +2095,7 @@ nfsd_get_posix_acl(struct svc_fh *fhp, i
+ int
+ nfsd_set_posix_acl(struct svc_fh *fhp, int type, struct posix_acl *acl)
+ {
++ struct vfsmount *mnt;
+ struct inode *inode = fhp->fh_dentry->d_inode;
+ char *name;
+ void *value = NULL;
+@@ -2127,22 +2128,22 @@ nfsd_set_posix_acl(struct svc_fh *fhp, i
+ } else
+ size = 0;
+
+- error = mnt_want_write(fhp->fh_export->ex_path.mnt);
++ mnt = fhp->fh_export->ex_path.mnt;
++ error = mnt_want_write(mnt);
+ if (error)
+ goto getout;
+ if (size)
+- error = vfs_setxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt,
+- name, value, size,0);
++ error = vfs_setxattr(fhp->fh_dentry, mnt, name, value, size,0);
+ else {
+ if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
+ error = 0;
+ else {
+- error = vfs_removexattr(fhp->fh_dentry, name);
++ error = vfs_removexattr(fhp->fh_dentry, mnt, name);
+ if (error == -ENODATA)
+ error = 0;
+ }
+ }
+- mnt_drop_write(fhp->fh_export->ex_path.mnt);
++ mnt_drop_write(mnt);
+
+ getout:
+ kfree(value);
+Index: linux-2.6.27/fs/xattr.c
+===================================================================
+--- linux-2.6.27.orig/fs/xattr.c
++++ linux-2.6.27/fs/xattr.c
+@@ -190,7 +190,7 @@ vfs_listxattr(struct dentry *dentry, str
+ EXPORT_SYMBOL_GPL(vfs_listxattr);
+
+ int
+-vfs_removexattr(struct dentry *dentry, const char *name)
++vfs_removexattr(struct dentry *dentry, struct vfsmount *mnt, const char *name)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -471,7 +471,7 @@ SYSCALL_DEFINE3(flistxattr, int, fd, cha
+ * Extended attribute REMOVE operations
+ */
+ static long
+-removexattr(struct dentry *d, const char __user *name)
++removexattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name)
+ {
+ int error;
+ char kname[XATTR_NAME_MAX + 1];
+@@ -482,7 +482,7 @@ removexattr(struct dentry *d, const char
+ if (error < 0)
+ return error;
+
+- return vfs_removexattr(d, kname);
++ return vfs_removexattr(dentry, mnt, kname);
+ }
+
+ SYSCALL_DEFINE2(removexattr, const char __user *, pathname,
+@@ -496,7 +496,7 @@ SYSCALL_DEFINE2(removexattr, const char
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, name);
++ error = removexattr(path.dentry, path.mnt, name);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -514,7 +514,7 @@ SYSCALL_DEFINE2(lremovexattr, const char
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = removexattr(path.dentry, name);
++ error = removexattr(path.dentry, path.mnt, name);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -534,7 +534,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, c
+ audit_inode(NULL, dentry);
+ error = mnt_want_write_file(f->f_path.mnt, f);
+ if (!error) {
+- error = removexattr(dentry, name);
++ error = removexattr(dentry, f->f_path.mnt, name);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+Index: linux-2.6.27/include/linux/xattr.h
+===================================================================
+--- linux-2.6.27.orig/include/linux/xattr.h
++++ linux-2.6.27/include/linux/xattr.h
+@@ -51,7 +51,7 @@ ssize_t xattr_getsecurity(struct inode *
+ ssize_t vfs_getxattr(struct dentry *, struct vfsmount *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, struct vfsmount *, char *list, size_t size);
+ int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int);
+-int vfs_removexattr(struct dentry *, const char *);
++int vfs_removexattr(struct dentry *, struct vfsmount *mnt, const char *);
+
+ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
+ ssize_t generic_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add struct vfsmount parameters to vfs_rename()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 7 ++++++-
+ fs/namei.c | 19 ++++++++++++-------
+ fs/nfsd/vfs.c | 3 ++-
+ include/linux/fs.h | 2 +-
+ 4 files changed, 21 insertions(+), 10 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -590,19 +590,24 @@ ecryptfs_rename(struct inode *old_dir, s
+ {
+ int rc;
+ struct dentry *lower_old_dentry;
++ struct vfsmount *lower_old_mnt;
+ struct dentry *lower_new_dentry;
++ struct vfsmount *lower_new_mnt;
+ struct dentry *lower_old_dir_dentry;
+ struct dentry *lower_new_dir_dentry;
+
+ lower_old_dentry = ecryptfs_dentry_to_lower(old_dentry);
++ lower_old_mnt = ecryptfs_dentry_to_lower_mnt(old_dentry);
+ lower_new_dentry = ecryptfs_dentry_to_lower(new_dentry);
++ lower_new_mnt = ecryptfs_dentry_to_lower_mnt(new_dentry);
+ dget(lower_old_dentry);
+ dget(lower_new_dentry);
+ lower_old_dir_dentry = dget_parent(lower_old_dentry);
+ lower_new_dir_dentry = dget_parent(lower_new_dentry);
+ lock_rename(lower_old_dir_dentry, lower_new_dir_dentry);
+ rc = vfs_rename(lower_old_dir_dentry->d_inode, lower_old_dentry,
+- lower_new_dir_dentry->d_inode, lower_new_dentry);
++ lower_old_mnt, lower_new_dir_dentry->d_inode,
++ lower_new_dentry, lower_new_mnt);
+ if (rc)
+ goto out_lock;
+ fsstack_copy_attr_all(new_dir, lower_new_dir_dentry->d_inode, NULL);
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2547,7 +2547,8 @@ SYSCALL_DEFINE2(link, const char __user
+ * locking].
+ */
+ static int vfs_rename_dir(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
++ struct vfsmount *old_mnt, struct inode *new_dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ int error = 0;
+ struct inode *target;
+@@ -2590,7 +2591,8 @@ static int vfs_rename_dir(struct inode *
+ }
+
+ static int vfs_rename_other(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
++ struct vfsmount *old_mnt, struct inode *new_dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ struct inode *target;
+ int error;
+@@ -2618,7 +2620,8 @@ static int vfs_rename_other(struct inode
+ }
+
+ int vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+- struct inode *new_dir, struct dentry *new_dentry)
++ struct vfsmount *old_mnt, struct inode *new_dir,
++ struct dentry *new_dentry, struct vfsmount *new_mnt)
+ {
+ int error;
+ int is_dir = S_ISDIR(old_dentry->d_inode->i_mode);
+@@ -2647,9 +2650,11 @@ int vfs_rename(struct inode *old_dir, st
+ old_name = fsnotify_oldname_init(old_dentry->d_name.name);
+
+ if (is_dir)
+- error = vfs_rename_dir(old_dir,old_dentry,new_dir,new_dentry);
++ error = vfs_rename_dir(old_dir, old_dentry, old_mnt,
++ new_dir, new_dentry, new_mnt);
+ else
+- error = vfs_rename_other(old_dir,old_dentry,new_dir,new_dentry);
++ error = vfs_rename_other(old_dir, old_dentry, old_mnt,
++ new_dir, new_dentry, new_mnt);
+ if (!error) {
+ const char *new_name = old_dentry->d_name.name;
+ fsnotify_move(old_dir, new_dir, old_name, new_name, is_dir,
+@@ -2726,8 +2731,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
+ error = mnt_want_write(oldnd.path.mnt);
+ if (error)
+ goto exit5;
+- error = vfs_rename(old_dir->d_inode, old_dentry,
+- new_dir->d_inode, new_dentry);
++ error = vfs_rename(old_dir->d_inode, old_dentry, oldnd.path.mnt,
++ new_dir->d_inode, new_dentry, newnd.path.mnt);
+ mnt_drop_write(oldnd.path.mnt);
+ exit5:
+ dput(new_dentry);
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1752,7 +1752,8 @@ nfsd_rename(struct svc_rqst *rqstp, stru
+ if (host_err)
+ goto out_dput_new;
+
+- host_err = vfs_rename(fdir, odentry, tdir, ndentry);
++ host_err = vfs_rename(fdir, odentry, ffhp->fh_export->ex_path.mnt,
++ tdir, ndentry, tfhp->fh_export->ex_path.mnt);
+ if (!host_err && EX_ISSYNC(tfhp->fh_export)) {
+ host_err = nfsd_sync_dir(tdentry);
+ if (!host_err)
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1184,7 +1184,7 @@ extern int vfs_symlink(struct inode *, s
+ extern int vfs_link(struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_rmdir(struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_unlink(struct inode *, struct dentry *, struct vfsmount *);
+-extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
++extern int vfs_rename(struct inode *, struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
+
+ /*
+ * VFS dentry helper functions.
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_rmdir()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 4 +++-
+ fs/namei.c | 4 ++--
+ fs/nfsd/nfs4recover.c | 2 +-
+ fs/nfsd/vfs.c | 8 +++++---
+ include/linux/fs.h | 2 +-
+ 5 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -534,14 +534,16 @@ out:
+ static int ecryptfs_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
+ struct dentry *lower_dir_dentry;
+ int rc;
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ dget(dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+ dget(lower_dentry);
+- rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry);
++ rc = vfs_rmdir(lower_dir_dentry->d_inode, lower_dentry, lower_mnt);
+ dput(lower_dentry);
+ if (!rc)
+ d_delete(lower_dentry);
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2167,7 +2167,7 @@ void dentry_unhash(struct dentry *dentry
+ spin_unlock(&dcache_lock);
+ }
+
+-int vfs_rmdir(struct inode *dir, struct dentry *dentry)
++int vfs_rmdir(struct inode *dir, struct dentry *dentry, struct vfsmount *mnt)
+ {
+ int error = may_delete(dir, dentry, 1);
+
+@@ -2230,7 +2230,7 @@ static long do_rmdir(int dfd, const char
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit3;
+- error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
++ error = vfs_rmdir(nd.path.dentry->d_inode, dentry, nd.path.mnt);
+ mnt_drop_write(nd.path.mnt);
+ exit3:
+ dput(dentry);
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -279,7 +279,7 @@ nfsd4_clear_clid_dir(struct dentry *dir,
+ * a kernel from the future.... */
+ nfsd4_list_rec_dir(dentry, nfsd4_remove_clid_file);
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+- status = vfs_rmdir(dir->d_inode, dentry);
++ status = vfs_rmdir(dir->d_inode, dentry, rec_dir.path.mnt);
+ mutex_unlock(&dir->d_inode->i_mutex);
+ return status;
+ }
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1790,6 +1790,7 @@ nfsd_unlink(struct svc_rqst *rqstp, stru
+ char *fname, int flen)
+ {
+ struct dentry *dentry, *rdentry;
++ struct svc_export *exp;
+ struct inode *dirp;
+ __be32 err;
+ int host_err;
+@@ -1804,6 +1805,7 @@ nfsd_unlink(struct svc_rqst *rqstp, stru
+ fh_lock_nested(fhp, I_MUTEX_PARENT);
+ dentry = fhp->fh_dentry;
+ dirp = dentry->d_inode;
++ exp = fhp->fh_export;
+
+ rdentry = lookup_one_len(fname, dentry, flen);
+ host_err = PTR_ERR(rdentry);
+@@ -1825,21 +1827,21 @@ nfsd_unlink(struct svc_rqst *rqstp, stru
+
+ if (type != S_IFDIR) { /* It's UNLINK */
+ #ifdef MSNFS
+- if ((fhp->fh_export->ex_flags & NFSEXP_MSNFS) &&
++ if ((exp->ex_flags & NFSEXP_MSNFS) &&
+ (atomic_read(&rdentry->d_count) > 1)) {
+ host_err = -EPERM;
+ } else
+ #endif
+ host_err = vfs_unlink(dirp, rdentry);
+ } else { /* It's RMDIR */
+- host_err = vfs_rmdir(dirp, rdentry);
++ host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
+ }
+
+ dput(rdentry);
+
+ if (host_err)
+ goto out_drop;
+- if (EX_ISSYNC(fhp->fh_export))
++ if (EX_ISSYNC(exp))
+ host_err = nfsd_sync_dir(dentry);
+
+ out_drop:
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1182,7 +1182,7 @@ extern int vfs_mkdir(struct inode *, str
+ extern int vfs_mknod(struct inode *, struct dentry *, struct vfsmount *, int, dev_t);
+ extern int vfs_symlink(struct inode *, struct dentry *, struct vfsmount *, const char *);
+ extern int vfs_link(struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
+-extern int vfs_rmdir(struct inode *, struct dentry *);
++extern int vfs_rmdir(struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_unlink(struct inode *, struct dentry *);
+ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_setxattr()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/nfsd/vfs.c | 16 +++++++++++-----
+ fs/xattr.c | 16 ++++++++--------
+ include/linux/xattr.h | 3 ++-
+ 3 files changed, 21 insertions(+), 14 deletions(-)
+
+Index: linux-2.6.27/fs/nfsd/vfs.c
+===================================================================
+--- linux-2.6.27.orig/fs/nfsd/vfs.c
++++ linux-2.6.27/fs/nfsd/vfs.c
+@@ -438,7 +438,8 @@ static ssize_t nfsd_getxattr(struct dent
+
+ #if defined(CONFIG_NFSD_V4)
+ static int
+-set_nfsv4_acl_one(struct dentry *dentry, struct posix_acl *pacl, char *key)
++set_nfsv4_acl_one(struct dentry *dentry, struct vfsmount *mnt,
++ struct posix_acl *pacl, char *key)
+ {
+ int len;
+ size_t buflen;
+@@ -457,7 +458,7 @@ set_nfsv4_acl_one(struct dentry *dentry,
+ goto out;
+ }
+
+- error = vfs_setxattr(dentry, key, buf, len, 0);
++ error = vfs_setxattr(dentry, mnt, key, buf, len, 0);
+ out:
+ kfree(buf);
+ return error;
+@@ -470,6 +471,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqst
+ __be32 error;
+ int host_error;
+ struct dentry *dentry;
++ struct vfsmount *mnt;
+ struct inode *inode;
+ struct posix_acl *pacl = NULL, *dpacl = NULL;
+ unsigned int flags = 0;
+@@ -480,6 +482,7 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqst
+ return error;
+
+ dentry = fhp->fh_dentry;
++ mnt = fhp->fh_export->ex_path.mnt;
+ inode = dentry->d_inode;
+ if (S_ISDIR(inode->i_mode))
+ flags = NFS4_ACL_DIR;
+@@ -490,12 +493,14 @@ nfsd4_set_nfs4_acl(struct svc_rqst *rqst
+ } else if (host_error < 0)
+ goto out_nfserr;
+
+- host_error = set_nfsv4_acl_one(dentry, pacl, POSIX_ACL_XATTR_ACCESS);
++ host_error = set_nfsv4_acl_one(dentry, mnt, pacl,
++ POSIX_ACL_XATTR_ACCESS);
+ if (host_error < 0)
+ goto out_release;
+
+ if (S_ISDIR(inode->i_mode))
+- host_error = set_nfsv4_acl_one(dentry, dpacl, POSIX_ACL_XATTR_DEFAULT);
++ host_error = set_nfsv4_acl_one(dentry, mnt, dpacl,
++ POSIX_ACL_XATTR_DEFAULT);
+
+ out_release:
+ posix_acl_release(pacl);
+@@ -2123,7 +2128,8 @@ nfsd_set_posix_acl(struct svc_fh *fhp, i
+ if (error)
+ goto getout;
+ if (size)
+- error = vfs_setxattr(fhp->fh_dentry, name, value, size, 0);
++ error = vfs_setxattr(fhp->fh_dentry, fhp->fh_export->ex_path.mnt,
++ name, value, size,0);
+ else {
+ if (!S_ISDIR(inode->i_mode) && type == ACL_TYPE_DEFAULT)
+ error = 0;
+Index: linux-2.6.27/fs/xattr.c
+===================================================================
+--- linux-2.6.27.orig/fs/xattr.c
++++ linux-2.6.27/fs/xattr.c
+@@ -67,8 +67,8 @@ xattr_permission(struct inode *inode, co
+ }
+
+ int
+-vfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+- size_t size, int flags)
++vfs_setxattr(struct dentry *dentry, struct vfsmount *mnt, const char *name,
++ const void *value, size_t size, int flags)
+ {
+ struct inode *inode = dentry->d_inode;
+ int error;
+@@ -218,8 +218,8 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
+ * Extended attribute SET operations
+ */
+ static long
+-setxattr(struct dentry *d, const char __user *name, const void __user *value,
+- size_t size, int flags)
++setxattr(struct dentry *dentry, struct vfsmount *mnt, const char __user *name,
++ const void __user *value, size_t size, int flags)
+ {
+ int error;
+ void *kvalue = NULL;
+@@ -246,7 +246,7 @@ setxattr(struct dentry *d, const char __
+ }
+ }
+
+- error = vfs_setxattr(d, kname, kvalue, size, flags);
++ error = vfs_setxattr(dentry, mnt, kname, kvalue, size, flags);
+ kfree(kvalue);
+ return error;
+ }
+@@ -263,7 +263,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(path.dentry, path.mnt, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -282,7 +282,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
+ return error;
+ error = mnt_want_write(path.mnt);
+ if (!error) {
+- error = setxattr(path.dentry, name, value, size, flags);
++ error = setxattr(path.dentry, path.mnt, name, value, size, flags);
+ mnt_drop_write(path.mnt);
+ }
+ path_put(&path);
+@@ -303,7 +303,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
+ audit_inode(NULL, dentry);
+ error = mnt_want_write_file(f->f_path.mnt, f);
+ if (!error) {
+- error = setxattr(dentry, name, value, size, flags);
++ error = setxattr(dentry, f->f_vfsmnt, name, value, size, flags);
+ mnt_drop_write(f->f_path.mnt);
+ }
+ fput(f);
+Index: linux-2.6.27/include/linux/xattr.h
+===================================================================
+--- linux-2.6.27.orig/include/linux/xattr.h
++++ linux-2.6.27/include/linux/xattr.h
+@@ -16,6 +16,7 @@
+ #ifdef __KERNEL__
+
+ #include <linux/types.h>
++#include <linux/mount.h>
+
+ /* Namespaces */
+ #define XATTR_OS2_PREFIX "os2."
+@@ -49,7 +50,7 @@ struct xattr_handler {
+ ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
+ ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
+ ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
+-int vfs_setxattr(struct dentry *, const char *, const void *, size_t, int);
++int vfs_setxattr(struct dentry *, struct vfsmount *, const char *, const void *, size_t, int);
+ int vfs_removexattr(struct dentry *, const char *);
+
+ ssize_t generic_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_symlink()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 4 +++-
+ fs/namei.c | 5 +++--
+ fs/nfsd/vfs.c | 12 ++++++++----
+ include/linux/fs.h | 2 +-
+ 4 files changed, 15 insertions(+), 8 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -464,6 +464,7 @@ static int ecryptfs_symlink(struct inode
+ {
+ int rc;
+ struct dentry *lower_dentry;
++ struct vfsmount *lower_mnt;
+ struct dentry *lower_dir_dentry;
+ char *encoded_symname;
+ int encoded_symlen;
+@@ -471,6 +472,7 @@ static int ecryptfs_symlink(struct inode
+
+ lower_dentry = ecryptfs_dentry_to_lower(dentry);
+ dget(lower_dentry);
++ lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+ encoded_symlen = ecryptfs_encode_filename(crypt_stat, symname,
+ strlen(symname),
+@@ -479,7 +481,7 @@ static int ecryptfs_symlink(struct inode
+ rc = encoded_symlen;
+ goto out_lock;
+ }
+- rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry,
++ rc = vfs_symlink(lower_dir_dentry->d_inode, lower_dentry, lower_mnt,
+ encoded_symname);
+ kfree(encoded_symname);
+ if (rc || !lower_dentry->d_inode)
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2347,7 +2347,8 @@ SYSCALL_DEFINE1(unlink, const char __use
+ return do_unlinkat(AT_FDCWD, pathname);
+ }
+
+-int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
++int vfs_symlink(struct inode *dir, struct dentry *dentry, struct vfsmount *mnt,
++ const char *oldname)
+ {
+ int error = may_create(dir, dentry, 0);
+
+@@ -2393,7 +2394,7 @@ SYSCALL_DEFINE3(symlinkat, const char __
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto out_dput;
+- error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
++ error = vfs_symlink(nd.path.dentry->d_inode, dentry, nd.path.mnt, from);
+ mnt_drop_write(nd.path.mnt);
+ out_dput:
+ dput(dentry);
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1545,6 +1545,7 @@ nfsd_symlink(struct svc_rqst *rqstp, str
+ struct iattr *iap)
+ {
+ struct dentry *dentry, *dnew;
++ struct svc_export *exp;
+ __be32 err, cerr;
+ int host_err;
+
+@@ -1569,6 +1570,7 @@ nfsd_symlink(struct svc_rqst *rqstp, str
+ if (host_err)
+ goto out_nfserr;
+
++ exp = fhp->fh_export;
+ if (unlikely(path[plen] != 0)) {
+ char *path_alloced = kmalloc(plen+1, GFP_KERNEL);
+ if (path_alloced == NULL)
+@@ -1576,14 +1578,16 @@ nfsd_symlink(struct svc_rqst *rqstp, str
+ else {
+ strncpy(path_alloced, path, plen);
+ path_alloced[plen] = 0;
+- host_err = vfs_symlink(dentry->d_inode, dnew, path_alloced);
++ host_err = vfs_symlink(dentry->d_inode, dnew,
++ exp->ex_path.mnt, path_alloced);
+ kfree(path_alloced);
+ }
+ } else
+- host_err = vfs_symlink(dentry->d_inode, dnew, path);
++ host_err = vfs_symlink(dentry->d_inode, dnew, exp->ex_path.mnt,
++ path);
+
+ if (!host_err) {
+- if (EX_ISSYNC(fhp->fh_export))
++ if (EX_ISSYNC(exp))
+ host_err = nfsd_sync_dir(dentry);
+ }
+ err = nfserrno(host_err);
+@@ -1591,7 +1595,7 @@ nfsd_symlink(struct svc_rqst *rqstp, str
+
+ mnt_drop_write(fhp->fh_export->ex_path.mnt);
+
+- cerr = fh_compose(resfhp, fhp->fh_export, dnew, fhp);
++ cerr = fh_compose(resfhp, exp, dnew, fhp);
+ dput(dnew);
+ if (err==0) err = cerr;
+ out:
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1180,7 +1180,7 @@ extern int vfs_permission(struct nameida
+ extern int vfs_create(struct inode *, struct dentry *, int, struct nameidata *);
+ extern int vfs_mkdir(struct inode *, struct dentry *, struct vfsmount *, int);
+ extern int vfs_mknod(struct inode *, struct dentry *, struct vfsmount *, int, dev_t);
+-extern int vfs_symlink(struct inode *, struct dentry *, const char *);
++extern int vfs_symlink(struct inode *, struct dentry *, struct vfsmount *, const char *);
+ extern int vfs_link(struct dentry *, struct inode *, struct dentry *);
+ extern int vfs_rmdir(struct inode *, struct dentry *);
+ extern int vfs_unlink(struct inode *, struct dentry *);
--- /dev/null
+From: Tony Jones <tonyj@suse.de>
+Subject: Add a struct vfsmount parameter to vfs_unlink()
+
+The vfsmount will be passed down to the LSM hook so that LSMs can compute
+pathnames.
+
+Signed-off-by: Tony Jones <tonyj@suse.de>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+Signed-off-by: John Johansen <jjohansen@suse.de>
+
+---
+ fs/ecryptfs/inode.c | 3 ++-
+ fs/namei.c | 4 ++--
+ fs/nfsd/nfs4recover.c | 2 +-
+ fs/nfsd/vfs.c | 2 +-
+ include/linux/fs.h | 2 +-
+ ipc/mqueue.c | 2 +-
+ 6 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/fs/ecryptfs/inode.c
++++ b/fs/ecryptfs/inode.c
+@@ -445,12 +445,13 @@ static int ecryptfs_unlink(struct inode
+ {
+ int rc = 0;
+ struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
++ struct vfsmount *lower_mnt = ecryptfs_dentry_to_lower_mnt(dentry);
+ struct inode *lower_dir_inode = ecryptfs_inode_to_lower(dir);
+ struct dentry *lower_dir_dentry;
+
+ dget(lower_dentry);
+ lower_dir_dentry = lock_parent(lower_dentry);
+- rc = vfs_unlink(lower_dir_inode, lower_dentry);
++ rc = vfs_unlink(lower_dir_inode, lower_dentry, lower_mnt);
+ if (rc) {
+ printk(KERN_ERR "Error in vfs_unlink; rc = [%d]\n", rc);
+ goto out_unlock;
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -2248,7 +2248,7 @@ SYSCALL_DEFINE1(rmdir, const char __user
+ return do_rmdir(AT_FDCWD, pathname);
+ }
+
+-int vfs_unlink(struct inode *dir, struct dentry *dentry)
++int vfs_unlink(struct inode *dir, struct dentry *dentry, struct vfsmount *mnt)
+ {
+ int error = may_delete(dir, dentry, 0);
+
+@@ -2313,7 +2313,7 @@ static long do_unlinkat(int dfd, const c
+ error = mnt_want_write(nd.path.mnt);
+ if (error)
+ goto exit2;
+- error = vfs_unlink(nd.path.dentry->d_inode, dentry);
++ error = vfs_unlink(nd.path.dentry->d_inode, dentry, nd.path.mnt);
+ mnt_drop_write(nd.path.mnt);
+ exit2:
+ dput(dentry);
+--- a/fs/nfsd/nfs4recover.c
++++ b/fs/nfsd/nfs4recover.c
+@@ -264,7 +264,7 @@ nfsd4_remove_clid_file(struct dentry *di
+ return -EINVAL;
+ }
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
+- status = vfs_unlink(dir->d_inode, dentry);
++ status = vfs_unlink(dir->d_inode, dentry, rec_dir.path.mnt);
+ mutex_unlock(&dir->d_inode->i_mutex);
+ return status;
+ }
+--- a/fs/nfsd/vfs.c
++++ b/fs/nfsd/vfs.c
+@@ -1833,7 +1833,7 @@ nfsd_unlink(struct svc_rqst *rqstp, stru
+ host_err = -EPERM;
+ } else
+ #endif
+- host_err = vfs_unlink(dirp, rdentry);
++ host_err = vfs_unlink(dirp, rdentry, exp->ex_path.mnt);
+ } else { /* It's RMDIR */
+ host_err = vfs_rmdir(dirp, rdentry, exp->ex_path.mnt);
+ }
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1183,7 +1183,7 @@ extern int vfs_mknod(struct inode *, str
+ extern int vfs_symlink(struct inode *, struct dentry *, struct vfsmount *, const char *);
+ extern int vfs_link(struct dentry *, struct vfsmount *, struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_rmdir(struct inode *, struct dentry *, struct vfsmount *);
+-extern int vfs_unlink(struct inode *, struct dentry *);
++extern int vfs_unlink(struct inode *, struct dentry *, struct vfsmount *);
+ extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *);
+
+ /*
+--- a/ipc/mqueue.c
++++ b/ipc/mqueue.c
+@@ -746,7 +746,7 @@ SYSCALL_DEFINE1(mq_unlink, const char __
+ err = mnt_want_write(mqueue_mnt);
+ if (err)
+ goto out_err;
+- err = vfs_unlink(dentry->d_parent->d_inode, dentry);
++ err = vfs_unlink(dentry->d_parent->d_inode, dentry, mqueue_mnt);
+ mnt_drop_write(mqueue_mnt);
+ out_err:
+ dput(dentry);
--- /dev/null
+From: Bernhard Walle <bwalle@suse.de>
+Subject: [PATCH] Fix memory map for ia64/discontmem for kdump
+
+makedumpfile[1] cannot run on an ia64 discontigmem kernel, because the member
+node_mem_map of struct pgdat_list has an invalid value. This patch fixes it.
+
+node_start_pfn holds the start pfn of each node, and node_mem_map should
+point to the 'struct page' of each node's node_start_pfn.
+On my machine, node0's node_start_pfn is 0x400, yet its node_mem_map points
+to 0xa0007fffbf000000. This address is the same as vmem_map, so node_mem_map
+points to the 'struct page' of pfn 0, even though node_start_pfn is 0x400.
+
+The cause is the round down of min_pfn in count_node_pages(), which makes
+node0's node_mem_map point to the 'struct page' of an inactive pfn (0x0).
+The patch removes that extra ORDERROUNDDOWN of the start address.
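+
+As an illustration (assuming the common 16KB page size and 16MB granule):
+node0's node_start_pfn of 0x400 corresponds to the address 16MB.
+GRANULEROUNDDOWN leaves 16MB unchanged, but ORDERROUNDDOWN rounds down to
+a multiple of PAGE_SIZE<<MAX_ORDER (e.g. 32MB with a MAX_ORDER of 11),
+which turns 16MB into 0, so node_mem_map ends up describing pfn 0, i.e.
+vmem_map.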
+
+
+makedumpfile[1]: dump filtering command
+https://sourceforge.net/projects/makedumpfile/
+
+Signed-off-by: Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/ia64/include/asm/meminit.h | 1 -
+ arch/ia64/mm/discontig.c | 1 -
+ 2 files changed, 2 deletions(-)
+
+--- a/arch/ia64/include/asm/meminit.h
++++ b/arch/ia64/include/asm/meminit.h
+@@ -47,7 +47,6 @@ extern int reserve_elfcorehdr(unsigned l
+ */
+ #define GRANULEROUNDDOWN(n) ((n) & ~(IA64_GRANULE_SIZE-1))
+ #define GRANULEROUNDUP(n) (((n)+IA64_GRANULE_SIZE-1) & ~(IA64_GRANULE_SIZE-1))
+-#define ORDERROUNDDOWN(n) ((n) & ~((PAGE_SIZE<<MAX_ORDER)-1))
+
+ #ifdef CONFIG_NUMA
+ extern void call_pernode_memory (unsigned long start, unsigned long len, void *func);
+--- a/arch/ia64/mm/discontig.c
++++ b/arch/ia64/mm/discontig.c
+@@ -635,7 +635,6 @@ static __init int count_node_pages(unsig
+ (min(end, __pa(MAX_DMA_ADDRESS)) - start) >>PAGE_SHIFT;
+ #endif
+ start = GRANULEROUNDDOWN(start);
+- start = ORDERROUNDDOWN(start);
+ end = GRANULEROUNDUP(end);
+ mem_data[node].max_pfn = max(mem_data[node].max_pfn,
+ end >> PAGE_SHIFT);
--- /dev/null
+From: Arnd Bergmann <arnd.bergmann@de.ibm.com>
+Subject: powerpc/cell/axon-msi: retry on missing interrupt
+References: bnc#445964,bnc#467633
+
+The MSI capture logic on the axon bridge can sometimes
+lose interrupts in case of high DMA and interrupt load,
+when it signals an MSI interrupt to the MPIC interrupt
+controller while we are already handling another MSI.
+
+Each MSI vector gets written into a FIFO buffer in main
+memory using DMA, and that DMA access is normally flushed
+by the actual interrupt packet on the IOIF. An MMIO
+register in the MSIC holds the position of the last
+entry in the FIFO buffer that was written. However,
+reading that position does not flush the DMA, so we
+may observe stale data in the buffer.
+
+In a stress test, we have observed the DMA to arrive
+up to 14 microseconds after reading the register.
+We can reliably detect this condition by writing
+an invalid MSI vector into the FIFO buffer after
+reading from it, assuming that all MSIs we get
+are valid. After detecting an invalid MSI vector,
+we udelay(1) in the interrupt cascade for up to
+100 times before giving up.
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: John Jolly <jjolly@novell.com>
+
+commit 23e0e8afafd9ac065d81506524adf3339584044b
+Author: Arnd Bergmann <arnd@arndb.de>
+Date: Fri Dec 12 09:19:50 2008 +0000
+
+ powerpc/cell/axon-msi: Fix MSI after kexec
+
+ Commit d015fe995 'powerpc/cell/axon-msi: Retry on missing interrupt'
+ has turned a rare failure to kexec on QS22 into a reproducible
+ error, which we have now analysed.
+
+ The problem is that after a kexec, the MSIC hardware still points
+ into the middle of the old ring buffer. We set up the ring buffer
+ during reboot, but not the offset into it. On older kernels, this
+ would cause a storm of thousands of spurious interrupts after a
+ kexec, which would most of the time get dropped silently.
+
+ With the new code, we time out on each interrupt, waiting for
+ it to become valid. If more interrupts come in that we time
+ out on, this goes on indefinitely, which eventually leads to
+ a hard crash.
+
+ The solution in this commit is to read the current offset from
+ the MSIC when reinitializing it. This now works correctly, as
+ expected.
+
+ Reported-by: Dirk Herrendoerfer <d.herrendoerfer@de.ibm.com>
+ Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+ Acked-by: Michael Ellerman <michael@ellerman.id.au>
+ Signed-off-by: Paul Mackerras <paulus@samba.org>
+
+
+---
+ arch/powerpc/platforms/cell/axon_msi.c | 39 ++++++++++++++++++++++++++++-----
+ 1 file changed, 34 insertions(+), 5 deletions(-)
+
+--- a/arch/powerpc/platforms/cell/axon_msi.c
++++ b/arch/powerpc/platforms/cell/axon_msi.c
+@@ -95,6 +95,7 @@ static void axon_msi_cascade(unsigned in
+ struct axon_msic *msic = get_irq_data(irq);
+ u32 write_offset, msi;
+ int idx;
++ int retry = 0;
+
+ write_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG);
+ pr_debug("axon_msi: original write_offset 0x%x\n", write_offset);
+@@ -102,7 +103,7 @@ static void axon_msi_cascade(unsigned in
+ /* write_offset doesn't wrap properly, so we have to mask it */
+ write_offset &= MSIC_FIFO_SIZE_MASK;
+
+- while (msic->read_offset != write_offset) {
++ while (msic->read_offset != write_offset && retry < 100) {
+ idx = msic->read_offset / sizeof(__le32);
+ msi = le32_to_cpu(msic->fifo_virt[idx]);
+ msi &= 0xFFFF;
+@@ -110,13 +111,37 @@ static void axon_msi_cascade(unsigned in
+ pr_debug("axon_msi: woff %x roff %x msi %x\n",
+ write_offset, msic->read_offset, msi);
+
++ if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host) {
++ generic_handle_irq(msi);
++ msic->fifo_virt[idx] = cpu_to_le32(0xffffffff);
++ } else {
++ /*
++ * Reading the MSIC_WRITE_OFFSET_REG does not
++ * reliably flush the outstanding DMA to the
++ * FIFO buffer. Here we were reading stale
++ * data, so we need to retry.
++ */
++ udelay(1);
++ retry++;
++ pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
++ continue;
++ }
++
++ if (retry) {
++ pr_debug("axon_msi: late irq 0x%x, retry %d\n",
++ msi, retry);
++ retry = 0;
++ }
++
+ msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
+ msic->read_offset &= MSIC_FIFO_SIZE_MASK;
++ }
+
+- if (msi < NR_IRQS && irq_map[msi].host == msic->irq_host)
+- generic_handle_irq(msi);
+- else
+- pr_debug("axon_msi: invalid irq 0x%x!\n", msi);
++ if (retry) {
++ printk(KERN_WARNING "axon_msi: irq timed out\n");
++
++ msic->read_offset += MSIC_FIFO_ENTRY_SIZE;
++ msic->read_offset &= MSIC_FIFO_SIZE_MASK;
+ }
+
+ desc->chip->eoi(irq);
+@@ -364,6 +389,7 @@ static int axon_msi_probe(struct of_devi
+ dn->full_name);
+ goto out_free_fifo;
+ }
++ memset(msic->fifo_virt, 0xff, MSIC_FIFO_SIZE_BYTES);
+
+ msic->irq_host = irq_alloc_host(dn, IRQ_HOST_MAP_NOMAP,
+ NR_IRQS, &msic_host_ops, 0);
+@@ -387,6 +413,9 @@ static int axon_msi_probe(struct of_devi
+ MSIC_CTRL_IRQ_ENABLE | MSIC_CTRL_ENABLE |
+ MSIC_CTRL_FIFO_SIZE);
+
++ msic->read_offset = dcr_read(msic->dcr_host, MSIC_WRITE_OFFSET_REG)
++ & MSIC_FIFO_SIZE_MASK;
++
+ device->dev.platform_data = msic;
+
+ ppc_md.setup_msi_irqs = axon_msi_setup_msi_irqs;
--- /dev/null
+From: Chandru <chandru@in.ibm.com>
+Date: Sat, 30 Aug 2008 00:28:16 +1000
+Subject: [PATCH] powerpc: Add support for dynamic reconfiguration memory in kexec/kdump kernels
+References: bnc#431492
+X-Git-Commit: cf00085d8045cddd80a8aabad97de96fa8131793
+
+The kdump kernel needs to use only those memory regions that it is allowed
+to use (crashkernel, rtas, tce, etc.). Each of these regions has
+its own size and is currently added under the 'linux,usable-memory'
+property of each memory@xxx node of the device tree.
+
+The ibm,dynamic-memory property of the ibm,dynamic-reconfiguration-memory
+node (on POWER6) now stores the representation for most of the
+logical memory blocks, with the size of each memory block being a
+constant (lmb_size). If one or more of the above-mentioned regions, or
+part of one, lies within one of the lmbs from the ibm,dynamic-memory
+property, those regions need to be identified within the given lmb.
+
+This patch makes the kernel recognize a new 'linux,drconf-usable-memory'
+property added by kexec-tools. Each entry in this property consists of
+a count followed by that many (base, size) pairs for the above-mentioned
+regions. The number of cells in the count value is given by
+the #size-cells property of the root node.
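+
+Purely as an illustration (values made up): with #address-cells = 2 and
+#size-cells = 2, an lmb whose usable memory is restricted to two ranges
+would be described in linux,drconf-usable-memory by the cells
+
+	<count = 2> <base0> <size0> <base1> <size1>
+
+where the count and each size occupy two cells and each base two cells,
+while an lmb with no usable memory at all carries just a count of 0 and
+is skipped by the kernel.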
+
+Signed-off-by: Chandru Siddalingappa <chandru@in.ibm.com>
+Signed-off-by: Paul Mackerras <paulus@samba.org>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/powerpc/kernel/prom.c | 40 +++++++++++++++++++---
+ arch/powerpc/mm/numa.c | 79 +++++++++++++++++++++++++++++++++++----------
+ 2 files changed, 96 insertions(+), 23 deletions(-)
+
+--- a/arch/powerpc/kernel/prom.c
++++ b/arch/powerpc/kernel/prom.c
+@@ -888,9 +888,10 @@ static u64 __init dt_mem_next_cell(int s
+ */
+ static int __init early_init_dt_scan_drconf_memory(unsigned long node)
+ {
+- cell_t *dm, *ls;
++ cell_t *dm, *ls, *usm;
+ unsigned long l, n, flags;
+ u64 base, size, lmb_size;
++ unsigned int is_kexec_kdump = 0, rngs;
+
+ ls = (cell_t *)of_get_flat_dt_prop(node, "ibm,lmb-size", &l);
+ if (ls == NULL || l < dt_root_size_cells * sizeof(cell_t))
+@@ -905,6 +906,12 @@ static int __init early_init_dt_scan_drc
+ if (l < (n * (dt_root_addr_cells + 4) + 1) * sizeof(cell_t))
+ return 0;
+
++ /* check if this is a kexec/kdump kernel. */
++ usm = (cell_t *)of_get_flat_dt_prop(node, "linux,drconf-usable-memory",
++ &l);
++ if (usm != NULL)
++ is_kexec_kdump = 1;
++
+ for (; n != 0; --n) {
+ base = dt_mem_next_cell(dt_root_addr_cells, &dm);
+ flags = dm[3];
+@@ -915,13 +922,34 @@ static int __init early_init_dt_scan_drc
+ if ((flags & 0x80) || !(flags & 0x8))
+ continue;
+ size = lmb_size;
+- if (iommu_is_off) {
+- if (base >= 0x80000000ul)
++ rngs = 1;
++ if (is_kexec_kdump) {
++ /*
++ * For each lmb in ibm,dynamic-memory, a corresponding
++ * entry in linux,drconf-usable-memory property contains
++ * a counter 'p' followed by 'p' (base, size) duple.
++ * Now read the counter from
++ * linux,drconf-usable-memory property
++ */
++ rngs = dt_mem_next_cell(dt_root_size_cells, &usm);
++ if (!rngs) /* there are no (base, size) duple */
+ continue;
+- if ((base + size) > 0x80000000ul)
+- size = 0x80000000ul - base;
+ }
+- lmb_add(base, size);
++ do {
++ if (is_kexec_kdump) {
++ base = dt_mem_next_cell(dt_root_addr_cells,
++ &usm);
++ size = dt_mem_next_cell(dt_root_size_cells,
++ &usm);
++ }
++ if (iommu_is_off) {
++ if (base >= 0x80000000ul)
++ continue;
++ if ((base + size) > 0x80000000ul)
++ size = 0x80000000ul - base;
++ }
++ lmb_add(base, size);
++ } while (--rngs);
+ }
+ lmb_dump_all();
+ return 0;
+--- a/arch/powerpc/mm/numa.c
++++ b/arch/powerpc/mm/numa.c
+@@ -192,6 +192,21 @@ static const int *of_get_associativity(s
+ return of_get_property(dev, "ibm,associativity", NULL);
+ }
+
++/*
++ * Returns the property linux,drconf-usable-memory if
++ * it exists (the property exists only in kexec/kdump kernels,
++ * added by kexec-tools)
++ */
++static const u32 *of_get_usable_memory(struct device_node *memory)
++{
++ const u32 *prop;
++ u32 len;
++ prop = of_get_property(memory, "linux,drconf-usable-memory", &len);
++ if (!prop || len < sizeof(unsigned int))
++ return 0;
++ return prop;
++}
++
+ /* Returns nid in the range [0..MAX_NUMNODES-1], or -1 if no useful numa
+ * info is found.
+ */
+@@ -529,14 +544,29 @@ static unsigned long __init numa_enforce
+ }
+
+ /*
++ * Reads the counter for a given entry in
++ * linux,drconf-usable-memory property
++ */
++static inline int __init read_usm_ranges(const u32 **usm)
++{
++ /*
++ * For each lmb in ibm,dynamic-memory a corresponding
++ * entry in linux,drconf-usable-memory property contains
++ * a counter followed by that many (base, size) duple.
++ * read the counter from linux,drconf-usable-memory
++ */
++ return read_n_cells(n_mem_size_cells, usm);
++}
++
++/*
+ * Extract NUMA information from the ibm,dynamic-reconfiguration-memory
+ * node. This assumes n_mem_{addr,size}_cells have been set.
+ */
+ static void __init parse_drconf_memory(struct device_node *memory)
+ {
+- const u32 *dm;
+- unsigned int n, rc;
+- unsigned long lmb_size, size;
++ const u32 *dm, *usm;
++ unsigned int n, rc, ranges, is_kexec_kdump = 0;
++ unsigned long lmb_size, base, size, sz;
+ int nid;
+ struct assoc_arrays aa;
+
+@@ -552,6 +582,11 @@ static void __init parse_drconf_memory(s
+ if (rc)
+ return;
+
++ /* check if this is a kexec/kdump kernel */
++ usm = of_get_usable_memory(memory);
++ if (usm != NULL)
++ is_kexec_kdump = 1;
++
+ for (; n != 0; --n) {
+ struct of_drconf_cell drmem;
+
+@@ -563,21 +598,31 @@ static void __init parse_drconf_memory(s
+ || !(drmem.flags & DRCONF_MEM_ASSIGNED))
+ continue;
+
+- nid = of_drconf_to_nid_single(&drmem, &aa);
+-
+- fake_numa_create_new_node(
+- ((drmem.base_addr + lmb_size) >> PAGE_SHIFT),
++ base = drmem.base_addr;
++ size = lmb_size;
++ ranges = 1;
++
++ if (is_kexec_kdump) {
++ ranges = read_usm_ranges(&usm);
++ if (!ranges) /* there are no (base, size) duple */
++ continue;
++ }
++ do {
++ if (is_kexec_kdump) {
++ base = read_n_cells(n_mem_addr_cells, &usm);
++ size = read_n_cells(n_mem_size_cells, &usm);
++ }
++ nid = of_drconf_to_nid_single(&drmem, &aa);
++ fake_numa_create_new_node(
++ ((base + size) >> PAGE_SHIFT),
+ &nid);
+-
+- node_set_online(nid);
+-
+- size = numa_enforce_memory_limit(drmem.base_addr, lmb_size);
+- if (!size)
+- continue;
+-
+- add_active_range(nid, drmem.base_addr >> PAGE_SHIFT,
+- (drmem.base_addr >> PAGE_SHIFT)
+- + (size >> PAGE_SHIFT));
++ node_set_online(nid);
++ sz = numa_enforce_memory_limit(base, size);
++ if (sz)
++ add_active_range(nid, base >> PAGE_SHIFT,
++ (base >> PAGE_SHIFT)
++ + (sz >> PAGE_SHIFT));
++ } while (--ranges);
+ }
+ }
+
--- /dev/null
+Date: Thu, 9 Oct 2008 11:20:27 -0400
+From: Neil Horman <nhorman@tuxdriver.com>
+To: linux-kernel@vger.kernel.org, kexec@lists.infradead.org,
+ vgoyal@redhat.com, hbabu@us.ibm.com
+Subject: [PATCH] add additional symbols to /sys/kernel/vmcoreinfo data for
+ ppc(64)
+Cc: nhorman@tuxdriver.com
+
+Hey-
+	The makedumpfile dump filtering program, in some modes of operation, needs
+the node_data and/or contig_page_data symbols to function properly. These
+symbols are missing from the powerpc kernel. This patch adds those symbols.
+Tested successfully by me and the reporter.
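+
+With this change the vmcoreinfo note carries entries of the form below
+(the address is made up; the length is MAX_NUMNODES of the running
+configuration):
+
+	SYMBOL(node_data)=c000000000763a00
+	LENGTH(node_data)=16
+
+which is what makedumpfile parses to locate the per-node data.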
+
+Regards
+Neil
+
+Signed-off-by: Neil Horman <nhorman@tuxdriver.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+ arch/powerpc/kernel/machine_kexec.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+
+--- a/arch/powerpc/kernel/machine_kexec.c
++++ b/arch/powerpc/kernel/machine_kexec.c
+@@ -44,6 +44,14 @@ void machine_kexec_cleanup(struct kimage
+ ppc_md.machine_kexec_cleanup(image);
+ }
+
++void arch_crash_save_vmcoreinfo(void)
++{
++#ifdef CONFIG_NEED_MULTIPLE_NODES
++ VMCOREINFO_SYMBOL(node_data);
++ VMCOREINFO_LENGTH(node_data, MAX_NUMNODES);
++#endif
++}
++
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: cio: update sac values
+References: bnc#445100
+
+Symptom: Drivers based on fcx fail to start I/O.
+Problem: Values for the sac field have changed.
+Solution: Update code accordingly.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ arch/s390/include/asm/fcx.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+Index: linux-sles11/arch/s390/include/asm/fcx.h
+===================================================================
+--- linux-sles11.orig/arch/s390/include/asm/fcx.h
++++ linux-sles11/arch/s390/include/asm/fcx.h
+@@ -248,8 +248,8 @@ struct dcw {
+ #define TCCB_MAX_SIZE (sizeof(struct tccb_tcah) + \
+ TCCB_MAX_DCW * sizeof(struct dcw) + \
+ sizeof(struct tccb_tcat))
+-#define TCCB_SAC_DEFAULT 0xf901
+-#define TCCB_SAC_INTRG 0xf902
++#define TCCB_SAC_DEFAULT 0x1ffe
++#define TCCB_SAC_INTRG 0x1fff
+
+ /**
+ * struct tccb_tcah - Transport-Command-Area Header (TCAH)
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: zfcp: Remove message for failed port
+References: bnc#464466
+
+Symptom: During opening of an adapter the message "Remote port ...
+         could not be opened" is emitted for initiator ports,
+         confusing users.
+Problem: The port scan tries to open all ports, including
+         initiator ports, to determine whether they are target ports.
+         Sometimes, a different error status is returned for the
+         initiator ports, triggering the message mentioned above.
+Solution: Remove the message; target port failures will be checked
+          later in the error recovery, which prints a different
+          message if necessary.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ Documentation/kmsg/s390/zfcp | 15 ---------------
+ drivers/s390/scsi/zfcp_dbf.c | 2 +-
+ drivers/s390/scsi/zfcp_fsf.c | 6 ------
+ 3 files changed, 1 insertion(+), 22 deletions(-)
+
+--- a/Documentation/kmsg/s390/zfcp 2008-12-19 13:18:45.000000000 +0100
++++ b/Documentation/kmsg/s390/zfcp 2008-12-19 13:18:59.000000000 +0100
+@@ -677,21 +677,6 @@
+ */
+
+ /*?
+- * Text: "%s: Remote port 0x%016Lx could not be opened\n"
+- * Severity: Warning
+- * Parameter:
+- * @1: bus ID of the zfcp device
+- * @2: WWPN
+- * Description:
+- * The FCP adapter rejected a request to open the specified port. No retry
+- * is possible.
+- * User action:
+- * Verify the setup and try removing and adding the port again. If this
+- * problem persists, gather Linux debug data, collect the FCP adapter
+- * hardware logs, and report the problem to your support organization.
+- */
+-
+-/*?
+ * Text: "%s: LUN 0x%Lx on port 0x%Lx is already in use by CSS%d, MIF Image ID %x\n"
+ * Severity: Warning
+ * Parameter:
+--- a/drivers/s390/scsi/zfcp_dbf.c 2008-12-19 13:18:45.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_dbf.c 2008-12-19 13:18:59.000000000 +0100
+@@ -521,7 +521,7 @@ static const char *zfcp_rec_dbf_ids[] =
+ [29] = "link down",
+ [30] = "link up status read",
+ [31] = "open port failed",
+- [32] = "open port failed",
++ [32] = "",
+ [33] = "close port",
+ [34] = "open unit failed",
+ [35] = "exclusive open unit failed",
+--- a/drivers/s390/scsi/zfcp_fsf.c 2008-12-19 13:18:45.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_fsf.c 2008-12-19 13:18:59.000000000 +0100
+@@ -1405,13 +1405,7 @@ static void zfcp_fsf_open_port_handler(s
+ switch (header->fsf_status_qual.word[0]) {
+ case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
+ case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
+- req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+- break;
+ case FSF_SQ_NO_RETRY_POSSIBLE:
+- dev_warn(&req->adapter->ccw_device->dev,
+- "Remote port 0x%016Lx could not be opened\n",
+- (unsigned long long)port->wwpn);
+- zfcp_erp_port_failed(port, 32, req);
+ req->status |= ZFCP_STATUS_FSFREQ_ERROR;
+ break;
+ }
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: zfcp: Add support for unchained FSF requests
+References: bnc#464466
+
+Symptom: On a z900, zfcp loops in error recovery.
+Problem: The z900 requires support for unchained FSF requests for
+         CT and ELS requests. The chained format triggers the ERP
+         from the qdio error handler.
+Solution: Check the hardware feature flag and send unchained CT
+          and ELS requests if chaining is not supported. Adapt the
+          size of the GPN_FT request as necessary and add debug data
+          and a warning in case the CT request hits a limit.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ Documentation/kmsg/s390/zfcp | 16 ++++++++++++
+ drivers/s390/scsi/zfcp_dbf.c | 2 +
+ drivers/s390/scsi/zfcp_dbf.h | 1
+ drivers/s390/scsi/zfcp_def.h | 9 -------
+ drivers/s390/scsi/zfcp_fc.c | 55 ++++++++++++++++++++++++-------------------
+ drivers/s390/scsi/zfcp_fsf.c | 32 +++++++++++++++++++------
+ drivers/s390/scsi/zfcp_fsf.h | 2 +
+ 7 files changed, 77 insertions(+), 40 deletions(-)
+
+--- a/drivers/s390/scsi/zfcp_fc.c 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_fc.c 2008-12-19 13:36:27.000000000 +0100
+@@ -25,9 +25,12 @@ struct gpn_ft_resp_acc {
+ u64 wwpn;
+ } __attribute__ ((packed));
+
+-#define ZFCP_GPN_FT_ENTRIES ((PAGE_SIZE - sizeof(struct ct_hdr)) \
+- / sizeof(struct gpn_ft_resp_acc))
++#define ZFCP_CT_SIZE_ONE_PAGE (PAGE_SIZE - sizeof(struct ct_hdr))
++#define ZFCP_GPN_FT_ENTRIES (ZFCP_CT_SIZE_ONE_PAGE \
++ / sizeof(struct gpn_ft_resp_acc))
+ #define ZFCP_GPN_FT_BUFFERS 4
++#define ZFCP_GPN_FT_MAX_SIZE (ZFCP_GPN_FT_BUFFERS * PAGE_SIZE \
++ - sizeof(struct ct_hdr))
+ #define ZFCP_GPN_FT_MAX_ENTRIES ZFCP_GPN_FT_BUFFERS * (ZFCP_GPN_FT_ENTRIES + 1)
+
+ struct ct_iu_gpn_ft_resp {
+@@ -283,8 +286,6 @@ int static zfcp_fc_ns_gid_pn_request(str
+ gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
+ gid_pn->ct.req = &gid_pn->req;
+ gid_pn->ct.resp = &gid_pn->resp;
+- gid_pn->ct.req_count = 1;
+- gid_pn->ct.resp_count = 1;
+ sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
+ sizeof(struct ct_iu_gid_pn_req));
+ sg_init_one(&gid_pn->resp, &gid_pn->ct_iu_resp,
+@@ -296,7 +297,7 @@ int static zfcp_fc_ns_gid_pn_request(str
+ gid_pn->ct_iu_req.header.gs_subtype = ZFCP_CT_NAME_SERVER;
+ gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
+ gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
+- gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_MAX_SIZE;
++ gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
+ gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
+
+ init_completion(&compl_rec.done);
+@@ -406,8 +407,6 @@ static int zfcp_fc_adisc(struct zfcp_por
+ sg_init_one(adisc->els.resp, &adisc->ls_adisc_acc,
+ sizeof(struct zfcp_ls_adisc));
+
+- adisc->els.req_count = 1;
+- adisc->els.resp_count = 1;
+ adisc->els.adapter = adapter;
+ adisc->els.port = port;
+ adisc->els.d_id = port->d_id;
+@@ -447,17 +446,17 @@ void zfcp_test_link(struct zfcp_port *po
+ zfcp_erp_port_forced_reopen(port, 0, 65, NULL);
+ }
+
+-static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft)
++static void zfcp_free_sg_env(struct zfcp_gpn_ft *gpn_ft, int buf_num)
+ {
+ struct scatterlist *sg = &gpn_ft->sg_req;
+
+ kfree(sg_virt(sg)); /* free request buffer */
+- zfcp_sg_free_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS);
++ zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
+
+ kfree(gpn_ft);
+ }
+
+-static struct zfcp_gpn_ft *zfcp_alloc_sg_env(void)
++static struct zfcp_gpn_ft *zfcp_alloc_sg_env(int buf_num)
+ {
+ struct zfcp_gpn_ft *gpn_ft;
+ struct ct_iu_gpn_ft_req *req;
+@@ -474,8 +473,8 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg
+ }
+ sg_init_one(&gpn_ft->sg_req, req, sizeof(*req));
+
+- if (zfcp_sg_setup_table(gpn_ft->sg_resp, ZFCP_GPN_FT_BUFFERS)) {
+- zfcp_free_sg_env(gpn_ft);
++ if (zfcp_sg_setup_table(gpn_ft->sg_resp, buf_num)) {
++ zfcp_free_sg_env(gpn_ft, buf_num);
+ gpn_ft = NULL;
+ }
+ out:
+@@ -484,7 +483,8 @@ out:
+
+
+ static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
+- struct zfcp_adapter *adapter)
++ struct zfcp_adapter *adapter,
++ int max_bytes)
+ {
+ struct zfcp_send_ct *ct = &gpn_ft->ct;
+ struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
+@@ -497,8 +497,7 @@ static int zfcp_scan_issue_gpn_ft(struct
+ req->header.gs_subtype = ZFCP_CT_NAME_SERVER;
+ req->header.options = ZFCP_CT_SYNCHRONOUS;
+ req->header.cmd_rsp_code = ZFCP_CT_GPN_FT;
+- req->header.max_res_size = (sizeof(struct gpn_ft_resp_acc) *
+- (ZFCP_GPN_FT_MAX_ENTRIES - 1)) >> 2;
++ req->header.max_res_size = max_bytes / 4;
+ req->flags = 0;
+ req->domain_id_scope = 0;
+ req->area_id_scope = 0;
+@@ -511,8 +510,6 @@ static int zfcp_scan_issue_gpn_ft(struct
+ ct->timeout = 10;
+ ct->req = &gpn_ft->sg_req;
+ ct->resp = gpn_ft->sg_resp;
+- ct->req_count = 1;
+- ct->resp_count = ZFCP_GPN_FT_BUFFERS;
+
+ init_completion(&compl_rec.done);
+ compl_rec.handler = NULL;
+@@ -539,7 +536,7 @@ static void zfcp_validate_port(struct zf
+ zfcp_port_dequeue(port);
+ }
+
+-static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft)
++static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
+ {
+ struct zfcp_send_ct *ct = &gpn_ft->ct;
+ struct scatterlist *sg = gpn_ft->sg_resp;
+@@ -559,13 +556,17 @@ static int zfcp_scan_eval_gpn_ft(struct
+ return -EIO;
+ }
+
+- if (hdr->max_res_size)
++ if (hdr->max_res_size) {
++ dev_warn(&adapter->ccw_device->dev,
++ "The name server reported %d words residual data\n",
++ hdr->max_res_size);
+ return -E2BIG;
++ }
+
+ down(&zfcp_data.config_sema);
+
+ /* first entry is the header */
+- for (x = 1; x < ZFCP_GPN_FT_MAX_ENTRIES && !last; x++) {
++ for (x = 1; x < max_entries && !last; x++) {
+ if (x % (ZFCP_GPN_FT_ENTRIES + 1))
+ acc++;
+ else
+@@ -611,6 +612,12 @@ int zfcp_scan_ports(struct zfcp_adapter
+ {
+ int ret, i;
+ struct zfcp_gpn_ft *gpn_ft;
++ int chain, max_entries, buf_num, max_bytes;
++
++ chain = adapter->adapter_features & FSF_FEATURE_ELS_CT_CHAINED_SBALS;
++ buf_num = chain ? ZFCP_GPN_FT_BUFFERS : 1;
++ max_entries = chain ? ZFCP_GPN_FT_MAX_ENTRIES : ZFCP_GPN_FT_ENTRIES;
++ max_bytes = chain ? ZFCP_GPN_FT_MAX_SIZE : ZFCP_CT_SIZE_ONE_PAGE;
+
+ zfcp_erp_wait(adapter); /* wait until adapter is finished with ERP */
+ if (fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPORT)
+@@ -620,23 +627,23 @@ int zfcp_scan_ports(struct zfcp_adapter
+ if (ret)
+ return ret;
+
+- gpn_ft = zfcp_alloc_sg_env();
++ gpn_ft = zfcp_alloc_sg_env(buf_num);
+ if (!gpn_ft) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < 3; i++) {
+- ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter);
++ ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
+ if (!ret) {
+- ret = zfcp_scan_eval_gpn_ft(gpn_ft);
++ ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
+ if (ret == -EAGAIN)
+ ssleep(1);
+ else
+ break;
+ }
+ }
+- zfcp_free_sg_env(gpn_ft);
++ zfcp_free_sg_env(gpn_ft, buf_num);
+ out:
+ zfcp_wka_port_put(&adapter->nsp);
+ return ret;
+--- a/drivers/s390/scsi/zfcp_fsf.h 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_fsf.h 2008-12-19 13:36:27.000000000 +0100
+@@ -164,6 +164,7 @@
+ #define FSF_FEATURE_LUN_SHARING 0x00000004
+ #define FSF_FEATURE_NOTIFICATION_LOST 0x00000008
+ #define FSF_FEATURE_HBAAPI_MANAGEMENT 0x00000010
++#define FSF_FEATURE_ELS_CT_CHAINED_SBALS 0x00000020
+ #define FSF_FEATURE_UPDATE_ALERT 0x00000100
+ #define FSF_FEATURE_MEASUREMENT_DATA 0x00000200
+
+@@ -322,6 +323,7 @@ struct fsf_nport_serv_param {
+ u8 vendor_version_level[16];
+ } __attribute__ ((packed));
+
++#define FSF_PLOGI_MIN_LEN 112
+ struct fsf_plogi {
+ u32 code;
+ struct fsf_nport_serv_param serv_param;
+--- a/drivers/s390/scsi/zfcp_fsf.c 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_fsf.c 2008-12-19 13:36:27.000000000 +0100
+@@ -1012,12 +1012,29 @@ skip_fsfstatus:
+ send_ct->handler(send_ct->handler_data);
+ }
+
+-static int zfcp_fsf_setup_sbals(struct zfcp_fsf_req *req,
+- struct scatterlist *sg_req,
+- struct scatterlist *sg_resp, int max_sbals)
++static int zfcp_fsf_setup_ct_els_sbals(struct zfcp_fsf_req *req,
++ struct scatterlist *sg_req,
++ struct scatterlist *sg_resp,
++ int max_sbals)
+ {
++ struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
++ u32 feat = req->adapter->adapter_features;
+ int bytes;
+
++ if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
++ if (sg_req->length > PAGE_SIZE || sg_resp->length > PAGE_SIZE ||
++ !sg_is_last(sg_req) || !sg_is_last(sg_resp))
++ return -EOPNOTSUPP;
++
++ sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE_READ;
++ sbale[2].addr = sg_virt(sg_req);
++ sbale[2].length = sg_req->length;
++ sbale[3].addr = sg_virt(sg_resp);
++ sbale[3].length = sg_resp->length;
++ sbale[3].flags |= SBAL_FLAGS_LAST_ENTRY;
++ return 0;
++ }
++
+ bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+ sg_req, max_sbals);
+ if (bytes <= 0)
+@@ -1059,8 +1076,8 @@ int zfcp_fsf_send_ct(struct zfcp_send_ct
+ goto out;
+ }
+
+- ret = zfcp_fsf_setup_sbals(req, ct->req, ct->resp,
+- FSF_MAX_SBALS_PER_REQ);
++ ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
++ FSF_MAX_SBALS_PER_REQ);
+ if (ret)
+ goto failed_send;
+
+@@ -1170,7 +1187,7 @@ int zfcp_fsf_send_els(struct zfcp_send_e
+ goto out;
+ }
+
+- ret = zfcp_fsf_setup_sbals(req, els->req, els->resp, 2);
++ ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
+
+ if (ret)
+ goto failed_send;
+@@ -1433,7 +1450,8 @@ static void zfcp_fsf_open_port_handler(s
+ * Alternately, an ADISC/PDISC ELS should suffice, as well.
+ */
+ plogi = (struct fsf_plogi *) req->qtcb->bottom.support.els;
+- if (req->qtcb->bottom.support.els1_length >= sizeof(*plogi)) {
++ if (req->qtcb->bottom.support.els1_length >=
++ FSF_PLOGI_MIN_LEN) {
+ if (plogi->serv_param.wwpn != port->wwpn)
+ atomic_clear_mask(ZFCP_STATUS_PORT_DID_DID,
+ &port->status);
+--- a/Documentation/kmsg/s390/zfcp 2008-12-19 13:36:23.000000000 +0100
++++ b/Documentation/kmsg/s390/zfcp 2008-12-19 13:36:27.000000000 +0100
+@@ -813,3 +813,19 @@
+ * problem persists, gather Linux debug data, collect the FCP adapter
+ * hardware logs, and report the problem to your support organization.
+ */
++
++/*?
++ * Text: "%s: The name server reported %d words residual data\n"
++ * Severity: Warning
++ * Parameter:
++ * @1: bus ID of the zfcp device
++ * @2: number of words in residual data
++ * Description:
++ * The fibre channel name server sent too much information about remote ports.
++ * The zfcp device driver did not receive sufficient information to attach all
++ * available remote ports in the SAN.
++ * User action:
++ * Verify that you are running the latest firmware level on the FCP
++ * adapter. Check your SAN setup and consider reducing the number of ports
++ * visible to the FCP adapter by using more restrictive zoning in the SAN.
++ */
+--- a/drivers/s390/scsi/zfcp_dbf.c 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_dbf.c 2008-12-19 13:36:27.000000000 +0100
+@@ -935,6 +935,7 @@ void zfcp_san_dbf_event_ct_response(stru
+ rct->reason_code = hdr->reason_code;
+ rct->expl = hdr->reason_code_expl;
+ rct->vendor_unique = hdr->vendor_unique;
++ rct->max_res_size = hdr->max_res_size;
+ rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
+ ZFCP_DBF_SAN_MAX_PAYLOAD);
+ debug_event(adapter->san_dbf, level, r, sizeof(*r));
+@@ -1042,6 +1043,7 @@ static int zfcp_san_dbf_view_format(debu
+ zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
+ zfcp_dbf_out(&p, "reason_code_expl", "0x%02x", ct->expl);
+ zfcp_dbf_out(&p, "vendor_unique", "0x%02x", ct->vendor_unique);
++ zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
+ } else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
+ strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
+--- a/drivers/s390/scsi/zfcp_dbf.h 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_dbf.h 2008-12-19 13:36:27.000000000 +0100
+@@ -171,6 +171,7 @@ struct zfcp_san_dbf_record_ct_response {
+ u8 reason_code;
+ u8 expl;
+ u8 vendor_unique;
++ u16 max_res_size;
+ u32 len;
+ } __attribute__ ((packed));
+
+--- a/drivers/s390/scsi/zfcp_def.h 2008-12-19 13:36:23.000000000 +0100
++++ b/drivers/s390/scsi/zfcp_def.h 2008-12-19 13:36:27.000000000 +0100
+@@ -210,7 +210,6 @@ struct zfcp_ls_adisc {
+ #define ZFCP_CT_UNABLE_TO_PERFORM_CMD 0x09
+ #define ZFCP_CT_GID_PN 0x0121
+ #define ZFCP_CT_GPN_FT 0x0172
+-#define ZFCP_CT_MAX_SIZE 0x1020
+ #define ZFCP_CT_ACCEPT 0x8002
+ #define ZFCP_CT_REJECT 0x8001
+
+@@ -339,8 +338,6 @@ struct ct_iu_gid_pn_resp {
+ * @wka_port: port where the request is sent to
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+- * @req_count: number of elements in request scatter-gather list
+- * @resp_count: number of elements in response scatter-gather list
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @timeout: FSF timeout for this request
+@@ -351,8 +348,6 @@ struct zfcp_send_ct {
+ struct zfcp_wka_port *wka_port;
+ struct scatterlist *req;
+ struct scatterlist *resp;
+- unsigned int req_count;
+- unsigned int resp_count;
+ void (*handler)(unsigned long);
+ unsigned long handler_data;
+ int timeout;
+@@ -377,8 +372,6 @@ struct zfcp_gid_pn_data {
+ * @d_id: destiniation id of port where request is sent to
+ * @req: scatter-gather list for request
+ * @resp: scatter-gather list for response
+- * @req_count: number of elements in request scatter-gather list
+- * @resp_count: number of elements in response scatter-gather list
+ * @handler: handler function (called for response to the request)
+ * @handler_data: data passed to handler function
+ * @completion: completion for synchronization purposes
+@@ -391,8 +384,6 @@ struct zfcp_send_els {
+ u32 d_id;
+ struct scatterlist *req;
+ struct scatterlist *resp;
+- unsigned int req_count;
+- unsigned int resp_count;
+ void (*handler)(unsigned long);
+ unsigned long handler_data;
+ struct completion *completion;
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: kernel: fix cpu topology support
+References: bnc#464466
+
+Symptom: CPU topology changes aren't recognized by the scheduler.
+Problem: The common code scheduler used to have a hook which could be
+ called from architecture code to trigger a rebuild of all
+         scheduling domains when cpu topology changed. This hook was
+         removed erroneously, so cpu topology change notifications
+         got lost.
+Solution: Re-add the hook. This patch also removes some unused code
+ from the s390 specific cpu topology code.
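+
+For illustration, the contract between the generic scheduler code and the
+architecture hook now looks roughly like this (sketch only; the weak
+default is taken from the patch below, the caller is simplified):
+
+	/* Default: topology never changes unless an arch overrides this. */
+	int __attribute__((weak)) arch_update_cpu_topology(void)
+	{
+		return 0;
+	}
+
+	/* In partition_sched_domains(): a reported topology change forces
+	 * a full rebuild instead of reusing matching old domains. */
+	top_changed = arch_update_cpu_topology();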
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ arch/s390/kernel/topology.c | 35 ++++++++++-------------------------
+ include/linux/topology.h | 2 +-
+ kernel/sched.c | 16 +++++++++++++---
+ 3 files changed, 24 insertions(+), 29 deletions(-)
+
+--- a/arch/s390/kernel/topology.c
++++ b/arch/s390/kernel/topology.c
+@@ -14,6 +14,7 @@
+ #include <linux/workqueue.h>
+ #include <linux/cpu.h>
+ #include <linux/smp.h>
++#include <linux/cpuset.h>
+ #include <asm/delay.h>
+ #include <asm/s390_ext.h>
+ #include <asm/sysinfo.h>
+@@ -64,7 +65,6 @@ static void topology_work_fn(struct work
+ static struct tl_info *tl_info;
+ static struct core_info core_info;
+ static int machine_has_topology;
+-static int machine_has_topology_irq;
+ static struct timer_list topology_timer;
+ static void set_topology_timer(void);
+ static DECLARE_WORK(topology_work, topology_work_fn);
+@@ -81,7 +81,7 @@ cpumask_t cpu_coregroup_map(unsigned int
+
+ cpus_clear(mask);
+ if (!topology_enabled || !machine_has_topology)
+- return cpu_present_map;
++ return cpu_possible_map;
+ spin_lock_irqsave(&topology_lock, flags);
+ while (core) {
+ if (cpu_isset(cpu, core->mask)) {
+@@ -171,7 +171,7 @@ static void topology_update_polarization
+ int cpu;
+
+ mutex_lock(&smp_cpu_state_mutex);
+- for_each_present_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ smp_cpu_polarization[cpu] = POLARIZATION_HRZ;
+ mutex_unlock(&smp_cpu_state_mutex);
+ }
+@@ -202,7 +202,7 @@ int topology_set_cpu_management(int fc)
+ rc = ptf(PTF_HORIZONTAL);
+ if (rc)
+ return -EBUSY;
+- for_each_present_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ smp_cpu_polarization[cpu] = POLARIZATION_UNKNWN;
+ return rc;
+ }
+@@ -211,11 +211,11 @@ static void update_cpu_core_map(void)
+ {
+ int cpu;
+
+- for_each_present_cpu(cpu)
++ for_each_possible_cpu(cpu)
+ cpu_core_map[cpu] = cpu_coregroup_map(cpu);
+ }
+
+-void arch_update_cpu_topology(void)
++int arch_update_cpu_topology(void)
+ {
+ struct tl_info *info = tl_info;
+ struct sys_device *sysdev;
+@@ -224,7 +224,7 @@ void arch_update_cpu_topology(void)
+ if (!machine_has_topology) {
+ update_cpu_core_map();
+ topology_update_polarization_simple();
+- return;
++ return 0;
+ }
+ stsi(info, 15, 1, 2);
+ tl_to_cores(info);
+@@ -233,11 +233,12 @@ void arch_update_cpu_topology(void)
+ sysdev = get_cpu_sysdev(cpu);
+ kobject_uevent(&sysdev->kobj, KOBJ_CHANGE);
+ }
++ return 1;
+ }
+
+ static void topology_work_fn(struct work_struct *work)
+ {
+- arch_reinit_sched_domains();
++ rebuild_sched_domains();
+ }
+
+ void topology_schedule_update(void)
+@@ -260,11 +261,6 @@ static void set_topology_timer(void)
+ add_timer(&topology_timer);
+ }
+
+-static void topology_interrupt(__u16 code)
+-{
+- schedule_work(&topology_work);
+-}
+-
+ static int __init early_parse_topology(char *p)
+ {
+ if (strncmp(p, "on", 2))
+@@ -284,14 +280,7 @@ static int __init init_topology_update(v
+ goto out;
+ }
+ init_timer_deferrable(&topology_timer);
+- if (machine_has_topology_irq) {
+- rc = register_external_interrupt(0x2005, topology_interrupt);
+- if (rc)
+- goto out;
+- ctl_set_bit(0, 8);
+- }
+- else
+- set_topology_timer();
++ set_topology_timer();
+ out:
+ update_cpu_core_map();
+ return rc;
+@@ -312,9 +301,6 @@ void __init s390_init_cpu_topology(void)
+ return;
+ machine_has_topology = 1;
+
+- if (facility_bits & (1ULL << 51))
+- machine_has_topology_irq = 1;
+-
+ tl_info = alloc_bootmem_pages(PAGE_SIZE);
+ info = tl_info;
+ stsi(info, 15, 1, 2);
+@@ -338,5 +324,4 @@ void __init s390_init_cpu_topology(void)
+ return;
+ error:
+ machine_has_topology = 0;
+- machine_has_topology_irq = 0;
+ }
+--- a/include/linux/topology.h
++++ b/include/linux/topology.h
+@@ -49,7 +49,7 @@
+ for_each_online_node(node) \
+ if (nr_cpus_node(node))
+
+-void arch_update_cpu_topology(void);
++int arch_update_cpu_topology(void);
+
+ /* Conform to ACPI 2.0 SLIT distance definitions */
+ #define LOCAL_DISTANCE 10
+--- a/kernel/sched.c
++++ b/kernel/sched.c
+@@ -7640,8 +7640,14 @@ static struct sched_domain_attr *dattr_c
+ */
+ static cpumask_t fallback_doms;
+
+-void __attribute__((weak)) arch_update_cpu_topology(void)
++/*
++ * arch_update_cpu_topology lets virtualized architectures update the
++ * cpu core maps. It is supposed to return 1 if the topology changed
++ * or 0 if it stayed the same.
++ */
++int __attribute__((weak)) arch_update_cpu_topology(void)
+ {
++ return 0;
+ }
+
+ /*
+@@ -7735,17 +7741,21 @@ void partition_sched_domains(int ndoms_n
+ struct sched_domain_attr *dattr_new)
+ {
+ int i, j, n;
++ int top_changed;
+
+ mutex_lock(&sched_domains_mutex);
+
+ /* always unregister in case we don't destroy any domains */
+ unregister_sched_domain_sysctl();
+
++ /* Let architecture update cpu core mappings. */
++ top_changed = arch_update_cpu_topology();
++
+ n = doms_new ? ndoms_new : 0;
+
+ /* Destroy deleted domains */
+ for (i = 0; i < ndoms_cur; i++) {
+- for (j = 0; j < n; j++) {
++ for (j = 0; j < n && !top_changed; j++) {
+ if (cpus_equal(doms_cur[i], doms_new[j])
+ && dattrs_equal(dattr_cur, i, dattr_new, j))
+ goto match1;
+@@ -7765,7 +7775,7 @@ match1:
+
+ /* Build new domains */
+ for (i = 0; i < ndoms_new; i++) {
+- for (j = 0; j < ndoms_cur; j++) {
++ for (j = 0; j < ndoms_cur && !top_changed; j++) {
+ if (cpus_equal(doms_new[i], doms_cur[j])
+ && dattrs_equal(dattr_new, i, dattr_cur, j))
+ goto match2;
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: cio: fix subchannel multipath mode setup
+References: bnc#466462,LTC#51047
+
+Symptom: Undefined behavior when trying to access DASD devices with more
+ than one CHPID: e.g. I/O errors due to timeouts after missing
+ interrupts, slow access to DASDs because single path mode is used.
+Problem: Setup of subchannel multipath mode is not performed correctly
+ because changes to a local buffer are lost before they are sent
+ to the channel subsystem. In this state, the control unit assumes
+ multipath mode while the channel subsystem expects single path
+ mode. As a result, interrupts may not be correctly recognized
+ which leads to timeout situations and eventually I/O errors.
+ Also single path processing may slow down DASD access.
+Solution: Apply changes to the subchannel configuration after modifying
+ the local buffer.
+
+Acked-by: John Jolly <jjolly@suse.de>
+
+---
+ drivers/s390/cio/device.c | 6 ++++++
+ drivers/s390/cio/device_fsm.c | 2 ++
+ 2 files changed, 8 insertions(+)
+
+Index: linux-sles11/drivers/s390/cio/device.c
+===================================================================
+--- linux-sles11.orig/drivers/s390/cio/device.c
++++ linux-sles11/drivers/s390/cio/device.c
+@@ -1246,6 +1246,9 @@ static int io_subchannel_probe(struct su
+ return 0;
+ }
+ io_subchannel_init_fields(sch);
++ rc = cio_modify(sch);
++ if (rc)
++ goto out_schedule;
+ /*
+ * First check if a fitting device may be found amongst the
+ * disconnected devices or in the orphanage.
+@@ -1676,6 +1679,9 @@ static int ccw_device_console_enable(str
+ sch->private = cio_get_console_priv();
+ memset(sch->private, 0, sizeof(struct io_subchannel_private));
+ io_subchannel_init_fields(sch);
++ rc = cio_modify(sch);
++ if (rc)
++ return rc;
+ sch->driver = &io_subchannel_driver;
+ /* Initialize the ccw_device structure. */
+ cdev->dev.parent= &sch->dev;
+Index: linux-sles11/drivers/s390/cio/device_fsm.c
+===================================================================
+--- linux-sles11.orig/drivers/s390/cio/device_fsm.c
++++ linux-sles11/drivers/s390/cio/device_fsm.c
+@@ -1028,6 +1028,8 @@ void ccw_device_trigger_reprobe(struct c
+ sch->schib.pmcw.ena = 0;
+ if ((sch->lpm & (sch->lpm - 1)) != 0)
+ sch->schib.pmcw.mp = 1;
++ if (cio_modify(sch))
++ return;
+ /* We should also udate ssd info, but this has to wait. */
+ /* Check if this is another device which appeared on the same sch. */
+ if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: zfcp: fix memory alignment for GPN_FT requests.
+References: bnc#466462
+
+Symptom: An unexpected adapter reopen can be triggered in case
+ of a wrongly aligned GPN_FT nameserver request.
+Problem: A request that crosses a page boundary is not allowed.
+         The standard memory allocation does not guarantee that all
+         of the requested memory lies within one page.
+Solution: Make sure the requested memory is always within one page.
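+
+         A slab cache whose alignment is a power of two not smaller
+         than the object size guarantees this, as long as the object
+         fits into one page. zfcp_cache_create() used below is assumed
+         to be a small wrapper along these lines:
+
+	static struct kmem_cache *zfcp_cache_create(int size, char *name)
+	{
+		int align = 1;
+
+		/* round the alignment up to a power of two >= size so
+		 * that no object can straddle a page boundary */
+		while ((size - align) > 0)
+			align <<= 1;
+		return kmem_cache_create(name, size, align, 0, NULL);
+	}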
+
+Acked-by: John Jolly <jjolly@suse.de>
+
+---
+ drivers/s390/scsi/zfcp_aux.c | 7 +++++++
+ drivers/s390/scsi/zfcp_def.h | 9 +++++++++
+ drivers/s390/scsi/zfcp_fc.c | 13 +++----------
+ 3 files changed, 19 insertions(+), 10 deletions(-)
+
+Index: linux-sles11/drivers/s390/scsi/zfcp_aux.c
+===================================================================
+--- linux-sles11.orig/drivers/s390/scsi/zfcp_aux.c
++++ linux-sles11/drivers/s390/scsi/zfcp_aux.c
+@@ -175,6 +175,11 @@ static int __init zfcp_module_init(void)
+ if (!zfcp_data.gid_pn_cache)
+ goto out_gid_cache;
+
++ zfcp_data.gpn_ft_cache = zfcp_cache_create(
++ sizeof(struct ct_iu_gpn_ft_req), "zfcp_gpn");
++ if (!zfcp_data.gpn_ft_cache)
++ goto out_gpn_cache;
++
+ zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
+
+ INIT_LIST_HEAD(&zfcp_data.adapter_list_head);
+@@ -209,6 +214,8 @@ out_ccw_register:
+ out_misc:
+ fc_release_transport(zfcp_data.scsi_transport_template);
+ out_transport:
++ kmem_cache_destroy(zfcp_data.gpn_ft_cache);
++out_gpn_cache:
+ kmem_cache_destroy(zfcp_data.gid_pn_cache);
+ out_gid_cache:
+ kmem_cache_destroy(zfcp_data.sr_buffer_cache);
+Index: linux-sles11/drivers/s390/scsi/zfcp_def.h
+===================================================================
+--- linux-sles11.orig/drivers/s390/scsi/zfcp_def.h
++++ linux-sles11/drivers/s390/scsi/zfcp_def.h
+@@ -333,6 +333,14 @@ struct ct_iu_gid_pn_resp {
+ u32 d_id;
+ } __attribute__ ((packed));
+
++struct ct_iu_gpn_ft_req {
++ struct ct_hdr header;
++ u8 flags;
++ u8 domain_id_scope;
++ u8 area_id_scope;
++ u8 fc4_type;
++} __attribute__ ((packed));
++
+ /**
+ * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
+ * @wka_port: port where the request is sent to
+@@ -595,6 +603,7 @@ struct zfcp_data {
+ struct kmem_cache *fsf_req_qtcb_cache;
+ struct kmem_cache *sr_buffer_cache;
+ struct kmem_cache *gid_pn_cache;
++ struct kmem_cache *gpn_ft_cache;
+ struct workqueue_struct *work_queue;
+ };
+
+Index: linux-sles11/drivers/s390/scsi/zfcp_fc.c
+===================================================================
+--- linux-sles11.orig/drivers/s390/scsi/zfcp_fc.c
++++ linux-sles11/drivers/s390/scsi/zfcp_fc.c
+@@ -10,14 +10,6 @@
+
+ #include "zfcp_ext.h"
+
+-struct ct_iu_gpn_ft_req {
+- struct ct_hdr header;
+- u8 flags;
+- u8 domain_id_scope;
+- u8 area_id_scope;
+- u8 fc4_type;
+-} __attribute__ ((packed));
+-
+ struct gpn_ft_resp_acc {
+ u8 control;
+ u8 port_id[3];
+@@ -450,7 +442,8 @@ static void zfcp_free_sg_env(struct zfcp
+ {
+ struct scatterlist *sg = &gpn_ft->sg_req;
+
+- kfree(sg_virt(sg)); /* free request buffer */
++ /* free request buffer */
++ kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
+ zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
+
+ kfree(gpn_ft);
+@@ -465,7 +458,7 @@ static struct zfcp_gpn_ft *zfcp_alloc_sg
+ if (!gpn_ft)
+ return NULL;
+
+- req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
++ req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
+ if (!req) {
+ kfree(gpn_ft);
+ gpn_ft = NULL;
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: iucv: failing cpu hot remove for inactive iucv
+References: bnc#466462,LTC#51104
+
+Symptom: cpu hot remove rejected with NOTIFY_BAD
+Problem: If the iucv module is compiled in / loaded but no user
+ is registered, cpu hot remove doesn't work. The iucv
+         cpu hotplug notifier on CPU_DOWN_PREPARE checks whether
+         the iucv_buffer_cpumask would be empty after the
+         corresponding bit is cleared. However, the bit
+         was never set since iucv wasn't enabled. That causes
+         all cpu hot unplug operations to fail in this scenario.
+Solution: Use iucv_path_table as an indicator of whether iucv is
+         enabled or not.
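+
+         The resulting guard in the hotplug notifier is trivial (sketch):
+
+	case CPU_DOWN_PREPARE:
+		if (!iucv_path_table)	/* iucv was never enabled, so   */
+			break;		/* there is nothing to retrieve */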
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+
+ net/iucv/iucv.c | 18 +++++++++++-------
+ 1 file changed, 11 insertions(+), 7 deletions(-)
+
+Index: linux-sles11/net/iucv/iucv.c
+===================================================================
+--- linux-sles11.orig/net/iucv/iucv.c
++++ linux-sles11/net/iucv/iucv.c
+@@ -516,6 +516,7 @@ static int iucv_enable(void)
+ size_t alloc_size;
+ int cpu, rc;
+
++ get_online_cpus();
+ rc = -ENOMEM;
+ alloc_size = iucv_max_pathid * sizeof(struct iucv_path);
+ iucv_path_table = kzalloc(alloc_size, GFP_KERNEL);
+@@ -523,19 +524,17 @@ static int iucv_enable(void)
+ goto out;
+ /* Declare per cpu buffers. */
+ rc = -EIO;
+- get_online_cpus();
+ for_each_online_cpu(cpu)
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
+ if (cpus_empty(iucv_buffer_cpumask))
+ /* No cpu could declare an iucv buffer. */
+- goto out_path;
++ goto out;
+ put_online_cpus();
+ return 0;
+-
+-out_path:
+- put_online_cpus();
+- kfree(iucv_path_table);
+ out:
++ kfree(iucv_path_table);
++ iucv_path_table = NULL;
++ put_online_cpus();
+ return rc;
+ }
+
+@@ -550,8 +549,9 @@ static void iucv_disable(void)
+ {
+ get_online_cpus();
+ on_each_cpu(iucv_retrieve_cpu, NULL, 1);
+- put_online_cpus();
+ kfree(iucv_path_table);
++ iucv_path_table = NULL;
++ put_online_cpus();
+ }
+
+ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
+@@ -588,10 +588,14 @@ static int __cpuinit iucv_cpu_notify(str
+ case CPU_ONLINE_FROZEN:
+ case CPU_DOWN_FAILED:
+ case CPU_DOWN_FAILED_FROZEN:
++ if (!iucv_path_table)
++ break;
+ smp_call_function_single(cpu, iucv_declare_cpu, NULL, 1);
+ break;
+ case CPU_DOWN_PREPARE:
+ case CPU_DOWN_PREPARE_FROZEN:
++ if (!iucv_path_table)
++ break;
+ cpumask = iucv_buffer_cpumask;
+ cpu_clear(cpu, cpumask);
+ if (cpus_empty(cpumask))
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: kernel: 31 bit compat sigaltstack syscall fails with -EFAULT.
+References: bnc#466462,LTC#50888
+
+Symptom: When 31 bit user space programs call sigaltstack on a 64 bit Linux
+ OS, the system call returns -1 with errno=EFAULT.
+Problem: The 31 bit pointer passed to the system call is extended
+ to 64 bit, but the high order bits are not set to zero.
+ The kernel detects the invalid user space pointer and
+ returns -EFAULT.
+Solution: Call sys32_sigaltstack_wrapper() instead of sys32_sigaltstack().
+ The wrapper function sets the high order bits to zero.
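+
+          In C terms the wrapper does the equivalent of the following
+          sketch (illustrative only, argument types simplified; the real
+          wrapper is assembler in arch/s390/kernel/compat_wrapper.S):
+
+	asmlinkage long sys32_sigaltstack_wrapper(u32 uss, u32 uoss)
+	{
+		/* the u32 -> u64 conversion clears the high order bits */
+		return sys32_sigaltstack((void __user *)(u64)uss,
+					 (void __user *)(u64)uoss);
+	}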
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ arch/s390/kernel/syscalls.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: linux-sles11/arch/s390/kernel/syscalls.S
+===================================================================
+--- linux-sles11.orig/arch/s390/kernel/syscalls.S
++++ linux-sles11/arch/s390/kernel/syscalls.S
+@@ -194,7 +194,7 @@ SYSCALL(sys_chown16,sys_ni_syscall,sys32
+ SYSCALL(sys_getcwd,sys_getcwd,sys32_getcwd_wrapper)
+ SYSCALL(sys_capget,sys_capget,sys32_capget_wrapper)
+ SYSCALL(sys_capset,sys_capset,sys32_capset_wrapper) /* 185 */
+-SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack)
++SYSCALL(sys_sigaltstack,sys_sigaltstack,sys32_sigaltstack_wrapper)
+ SYSCALL(sys_sendfile,sys_sendfile64,sys32_sendfile_wrapper)
+ NI_SYSCALL /* streams1 */
+ NI_SYSCALL /* streams2 */
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: topology: introduce arch specific SD_MC_INIT initializer
+References: bnc#477666,LTC#51049
+
+Symptom: Up to 30% more cpu usage for some workloads.
+Problem: For some workloads the extra multicore scheduling domain causes
+         additional cpu usage because of overly optimistic assumptions
+         about when it is ok to migrate processes from one cpu to another.
+         The default values for SD_MC_INIT don't work well on s390.
+Solution: Define an architecture specific SD_MC_INIT scheduling domain
+         initializer which fixes the regression.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+ arch/s390/include/asm/topology.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+Index: linux-sles11/arch/s390/include/asm/topology.h
+===================================================================
+--- linux-sles11.orig/arch/s390/include/asm/topology.h
++++ linux-sles11/arch/s390/include/asm/topology.h
+@@ -28,6 +28,8 @@ static inline void s390_init_cpu_topolog
+ };
+ #endif
+
++#define SD_MC_INIT SD_CPU_INIT
++
+ #include <asm-generic/topology.h>
+
+ #endif /* _ASM_S390_TOPOLOGY_H */
--- /dev/null
+From: Gerald Schaefer <geraldsc@de.ibm.com>
+Subject: kernel: fix idle time accounting
+References: bnc#518291,LTC#54879
+
+Symptom: The idle time reported in /proc/stat is too large
+Problem: The time spent with the timer tick disabled is accounted twice,
+ once by the architecture backend and another time by the
+ generic timer code.
+Solution: Stop accounting idle time in the generic timer code if
+ CONFIG_VIRT_CPU_ACCOUNTING is enabled.
+
+Acked-by: John Jolly <jjolly@suse.de>
+---
+
+ kernel/time/tick-sched.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+Index: linux-sles11/kernel/time/tick-sched.c
+===================================================================
+--- linux-sles11.orig/kernel/time/tick-sched.c
++++ linux-sles11/kernel/time/tick-sched.c
+@@ -377,7 +377,9 @@ void tick_nohz_restart_sched_tick(void)
+ {
+ int cpu = smp_processor_id();
+ struct tick_sched *ts = &per_cpu(tick_cpu_sched, cpu);
++#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ unsigned long ticks;
++#endif
+ ktime_t now;
+
+ local_irq_disable();
+@@ -399,6 +401,7 @@ void tick_nohz_restart_sched_tick(void)
+ tick_do_update_jiffies64(now);
+ cpu_clear(cpu, nohz_cpu_mask);
+
++#ifndef CONFIG_VIRT_CPU_ACCOUNTING
+ /*
+ * We stopped the tick in idle. Update process times would miss the
+ * time we slept as update_process_times does only a 1 tick
+@@ -414,6 +417,7 @@ void tick_nohz_restart_sched_tick(void)
+ jiffies_to_cputime(ticks));
+ sub_preempt_count(HARDIRQ_OFFSET);
+ }
++#endif
+
+ touch_softlockup_watchdog();
+ /*
--- /dev/null
+From: Martin Wilck <martin.wilck@fujitsu-siemens.com>
+Subject: x86-64: Make APIC timer calibration SMI-safe
+References: bnc#410452, bnc#535947
+
+APIC timer calibration can be adversely affected if SMIs are
+triggered during calibration. Make the calibration algorithm
+more robust so that it detects and works around this situation.
+
+This patch isn't upstream. Explanation from Martin Wilck:
+
+"Unfortunately, while the patch got generally positive review, it hasn't been
+finally accepted. Upstream wanted to see a solution for all affected
+architectures (in particular, also i386) which was more than we were able to do
+at the time, given that the pressure had been reduced by finding a BIOS fix."
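+
+The core of the workaround is to bracket each APIC counter read with two
+TSC reads (sketch, mirroring the helper added below):
+
+	rdtscll(tsc0);
+	apic = apic_read(APIC_TMCCT);
+	rdtscll(tsc1);
+	/* If tsc1 - tsc0 is small, no SMI widened the window and apic can
+	 * be paired with the TSC midpoint tsc0 + (tsc1 - tsc0) / 2 with an
+	 * error of at most half the window; otherwise retry a few times. */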
+
+Acked-by: Jean Delvare <jdelvare@suse.de>
+
+---
+ arch/x86/kernel/apic_64.c | 39 +++++++++++++++++++++++++++++++++------
+ 1 file changed, 33 insertions(+), 6 deletions(-)
+
+--- a/arch/x86/kernel/apic_64.c
++++ b/arch/x86/kernel/apic_64.c
+@@ -299,6 +299,31 @@ static void setup_APIC_timer(void)
+ }
+
+ /*
++ * Helper function for calibrate_APIC_clock(): Make sure that
++ * APIC TMCTT and TSC are read at the same time, to reasonable
++ * accuracy. On any sane system, the retry loop won't need more
++ * than a single retry, given that the rdtsc/apic_read/rdtsc
++ * sequence won't take more than a few cycles.
++ */
++
++#define MAX_DIFFERENCE 1000UL
++#define MAX_ITER 10
++static inline int
++__read_tsc_and_apic(unsigned long *tsc, unsigned *apic)
++{
++ unsigned long tsc0, tsc1, diff;
++ int i = 0;
++ do {
++ rdtscll(tsc0);
++ *apic = apic_read(APIC_TMCCT);
++ rdtscll(tsc1);
++ diff = tsc1 - tsc0;
++ } while (diff > MAX_DIFFERENCE && ++i < MAX_ITER);
++ *tsc = tsc0 + (diff >> 1);
++ return diff > MAX_DIFFERENCE ? -EIO : 0;
++}
++
++/*
+ * In this function we calibrate APIC bus clocks to the external
+ * timer. Unfortunately we cannot use jiffies and the timer irq
+ * to calibrate, since some later bootup code depends on getting
+@@ -317,7 +342,7 @@ static int __init calibrate_APIC_clock(v
+ {
+ unsigned apic, apic_start;
+ unsigned long tsc, tsc_start;
+- int result;
++ int result, err_start, err;
+
+ local_irq_disable();
+
+@@ -328,25 +353,27 @@ static int __init calibrate_APIC_clock(v
+ *
+ * No interrupt enable !
+ */
+- __setup_APIC_LVTT(250000000, 0, 0);
++ __setup_APIC_LVTT(0xffffffff, 0, 0);
+
+- apic_start = apic_read(APIC_TMCCT);
+ #ifdef CONFIG_X86_PM_TIMER
+ if (apic_calibrate_pmtmr && pmtmr_ioport) {
++ apic_start = apic_read(APIC_TMCCT);
+ pmtimer_wait(5000); /* 5ms wait */
+ apic = apic_read(APIC_TMCCT);
+ result = (apic_start - apic) * 1000L / 5;
+ } else
+ #endif
+ {
+- rdtscll(tsc_start);
++ err_start = __read_tsc_and_apic(&tsc_start, &apic_start);
+
+ do {
+- apic = apic_read(APIC_TMCCT);
+- rdtscll(tsc);
++ err = __read_tsc_and_apic(&tsc, &apic);
+ } while ((tsc - tsc_start) < TICK_COUNT &&
+ (apic_start - apic) < TICK_COUNT);
+
++ if (err_start || err)
++ printk(KERN_CRIT "calibrate_APIC_clock: SMI flood - "
++ "the APIC timer calibration may be wrong!\n");
+ result = (apic_start - apic) * 1000L * tsc_khz /
+ (tsc - tsc_start);
+ }
--- /dev/null
+From: Yi Zou <yi.zou@intel.com>
+Subject: [FcOE] change fcoe_sw sg_tablesize to SG_ALL
+References: bnc #459142
+
+Signed-off-by: Yi Zou <yi.zou@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/fcoe_sw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+
+--- a/drivers/scsi/fcoe/fcoe_sw.c
++++ b/drivers/scsi/fcoe/fcoe_sw.c
+@@ -100,7 +100,7 @@ static struct scsi_host_template fcoe_sw
+ .cmd_per_lun = 32,
+ .can_queue = FCOE_MAX_OUTSTANDING_COMMANDS,
+ .use_clustering = ENABLE_CLUSTERING,
+- .sg_tablesize = 4,
++ .sg_tablesize = SG_ALL,
+ .max_sectors = 0xffff,
+ };
+
--- /dev/null
+From: Yi Zou <yi.zou@intel.com>
+Subject: [FcOE] check return for fc_set_mfs
+References: bnc #459142
+
+Signed-off-by: Yi Zou <yi.zou@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/fcoe_sw.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+
+--- a/drivers/scsi/fcoe/fcoe_sw.c
++++ b/drivers/scsi/fcoe/fcoe_sw.c
+@@ -178,7 +178,8 @@ static int fcoe_sw_netdev_config(struct
+ */
+ mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+- fc_set_mfs(lp, mfs);
++ if (fc_set_mfs(lp, mfs))
++ return -EINVAL;
+
+ lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
--- /dev/null
+From: Chris Leech <christopher.leech@intel.com>
+Subject: [FcOE] fix frame length validation in the early receive path
+References: bnc #459142
+
+Validation of the frame length was missing before accessing the FC and FCoE
+headers. Some of the later checks were bogus: because of the way the fr_len
+variable and skb->len were being manipulated, they could never fail.
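+
+The added validation boils down to a single guard before any header access
+(sketch; the constants are defined in the fc_fcoe.h hunk below):
+
+	/* the 14-byte FCoE and 24-byte FC headers must be linear, and the
+	 * frame must at least hold both headers plus the 8-byte trailer */
+	if (unlikely(skb->len < FCOE_MIN_FRAME ||
+		     !pskb_may_pull(skb, FCOE_HEADER_LEN)))
+		goto err;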
+
+Signed-off-by: Chris Leech <christopher.leech@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/libfcoe.c | 48 +++++++++++++++++++++-----------------------
+ include/scsi/fc/fc_fcoe.h | 12 +++++++++++
+ include/scsi/fc_frame.h | 2 -
+ 3 files changed, 36 insertions(+), 26 deletions(-)
+
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -184,7 +184,6 @@ int fcoe_rcv(struct sk_buff *skb, struct
+ struct fcoe_rcv_info *fr;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *stats;
+- u8 *data;
+ struct fc_frame_header *fh;
+ unsigned short oxid;
+ int cpu_idx;
+@@ -211,9 +210,18 @@ int fcoe_rcv(struct sk_buff *skb, struct
+ FC_DBG("wrong FC type frame");
+ goto err;
+ }
+- data = skb->data;
+- data += sizeof(struct fcoe_hdr);
+- fh = (struct fc_frame_header *)data;
++
++ /*
++ * Check for minimum frame length, and make sure required FCoE
++ * and FC headers are pulled into the linear data area.
++ */
++ if (unlikely((skb->len < FCOE_MIN_FRAME) ||
++ !pskb_may_pull(skb, FCOE_HEADER_LEN)))
++ goto err;
++
++ skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
++ fh = (struct fc_frame_header *) skb_transport_header(skb);
++
+ oxid = ntohs(fh->fh_ox_id);
+
+ fr = fcoe_dev_from_skb(skb);
+@@ -514,8 +522,6 @@ int fcoe_percpu_receive_thread(void *arg
+ {
+ struct fcoe_percpu_s *p = arg;
+ u32 fr_len;
+- unsigned int hlen;
+- unsigned int tlen;
+ struct fc_lport *lp;
+ struct fcoe_rcv_info *fr;
+ struct fcoe_dev_stats *stats;
+@@ -572,10 +578,12 @@ int fcoe_percpu_receive_thread(void *arg
+ skb_linearize(skb); /* not ideal */
+
+ /*
+- * Check the header and pull it off.
++ * Frame length checks and setting up the header pointers
++ * was done in fcoe_rcv already.
+ */
+- hlen = sizeof(struct fcoe_hdr);
+- hp = (struct fcoe_hdr *)skb->data;
++ hp = (struct fcoe_hdr *) skb_network_header(skb);
++ fh = (struct fc_frame_header *) skb_transport_header(skb);
++
+ if (unlikely(FC_FCOE_DECAPS_VER(hp) != FC_FCOE_VER)) {
+ if (stats) {
+ if (stats->ErrorFrames < 5)
+@@ -586,22 +594,10 @@ int fcoe_percpu_receive_thread(void *arg
+ kfree_skb(skb);
+ continue;
+ }
++
+ skb_pull(skb, sizeof(struct fcoe_hdr));
+- tlen = sizeof(struct fcoe_crc_eof);
+- fr_len = skb->len - tlen;
+- skb_trim(skb, fr_len);
++ fr_len = skb->len - sizeof(struct fcoe_crc_eof);
+
+- if (unlikely(fr_len > skb->len)) {
+- if (stats) {
+- if (stats->ErrorFrames < 5)
+- FC_DBG("length error fr_len 0x%x "
+- "skb->len 0x%x", fr_len,
+- skb->len);
+- stats->ErrorFrames++;
+- }
+- kfree_skb(skb);
+- continue;
+- }
+ if (stats) {
+ stats->RxFrames++;
+ stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
+@@ -610,9 +606,11 @@ int fcoe_percpu_receive_thread(void *arg
+ fp = (struct fc_frame *)skb;
+ cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
+ fc_frame_init(fp);
+- fr_eof(fp) = cp->fcoe_eof;
+- fr_sof(fp) = hp->fcoe_sof;
+ fr_dev(fp) = lp;
++ fr_sof(fp) = hp->fcoe_sof;
++ fr_eof(fp) = cp->fcoe_eof;
++ /* trim off the CRC and EOF trailer*/
++ skb_trim(skb, fr_len);
+
+ /*
+ * We only check CRC if no offload is available and if it is
+--- a/include/scsi/fc/fc_fcoe.h
++++ b/include/scsi/fc/fc_fcoe.h
+@@ -85,6 +85,18 @@ struct fcoe_crc_eof {
+ } __attribute__((packed));
+
+ /*
++ * Minimum FCoE + FC header length
++ * 14 bytes FCoE header + 24 byte FC header = 38 bytes
++ */
++#define FCOE_HEADER_LEN 38
++
++/*
++ * Minimum FCoE frame size
++ * 14 bytes FCoE header + 24 byte FC header + 8 byte FCoE trailer = 46 bytes
++ */
++#define FCOE_MIN_FRAME 46
++
++/*
+ * fc_fcoe_set_mac - Store OUI + DID into MAC address field.
+ * @mac: mac address to be set
+ * @did: fc dest id to use
+--- a/include/scsi/fc_frame.h
++++ b/include/scsi/fc_frame.h
+@@ -66,10 +66,10 @@ struct fcoe_rcv_info {
+ struct fc_lport *fr_dev; /* transport layer private pointer */
+ struct fc_seq *fr_seq; /* for use with exchange manager */
+ struct scsi_cmnd *fr_cmd; /* for use of scsi command */
++ u16 fr_max_payload; /* max FC payload */
+ enum fc_sof fr_sof; /* start of frame delimiter */
+ enum fc_eof fr_eof; /* end of frame delimiter */
+ u8 fr_flags; /* flags - see below */
+- u16 fr_max_payload; /* max FC payload */
+ };
+
+ /*
--- /dev/null
+From: James Bottomley <James.Bottomley@HansenPartnership.com>
+Subject: fcoe: fix incorrect use of struct module
+Patch-mainline: 9296e519538b77b5070d49f2f9d66032733c76d4
+References: bnc #468051
+
+This structure may not be defined if CONFIG_MODULE=n, so never deref it. Change
+uses of module->name to module_name(module) and correct some dyslexic printks
+and docbook comments.
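+
+module_name() is safe in either configuration; its definition in
+include/linux/module.h is assumed to look roughly like this (sketch, not
+part of this patch):
+
+	#ifdef CONFIG_MODULES
+	#define module_name(mod)			\
+	({						\
+		struct module *__mod = (mod);		\
+		__mod ? __mod->name : "kernel";		\
+	})
+	#else
+	#define module_name(mod) "kernel"
+	#endif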
+
+Reported-by: Randy Dunlap <randy.dunlap@oracle.com>
+Cc: Robert Love <robert.w.love@intel.com>
+Signed-off-by: James Bottomley <James.Bottomley@HansenPartnership.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ drivers/scsi/fcoe/libfcoe.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -167,7 +167,7 @@ static int fcoe_cpu_callback(struct noti
+ #endif /* CONFIG_HOTPLUG_CPU */
+
+ /**
+- * foce_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
++ * fcoe_rcv - this is the fcoe receive function called by NET_RX_SOFTIRQ
+ * @skb: the receive skb
+ * @dev: associated net device
+ * @ptype: context
+@@ -992,8 +992,8 @@ static int fcoe_ethdrv_get(const struct
+
+ owner = fcoe_netdev_to_module_owner(netdev);
+ if (owner) {
+- printk(KERN_DEBUG "foce:hold driver module %s for %s\n",
+- owner->name, netdev->name);
++ printk(KERN_DEBUG "fcoe:hold driver module %s for %s\n",
++ module_name(owner), netdev->name);
+ return try_module_get(owner);
+ }
+ return -ENODEV;
+@@ -1012,8 +1012,8 @@ static int fcoe_ethdrv_put(const struct
+
+ owner = fcoe_netdev_to_module_owner(netdev);
+ if (owner) {
+- printk(KERN_DEBUG "foce:release driver module %s for %s\n",
+- owner->name, netdev->name);
++ printk(KERN_DEBUG "fcoe:release driver module %s for %s\n",
++ module_name(owner), netdev->name);
+ module_put(owner);
+ return 0;
+ }
--- /dev/null
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: [FcOE] improved load balancing in rx path
+References: bnc #459142
+
+Currently the incoming frame's exchange id is ANDed with a mask derived
+from the number of online CPUs in a way that selects at most two CPUs in
+the rx path. Use the online CPU bit mask instead, so that incoming frames
+are directed to all cpus for better load balancing.
+
+Added code to default to the first online CPU in case the selected CPU is
+offline or its rx thread is not present.
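+
+A worked example, assuming a power-of-two count of online CPUs: with 4
+CPUs online the old mask was num_online_cpus() >> 1 == 2, so
+
+	cpu_idx = oxid & 2;	/* only ever selects cpu 0 or cpu 2 */
+
+while the new mask num_online_cpus() - 1 == 3 spreads exchanges over all
+four CPUs:
+
+	cpu_idx = oxid & 3;	/* selects cpu 0, 1, 2 or 3 */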
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/libfcoe.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -230,13 +230,14 @@ int fcoe_rcv(struct sk_buff *skb, struct
+ cpu_idx = 0;
+ #ifdef CONFIG_SMP
+ /*
+- * The exchange ID are ANDed with num of online CPUs,
+- * so that will have the least lock contention in
+- * handling the exchange. if there is no thread
+- * for a given idx then use first online cpu.
++ * The incoming frame exchange id (oxid) is ANDed with the number of
++ * online cpu bits to get cpu_idx, which is then used to select a per
++ * cpu kernel thread from fcoe_percpu. If that cpu is offline or there
++ * is no kernel thread for the derived cpu_idx, then cpu_idx is
++ * initialized to the first online cpu index.
+ */
+- cpu_idx = oxid & (num_online_cpus() >> 1);
+- if (fcoe_percpu[cpu_idx] == NULL)
++ cpu_idx = oxid & (num_online_cpus() - 1);
++ if (!fcoe_percpu[cpu_idx] || !cpu_online(cpu_idx))
+ cpu_idx = first_cpu(cpu_online_map);
+ #endif
+ fps = fcoe_percpu[cpu_idx];
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: [FcOE] Logoff of the fabric when destroying interface
+References: bnc #459142
+
+This line was accidentally removed by a previous patch.
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/fcoe_sw.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+
+--- a/drivers/scsi/fcoe/fcoe_sw.c
++++ b/drivers/scsi/fcoe/fcoe_sw.c
+@@ -302,6 +302,9 @@ static int fcoe_sw_destroy(struct net_de
+
+ fc = fcoe_softc(lp);
+
++ /* Logout of the fabric */
++ fc_fabric_logoff(lp);
++
+ /* Remove the instance from fcoe's list */
+ fcoe_hostlist_remove(lp);
+
--- /dev/null
+From: Yi Zou <yi.zou@intel.com>
+Subject: [FcOE] remove WARN_ON in fc_set_mfs
+References: bnc #459142
+
+remove WARN_ON in fc_set_mfs(); also add comments.
+
+Signed-off-by: Yi Zou <yi.zou@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_lport.c | 13 +++++++++++--
+ 1 file changed, 11 insertions(+), 2 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -656,10 +656,20 @@ int fc_lport_destroy(struct fc_lport *lp
+ }
+ EXPORT_SYMBOL(fc_lport_destroy);
+
++/**
++ * fc_set_mfs - sets up the mfs for the corresponding fc_lport
++ * @lport: fc_lport pointer to update
++ * @mfs: the new mfs for fc_lport
++ *
++ * Set mfs for the given fc_lport to the new mfs.
++ *
++ * Return: 0 for success
++ *
++ **/
+ int fc_set_mfs(struct fc_lport *lport, u32 mfs)
+ {
+ unsigned int old_mfs;
+- int rc = -1;
++ int rc = -EINVAL;
+
+ mutex_lock(&lport->lp_mutex);
+
+@@ -667,7 +677,6 @@ int fc_set_mfs(struct fc_lport *lport, u
+
+ if (mfs >= FC_MIN_MAX_FRAME) {
+ mfs &= ~3;
+- WARN_ON((size_t) mfs < FC_MIN_MAX_FRAME);
+ if (mfs > FC_MAX_FRAME)
+ mfs = FC_MAX_FRAME;
+ mfs -= sizeof(struct fc_frame_header);
--- /dev/null
+From: Yi Zou <yi.zou@intel.com>
+Subject: [FcOE] user_mfs is never used
+References: bnc #459142
+
+Signed-off-by: Yi Zou <yi.zou@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/libfcoe.c | 2 --
+ include/scsi/libfcoe.h | 1 -
+ 2 files changed, 3 deletions(-)
+
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -900,8 +900,6 @@ static int fcoe_device_notification(stru
+ mfs = fc->real_dev->mtu -
+ (sizeof(struct fcoe_hdr) +
+ sizeof(struct fcoe_crc_eof));
+- if (fc->user_mfs && fc->user_mfs < mfs)
+- mfs = fc->user_mfs;
+ if (mfs >= FC_MIN_MAX_FRAME)
+ fc_set_mfs(lp, mfs);
+ new_status &= ~FC_LINK_UP;
+--- a/include/scsi/libfcoe.h
++++ b/include/scsi/libfcoe.h
+@@ -46,7 +46,6 @@ struct fcoe_softc {
+ struct net_device *phys_dev; /* device with ethtool_ops */
+ struct packet_type fcoe_packet_type;
+ struct sk_buff_head fcoe_pending_queue;
+- u16 user_mfs; /* configured max frame size */
+
+ u8 dest_addr[ETH_ALEN];
+ u8 ctl_src_addr[ETH_ALEN];
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: [FcOE] Add fc_disc.c locking comment block
+References: bnc #459142
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_disc.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -24,6 +24,14 @@
+ * also handles RSCN events and re-discovery if necessary.
+ */
+
++/*
++ * DISC LOCKING
++ *
++ * The disc mutex can be locked when acquiring rport locks, but may not
++ * be held when acquiring the lport lock. Refer to fc_lport.c for more
++ * details.
++ */
++
+ #include <linux/timer.h>
+ #include <linux/err.h>
+ #include <asm/unaligned.h>
--- /dev/null
+From 251b8184b1bd4e17656d72ba9cffcba733092064 Mon Sep 17 00:00:00 2001
+From: Robert Love <robert.w.love@intel.com>
+Date: Mon, 2 Feb 2009 10:13:06 -0800
+Subject: [PATCH] libfc: check for err when recv and state is incorrect
+References: bnc#473602
+
+If we've just created an interface and an rport is
+logging in we may have a request on the wire (say PRLI).
+If we destroy the interface, we'll go through each rport
+on the disc->rports list and set each rport's state to NONE.
+Then the lport will reset the EM. The EM reset will send a
+CLOSED event to the prli_resp() handler which will notice
+that the state != PRLI. In this case it frees the frame
+pointer, decrements the refcount and unlocks the rport.
+
+The problem is that there isn't a frame in this case. It's
+just a pointer with an embedded error code. The free causes
+an Oops.
+
+This patch moves the error checking to be before the state
+checking.
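+
+Every response handler ends up with the same shape (sketch, using the SCR
+handler as the example):
+
+	if (IS_ERR(fp)) {		/* fp is only a PTR_ERR() cookie */
+		fc_lport_error(lport, fp);
+		goto err;		/* no frame to parse or free     */
+	}
+	if (lport->state != LPORT_ST_SCR) {
+		FC_DBG("Received a SCR response, but in state %s\n",
+		       fc_lport_state(lport));
+		goto out;		/* out: frees the real frame     */
+	}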
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Signed-off-by: Hannes Reinecke <hare@suse.de>
+
+---
+ drivers/scsi/libfc/fc_lport.c | 50 +++++++++++++++++++++---------------------
+ drivers/scsi/libfc/fc_rport.c | 30 ++++++++++++-------------
+ 2 files changed, 40 insertions(+), 40 deletions(-)
+
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -1031,17 +1031,17 @@ static void fc_lport_rft_id_resp(struct
+
+ FC_DEBUG_LPORT("Received a RFT_ID response\n");
+
++ if (IS_ERR(fp)) {
++ fc_lport_error(lport, fp);
++ goto err;
++ }
++
+ if (lport->state != LPORT_ST_RFT_ID) {
+ FC_DBG("Received a RFT_ID response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_lport_error(lport, fp);
+- goto err;
+- }
+-
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+
+@@ -1083,17 +1083,17 @@ static void fc_lport_rpn_id_resp(struct
+
+ FC_DEBUG_LPORT("Received a RPN_ID response\n");
+
++ if (IS_ERR(fp)) {
++ fc_lport_error(lport, fp);
++ goto err;
++ }
++
+ if (lport->state != LPORT_ST_RPN_ID) {
+ FC_DBG("Received a RPN_ID response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_lport_error(lport, fp);
+- goto err;
+- }
+-
+ fh = fc_frame_header_get(fp);
+ ct = fc_frame_payload_get(fp, sizeof(*ct));
+ if (fh && ct && fh->fh_type == FC_TYPE_CT &&
+@@ -1133,17 +1133,17 @@ static void fc_lport_scr_resp(struct fc_
+
+ FC_DEBUG_LPORT("Received a SCR response\n");
+
++ if (IS_ERR(fp)) {
++ fc_lport_error(lport, fp);
++ goto err;
++ }
++
+ if (lport->state != LPORT_ST_SCR) {
+ FC_DBG("Received a SCR response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_lport_error(lport, fp);
+- goto err;
+- }
+-
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_ready(lport);
+@@ -1359,17 +1359,17 @@ static void fc_lport_logo_resp(struct fc
+
+ FC_DEBUG_LPORT("Received a LOGO response\n");
+
++ if (IS_ERR(fp)) {
++ fc_lport_error(lport, fp);
++ goto err;
++ }
++
+ if (lport->state != LPORT_ST_LOGO) {
+ FC_DBG("Received a LOGO response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_lport_error(lport, fp);
+- goto err;
+- }
+-
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC)
+ fc_lport_enter_reset(lport);
+@@ -1443,17 +1443,17 @@ static void fc_lport_flogi_resp(struct f
+
+ FC_DEBUG_LPORT("Received a FLOGI response\n");
+
++ if (IS_ERR(fp)) {
++ fc_lport_error(lport, fp);
++ goto err;
++ }
++
+ if (lport->state != LPORT_ST_FLOGI) {
+ FC_DBG("Received a FLOGI response, but in state %s\n",
+ fc_lport_state(lport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_lport_error(lport, fp);
+- goto err;
+- }
+-
+ fh = fc_frame_header_get(fp);
+ did = ntoh24(fh->fh_d_id);
+ if (fc_frame_payload_op(fp) == ELS_LS_ACC && did != 0) {
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -505,17 +505,17 @@ static void fc_rport_plogi_resp(struct f
+ FC_DEBUG_RPORT("Received a PLOGI response from port (%6x)\n",
+ rport->port_id);
+
++ if (IS_ERR(fp)) {
++ fc_rport_error_retry(rport, fp);
++ goto err;
++ }
++
+ if (rdata->rp_state != RPORT_ST_PLOGI) {
+ FC_DBG("Received a PLOGI response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_rport_error_retry(rport, fp);
+- goto err;
+- }
+-
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC &&
+ (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
+@@ -614,17 +614,17 @@ static void fc_rport_prli_resp(struct fc
+ FC_DEBUG_RPORT("Received a PRLI response from port (%6x)\n",
+ rport->port_id);
+
++ if (IS_ERR(fp)) {
++ fc_rport_error_retry(rport, fp);
++ goto err;
++ }
++
+ if (rdata->rp_state != RPORT_ST_PRLI) {
+ FC_DBG("Received a PRLI response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_rport_error_retry(rport, fp);
+- goto err;
+- }
+-
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ pp = fc_frame_payload_get(fp, sizeof(*pp));
+@@ -764,17 +764,17 @@ static void fc_rport_rtv_resp(struct fc_
+ FC_DEBUG_RPORT("Received a RTV response from port (%6x)\n",
+ rport->port_id);
+
++ if (IS_ERR(fp)) {
++ fc_rport_error(rport, fp);
++ goto err;
++ }
++
+ if (rdata->rp_state != RPORT_ST_RTV) {
+ FC_DBG("Received a RTV response, but in state %s\n",
+ fc_rport_state(rport));
+ goto out;
+ }
+
+- if (IS_ERR(fp)) {
+- fc_rport_error(rport, fp);
+- goto err;
+- }
+-
+ op = fc_frame_payload_op(fp);
+ if (op == ELS_LS_ACC) {
+ struct fc_els_rtv_acc *rtv;
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: libfc: Ensure correct device_put/get usage (round 2)
+References:
+
+Reference counting was barely used and where used
+it was incorrect. This patch creates a few simple
+policies.
+
+When the rport->dev [i.e. the struct device] is initialized
+it starts with a refcnt of 1. Whenever we're using the
+rport we will increment the count. When we logoff we
+should decrement the count to 0 and the 'release'
+function will be called. The FC transport provides the
+release function for real rports and libfc provides it
+for rogue rports. When we switch from a rogue to real
+rport we'll decrement the refcnt on the rogue rport
+and increment it for the real rport, after we've created
+it.
+
+Any externally initiated action on an rport (login,
+logoff) will not require the caller to increment and
+decrement the refcnt.
+
+For rport_login(), the rport will have just been created
+and therefore no other thread would be able to access
+this object.
+
+For rport_logoff(), the rport will have been removed
+from the list of rports and therefore no other thread
+would be able to lookup() this rport.
+
+This patch removes the get_device() from the rport_lookup
+function. These are the places where it is called and why
+we don't need a reference.
+
+fc_disc_recv_rscn_req() - called for single port RSCNs;
+ the disc mutex is held and
+ ensures that no other thread
+ will find this rport.
+
+fc_disc_new_target() - Same. The rport cannot be looked up
+ so no other thread can free the rport.
+ This code looks buggy though, we
+ shouldn't be calling rport_login() on
+ a 'real' rport, which we could do.
+
+fc_disc_single() - Same. disc mutex protects the list.
+
+fc_lport_recv_req() - Similar, but this time the lport lock
+ ensures that no incoming requests are
+ processed until the current request
+ for an rport has returned.
+
+When the rport layer needs to send a request it will
+increment the count so that the EM can be confident that
+the rport is present when making the callback. If
+fc_remote_port_delete() is called before the response
+callback, which is often the case for LOGO commands, the
+refcnt will still have a value of 1 because we grabbed the
+lock before elsct_send() is called. The exchange would
+have been removed and so the callback will be called with
+an error code. After processing the error code we'll
+decrement the refcnt for the last time and the rport will
+be free'd.
+
+Since point-to-point mode is not working this patch
+does not consider point-to-point.
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_disc.c | 5 +----
+ drivers/scsi/libfc/fc_lport.c | 5 ++---
+ drivers/scsi/libfc/fc_rport.c | 21 ++++++++++++++-------
+ 3 files changed, 17 insertions(+), 14 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -81,7 +81,6 @@ struct fc_rport *fc_disc_lookup_rport(co
+ if (rport->port_id == port_id) {
+ disc_found = 1;
+ found = rport;
+- get_device(&found->dev);
+ break;
+ }
+ }
+@@ -767,10 +766,8 @@ static void fc_disc_single(struct fc_dis
+ goto out;
+
+ rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
+- if (rport) {
++ if (rport)
+ fc_disc_del_target(disc, rport);
+- put_device(&rport->dev); /* hold from lookup */
+- }
+
+ new_rport = fc_rport_rogue_create(dp);
+ if (new_rport) {
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -908,10 +908,9 @@ static void fc_lport_recv_req(struct fc_
+ d_id = ntoh24(fh->fh_d_id);
+
+ rport = lport->tt.rport_lookup(lport, s_id);
+- if (rport) {
++ if (rport)
+ lport->tt.rport_recv_req(sp, fp, rport);
+- put_device(&rport->dev); /* hold from lookup */
+- } else {
++ else {
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_UNAB;
+ rjt_data.explan = ELS_EXPL_NONE;
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -111,16 +111,11 @@ struct fc_rport *fc_rport_rogue_create(s
+ rport->roles = dp->ids.roles;
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ /*
+- * init the device, so other code can manipulate the rport as if
+- * it came from the fc class. We also do an extra get because
+- * libfc will free this rport instead of relying on the normal
+- * refcounting.
+- *
+ * Note: all this libfc rogue rport code will be removed for
+ * upstream so it fine that this is really ugly and hacky right now.
+ */
+ device_initialize(&rport->dev);
+- get_device(&rport->dev);
++ rport->dev.release = fc_rport_rogue_destroy; // XXX: bwalle
+
+ mutex_init(&rdata->rp_mutex);
+ rdata->local_port = dp->lp;
+@@ -402,9 +397,9 @@ static void fc_rport_timeout(struct work
+ case RPORT_ST_NONE:
+ break;
+ }
+- put_device(&rport->dev);
+
+ mutex_unlock(&rdata->rp_mutex);
++ put_device(&rport->dev);
+ }
+
+ /**
+@@ -531,6 +526,7 @@ out:
+ fc_frame_free(fp);
+ err:
+ mutex_unlock(&rdata->rp_mutex);
++ put_device(&rport->dev);
+ }
+
+ /**
+@@ -562,6 +558,8 @@ static void fc_rport_enter_plogi(struct
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
+ fc_rport_plogi_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
++ else
++ get_device(&rport->dev);
+ }
+
+ /**
+@@ -631,6 +629,7 @@ out:
+ fc_frame_free(fp);
+ err:
+ mutex_unlock(&rdata->rp_mutex);
++ put_device(&rport->dev);
+ }
+
+ /**
+@@ -679,6 +678,7 @@ out:
+ fc_frame_free(fp);
+ err:
+ mutex_unlock(&rdata->rp_mutex);
++ put_device(&rport->dev);
+ }
+
+ /**
+@@ -712,6 +712,8 @@ static void fc_rport_enter_prli(struct f
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
+ fc_rport_prli_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
++ else
++ get_device(&rport->dev);
+ }
+
+ /**
+@@ -777,6 +779,7 @@ out:
+ fc_frame_free(fp);
+ err:
+ mutex_unlock(&rdata->rp_mutex);
++ put_device(&rport->dev);
+ }
+
+ /**
+@@ -806,6 +809,8 @@ static void fc_rport_enter_rtv(struct fc
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
+ fc_rport_rtv_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
++ else
++ get_device(&rport->dev);
+ }
+
+ /**
+@@ -835,6 +840,8 @@ static void fc_rport_enter_logo(struct f
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
+ fc_rport_logo_resp, rport, lport->e_d_tov))
+ fc_rport_error(rport, fp);
++ else
++ get_device(&rport->dev);
+ }
+
+
--- /dev/null
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: libfc: handle RRQ exch timeout
+References: bnc #465596
+
+Clean up the exchange held due to RRQ when the RRQ exch times out. In this
+case the ABTS is already done, which is what caused the RRQ request, so
+proceeding with the cleanup in fc_exch_rrq_resp is okay and restores the
+exch resource.
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_exch.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1605,7 +1605,7 @@ static void fc_exch_rrq_resp(struct fc_s
+ if (IS_ERR(fp)) {
+ int err = PTR_ERR(fp);
+
+- if (err == -FC_EX_CLOSED)
++ if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
+ goto cleanup;
+ FC_DBG("Cannot process RRQ, because of frame error %d\n", err);
+ return;
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: [FcOE] Improve fc_lport.c locking comment block
+References: bnc #459142
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_lport.c | 76 ++++++++++++++++++++++++------------------
+ 1 file changed, 45 insertions(+), 31 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -18,34 +18,51 @@
+ */
+
+ /*
+- * General locking notes:
++ * PORT LOCKING NOTES
+ *
+- * The lport and rport blocks both have mutexes that are used to protect
+- * the port objects states. The main motivation for this protection is that
+- * we don't want to be preparing a request/response in one context while
+- * another thread "resets" the port in question. For example, if the lport
+- * block is sending a SCR request to the directory server we don't want
+- * the lport to be reset before we fill out the frame header's port_id. The
+- * problem is that a reset would cause the lport's port_id to reset to 0.
+- * If we don't protect the lport we'd spew incorrect frames.
+- *
+- * At the time of this writing there are two primary mutexes, one for the
+- * lport and one for the rport. Since the lport uses the rport and makes
+- * calls into that block the rport should never make calls that would cause
+- * the lport's mutex to be locked. In other words, the lport's mutex is
+- * considered the outer lock and the rport's lock is considered the inner
+- * lock. The bottom line is that you can hold a lport's mutex and then
+- * hold the rport's mutex, but not the other way around.
+- *
+- * The only complication to this rule is the callbacks from the rport to
+- * the lport's rport_callback function. When rports become READY they make
+- * a callback to the lport so that it can track them. In the case of the
+- * directory server that callback might cause the lport to change its
+- * state, implying that the lport mutex would need to be held. This problem
+- * was solved by serializing the rport notifications to the lport and the
+- * callback is made without holding the rport's lock.
++ * These comments only apply to the 'port code' which consists of the lport,
++ * disc and rport blocks.
+ *
+- * lport locking notes:
++ * MOTIVATION
++ *
++ * The lport, disc and rport blocks all have mutexes that are used to protect
++ * those objects. The main motivation for these locks is to prevent an
++ * lport reset just before we send a frame. In that scenario the
++ * lport's FID would get set to zero and then we'd send a frame with an
++ * invalid SID. We also need to ensure that states don't change unexpectedly
++ * while processing another state.
++ *
++ * HIERARCHY
++ *
++ * The following hierarchy defines the locking rules. A greater lock
++ * may be held before acquiring a lesser lock, but a lesser lock should never
++ * be held while attempting to acquire a greater lock. Here is the hierarchy:
++ *
++ * lport > disc, lport > rport, disc > rport
++ *
++ * CALLBACKS
++ *
++ * The callbacks cause complications with this scheme. There is a callback
++ * from the rport (to either lport or disc) and a callback from disc
++ * (to the lport).
++ *
++ * As rports exit the rport state machine a callback is made to the owner of
++ * the rport to notify success or failure. Since the callback is likely to
++ * cause the lport or disc to grab its lock we cannot hold the rport lock
++ * while making the callback. To ensure that the rport is not freed while
++ * processing the callback, the rport callbacks are serialized through a
++ * single-threaded workqueue. An rport would never be freed while in a
++ * callback handler because no other rport work in this queue can be executed
++ * at the same time.
++ *
++ * When discovery succeeds or fails a callback is made to the lport as
++ * notification. Currently, successful discovery causes the lport to take no
++ * action. A failure will cause the lport to reset. There is likely a circular
++ * locking problem with this implementation.
++ */
++
++/*
++ * LPORT LOCKING
+ *
+ * The critical sections protected by the lport's mutex are quite broad and
+ * may be improved upon in the future. The lport code and its locking doesn't
+@@ -54,9 +71,9 @@
+ *
+ * The strategy is to lock whenever processing a request or response. Note
+ * that every _enter_* function corresponds to a state change. They generally
+- * change the lports state and then sends a request out on the wire. We lock
++ * change the lports state and then send a request out on the wire. We lock
+ * before calling any of these functions to protect that state change. This
+- * means that the entry points into the lport block to manage the locks while
++ * means that the entry points into the lport block manage the locks while
+ * the state machine can transition between states (i.e. _enter_* functions)
+ * while always staying protected.
+ *
+@@ -68,9 +85,6 @@
+ * Retries also have to consider the locking. The retries occur from a work
+ * context and the work function will lock the lport and then retry the state
+ * (i.e. _enter_* function).
+- *
+- * The implication to all of this is that each lport can only process one
+- * state at a time.
+ */
+
+ #include <linux/timer.h>
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: Improve fc_rport.c locking comment block
+References: bnc #459142
+
+checkpatch.pl was complaining about having spaces
+after '*'s. It seemed to be a false positive. I split
+the comment block into two blocks and it resolved the
+ERROR.
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_rport.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -18,20 +18,23 @@
+ */
+
+ /*
++ * RPORT GENERAL INFO
++ *
+ * This file contains all processing regarding fc_rports. It contains the
+ * rport state machine and does all rport interaction with the transport class.
+ * There should be no other places in libfc that interact directly with the
+ * transport class in regards to adding and deleting rports.
+ *
+ * fc_rport's represent N_Port's within the fabric.
++ */
++
++/*
++ * RPORT LOCKING
+ *
+- * rport locking notes:
+- *
+- * The rport should never hold the rport mutex and then lock the lport
+- * mutex. The rport's mutex is considered lesser than the lport's mutex, so
+- * the lport mutex can be held before locking the rport mutex, but not the
+- * other way around. See the comment block at the top of fc_lport.c for more
+- * details.
++ * The rport should never hold the rport mutex and then attempt to acquire
++ * either the lport or disc mutexes. The rport's mutex is considered lesser
++ * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
++ * more comments on the hierarchy.
+ *
+ * The locking strategy is similar to the lport's strategy. The lock protects
+ * the rport's states and is held and released by the entry points to the rport
--- /dev/null
+From: Chris Leech <christopher.leech@intel.com>
+Subject: [FcOE] make fc_disc inline with the fc_lport structure
+References: bnc #459142
+
+The extra memory allocation was not being checked for failure. Rather than
+further complicating things, just make the fields required by the discovery
+code part of the lport structure.
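+
+As a minimal sketch of the pattern (hypothetical names, not the actual libfc
+types), embedding the member removes the allocation that could fail:
+
+	struct disc_part {			/* illustration only */
+		struct mutex		lock;
+		struct list_head	rports;
+	};
+
+	struct port {
+		struct disc_part disc;		/* embedded: no kzalloc(), no NULL check */
+	};
+
+	static void port_disc_init(struct port *p)
+	{
+		mutex_init(&p->disc.lock);
+		INIT_LIST_HEAD(&p->disc.rports);
+	}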
+
+Signed-off-by: Chris Leech <christopher.leech@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_disc.c | 80 +++++++-----------------------------------
+ drivers/scsi/libfc/fc_lport.c | 2 -
+ include/scsi/libfc.h | 22 ++++++++++-
+ 3 files changed, 35 insertions(+), 69 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -45,26 +45,6 @@ static int fc_disc_debug;
+ FC_DBG(fmt); \
+ } while (0)
+
+-struct fc_disc {
+- unsigned char retry_count;
+- unsigned char delay;
+- unsigned char pending;
+- unsigned char requested;
+- unsigned short seq_count;
+- unsigned char buf_len;
+- enum fc_disc_event event;
+-
+- void (*disc_callback)(struct fc_lport *,
+- enum fc_disc_event);
+-
+- struct list_head rports;
+- struct fc_lport *lport;
+- struct mutex disc_mutex;
+- struct fc_gpn_ft_resp partial_buf; /* partial name buffer */
+- struct delayed_work disc_work;
+-
+-};
+-
+ static void fc_disc_gpn_ft_req(struct fc_disc *);
+ static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
+ static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
+@@ -83,14 +63,11 @@ static void fc_disc_restart(struct fc_di
+ struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
+ u32 port_id)
+ {
+- struct fc_disc *disc = lport->disc;
++ const struct fc_disc *disc = &lport->disc;
+ struct fc_rport *rport, *found = NULL;
+ struct fc_rport_libfc_priv *rdata;
+ int disc_found = 0;
+
+- if (!disc)
+- return NULL;
+-
+ list_for_each_entry(rdata, &disc->rports, peers) {
+ rport = PRIV_TO_RPORT(rdata);
+ if (rport->port_id == port_id) {
+@@ -108,27 +85,6 @@ struct fc_rport *fc_disc_lookup_rport(co
+ }
+
+ /**
+- * fc_disc_alloc - Allocate a discovery work object
+- * @lport: The FC lport associated with the discovery job
+- */
+-static inline struct fc_disc *fc_disc_alloc(struct fc_lport *lport)
+-{
+- struct fc_disc *disc;
+-
+- disc = kzalloc(sizeof(struct fc_disc), GFP_KERNEL);
+- INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
+- mutex_init(&disc->disc_mutex);
+- INIT_LIST_HEAD(&disc->rports);
+-
+- disc->lport = lport;
+- lport->disc = disc;
+- disc->delay = FC_DISC_DELAY;
+- disc->event = DISC_EV_NONE;
+-
+- return disc;
+-}
+-
+-/**
+ * fc_disc_stop_rports - delete all the remote ports associated with the lport
+ * @disc: The discovery job to stop rports on
+ *
+@@ -167,7 +123,7 @@ static void fc_disc_rport_callback(struc
+ enum fc_rport_event event)
+ {
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+- struct fc_disc *disc = lport->disc;
++ struct fc_disc *disc = &lport->disc;
+ int found = 0;
+
+ FC_DEBUG_DISC("Received a %d event for port (%6x)\n", event,
+@@ -304,13 +260,7 @@ static void fc_disc_recv_req(struct fc_s
+ struct fc_lport *lport)
+ {
+ u8 op;
+- struct fc_disc *disc = lport->disc;
+-
+- if (!disc) {
+- FC_DBG("Received a request for an lport not managed "
+- "by the discovery engine\n");
+- return;
+- }
++ struct fc_disc *disc = &lport->disc;
+
+ op = fc_frame_payload_op(fp);
+ switch (op) {
+@@ -365,17 +315,7 @@ static void fc_disc_start(void (*disc_ca
+ {
+ struct fc_rport *rport;
+ struct fc_rport_identifiers ids;
+- struct fc_disc *disc = lport->disc;
+-
+- if (!disc) {
+- FC_DEBUG_DISC("No existing discovery job, "
+- "creating one for lport (%6x)\n",
+- fc_host_port_id(lport->host));
+- disc = fc_disc_alloc(lport);
+- } else
+- FC_DEBUG_DISC("Found an existing discovery job "
+- "for lport (%6x)\n",
+- fc_host_port_id(lport->host));
++ struct fc_disc *disc = &lport->disc;
+
+ /*
+ * At this point we may have a new disc job or an existing
+@@ -831,7 +771,7 @@ out:
+ */
+ void fc_disc_stop(struct fc_lport *lport)
+ {
+- struct fc_disc *disc = lport->disc;
++ struct fc_disc *disc = &lport->disc;
+
+ if (disc) {
+ cancel_delayed_work_sync(&disc->disc_work);
+@@ -858,6 +798,7 @@ void fc_disc_stop_final(struct fc_lport
+ */
+ int fc_disc_init(struct fc_lport *lport)
+ {
++ struct fc_disc *disc;
+
+ if (!lport->tt.disc_start)
+ lport->tt.disc_start = fc_disc_start;
+@@ -874,6 +815,15 @@ int fc_disc_init(struct fc_lport *lport)
+ if (!lport->tt.rport_lookup)
+ lport->tt.rport_lookup = fc_disc_lookup_rport;
+
++ disc = &lport->disc;
++ INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
++ mutex_init(&disc->disc_mutex);
++ INIT_LIST_HEAD(&disc->rports);
++
++ disc->lport = lport;
++ disc->delay = FC_DISC_DELAY;
++ disc->event = DISC_EV_NONE;
++
+ return 0;
+ }
+ EXPORT_SYMBOL(fc_disc_init);
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -627,8 +627,6 @@ int fc_fabric_logoff(struct fc_lport *lp
+ {
+ lport->tt.disc_stop_final(lport);
+ mutex_lock(&lport->lp_mutex);
+- kfree(lport->disc);
+- lport->disc = NULL;
+ fc_lport_enter_logo(lport);
+ mutex_unlock(&lport->lp_mutex);
+ return 0;
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -572,7 +572,25 @@ struct libfc_function_template {
+ void (*disc_stop_final) (struct fc_lport *);
+ };
+
+-struct fc_disc;
++/* information used by the discovery layer */
++struct fc_disc {
++ unsigned char retry_count;
++ unsigned char delay;
++ unsigned char pending;
++ unsigned char requested;
++ unsigned short seq_count;
++ unsigned char buf_len;
++ enum fc_disc_event event;
++
++ void (*disc_callback)(struct fc_lport *,
++ enum fc_disc_event);
++
++ struct list_head rports;
++ struct fc_lport *lport;
++ struct mutex disc_mutex;
++ struct fc_gpn_ft_resp partial_buf; /* partial name buffer */
++ struct delayed_work disc_work;
++};
+
+ struct fc_lport {
+ struct list_head list;
+@@ -582,8 +600,8 @@ struct fc_lport {
+ struct fc_exch_mgr *emp;
+ struct fc_rport *dns_rp;
+ struct fc_rport *ptp_rp;
+- struct fc_disc *disc;
+ void *scsi_priv;
++ struct fc_disc disc;
+
+ /* Operational Information */
+ struct libfc_function_template tt;
--- /dev/null
+From: Chris Leech <christopher.leech@intel.com>
+Subject: [FcOE] make RSCN parsing more robust
+References: bnc #459142
+
+RSCN parsing needs to verify that the payload length specified in the RSCN ELS
+message does not exceed the size of the actual frame received.
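+
+Condensed, the validation sequence added below reads (a sketch mirroring the
+hunk, not the verbatim code):
+
+	rp = fc_frame_payload_get(fp, sizeof(*rp));	/* RSCN header present? */
+	if (!rp || rp->rscn_page_len != sizeof(*pp))	/* page size sane? */
+		goto reject;
+	len = ntohs(rp->rscn_plen);
+	if (len < sizeof(*rp) ||			/* at least a header */
+	    !fc_frame_payload_get(fp, len) ||		/* payload fits the frame */
+	    (len - sizeof(*rp)) % sizeof(*pp))		/* whole pages only */
+		goto reject;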
+
+Signed-off-by: Chris Leech <christopher.leech@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_disc.c | 19 +++++++++++++++----
+ 1 file changed, 15 insertions(+), 4 deletions(-)
+
+
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 0416041..8b609e4 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -173,17 +173,27 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+ FC_DEBUG_DISC("Received an RSCN event on port (%6x)\n",
+ fc_host_port_id(lport->host));
+
++ /* make sure the frame contains an RSCN message */
+ rp = fc_frame_payload_get(fp, sizeof(*rp));
+-
+- if (!rp || rp->rscn_page_len != sizeof(*pp))
++ if (!rp)
+ goto reject;
+-
++ /* make sure the page length is as expected (4 bytes) */
++ if (rp->rscn_page_len != sizeof(*pp))
++ goto reject;
++ /* get the RSCN payload length */
+ len = ntohs(rp->rscn_plen);
+ if (len < sizeof(*rp))
+ goto reject;
++ /* make sure the frame contains the expected payload */
++ rp = fc_frame_payload_get(fp, len);
++ if (!rp)
++ goto reject;
++ /* payload must be a multiple of the RSCN page size */
+ len -= sizeof(*rp);
++ if (len % sizeof(*pp))
++ goto reject;
+
+- for (pp = (void *)(rp + 1); len; len -= sizeof(*pp), pp++) {
++ for (pp = (void *)(rp + 1); len > 0; len -= sizeof(*pp), pp++) {
+ ev_qual = pp->rscn_page_flags >> ELS_RSCN_EV_QUAL_BIT;
+ ev_qual &= ELS_RSCN_EV_QUAL_MASK;
+ fmt = pp->rscn_page_flags >> ELS_RSCN_ADDR_FMT_BIT;
+@@ -239,6 +249,7 @@ static void fc_disc_recv_rscn_req(struct fc_seq *sp, struct fc_frame *fp,
+ fc_frame_free(fp);
+ return;
+ reject:
++ FC_DEBUG_DISC("Received a bad RSCN frame\n");
+ rjt_data.fp = NULL;
+ rjt_data.reason = ELS_RJT_LOGIC;
+ rjt_data.explan = ELS_EXPL_NONE;
--- /dev/null
+From: Chris Leech <christopher.leech@intel.com>
+Subject: [FcOE] make sure we access the CRC safely
+References: bnc #459142
+
+Even though fcoe verified that the EOF and CRC trailer bytes were present,
+when the CRC check was delayed for solicited SCSI data libfc would look past
+what was marked as valid data in the frame to find the CRC in the FCoE
+trailer.
+
+Instead, pass the CRC to libfc in the context block.
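+
+A condensed sketch of the receive-side change (from the hunks below):
+skb_copy_bits() is safe even when the trailer sits in paged skb data,
+unlike casting past the linear area:
+
+	struct fcoe_crc_eof crc_eof;
+
+	if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof)))
+		goto drop;			/* trailer not in the frame */
+	fr_eof(fp) = crc_eof.fcoe_eof;
+	fr_crc(fp) = crc_eof.fcoe_crc32;	/* CRC now travels in the cb */
+	if (pskb_trim(skb, fr_len))
+		goto drop;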
+
+Signed-off-by: Chris Leech <christopher.leech@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/libfcoe.c | 20 ++++++++++++++------
+ drivers/scsi/libfc/fc_fcp.c | 2 +-
+ drivers/scsi/libfc/fc_frame.c | 2 +-
+ include/scsi/fc_frame.h | 3 +++
+ 4 files changed, 19 insertions(+), 8 deletions(-)
+
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -527,7 +527,7 @@ int fcoe_percpu_receive_thread(void *arg
+ struct fcoe_dev_stats *stats;
+ struct fc_frame_header *fh;
+ struct sk_buff *skb;
+- struct fcoe_crc_eof *cp;
++ struct fcoe_crc_eof crc_eof;
+ struct fc_frame *fp;
+ u8 *mac = NULL;
+ struct fcoe_softc *fc;
+@@ -604,13 +604,21 @@ int fcoe_percpu_receive_thread(void *arg
+ }
+
+ fp = (struct fc_frame *)skb;
+- cp = (struct fcoe_crc_eof *)(skb->data + fr_len);
+ fc_frame_init(fp);
+ fr_dev(fp) = lp;
+ fr_sof(fp) = hp->fcoe_sof;
+- fr_eof(fp) = cp->fcoe_eof;
+- /* trim off the CRC and EOF trailer*/
+- skb_trim(skb, fr_len);
++
++ /* Copy out the CRC and EOF trailer for access */
++ if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
++ kfree_skb(skb);
++ continue;
++ }
++ fr_eof(fp) = crc_eof.fcoe_eof;
++ fr_crc(fp) = crc_eof.fcoe_crc32;
++ if (pskb_trim(skb, fr_len)) {
++ kfree_skb(skb);
++ continue;
++ }
+
+ /*
+ * We only check CRC if no offload is available and if it is
+@@ -629,7 +637,7 @@ int fcoe_percpu_receive_thread(void *arg
+ continue;
+ }
+ if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
+- if (le32_to_cpu(cp->fcoe_crc32) !=
++ if (le32_to_cpu(fr_crc(fp)) !=
+ ~crc32(~0, skb->data, fr_len)) {
+ if (debug_fcoe || stats->InvalidCRCCount < 5)
+ printk(KERN_WARNING "fcoe: dropping "
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -356,7 +356,7 @@ static void fc_fcp_recv_data(struct fc_f
+ len += 4 - (len % 4);
+ }
+
+- if (~crc != le32_to_cpu(*(__le32 *)(buf + len))) {
++ if (~crc != le32_to_cpu(fr_crc(fp))) {
+ crc_err:
+ stats = lp->dev_stats[smp_processor_id()];
+ stats->ErrorFrames++;
+--- a/drivers/scsi/libfc/fc_frame.c
++++ b/drivers/scsi/libfc/fc_frame.c
+@@ -42,7 +42,7 @@ u32 fc_frame_crc_check(struct fc_frame *
+ len = (fr_len(fp) + 3) & ~3; /* round up length to include fill */
+ bp = (const u8 *) fr_hdr(fp);
+ crc = ~crc32(~0, bp, len);
+- error = crc ^ *(u32 *) (bp + len);
++ error = crc ^ fr_crc(fp);
+ return error;
+ }
+ EXPORT_SYMBOL(fc_frame_crc_check);
+--- a/include/scsi/fc_frame.h
++++ b/include/scsi/fc_frame.h
+@@ -56,6 +56,7 @@
+ #define fr_max_payload(fp) (fr_cb(fp)->fr_max_payload)
+ #define fr_cmd(fp) (fr_cb(fp)->fr_cmd)
+ #define fr_dir(fp) (fr_cmd(fp)->sc_data_direction)
++#define fr_crc(fp) (fr_cb(fp)->fr_crc)
+
+ struct fc_frame {
+ struct sk_buff skb;
+@@ -66,12 +67,14 @@ struct fcoe_rcv_info {
+ struct fc_lport *fr_dev; /* transport layer private pointer */
+ struct fc_seq *fr_seq; /* for use with exchange manager */
+ struct scsi_cmnd *fr_cmd; /* for use of scsi command */
++ u32 fr_crc;
+ u16 fr_max_payload; /* max FC payload */
+ enum fc_sof fr_sof; /* start of frame delimiter */
+ enum fc_eof fr_eof; /* end of frame delimiter */
+ u8 fr_flags; /* flags - see below */
+ };
+
++
+ /*
+ * Get fc_frame pointer for an skb that's already been imported.
+ */
--- /dev/null
+From: Abhijeet Joglekar <abjoglek@cisco.com>
+Subject: libfc: Pass lport in exch_mgr_reset
+References: bnc #465596
+
+The fc_exch_mgr structure is private to fc_exch.c. To export exch_mgr_reset
+to the transport, the transport would need access to the exchange manager.
+Instead, change exch_mgr_reset to take an lport parameter, which is the
+structure shared between libFC and the transport.
+
+Alternatively, fc_exch_mgr definition can be moved to libfc.h so that lport
+can be accessed from mp*.
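+
+With the lport parameter, the manager is resolved internally and transport
+call sites stay simple (sketch):
+
+	void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+	{
+		struct fc_exch_mgr *mp = lp->emp;	/* still private to fc_exch.c */
+		/* ... walk and reset matching exchanges ... */
+	}
+
+	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);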
+
+Signed-off-by: Abhijeet Joglekar <abjoglek@cisco.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_exch.c | 3 ++-
+ drivers/scsi/libfc/fc_lport.c | 4 ++--
+ drivers/scsi/libfc/fc_rport.c | 4 ++--
+ include/scsi/libfc.h | 4 ++--
+ 4 files changed, 8 insertions(+), 7 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1478,10 +1478,11 @@ static void fc_exch_reset(struct fc_exch
+ * If sid is non-zero, reset only exchanges we source from that FID.
+ * If did is non-zero, reset only exchanges destined to that FID.
+ */
+-void fc_exch_mgr_reset(struct fc_exch_mgr *mp, u32 sid, u32 did)
++void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+ {
+ struct fc_exch *ep;
+ struct fc_exch *next;
++ struct fc_exch_mgr *mp = lp->emp;
+
+ spin_lock_bh(&mp->em_lock);
+ restart:
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -640,7 +640,7 @@ int fc_lport_destroy(struct fc_lport *lp
+ {
+ lport->tt.frame_send = fc_frame_drop;
+ lport->tt.fcp_abort_io(lport);
+- lport->tt.exch_mgr_reset(lport->emp, 0, 0);
++ lport->tt.exch_mgr_reset(lport, 0, 0);
+ return 0;
+ }
+ EXPORT_SYMBOL(fc_lport_destroy);
+@@ -951,7 +951,7 @@ static void fc_lport_enter_reset(struct
+
+ lport->tt.disc_stop(lport);
+
+- lport->tt.exch_mgr_reset(lport->emp, 0, 0);
++ lport->tt.exch_mgr_reset(lport, 0, 0);
+ fc_host_fabric_name(lport->host) = 0;
+ fc_host_port_id(lport->host) = 0;
+
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -1302,7 +1302,7 @@ void fc_rport_terminate_io(struct fc_rpo
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_lport *lport = rdata->local_port;
+
+- lport->tt.exch_mgr_reset(lport->emp, 0, rport->port_id);
+- lport->tt.exch_mgr_reset(lport->emp, rport->port_id, 0);
++ lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
++ lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
+ }
+ EXPORT_SYMBOL(fc_rport_terminate_io);
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -469,7 +469,7 @@ struct libfc_function_template {
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+- void (*exch_mgr_reset)(struct fc_exch_mgr *,
++ void (*exch_mgr_reset)(struct fc_lport *,
+ u32 s_id, u32 d_id);
+
+ void (*rport_flush_queue)(void);
+@@ -908,7 +908,7 @@ struct fc_seq *fc_seq_start_next(struct
+ * If s_id is non-zero, reset only exchanges originating from that FID.
+ * If d_id is non-zero, reset only exchanges sending to that FID.
+ */
+-void fc_exch_mgr_reset(struct fc_exch_mgr *, u32 s_id, u32 d_id);
++void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
+
+ /*
+ * Functions for fc_functions_template
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: [FcOE] Set the release function for the rport's kobject (round 2)
+References: bnc #459142
+
+We need to be better about reference counting. The first
+step is to make use of the release function that is called
+when the reference count drops to 0.
+
+There was some initial pushback by Joe on this patch. We
+talked off-list and agreed that the benefit of not having
+to check whether a rport is rogue or real outweighed the
+fact that we might be using reference counting on objects
+(rogue) that cannot be acted on by another thread.
+
+There is likely room for improvement here, but this should
+be a stable start.
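+
+For reference, a minimal sketch of the driver-model refcounting pattern this
+adopts (not the exact libfc code):
+
+	static void rogue_release(struct device *dev)
+	{
+		kfree(dev_to_rport(dev));	/* runs when the refcount hits 0 */
+	}
+
+	device_initialize(&rport->dev);
+	rport->dev.release = rogue_release;
+
+	get_device(&rport->dev);		/* hold a ref across async work */
+	/* ... */
+	put_device(&rport->dev);		/* final put frees via ->release */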
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_rport.c | 18 ++++++++++--------
+ include/scsi/libfc.h | 1 -
+ 2 files changed, 10 insertions(+), 9 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -93,6 +93,13 @@ static const char *fc_rport_state_names[
+ [RPORT_ST_LOGO] = "LOGO",
+ };
+
++static void fc_rport_rogue_destroy(struct device *dev)
++{
++ struct fc_rport *rport = dev_to_rport(dev);
++ FC_DEBUG_RPORT("Destroying rogue rport (%6x)\n", rport->port_id);
++ kfree(rport);
++}
++
+ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
+ {
+ struct fc_rport *rport;
+@@ -115,7 +122,7 @@ struct fc_rport *fc_rport_rogue_create(s
+ * upstream so it fine that this is really ugly and hacky right now.
+ */
+ device_initialize(&rport->dev);
+- rport->dev.release = fc_rport_rogue_destroy; // XXX: bwalle
++ rport->dev.release = fc_rport_rogue_destroy;
+
+ mutex_init(&rdata->rp_mutex);
+ rdata->local_port = dp->lp;
+@@ -137,11 +144,6 @@ struct fc_rport *fc_rport_rogue_create(s
+ return rport;
+ }
+
+-void fc_rport_rogue_destroy(struct fc_rport *rport)
+-{
+- kfree(rport);
+-}
+-
+ /**
+ * fc_rport_state - return a string for the state the rport is in
+ * @rport: The rport whose state we want to get a string for
+@@ -263,7 +265,7 @@ static void fc_rport_work(struct work_st
+ "(%6x).\n", ids.port_id);
+ event = RPORT_EV_FAILED;
+ }
+- fc_rport_rogue_destroy(rport);
++ put_device(&rport->dev);
+ rport = new_rport;
+ rdata = new_rport->dd_data;
+ if (rport_ops->event_callback)
+@@ -276,7 +278,7 @@ static void fc_rport_work(struct work_st
+ if (rport_ops->event_callback)
+ rport_ops->event_callback(lport, rport, event);
+ if (trans_state == FC_PORTSTATE_ROGUE)
+- fc_rport_rogue_destroy(rport);
++ put_device(&rport->dev);
+ else
+ fc_remote_port_delete(rport);
+ } else
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -169,7 +169,6 @@ struct fc_rport_libfc_priv {
+ (struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
+
+ struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
+-void fc_rport_rogue_destroy(struct fc_rport *);
+
+ static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
+ {
--- /dev/null
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: [FcOE] updated comment for order of em and ex locks
+References: bnc #459142
+
+The fc_exch structure is public but em_lock is static to fc_exch.c,
+so the comment on the ordering of these locks is updated only in
+fc_exch.c.
+
+Also removed seq.f_ctl from the comments since that field has
+already been removed.
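+
+In code, the documented order is (sketch):
+
+	spin_lock_bh(&mp->em_lock);	/* exchange manager lock first */
+	spin_lock_bh(&ep->ex_lock);	/* then the per-exchange lock */
+	/* ... */
+	spin_unlock_bh(&ep->ex_lock);
+	spin_unlock_bh(&mp->em_lock);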
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_exch.c | 5 ++++-
+ include/scsi/libfc.h | 5 ++---
+ 2 files changed, 6 insertions(+), 4 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -68,7 +68,8 @@ static struct kmem_cache *fc_em_cachep;
+ */
+ struct fc_exch_mgr {
+ enum fc_class class; /* default class for sequences */
+- spinlock_t em_lock; /* exchange manager lock */
++ spinlock_t em_lock; /* exchange manager lock,
++ must be taken before ex_lock */
+ u16 last_xid; /* last allocated exchange ID */
+ u16 min_xid; /* min exchange ID */
+ u16 max_xid; /* max exchange ID */
+@@ -179,6 +180,8 @@ static struct fc_seq *fc_seq_start_next_
+ * sequence allocation and deallocation must be locked.
+ * - exchange refcnt can be done atomicly without locks.
+ * - sequence allocation must be locked by exch lock.
++ * - If the em_lock and ex_lock must be taken at the same time, then the
++ * em_lock must be taken before the ex_lock.
+ */
+
+ /*
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -299,11 +299,10 @@ struct fc_seq {
+ /*
+ * Exchange.
+ *
+- * Locking notes: The ex_lock protects changes to the following fields:
+- * esb_stat, f_ctl, seq.ssb_stat, seq.f_ctl.
++ * Locking notes: The ex_lock protects following items:
++ * state, esb_stat, f_ctl, seq.ssb_stat
+ * seq_id
+ * sequence allocation
+- *
+ */
+ struct fc_exch {
+ struct fc_exch_mgr *em; /* exchange manager */
--- /dev/null
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: [FcOE] updated libfc fcoe module ver to 1.0.6
+References: bnc #459142
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/fcoe/libfcoe.c | 2 +-
+ drivers/scsi/libfc/fc_fcp.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -59,7 +59,7 @@ static int debug_fcoe;
+ MODULE_AUTHOR("Open-FCoE.org");
+ MODULE_DESCRIPTION("FCoE");
+ MODULE_LICENSE("GPL");
+-MODULE_VERSION("1.0.5");
++MODULE_VERSION("1.0.6");
+
+ /* fcoe host list */
+ LIST_HEAD(fcoe_hostlist);
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -42,7 +42,7 @@
+ MODULE_AUTHOR("Open-FCoE.org");
+ MODULE_DESCRIPTION("libfc");
+ MODULE_LICENSE("GPL");
+-MODULE_VERSION("1.0.5");
++MODULE_VERSION("1.0.6");
+
+ static int fc_fcp_debug;
+
--- /dev/null
+From: Robert Love <robert.w.love@intel.com>
+Subject: use an operations structure for rport callbacks
+References: bnc #459142
+
+This was called out for the disc callbacks in review
+comments when submitting to linux-scsi. It needed to be
+fixed for the rport callbacks too.
+
+This patch also fixes some spacing in the fc_rport
+structure definition as well as renaming the fc_lport_rport_event()
+function to fc_lport_rport_callback() to more clearly
+identify what it's doing.
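+
+The consumer side then reduces to (sketch, mirroring the hunks below):
+
+	static struct fc_rport_operations fc_disc_rport_ops = {
+		.event_callback = fc_disc_rport_callback,
+	};
+
+	rdata->ops = &fc_disc_rport_ops;
+
+	/* later, in fc_rport_work: */
+	if (rport_ops->event_callback)
+		rport_ops->event_callback(lport, rport, event);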
+
+Signed-off-by: Robert Love <robert.w.love@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_disc.c | 18 +++++++++-----
+ drivers/scsi/libfc/fc_lport.c | 16 ++++++++-----
+ drivers/scsi/libfc/fc_rport.c | 19 +++++++--------
+ include/scsi/libfc.h | 51 ++++++++++++++++++++++--------------------
+ 4 files changed, 57 insertions(+), 47 deletions(-)
+
+
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -154,7 +154,7 @@ void fc_disc_stop_rports(struct fc_disc
+ }
+
+ /**
+- * fc_disc_rport_event - Event handler for rport events
++ * fc_disc_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport which the event has occured on
+ * @event: The event that occured
+@@ -162,9 +162,9 @@ void fc_disc_stop_rports(struct fc_disc
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+-static void fc_disc_rport_event(struct fc_lport *lport,
+- struct fc_rport *rport,
+- enum fc_lport_event event)
++static void fc_disc_rport_callback(struct fc_lport *lport,
++ struct fc_rport *rport,
++ enum fc_rport_event event)
+ {
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+ struct fc_disc *disc = lport->disc;
+@@ -420,6 +420,10 @@ static void fc_disc_start(void (*disc_ca
+ mutex_unlock(&disc->disc_mutex);
+ }
+
++static struct fc_rport_operations fc_disc_rport_ops = {
++ .event_callback = fc_disc_rport_callback,
++};
++
+ /**
+ * fc_disc_new_target - Handle new target found by discovery
+ * @lport: FC local port
+@@ -475,7 +479,7 @@ static int fc_disc_new_target(struct fc_
+ }
+ if (rport) {
+ rp = rport->dd_data;
+- rp->event_callback = fc_disc_rport_event;
++ rp->ops = &fc_disc_rport_ops;
+ rp->rp_state = RPORT_ST_INIT;
+ lport->tt.rport_login(rport);
+ }
+@@ -658,7 +662,7 @@ static int fc_disc_gpn_ft_parse(struct f
+ rport = fc_rport_rogue_create(&dp);
+ if (rport) {
+ rdata = rport->dd_data;
+- rdata->event_callback = fc_disc_rport_event;
++ rdata->ops = &fc_disc_rport_ops;
+ rdata->local_port = lport;
+ lport->tt.rport_login(rport);
+ } else
+@@ -812,7 +816,7 @@ static void fc_disc_single(struct fc_dis
+ new_rport = fc_rport_rogue_create(dp);
+ if (new_rport) {
+ rdata = new_rport->dd_data;
+- rdata->event_callback = fc_disc_rport_event;
++ rdata->ops = &fc_disc_rport_ops;
+ kfree(dp);
+ lport->tt.rport_login(new_rport);
+ }
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -38,7 +38,7 @@
+ * hold the rport's mutex, but not the other way around.
+ *
+ * The only complication to this rule is the callbacks from the rport to
+- * the lport's event_callback function. When rports become READY they make
++ * the lport's rport_callback function. When rports become READY they make
+ * a callback to the lport so that it can track them. In the case of the
+ * directory server that callback might cause the lport to change its
+ * state, implying that the lport mutex would need to be held. This problem
+@@ -125,7 +125,7 @@ static int fc_frame_drop(struct fc_lport
+ }
+
+ /**
+- * fc_lport_rport_event - Event handler for rport events
++ * fc_lport_rport_callback - Event handler for rport events
+ * @lport: The lport which is receiving the event
+ * @rport: The rport which the event has occured on
+ * @event: The event that occured
+@@ -133,9 +133,9 @@ static int fc_frame_drop(struct fc_lport
+ * Locking Note: The rport lock should not be held when calling
+ * this function.
+ */
+-static void fc_lport_rport_event(struct fc_lport *lport,
+- struct fc_rport *rport,
+- enum fc_lport_event event)
++static void fc_lport_rport_callback(struct fc_lport *lport,
++ struct fc_rport *rport,
++ enum fc_rport_event event)
+ {
+ FC_DEBUG_LPORT("Received a %d event for port (%6x)\n", event,
+ rport->port_id);
+@@ -1265,6 +1265,10 @@ static void fc_lport_enter_rpn_id(struct
+ fc_lport_error(lport, fp);
+ }
+
++static struct fc_rport_operations fc_lport_rport_ops = {
++ .event_callback = fc_lport_rport_callback,
++};
++
+ /**
+ * fc_rport_enter_dns - Create a rport to the name server
+ * @lport: Fibre Channel local port requesting a rport for the name server
+@@ -1294,7 +1298,7 @@ static void fc_lport_enter_dns(struct fc
+ goto err;
+
+ rdata = rport->dd_data;
+- rdata->event_callback = fc_lport_rport_event;
++ rdata->ops = &fc_lport_rport_ops;
+ lport->tt.rport_login(rport);
+ return;
+
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -125,7 +125,7 @@ struct fc_rport *fc_rport_rogue_create(s
+ rdata->rp_state = RPORT_ST_INIT;
+ rdata->event = RPORT_EV_NONE;
+ rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+- rdata->event_callback = NULL;
++ rdata->ops = NULL;
+ rdata->e_d_tov = dp->lp->e_d_tov;
+ rdata->r_a_tov = dp->lp->r_a_tov;
+ INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
+@@ -216,16 +216,15 @@ static void fc_rport_work(struct work_st
+ {
+ struct fc_rport_libfc_priv *rdata =
+ container_of(work, struct fc_rport_libfc_priv, event_work);
+- enum fc_lport_event event;
++ enum fc_rport_event event;
+ enum fc_rport_trans_state trans_state;
+ struct fc_lport *lport = rdata->local_port;
+- void (*event_callback)(struct fc_lport *, struct fc_rport *,
+- enum fc_lport_event);
++ struct fc_rport_operations *rport_ops;
+ struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+
+ mutex_lock(&rdata->rp_mutex);
+ event = rdata->event;
+- event_callback = rdata->event_callback;
++ rport_ops = rdata->ops;
+
+ if (event == RPORT_EV_CREATED) {
+ struct fc_rport *new_rport;
+@@ -250,7 +249,7 @@ static void fc_rport_work(struct work_st
+ new_rdata = new_rport->dd_data;
+ new_rdata->e_d_tov = rdata->e_d_tov;
+ new_rdata->r_a_tov = rdata->r_a_tov;
+- new_rdata->event_callback = rdata->event_callback;
++ new_rdata->ops = rdata->ops;
+ new_rdata->local_port = rdata->local_port;
+ new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
+ new_rdata->trans_state = FC_PORTSTATE_REAL;
+@@ -269,15 +268,15 @@ static void fc_rport_work(struct work_st
+ fc_rport_rogue_destroy(rport);
+ rport = new_rport;
+ rdata = new_rport->dd_data;
+- if (event_callback)
+- event_callback(lport, rport, event);
++ if (rport_ops->event_callback)
++ rport_ops->event_callback(lport, rport, event);
+ } else if ((event == RPORT_EV_FAILED) ||
+ (event == RPORT_EV_LOGO) ||
+ (event == RPORT_EV_STOP)) {
+ trans_state = rdata->trans_state;
+ mutex_unlock(&rdata->rp_mutex);
+- if (event_callback)
+- event_callback(lport, rport, event);
++ if (rport_ops->event_callback)
++ rport_ops->event_callback(lport, rport, event);
+ if (trans_state == FC_PORTSTATE_ROGUE)
+ fc_rport_rogue_destroy(rport);
+ else
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -89,14 +89,6 @@ enum fc_disc_event {
+ DISC_EV_FAILED
+ };
+
+-enum fc_lport_event {
+- RPORT_EV_NONE = 0,
+- RPORT_EV_CREATED,
+- RPORT_EV_FAILED,
+- RPORT_EV_STOP,
+- RPORT_EV_LOGO
+-};
+-
+ enum fc_rport_state {
+ RPORT_ST_NONE = 0,
+ RPORT_ST_INIT, /* initialized */
+@@ -126,6 +118,19 @@ struct fc_disc_port {
+ struct work_struct rport_work;
+ };
+
++enum fc_rport_event {
++ RPORT_EV_NONE = 0,
++ RPORT_EV_CREATED,
++ RPORT_EV_FAILED,
++ RPORT_EV_STOP,
++ RPORT_EV_LOGO
++};
++
++struct fc_rport_operations {
++ void (*event_callback)(struct fc_lport *, struct fc_rport *,
++ enum fc_rport_event);
++};
++
+ /**
+ * struct fc_rport_libfc_priv - libfc internal information about a remote port
+ * @local_port: Fibre Channel host port instance
+@@ -140,24 +145,22 @@ struct fc_disc_port {
+ * @event_callback: Callback for rport READY, FAILED or LOGO
+ */
+ struct fc_rport_libfc_priv {
+- struct fc_lport *local_port;
+- enum fc_rport_state rp_state;
+- u16 flags;
++ struct fc_lport *local_port;
++ enum fc_rport_state rp_state;
++ u16 flags;
+ #define FC_RP_FLAGS_REC_SUPPORTED (1 << 0)
+ #define FC_RP_FLAGS_RETRY (1 << 1)
+- u16 max_seq;
+- unsigned int retries;
+- unsigned int e_d_tov;
+- unsigned int r_a_tov;
+- enum fc_rport_trans_state trans_state;
+- struct mutex rp_mutex;
+- struct delayed_work retry_work;
+- enum fc_lport_event event;
+- void (*event_callback)(struct fc_lport *,
+- struct fc_rport *,
+- enum fc_lport_event);
+- struct list_head peers;
+- struct work_struct event_work;
++ u16 max_seq;
++ unsigned int retries;
++ unsigned int e_d_tov;
++ unsigned int r_a_tov;
++ enum fc_rport_trans_state trans_state;
++ struct mutex rp_mutex;
++ struct delayed_work retry_work;
++ enum fc_rport_event event;
++ struct fc_rport_operations *ops;
++ struct list_head peers;
++ struct work_struct event_work;
+ };
+
+ #define PRIV_TO_RPORT(x) \
--- /dev/null
+From: Abhijeet Joglekar <abjoglek@cisco.com>
+Subject: libfc: when rport goes away (re-plogi), clean up exchanges to/from rport
+References: bnc #465596
+
+When a rport goes away, libFC does a PLOGI, which will reset exchanges
+at the rport. Clean up exchanges at our end, both in the transport and in
+libFC. If the transport hooks into exch_mgr_reset, it will call back into
+fc_exch_mgr_reset() to clean up the libFC exchanges.
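+
+Per the exch_mgr_reset convention (a non-zero s_id filters exchanges we
+originated, a non-zero d_id filters exchanges destined to that FID), both
+directions are reset (as in the hunk below):
+
+	lport->tt.exch_mgr_reset(lport, 0, port_id);	/* exchanges to the rport */
+	lport->tt.exch_mgr_reset(lport, port_id, 0);	/* exchanges from the rport */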
+
+Signed-off-by: Abhijeet Joglekar <abjoglek@cisco.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+
+ drivers/scsi/libfc/fc_rport.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -215,6 +215,7 @@ static void fc_rport_state_enter(struct
+
+ static void fc_rport_work(struct work_struct *work)
+ {
++ u32 port_id;
+ struct fc_rport_libfc_priv *rdata =
+ container_of(work, struct fc_rport_libfc_priv, event_work);
+ enum fc_rport_event event;
+@@ -280,8 +281,12 @@ static void fc_rport_work(struct work_st
+ rport_ops->event_callback(lport, rport, event);
+ if (trans_state == FC_PORTSTATE_ROGUE)
+ put_device(&rport->dev);
+- else
++ else {
++ port_id = rport->port_id;
+ fc_remote_port_delete(rport);
++ lport->tt.exch_mgr_reset(lport, 0, port_id);
++ lport->tt.exch_mgr_reset(lport, port_id, 0);
++ }
+ } else
+ mutex_unlock(&rdata->rp_mutex);
+ }
--- /dev/null
+From: Vasu Dev <vasu.dev@intel.com>
+Subject: libfc, fcoe: fixed locking issues with lport->lp_mutex around lport->link_status
+Patch-mainline: 6d235742e63f6b8912d8b200b75f9aa6d48f3e07
+References: bnc #468053
+
+The fcoe_xmit could call fc_pause when the pending skb queue length was larger
+than FCOE_MAX_QUEUE_DEPTH; fc_pause was trying to grab lport->lp_mutex to
+change lport->link_status, and that had these issues:
+
+1. The fcoe_xmit was getting called with bh disabled, thus causing
+"BUG: scheduling while atomic" when grabbing lport->lp_muex with bh disabled.
+
+2. The fc_linkup and fc_linkdown functions call the lport_enter functions with
+lport->lp_mutex held, and these enter functions in turn call fcoe_xmit to send
+lport-related FC frames, e.g. fc_linkup => fc_lport_enter_flogi to send a
+FLOGI request. In this case grabbing the same lport->lp_mutex again in
+fc_pause from fcoe_xmit would cause a deadlock.
+
+The lport->lp_mutex was used for setting FC_PAUSE in the fcoe_xmit path, but
+the FC_PAUSE bit was not used anywhere besides being set and cleared in
+lport->link_status. Instead, a separate qfull field in fc_lport now tracks
+the pending-queue-full condition, eliminating the need for lport->lp_mutex
+there and in turn avoiding the two locking issues described above.
+
+Also added a check for lp->qfull in fc_fcp_lport_queue_ready to return
+SCSI_MLQUEUE_HOST_BUSY and hold off further scsi-ml commands while
+lp->qfull is set.
+
+This patch eliminated FC_LINK_UP and FC_PAUSE and instead used dedicated
+fields in fc_lport for this, which simplified all the related conditional
+code.
+
+Also removed the fc_pause and fc_unpause functions and instead used the
+newly added lport->qfull directly in fcoe.
+
+Also fixed a circular locking dependency in fc_exch_recv_abts.
+
+These issues were blocking large file copies to a 2TB LUN.
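+
+The qfull flag is a plain byte, so the hot xmit path can set it without
+taking lp_mutex (sketch, from the hunks below):
+
+	/* fcoe_xmit(), bh context -- no mutex needed */
+	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+		lp->qfull = 1;
+
+	/* fc_fcp_lport_queue_ready() now also gates on it */
+	return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;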
+
+Signed-off-by: Vasu Dev <vasu.dev@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+ drivers/scsi/fcoe/fcoe_sw.c | 6 +++---
+ drivers/scsi/fcoe/libfcoe.c | 41 +++++++++++++++++------------------------
+ drivers/scsi/libfc/fc_exch.c | 2 +-
+ drivers/scsi/libfc/fc_fcp.c | 6 +++---
+ drivers/scsi/libfc/fc_lport.c | 38 +++++++-------------------------------
+ drivers/scsi/libfc/fc_rport.c | 2 +-
+ include/scsi/libfc.h | 12 ++----------
+ 7 files changed, 34 insertions(+), 73 deletions(-)
+
+--- a/drivers/scsi/fcoe/fcoe_sw.c
++++ b/drivers/scsi/fcoe/fcoe_sw.c
+@@ -116,7 +116,8 @@ static int fcoe_sw_lport_config(struct f
+ {
+ int i = 0;
+
+- lp->link_status = 0;
++ lp->link_up = 0;
++ lp->qfull = 0;
+ lp->max_retry_count = 3;
+ lp->e_d_tov = 2 * 1000; /* FC-FS default */
+ lp->r_a_tov = 2 * 2 * 1000;
+@@ -181,9 +182,8 @@ static int fcoe_sw_netdev_config(struct
+ if (fc_set_mfs(lp, mfs))
+ return -EINVAL;
+
+- lp->link_status = ~FC_PAUSE & ~FC_LINK_UP;
+ if (!fcoe_link_ok(lp))
+- lp->link_status |= FC_LINK_UP;
++ lp->link_up = 1;
+
+ /* offload features support */
+ if (fc->real_dev->features & NETIF_F_SG)
+--- a/drivers/scsi/fcoe/libfcoe.c
++++ b/drivers/scsi/fcoe/libfcoe.c
+@@ -505,7 +505,7 @@ int fcoe_xmit(struct fc_lport *lp, struc
+ if (rc) {
+ fcoe_insert_wait_queue(lp, skb);
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+- fc_pause(lp);
++ lp->qfull = 1;
+ }
+
+ return 0;
+@@ -719,7 +719,7 @@ static void fcoe_recv_flogi(struct fcoe_
+ * fcoe_watchdog - fcoe timer callback
+ * @vp:
+ *
+- * This checks the pending queue length for fcoe and put fcoe to be paused state
++ * This checks the pending queue length for fcoe and set lport qfull
+ * if the FCOE_MAX_QUEUE_DEPTH is reached. This is done for all fc_lport on the
+ * fcoe_hostlist.
+ *
+@@ -729,17 +729,17 @@ void fcoe_watchdog(ulong vp)
+ {
+ struct fc_lport *lp;
+ struct fcoe_softc *fc;
+- int paused = 0;
++ int qfilled = 0;
+
+ read_lock(&fcoe_hostlist_lock);
+ list_for_each_entry(fc, &fcoe_hostlist, list) {
+ lp = fc->lp;
+ if (lp) {
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+- paused = 1;
++ qfilled = 1;
+ if (fcoe_check_wait_queue(lp) < FCOE_MAX_QUEUE_DEPTH) {
+- if (paused)
+- fc_unpause(lp);
++ if (qfilled)
++ lp->qfull = 0;
+ }
+ }
+ }
+@@ -768,8 +768,7 @@ void fcoe_watchdog(ulong vp)
+ **/
+ static int fcoe_check_wait_queue(struct fc_lport *lp)
+ {
+- int rc, unpause = 0;
+- int paused = 0;
++ int rc;
+ struct sk_buff *skb;
+ struct fcoe_softc *fc;
+
+@@ -777,10 +776,10 @@ static int fcoe_check_wait_queue(struct
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+
+ /*
+- * is this interface paused?
++ * if interface pending queue full then set qfull in lport.
+ */
+ if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+- paused = 1;
++ lp->qfull = 1;
+ if (fc->fcoe_pending_queue.qlen) {
+ while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+@@ -792,11 +791,9 @@ static int fcoe_check_wait_queue(struct
+ spin_lock_bh(&fc->fcoe_pending_queue.lock);
+ }
+ if (fc->fcoe_pending_queue.qlen < FCOE_MAX_QUEUE_DEPTH)
+- unpause = 1;
++ lp->qfull = 0;
+ }
+ spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+- if ((unpause) && (paused))
+- fc_unpause(lp);
+ return fc->fcoe_pending_queue.qlen;
+ }
+
+@@ -874,7 +871,7 @@ static int fcoe_device_notification(stru
+ struct net_device *real_dev = ptr;
+ struct fcoe_softc *fc;
+ struct fcoe_dev_stats *stats;
+- u16 new_status;
++ u32 new_link_up;
+ u32 mfs;
+ int rc = NOTIFY_OK;
+
+@@ -891,17 +888,15 @@ static int fcoe_device_notification(stru
+ goto out;
+ }
+
+- new_status = lp->link_status;
++ new_link_up = lp->link_up;
+ switch (event) {
+ case NETDEV_DOWN:
+ case NETDEV_GOING_DOWN:
+- new_status &= ~FC_LINK_UP;
++ new_link_up = 0;
+ break;
+ case NETDEV_UP:
+ case NETDEV_CHANGE:
+- new_status &= ~FC_LINK_UP;
+- if (!fcoe_link_ok(lp))
+- new_status |= FC_LINK_UP;
++ new_link_up = !fcoe_link_ok(lp);
+ break;
+ case NETDEV_CHANGEMTU:
+ mfs = fc->real_dev->mtu -
+@@ -909,17 +904,15 @@ static int fcoe_device_notification(stru
+ sizeof(struct fcoe_crc_eof));
+ if (mfs >= FC_MIN_MAX_FRAME)
+ fc_set_mfs(lp, mfs);
+- new_status &= ~FC_LINK_UP;
+- if (!fcoe_link_ok(lp))
+- new_status |= FC_LINK_UP;
++ new_link_up = !fcoe_link_ok(lp);
+ break;
+ case NETDEV_REGISTER:
+ break;
+ default:
+ FC_DBG("unknown event %ld call", event);
+ }
+- if (lp->link_status != new_status) {
+- if ((new_status & FC_LINK_UP) == FC_LINK_UP)
++ if (lp->link_up != new_link_up) {
++ if (new_link_up)
+ fc_linkup(lp);
+ else {
+ stats = lp->dev_stats[smp_processor_id()];
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -1096,7 +1096,7 @@ static void fc_exch_recv_abts(struct fc_
+ ap->ba_high_seq_cnt = fh->fh_seq_cnt;
+ ap->ba_low_seq_cnt = htons(sp->cnt);
+ }
+- sp = fc_seq_start_next(sp);
++ sp = fc_seq_start_next_locked(sp);
+ spin_unlock_bh(&ep->ex_lock);
+ fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
+ fc_frame_free(rx_fp);
+--- a/drivers/scsi/libfc/fc_fcp.c
++++ b/drivers/scsi/libfc/fc_fcp.c
+@@ -20,13 +20,13 @@
+ */
+
+ #include <linux/module.h>
++#include <linux/delay.h>
+ #include <linux/kernel.h>
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <linux/scatterlist.h>
+ #include <linux/err.h>
+ #include <linux/crc32.h>
+-#include <linux/delay.h>
+
+ #include <scsi/scsi_tcq.h>
+ #include <scsi/scsi.h>
+@@ -1622,7 +1622,7 @@ out:
+ static inline int fc_fcp_lport_queue_ready(struct fc_lport *lp)
+ {
+ /* lock ? */
+- return (lp->state == LPORT_ST_READY) && (lp->link_status & FC_LINK_UP);
++ return (lp->state == LPORT_ST_READY) && lp->link_up && !lp->qfull;
+ }
+
+ /**
+@@ -1891,7 +1891,7 @@ int fc_eh_abort(struct scsi_cmnd *sc_cmd
+ lp = shost_priv(sc_cmd->device->host);
+ if (lp->state != LPORT_ST_READY)
+ return rc;
+- else if (!(lp->link_status & FC_LINK_UP))
++ else if (!lp->link_up)
+ return rc;
+
+ spin_lock_irqsave(lp->host->host_lock, flags);
+--- a/drivers/scsi/libfc/fc_lport.c
++++ b/drivers/scsi/libfc/fc_lport.c
+@@ -250,7 +250,7 @@ void fc_get_host_port_state(struct Scsi_
+ {
+ struct fc_lport *lp = shost_priv(shost);
+
+- if ((lp->link_status & FC_LINK_UP) == FC_LINK_UP)
++ if (lp->link_up)
+ fc_host_port_state(shost) = FC_PORTSTATE_ONLINE;
+ else
+ fc_host_port_state(shost) = FC_PORTSTATE_OFFLINE;
+@@ -484,7 +484,7 @@ static void fc_lport_recv_rnid_req(struc
+ * @sp: current sequence in the ADISC exchange
+ * @fp: ADISC request frame
+ *
+- * Locking Note: The lport lock is exected to be held before calling
++ * Locking Note: The lport lock is expected to be held before calling
+ * this function.
+ */
+ static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
+@@ -577,8 +577,8 @@ void fc_linkup(struct fc_lport *lport)
+ fc_host_port_id(lport->host));
+
+ mutex_lock(&lport->lp_mutex);
+- if ((lport->link_status & FC_LINK_UP) != FC_LINK_UP) {
+- lport->link_status |= FC_LINK_UP;
++ if (!lport->link_up) {
++ lport->link_up = 1;
+
+ if (lport->state == LPORT_ST_RESET)
+ fc_lport_enter_flogi(lport);
+@@ -597,8 +597,8 @@ void fc_linkdown(struct fc_lport *lport)
+ FC_DEBUG_LPORT("Link is down for port (%6x)\n",
+ fc_host_port_id(lport->host));
+
+- if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP) {
+- lport->link_status &= ~(FC_LINK_UP);
++ if (lport->link_up) {
++ lport->link_up = 0;
+ fc_lport_enter_reset(lport);
+ lport->tt.fcp_cleanup(lport);
+ }
+@@ -607,30 +607,6 @@ void fc_linkdown(struct fc_lport *lport)
+ EXPORT_SYMBOL(fc_linkdown);
+
+ /**
+- * fc_pause - Pause the flow of frames
+- * @lport: The lport to be paused
+- */
+-void fc_pause(struct fc_lport *lport)
+-{
+- mutex_lock(&lport->lp_mutex);
+- lport->link_status |= FC_PAUSE;
+- mutex_unlock(&lport->lp_mutex);
+-}
+-EXPORT_SYMBOL(fc_pause);
+-
+-/**
+- * fc_unpause - Unpause the flow of frames
+- * @lport: The lport to be unpaused
+- */
+-void fc_unpause(struct fc_lport *lport)
+-{
+- mutex_lock(&lport->lp_mutex);
+- lport->link_status &= ~(FC_PAUSE);
+- mutex_unlock(&lport->lp_mutex);
+-}
+-EXPORT_SYMBOL(fc_unpause);
+-
+-/**
+ * fc_fabric_logoff - Logout of the fabric
+ * @lport: fc_lport pointer to logoff the fabric
+ *
+@@ -977,7 +953,7 @@ static void fc_lport_enter_reset(struct
+ fc_host_fabric_name(lport->host) = 0;
+ fc_host_port_id(lport->host) = 0;
+
+- if ((lport->link_status & FC_LINK_UP) == FC_LINK_UP)
++ if (lport->link_up)
+ fc_lport_enter_flogi(lport);
+ }
+
+--- a/include/scsi/libfc.h
++++ b/include/scsi/libfc.h
+@@ -68,9 +68,6 @@
+ /*
+ * FC HBA status
+ */
+-#define FC_PAUSE (1 << 1)
+-#define FC_LINK_UP (1 << 0)
+-
+ enum fc_lport_state {
+ LPORT_ST_NONE = 0,
+ LPORT_ST_FLOGI,
+@@ -603,7 +600,8 @@ struct fc_lport {
+
+ /* Operational Information */
+ struct libfc_function_template tt;
+- u16 link_status;
++ u8 link_up;
++ u8 qfull;
+ enum fc_lport_state state;
+ unsigned long boot_time;
+
+@@ -704,12 +702,6 @@ void fc_linkup(struct fc_lport *);
+ void fc_linkdown(struct fc_lport *);
+
+ /*
+- * Pause and unpause traffic.
+- */
+-void fc_pause(struct fc_lport *);
+-void fc_unpause(struct fc_lport *);
+-
+-/*
+ * Configure the local port.
+ */
+ int fc_lport_config(struct fc_lport *);
--- /dev/null
+From: Chris Leech <christopher.leech@intel.com>
+Subject: libfc: rport retry on LS_RJT from certain ELS
+Patch-mainline: 6147a1194ba86af4266f36c9522a7b0040af98fe
+References: bnc #468054
+
+This allows any rport ELS to retry on LS_RJT.
+
+The rport error handling would only retry on resource allocation failures
+and exchange timeouts. I have a target that will occasionally reject PLOGI
+when we do a quick LOGO/PLOGI. When a critical ELS was rejected, libfc would
+fail silently, leaving the rport in a dead state.
+
+The retry count and delay are managed by fc_rport_error_retry. If the retry
+count is exceeded, fc_rport_error will be called. When retrying is not the
+correct course of action, fc_rport_error can be called directly.
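+
+Condensed, the resulting decision logic in fc_rport_error_retry looks like
+(a sketch of the hunks below):
+
+	if (PTR_ERR(fp) == -FC_EX_CLOSED)		/* never retry closed */
+		return fc_rport_error(rport, fp);
+	if (rdata->retries < rdata->local_port->max_retry_count) {
+		rdata->retries++;
+		/* exchange timeouts retry immediately, others after E_D_TOV */
+		delay = (PTR_ERR(fp) == -FC_EX_TIMEOUT) ? 0 : FC_DEF_E_D_TOV;
+		get_device(&rport->dev);
+		schedule_delayed_work(&rdata->retry_work, delay);
+		return;
+	}
+	fc_rport_error(rport, fp);			/* retries exhausted */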
+
+Signed-off-by: Chris Leech <christopher.leech@intel.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+---
+ drivers/scsi/libfc/fc_exch.c | 2
+ drivers/scsi/libfc/fc_rport.c | 111 ++++++++++++++++++++++++------------------
+ include/scsi/fc/fc_fs.h | 5 +
+ 3 files changed, 69 insertions(+), 49 deletions(-)
+
+--- a/drivers/scsi/libfc/fc_exch.c
++++ b/drivers/scsi/libfc/fc_exch.c
+@@ -32,8 +32,6 @@
+ #include <scsi/libfc.h>
+ #include <scsi/fc_encode.h>
+
+-#define FC_DEF_R_A_TOV (10 * 1000) /* resource allocation timeout */
+-
+ /*
+ * fc_exch_debug can be set in debugger or at compile time to get more logs.
+ */
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -81,6 +81,7 @@ static void fc_rport_recv_logo_req(struc
+ struct fc_seq *, struct fc_frame *);
+ static void fc_rport_timeout(struct work_struct *);
+ static void fc_rport_error(struct fc_rport *, struct fc_frame *);
++static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
+ static void fc_rport_work(struct work_struct *);
+
+ static const char *fc_rport_state_names[] = {
+@@ -405,55 +406,71 @@ static void fc_rport_timeout(struct work
+ }
+
+ /**
+- * fc_rport_error - Handler for any errors
++ * fc_rport_error - Error handler, called once retries have been exhausted
+ * @rport: The fc_rport object
+ * @fp: The frame pointer
+ *
+- * If the error was caused by a resource allocation failure
+- * then wait for half a second and retry, otherwise retry
+- * immediately.
+- *
+ * Locking Note: The rport lock is expected to be held before
+ * calling this routine
+ */
+ static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+ {
+ struct fc_rport_libfc_priv *rdata = rport->dd_data;
+- unsigned long delay = 0;
+
+ FC_DEBUG_RPORT("Error %ld in state %s, retries %d\n",
+ PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+
+- if (!fp || PTR_ERR(fp) == -FC_EX_TIMEOUT) {
+- /*
+- * Memory allocation failure, or the exchange timed out.
+- * Retry after delay
+- */
+- if (rdata->retries < rdata->local_port->max_retry_count) {
+- rdata->retries++;
+- if (!fp)
+- delay = msecs_to_jiffies(500);
+- get_device(&rport->dev);
+- schedule_delayed_work(&rdata->retry_work, delay);
+- } else {
+- switch (rdata->rp_state) {
+- case RPORT_ST_PLOGI:
+- case RPORT_ST_PRLI:
+- case RPORT_ST_LOGO:
+- rdata->event = RPORT_EV_FAILED;
+- queue_work(rport_event_queue,
+- &rdata->event_work);
+- break;
+- case RPORT_ST_RTV:
+- fc_rport_enter_ready(rport);
+- break;
+- case RPORT_ST_NONE:
+- case RPORT_ST_READY:
+- case RPORT_ST_INIT:
+- break;
+- }
+- }
++ switch (rdata->rp_state) {
++ case RPORT_ST_PLOGI:
++ case RPORT_ST_PRLI:
++ case RPORT_ST_LOGO:
++ rdata->event = RPORT_EV_FAILED;
++ queue_work(rport_event_queue,
++ &rdata->event_work);
++ break;
++ case RPORT_ST_RTV:
++ fc_rport_enter_ready(rport);
++ break;
++ case RPORT_ST_NONE:
++ case RPORT_ST_READY:
++ case RPORT_ST_INIT:
++ break;
++ }
++}
++
++/**
++ * fc_rport_error_retry - Error handler when retries are desired
++ * @rport: The fc_rport object
++ * @fp: The frame pointer
++ *
++ * If the error was an exchange timeout retry immediately,
++ * otherwise wait for E_D_TOV.
++ *
++ * Locking Note: The rport lock is expected to be held before
++ * calling this routine
++ */
++static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
++{
++ struct fc_rport_libfc_priv *rdata = rport->dd_data;
++ unsigned long delay = FC_DEF_E_D_TOV;
++
++ /* make sure this isn't an FC_EX_CLOSED error, never retry those */
++ if (PTR_ERR(fp) == -FC_EX_CLOSED)
++ return fc_rport_error(rport, fp);
++
++ if (rdata->retries < rdata->local_port->max_retry_count) {
++ FC_DEBUG_RPORT("Error %ld in state %s, retrying\n",
++ PTR_ERR(fp), fc_rport_state(rport));
++ rdata->retries++;
++ /* no additional delay on exchange timeouts */
++ if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
++ delay = 0;
++ get_device(&rport->dev);
++ schedule_delayed_work(&rdata->retry_work, delay);
++ return;
+ }
++
++ return fc_rport_error(rport, fp);
+ }
+
+ /**
+@@ -490,7 +507,7 @@ static void fc_rport_plogi_resp(struct f
+ }
+
+ if (IS_ERR(fp)) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ goto err;
+ }
+
+@@ -522,7 +539,7 @@ static void fc_rport_plogi_resp(struct f
+ else
+ fc_rport_enter_prli(rport);
+ } else
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+
+ out:
+ fc_frame_free(fp);
+@@ -552,14 +569,14 @@ static void fc_rport_enter_plogi(struct
+ rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
+ if (!fp) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ return;
+ }
+ rdata->e_d_tov = lport->e_d_tov;
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
+ fc_rport_plogi_resp, rport, lport->e_d_tov))
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ else
+ get_device(&rport->dev);
+ }
+@@ -599,7 +616,7 @@ static void fc_rport_prli_resp(struct fc
+ }
+
+ if (IS_ERR(fp)) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ goto err;
+ }
+
+@@ -657,7 +674,7 @@ static void fc_rport_logo_resp(struct fc
+ rport->port_id);
+
+ if (IS_ERR(fp)) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ goto err;
+ }
+
+@@ -707,13 +724,13 @@ static void fc_rport_enter_prli(struct f
+
+ fp = fc_frame_alloc(lport, sizeof(*pp));
+ if (!fp) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
+ fc_rport_prli_resp, rport, lport->e_d_tov))
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ else
+ get_device(&rport->dev);
+ }
+@@ -804,13 +821,13 @@ static void fc_rport_enter_rtv(struct fc
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
+ if (!fp) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
+ fc_rport_rtv_resp, rport, lport->e_d_tov))
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ else
+ get_device(&rport->dev);
+ }
+@@ -835,13 +852,13 @@ static void fc_rport_enter_logo(struct f
+
+ fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
+ if (!fp) {
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ return;
+ }
+
+ if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
+ fc_rport_logo_resp, rport, lport->e_d_tov))
+- fc_rport_error(rport, fp);
++ fc_rport_error_retry(rport, fp);
+ else
+ get_device(&rport->dev);
+ }
+--- a/include/scsi/fc/fc_fs.h
++++ b/include/scsi/fc/fc_fs.h
+@@ -337,4 +337,9 @@ enum fc_pf_rjt_reason {
+ FC_RJT_VENDOR = 0xff, /* vendor specific reject */
+ };
+
++/* default timeout values */
++
++#define FC_DEF_E_D_TOV 2000UL
++#define FC_DEF_R_A_TOV 10000UL
++
+ #endif /* _FC_FS_H_ */
--- /dev/null
+From: Danny Kukawka <dkukawka@suse.de>
+Subject: b43legacy: fix led naming
+
+Fixed LED device naming for the b43legacy driver. According to the
+documentation of the LED subsystem/class, the naming should be
+"devicename:colour:function", with sections that do not apply
+left blank.
+
+This should lead to e.g. "b43legacy-%s::rx" instead of
+"b43legacy-%s:rx".
+
+Signed-off-by: Danny Kukawka <dkukawka@suse.de>
+---
+ leds.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/wireless/b43legacy/leds.c b/drivers/net/wireless/b43legacy/leds.c
+index cacb786..cb4511f 100644
+--- a/drivers/net/wireless/b43legacy/leds.c
++++ b/drivers/net/wireless/b43legacy/leds.c
+@@ -146,12 +146,12 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
+ case B43legacy_LED_TRANSFER:
+ case B43legacy_LED_APTRANSFER:
+ snprintf(name, sizeof(name),
+- "b43legacy-%s:tx", wiphy_name(hw->wiphy));
++ "b43legacy-%s::tx", wiphy_name(hw->wiphy));
+ b43legacy_register_led(dev, &dev->led_tx, name,
+ ieee80211_get_tx_led_name(hw),
+ led_index, activelow);
+ snprintf(name, sizeof(name),
+- "b43legacy-%s:rx", wiphy_name(hw->wiphy));
++ "b43legacy-%s::rx", wiphy_name(hw->wiphy));
+ b43legacy_register_led(dev, &dev->led_rx, name,
+ ieee80211_get_rx_led_name(hw),
+ led_index, activelow);
+@@ -161,7 +161,7 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
+ case B43legacy_LED_RADIO_B:
+ case B43legacy_LED_MODE_BG:
+ snprintf(name, sizeof(name),
+- "b43legacy-%s:radio", wiphy_name(hw->wiphy));
++ "b43legacy-%s::radio", wiphy_name(hw->wiphy));
+ b43legacy_register_led(dev, &dev->led_radio, name,
+ b43legacy_rfkill_led_name(dev),
+ led_index, activelow);
+@@ -172,7 +172,7 @@ static void b43legacy_map_led(struct b43legacy_wldev *dev,
+ case B43legacy_LED_WEIRD:
+ case B43legacy_LED_ASSOC:
+ snprintf(name, sizeof(name),
+- "b43legacy-%s:assoc", wiphy_name(hw->wiphy));
++ "b43legacy-%s::assoc", wiphy_name(hw->wiphy));
+ b43legacy_register_led(dev, &dev->led_assoc, name,
+ ieee80211_get_assoc_led_name(hw),
+ led_index, activelow);
+
--- /dev/null
+From: Jan Kara <jack@suse.cz>
+Subject: ocfs2: Fix deadlock on umount
+References: bnc#531716
+Patch-mainline: 2.6.32
+
+In patches.fixes/ocfs2-push-out-dropping-of-dentry-lock-to-ocfs2_wq.patch
+we moved the dentry lock put process into ocfs2_wq. This causes problems during
+umount because ocfs2_wq can drop references to inodes while they are being
+invalidated by invalidate_inodes(), causing all sorts of nasty things
+(invalidate_inodes() ending up in an infinite loop, "Busy inodes after umount"
+messages, etc.).
+
+We fix the problem by stopping ocfs2_wq from doing any further releasing of
+inode references on the superblock being unmounted, waiting until it finishes
+the current round of releasing, and finally cleaning up all the references in
+dentry_lock_list from ocfs2_dismount_volume().
+
+The issue was tracked down by Tao Ma <tao.ma@oracle.com>.
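+
+Condensed, the shutdown ordering after this patch looks as follows
+(a sketch of the code added below, not a verbatim quote):
+
+    /* ocfs2_kill_sb(): forbid further queueing of drop work and wait
+     * for a running worker before the generic unmount path runs */
+    spin_lock(&dentry_list_lock);
+    ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED);
+    spin_unlock(&dentry_list_lock);
+    cancel_work_sync(&osb->dentry_lock_work);
+    kill_block_super(sb);
+
+    /* ...which reaches ocfs2_dismount_volume(), where the list is
+     * drained synchronously (-1 means no limit on the drop count) */
+    ocfs2_drop_all_dl_inodes(osb);    /* __ocfs2_drop_dl_inodes(osb, -1) */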
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Joel Becker <joel.becker@oracle.com>
+
+diff -rupX /home/jack/.kerndiffexclude linux-2.6.27-SLE11_BRANCH/fs/ocfs2/dcache.c linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/dcache.c
+--- linux-2.6.27-SLE11_BRANCH/fs/ocfs2/dcache.c 2009-08-18 15:39:32.000000000 +0200
++++ linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/dcache.c 2009-08-18 15:44:51.000000000 +0200
+@@ -295,22 +295,19 @@ out_attach:
+ return ret;
+ }
+
+-static DEFINE_SPINLOCK(dentry_list_lock);
++DEFINE_SPINLOCK(dentry_list_lock);
+
+ /* We limit the number of dentry locks to drop in one go. We have
+ * this limit so that we don't starve other users of ocfs2_wq. */
+ #define DL_INODE_DROP_COUNT 64
+
+ /* Drop inode references from dentry locks */
+-void ocfs2_drop_dl_inodes(struct work_struct *work)
++static void __ocfs2_drop_dl_inodes(struct ocfs2_super *osb, int drop_count)
+ {
+- struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+- dentry_lock_work);
+ struct ocfs2_dentry_lock *dl;
+- int drop_count = DL_INODE_DROP_COUNT;
+
+ spin_lock(&dentry_list_lock);
+- while (osb->dentry_lock_list && drop_count--) {
++ while (osb->dentry_lock_list && (drop_count < 0 || drop_count--)) {
+ dl = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl->dl_next;
+ spin_unlock(&dentry_list_lock);
+@@ -318,11 +315,32 @@ void ocfs2_drop_dl_inodes(struct work_st
+ kfree(dl);
+ spin_lock(&dentry_list_lock);
+ }
+- if (osb->dentry_lock_list)
++ spin_unlock(&dentry_list_lock);
++}
++
++void ocfs2_drop_dl_inodes(struct work_struct *work)
++{
++ struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
++ dentry_lock_work);
++
++ __ocfs2_drop_dl_inodes(osb, DL_INODE_DROP_COUNT);
++ /*
++ * Don't queue dropping if umount is in progress. We flush the
++ * list in ocfs2_dismount_volume
++ */
++ spin_lock(&dentry_list_lock);
++ if (osb->dentry_lock_list &&
++ !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ spin_unlock(&dentry_list_lock);
+ }
+
++/* Flush the whole work queue */
++void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb)
++{
++ __ocfs2_drop_dl_inodes(osb, -1);
++}
++
+ /*
+ * ocfs2_dentry_iput() and friends.
+ *
+@@ -353,7 +371,8 @@ static void ocfs2_drop_dentry_lock(struc
+ /* We leave dropping of inode reference to ocfs2_wq as that can
+ * possibly lead to inode deletion which gets tricky */
+ spin_lock(&dentry_list_lock);
+- if (!osb->dentry_lock_list)
++ if (!osb->dentry_lock_list &&
++ !ocfs2_test_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED))
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ dl->dl_next = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl;
+diff -rupX /home/jack/.kerndiffexclude linux-2.6.27-SLE11_BRANCH/fs/ocfs2/dcache.h linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/dcache.h
+--- linux-2.6.27-SLE11_BRANCH/fs/ocfs2/dcache.h 2009-08-18 15:39:32.000000000 +0200
++++ linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/dcache.h 2009-08-18 15:44:51.000000000 +0200
+@@ -49,10 +49,13 @@ struct ocfs2_dentry_lock {
+ int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
+ u64 parent_blkno);
+
++extern spinlock_t dentry_list_lock;
++
+ void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
+ struct ocfs2_dentry_lock *dl);
+
+ void ocfs2_drop_dl_inodes(struct work_struct *work);
++void ocfs2_drop_all_dl_inodes(struct ocfs2_super *osb);
+
+ struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
+ int skip_unhashed);
+diff -rupX /home/jack/.kerndiffexclude linux-2.6.27-SLE11_BRANCH/fs/ocfs2/ocfs2.h linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/ocfs2.h
+--- linux-2.6.27-SLE11_BRANCH/fs/ocfs2/ocfs2.h 2009-08-18 15:39:32.000000000 +0200
++++ linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/ocfs2.h 2009-08-18 15:44:51.000000000 +0200
+@@ -201,10 +201,12 @@ enum ocfs2_mount_options
+ OCFS2_MOUNT_GRPQUOTA = 1 << 10, /* We support group quotas */
+ };
+
+-#define OCFS2_OSB_SOFT_RO 0x0001
+-#define OCFS2_OSB_HARD_RO 0x0002
+-#define OCFS2_OSB_ERROR_FS 0x0004
+-#define OCFS2_DEFAULT_ATIME_QUANTUM 60
++#define OCFS2_OSB_SOFT_RO 0x0001
++#define OCFS2_OSB_HARD_RO 0x0002
++#define OCFS2_OSB_ERROR_FS 0x0004
++#define OCFS2_OSB_DROP_DENTRY_LOCK_IMMED 0x0008
++
++#define OCFS2_DEFAULT_ATIME_QUANTUM 60
+
+ struct ocfs2_journal;
+ struct ocfs2_slot_info;
+@@ -400,6 +402,18 @@ static inline void ocfs2_set_osb_flag(st
+ spin_unlock(&osb->osb_lock);
+ }
+
++
++static inline unsigned long ocfs2_test_osb_flag(struct ocfs2_super *osb,
++ unsigned long flag)
++{
++ unsigned long ret;
++
++ spin_lock(&osb->osb_lock);
++ ret = osb->osb_flags & flag;
++ spin_unlock(&osb->osb_lock);
++ return ret;
++}
++
+ static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
+ int hard)
+ {
+diff -rupX /home/jack/.kerndiffexclude linux-2.6.27-SLE11_BRANCH/fs/ocfs2/super.c linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/super.c
+--- linux-2.6.27-SLE11_BRANCH/fs/ocfs2/super.c 2009-08-18 15:39:33.000000000 +0200
++++ linux-2.6.27-SLE11_BRANCH-1-dentry_lock_drop//fs/ocfs2/super.c 2009-08-18 15:46:56.000000000 +0200
+@@ -1014,14 +1014,27 @@ static int ocfs2_get_sb(struct file_syst
+ mnt);
+ }
+
++static void ocfs2_kill_sb(struct super_block *sb)
++{
++ struct ocfs2_super *osb = OCFS2_SB(sb);
++
++ /* Prevent further queueing of inode drop events */
++ spin_lock(&dentry_list_lock);
++ ocfs2_set_osb_flag(osb, OCFS2_OSB_DROP_DENTRY_LOCK_IMMED);
++ spin_unlock(&dentry_list_lock);
++ /* Wait for work to finish and/or remove it */
++ cancel_work_sync(&osb->dentry_lock_work);
++
++ kill_block_super(sb);
++}
++
+ static struct file_system_type ocfs2_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ocfs2",
+ .get_sb = ocfs2_get_sb, /* is this called when we mount
+ * the fs? */
+- .kill_sb = kill_block_super, /* set to the generic one
+- * right now, but do we
+- * need to change that? */
++ .kill_sb = ocfs2_kill_sb,
++
+ .fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
+ .next = NULL
+ };
+@@ -1621,6 +1634,12 @@ static void ocfs2_dismount_volume(struct
+ osb = OCFS2_SB(sb);
+ BUG_ON(!osb);
+
++ /*
++ * Flush inode dropping work queue so that deletes are
++ * performed while the filesystem is still working
++ */
++ ocfs2_drop_all_dl_inodes(osb);
++
+ ocfs2_disable_quotas(osb);
+
+ ocfs2_shutdown_local_alloc(osb);
--- /dev/null
+From: Jan Kara <jack@suse.cz>
+Subject: [PATCH] ext2: Do not update mtime of a moved directory when parent has not changed
+References: bnc#493392
+Patch-mainline: 2.6.30
+
+If the parent of the moved directory has not changed, there's no real
+reason to change mtime. The specs don't seem to say anything about this
+particular case, and e.g. ext3 does not change mtime in this case.
+So we become a tiny bit more consistent.
+
+Spotted by ronny.pretzsch@dfs.de, initial fix by Jörn Engel <joern@logfs.org>.
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+---
+ fs/ext2/namei.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/fs/ext2/namei.c
++++ b/fs/ext2/namei.c
+@@ -355,7 +355,10 @@ static int ext2_rename (struct inode * o
+ inode_dec_link_count(old_inode);
+
+ if (dir_de) {
+- ext2_set_link(old_inode, dir_de, dir_page, new_dir);
++ /* Set link only if parent has changed and thus avoid setting
++ * of mtime of the moved directory on a pure rename. */
++ if (old_dir != new_dir)
++ ext2_set_link(old_inode, dir_de, dir_page, new_dir);
+ inode_dec_link_count(old_dir);
+ }
+ return 0;
--- /dev/null
+From: Jan Kara <jack@suse.cz>
+Subject: [PATCH] ext3: Avoid false EIO errors
+References: bnc#479730
+
+Sometimes block_write_begin() can map buffers in a page but later we fail to
+copy data into those buffers (because the source page has been paged out in
+the meantime). We then end up with !uptodate mapped buffers. To add a bit more
+to the confusion, block_write_end() does not commit any data (and thus does
+not mark any buffers as uptodate) if we didn't succeed in copying all the data.
+
+Commit f4fc66a894546bdc88a775d0e83ad20a65210bcb (ext3: convert to new aops)
+missed these cases, and thus we were inserting non-uptodate buffers into the
+transaction's list, which confuses the JBD code: it reports IO errors, aborts
+the transaction, and generally makes users afraid for their data ;-P.
+
+This patch fixes the problem by reorganizing the ext3_..._write_end() code to
+first call block_write_end(), which marks buffers with valid data uptodate,
+and only after that file uptodate buffers into the transaction's lists. Also
+fix a problem where we could leave blocks allocated beyond i_size (i_disksize
+in fact).
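+
+Condensed, the new ordered-mode write_end path is (a sketch of the
+diff below):
+
+    /* 1) Commit the copied data; this marks exactly the buffers that
+     *    received data uptodate. */
+    copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+
+    /* 2) File only buffers that are both mapped and uptodate into the
+     *    transaction; journal_dirty_data_fn() checks both flags. */
+    ret = walk_page_buffers(handle, page_buffers(page), from, to, NULL,
+                            journal_dirty_data_fn);
+
+    /* 3) Keep i_disksize at pos + len (blocks really allocated) and
+     *    i_size at pos + copied (data really copied). */
+    update_file_sizes(inode, pos, len, copied);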
+
+Signed-off-by: Jan Kara <jack@suse.cz>
+
+---
+ fs/ext3/inode.c | 99 +++++++++++++++++++++++---------------------------------
+ 1 file changed, 42 insertions(+), 57 deletions(-)
+
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -1195,6 +1195,18 @@ int ext3_journal_dirty_data(handle_t *ha
+ return err;
+ }
+
++/* For ordered writepage and write_end functions */
++static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
++{
++ /*
++ * Write could have mapped the buffer but it didn't copy the data in
++ * yet. So avoid filing such buffer into a transaction.
++ */
++ if (buffer_mapped(bh) && buffer_uptodate(bh))
++ return ext3_journal_dirty_data(handle, bh);
++ return 0;
++}
++
+ /* For write_end() in data=journal mode */
+ static int write_end_fn(handle_t *handle, struct buffer_head *bh)
+ {
+@@ -1205,26 +1217,29 @@ static int write_end_fn(handle_t *handle
+ }
+
+ /*
+- * Generic write_end handler for ordered and writeback ext3 journal modes.
+- * We can't use generic_write_end, because that unlocks the page and we need to
+- * unlock the page after ext3_journal_stop, but ext3_journal_stop must run
+- * after block_write_end.
++ * This is nasty and subtle: ext3_write_begin() could have allocated blocks
++ * for the whole page but later we failed to copy the data in. So the disk
++ * size we really have allocated is pos + len (block_write_end() has zeroed
++ * the freshly allocated buffers so we aren't going to write garbage). But we
++ * want to keep i_size at the place where data copying finished so that we
++ * don't confuse readers. The worst what can happen is that we expose a page
++ * of zeros at the end of file after a crash...
+ */
+-static int ext3_generic_write_end(struct file *file,
+- struct address_space *mapping,
+- loff_t pos, unsigned len, unsigned copied,
+- struct page *page, void *fsdata)
++static void update_file_sizes(struct inode *inode, loff_t pos, unsigned len,
++ unsigned copied)
+ {
+- struct inode *inode = file->f_mapping->host;
++ int mark_dirty = 0;
+
+- copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
+-
+- if (pos+copied > inode->i_size) {
+- i_size_write(inode, pos+copied);
+- mark_inode_dirty(inode);
++ if (pos + len > EXT3_I(inode)->i_disksize) {
++ mark_dirty = 1;
++ EXT3_I(inode)->i_disksize = pos + len;
+ }
+-
+- return copied;
++ if (pos + copied > inode->i_size) {
++ i_size_write(inode, pos + copied);
++ mark_dirty = 1;
++ }
++ if (mark_dirty)
++ mark_inode_dirty(inode);
+ }
+
+ /*
+@@ -1244,29 +1259,17 @@ static int ext3_ordered_write_end(struct
+ unsigned from, to;
+ int ret = 0, ret2;
+
++ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
++
++ /* See comment at update_file_sizes() for why we check buffers upto
++ * from + len */
+ from = pos & (PAGE_CACHE_SIZE - 1);
+ to = from + len;
+-
+ ret = walk_page_buffers(handle, page_buffers(page),
+- from, to, NULL, ext3_journal_dirty_data);
++ from, to, NULL, journal_dirty_data_fn);
+
+- if (ret == 0) {
+- /*
+- * generic_write_end() will run mark_inode_dirty() if i_size
+- * changes. So let's piggyback the i_disksize mark_inode_dirty
+- * into that.
+- */
+- loff_t new_i_size;
+-
+- new_i_size = pos + copied;
+- if (new_i_size > EXT3_I(inode)->i_disksize)
+- EXT3_I(inode)->i_disksize = new_i_size;
+- ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
+- page, fsdata);
+- copied = ret2;
+- if (ret2 < 0)
+- ret = ret2;
+- }
++ if (ret == 0)
++ update_file_sizes(inode, pos, len, copied);
+ ret2 = ext3_journal_stop(handle);
+ if (!ret)
+ ret = ret2;
+@@ -1283,22 +1286,11 @@ static int ext3_writeback_write_end(stru
+ {
+ handle_t *handle = ext3_journal_current_handle();
+ struct inode *inode = file->f_mapping->host;
+- int ret = 0, ret2;
+- loff_t new_i_size;
++ int ret;
+
+- new_i_size = pos + copied;
+- if (new_i_size > EXT3_I(inode)->i_disksize)
+- EXT3_I(inode)->i_disksize = new_i_size;
+-
+- ret2 = ext3_generic_write_end(file, mapping, pos, len, copied,
+- page, fsdata);
+- copied = ret2;
+- if (ret2 < 0)
+- ret = ret2;
+-
+- ret2 = ext3_journal_stop(handle);
+- if (!ret)
+- ret = ret2;
++ copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
++ update_file_sizes(inode, pos, len, copied);
++ ret = ext3_journal_stop(handle);
+ unlock_page(page);
+ page_cache_release(page);
+
+@@ -1412,13 +1404,6 @@ static int bput_one(handle_t *handle, st
+ return 0;
+ }
+
+-static int journal_dirty_data_fn(handle_t *handle, struct buffer_head *bh)
+-{
+- if (buffer_mapped(bh))
+- return ext3_journal_dirty_data(handle, bh);
+- return 0;
+-}
+-
+ /*
+ * Note that we always start a transaction even if we're not journalling
+ * data. This is to preserve ordering: any hole instantiation within
--- /dev/null
+From: Jan Blunck <jblunck@suse.de>
+Subject: ia64-kvm: fix sparse warnings
+
+This patch fixes some sparse warnings about dubious one-bit signed bitfields.
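+
+For reference, a signed one-bit bitfield can only represent 0 and -1,
+which is what sparse warns about; a minimal user-space illustration
+(not part of the patch):
+
+    #include <stdio.h>
+
+    struct bits {
+        int          s:1;   /* signed one-bit field: holds 0 or -1 */
+        unsigned int u:1;   /* unsigned one-bit field: holds 0 or 1 */
+    };
+
+    int main(void)
+    {
+        struct bits b = { .s = 1, .u = 1 };
+
+        /* With gcc this prints "s=-1 u=1": storing 1 in the signed
+         * field yields -1, so a test like (b.s == 1) never holds. */
+        printf("s=%d u=%u\n", b.s, b.u);
+        return 0;
+    }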
+
+Signed-off-by: Jan Blunck <jblunck@suse.de>
+---
+ arch/ia64/kvm/vti.h | 26 +++++++++++++-------------
+ 1 file changed, 13 insertions(+), 13 deletions(-)
+
+Index: b/arch/ia64/kvm/vti.h
+===================================================================
+--- a/arch/ia64/kvm/vti.h
++++ b/arch/ia64/kvm/vti.h
+@@ -83,13 +83,13 @@
+ union vac {
+ unsigned long value;
+ struct {
+- int a_int:1;
+- int a_from_int_cr:1;
+- int a_to_int_cr:1;
+- int a_from_psr:1;
+- int a_from_cpuid:1;
+- int a_cover:1;
+- int a_bsw:1;
++ unsigned int a_int:1;
++ unsigned int a_from_int_cr:1;
++ unsigned int a_to_int_cr:1;
++ unsigned int a_from_psr:1;
++ unsigned int a_from_cpuid:1;
++ unsigned int a_cover:1;
++ unsigned int a_bsw:1;
+ long reserved:57;
+ };
+ };
+@@ -97,12 +97,12 @@ union vac {
+ union vdc {
+ unsigned long value;
+ struct {
+- int d_vmsw:1;
+- int d_extint:1;
+- int d_ibr_dbr:1;
+- int d_pmc:1;
+- int d_to_pmd:1;
+- int d_itm:1;
++ unsigned int d_vmsw:1;
++ unsigned int d_extint:1;
++ unsigned int d_ibr_dbr:1;
++ unsigned int d_pmc:1;
++ unsigned int d_to_pmd:1;
++ unsigned int d_itm:1;
+ long reserved:58;
+ };
+ };
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: Add partition id, coherence id, and region size to UV
+References: bnc#442455
+
+Add partition id, coherence id, and region size to UV.
+
+The SGI xp drivers (drivers/misc/sgi-xp) are used on both
+sn (Itanium) and uv (Tukwila). Using the same names
+(sn_partition_id, sn_coherency_id, sn_region_size)
+simplifies the driver code.
+
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+
+ arch/ia64/uv/kernel/setup.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/ia64/uv/kernel/setup.c
++++ b/arch/ia64/uv/kernel/setup.c
+@@ -19,6 +19,12 @@ EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info)
+
+ #ifdef CONFIG_IA64_SGI_UV
+ int sn_prom_type;
++long sn_partition_id;
++EXPORT_SYMBOL(sn_partition_id);
++long sn_coherency_id;
++EXPORT_SYMBOL_GPL(sn_coherency_id);
++long sn_region_size;
++EXPORT_SYMBOL(sn_region_size);
+ #endif
+
+ struct redir_addr {
--- /dev/null
+From: Bernhard Walle <bwalle@suse.de>
+Subject: Add UV watchlist support
+References: bnc#442455
+
+Add UV watchlist support.
+
+This is used by SGI xp drivers (drivers/misc/sgi-xp).
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+
+---
+
+ arch/ia64/include/asm/sn/sn_sal.h | 45 ++++++++++++++++++++++++++++++++++++++
+ 1 file changed, 45 insertions(+)
+
+Index: linux/arch/ia64/include/asm/sn/sn_sal.h
+===================================================================
+--- linux.orig/arch/ia64/include/asm/sn/sn_sal.h 2008-11-05 09:21:48.690243174 -0600
++++ linux/arch/ia64/include/asm/sn/sn_sal.h 2008-11-05 09:22:01.847928152 -0600
+@@ -90,6 +90,8 @@
+ #define SN_SAL_SET_CPU_NUMBER 0x02000068
+
+ #define SN_SAL_KERNEL_LAUNCH_EVENT 0x02000069
++#define SN_SAL_WATCHLIST_ALLOC 0x02000070
++#define SN_SAL_WATCHLIST_FREE 0x02000071
+
+ /*
+ * Service-specific constants
+@@ -1183,6 +1185,49 @@ ia64_sn_kernel_launch_event(void)
+ {
+ struct ia64_sal_retval rv;
+ SAL_CALL_NOLOCK(rv, SN_SAL_KERNEL_LAUNCH_EVENT, 0, 0, 0, 0, 0, 0, 0);
++ return rv.status;
++}
++
++union sn_watchlist_u {
++ u64 val;
++ struct {
++ u64 blade : 16,
++ size : 32,
++ filler : 16;
++ };
++};
++
++static inline int
++sn_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
++ unsigned long *intr_mmr_offset)
++{
++ struct ia64_sal_retval rv;
++ unsigned long addr;
++ union sn_watchlist_u size_blade;
++ int watchlist;
++
++ addr = (unsigned long)mq;
++ size_blade.size = mq_size;
++ size_blade.blade = blade;
++
++ /*
++ * bios returns watchlist number or negative error number.
++ */
++ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_ALLOC, addr,
++ size_blade.val, (u64)intr_mmr_offset,
++ (u64)&watchlist, 0, 0, 0);
++ if (rv.status < 0)
++ return rv.status;
++
++ return watchlist;
++}
++
++static inline int
++sn_mq_watchlist_free(int blade, int watchlist_num)
++{
++ struct ia64_sal_retval rv;
++ ia64_sal_oemcall_nolock(&rv, SN_SAL_WATCHLIST_FREE, blade,
++ watchlist_num, 0, 0, 0, 0, 0);
+ return rv.status;
+ }
+ #endif /* _ASM_IA64_SN_SN_SAL_H */
--- /dev/null
+From: Danny Kukawka <dkukawka@suse.de>
+Subject: iwlwifi: another led naming fix
+
+Fixed led device naming for the iwlwifi (iwl-3945) driver.
+According to the documentation of the led subsystem/class, the
+naming should be "devicename:colour:function", and sections
+that do not apply should be left blank.
+
+This should lead to e.g. "iwl-%s::RX" instead of "iwl-%s:RX".
+
+Signed-off-by: Danny Kukawka <dkukawka@suse.de>
+Acked-by: Reinette Chatre <reinette.chatre@intel.com>
+--
+ iwl-led.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+index 4c63890..09f9350 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
++++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+@@ -317,7 +317,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_RADIO].name,
+- sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
++ sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
+ wiphy_name(priv->hw->wiphy));
+
+ priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
+@@ -333,7 +333,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
+- sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
++ sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl3945_led_register_led(priv,
+@@ -350,7 +350,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_RX].name,
+- sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
++ sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl3945_led_register_led(priv,
+@@ -366,7 +366,7 @@ int iwl3945_led_register(struct iwl3945_priv *priv)
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_TX].name,
+- sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
++ sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl3945_led_register_led(priv,
+
--- /dev/null
+From: Danny Kukawka <dkukawka@suse.de>
+Subject: iwlwifi: fix led naming
+
+Fixed led device naming for the iwl driver. According to the
+documentation of the led subsystem/class, the naming should be
+"devicename:colour:function", and sections that do not apply
+should be left blank.
+
+This should lead to e.g. "iwl-phy0::RX" instead of "iwl-phy0:RX".
+
+Signed-off-by: Danny Kukawka <dkukawka@suse.de>
+Acked-by: Reinette Chatre <reinette.chatre@intel.com>
+--
+ drivers/net/wireless/iwlwifi/iwl-led.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/iwl-led.c
++++ b/drivers/net/wireless/iwlwifi/iwl-led.c
+@@ -353,7 +353,7 @@ int iwl_leds_register(struct iwl_priv *p
+
+ trigger = ieee80211_get_radio_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_RADIO].name,
+- sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s:radio",
++ sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
+ wiphy_name(priv->hw->wiphy));
+
+ priv->led[IWL_LED_TRG_RADIO].led_on = iwl4965_led_on_reg;
+@@ -367,7 +367,7 @@ int iwl_leds_register(struct iwl_priv *p
+
+ trigger = ieee80211_get_assoc_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
+- sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s:assoc",
++ sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
+@@ -383,7 +383,7 @@ int iwl_leds_register(struct iwl_priv *p
+
+ trigger = ieee80211_get_rx_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_RX].name,
+- sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s:RX",
++ sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
+@@ -398,7 +398,7 @@ int iwl_leds_register(struct iwl_priv *p
+
+ trigger = ieee80211_get_tx_led_name(priv->hw);
+ snprintf(priv->led[IWL_LED_TRG_TX].name,
+- sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s:TX",
++ sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
+ wiphy_name(priv->hw->wiphy));
+
+ ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
--- /dev/null
+From: Jay Lan <jlan@sgi.com>
+Subject: Fix CONFIG_KDB_KDUMP on xSeries
+Patch-mainline: not yet
+References: bnc#436454
+
+This patch fixes a problem where the capture kernel crashes with various
+backtraces after the machine has been crashed (via both sysrq-trigger and
+panic()). Machines where that problem could be reproduced at SUSE were
+molitor.suse.de and korner.suse.de.
+
+KDB was turned off in those scenarios.
+
+This patch succeeds in the following scenarios:
+
+ a) kdb=0
+ modprobe crasher call_panic
+
+ b) kdb=1/0
+ echo c > /proc/sysrq-trigger
+
+ c) kdb=1
+ ESC KDB
+ kdb> kdump
+
+But it fails in:
+
+ kdb=1
+ modprobe crasher call_panic
+
+That still has to be investigated, but I think it's unrelated to this patch
+and is not a regression.
+
+
+Signed-off-by: Jay Lan <jlan@sgi.com>
+Signed-off-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kdb/kdba_support.c | 4 +---
+ 1 file changed, 1 insertion(+), 3 deletions(-)
+
+--- a/arch/x86/kdb/kdba_support.c
++++ b/arch/x86/kdb/kdba_support.c
+@@ -35,8 +35,6 @@ void kdba_kdump_prepare(struct pt_regs *
+ if (regs == NULL)
+ regs = &r;
+
+- machine_crash_shutdown_begin();
+-
+ for (i = 1; i < NR_CPUS; ++i) {
+ if (!cpu_online(i))
+ continue;
+@@ -44,7 +42,7 @@ void kdba_kdump_prepare(struct pt_regs *
+ KDB_STATE_SET_CPU(KEXEC, i);
+ }
+
+- machine_crash_shutdown_end(regs);
++ machine_crash_shutdown(regs);
+ }
+
+ extern void halt_current_cpu(struct pt_regs *);
--- /dev/null
+From: Bernhard Walle <bwalle@suse.de>
+Subject: [PATCH] Fix NULL pointer dereference when regs == NULL
+References: bnc#439007
+
+This patch fixes following problem:
+
+When panic() is called in user context, for example by
+
+ # modprobe crasher call_panic
+
+then KDB crashed in kdba_getpc() because regs was not checked for being
+NULL:
+
+ Entering kdb (current=0xffff880036c747c0, pid 4420) on processor 1 Oops: <NULL>
+ BUG: unable to handle kernel NULL pointer dereference at 0000000000000080
+ IP: [<ffffffff80415ee2>] kdba_getpc+0x0/0x8
+ PGD 379f4067 PUD 39997067 PMD 0
+ Oops: 0000 [1] SMP
+ last sysfs file: /sys/devices/pci0000:00/0000:00:1c.5/0000:06:00.0/irq
+ kdb: Debugger re-entered on cpu 1, new reason = 5
+ Not executing a kdb command
+ No longjmp available for recovery
+ Cannot recover, allowing event to proceed
+
+Even after that was fixed, kdba_dumpregs() crashed because
+the return value of kdba_getpc() was assumed to be non-NULL.
+
+This patch simply ports the error handling from its 32-bit counterpart
+implementation. After applying that fix, the test mentioned above succeeds:
+
+ Entering kdb (current=0xffff8800355fc480, pid 7564) on processor 1 Oops: <NULL>
+ due to oops @ 0x0
+ kdba_dumpregs: pt_regs not available, use bt* or pid to select a different task
+ [1]kdb>
+
+
+Signed-off-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kdb/kdbasupport_64.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kdb/kdbasupport_64.c
++++ b/arch/x86/kdb/kdbasupport_64.c
+@@ -501,6 +501,11 @@ kdba_dumpregs(struct pt_regs *regs,
+ struct kdbregs *rlp;
+ kdb_machreg_t contents;
+
++ if (!regs) {
++ kdb_printf("%s: pt_regs not available, use bt* or pid to select a different task\n", __FUNCTION__);
++ return KDB_BADREG;
++ }
++
+ for (i=0, rlp=kdbreglist; i<nkdbreglist; i++,rlp++) {
+ kdb_printf("%8s = ", rlp->reg_name);
+ kdba_getregcontents(rlp->reg_name, regs, &contents);
+@@ -554,7 +559,7 @@ EXPORT_SYMBOL(kdba_dumpregs);
+ kdb_machreg_t
+ kdba_getpc(struct pt_regs *regs)
+ {
+- return regs->ip;
++ return regs ? regs->ip : 0;
+ }
+
+ int
--- /dev/null
+From: Jay Lan <jlan@sgi.com>
+Subject: [PATCH] Support '\n' in KDB
+Patch-mainline: 2.6.28-rc3-*-1 patchset
+References: bnc#442808
+
+Cliff tried to use KDB on medusa to verify a UV/KDB compatibility fix and found
+KDB needs to support '\n' in kdb_read() for medusa.
+
+I have integrated his patch into the kdb mainline in the 2.6.28-rc3-*-1 patchset.
+
+
+Signed-off-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ kdb/kdb_io.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kdb/kdb_io.c
++++ b/kdb/kdb_io.c
+@@ -246,7 +246,8 @@ kdb_read(char *buffer, size_t bufsize)
+ *cp = tmp;
+ }
+ break;
+- case 13: /* enter */
++ case 13: /* enter \r */
++ case 10: /* enter \n */
+ *lastchar++ = '\n';
+ *lastchar++ = '\0';
+ kdb_printf("\n");
--- /dev/null
+From e7706fc691513b0f06adb3de3d6ac04293180146 Mon Sep 17 00:00:00 2001
+From: Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp>
+Date: Mon, 20 Oct 2008 13:51:52 +0900
+Subject: [PATCH] x86, kdump: fix invalid access on i386 sparsemem
+References: bnc#440525
+
+Impact: fix kdump crash on 32-bit sparsemem kernels
+
+Since linux-2.6.27, kdump has failed on i386 sparsemem kernels.
+The 1st kernel panics just before switching to the 2nd kernel.
+
+The cause is that the kernel accesses an invalid mem_section via
+page_to_pfn(image->swap_page) in machine_kexec().
+image->swap_page is allocated when kexec is used for hibernation, but
+it is not allocated for kdump. So in the kdump case, the kernel should
+not access the mem_section corresponding to image->swap_page.
+
+The attached patch fixes this invalid access.
+
+Signed-off-by: Ken'ichi Ohmichi <oomichi@mxs.nes.nec.co.jp>
+Cc: kexec-ml <kexec@lists.infradead.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
+index 0732adb..7a38574 100644
+--- a/arch/x86/kernel/machine_kexec_32.c
++++ b/arch/x86/kernel/machine_kexec_32.c
+@@ -162,7 +162,10 @@ void machine_kexec(struct kimage *image)
+ page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
+ page_list[PA_PTE_1] = __pa(kexec_pte1);
+ page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
+- page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page) << PAGE_SHIFT);
++
++ if (image->type == KEXEC_TYPE_DEFAULT)
++ page_list[PA_SWAP_PAGE] = (page_to_pfn(image->swap_page)
++ << PAGE_SHIFT);
+
+ /* The segment registers are funny things, they have both a
+ * visible and an invisible part. Whenever the visible part is
--- /dev/null
+From: Bernhard Walle <bwalle@suse.de>
+Subject: [PATCH] Fix performance regression on large IA64 systems
+References: bnc#469589
+
+This patch tries to address a performance regression discovered by SGI.
+
+Patch b60c1f6ffd88850079ae419aa933ab0eddbd5535 removes the call
+to note_interrupt() in __do_IRQ(). Patch d85a60d85ea5b7c597508c1510c88e657773d378
+adds it again, because it's needed for irqpoll.
+
+This patch introduces a new parameter 'only_fixup' for note_interrupt().
+This parameter determines two cases:
+
+ TRUE => The function body is executed only when irqfixup is set.
+ Either 'irqpoll' or 'irqfixup' sets that directly.
+
+ FALSE => Just the behaviour note_interrupt() has always had.
+
+Now the patch converts all calls of note_interrupt() to only_fixup=FALSE,
+except the call that has been removed by b60c1f6ffd88850079ae419aa933ab0eddbd5535.
+So that call is now always made, but its body is only executed when either
+'irqpoll' or 'irqfixup' is specified.
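+
+In other words (sketch):
+
+    /* __do_IRQ() fast path: the call is always made, but the body
+     * runs only when 'irqpoll'/'irqfixup' was given on the command
+     * line */
+    note_interrupt(irq, desc, action_ret, 1);
+
+    /* all other call sites: unchanged behaviour */
+    note_interrupt(irq, desc, action_ret, 0);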
+
+This patch is not meant for mainline inclusion in the first run!
+
+
+Signed-off-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/arm/mach-ns9xxx/irq.c | 2 +-
+ arch/powerpc/platforms/cell/interrupt.c | 2 +-
+ include/linux/irq.h | 2 +-
+ kernel/irq/chip.c | 10 +++++-----
+ kernel/irq/handle.c | 4 ++--
+ kernel/irq/spurious.c | 13 ++++++++++++-
+ 6 files changed, 22 insertions(+), 11 deletions(-)
+
+--- a/arch/arm/mach-ns9xxx/irq.c
++++ b/arch/arm/mach-ns9xxx/irq.c
+@@ -86,7 +86,7 @@ static void handle_prio_irq(unsigned int
+ /* XXX: There is no direct way to access noirqdebug, so check
+ * unconditionally for spurious irqs...
+ * Maybe this function should go to kernel/irq/chip.c? */
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+--- a/arch/powerpc/platforms/cell/interrupt.c
++++ b/arch/powerpc/platforms/cell/interrupt.c
+@@ -270,7 +270,7 @@ static void handle_iic_irq(unsigned int
+ spin_unlock(&desc->lock);
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+ spin_lock(&desc->lock);
+
+ } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+--- a/include/linux/irq.h
++++ b/include/linux/irq.h
+@@ -296,7 +296,7 @@ static inline void generic_handle_irq(un
+
+ /* Handling of unhandled and spurious interrupts: */
+ extern void note_interrupt(unsigned int irq, struct irq_desc *desc,
+- int action_ret);
++ int action_ret, int only_fixup);
+
+ /* Resending of interrupts :*/
+ void check_irq_resend(struct irq_desc *desc, unsigned int irq);
+--- a/kernel/irq/chip.c
++++ b/kernel/irq/chip.c
+@@ -324,7 +324,7 @@ handle_simple_irq(unsigned int irq, stru
+
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+@@ -370,7 +370,7 @@ handle_level_irq(unsigned int irq, struc
+
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+@@ -423,7 +423,7 @@ handle_fasteoi_irq(unsigned int irq, str
+
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ spin_lock(&desc->lock);
+ desc->status &= ~IRQ_INPROGRESS;
+@@ -503,7 +503,7 @@ handle_edge_irq(unsigned int irq, struct
+ spin_unlock(&desc->lock);
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+ spin_lock(&desc->lock);
+
+ } while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+@@ -532,7 +532,7 @@ handle_percpu_irq(unsigned int irq, stru
+
+ action_ret = handle_IRQ_event(irq, desc->action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ if (desc->chip->eoi)
+ desc->chip->eoi(irq);
+--- a/kernel/irq/handle.c
++++ b/kernel/irq/handle.c
+@@ -187,7 +187,7 @@ unsigned int __do_IRQ(unsigned int irq)
+ if (likely(!(desc->status & IRQ_DISABLED))) {
+ action_ret = handle_IRQ_event(irq, desc->action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 1);
+ }
+ desc->chip->end(irq);
+ return 1;
+@@ -241,7 +241,7 @@ unsigned int __do_IRQ(unsigned int irq)
+
+ action_ret = handle_IRQ_event(irq, action);
+ if (!noirqdebug)
+- note_interrupt(irq, desc, action_ret);
++ note_interrupt(irq, desc, action_ret, 0);
+
+ spin_lock(&desc->lock);
+ if (likely(!(desc->status & IRQ_PENDING)))
+--- a/kernel/irq/spurious.c
++++ b/kernel/irq/spurious.c
+@@ -171,8 +171,19 @@ static inline int try_misrouted_irq(unsi
+ }
+
+ void note_interrupt(unsigned int irq, struct irq_desc *desc,
+- irqreturn_t action_ret)
++ irqreturn_t action_ret, int only_fixup)
+ {
++ /*
++ * The parameter "only_fixup" means that the function should be only
++ * executed if this parameter is set to 1 and the function should
++ * not be executed if the parameter is 0.
++ *
++ * We need that because irqfixup is static to the function but
++ * this function is called from kernel/irq/handle.c.
++ */
++ if (only_fixup && irqfixup == 0)
++ return;
++
+ if (unlikely(action_ret != IRQ_HANDLED)) {
+ /*
+ * If we are seeing only the odd spurious IRQ caused by
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: "No acl" entry put in client-side acl cache instead of "not cached"
+References: 171059
+
+When the acl of a file is not cached and only the default acl of that
+file is requested, a NULL "no acl" entry is put into the client-side acl
+cache of nfs instead of the ERR_PTR(-EAGAIN) "not cached" marker.
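+
+The cache slots follow this convention (a sketch of the fix below):
+
+    /* Slot states in the client-side acl cache:
+     *   valid pointer    -> the ACL is cached
+     *   NULL             -> cached knowledge that there is no ACL
+     *   ERR_PTR(-EAGAIN) -> not cached, must ask the server
+     *
+     * So nfs3_cache_acls() must not store an ERR_PTR() argument --
+     * that would turn "not cached" into "no ACL": */
+    if (!IS_ERR(acl))
+        nfsi->acl_access = posix_acl_dup(acl);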
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+Index: linux-2.6.16/fs/nfs/nfs3acl.c
+===================================================================
+--- linux-2.6.16.orig/fs/nfs/nfs3acl.c
++++ linux-2.6.16/fs/nfs/nfs3acl.c
+@@ -172,8 +172,10 @@ static void nfs3_cache_acls(struct inode
+ inode->i_ino, acl, dfacl);
+ spin_lock(&inode->i_lock);
+ __nfs3_forget_cached_acls(NFS_I(inode));
+- nfsi->acl_access = posix_acl_dup(acl);
+- nfsi->acl_default = posix_acl_dup(dfacl);
++ if (!IS_ERR(acl))
++ nfsi->acl_access = posix_acl_dup(acl);
++ if (!IS_ERR(dfacl))
++ nfsi->acl_default = posix_acl_dup(dfacl);
+ spin_unlock(&inode->i_lock);
+ }
+
+@@ -250,7 +252,9 @@ struct posix_acl *nfs3_proc_getacl(struc
+ res.acl_access = NULL;
+ }
+ }
+- nfs3_cache_acls(inode, res.acl_access, res.acl_default);
++ nfs3_cache_acls(inode,
++ (res.mask & NFS_ACL) ? res.acl_access : ERR_PTR(-EINVAL),
++ (res.mask & NFS_DFACL) ? res.acl_default : ERR_PTR(-EINVAL));
+
+ switch(type) {
+ case ACL_TYPE_ACCESS:
+@@ -321,6 +325,7 @@ static int nfs3_proc_setacls(struct inod
+ switch (status) {
+ case 0:
+ status = nfs_refresh_inode(inode, &fattr);
++ nfs3_cache_acls(inode, acl, dfacl);
+ break;
+ case -EPFNOSUPPORT:
+ case -EPROTONOSUPPORT:
--- /dev/null
+From: Frank Filz <ffilz@us.ibm.com>
+Subject: Fix issues with POSIX->NFSv4 ACL conversion
+References: bnc#535890
+
+1. The GROUP@ allow entry accidentally has NFS4_ACE_IDENTIFIER_GROUP set.
+This appears to have been introduced as part of commit bec50c4
+
+2. The group deny entries end up denying tcy even though tcy was just
+allowed by the allow entry. This appears to be due to:
+ ace->access_mask = mask_from_posix(deny, flags);
+instead of:
+ ace->access_mask = deny_mask_from_posix(deny, flags);
+
+Frank
+
+Signed-off-by: Frank Filz <ffilzlnx@...>
+Acked-by: J. Bruce Fields <bfields@fieldses.org>
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+diff -X ignore -ruNp linux-2.6.27.19-5/fs/nfsd/nfs4acl.c linux-2.6.27.19-5.5408/fs/nfsd/nfs4acl.c
+--- linux-2.6.27.19-5/fs/nfsd/nfs4acl.c 2008-10-09 15:13:53.000000000 -0700
++++ linux-2.6.27.19-5.5408/fs/nfsd/nfs4acl.c 2009-08-28 13:32:40.000000000 -0700
+@@ -321,7 +321,7 @@ _posix_to_nfsv4_one(struct posix_acl *pa
+ deny = ~pas.group & pas.other;
+ if (deny) {
+ ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
+- ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
++ ace->flag = eflag;
+ ace->access_mask = deny_mask_from_posix(deny, flags);
+ ace->whotype = NFS4_ACL_WHO_GROUP;
+ ace++;
+@@ -335,7 +335,7 @@ _posix_to_nfsv4_one(struct posix_acl *pa
+ if (deny) {
+ ace->type = NFS4_ACE_ACCESS_DENIED_ACE_TYPE;
+ ace->flag = eflag | NFS4_ACE_IDENTIFIER_GROUP;
+- ace->access_mask = mask_from_posix(deny, flags);
++ ace->access_mask = deny_mask_from_posix(deny, flags);
+ ace->whotype = NFS4_ACL_WHO_NAMED;
+ ace->who = pa->e_id;
+ ace++;
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] scsi: iterate over devices individually for /proc/scsi/scsi
+References: 263731
+Patch-mainline: Probably never, hch wants to kill /proc/scsi/scsi anyway.
+
+ On systems with very large numbers (> 1600 or so) of SCSI devices,
+ cat /proc/scsi/scsi ends up failing with -ENOMEM. This is due to
+ the show routine simply iterating over all of the devices with
+ bus_for_each_dev(), and trying to dump all of them into the buffer
+ at the same time. On my test system (using scsi_debug with 4064 devices),
+ the output ends up being ~ 632k, far more than kmalloc will typically allow.
+
+ This patch defines its own seq_file operations to iterate over the scsi
+ devices. The result is that each show() operation only dumps ~ 180 bytes
+ into the buffer at a time, so we don't run out of memory.
+
+ If the "Attached devices" header isn't required, we can dump the
+ sfile->private bit completely.
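+
+ The difference in allocation behaviour, in short (a sketch, not a
+ verbatim quote of the code below):
+
+     /* single_open(): one buffer must hold the entire output, so
+      * seq_read() keeps growing it (~632k here) until the kmalloc
+      * fails with -ENOMEM. */
+     return single_open(file, proc_scsi_show, NULL);
+
+     /* seq_open() with start/next/stop/show: each show() emits one
+      * device (~180 bytes), so a page-sized buffer is enough. */
+     return seq_open(file, &scsi_seq_ops);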
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ drivers/scsi/scsi_proc.c | 58 ++++++++++++++++++++++++++++++++++++++++++-----
+ 1 file changed, 52 insertions(+), 6 deletions(-)
+
+--- a/drivers/scsi/scsi_proc.c
++++ b/drivers/scsi/scsi_proc.c
+@@ -389,13 +389,59 @@ static ssize_t proc_scsi_write(struct fi
+ * @s: output goes here
+ * @p: not used
+ */
+-static int proc_scsi_show(struct seq_file *s, void *p)
++static int always_match(struct device *dev, void *data)
+ {
+- seq_printf(s, "Attached devices:\n");
+- bus_for_each_dev(&scsi_bus_type, NULL, s, proc_print_scsidevice);
+- return 0;
++ return 1;
+ }
+
++static inline struct device *next_scsi_device(struct device *start)
++{
++ struct device *next = bus_find_device(&scsi_bus_type, start, NULL,
++ always_match);
++ put_device(start);
++ return next;
++}
++
++static void *scsi_seq_start(struct seq_file *sfile, loff_t *pos)
++{
++ struct device *dev = NULL;
++ loff_t n = *pos;
++
++ while ((dev = next_scsi_device(dev))) {
++ if (!n--)
++ break;
++ sfile->private++;
++ }
++ return dev;
++}
++
++static void *scsi_seq_next(struct seq_file *sfile, void *v, loff_t *pos)
++{
++ (*pos)++;
++ sfile->private++;
++ return next_scsi_device(v);
++}
++
++static void scsi_seq_stop(struct seq_file *sfile, void *v)
++{
++ put_device(v);
++}
++
++static int scsi_seq_show(struct seq_file *sfile, void *dev)
++{
++ if (!sfile->private)
++ seq_puts(sfile, "Attached devices:\n");
++
++ return proc_print_scsidevice(dev, sfile);
++}
++
++static struct seq_operations scsi_seq_ops = {
++ .start = scsi_seq_start,
++ .next = scsi_seq_next,
++ .stop = scsi_seq_stop,
++ .show = scsi_seq_show
++};
++
+ /**
+ * proc_scsi_open - glue function
+ * @inode: not used
+@@ -409,7 +455,7 @@ static int proc_scsi_open(struct inode *
+ * We don't really need this for the write case but it doesn't
+ * harm either.
+ */
+- return single_open(file, proc_scsi_show, NULL);
++ return seq_open(file, &scsi_seq_ops);
+ }
+
+ static const struct file_operations proc_scsi_operations = {
+@@ -418,7 +464,7 @@ static const struct file_operations proc
+ .read = seq_read,
+ .write = proc_scsi_write,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = seq_release,
+ };
+
+ /**
--- /dev/null
+From: Danny Kukawka <dkukawka@suse.de>
+Subject: rt2x00: fix led naming
+
+Fixed led device naming for the rt2x00 driver. According to the
+documentation of the led subsystem/class, the naming should be
+"devicename:colour:function", and sections that do not apply
+should be left blank.
+
+This should lead to e.g. "%s::radio" instead of "%s:radio".
+
+Signed-off-by: Danny Kukawka <dkukawka@suse.de>
+--
+ drivers/net/wireless/rt2x00/rt2x00leds.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireless/rt2x00/rt2x00leds.c
++++ b/drivers/net/wireless/rt2x00/rt2x00leds.c
+@@ -149,7 +149,7 @@ void rt2x00leds_register(struct rt2x00_d
+ rt2x00dev->ops->name, wiphy_name(rt2x00dev->hw->wiphy));
+
+ if (rt2x00dev->led_radio.flags & LED_INITIALIZED) {
+- snprintf(name, sizeof(name), "%s:radio", dev_name);
++ snprintf(name, sizeof(name), "%s::radio", dev_name);
+
+ retval = rt2x00leds_register_led(rt2x00dev,
+ &rt2x00dev->led_radio,
+@@ -159,7 +159,7 @@ void rt2x00leds_register(struct rt2x00_d
+ }
+
+ if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) {
+- snprintf(name, sizeof(name), "%s:assoc", dev_name);
++ snprintf(name, sizeof(name), "%s::assoc", dev_name);
+
+ retval = rt2x00leds_register_led(rt2x00dev,
+ &rt2x00dev->led_assoc,
+@@ -169,7 +169,7 @@ void rt2x00leds_register(struct rt2x00_d
+ }
+
+ if (rt2x00dev->led_qual.flags & LED_INITIALIZED) {
+- snprintf(name, sizeof(name), "%s:quality", dev_name);
++ snprintf(name, sizeof(name), "%s::quality", dev_name);
+
+ retval = rt2x00leds_register_led(rt2x00dev,
+ &rt2x00dev->led_qual,
--- /dev/null
+From: Oliver Neukum <oneukum@suse.de>
+Subject: fix medium presence misdetection in usb storage device
+References: bnc#362850
+
+From reading the SCSI spec it seems that having the valid bit 0 (0x70
+checked in scsi_sense_valid) does not invalidate the ASC or ASCQ.
+[See page 37 of spc4r02.pdf]. It should only invalidate the INFORMATION
+field. Therefore remove the sense_valid check from the USB quirk.
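+
+A user-space sketch of why this is safe for fixed-format sense data
+(the byte values are illustrative, matching the devices in question):
+
+    #include <stdio.h>
+
+    int main(void)
+    {
+        /* Fixed-format sense data, response code 0x70 with the
+         * valid bit (bit 7 of byte 0) clear: */
+        unsigned char sense[18] = {
+            [0]  = 0x70,    /* valid=0, response code 0x70 */
+            [2]  = 0x06,    /* sense key: UNIT ATTENTION */
+            [12] = 0x28,    /* ASC: not ready to ready change */
+        };
+
+        /* Per the spec, valid=0 only means bytes 3..6 (the
+         * INFORMATION field) are undefined; the sense key, ASC and
+         * ASCQ still apply. */
+        printf("key=%#x asc=%#x ascq=%#x\n",
+               sense[2] & 0x0f, sense[12], sense[13]);
+        return 0;
+    }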
+
+Signed-off-by: Brandon Philips <bphilips@suse.de>
+
+---
+ drivers/scsi/sd.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+--- a/drivers/scsi/sd.c
++++ b/drivers/scsi/sd.c
+@@ -1208,8 +1208,7 @@ sd_spinup_disk(struct scsi_disk *sdkp)
+ * Yes, this sense key/ASC combination shouldn't
+ * occur here. It's characteristic of these devices.
+ */
+- } else if (sense_valid &&
+- sshdr.sense_key == UNIT_ATTENTION &&
++ } else if (sshdr.sense_key == UNIT_ATTENTION &&
+ sshdr.asc == 0x28) {
+ if (!spintime) {
+ spintime_expire = jiffies + 5 * HZ;
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: Add UV bios call to change memory protections.
+References: bnc#442455
+
+
+Add UV bios call to change memory protections.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kernel/bios_uv.c | 8 ++++++++
+ include/asm-x86/uv/bios.h | 10 +++++++++-
+ 2 files changed, 17 insertions(+), 1 deletion(-)
+
+Index: linux/arch/x86/kernel/bios_uv.c
+===================================================================
+--- linux.orig/arch/x86/kernel/bios_uv.c 2008-11-05 11:12:16.101949483 -0600
++++ linux/arch/x86/kernel/bios_uv.c 2008-11-05 11:13:15.289411601 -0600
+@@ -134,6 +134,14 @@ uv_bios_mq_watchlist_free(int blade, int
+ }
+ EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
++s64
++uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
++{
++ return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
++ perms, 0, 0);
++}
++EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
++
+ s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+ {
+ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+Index: linux/include/asm-x86/uv/bios.h
+===================================================================
+--- linux.orig/include/asm-x86/uv/bios.h 2008-11-05 11:12:16.117951501 -0600
++++ linux/include/asm-x86/uv/bios.h 2008-11-05 11:13:15.301413114 -0600
+@@ -34,7 +34,8 @@ enum uv_bios_cmd {
+ UV_BIOS_GET_SN_INFO,
+ UV_BIOS_FREQ_BASE,
+ UV_BIOS_WATCHLIST_ALLOC,
+- UV_BIOS_WATCHLIST_FREE
++ UV_BIOS_WATCHLIST_FREE,
++ UV_BIOS_MEMPROTECT
+ };
+
+ /*
+@@ -82,6 +83,12 @@ union uv_watchlist_u {
+ };
+ };
+
++enum uv_memprotect {
++ UV_MEMPROT_RESTRICT_ACCESS,
++ UV_MEMPROT_ALLOW_AMO,
++ UV_MEMPROT_ALLOW_RW
++};
++
+ /*
+ * bios calls have 6 parameters
+ */
+@@ -94,6 +101,7 @@ extern s64 uv_bios_freq_base(u64, u64 *)
+ extern int uv_bios_mq_watchlist_alloc(int, void *, unsigned int,
+ unsigned long *);
+ extern int uv_bios_mq_watchlist_free(int, int);
++extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
+
+ extern void uv_bios_init(void);
+
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: x86: Add UV partition call
+References: bnc#442455
+
+Add a bios call to return partitioning related info.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kernel/bios_uv.c | 44 ++++++++++++++++++++++++++++++++++-----
+ arch/x86/kernel/genx2apic_uv_x.c | 14 +++++++-----
+ include/asm-x86/uv/bios.h | 22 ++++++++++++++++---
+ 3 files changed, 66 insertions(+), 14 deletions(-)
+
+--- a/arch/x86/kernel/bios_uv.c
++++ b/arch/x86/kernel/bios_uv.c
+@@ -23,6 +23,7 @@
+ #include <asm/efi.h>
+ #include <linux/io.h>
+ #include <asm/uv/bios.h>
++#include <asm/uv/uv_hub.h>
+
+ struct uv_systab uv_systab;
+
+@@ -65,14 +66,47 @@ s64 uv_bios_call_reentrant(enum uv_bios_
+ return ret;
+ }
+
+-long
+-x86_bios_freq_base(unsigned long clock_type, unsigned long *ticks_per_second,
+- unsigned long *drift_info)
++
++long sn_partition_id;
++EXPORT_SYMBOL_GPL(sn_partition_id);
++long uv_coherency_id;
++EXPORT_SYMBOL_GPL(uv_coherency_id);
++long uv_region_size;
++EXPORT_SYMBOL_GPL(uv_region_size);
++int uv_type;
++
++
++s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
++ long *region)
++{
++ s64 ret;
++ u64 v0, v1;
++ union partition_info_u part;
++
++ ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
++ (u64)(&v0), (u64)(&v1), 0, 0);
++ if (ret != BIOS_STATUS_SUCCESS)
++ return ret;
++
++ part.val = v0;
++ if (uvtype)
++ *uvtype = part.hub_version;
++ if (partid)
++ *partid = part.partition_id;
++ if (coher)
++ *coher = part.coherence_id;
++ if (region)
++ *region = part.region_size;
++ return ret;
++}
++
++
++s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+ {
+ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+- (u64)ticks_per_second, 0, 0, 0);
++ (u64)ticks_per_second, 0, 0, 0);
+ }
+-EXPORT_SYMBOL_GPL(x86_bios_freq_base);
++EXPORT_SYMBOL_GPL(uv_bios_freq_base);
+
+
+ #ifdef CONFIG_EFI
+--- a/arch/x86/kernel/genx2apic_uv_x.c
++++ b/arch/x86/kernel/genx2apic_uv_x.c
+@@ -353,12 +353,12 @@ static __init void map_mmioh_high(int ma
+
+ static __init void uv_rtc_init(void)
+ {
+- long status, ticks_per_sec, drift;
++ long status;
++ u64 ticks_per_sec;
+
+- status =
+- x86_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK, &ticks_per_sec,
+- &drift);
+- if (status != 0 || ticks_per_sec < 100000) {
++ status = uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
++ &ticks_per_sec);
++ if (status != BIOS_STATUS_SUCCESS || ticks_per_sec < 100000) {
+ printk(KERN_WARNING
+ "unable to determine platform RTC clock frequency, "
+ "guessing.\n");
+@@ -523,6 +523,8 @@ void __init uv_system_init(void)
+ ~((1 << n_val) - 1)) << m_val;
+
+ uv_bios_init();
++ uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
++ &uv_coherency_id, &uv_region_size);
+ uv_rtc_init();
+
+ for_each_present_cpu(cpu) {
+@@ -544,7 +546,7 @@ void __init uv_system_init(void)
+ uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+ uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
+ uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+- uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */
++ uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+ uv_node_to_blade[nid] = blade;
+ uv_cpu_to_blade[cpu] = blade;
+--- a/include/asm-x86/uv/bios.h
++++ b/include/asm-x86/uv/bios.h
+@@ -61,6 +61,16 @@ enum {
+ BIOS_FREQ_BASE_REALTIME_CLOCK = 2
+ };
+
++union partition_info_u {
++ u64 val;
++ struct {
++ u64 hub_version : 8,
++ partition_id : 16,
++ coherence_id : 16,
++ region_size : 24;
++ };
++};
++
+ /*
+ * bios calls have 6 parameters
+ */
+@@ -68,10 +78,16 @@ extern s64 uv_bios_call(enum uv_bios_cmd
+ extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+ extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
+
++extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
++extern s64 uv_bios_freq_base(u64, u64 *);
++
+ extern void uv_bios_init(void);
+
+-extern long
+-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
+- unsigned long *drift_info);
++extern int uv_type;
++extern long sn_partition_id;
++extern long uv_coherency_id;
++extern long uv_region_size;
++#define partition_coherence_id() (uv_coherency_id)
++
+
+ #endif /* _ASM_X86_BIOS_H */
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: Add UV bios call to get the address of the reserved page.
+References: bnc#442455
+
+Add UV bios call to get the address of the reserved page.
+
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kernel/bios_uv.c | 11 +++++++++++
+ include/asm-x86/uv/bios.h | 5 ++++-
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+Index: linux/arch/x86/kernel/bios_uv.c
+===================================================================
+--- linux.orig/arch/x86/kernel/bios_uv.c 2008-11-05 11:13:15.289411601 -0600
++++ linux/arch/x86/kernel/bios_uv.c 2008-11-05 11:14:11.428488248 -0600
+@@ -142,6 +142,17 @@ uv_bios_change_memprotect(u64 paddr, u64
+ }
+ EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
+
++s64
++uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
++{
++ s64 ret;
++
++ ret = uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
++ (u64)addr, buf, (u64)len, 0);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
++
+ s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+ {
+ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+Index: linux/include/asm-x86/uv/bios.h
+===================================================================
+--- linux.orig/include/asm-x86/uv/bios.h 2008-11-05 11:13:15.301413114 -0600
++++ linux/include/asm-x86/uv/bios.h 2008-11-05 11:14:11.436489257 -0600
+@@ -35,13 +35,15 @@ enum uv_bios_cmd {
+ UV_BIOS_FREQ_BASE,
+ UV_BIOS_WATCHLIST_ALLOC,
+ UV_BIOS_WATCHLIST_FREE,
+- UV_BIOS_MEMPROTECT
++ UV_BIOS_MEMPROTECT,
++ UV_BIOS_GET_PARTITION_ADDR
+ };
+
+ /*
+ * Status values returned from a BIOS call.
+ */
+ enum {
++ BIOS_STATUS_MORE_PASSES = 1,
+ BIOS_STATUS_SUCCESS = 0,
+ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
+ BIOS_STATUS_EINVAL = -EINVAL,
+@@ -102,6 +104,7 @@ extern int uv_bios_mq_watchlist_alloc(in
+ unsigned long *);
+ extern int uv_bios_mq_watchlist_free(int, int);
+ extern s64 uv_bios_change_memprotect(u64, u64, enum uv_memprotect);
++extern s64 uv_bios_reserved_page_pa(u64, u64 *, u64 *, u64 *);
+
+ extern void uv_bios_init(void);
+
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: Add UV bios calls to allocate and free watchlists.
+References: bnc#442455
+
+Add UV bios calls to allocate and free watchlists.
+
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kernel/bios_uv.c | 33 +++++++++++++++++++++++++++++++++
+ include/asm-x86/uv/bios.h | 17 ++++++++++++++++-
+ 2 files changed, 49 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/bios_uv.c
++++ b/arch/x86/kernel/bios_uv.c
+@@ -100,6 +100,39 @@ s64 uv_bios_get_sn_info(int fc, int *uvt
+ return ret;
+ }
+
++int
++uv_bios_mq_watchlist_alloc(int blade, void *mq, unsigned int mq_size,
++ unsigned long *intr_mmr_offset)
++{
++ union uv_watchlist_u size_blade;
++ unsigned long addr;
++ u64 watchlist;
++ s64 ret;
++
++ addr = (unsigned long)mq;
++ size_blade.size = mq_size;
++ size_blade.blade = blade;
++
++ /*
++ * bios returns watchlist number or negative error number.
++ */
++ ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
++ size_blade.val, (u64)intr_mmr_offset,
++ (u64)&watchlist, 0);
++ if (ret < BIOS_STATUS_SUCCESS)
++ return ret;
++
++ return watchlist;
++}
++EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
++
++int
++uv_bios_mq_watchlist_free(int blade, int watchlist_num)
++{
++ return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
++ blade, watchlist_num, 0, 0, 0);
++}
++EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
+ s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+ {
+--- a/include/asm-x86/uv/bios.h
++++ b/include/asm-x86/uv/bios.h
+@@ -32,7 +32,9 @@
+ enum uv_bios_cmd {
+ UV_BIOS_COMMON,
+ UV_BIOS_GET_SN_INFO,
+- UV_BIOS_FREQ_BASE
++ UV_BIOS_FREQ_BASE,
++ UV_BIOS_WATCHLIST_ALLOC,
++ UV_BIOS_WATCHLIST_FREE
+ };
+
+ /*
+@@ -71,6 +73,15 @@ union partition_info_u {
+ };
+ };
+
++union uv_watchlist_u {
++ u64 val;
++ struct {
++ u64 blade : 16,
++ size : 32,
++ filler : 16;
++ };
++};
++
+ /*
+ * bios calls have 6 parameters
+ */
+@@ -80,9 +91,13 @@ extern s64 uv_bios_call_reentrant(enum u
+
+ extern s64 uv_bios_get_sn_info(int, int *, long *, long *, long *);
+ extern s64 uv_bios_freq_base(u64, u64 *);
++extern int uv_bios_mq_watchlist_alloc(int, void *, unsigned int,
++ unsigned long *);
++extern int uv_bios_mq_watchlist_free(int, int);
+
+ extern void uv_bios_init(void);
+
++extern unsigned long sn_rtc_cycles_per_second;
+ extern int uv_type;
+ extern long sn_partition_id;
+ extern long sn_coherency_id;
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: x86: Add UV bios call infrastructure
+References: bnc#442455
+
+Add the EFI callback function and associated wrapper code.
+Initialize SAL system table entry info at boot time.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Signed-off-by: Paul Jackson <pj@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/x86/kernel/bios_uv.c | 101 ++++++++++++++++++++++++++++++---------
+ arch/x86/kernel/genx2apic_uv_x.c | 1
+ include/asm-x86/efi.h | 14 +++++
+ include/asm-x86/uv/bios.h | 73 +++++++++++++++-------------
+ 4 files changed, 136 insertions(+), 53 deletions(-)
+
+--- a/arch/x86/kernel/bios_uv.c
++++ b/arch/x86/kernel/bios_uv.c
+@@ -1,8 +1,6 @@
+ /*
+ * BIOS run time interface routines.
+ *
+- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
+- *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+@@ -16,33 +14,94 @@
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
++ * Copyright (c) Russ Anderson
+ */
+
++#include <linux/efi.h>
++#include <asm/efi.h>
++#include <linux/io.h>
+ #include <asm/uv/bios.h>
+
+-const char *
+-x86_bios_strerror(long status)
++struct uv_systab uv_systab;
++
++s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4, u64 a5)
+ {
+- const char *str;
+- switch (status) {
+- case 0: str = "Call completed without error"; break;
+- case -1: str = "Not implemented"; break;
+- case -2: str = "Invalid argument"; break;
+- case -3: str = "Call completed with error"; break;
+- default: str = "Unknown BIOS status code"; break;
+- }
+- return str;
++ struct uv_systab *tab = &uv_systab;
++
++ if (!tab->function)
++ /*
++ * BIOS does not support UV systab
++ */
++ return BIOS_STATUS_UNIMPLEMENTED;
++
++ return efi_call6((void *)__va(tab->function),
++ (u64)which, a1, a2, a3, a4, a5);
+ }
+
+-long
+-x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
+- unsigned long *drift_info)
++s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++ u64 a4, u64 a5)
+ {
+- struct uv_bios_retval isrv;
++ unsigned long bios_flags;
++ s64 ret;
++
++ local_irq_save(bios_flags);
++ ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++ local_irq_restore(bios_flags);
++
++ return ret;
++}
++
++s64 uv_bios_call_reentrant(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
++ u64 a4, u64 a5)
++{
++ s64 ret;
++
++ preempt_disable();
++ ret = uv_bios_call(which, a1, a2, a3, a4, a5);
++ preempt_enable();
+
+- BIOS_CALL(isrv, BIOS_FREQ_BASE, which, 0, 0, 0, 0, 0, 0);
+- *ticks_per_second = isrv.v0;
+- *drift_info = isrv.v1;
+- return isrv.status;
++ return ret;
++}
++
++long
++x86_bios_freq_base(unsigned long clock_type, unsigned long *ticks_per_second,
++ unsigned long *drift_info)
++{
++ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
++ (u64)ticks_per_second, 0, 0, 0);
+ }
+ EXPORT_SYMBOL_GPL(x86_bios_freq_base);
++
++
++#ifdef CONFIG_EFI
++void uv_bios_init(void)
++{
++ struct uv_systab *tab;
++
++ if ((efi.uv_systab == EFI_INVALID_TABLE_ADDR) ||
++ (efi.uv_systab == (unsigned long)NULL)) {
++ printk(KERN_CRIT "No EFI UV System Table.\n");
++ uv_systab.function = (unsigned long)NULL;
++ return;
++ }
++
++ tab = (struct uv_systab *)ioremap(efi.uv_systab,
++ sizeof(struct uv_systab));
++ if (strncmp(tab->signature, "UVST", 4) != 0)
++ printk(KERN_ERR "bad signature in UV system table!\n");
++
++ /*
++ * Copy table to permanent spot for later use.
++ */
++ memcpy(&uv_systab, tab, sizeof(struct uv_systab));
++ iounmap(tab);
++
++ printk(KERN_INFO "EFI UV System Table Revision %d\n", uv_systab.revision);
++}
++#else /* !CONFIG_EFI */
++
++void uv_bios_init(void) { }
++#endif
++
+--- a/arch/x86/kernel/genx2apic_uv_x.c
++++ b/arch/x86/kernel/genx2apic_uv_x.c
+@@ -522,6 +522,7 @@ void __init uv_system_init(void)
+ gnode_upper = (((unsigned long)node_id.s.node_id) &
+ ~((1 << n_val) - 1)) << m_val;
+
++ uv_bios_init();
+ uv_rtc_init();
+
+ for_each_present_cpu(cpu) {
+--- a/include/asm-x86/efi.h
++++ b/include/asm-x86/efi.h
+@@ -49,6 +49,20 @@ extern u64 efi_call5(void *fp, u64 arg1,
+ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
+ u64 arg4, u64 arg5, u64 arg6);
+
++
++#ifndef CONFIG_EFI
++/*
++ * IF EFI is not configured, have the EFI calls return -ENOSYS.
++ */
++#define efi_call0(_f) (-ENOSYS)
++#define efi_call1(_f, _a1) (-ENOSYS)
++#define efi_call2(_f, _a1, _a2) (-ENOSYS)
++#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
++#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
++#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
++#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
++#endif /* CONFIG_EFI */
++
+ #define efi_call_phys0(f) \
+ efi_call0((void *)(f))
+ #define efi_call_phys1(f, a1) \
+--- a/include/asm-x86/uv/bios.h
++++ b/include/asm-x86/uv/bios.h
+@@ -2,9 +2,7 @@
+ #define _ASM_X86_BIOS_H
+
+ /*
+- * BIOS layer definitions.
+- *
+- * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
++ * UV BIOS layer definitions.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+@@ -19,50 +17,61 @@
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
++ * Copyright (c) Russ Anderson
+ */
+
+ #include <linux/rtc.h>
+
+-#define BIOS_FREQ_BASE 0x01000001
++/*
++ * Values for the BIOS calls. It is passed as the first argument in the
++ * BIOS call. Passing any other value in the first argument will result
++ * in a BIOS_STATUS_UNIMPLEMENTED return status.
++ */
++enum uv_bios_cmd {
++ UV_BIOS_COMMON,
++ UV_BIOS_GET_SN_INFO,
++ UV_BIOS_FREQ_BASE
++};
+
++/*
++ * Status values returned from a BIOS call.
++ */
+ enum {
+- BIOS_FREQ_BASE_PLATFORM = 0,
+- BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
+- BIOS_FREQ_BASE_REALTIME_CLOCK = 2
++ BIOS_STATUS_SUCCESS = 0,
++ BIOS_STATUS_UNIMPLEMENTED = -ENOSYS,
++ BIOS_STATUS_EINVAL = -EINVAL,
++ BIOS_STATUS_UNAVAIL = -EBUSY
+ };
+
+-# define BIOS_CALL(result, a0, a1, a2, a3, a4, a5, a6, a7) \
+- do { \
+- /* XXX - the real call goes here */ \
+- result.status = BIOS_STATUS_UNIMPLEMENTED; \
+- isrv.v0 = 0; \
+- isrv.v1 = 0; \
+- } while (0)
++/*
++ * The UV system table describes specific firmware
++ * capabilities available to the Linux kernel at runtime.
++ */
++struct uv_systab {
++ char signature[4]; /* must be "UVST" */
++ u32 revision; /* distinguish different firmware revs */
++ u64 function; /* BIOS runtime callback function ptr */
++};
+
+ enum {
+- BIOS_STATUS_SUCCESS = 0,
+- BIOS_STATUS_UNIMPLEMENTED = -1,
+- BIOS_STATUS_EINVAL = -2,
+- BIOS_STATUS_ERROR = -3
++ BIOS_FREQ_BASE_PLATFORM = 0,
++ BIOS_FREQ_BASE_INTERVAL_TIMER = 1,
++ BIOS_FREQ_BASE_REALTIME_CLOCK = 2
+ };
+
+-struct uv_bios_retval {
+- /*
+- * A zero status value indicates call completed without error.
+- * A negative status value indicates reason of call failure.
+- * A positive status value indicates success but an
+- * informational value should be printed (e.g., "reboot for
+- * change to take effect").
+- */
+- s64 status;
+- u64 v0;
+- u64 v1;
+- u64 v2;
+-};
++/*
++ * bios calls have 6 parameters
++ */
++extern s64 uv_bios_call(enum uv_bios_cmd, u64, u64, u64, u64, u64);
++extern s64 uv_bios_call_irqsave(enum uv_bios_cmd, u64, u64, u64, u64, u64);
++extern s64 uv_bios_call_reentrant(enum uv_bios_cmd, u64, u64, u64, u64, u64);
++
++extern void uv_bios_init(void);
+
+ extern long
+ x86_bios_freq_base(unsigned long which, unsigned long *ticks_per_second,
+ unsigned long *drift_info);
+-extern const char *x86_bios_strerror(long status);
+
+ #endif /* _ASM_X86_BIOS_H */
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: x86: Add UV EFI table entry
+References: bnc#442455
+
+Add an EFI table entry for SGI UV system.
+Look for the entry in the EFI tables.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Signed-off-by: Paul Jackson <pj@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
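+
+Consumers can then test whether the firmware exported the table before
+mapping it; a sketch of the pattern (uv_bios_init() in the companion
+bios-call patch does exactly this):
+
+	struct uv_systab *tab;
+
+	if (efi.uv_systab == EFI_INVALID_TABLE_ADDR)
+		return;		/* no UV system table */
+	tab = (struct uv_systab *)ioremap(efi.uv_systab,
+					  sizeof(struct uv_systab));
+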
+ arch/x86/kernel/efi.c | 4 ++++
+ include/linux/efi.h | 4 ++++
+ 2 files changed, 8 insertions(+)
+
+Index: linux/arch/x86/kernel/efi.c
+===================================================================
+--- linux.orig/arch/x86/kernel/efi.c 2008-10-15 09:56:13.000000000 -0500
++++ linux/arch/x86/kernel/efi.c 2008-10-15 09:56:23.000000000 -0500
+@@ -367,6 +367,10 @@ void __init efi_init(void)
+ efi.smbios = config_tables[i].table;
+ printk(" SMBIOS=0x%lx ", config_tables[i].table);
+ } else if (!efi_guidcmp(config_tables[i].guid,
++ UV_SYSTEM_TABLE_GUID)) {
++ efi.uv_systab = config_tables[i].table;
++ printk(" UVsystab=0x%lx ", config_tables[i].table);
++ } else if (!efi_guidcmp(config_tables[i].guid,
+ HCDP_TABLE_GUID)) {
+ efi.hcdp = config_tables[i].table;
+ printk(" HCDP=0x%lx ", config_tables[i].table);
+Index: linux/include/linux/efi.h
+===================================================================
+--- linux.orig/include/linux/efi.h 2008-10-15 09:56:13.000000000 -0500
++++ linux/include/linux/efi.h 2008-10-15 09:56:23.000000000 -0500
+@@ -208,6 +208,9 @@ typedef efi_status_t efi_set_virtual_add
+ #define EFI_GLOBAL_VARIABLE_GUID \
+ EFI_GUID( 0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c )
+
++#define UV_SYSTEM_TABLE_GUID \
++ EFI_GUID( 0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93 )
++
+ typedef struct {
+ efi_guid_t guid;
+ unsigned long table;
+@@ -255,6 +258,7 @@ extern struct efi {
+ unsigned long boot_info; /* boot info table */
+ unsigned long hcdp; /* HCDP table */
+ unsigned long uga; /* UGA table */
++ unsigned long uv_systab; /* UV system table */
+ efi_get_time_t *get_time;
+ efi_set_time_t *set_time;
+ efi_get_wakeup_time_t *get_wakeup_time;
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: Use consistent names for region size and coherence id on x86 and ia64.
+References: bnc#442455
+
+The SGI xp drivers are used on both ia64 and x86. Using the same
+names (sn_coherency_id, sn_region_size) simplifies the driver code.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+
+ arch/x86/kernel/bios_uv.c | 8 ++++----
+ arch/x86/kernel/genx2apic_uv_x.c | 4 ++--
+ include/asm-x86/uv/bios.h | 6 +++---
+ 3 files changed, 9 insertions(+), 9 deletions(-)
+
+--- a/arch/x86/kernel/bios_uv.c
++++ b/arch/x86/kernel/bios_uv.c
+@@ -69,10 +69,10 @@ s64 uv_bios_call_reentrant(enum uv_bios_
+
+ long sn_partition_id;
+ EXPORT_SYMBOL_GPL(sn_partition_id);
+-long uv_coherency_id;
+-EXPORT_SYMBOL_GPL(uv_coherency_id);
+-long uv_region_size;
+-EXPORT_SYMBOL_GPL(uv_region_size);
++long sn_coherency_id;
++EXPORT_SYMBOL_GPL(sn_coherency_id);
++long sn_region_size;
++EXPORT_SYMBOL_GPL(sn_region_size);
+ int uv_type;
+
+
+--- a/arch/x86/kernel/genx2apic_uv_x.c
++++ b/arch/x86/kernel/genx2apic_uv_x.c
+@@ -524,7 +524,7 @@ void __init uv_system_init(void)
+
+ uv_bios_init();
+ uv_bios_get_sn_info(0, &uv_type, &sn_partition_id,
+- &uv_coherency_id, &uv_region_size);
++ &sn_coherency_id, &sn_region_size);
+ uv_rtc_init();
+
+ for_each_present_cpu(cpu) {
+@@ -546,7 +546,7 @@ void __init uv_system_init(void)
+ uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1;
+ uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper;
+ uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base;
+- uv_cpu_hub_info(cpu)->coherency_domain_number = uv_coherency_id;
++ uv_cpu_hub_info(cpu)->coherency_domain_number = sn_coherency_id;
+ uv_cpu_hub_info(cpu)->scir.offset = SCIR_LOCAL_MMR_BASE + lcpu;
+ uv_node_to_blade[nid] = blade;
+ uv_cpu_to_blade[cpu] = blade;
+--- a/include/asm-x86/uv/bios.h
++++ b/include/asm-x86/uv/bios.h
+@@ -85,9 +85,9 @@ extern void uv_bios_init(void);
+
+ extern int uv_type;
+ extern long sn_partition_id;
+-extern long uv_coherency_id;
+-extern long uv_region_size;
+-#define partition_coherence_id() (uv_coherency_id)
++extern long sn_coherency_id;
++extern long sn_region_size;
++#define partition_coherence_id() (sn_coherency_id)
+
+ extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
--- /dev/null
+From: Russ Anderson <rja@sgi.com>
+Subject: x86: Add UV sysfs entries
+References: bnc#442455
+
+Create /sys/firmware/sgi_uv sysfs entries for partition_id and coherence_id.
+
+Signed-off-by: Russ Anderson <rja@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
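+
+Once booted on UV hardware, the values can be read directly, e.g.:
+
+	$ cat /sys/firmware/sgi_uv/partition_id
+	$ cat /sys/firmware/sgi_uv/coherence_id
+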
+ arch/x86/kernel/Makefile | 1
+ arch/x86/kernel/uv_sysfs.c | 72 +++++++++++++++++++++++++++++++++++++++++++++
+ include/asm-x86/uv/bios.h | 1
+ 3 files changed, 74 insertions(+)
+
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -103,6 +103,7 @@ obj-$(CONFIG_OLPC) += olpc.o
+ # 64 bit specific files
+ ifeq ($(CONFIG_X86_64),y)
+ obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
++ obj-y += uv_sysfs.o
+ obj-y += genx2apic_cluster.o
+ obj-y += genx2apic_phys.o
+ obj-y += bios_uv.o
+--- /dev/null
++++ b/arch/x86/kernel/uv_sysfs.c
+@@ -0,0 +1,72 @@
++/*
++ * This file supports the /sys/firmware/sgi_uv interfaces for SGI UV.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (c) 2008 Silicon Graphics, Inc. All Rights Reserved.
++ * Copyright (c) Russ Anderson
++ */
++
++#include <linux/sysdev.h>
++#include <asm/uv/bios.h>
++
++struct kobject *sgi_uv_kobj;
++
++static ssize_t partition_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%ld\n", sn_partition_id);
++}
++
++static ssize_t coherence_id_show(struct kobject *kobj,
++ struct kobj_attribute *attr, char *buf)
++{
++ return snprintf(buf, PAGE_SIZE, "%ld\n", partition_coherence_id());
++}
++
++static struct kobj_attribute partition_id_attr =
++ __ATTR(partition_id, S_IRUGO, partition_id_show, NULL);
++
++static struct kobj_attribute coherence_id_attr =
++ __ATTR(coherence_id, S_IRUGO, coherence_id_show, NULL);
++
++
++static int __init sgi_uv_sysfs_init(void)
++{
++ int ret;
++
++ if (!sgi_uv_kobj)
++ sgi_uv_kobj = kobject_create_and_add("sgi_uv", firmware_kobj);
++ if (!sgi_uv_kobj) {
++ printk(KERN_WARNING "kobject_create_and_add sgi_uv failed \n");
++ return -EINVAL;
++ }
++
++ ret = sysfs_create_file(sgi_uv_kobj, &partition_id_attr.attr);
++ if (ret) {
++ printk(KERN_WARNING "sysfs_create_file partition_id failed \n");
++ return ret;
++ }
++
++ ret = sysfs_create_file(sgi_uv_kobj, &coherence_id_attr.attr);
++ if (ret) {
++ printk(KERN_WARNING "sysfs_create_file coherence_id failed \n");
++ return ret;
++ }
++
++ return 0;
++}
++
++device_initcall(sgi_uv_sysfs_init);
+--- a/include/asm-x86/uv/bios.h
++++ b/include/asm-x86/uv/bios.h
+@@ -89,5 +89,6 @@ extern long uv_coherency_id;
+ extern long uv_region_size;
+ #define partition_coherence_id() (uv_coherency_id)
+
++extern struct kobject *sgi_uv_kobj; /* /sys/firmware/sgi_uv */
+
+ #endif /* _ASM_X86_BIOS_H */
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Subject: Define xp_expand_memprotect() and xp_restrict_memprotect()
+References: bnc#442461
+
+Define xp_expand_memprotect() and xp_restrict_memprotect() so they can be
+tailored to the hardware they are run on.
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
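+
+A usage sketch ("buf" is a placeholder for a page-aligned buffer that a
+remote partition needs access to):
+
+	enum xp_retval ret;
+
+	ret = xp_expand_memprotect(xp_pa(buf), PAGE_SIZE);
+	if (ret != xpSuccess)
+		return ret;
+	/* ... remote partition may now access buf ... */
+	ret = xp_restrict_memprotect(xp_pa(buf), PAGE_SIZE);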
+
+ drivers/misc/sgi-xp/xp.h | 7 +++-
+ drivers/misc/sgi-xp/xp_main.c | 7 ++++
+ drivers/misc/sgi-xp/xp_sn2.c | 34 +++++++++++++++++++++
+ drivers/misc/sgi-xp/xp_uv.c | 66 ++++++++++++++++++++++++++++++++++++++++++
+ drivers/misc/sgi-xp/xpc_sn2.c | 15 +++------
+ 5 files changed, 117 insertions(+), 12 deletions(-)
+
+--- a/drivers/misc/sgi-xp/xp.h
++++ b/drivers/misc/sgi-xp/xp.h
+@@ -190,9 +190,10 @@ enum xp_retval {
+ xpGruSendMqError, /* 59: gru send message queue related error */
+
+ xpBadChannelNumber, /* 60: invalid channel number */
+- xpBadMsgType, /* 60: invalid message type */
++ xpBadMsgType, /* 61: invalid message type */
++ xpBiosError, /* 62: BIOS error */
+
+- xpUnknownReason /* 61: unknown reason - must be last in enum */
++ xpUnknownReason /* 63: unknown reason - must be last in enum */
+ };
+
+ /*
+@@ -341,6 +342,8 @@ extern unsigned long (*xp_pa) (void *);
+ extern enum xp_retval (*xp_remote_memcpy) (unsigned long, const unsigned long,
+ size_t);
+ extern int (*xp_cpu_to_nasid) (int);
++extern enum xp_retval (*xp_expand_memprotect) (unsigned long, unsigned long);
++extern enum xp_retval (*xp_restrict_memprotect) (unsigned long, unsigned long);
+
+ extern u64 xp_nofault_PIOR_target;
+ extern int xp_nofault_PIOR(void *);
+--- a/drivers/misc/sgi-xp/xp_main.c
++++ b/drivers/misc/sgi-xp/xp_main.c
+@@ -51,6 +51,13 @@ EXPORT_SYMBOL_GPL(xp_remote_memcpy);
+ int (*xp_cpu_to_nasid) (int cpuid);
+ EXPORT_SYMBOL_GPL(xp_cpu_to_nasid);
+
++enum xp_retval (*xp_expand_memprotect) (unsigned long phys_addr,
++ unsigned long size);
++EXPORT_SYMBOL_GPL(xp_expand_memprotect);
++enum xp_retval (*xp_restrict_memprotect) (unsigned long phys_addr,
++ unsigned long size);
++EXPORT_SYMBOL_GPL(xp_restrict_memprotect);
++
+ /*
+ * xpc_registrations[] keeps track of xpc_connect()'s done by the kernel-level
+ * users of XPC.
+--- a/drivers/misc/sgi-xp/xp_sn2.c
++++ b/drivers/misc/sgi-xp/xp_sn2.c
+@@ -120,6 +120,38 @@ xp_cpu_to_nasid_sn2(int cpuid)
+ return cpuid_to_nasid(cpuid);
+ }
+
++static enum xp_retval
++xp_expand_memprotect_sn2(unsigned long phys_addr, unsigned long size)
++{
++ u64 nasid_array = 0;
++ int ret;
++
++ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
++ &nasid_array);
++ if (ret != 0) {
++ dev_err(xp, "sn_change_memprotect(,, "
++ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
++ return xpSalError;
++ }
++ return xpSuccess;
++}
++
++static enum xp_retval
++xp_restrict_memprotect_sn2(unsigned long phys_addr, unsigned long size)
++{
++ u64 nasid_array = 0;
++ int ret;
++
++ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
++ &nasid_array);
++ if (ret != 0) {
++ dev_err(xp, "sn_change_memprotect(,, "
++ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
++ return xpSalError;
++ }
++ return xpSuccess;
++}
++
+ enum xp_retval
+ xp_init_sn2(void)
+ {
+@@ -132,6 +164,8 @@ xp_init_sn2(void)
+ xp_pa = xp_pa_sn2;
+ xp_remote_memcpy = xp_remote_memcpy_sn2;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_sn2;
++ xp_expand_memprotect = xp_expand_memprotect_sn2;
++ xp_restrict_memprotect = xp_restrict_memprotect_sn2;
+
+ return xp_register_nofault_code_sn2();
+ }
+--- a/drivers/misc/sgi-xp/xp_uv.c
++++ b/drivers/misc/sgi-xp/xp_uv.c
+@@ -15,6 +15,11 @@
+
+ #include <linux/device.h>
+ #include <asm/uv/uv_hub.h>
++#if defined CONFIG_X86_64
++#include <asm/uv/bios.h>
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++#include <asm/sn/sn_sal.h>
++#endif
+ #include "../sgi-gru/grukservices.h"
+ #include "xp.h"
+
+@@ -49,6 +54,65 @@ xp_cpu_to_nasid_uv(int cpuid)
+ return UV_PNODE_TO_NASID(uv_cpu_to_pnode(cpuid));
+ }
+
++static enum xp_retval
++xp_expand_memprotect_uv(unsigned long phys_addr, unsigned long size)
++{
++ int ret;
++
++#if defined CONFIG_X86_64
++ ret = uv_bios_change_memprotect(phys_addr, size, UV_MEMPROT_ALLOW_RW);
++ if (ret != BIOS_STATUS_SUCCESS) {
++ dev_err(xp, "uv_bios_change_memprotect(,, "
++ "UV_MEMPROT_ALLOW_RW) failed, ret=%d\n", ret);
++ return xpBiosError;
++ }
++
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ u64 nasid_array;
++
++ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_1,
++ &nasid_array);
++ if (ret != 0) {
++ dev_err(xp, "sn_change_memprotect(,, "
++ "SN_MEMPROT_ACCESS_CLASS_1,) failed ret=%d\n", ret);
++ return xpSalError;
++ }
++#else
++ #error not a supported configuration
++#endif
++ return xpSuccess;
++}
++
++static enum xp_retval
++xp_restrict_memprotect_uv(unsigned long phys_addr, unsigned long size)
++{
++ int ret;
++
++#if defined CONFIG_X86_64
++ ret = uv_bios_change_memprotect(phys_addr, size,
++ UV_MEMPROT_RESTRICT_ACCESS);
++ if (ret != BIOS_STATUS_SUCCESS) {
++ dev_err(xp, "uv_bios_change_memprotect(,, "
++ "UV_MEMPROT_RESTRICT_ACCESS) failed, ret=%d\n", ret);
++ return xpBiosError;
++ }
++
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ u64 nasid_array;
++
++ ret = sn_change_memprotect(phys_addr, size, SN_MEMPROT_ACCESS_CLASS_0,
++ &nasid_array);
++ if (ret != 0) {
++ dev_err(xp, "sn_change_memprotect(,, "
++ "SN_MEMPROT_ACCESS_CLASS_0,) failed ret=%d\n", ret);
++ return xpSalError;
++ }
++#else
++ #error not a supported configuration
++#endif
++ return xpSuccess;
++}
++
+ enum xp_retval
+ xp_init_uv(void)
+ {
+@@ -61,6 +125,8 @@ xp_init_uv(void)
+ xp_pa = xp_pa_uv;
+ xp_remote_memcpy = xp_remote_memcpy_uv;
+ xp_cpu_to_nasid = xp_cpu_to_nasid_uv;
++ xp_expand_memprotect = xp_expand_memprotect_uv;
++ xp_restrict_memprotect = xp_restrict_memprotect_uv;
+
+ return xpSuccess;
+ }
+--- a/drivers/misc/sgi-xp/xpc_sn2.c
++++ b/drivers/misc/sgi-xp/xpc_sn2.c
+@@ -553,22 +553,17 @@ static u64 xpc_prot_vec_sn2[MAX_NUMNODES
+ static enum xp_retval
+ xpc_allow_amo_ops_sn2(struct amo *amos_page)
+ {
+- u64 nasid_array = 0;
+- int ret;
++ enum xp_retval ret = xpSuccess;
+
+ /*
+ * On SHUB 1.1, we cannot call sn_change_memprotect() since the BIST
+ * collides with memory operations. On those systems we call
+ * xpc_allow_amo_ops_shub_wars_1_1_sn2() instead.
+ */
+- if (!enable_shub_wars_1_1()) {
+- ret = sn_change_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE,
+- SN_MEMPROT_ACCESS_CLASS_1,
+- &nasid_array);
+- if (ret != 0)
+- return xpSalError;
+- }
+- return xpSuccess;
++ if (!enable_shub_wars_1_1())
++ ret = xp_expand_memprotect(ia64_tpa((u64)amos_page), PAGE_SIZE);
++
++ return ret;
+ }
+
+ /*
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Subject: [PATCH] Define xp_partition_id and xp_region_size
+References: bnc#442461
+
+Set xp_partition_id and xp_region_size to their correct values.
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+
+ drivers/misc/sgi-xp/xp_uv.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/misc/sgi-xp/xp_uv.c
++++ b/drivers/misc/sgi-xp/xp_uv.c
+@@ -119,8 +119,8 @@ xp_init_uv(void)
+ BUG_ON(!is_uv());
+
+ xp_max_npartitions = XP_MAX_NPARTITIONS_UV;
+- xp_partition_id = 0; /* !!! not correct value */
+- xp_region_size = 0; /* !!! not correct value */
++ xp_partition_id = sn_partition_id;
++ xp_region_size = sn_region_size;
+
+ xp_pa = xp_pa_uv;
+ xp_remote_memcpy = xp_remote_memcpy_uv;
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Subject: [PATCH] Add the code to create the activate and notify gru message queues
+References: bnc#442461
+
+For UV, add the code to create the activate and notify GRU message queues.
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
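+
+In outline, xpc_create_gru_mq_uv() brings a queue up in the following
+order (teardown undoes the steps in reverse); a condensed sketch of the
+code added below:
+
+	mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+	mq->address = page_address(alloc_pages_node(...));
+	gru_create_message_queue(mq->address, mq_size);
+	xpc_gru_mq_watchlist_alloc_uv(mq);	/* BIOS watchlist -> irq MMR */
+	xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
+	request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
+	xp_expand_memprotect(xp_pa(mq->address), mq_size);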
+
+ drivers/misc/sgi-xp/xpc.h | 12 +
+ drivers/misc/sgi-xp/xpc_uv.c | 259 ++++++++++++++++++++++++++++++++++---------
+ 2 files changed, 218 insertions(+), 53 deletions(-)
+
+--- a/drivers/misc/sgi-xp/xpc.h
++++ b/drivers/misc/sgi-xp/xpc.h
+@@ -181,6 +181,18 @@ struct xpc_vars_part_sn2 {
+ xpc_nasid_mask_nlongs))
+
+ /*
++ * Info pertinent to a GRU message queue using a watch list for irq generation.
++ */
++struct xpc_gru_mq_uv {
++ void *address; /* address of GRU message queue */
++ unsigned int order; /* size of GRU message queue as a power of 2 */
++ int irq; /* irq raised when message is received in mq */
++ int mmr_blade; /* blade where watchlist was allocated from */
++ unsigned long mmr_offset; /* offset of irq mmr located on mmr_blade */
++ int watchlist_num; /* number of watchlist allocated by BIOS */
++};
++
++/*
+ * The activate_mq is used to send/receive GRU messages that affect XPC's
+ * heartbeat, partition active state, and channel state. This is UV only.
+ */
+--- a/drivers/misc/sgi-xp/xpc_uv.c
++++ b/drivers/misc/sgi-xp/xpc_uv.c
+@@ -18,7 +18,15 @@
+ #include <linux/interrupt.h>
+ #include <linux/delay.h>
+ #include <linux/device.h>
++#include <linux/err.h>
+ #include <asm/uv/uv_hub.h>
++#if defined CONFIG_X86_64
++#include <asm/uv/bios.h>
++#include <asm/uv/uv_irq.h>
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++#include <asm/sn/intr.h>
++#include <asm/sn/sn_sal.h>
++#endif
+ #include "../sgi-gru/gru.h"
+ #include "../sgi-gru/grukservices.h"
+ #include "xpc.h"
+@@ -27,15 +35,17 @@ static atomic64_t xpc_heartbeat_uv;
+ static DECLARE_BITMAP(xpc_heartbeating_to_mask_uv, XP_MAX_NPARTITIONS_UV);
+
+ #define XPC_ACTIVATE_MSG_SIZE_UV (1 * GRU_CACHE_LINE_BYTES)
+-#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
++#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
++ XPC_ACTIVATE_MSG_SIZE_UV)
++#define XPC_ACTIVATE_IRQ_NAME "xpc_activate"
+
+-#define XPC_ACTIVATE_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+- XPC_ACTIVATE_MSG_SIZE_UV)
+-#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
+- XPC_NOTIFY_MSG_SIZE_UV)
++#define XPC_NOTIFY_MSG_SIZE_UV (2 * GRU_CACHE_LINE_BYTES)
++#define XPC_NOTIFY_MQ_SIZE_UV (4 * XP_MAX_NPARTITIONS_UV * \
++ XPC_NOTIFY_MSG_SIZE_UV)
++#define XPC_NOTIFY_IRQ_NAME "xpc_notify"
+
+-static void *xpc_activate_mq_uv;
+-static void *xpc_notify_mq_uv;
++static struct xpc_gru_mq_uv *xpc_activate_mq_uv;
++static struct xpc_gru_mq_uv *xpc_notify_mq_uv;
+
+ static int
+ xpc_setup_partitions_sn_uv(void)
+@@ -52,62 +62,209 @@ xpc_setup_partitions_sn_uv(void)
+ return 0;
+ }
+
+-static void *
+-xpc_create_gru_mq_uv(unsigned int mq_size, int cpuid, unsigned int irq,
++static int
++xpc_get_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq, int cpu, char *irq_name)
++{
++#if defined CONFIG_X86_64
++ mq->irq = uv_setup_irq(irq_name, cpu, mq->mmr_blade, mq->mmr_offset);
++ if (mq->irq < 0) {
++ dev_err(xpc_part, "uv_setup_irq() returned error=%d\n",
++ mq->irq);
++ }
++
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ int mmr_pnode;
++ unsigned long mmr_value;
++
++ if (strcmp(irq_name, XPC_ACTIVATE_IRQ_NAME) == 0)
++ mq->irq = SGI_XPC_ACTIVATE;
++ else if (strcmp(irq_name, XPC_NOTIFY_IRQ_NAME) == 0)
++ mq->irq = SGI_XPC_NOTIFY;
++ else
++ return -EINVAL;
++
++ mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
++ mmr_value = (unsigned long)cpu_physical_id(cpu) << 32 | mq->irq;
++
++ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
++#else
++ #error not a supported configuration
++#endif
++
++ return 0;
++}
++
++static void
++xpc_release_gru_mq_irq_uv(struct xpc_gru_mq_uv *mq)
++{
++#if defined CONFIG_X86_64
++ uv_teardown_irq(mq->irq, mq->mmr_blade, mq->mmr_offset);
++
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ int mmr_pnode;
++ unsigned long mmr_value;
++
++ mmr_pnode = uv_blade_to_pnode(mq->mmr_blade);
++ mmr_value = 1UL << 16;
++
++ uv_write_global_mmr64(mmr_pnode, mq->mmr_offset, mmr_value);
++#else
++ #error not a supported configuration
++#endif
++}
++
++static int
++xpc_gru_mq_watchlist_alloc_uv(struct xpc_gru_mq_uv *mq)
++{
++ int ret;
++
++#if defined CONFIG_X86_64
++ ret = uv_bios_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
++ &mq->mmr_offset);
++ if (ret < 0) {
++ dev_err(xpc_part, "uv_bios_mq_watchlist_alloc() failed, "
++ "ret=%d\n", ret);
++ return ret;
++ }
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ ret = sn_mq_watchlist_alloc(mq->mmr_blade, mq->address, mq->order,
++ &mq->mmr_offset);
++ if (ret < 0) {
++ dev_err(xpc_part, "sn_mq_watchlist_alloc() failed, ret=%d\n",
++ ret);
++ return -EBUSY;
++ }
++#else
++ #error not a supported configuration
++#endif
++
++ mq->watchlist_num = ret;
++ return 0;
++}
++
++static void
++xpc_gru_mq_watchlist_free_uv(struct xpc_gru_mq_uv *mq)
++{
++ int ret;
++
++#if defined CONFIG_X86_64
++ ret = uv_bios_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
++ BUG_ON(ret != BIOS_STATUS_SUCCESS);
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ ret = sn_mq_watchlist_free(mq->mmr_blade, mq->watchlist_num);
++ BUG_ON(ret != SALRET_OK);
++#else
++ #error not a supported configuration
++#endif
++}
++
++static struct xpc_gru_mq_uv *
++xpc_create_gru_mq_uv(unsigned int mq_size, int cpu, char *irq_name,
+ irq_handler_t irq_handler)
+ {
++ enum xp_retval xp_ret;
+ int ret;
+ int nid;
+- int mq_order;
++ int pg_order;
+ struct page *page;
+- void *mq;
++ struct xpc_gru_mq_uv *mq;
++
++ mq = kmalloc(sizeof(struct xpc_gru_mq_uv), GFP_KERNEL);
++ if (mq == NULL) {
++ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to kmalloc() "
++ "a xpc_gru_mq_uv structure\n");
++ ret = -ENOMEM;
++ goto out_1;
++ }
++
++ pg_order = get_order(mq_size);
++ mq->order = pg_order + PAGE_SHIFT;
++ mq_size = 1UL << mq->order;
+
+- nid = cpu_to_node(cpuid);
+- mq_order = get_order(mq_size);
++ mq->mmr_blade = uv_cpu_to_blade_id(cpu);
++
++ nid = cpu_to_node(cpu);
+ page = alloc_pages_node(nid, GFP_KERNEL | __GFP_ZERO | GFP_THISNODE,
+- mq_order);
++ pg_order);
+ if (page == NULL) {
+ dev_err(xpc_part, "xpc_create_gru_mq_uv() failed to alloc %d "
+ "bytes of memory on nid=%d for GRU mq\n", mq_size, nid);
+- return NULL;
++ ret = -ENOMEM;
++ goto out_2;
+ }
++ mq->address = page_address(page);
+
+- mq = page_address(page);
+- ret = gru_create_message_queue(mq, mq_size);
++ ret = gru_create_message_queue(mq->address, mq_size);
+ if (ret != 0) {
+ dev_err(xpc_part, "gru_create_message_queue() returned "
+ "error=%d\n", ret);
+- free_pages((unsigned long)mq, mq_order);
+- return NULL;
++ ret = -EINVAL;
++ goto out_3;
+ }
+
+- /* !!! Need to do some other things to set up IRQ */
++ /* enable generation of irq when GRU mq operation occurs to this mq */
++ ret = xpc_gru_mq_watchlist_alloc_uv(mq);
++ if (ret != 0)
++ goto out_3;
++
++ ret = xpc_get_gru_mq_irq_uv(mq, cpu, irq_name);
++ if (ret != 0)
++ goto out_4;
+
+- ret = request_irq(irq, irq_handler, 0, "xpc", NULL);
++ ret = request_irq(mq->irq, irq_handler, 0, irq_name, NULL);
+ if (ret != 0) {
+ dev_err(xpc_part, "request_irq(irq=%d) returned error=%d\n",
+- irq, ret);
+- free_pages((unsigned long)mq, mq_order);
+- return NULL;
++ mq->irq, ret);
++ goto out_5;
+ }
+
+- /* !!! enable generation of irq when GRU mq op occurs to this mq */
+-
+- /* ??? allow other partitions to access GRU mq? */
++ /* allow other partitions to access this GRU mq */
++ xp_ret = xp_expand_memprotect(xp_pa(mq->address), mq_size);
++ if (xp_ret != xpSuccess) {
++ ret = -EACCES;
++ goto out_6;
++ }
+
+ return mq;
++
++ /* something went wrong */
++out_6:
++ free_irq(mq->irq, NULL);
++out_5:
++ xpc_release_gru_mq_irq_uv(mq);
++out_4:
++ xpc_gru_mq_watchlist_free_uv(mq);
++out_3:
++ free_pages((unsigned long)mq->address, pg_order);
++out_2:
++ kfree(mq);
++out_1:
++ return ERR_PTR(ret);
+ }
+
+ static void
+-xpc_destroy_gru_mq_uv(void *mq, unsigned int mq_size, unsigned int irq)
++xpc_destroy_gru_mq_uv(struct xpc_gru_mq_uv *mq)
+ {
+- /* ??? disallow other partitions to access GRU mq? */
++ unsigned int mq_size;
++ int pg_order;
++ int ret;
++
++ /* disallow other partitions to access GRU mq */
++ mq_size = 1UL << mq->order;
++ ret = xp_restrict_memprotect(xp_pa(mq->address), mq_size);
++ BUG_ON(ret != xpSuccess);
++
++ /* unregister irq handler and release mq irq/vector mapping */
++ free_irq(mq->irq, NULL);
++ xpc_release_gru_mq_irq_uv(mq);
+
+- /* !!! disable generation of irq when GRU mq op occurs to this mq */
++ /* disable generation of irq when GRU mq op occurs to this mq */
++ xpc_gru_mq_watchlist_free_uv(mq);
+
+- free_irq(irq, NULL);
++ pg_order = mq->order - PAGE_SHIFT;
++ free_pages((unsigned long)mq->address, pg_order);
+
+- free_pages((unsigned long)mq, get_order(mq_size));
++ kfree(mq);
+ }
+
+ static enum xp_retval
+@@ -402,7 +559,10 @@ xpc_handle_activate_IRQ_uv(int irq, void
+ struct xpc_partition *part;
+ int wakeup_hb_checker = 0;
+
+- while ((msg_hdr = gru_get_next_message(xpc_activate_mq_uv)) != NULL) {
++ while (1) {
++ msg_hdr = gru_get_next_message(xpc_activate_mq_uv->address);
++ if (msg_hdr == NULL)
++ break;
+
+ partid = msg_hdr->partid;
+ if (partid < 0 || partid >= XP_MAX_NPARTITIONS_UV) {
+@@ -418,7 +578,7 @@ xpc_handle_activate_IRQ_uv(int irq, void
+ }
+ }
+
+- gru_free_message(xpc_activate_mq_uv, msg_hdr);
++ gru_free_message(xpc_activate_mq_uv->address, msg_hdr);
+ }
+
+ if (wakeup_hb_checker)
+@@ -507,7 +667,7 @@ xpc_get_partition_rsvd_page_pa_uv(void *
+ static int
+ xpc_setup_rsvd_page_sn_uv(struct xpc_rsvd_page *rp)
+ {
+- rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv);
++ rp->sn.activate_mq_gpa = uv_gpa(xpc_activate_mq_uv->address);
+ return 0;
+ }
+
+@@ -1410,22 +1570,18 @@ xpc_init_uv(void)
+ return -E2BIG;
+ }
+
+- /* ??? The cpuid argument's value is 0, is that what we want? */
+- /* !!! The irq argument's value isn't correct. */
+- xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0, 0,
++ xpc_activate_mq_uv = xpc_create_gru_mq_uv(XPC_ACTIVATE_MQ_SIZE_UV, 0,
++ XPC_ACTIVATE_IRQ_NAME,
+ xpc_handle_activate_IRQ_uv);
+- if (xpc_activate_mq_uv == NULL)
+- return -ENOMEM;
++ if (IS_ERR(xpc_activate_mq_uv))
++ return PTR_ERR(xpc_activate_mq_uv);
+
+- /* ??? The cpuid argument's value is 0, is that what we want? */
+- /* !!! The irq argument's value isn't correct. */
+- xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0, 0,
++ xpc_notify_mq_uv = xpc_create_gru_mq_uv(XPC_NOTIFY_MQ_SIZE_UV, 0,
++ XPC_NOTIFY_IRQ_NAME,
+ xpc_handle_notify_IRQ_uv);
+- if (xpc_notify_mq_uv == NULL) {
+- /* !!! The irq argument's value isn't correct. */
+- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv,
+- XPC_ACTIVATE_MQ_SIZE_UV, 0);
+- return -ENOMEM;
++ if (IS_ERR(xpc_notify_mq_uv)) {
++ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
++ return PTR_ERR(xpc_notify_mq_uv);
+ }
+
+ return 0;
+@@ -1434,9 +1590,6 @@ xpc_init_uv(void)
+ void
+ xpc_exit_uv(void)
+ {
+- /* !!! The irq argument's value isn't correct. */
+- xpc_destroy_gru_mq_uv(xpc_notify_mq_uv, XPC_NOTIFY_MQ_SIZE_UV, 0);
+-
+- /* !!! The irq argument's value isn't correct. */
+- xpc_destroy_gru_mq_uv(xpc_activate_mq_uv, XPC_ACTIVATE_MQ_SIZE_UV, 0);
++ xpc_destroy_gru_mq_uv(xpc_notify_mq_uv);
++ xpc_destroy_gru_mq_uv(xpc_activate_mq_uv);
+ }
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Subject: [PATCH] Add support for getting the address of a partition's reserved page.
+References: bnc#442461
+
+Add support for getting the address of a partition's reserved page.
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+
+ drivers/misc/sgi-xp/xpc_uv.c | 31 ++++++++++++++++++++++++++++---
+ 1 file changed, 28 insertions(+), 3 deletions(-)
+
+Index: linux/drivers/misc/sgi-xp/xpc_uv.c
+===================================================================
+--- linux.orig/drivers/misc/sgi-xp/xpc_uv.c 2008-10-21 12:50:18.000000000 -0500
++++ linux/drivers/misc/sgi-xp/xpc_uv.c 2008-10-21 14:00:13.000000000 -0500
+@@ -642,7 +642,7 @@ xpc_send_local_activate_IRQ_uv(struct xp
+ struct xpc_partition_uv *part_uv = &part->sn.uv;
+
+ /*
+- * !!! Make our side think that the remote parition sent an activate
++ * !!! Make our side think that the remote partition sent an activate
+ * !!! message our way by doing what the activate IRQ handler would
+ * !!! do had one really been sent.
+ */
+@@ -660,8 +660,33 @@ static enum xp_retval
+ xpc_get_partition_rsvd_page_pa_uv(void *buf, u64 *cookie, unsigned long *rp_pa,
+ size_t *len)
+ {
+- /* !!! call the UV version of sn_partition_reserved_page_pa() */
+- return xpUnsupported;
++ s64 status;
++ enum xp_retval ret;
++
++#if defined CONFIG_X86_64
++ status = uv_bios_reserved_page_pa((u64)buf, cookie, (u64 *)rp_pa,
++ (u64 *)len);
++ if (status == BIOS_STATUS_SUCCESS)
++ ret = xpSuccess;
++ else if (status == BIOS_STATUS_MORE_PASSES)
++ ret = xpNeedMoreInfo;
++ else
++ ret = xpBiosError;
++
++#elif defined CONFIG_IA64_GENERIC || defined CONFIG_IA64_SGI_UV
++ status = sn_partition_reserved_page_pa((u64)buf, cookie, rp_pa, len);
++ if (status == SALRET_OK)
++ ret = xpSuccess;
++ else if (status == SALRET_MORE_PASSES)
++ ret = xpNeedMoreInfo;
++ else
++ ret = xpSalError;
++
++#else
++ #error not a supported configuration
++#endif
++
++ return ret;
+ }
+
+ static int
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Date: Thu, 2 Oct 2008 17:18:21 +0000 (-0500)
+Subject: x86, UV: add uv_setup_irq() and uv_teardown_irq() functions, v3
+X-Git-Tag: v2.6.28-rc1~80^2~27
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=4173a0e7371ece227559b44943c6fd456ee470d1
+References: bnc#442461
+
+x86, UV: add uv_setup_irq() and uv_teardown_irq() functions, v3
+
+Provide a means for UV interrupt MMRs to be set up with the message to be sent
+when an MSI is raised.
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
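+
+A usage sketch ("cpu", "mmr_blade" and "mmr_offset" are placeholders;
+the caller still does its own request_irq() on the returned irq):
+
+	int irq;
+
+	irq = uv_setup_irq("my_driver", cpu, mmr_blade, mmr_offset);
+	if (irq < 0)
+		return irq;
+	/* ... request_irq(irq, ...), handle interrupts ... */
+	uv_teardown_irq(irq, mmr_blade, mmr_offset);
+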
+ arch/x86/kernel/Makefile | 2 -
+ arch/x86/kernel/io_apic_64.c | 68 +++++++++++++++++++++++++++++++++++++
+ arch/x86/kernel/uv_irq.c | 79 +++++++++++++++++++++++++++++++++++++++++++
+ include/asm-x86/uv/uv_irq.h | 36 +++++++++++++++++++
+ 4 files changed, 184 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/Makefile
++++ b/arch/x86/kernel/Makefile
+@@ -106,7 +106,7 @@ ifeq ($(CONFIG_X86_64),y)
+ obj-y += uv_sysfs.o
+ obj-y += genx2apic_cluster.o
+ obj-y += genx2apic_phys.o
+- obj-y += bios_uv.o
++ obj-y += bios_uv.o uv_irq.o
+ obj-$(CONFIG_X86_PM_TIMER) += pmtimer_64.o
+ obj-$(CONFIG_AUDIT) += audit_64.o
+
+--- a/arch/x86/kernel/io_apic_64.c
++++ b/arch/x86/kernel/io_apic_64.c
+@@ -51,6 +51,8 @@
+ #include <asm/msidef.h>
+ #include <asm/hypertransport.h>
+ #include <asm/irq_remapping.h>
++#include <asm/uv/uv_hub.h>
++#include <asm/uv/uv_irq.h>
+
+ #include <mach_ipi.h>
+ #include <mach_apic.h>
+@@ -2787,6 +2789,72 @@ int arch_setup_ht_irq(unsigned int irq,
+ }
+ #endif /* CONFIG_HT_IRQ */
+
++#ifdef CONFIG_X86_64
++/*
++ * Re-target the irq to the specified CPU and enable the specified MMR located
++ * on the specified blade to allow the sending of MSIs to the specified CPU.
++ */
++int arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
++ unsigned long mmr_offset)
++{
++ const cpumask_t *eligible_cpu = get_cpu_mask(cpu);
++ struct irq_cfg *cfg;
++ int mmr_pnode;
++ unsigned long mmr_value;
++ struct uv_IO_APIC_route_entry *entry;
++ unsigned long flags;
++ int err;
++
++ err = assign_irq_vector(irq, eligible_cpu);
++ if (err != 0)
++ return err;
++
++ spin_lock_irqsave(&vector_lock, flags);
++ set_irq_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
++ irq_name);
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ cfg = &irq_cfg[irq];
++
++ mmr_value = 0;
++ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
++ BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
++
++ entry->vector = cfg->vector;
++ entry->delivery_mode = INT_DELIVERY_MODE;
++ entry->dest_mode = INT_DEST_MODE;
++ entry->polarity = 0;
++ entry->trigger = 0;
++ entry->mask = 0;
++ entry->dest = cpu_mask_to_apicid(eligible_cpu);
++
++ mmr_pnode = uv_blade_to_pnode(mmr_blade);
++ uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
++
++ return irq;
++}
++
++/*
++ * Disable the specified MMR located on the specified blade so that MSIs are
++ * no longer allowed to be sent.
++ */
++void arch_disable_uv_irq(int mmr_blade, unsigned long mmr_offset)
++{
++ unsigned long mmr_value;
++ struct uv_IO_APIC_route_entry *entry;
++ int mmr_pnode;
++
++ mmr_value = 0;
++ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
++ BUG_ON(sizeof(struct uv_IO_APIC_route_entry) != sizeof(unsigned long));
++
++ entry->mask = 1;
++
++ mmr_pnode = uv_blade_to_pnode(mmr_blade);
++ uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
++}
++#endif /* CONFIG_X86_64 */
++
+ /* --------------------------------------------------------------------------
+ ACPI-based IOAPIC Configuration
+ -------------------------------------------------------------------------- */
+--- /dev/null
++++ b/arch/x86/kernel/uv_irq.c
+@@ -0,0 +1,79 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * SGI UV IRQ functions
++ *
++ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
++ */
++
++#include <linux/module.h>
++#include <linux/irq.h>
++
++#include <asm/apic.h>
++#include <asm/uv/uv_irq.h>
++
++static void uv_noop(unsigned int irq)
++{
++}
++
++static unsigned int uv_noop_ret(unsigned int irq)
++{
++ return 0;
++}
++
++static void uv_ack_apic(unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++struct irq_chip uv_irq_chip = {
++ .name = "UV-CORE",
++ .startup = uv_noop_ret,
++ .shutdown = uv_noop,
++ .enable = uv_noop,
++ .disable = uv_noop,
++ .ack = uv_noop,
++ .mask = uv_noop,
++ .unmask = uv_noop,
++ .eoi = uv_ack_apic,
++ .end = uv_noop,
++};
++
++/*
++ * Set up a mapping of an available irq and vector, and enable the specified
++ * MMR that defines the MSI that is to be sent to the specified CPU when an
++ * interrupt is raised.
++ */
++int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
++ unsigned long mmr_offset)
++{
++ int irq;
++ int ret;
++
++ irq = create_irq();
++ if (irq < 0)
++ return -EBUSY;
++
++ ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset);
++ if (ret != irq)
++ destroy_irq(irq);
++
++ return ret;
++}
++EXPORT_SYMBOL_GPL(uv_setup_irq);
++
++/*
++ * Tear down a mapping of an irq and vector, and disable the specified MMR that
++ * defined the MSI that was to be sent to the specified CPU when an interrupt
++ * was raised.
++ *
++ * Set mmr_blade and mmr_offset to what was passed in on uv_setup_irq().
++ */
++void uv_teardown_irq(unsigned int irq, int mmr_blade, unsigned long mmr_offset)
++{
++ arch_disable_uv_irq(mmr_blade, mmr_offset);
++ destroy_irq(irq);
++}
++EXPORT_SYMBOL_GPL(uv_teardown_irq);
+--- /dev/null
++++ b/include/asm-x86/uv/uv_irq.h
+@@ -0,0 +1,36 @@
++/*
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * SGI UV IRQ definitions
++ *
++ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
++ */
++
++#ifndef _ASM_X86_UV_UV_IRQ_H
++#define _ASM_X86_UV_UV_IRQ_H
++
++/* If a generic version of this structure gets defined, eliminate this one. */
++struct uv_IO_APIC_route_entry {
++ __u64 vector : 8,
++ delivery_mode : 3,
++ dest_mode : 1,
++ delivery_status : 1,
++ polarity : 1,
++ __reserved_1 : 1,
++ trigger : 1,
++ mask : 1,
++ __reserved_2 : 15,
++ dest : 32;
++};
++
++extern struct irq_chip uv_irq_chip;
++
++extern int arch_enable_uv_irq(char *, unsigned int, int, int, unsigned long);
++extern void arch_disable_uv_irq(int, unsigned long);
++
++extern int uv_setup_irq(char *, int, int, unsigned long);
++extern void uv_teardown_irq(unsigned int, int, unsigned long);
++
++#endif /* _ASM_X86_UV_UV_IRQ_H */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] ipmi: Fix section type conflicts
+
+ Custom module parameter handlers can't be static, since the module
+ macros explicitly reference those symbols from the __param section,
+ which causes a section type conflict on ia64. This doesn't occur with
+ the standard types, since their handlers are global and exported.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+---
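+
+The pattern in question, reduced to a sketch (names hypothetical):
+module_param_call() emits a __param entry pointing at the handler, so
+the handler needs external linkage here.
+
+	int my_handler(const char *val, struct kernel_param *kp);
+	module_param_call(myparam, my_handler, NULL, NULL, 0200);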
+
+ drivers/char/ipmi/ipmi_si_intf.c | 4 ++--
+ drivers/char/ipmi/ipmi_watchdog.c | 10 +++++-----
+ 2 files changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/char/ipmi/ipmi_si_intf.c
++++ b/drivers/char/ipmi/ipmi_si_intf.c
+@@ -1152,7 +1152,7 @@ static unsigned int num_slave_addrs;
+ #define IPMI_MEM_ADDR_SPACE 1
+ static char *addr_space_to_str[] = { "i/o", "mem" };
+
+-static int hotmod_handler(const char *val, struct kernel_param *kp);
++int hotmod_handler(const char *val, struct kernel_param *kp);
+
+ module_param_call(hotmod, hotmod_handler, NULL, NULL, 0200);
+ MODULE_PARM_DESC(hotmod, "Add and remove interfaces. See"
+@@ -1574,7 +1574,7 @@ static int check_hotmod_int_op(const cha
+ return 0;
+ }
+
+-static int hotmod_handler(const char *val, struct kernel_param *kp)
++int hotmod_handler(const char *val, struct kernel_param *kp)
+ {
+ char *str = kstrdup(val, GFP_KERNEL);
+ int rv;
+--- a/drivers/char/ipmi/ipmi_watchdog.c
++++ b/drivers/char/ipmi/ipmi_watchdog.c
+@@ -196,7 +196,7 @@ static void ipmi_unregister_watchdog(int
+ */
+ static int start_now;
+
+-static int set_param_int(const char *val, struct kernel_param *kp)
++int set_param_int(const char *val, struct kernel_param *kp)
+ {
+ char *endp;
+ int l;
+@@ -215,7 +215,7 @@ static int set_param_int(const char *val
+ return rv;
+ }
+
+-static int get_param_int(char *buffer, struct kernel_param *kp)
++int get_param_int(char *buffer, struct kernel_param *kp)
+ {
+ return sprintf(buffer, "%i", *((int *)kp->arg));
+ }
+@@ -227,7 +227,7 @@ static int preaction_op(const char *inva
+ static int preop_op(const char *inval, char *outval);
+ static void check_parms(void);
+
+-static int set_param_str(const char *val, struct kernel_param *kp)
++int set_param_str(const char *val, struct kernel_param *kp)
+ {
+ action_fn fn = (action_fn) kp->arg;
+ int rv = 0;
+@@ -251,7 +251,7 @@ static int set_param_str(const char *val
+ return rv;
+ }
+
+-static int get_param_str(char *buffer, struct kernel_param *kp)
++int get_param_str(char *buffer, struct kernel_param *kp)
+ {
+ action_fn fn = (action_fn) kp->arg;
+ int rv;
+@@ -263,7 +263,7 @@ static int get_param_str(char *buffer, s
+ }
+
+
+-static int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
++int set_param_wdog_ifnum(const char *val, struct kernel_param *kp)
+ {
+ int rv = param_set_int(val, kp);
+ if (rv)
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] psmouse: fix section type conflict
+
+ Custom module parameter handlers can't be static, since the module
+ macros explicitly reference those symbols from the __param section,
+ which causes a section type conflict on ia64. This doesn't occur with
+ the standard types, since their handlers are global and exported.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ drivers/input/mouse/psmouse-base.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -36,8 +36,8 @@ MODULE_DESCRIPTION(DRIVER_DESC);
+ MODULE_LICENSE("GPL");
+
+ static unsigned int psmouse_max_proto = PSMOUSE_AUTO;
+-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp);
+-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp);
++int psmouse_set_maxproto(const char *val, struct kernel_param *kp);
++int psmouse_get_maxproto(char *buffer, struct kernel_param *kp);
+ #define param_check_proto_abbrev(name, p) __param_check(name, p, unsigned int)
+ #define param_set_proto_abbrev psmouse_set_maxproto
+ #define param_get_proto_abbrev psmouse_get_maxproto
+@@ -1573,7 +1573,8 @@ static ssize_t psmouse_attr_set_resoluti
+ }
+
+
+-static int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
++/* These two should be static, but it causes a section type conflict */
++int psmouse_set_maxproto(const char *val, struct kernel_param *kp)
+ {
+ const struct psmouse_protocol *proto;
+
+@@ -1590,7 +1591,7 @@ static int psmouse_set_maxproto(const ch
+ return 0;
+ }
+
+-static int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
++int psmouse_get_maxproto(char *buffer, struct kernel_param *kp)
+ {
+ int type = *((unsigned int *)kp->arg);
+
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Add ``cloneconfig'' target
+
+Cloneconfig takes the first configuration it finds which appears
+to belong to the running kernel, and configures the kernel sources
+to match this configuration as closely as possible.
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
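+---
+
+Typical invocation (which configuration source is found, here
+/proc/config.gz, varies by system):
+
+	$ make cloneconfig
+	Cloning configuration file /proc/config.gz
+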
+ scripts/kconfig/Makefile | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/scripts/kconfig/Makefile
++++ b/scripts/kconfig/Makefile
+@@ -61,6 +61,22 @@ allnoconfig: $(obj)/conf
+ allmodconfig: $(obj)/conf
+ $< -m $(Kconfig)
+
++UNAME_RELEASE := $(shell uname -r)
++CLONECONFIG := $(firstword $(wildcard /proc/config.gz \
++ /lib/modules/$(UNAME_RELEASE)/.config \
++ /etc/kernel-config \
++ /boot/config-$(UNAME_RELEASE)))
++cloneconfig: $(obj)/conf
++ $(Q)case "$(CLONECONFIG)" in \
++ '') echo -e "The configuration of the running" \
++ "kernel could not be determined\n"; \
++ false ;; \
++ *.gz) gzip -cd $(CLONECONFIG) > .config.running ;; \
++ *) cat $(CLONECONFIG) > .config.running ;; \
++ esac && \
++ echo -e "Cloning configuration file $(CLONECONFIG)\n"
++ $(Q)$< -D .config.running arch/$(SRCARCH)/Kconfig
++
+ defconfig: $(obj)/conf
+ ifeq ($(KBUILD_DEFCONFIG),)
+ $< -d $(Kconfig)
--- /dev/null
+From: Olaf Dabrunz <od@suse.de>
+Subject: [apm] default to "power_off" when SMP kernel is used on single processor machines
+References: SUSE221667
+
+This patch turns on support for the APM power_off function by default when the
+SMP kernel is used on single processor machines.
+
+It is a bit ugly to use a separate variable to make sure the default value is
+only used when needed and the power_off variable is not initialized twice. But
+I did not find a better way to do this with the way the current initialization
+system works.
+
+Signed-off-by: Olaf Dabrunz <od@suse.de>
+
+---
+ arch/x86/kernel/apm_32.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/apm_32.c
++++ b/arch/x86/kernel/apm_32.c
+@@ -389,6 +389,7 @@ static int smp __read_mostly;
+ static int apm_disabled = -1;
+ #ifdef CONFIG_SMP
+ static int power_off;
++static int power_off_set;
+ #else
+ static int power_off = 1;
+ #endif
+@@ -1797,6 +1798,14 @@ static int apm(void *unused)
+ }
+ }
+
++#ifdef CONFIG_SMP
++ if (!power_off_set) {
++ power_off = (num_online_cpus() == 1);
++ /* remember not to initialize (with default value) again */
++ power_off_set = 1;
++ }
++#endif
++
+ /* Install our power off handler.. */
+ if (power_off)
+ pm_power_off = apm_power_off;
+@@ -1840,8 +1849,12 @@ static int __init apm_setup(char *str)
+ if (strncmp(str, "debug", 5) == 0)
+ debug = !invert;
+ if ((strncmp(str, "power-off", 9) == 0) ||
+- (strncmp(str, "power_off", 9) == 0))
++ (strncmp(str, "power_off", 9) == 0)) {
+ power_off = !invert;
++#ifdef CONFIG_SMP
++ power_off_set = 1;
++#endif
++ }
+ if (strncmp(str, "smp", 3) == 0) {
+ smp = !invert;
+ idle_threshold = 100;
--- /dev/null
+From: Chris Mason <mason@suse.com>
+Subject: slab testing module
+
+---
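+
+Typical usage, as a sketch; everything except the plain slab test is
+destructive by design and will take the machine down:
+
+	# modprobe crasher threads=4 seed=12345	 (slab verify loop)
+	# modprobe crasher call_bug=1		 (deliberately BUG())
+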
+ drivers/char/Kconfig | 5 +
+ drivers/char/Makefile | 1
+ drivers/char/crasher.c | 225 +++++++++++++++++++++++++++++++++++++++++++++++++
+ 3 files changed, 231 insertions(+)
+
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -1104,5 +1104,10 @@ config DEVPORT
+
+ source "drivers/s390/char/Kconfig"
+
++config CRASHER
++ tristate "Crasher Module"
++ help
++ Slab cache memory tester. Only use this as a module.
++
+ endmenu
+
+--- a/drivers/char/Makefile
++++ b/drivers/char/Makefile
+@@ -105,6 +105,7 @@ obj-$(CONFIG_IPMI_HANDLER) += ipmi/
+
+ obj-$(CONFIG_HANGCHECK_TIMER) += hangcheck-timer.o
+ obj-$(CONFIG_TCG_TPM) += tpm/
++obj-$(CONFIG_CRASHER) += crasher.o
+
+ obj-$(CONFIG_PS3_FLASH) += ps3flash.o
+
+--- /dev/null
++++ b/drivers/char/crasher.c
+@@ -0,0 +1,225 @@
++/*
++ * crasher.c, it breaks things
++ */
++
++
++#include <linux/module.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/completion.h>
++#include <linux/jiffies.h>
++#include <linux/sched.h>
++#include <linux/moduleparam.h>
++
++static int module_exiting;
++static struct completion startup = COMPLETION_INITIALIZER(startup);
++static unsigned long rand_seed = 152L;
++static unsigned long seed = 152L;
++static int threads = 1;
++static int call_panic;
++static int call_bug;
++static int trap_null, call_null, jump_null;
++static long trap_read, trap_write, call_bad, jump_bad;
++
++module_param(seed, ulong, 0);
++module_param(call_panic, bool, 0);
++module_param(call_bug, bool, 0);
++module_param(trap_null, bool, 0);
++module_param(trap_read, long, 0);
++module_param(trap_write, long, 0);
++module_param(call_null, bool, 0);
++module_param(call_bad, long, 0);
++module_param(jump_null, bool, 0);
++module_param(jump_bad, long, 0);
++module_param(threads, int, 0);
++MODULE_PARM_DESC(seed, "random seed for memory tests");
++MODULE_PARM_DESC(call_panic, "test option. call panic() and render the system unusable.");
++MODULE_PARM_DESC(call_bug, "test option. call BUG() and render the system unusable.");
++MODULE_PARM_DESC(trap_null, "test option. dereference a NULL pointer to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(trap_read, "test option. read from an invalid address to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(trap_write, "test option. write to an invalid address to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(call_null, "test option. call a NULL pointer to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(call_bad, "test option. call an invalid address to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(jump_null, "test option. jump to a NULL pointer to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(jump_bad, "test option. jump to an invalid address to simulate a crash and render the system unusable.");
++MODULE_PARM_DESC(threads, "number of threads to run");
++MODULE_LICENSE("GPL");
++
++#define NUM_ALLOC 24
++#define NUM_SIZES 8
++static int sizes[] = { 32, 64, 128, 192, 256, 1024, 2048, 4096 };
++
++struct mem_buf {
++ char *buf;
++ int size;
++};
++
++static unsigned long crasher_random(void)
++{
++ rand_seed = rand_seed*69069L+1;
++ return rand_seed^jiffies;
++}
++
++void crasher_srandom(unsigned long entropy)
++{
++ rand_seed ^= entropy;
++ crasher_random();
++}
++
++static char *mem_alloc(int size) {
++ char *p = kmalloc(size, GFP_KERNEL);
++ int i;
++ if (!p)
++ return p;
++ for (i = 0 ; i < size; i++)
++ p[i] = (i % 119) + 8;
++ return p;
++}
++
++static void mem_check(char *p, int size) {
++ int i;
++ if (!p)
++ return;
++ for (i = 0 ; i < size; i++) {
++ if (p[i] != ((i % 119) + 8)) {
++ printk(KERN_CRIT "verify error at %lX offset %d "
++ " wanted %d found %d size %d\n",
++ (unsigned long)(p + i), i, (i % 119) + 8,
++ p[i], size);
++ }
++ }
++ /* try and trigger slab poisoning for people using this
++ * buffer wrong */
++ memset(p, 0, size);
++}
++
++static void mem_verify(void) {
++ struct mem_buf bufs[NUM_ALLOC];
++ struct mem_buf *b;
++ int index;
++ int size;
++ unsigned long sleep;
++ memset(bufs, 0, sizeof(struct mem_buf) * NUM_ALLOC);
++ while(!module_exiting) {
++ index = crasher_random() % NUM_ALLOC;
++ b = bufs + index;
++ if (b->size) {
++ mem_check(b->buf, b->size);
++ kfree(b->buf);
++ b->buf = NULL;
++ b->size = 0;
++ } else {
++ size = crasher_random() % NUM_SIZES;
++ size = sizes[size];
++ b->buf = mem_alloc(size);
++ b->size = size;
++ }
++ sleep = crasher_random() % (HZ / 10);
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(sleep);
++ set_current_state(TASK_RUNNING);
++ }
++ for (index = 0 ; index < NUM_ALLOC ; index++) {
++ b = bufs + index;
++ if (b->size) {
++ mem_check(b->buf, b->size);
++ kfree(b->buf);
++ }
++ }
++}
++
++static int crasher_thread(void *unused)
++{
++ daemonize("crasher");
++ complete(&startup);
++ mem_verify();
++ complete(&startup);
++ return 0;
++}
++
++static int __init crasher_init(void)
++{
++ int i;
++ init_completion(&startup);
++ crasher_srandom(seed);
++
++ if (call_panic) {
++ panic("test panic from crasher module. Good Luck.\n");
++ return -EFAULT;
++ }
++ if (call_bug) {
++ printk("triggering BUG\n");
++ BUG_ON(1);
++ return -EFAULT;
++ }
++
++ if (trap_null) {
++ volatile char *p = NULL;
++ printk("dereferencing NULL pointer.\n");
++ p[0] = '\n';
++ return -EFAULT;
++ }
++ if (trap_read) {
++ const volatile char *p = (char *)trap_read;
++ printk("reading from invalid(?) address %p.\n", p);
++ return p[0] ? -EFAULT : -EACCES;
++ }
++ if (trap_write) {
++ volatile char *p = (char *)trap_write;
++ printk("writing to invalid(?) address %p.\n", p);
++ p[0] = ' ';
++ return -EFAULT;
++ }
++
++ if (call_null) {
++ void(*f)(void) = NULL;
++ printk("calling NULL pointer.\n");
++ f();
++ return -EFAULT;
++ }
++ if (call_bad) {
++ void(*f)(void) = (void(*)(void))call_bad;
++ printk("calling invalid(?) address %p.\n", f);
++ f();
++ return -EFAULT;
++ }
++
++ /* These two depend on the compiler doing tail call optimization. */
++ if (jump_null) {
++ int(*f)(void) = NULL;
++ printk("jumping to NULL.\n");
++ return f();
++ }
++ if (jump_bad) {
++ int(*f)(void) = (int(*)(void))jump_bad;
++ printk("jumping to invalid(?) address %p.\n", f);
++ return f();
++ }
++
++ printk("crasher module (%d threads). Testing sizes: ", threads);
++ for (i = 0 ; i < NUM_SIZES ; i++)
++ printk("%d ", sizes[i]);
++ printk("\n");
++
++ for (i = 0 ; i < threads ; i++)
++ kernel_thread(crasher_thread, crasher_thread,
++ CLONE_FS | CLONE_FILES);
++ for (i = 0 ; i < threads ; i++)
++ wait_for_completion(&startup);
++ return 0;
++}
++
++static void __exit crasher_exit(void)
++{
++ int i;
++ module_exiting = 1;
++ for (i = 0 ; i < threads ; i++)
++ wait_for_completion(&startup);
++ printk("all crasher threads done\n");
++ return;
++}
++
++module_init(crasher_init);
++module_exit(crasher_exit);
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: file capabilities: add file_caps switch
+
+Based on a patch from Serge Hallyn <serue@us.ibm.com>:
+
+Add a file_caps boot option when file capabilities are
+compiled into the kernel (CONFIG_SECURITY_FILE_CAPABILITIES=y).
+
+This allows distributions to ship a kernel with file capabilities
+compiled in, without forcing users to use (and understand and
+trust) them.
+
+When file_caps=0 is specified at boot, then when a process executes
+a file, any file capabilities stored with that file will not be
+used in the calculation of the process' new capability sets.
+
+This means that booting with the file_caps=0 boot option will
+not be the same as booting a kernel with file capabilities
+compiled out - in particular a task with CAP_SETPCAP will not
+have any chance of passing capabilities to another task (which
+isn't "really" possible anyway, and which may soon by killed
+altogether by David Howells in any case), and it will instead
+be able to put new capabilities in its pI. However since fI
+will always be empty and pI is masked with fI, it gains the
+task nothing.
+
+We also support the extra prctl options, setting securebits and
+dropping capabilities from the per-process bounding set.
+
+The other remaining difference is that killpriv, task_setscheduler,
+setioprio, and setnice will continue to be hooked. That will
+be noticeable in the case where a root task changed its uid
+while keeping some caps, and another task owned by the new uid
+tries to change settings for the more privileged task.
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ Documentation/kernel-parameters.txt | 9 +++++++++
+ include/linux/capability.h | 3 +++
+ kernel/capability.c | 11 +++++++++++
+ security/commoncap.c | 3 +++
+ 4 files changed, 26 insertions(+)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1403,6 +1403,15 @@ and is between 256 and 4096 characters.
+ instruction doesn't work correctly and not to
+ use it.
+
++ file_caps= Tells the kernel whether to honor file capabilities.
++ When disabled, the only way for a file to be
++ executed with privilege is for it to be setuid root or executed
++ by root.
++ Format: {"0" | "1"}
++ 0 -- ignore file capabilities.
++ 1 -- honor file capabilities.
++ Default value is 1.
++
+ nohalt [IA-64] Tells the kernel not to use the power saving
+ function PAL_HALT_LIGHT when idle. This increases
+ power-consumption. On the positive side, it reduces
+--- a/include/linux/capability.h
++++ b/include/linux/capability.h
+@@ -68,6 +68,9 @@ typedef struct __user_cap_data_struct {
+ #define VFS_CAP_U32 VFS_CAP_U32_2
+ #define VFS_CAP_REVISION VFS_CAP_REVISION_2
+
++#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
++extern int file_caps_enabled;
++#endif
+
+ struct vfs_cap_data {
+ __le32 magic_etc; /* Little endian */
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -33,6 +33,17 @@ EXPORT_SYMBOL(__cap_empty_set);
+ EXPORT_SYMBOL(__cap_full_set);
+ EXPORT_SYMBOL(__cap_init_eff_set);
+
++#ifdef CONFIG_SECURITY_FILE_CAPABILITIES
++int file_caps_enabled = 1;
++
++static int __init setup_file_caps(char *str)
++{
++ get_option(&str, &file_caps_enabled);
++ return 1;
++}
++__setup("file_caps=", setup_file_caps);
++#endif
++
+ /*
+ * More recent versions of libcap are available from:
+ *
+--- a/security/commoncap.c
++++ b/security/commoncap.c
+@@ -281,6 +281,9 @@ static int get_file_caps(struct linux_bi
+
+ bprm_clear_caps(bprm);
+
++ if (!file_caps_enabled)
++ return 0;
++
+ if (bprm->file->f_vfsmnt->mnt_flags & MNT_NOSUID)
+ return 0;
+
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Disable file capabilities by default
+
+Disable file capabilities by default: we are still lacking documentation
+and file capability awareness in system management tools.
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ Documentation/kernel-parameters.txt | 2 +-
+ kernel/capability.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/Documentation/kernel-parameters.txt
++++ b/Documentation/kernel-parameters.txt
+@@ -1410,7 +1410,7 @@ and is between 256 and 4096 characters.
+ Format: {"0" | "1"}
+ 0 -- ignore file capabilities.
+ 1 -- honor file capabilities.
+- Default value is 1.
++ Default value is 0.
+
+ nohalt [IA-64] Tells the kernel not to use the power saving
+ function PAL_HALT_LIGHT when idle. This increases
+--- a/kernel/capability.c
++++ b/kernel/capability.c
+@@ -34,7 +34,7 @@ EXPORT_SYMBOL(__cap_full_set);
+ EXPORT_SYMBOL(__cap_init_eff_set);
+
+ #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
+-int file_caps_enabled = 1;
++int file_caps_enabled;
+
+ static int __init setup_file_caps(char *str)
+ {
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Allow filesystems to handle MAY_APPEND
+
+The MS_WITHAPPEND super_block flag tells the vfs that the permission
+inode operation understands the MAY_APPEND flag. This is required for
+implementing permission models which go beyond the traditional UNIX
+semantics.
+
+If a filesystem does not set the flag, the behavior is unchanged.
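+
+As an illustration (not part of this patch; the myfs_* names are
+hypothetical), a filesystem would opt in by setting MS_WITHAPPEND on its
+superblock and handling MAY_APPEND in its permission operation:
+
+	static int myfs_permission(struct inode *inode, int mask)
+	{
+		/* MAY_APPEND reaches us only because MS_WITHAPPEND is set */
+		if ((mask & MAY_APPEND) && myfs_may_append(inode))
+			/* append-only access: drop the write request */
+			mask &= ~(MAY_WRITE | MAY_APPEND);
+		return generic_permission(inode, mask & ~MAY_APPEND, NULL);
+	}
+
+	static int myfs_fill_super(struct super_block *sb, void *data,
+				   int silent)
+	{
+		sb->s_flags |= MS_WITHAPPEND;
+		/* ... remaining superblock setup ... */
+		return 0;
+	}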
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/namei.c | 6 +++++-
+ include/linux/fs.h | 2 ++
+ 2 files changed, 7 insertions(+), 1 deletion(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -230,6 +230,7 @@ int generic_permission(struct inode *ino
+ int inode_permission(struct inode *inode, int mask)
+ {
+ int retval;
++ int submask = mask;
+
+ if (mask & MAY_WRITE) {
+ umode_t mode = inode->i_mode;
+@@ -248,9 +249,12 @@ int inode_permission(struct inode *inode
+ return -EACCES;
+ }
+
++ if (!IS_WITHAPPEND(inode))
++ submask &= ~MAY_APPEND;
++
+ /* Ordinary permission routines do not understand MAY_APPEND. */
+ if (inode->i_op && inode->i_op->permission) {
+- retval = inode->i_op->permission(inode, mask);
++ retval = inode->i_op->permission(inode, submask);
+ if (!retval) {
+ /*
+ * Exec permission on a regular file is denied if none
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -133,6 +133,7 @@ extern int dir_notify_enable;
+ #define MS_RELATIME (1<<21) /* Update atime relative to mtime/ctime. */
+ #define MS_KERNMOUNT (1<<22) /* this is a kern_mount call */
+ #define MS_I_VERSION (1<<23) /* Update inode I_version field */
++#define MS_WITHAPPEND (1<<24) /* iop->permission() understands MAY_APPEND */
+ #define MS_ACTIVE (1<<30)
+ #define MS_NOUSER (1<<31)
+
+@@ -183,6 +184,7 @@ extern int dir_notify_enable;
+ #define IS_MANDLOCK(inode) __IS_FLG(inode, MS_MANDLOCK)
+ #define IS_NOATIME(inode) __IS_FLG(inode, MS_RDONLY|MS_NOATIME)
+ #define IS_I_VERSION(inode) __IS_FLG(inode, MS_I_VERSION)
++#define IS_WITHAPPEND(inode) __IS_FLG(inode, MS_WITHAPPEND)
+
+ #define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
+ #define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: VFS hooks for per-filesystem permission models
+
+Add may_create and may_delete inode operations that filesystems can
+implement in order to override the vfs provided default behavior.
+This is required for implementing permission models which go beyond
+the traditional UNIX semantics.
+
+If a filesystem does not implement these hooks, the behavior is
+unchanged.
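+
+A filesystem would wire the hooks up along these lines (a sketch only;
+the myfs_* names are hypothetical):
+
+	static int myfs_may_create(struct inode *dir, int isdir)
+	{
+		/* apply fs-specific create rules for @dir */
+		return myfs_check_add(dir, isdir) ? 0 : -EACCES;
+	}
+
+	static int myfs_may_delete(struct inode *dir, struct inode *victim)
+	{
+		/* apply fs-specific delete rules for @victim in @dir */
+		return myfs_check_remove(dir, victim) ? 0 : -EACCES;
+	}
+
+	static const struct inode_operations myfs_dir_iops = {
+		.may_create	= myfs_may_create,
+		.may_delete	= myfs_may_delete,
+		/* ... other operations ... */
+	};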
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/namei.c | 48 +++++++++++++++++++++++++++++++++++++-----------
+ include/linux/fs.h | 2 ++
+ 2 files changed, 39 insertions(+), 11 deletions(-)
+
+--- a/fs/namei.c
++++ b/fs/namei.c
+@@ -1402,13 +1402,24 @@ static int may_delete(struct inode *dir,
+ BUG_ON(victim->d_parent->d_inode != dir);
+ audit_inode_child(victim->d_name.name, victim, dir);
+
+- error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
++ if (dir->i_op->may_delete) {
++ if (IS_RDONLY(dir))
++ return -EROFS;
++ if (IS_IMMUTABLE(dir))
++ return -EACCES;
++ error = dir->i_op->may_delete(dir, victim->d_inode);
++ if (!error)
++ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC);
++ } else {
++ error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
++ if (!error && check_sticky(dir, victim->d_inode))
++ error = -EPERM;
++ }
+ if (error)
+ return error;
+ if (IS_APPEND(dir))
+ return -EPERM;
+- if (check_sticky(dir, victim->d_inode)||IS_APPEND(victim->d_inode)||
+- IS_IMMUTABLE(victim->d_inode))
++ if (IS_APPEND(victim->d_inode) || IS_IMMUTABLE(victim->d_inode))
+ return -EPERM;
+ if (isdir) {
+ if (!S_ISDIR(victim->d_inode->i_mode))
+@@ -1432,13 +1443,28 @@ static int may_delete(struct inode *dir,
+ * 3. We should have write and exec permissions on dir
+ * 4. We can't do it if dir is immutable (done in permission())
+ */
+-static inline int may_create(struct inode *dir, struct dentry *child)
++static inline int may_create(struct inode *dir, struct dentry *child,
++ int isdir)
+ {
++ int error;
++
+ if (child->d_inode)
+ return -EEXIST;
+ if (IS_DEADDIR(dir))
+ return -ENOENT;
+- return inode_permission(dir, MAY_WRITE | MAY_EXEC);
++
++ if (dir->i_op->may_create) {
++ if (IS_RDONLY(dir))
++ return -EROFS;
++ if (IS_IMMUTABLE(dir))
++ return -EACCES;
++ error = dir->i_op->may_create(dir, isdir);
++ if (!error)
++ error = security_inode_permission(dir, MAY_WRITE | MAY_EXEC);
++ } else
++ error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
++
++ return error;
+ }
+
+ /*
+@@ -1504,7 +1530,7 @@ void unlock_rename(struct dentry *p1, st
+ int vfs_create(struct inode *dir, struct dentry *dentry, int mode,
+ struct nameidata *nd)
+ {
+- int error = may_create(dir, dentry);
++ int error = may_create(dir, dentry, 0);
+
+ if (error)
+ return error;
+@@ -1948,7 +1974,7 @@ EXPORT_SYMBOL_GPL(lookup_create);
+
+ int vfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t dev)
+ {
+- int error = may_create(dir, dentry);
++ int error = may_create(dir, dentry, 0);
+
+ if (error)
+ return error;
+@@ -2049,7 +2075,7 @@ SYSCALL_DEFINE3(mknod, const char __user
+
+ int vfs_mkdir(struct inode *dir, struct dentry *dentry, int mode)
+ {
+- int error = may_create(dir, dentry);
++ int error = may_create(dir, dentry, 1);
+
+ if (error)
+ return error;
+@@ -2316,7 +2342,7 @@ SYSCALL_DEFINE1(unlink, const char __use
+
+ int vfs_symlink(struct inode *dir, struct dentry *dentry, const char *oldname)
+ {
+- int error = may_create(dir, dentry);
++ int error = may_create(dir, dentry, 0);
+
+ if (error)
+ return error;
+@@ -2386,7 +2412,7 @@ int vfs_link(struct dentry *old_dentry,
+ if (!inode)
+ return -ENOENT;
+
+- error = may_create(dir, new_dentry);
++ error = may_create(dir, new_dentry, S_ISDIR(inode->i_mode));
+ if (error)
+ return error;
+
+@@ -2594,7 +2620,7 @@ int vfs_rename(struct inode *old_dir, st
+ return error;
+
+ if (!new_dentry->d_inode)
+- error = may_create(new_dir, new_dentry);
++ error = may_create(new_dir, new_dentry, is_dir);
+ else
+ error = may_delete(new_dir, new_dentry, is_dir);
+ if (error)
+--- a/include/linux/fs.h
++++ b/include/linux/fs.h
+@@ -1293,6 +1293,8 @@ struct inode_operations {
+ void (*put_link) (struct dentry *, struct nameidata *, void *);
+ void (*truncate) (struct inode *);
+ int (*permission) (struct inode *, int);
++ int (*may_create) (struct inode *, int);
++ int (*may_delete) (struct inode *, struct inode *);
+ int (*setattr) (struct dentry *, struct iattr *);
+ int (*getattr) (struct vfsmount *mnt, struct dentry *, struct kstat *);
+ int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: genksyms: add --override flag
+
+Add --override flag to genksyms to allow overriding types with old
+definitions using the 'override' keyword. This is similar to -p/--preserve,
+but it doesn't abort the build if a symtype cannot be preserved.
+
+[mmarek: added KBUILD_OVERRIDE env var to set this globally for the entire
+ build]
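+
+For example (a usage note, not part of the patch): with *.symref files
+in place, the entire tree can be built with the lenient behavior via
+
+	make KBUILD_OVERRIDE=1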
+---
+ scripts/Makefile.build | 2 ++
+ scripts/genksyms/genksyms.c | 21 +++++++++++++++------
+ 2 files changed, 17 insertions(+), 6 deletions(-)
+
+Index: b/scripts/genksyms/genksyms.c
+===================================================================
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -43,7 +43,7 @@ int cur_line = 1;
+ char *cur_filename;
+
+ static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
+- flag_preserve, flag_warnings;
++ flag_override, flag_preserve, flag_warnings;
+ static const char *arch = "";
+ static const char *mod_prefix = "";
+
+@@ -200,7 +200,7 @@ struct symbol *__add_symbol(const char *
+ sym->is_declared = 1;
+ return sym;
+ } else if (!sym->is_declared) {
+- if (sym->is_override && flag_preserve) {
++ if (sym->is_override && flag_override) {
+ print_location();
+ fprintf(stderr, "ignoring ");
+ print_type_name(type, name);
+@@ -586,11 +586,13 @@ void export_symbol(const char *name)
+ struct symbol *n = sym->expansion_trail;
+
+ if (sym->status != STATUS_UNCHANGED) {
++ int fail = sym->is_override && flag_preserve;
++
+ if (!has_changed) {
+ print_location();
+ fprintf(stderr, "%s: %s: modversion "
+ "changed because of changes "
+- "in ", flag_preserve ? "error" :
++ "in ", fail ? "error" :
+ "warning", name);
+ } else
+ fprintf(stderr, ", ");
+@@ -598,7 +600,7 @@ void export_symbol(const char *name)
+ if (sym->status == STATUS_DEFINED)
+ fprintf(stderr, " (became defined)");
+ has_changed = 1;
+- if (flag_preserve)
++ if (fail)
+ errors++;
+ }
+ sym->expansion_trail = 0;
+@@ -655,6 +657,7 @@ static void genksyms_usage(void)
+ " -D, --dump Dump expanded symbol defs (for debugging only)\n"
+ " -r, --reference file Read reference symbols from a file\n"
+ " -T, --dump-types file Dump expanded types into file\n"
++ " -o, --override Allow to override reference modversions\n"
+ " -p, --preserve Preserve reference modversions or fail\n"
+ " -w, --warnings Enable warnings\n"
+ " -q, --quiet Disable warnings (default)\n"
+@@ -666,6 +669,7 @@ static void genksyms_usage(void)
+ " -D Dump expanded symbol defs (for debugging only)\n"
+ " -r file Read reference symbols from a file\n"
+ " -T file Dump expanded types into file\n"
++ " -o Allow to override reference modversions\n"
+ " -p Preserve reference modversions or fail\n"
+ " -w Enable warnings\n"
+ " -q Disable warnings (default)\n"
+@@ -690,15 +694,16 @@ int main(int argc, char **argv)
+ {"reference", 1, 0, 'r'},
+ {"dump-types", 1, 0, 'T'},
+ {"preserve", 0, 0, 'p'},
++ {"override", 0, 0, 'o'},
+ {"version", 0, 0, 'V'},
+ {"help", 0, 0, 'h'},
+ {0, 0, 0, 0}
+ };
+
+- while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph",
++ while ((o = getopt_long(argc, argv, "a:dwqVDr:T:oph",
+ &long_opts[0], NULL)) != EOF)
+ #else /* __GNU_LIBRARY__ */
+- while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF)
++ while ((o = getopt(argc, argv, "a:dwqVDr:T:oph")) != EOF)
+ #endif /* __GNU_LIBRARY__ */
+ switch (o) {
+ case 'a':
+@@ -735,7 +740,11 @@ int main(int argc, char **argv)
+ return 1;
+ }
+ break;
++ case 'o':
++ flag_override = 1;
++ break;
+ case 'p':
++ flag_override = 1;
+ flag_preserve = 1;
+ break;
+ case 'h':
+Index: b/scripts/Makefile.build
+===================================================================
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -159,6 +159,7 @@ cmd_cc_symtypes_c = \
+ -r $(firstword $(wildcard \
+ $(@:.symtypes=.symref) /dev/null)) \
+ $(if $(KBUILD_PRESERVE),-p) \
++ $(if $(KBUILD_OVERRIDE),-o) \
+ -a $(ARCH) \
+ >/dev/null; \
+ test -s $@ || rm -f $@
+@@ -197,6 +198,7 @@ cmd_modversions = \
+ -r $(firstword $(wildcard \
+ $(@:.o=.symref) /dev/null)) \
+ $(if $(KBUILD_PRESERVE),-p) \
++ $(if $(KBUILD_OVERRIDE),-o) \
+ -a $(ARCH) \
+ > $(@D)/.tmp_$(@F:.o=.ver); \
+ \
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: genksyms: allow to ignore symbol checksum changes
+
+This adds an "override" keyword for use in *.symvers / *.symref files. When a
+symbol is overridden, the symbol's old definition will be used for computing
+checksums instead of the new one, preserving the previous checksum. (Genksyms
+will still warn about the change.)
+
+This is meant to allow distributions to hide minor actual ABI changes as well
+as fake ones. (For example, when extra type information becomes available
+because additional headers are included, this may change checksums even though
+none of the types used have actually changed.)
+
+This approach also makes it possible to get rid of "#ifdef __GENKSYMS__" hacks
+in the code, which are currently used in some vendor kernels to work around
+checksum changes.
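+
+For illustration only, a *.symref entry for a hypothetical structure
+would look like this (the same token format genksyms writes with
+--dump-types, prefixed with the new keyword):
+
+	override s#example_struct struct example_struct { int a ; }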
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ scripts/genksyms/genksyms.c | 34 ++++++++++++++++++++++++++++++----
+ scripts/genksyms/genksyms.h | 1 +
+ 2 files changed, 31 insertions(+), 4 deletions(-)
+
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -191,11 +191,26 @@ struct symbol *__add_symbol(const char *
+ /* fall through */ ;
+ else if (sym->type == type &&
+ equal_list(sym->defn, defn)) {
++ if (!sym->is_declared && sym->is_override) {
++ print_location();
++ print_type_name(type, name);
++ fprintf(stderr, " modversion is "
++ "unchanged\n");
++ }
+ sym->is_declared = 1;
+ return sym;
+ } else if (!sym->is_declared) {
+- status = is_unknown_symbol(sym) ?
+- STATUS_DEFINED : STATUS_MODIFIED;
++ if (sym->is_override && flag_preserve) {
++ print_location();
++ fprintf(stderr, "ignoring ");
++ print_type_name(type, name);
++ fprintf(stderr, " modversion change\n");
++ sym->is_declared = 1;
++ return sym;
++ } else {
++ status = is_unknown_symbol(sym) ?
++ STATUS_DEFINED : STATUS_MODIFIED;
++ }
+ } else {
+ error_with_pos("redefinition of %s", name);
+ return sym;
+@@ -229,6 +244,7 @@ struct symbol *__add_symbol(const char *
+
+ sym->is_declared = !is_reference;
+ sym->status = status;
++ sym->is_override = 0;
+
+ if (flag_debug) {
+ fprintf(debugfile, "Defn for %s %s == <",
+@@ -348,9 +364,16 @@ static void read_reference(FILE *f)
+ while (!feof(f)) {
+ struct string_list *defn = NULL;
+ struct string_list *sym, *def;
+- int is_extern = 0;
++ int is_extern = 0, is_override = 0;
++ struct symbol *subsym;
+
+ sym = read_node(f);
++ if (sym && sym->tag == SYM_NORMAL &&
++ !strcmp(sym->string, "override")) {
++ is_override = 1;
++ free_node(sym);
++ sym = read_node(f);
++ }
+ if (!sym)
+ continue;
+ def = read_node(f);
+@@ -365,8 +388,9 @@ static void read_reference(FILE *f)
+ defn = def;
+ def = read_node(f);
+ }
+- add_reference_symbol(xstrdup(sym->string), sym->tag,
++ subsym = add_reference_symbol(xstrdup(sym->string), sym->tag,
+ defn, is_extern);
++ subsym->is_override = is_override;
+ free_node(sym);
+ }
+ }
+@@ -743,6 +767,8 @@ int main(int argc, char **argv)
+ while (visited_symbols != (struct symbol *)-1L) {
+ struct symbol *sym = visited_symbols;
+
++ if (sym->is_override)
++ fputs("override ", dumpfile);
+ if (sym->type != SYM_NORMAL) {
+ putc(symbol_type_name[sym->type][0], dumpfile);
+ putc('#', dumpfile);
+--- a/scripts/genksyms/genksyms.h
++++ b/scripts/genksyms/genksyms.h
+@@ -49,6 +49,7 @@ struct symbol {
+ int is_extern;
+ int is_declared;
+ enum symbol_status status;
++ int is_override;
+ };
+
+ typedef struct string_list **yystype;
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: genksyms: track symbol checksum changes
+
+Sometimes it is preferable to avoid changes of exported symbol checksums (to
+avoid breaking externally provided modules). When a checksum change occurs, it
+can be hard to figure out what caused this change: underlying types may have
+changed, or additional type information may simply have become available at the
+point where a symbol is exported.
+
+Add a new --reference option to genksyms which allows it to report why
+checksums change, based on the type information dumps it creates with
+the --dump-types flag. Genksyms will read in such a dump from a previous run,
+and report which symbols have changed (and why).
+
+The behavior can be controlled for an entire build as follows: If
+KBUILD_SYMTYPES is set, genksyms uses --dump-types to produce *.symtypes dump
+files. If any *.symref files exist, those will be used as the reference to
+check against. If KBUILD_PRESERVE is set, checksum changes will fail the
+build.
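+
+A possible workflow (an illustration, not mandated by this patch): build
+once with symtype dumps enabled, save a dump of interest as a reference
+next to its object, and rebuild:
+
+	make KBUILD_SYMTYPES=1
+	cp fs/ext3/super.symtypes fs/ext3/super.symref
+	make KBUILD_SYMTYPES=1 KBUILD_PRESERVE=1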
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ scripts/Makefile.build | 16 ++
+ scripts/genksyms/genksyms.c | 236 +++++++++++++++++++++++++++++++++++++++++---
+ scripts/genksyms/genksyms.h | 6 +
+ 3 files changed, 239 insertions(+), 19 deletions(-)
+
+--- a/scripts/Makefile.build
++++ b/scripts/Makefile.build
+@@ -153,12 +153,18 @@ $(obj)/%.i: $(src)/%.c FORCE
+
+ quiet_cmd_cc_symtypes_c = SYM $(quiet_modtag) $@
+ cmd_cc_symtypes_c = \
++ set -e; \
+ $(CPP) -D__GENKSYMS__ $(c_flags) $< \
+- | $(GENKSYMS) -T $@ >/dev/null; \
++ | $(GENKSYMS) -T $@ \
++ -r $(firstword $(wildcard \
++ $(@:.symtypes=.symref) /dev/null)) \
++ $(if $(KBUILD_PRESERVE),-p) \
++ -a $(ARCH) \
++ >/dev/null; \
+ test -s $@ || rm -f $@
+
+ $(obj)/%.symtypes : $(src)/%.c FORCE
+- $(call if_changed_dep,cc_symtypes_c)
++ $(call cmd,cc_symtypes_c)
+
+ # C (.c) files
+ # The C file is compiled and updated dependency information is generated.
+@@ -187,7 +193,11 @@ cmd_modversions = \
+ if $(OBJDUMP) -h $(@D)/.tmp_$(@F) | grep -q __ksymtab; then \
+ $(CPP) -D__GENKSYMS__ $(c_flags) $< \
+ | $(GENKSYMS) $(if $(KBUILD_SYMTYPES), \
+- -T $(@D)/$(@F:.o=.symtypes)) -a $(ARCH) \
++ -T $(@:.o=.symtypes)) \
++ -r $(firstword $(wildcard \
++ $(@:.o=.symref) /dev/null)) \
++ $(if $(KBUILD_PRESERVE),-p) \
++ -a $(ARCH) \
+ > $(@D)/.tmp_$(@F:.o=.ver); \
+ \
+ $(LD) $(LDFLAGS) -r -o $@ $(@D)/.tmp_$(@F) \
+--- a/scripts/genksyms/genksyms.c
++++ b/scripts/genksyms/genksyms.c
+@@ -42,7 +42,8 @@ static FILE *debugfile;
+ int cur_line = 1;
+ char *cur_filename;
+
+-static int flag_debug, flag_dump_defs, flag_dump_types, flag_warnings;
++static int flag_debug, flag_dump_defs, flag_reference, flag_dump_types,
++ flag_preserve, flag_warnings;
+ static const char *arch = "";
+ static const char *mod_prefix = "";
+
+@@ -58,6 +59,8 @@ static const char *const symbol_type_nam
+
+ static int equal_list(struct string_list *a, struct string_list *b);
+ static void print_list(FILE * f, struct string_list *list);
++static void print_location(void);
++static void print_type_name(enum symbol_type type, const char *name);
+
+ /*----------------------------------------------------------------------*/
+
+@@ -151,25 +154,66 @@ struct symbol *find_symbol(const char *n
+
+ for (sym = symtab[h]; sym; sym = sym->hash_next)
+ if (map_to_ns(sym->type) == map_to_ns(ns) &&
+- strcmp(name, sym->name) == 0)
++ strcmp(name, sym->name) == 0 &&
++ sym->is_declared)
+ break;
+
+ return sym;
+ }
+
+-struct symbol *add_symbol(const char *name, enum symbol_type type,
+- struct string_list *defn, int is_extern)
++static int is_unknown_symbol(struct symbol *sym)
++{
++ struct string_list *defn;
++
++ return ((sym->type == SYM_STRUCT ||
++ sym->type == SYM_UNION ||
++ sym->type == SYM_ENUM) &&
++ (defn = sym->defn) && defn->tag == SYM_NORMAL &&
++ strcmp(defn->string, "}") == 0 &&
++ (defn = defn->next) && defn->tag == SYM_NORMAL &&
++ strcmp(defn->string, "UNKNOWN") == 0 &&
++ (defn = defn->next) && defn->tag == SYM_NORMAL &&
++ strcmp(defn->string, "{") == 0);
++}
++
++struct symbol *__add_symbol(const char *name, enum symbol_type type,
++ struct string_list *defn, int is_extern,
++ int is_reference)
+ {
+ unsigned long h = crc32(name) % HASH_BUCKETS;
+ struct symbol *sym;
++ enum symbol_status status = STATUS_UNCHANGED;
+
+ for (sym = symtab[h]; sym; sym = sym->hash_next) {
+- if (map_to_ns(sym->type) == map_to_ns(type)
+- && strcmp(name, sym->name) == 0) {
+- if (!equal_list(sym->defn, defn))
++ if (map_to_ns(sym->type) == map_to_ns(type) &&
++ strcmp(name, sym->name) == 0) {
++ if (is_reference)
++ /* fall through */ ;
++ else if (sym->type == type &&
++ equal_list(sym->defn, defn)) {
++ sym->is_declared = 1;
++ return sym;
++ } else if (!sym->is_declared) {
++ status = is_unknown_symbol(sym) ?
++ STATUS_DEFINED : STATUS_MODIFIED;
++ } else {
+ error_with_pos("redefinition of %s", name);
+- return sym;
++ return sym;
++ }
++ break;
++ }
++ }
++
++ if (sym) {
++ struct symbol **psym;
++
++ for (psym = &symtab[h]; *psym; psym = &(*psym)->hash_next) {
++ if (*psym == sym) {
++ *psym = sym->hash_next;
++ break;
++ }
+ }
++ --nsyms;
+ }
+
+ sym = xmalloc(sizeof(*sym));
+@@ -183,6 +227,9 @@ struct symbol *add_symbol(const char *na
+ sym->hash_next = symtab[h];
+ symtab[h] = sym;
+
++ sym->is_declared = !is_reference;
++ sym->status = status;
++
+ if (flag_debug) {
+ fprintf(debugfile, "Defn for %s %s == <",
+ symbol_type_name[type], name);
+@@ -196,6 +243,18 @@ struct symbol *add_symbol(const char *na
+ return sym;
+ }
+
++struct symbol *add_symbol(const char *name, enum symbol_type type,
++ struct string_list *defn, int is_extern)
++{
++ return __add_symbol(name, type, defn, is_extern, 0);
++}
++
++struct symbol *add_reference_symbol(const char *name, enum symbol_type type,
++ struct string_list *defn, int is_extern)
++{
++ return __add_symbol(name, type, defn, is_extern, 1);
++}
++
+ /*----------------------------------------------------------------------*/
+
+ void free_node(struct string_list *node)
+@@ -236,6 +295,82 @@ static int equal_list(struct string_list
+ return !a && !b;
+ }
+
++#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
++
++struct string_list *read_node(FILE *f)
++{
++ char buffer[256];
++ struct string_list node = {
++ .string = buffer,
++ .tag = SYM_NORMAL };
++ int c;
++
++ while ((c = fgetc(f)) != EOF) {
++ if (c == ' ') {
++ if (node.string == buffer)
++ continue;
++ break;
++ } else if (c == '\n') {
++ if (node.string == buffer)
++ return NULL;
++ ungetc(c, f);
++ break;
++ }
++ if (node.string >= buffer + sizeof(buffer) - 1) {
++ fprintf(stderr, "Token too long\n");
++ exit(1);
++ }
++ *node.string++ = c;
++ }
++ if (node.string == buffer)
++ return NULL;
++ *node.string = 0;
++ node.string = buffer;
++
++ if (node.string[1] == '#') {
++ int n;
++
++ for (n = 0; n < ARRAY_SIZE(symbol_type_name); n++) {
++ if (node.string[0] == symbol_type_name[n][0]) {
++ node.tag = n;
++ node.string += 2;
++ return copy_node(&node);
++ }
++ }
++ fprintf(stderr, "Unknown type %c\n", node.string[0]);
++ exit(1);
++ }
++ return copy_node(&node);
++}
++
++static void read_reference(FILE *f)
++{
++ while (!feof(f)) {
++ struct string_list *defn = NULL;
++ struct string_list *sym, *def;
++ int is_extern = 0;
++
++ sym = read_node(f);
++ if (!sym)
++ continue;
++ def = read_node(f);
++ if (def && def->tag == SYM_NORMAL &&
++ !strcmp(def->string, "extern")) {
++ is_extern = 1;
++ free_node(def);
++ def = read_node(f);
++ }
++ while (def) {
++ def->next = defn;
++ defn = def;
++ def = read_node(f);
++ }
++ add_reference_symbol(xstrdup(sym->string), sym->tag,
++ defn, is_extern);
++ free_node(sym);
++ }
++}
++
+ static void print_node(FILE * f, struct string_list *list)
+ {
+ if (list->tag != SYM_NORMAL) {
+@@ -311,6 +446,7 @@ static unsigned long expand_and_crc_sym(
+
+ case SYM_TYPEDEF:
+ subsym = find_symbol(cur->string, cur->tag);
++ /* FIXME: Bad reference files can segfault here. */
+ if (subsym->expansion_trail) {
+ if (flag_dump_defs)
+ fprintf(debugfile, "%s ", cur->string);
+@@ -347,9 +483,22 @@ static unsigned long expand_and_crc_sym(
+ t = n;
+
+ n = xmalloc(sizeof(*n));
+- n->string = xstrdup("{ UNKNOWN }");
++ n->string = xstrdup("{");
++ n->tag = SYM_NORMAL;
++ n->next = t;
++ t = n;
++
++ n = xmalloc(sizeof(*n));
++ n->string = xstrdup("UNKNOWN");
++ n->tag = SYM_NORMAL;
++ n->next = t;
++ t = n;
++
++ n = xmalloc(sizeof(*n));
++ n->string = xstrdup("}");
+ n->tag = SYM_NORMAL;
+ n->next = t;
++ t = n;
+
+ subsym =
+ add_symbol(cur->string, cur->tag, n, 0);
+@@ -397,20 +546,42 @@ void export_symbol(const char *name)
+ error_with_pos("export undefined symbol %s", name);
+ else {
+ unsigned long crc;
++ int has_changed = 0;
+
+ if (flag_dump_defs)
+ fprintf(debugfile, "Export %s == <", name);
+
+ expansion_trail = (struct symbol *)-1L;
+
++ sym->expansion_trail = expansion_trail;
++ expansion_trail = sym;
+ crc = expand_and_crc_sym(sym, 0xffffffff) ^ 0xffffffff;
+
+ sym = expansion_trail;
+ while (sym != (struct symbol *)-1L) {
+ struct symbol *n = sym->expansion_trail;
++
++ if (sym->status != STATUS_UNCHANGED) {
++ if (!has_changed) {
++ print_location();
++ fprintf(stderr, "%s: %s: modversion "
++ "changed because of changes "
++ "in ", flag_preserve ? "error" :
++ "warning", name);
++ } else
++ fprintf(stderr, ", ");
++ print_type_name(sym->type, sym->name);
++ if (sym->status == STATUS_DEFINED)
++ fprintf(stderr, " (became defined)");
++ has_changed = 1;
++ if (flag_preserve)
++ errors++;
++ }
+ sym->expansion_trail = 0;
+ sym = n;
+ }
++ if (has_changed)
++ fprintf(stderr, "\n");
+
+ if (flag_dump_defs)
+ fputs(">\n", debugfile);
+@@ -421,13 +592,26 @@ void export_symbol(const char *name)
+ }
+
+ /*----------------------------------------------------------------------*/
++
++static void print_location(void)
++{
++ fprintf(stderr, "%s:%d: ", cur_filename ? : "<stdin>", cur_line);
++}
++
++static void print_type_name(enum symbol_type type, const char *name)
++{
++ if (type != SYM_NORMAL)
++ fprintf(stderr, "%s %s", symbol_type_name[type], name);
++ else
++ fprintf(stderr, "%s", name);
++}
++
+ void error_with_pos(const char *fmt, ...)
+ {
+ va_list args;
+
+ if (flag_warnings) {
+- fprintf(stderr, "%s:%d: ", cur_filename ? : "<stdin>",
+- cur_line);
++ print_location();
+
+ va_start(args, fmt);
+ vfprintf(stderr, fmt, args);
+@@ -445,7 +629,9 @@ static void genksyms_usage(void)
+ " -a, --arch Select architecture\n"
+ " -d, --debug Increment the debug level (repeatable)\n"
+ " -D, --dump Dump expanded symbol defs (for debugging only)\n"
+- " -T, --dump-types file Dump expanded types into file (for debugging only)\n"
++ " -r, --reference file Read reference symbols from a file\n"
++ " -T, --dump-types file Dump expanded types into file\n"
++ " -p, --preserve Preserve reference modversions or fail\n"
+ " -w, --warnings Enable warnings\n"
+ " -q, --quiet Disable warnings (default)\n"
+ " -h, --help Print this message\n"
+@@ -454,7 +640,9 @@ static void genksyms_usage(void)
+ " -a Select architecture\n"
+ " -d Increment the debug level (repeatable)\n"
+ " -D Dump expanded symbol defs (for debugging only)\n"
+- " -T file Dump expanded types into file (for debugging only)\n"
++ " -r file Read reference symbols from a file\n"
++ " -T file Dump expanded types into file\n"
++ " -p Preserve reference modversions or fail\n"
+ " -w Enable warnings\n"
+ " -q Disable warnings (default)\n"
+ " -h Print this message\n"
+@@ -465,7 +653,7 @@ static void genksyms_usage(void)
+
+ int main(int argc, char **argv)
+ {
+- FILE *dumpfile = NULL;
++ FILE *dumpfile = NULL, *ref_file = NULL;
+ int o;
+
+ #ifdef __GNU_LIBRARY__
+@@ -475,16 +663,18 @@ int main(int argc, char **argv)
+ {"warnings", 0, 0, 'w'},
+ {"quiet", 0, 0, 'q'},
+ {"dump", 0, 0, 'D'},
++ {"reference", 1, 0, 'r'},
+ {"dump-types", 1, 0, 'T'},
++ {"preserve", 0, 0, 'p'},
+ {"version", 0, 0, 'V'},
+ {"help", 0, 0, 'h'},
+ {0, 0, 0, 0}
+ };
+
+- while ((o = getopt_long(argc, argv, "a:dwqVDT:h",
++ while ((o = getopt_long(argc, argv, "a:dwqVDr:T:ph",
+ &long_opts[0], NULL)) != EOF)
+ #else /* __GNU_LIBRARY__ */
+- while ((o = getopt(argc, argv, "a:dwqVDT:h")) != EOF)
++ while ((o = getopt(argc, argv, "a:dwqVDr:T:ph")) != EOF)
+ #endif /* __GNU_LIBRARY__ */
+ switch (o) {
+ case 'a':
+@@ -505,6 +695,14 @@ int main(int argc, char **argv)
+ case 'D':
+ flag_dump_defs = 1;
+ break;
++ case 'r':
++ flag_reference = 1;
++ ref_file = fopen(optarg, "r");
++ if (!ref_file) {
++ perror(optarg);
++ return 1;
++ }
++ break;
+ case 'T':
+ flag_dump_types = 1;
+ dumpfile = fopen(optarg, "w");
+@@ -513,6 +711,9 @@ int main(int argc, char **argv)
+ return 1;
+ }
+ break;
++ case 'p':
++ flag_preserve = 1;
++ break;
+ case 'h':
+ genksyms_usage();
+ return 0;
+@@ -533,6 +734,9 @@ int main(int argc, char **argv)
+ /* setlinebuf(debugfile); */
+ }
+
++ if (flag_reference)
++ read_reference(ref_file);
++
+ yyparse();
+
+ if (flag_dump_types && visited_symbols) {
+--- a/scripts/genksyms/genksyms.h
++++ b/scripts/genksyms/genksyms.h
+@@ -29,6 +29,10 @@ enum symbol_type {
+ SYM_NORMAL, SYM_TYPEDEF, SYM_ENUM, SYM_STRUCT, SYM_UNION
+ };
+
++enum symbol_status {
++ STATUS_UNCHANGED, STATUS_DEFINED, STATUS_MODIFIED
++};
++
+ struct string_list {
+ struct string_list *next;
+ enum symbol_type tag;
+@@ -43,6 +47,8 @@ struct symbol {
+ struct symbol *expansion_trail;
+ struct symbol *visited;
+ int is_extern;
++ int is_declared;
++ enum symbol_status status;
+ };
+
+ typedef struct string_list **yystype;
--- /dev/null
+From: Jay Lan <jlan@sgi.com>
+Subject: [PATCH] Resolve KDB conflicts with UV
+References: bnc#440376
+
+Hi Keith,
+
+On Wed, Oct 29, 2008 at 03:57:25PM +1100, Keith Owens wrote:
+> However there is a separate problem with your patch. You now wait in
+> smp_kdb_stop() until all cpus are in KDB. If any cpu is completely
+> hung so it cannot be interrupted then smp_kdb_stop() will never return
+> and KDB will now appear to hang.
+>
+> The existing code avoids this by
+>
+> kdb() -> smp_kdb_stop() - issue KDB_VECTOR as normal interrupt but do not wait for cpus
+> kdb() -> kdba_main_loop()
+> kdba_main_loop() -> kdb_save_running()
+> kdb_save_running() -> kdb_main_loop()
+> kdb_main_loop() -> kdb_wait_for_cpus()
+>
+> kdb_wait_for_cpus() waits until the other cpus are in KDB. If a cpu
+> does not respond to KDB_VECTOR after a few seconds then
+> kdb_wait_for_cpus() hits the missing cpus with NMI.
+>
+> This two step approach (send KDB_VECTOR as normal interrupt, wait then
+> send NMI) is used because NMI can be serviced at any time, even when
+> the target cpu is in the middle of servicing an interrupt. This can
+> result in incomplete register state which leads to broken backtraces.
+> IOW, sending NMI first would actually make debugging harder.
+>
+> Given the above logic, if you are going to take over an existing
+> interrupt vector then the vector needs to be acquired near the start of
+> kdb() and released near the end of kdb(), and only on the master cpu.
+>
+> Note: there is no overwhelming need for KDB_VECTOR to have a high
+> priority. As long as it is received within a few seconds then all is
+> well.
+
+Thanks for the explanation. I see your point.
+
+How about if we keep the two-step approach, but take over the vector
+when we need it in step one, then give it back when the step-two wait
+is over?
+(assuming we don't take over a vector needed for the NMI)
+
+Like this:
+
+Signed-off-by: Jay Lan <jlan@sgi.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/ia64/include/asm/kdb.h | 4 ++++
+ arch/x86/kdb/kdbasupport_32.c | 22 ++++++++++++++++++----
+ arch/x86/kdb/kdbasupport_64.c | 23 +++++++++++++++++++----
+ include/asm-x86/irq_vectors.h | 11 ++++++-----
+ include/asm-x86/kdb.h | 4 ++++
+ kdb/kdbmain.c | 2 ++
+ 6 files changed, 53 insertions(+), 13 deletions(-)
+
+--- a/arch/ia64/include/asm/kdb.h
++++ b/arch/ia64/include/asm/kdb.h
+@@ -42,4 +42,8 @@ kdba_funcptr_value(void *fp)
+ return *(unsigned long *)fp;
+ }
+
++#ifdef CONFIG_SMP
++#define kdba_giveback_vector(vector) (0)
++#endif
++
+ #endif /* !_ASM_KDB_H */
+--- a/arch/x86/kdb/kdbasupport_32.c
++++ b/arch/x86/kdb/kdbasupport_32.c
+@@ -883,9 +883,6 @@ kdba_cpu_up(void)
+ static int __init
+ kdba_arch_init(void)
+ {
+-#ifdef CONFIG_SMP
+- set_intr_gate(KDB_VECTOR, kdb_interrupt);
+-#endif
+ set_intr_gate(KDBENTER_VECTOR, kdb_call);
+ return 0;
+ }
+@@ -1027,14 +1024,31 @@ kdba_verify_rw(unsigned long addr, size_
+
+ #include <mach_ipi.h>
+
++gate_desc save_idt[NR_VECTORS];
++
++void kdba_takeover_vector(int vector)
++{
++ memcpy(&save_idt[vector], &idt_table[vector], sizeof(gate_desc));
++ set_intr_gate(KDB_VECTOR, kdb_interrupt);
++ return;
++}
++
++void kdba_giveback_vector(int vector)
++{
++ native_write_idt_entry(idt_table, vector, &save_idt[vector]);
++ return;
++}
++
+ /* When first entering KDB, try a normal IPI. That reduces backtrace problems
+ * on the other cpus.
+ */
+ void
+ smp_kdb_stop(void)
+ {
+- if (!KDB_FLAG(NOIPI))
++ if (!KDB_FLAG(NOIPI)) {
++ kdba_takeover_vector(KDB_VECTOR);
+ send_IPI_allbutself(KDB_VECTOR);
++ }
+ }
+
+ /* The normal KDB IPI handler */
+--- a/arch/x86/kdb/kdbasupport_64.c
++++ b/arch/x86/kdb/kdbasupport_64.c
+@@ -21,6 +21,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
+ #include <linux/kdebug.h>
++#include <linux/cpumask.h>
+ #include <asm/processor.h>
+ #include <asm/msr.h>
+ #include <asm/uaccess.h>
+@@ -900,9 +901,6 @@ kdba_cpu_up(void)
+ static int __init
+ kdba_arch_init(void)
+ {
+-#ifdef CONFIG_SMP
+- set_intr_gate(KDB_VECTOR, kdb_interrupt);
+-#endif
+ set_intr_gate(KDBENTER_VECTOR, kdb_call);
+ return 0;
+ }
+@@ -976,14 +974,31 @@ kdba_set_current_task(const struct task_
+
+ #include <mach_ipi.h>
+
++gate_desc save_idt[NR_VECTORS];
++
++void kdba_takeover_vector(int vector)
++{
++ memcpy(&save_idt[vector], &idt_table[vector], sizeof(gate_desc));
++ set_intr_gate(KDB_VECTOR, kdb_interrupt);
++ return;
++}
++
++void kdba_giveback_vector(int vector)
++{
++ native_write_idt_entry(idt_table, vector, &save_idt[vector]);
++ return;
++}
++
+ /* When first entering KDB, try a normal IPI. That reduces backtrace problems
+ * on the other cpus.
+ */
+ void
+ smp_kdb_stop(void)
+ {
+- if (!KDB_FLAG(NOIPI))
++ if (!KDB_FLAG(NOIPI)) {
++ kdba_takeover_vector(KDB_VECTOR);
+ send_IPI_allbutself(KDB_VECTOR);
++ }
+ }
+
+ /* The normal KDB IPI handler */
+--- a/include/asm-x86/irq_vectors.h
++++ b/include/asm-x86/irq_vectors.h
+@@ -66,7 +66,6 @@
+ # define RESCHEDULE_VECTOR 0xfc
+ # define CALL_FUNCTION_VECTOR 0xfb
+ # define CALL_FUNCTION_SINGLE_VECTOR 0xfa
+-#define KDB_VECTOR 0xf9
+ # define THERMAL_APIC_VECTOR 0xf0
+
+ #else
+@@ -79,10 +78,6 @@
+ #define THERMAL_APIC_VECTOR 0xfa
+ #define THRESHOLD_APIC_VECTOR 0xf9
+ #define UV_BAU_MESSAGE 0xf8
+-/* Overload KDB_VECTOR with UV_BAU_MESSAGE. By the time the UV hardware is
+- * ready, we should have moved to a dynamically allocated vector scheme.
+- */
+-#define KDB_VECTOR 0xf8
+ #define INVALIDATE_TLB_VECTOR_END 0xf7
+ #define INVALIDATE_TLB_VECTOR_START 0xf0 /* f0-f7 used for TLB flush */
+
+@@ -91,6 +86,12 @@
+ #endif
+
+ /*
++ * KDB_VECTOR will take over vector 0xfe when it is needed, as in theory
++ * it should not be used anyway.
++ */
++#define KDB_VECTOR 0xfe
++
++/*
+ * Local APIC timer IRQ vector is on a different priority level,
+ * to work around the 'lost local interrupt if more than 2 IRQ
+ * sources per level' errata.
+--- a/include/asm-x86/kdb.h
++++ b/include/asm-x86/kdb.h
+@@ -131,4 +131,8 @@ kdba_funcptr_value(void *fp)
+ return (unsigned long)fp;
+ }
+
++#ifdef CONFIG_SMP
++extern void kdba_giveback_vector(int);
++#endif
++
+ #endif /* !_ASM_KDB_H */
+--- a/kdb/kdbmain.c
++++ b/kdb/kdbmain.c
+@@ -1666,6 +1666,8 @@ kdb_wait_for_cpus(void)
+ wait == 1 ? " is" : "s are",
+ wait == 1 ? "its" : "their");
+ }
++ /* give back the vector we took over in smp_kdb_stop */
++ kdba_giveback_vector(KDB_VECTOR);
+ #endif /* CONFIG_SMP */
+ }
+
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Implement those parts of Automatic Inheritance (AI) which are safe under POSIX
+
+If AI is disabled for a directory (ACL4_AUTO_INHERIT
+not set), nothing changes. If AI is enabled for a directory, the
+create-time inheritance algorithm changes as follows:
+
+* All inherited ACEs will have the ACE4_INHERITED_ACE flag set.
+
+* The create mode is applied to the ACL (by setting the file masks),
+which means that the ACL must no longer be subject to AI permission
+propagation, and so the ACL4_PROTECTED flag is set.
+
+By itself, this is relatively useless because it will not allow
+permissions to propagate, but AI-aware applications can clear the
+ACL4_PROTECTED flag when they know what they are doing, and this will
+enable AI permission propagation.
+
+It would be nice if AI-aware applications could indicate this fact to
+the kernel so that the kernel can avoid setting the ACL4_PROTECTED flag
+in the first place, but there is no such user-space interface at this
+point.
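+
+At create time, the effect can be sketched as follows (a hypothetical
+call site; dir_acl is the parent directory's acl, mode the create mode):
+
+	struct nfs4acl *acl;
+
+	acl = nfs4acl_inherit(dir_acl, mode);
+	if (acl && !IS_ERR(acl)) {
+		/* With ACL4_AUTO_INHERIT set on the parent, every entry
+		 * of @acl now carries ACE4_INHERITED_ACE, and the acl is
+		 * marked ACL4_PROTECTED, so no AI permission propagation
+		 * happens until an AI-aware application clears the flag. */
+	}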
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/nfs4acl_base.c | 12 ++++++++++--
+ include/linux/nfs4acl.h | 26 +++++++++++++++++++++++---
+ 2 files changed, 33 insertions(+), 5 deletions(-)
+
+--- a/fs/nfs4acl_base.c
++++ b/fs/nfs4acl_base.c
+@@ -151,7 +151,8 @@ nfs4acl_chmod(struct nfs4acl *acl, mode_
+
+ if (acl->a_owner_mask == owner_mask &&
+ acl->a_group_mask == group_mask &&
+- acl->a_other_mask == other_mask)
++ acl->a_other_mask == other_mask &&
++ (!nfs4acl_is_auto_inherit(acl) || nfs4acl_is_protected(acl)))
+ return acl;
+
+ clone = nfs4acl_clone(acl);
+@@ -162,6 +163,8 @@ nfs4acl_chmod(struct nfs4acl *acl, mode_
+ clone->a_owner_mask = owner_mask;
+ clone->a_group_mask = group_mask;
+ clone->a_other_mask = other_mask;
++ if (nfs4acl_is_auto_inherit(clone))
++ clone->a_flags |= ACL4_PROTECTED;
+
+ if (nfs4acl_write_through(&clone)) {
+ nfs4acl_put(clone);
+@@ -558,7 +561,12 @@ nfs4acl_inherit(const struct nfs4acl *di
+ return ERR_PTR(-ENOMEM);
+ }
+
+- acl->a_flags = (dir_acl->a_flags & ACL4_WRITE_THROUGH);
++ acl->a_flags = (dir_acl->a_flags & ~ACL4_PROTECTED);
++ if (nfs4acl_is_auto_inherit(acl)) {
++ nfs4acl_for_each_entry(ace, acl)
++ ace->e_flags |= ACE4_INHERITED_ACE;
++ acl->a_flags |= ACL4_PROTECTED;
++ }
+
+ return acl;
+ }
+--- a/include/linux/nfs4acl.h
++++ b/include/linux/nfs4acl.h
+@@ -32,10 +32,16 @@ struct nfs4acl {
+ _ace--)
+
+ /* a_flags values */
++#define ACL4_AUTO_INHERIT 0x01
++#define ACL4_PROTECTED 0x02
++#define ACL4_DEFAULTED 0x04
+ #define ACL4_WRITE_THROUGH 0x40
+
+-#define ACL4_VALID_FLAGS \
+- ACL4_WRITE_THROUGH
++#define ACL4_VALID_FLAGS ( \
++ ACL4_AUTO_INHERIT | \
++ ACL4_PROTECTED | \
++ ACL4_DEFAULTED | \
++ ACL4_WRITE_THROUGH )
+
+ /* e_type values */
+ #define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000
+@@ -51,6 +57,7 @@ struct nfs4acl {
+ /*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/
+ /*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/
+ #define ACE4_IDENTIFIER_GROUP 0x0040
++#define ACE4_INHERITED_ACE 0x0080
+ #define ACE4_SPECIAL_WHO 0x4000 /* in-memory representation only */
+
+ #define ACE4_VALID_FLAGS ( \
+@@ -58,7 +65,8 @@ struct nfs4acl {
+ ACE4_DIRECTORY_INHERIT_ACE | \
+ ACE4_NO_PROPAGATE_INHERIT_ACE | \
+ ACE4_INHERIT_ONLY_ACE | \
+- ACE4_IDENTIFIER_GROUP )
++ ACE4_IDENTIFIER_GROUP | \
++ ACE4_INHERITED_ACE )
+
+ /* e_mask bitflags */
+ #define ACE4_READ_DATA 0x00000001
+@@ -128,6 +136,18 @@ extern const char nfs4ace_group_who[];
+ extern const char nfs4ace_everyone_who[];
+
+ static inline int
++nfs4acl_is_auto_inherit(const struct nfs4acl *acl)
++{
++ return acl->a_flags & ACL4_AUTO_INHERIT;
++}
++
++static inline int
++nfs4acl_is_protected(const struct nfs4acl *acl)
++{
++ return acl->a_flags & ACL4_PROTECTED;
++}
++
++static inline int
+ nfs4ace_is_owner(const struct nfs4ace *ace)
+ {
+ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: NFSv4 ACL in-memory representation and manipulation
+
+* In-memory representation (struct nfs4acl).
+* Functionality a filesystem needs such as permission checking,
+ apply mode to acl, compute mode from acl, inheritance upon file
+ create.
+* Compute a mask-less acl from struct nfs4acl that grants the same
+ permissions. Protocols which don't understand the masks need
+ this.
+* Convert to/from xattrs.
+
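+A minimal usage sketch (a hypothetical call site; error handling
+trimmed):
+
+	struct nfs4acl *acl = nfs4acl_alloc(1);
+	struct nfs4ace *ace = acl->a_entries;
+	int error;
+
+	ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
+	ace->e_mask = ACE4_READ_DATA | ACE4_WRITE_DATA;
+	nfs4ace_set_who(ace, "OWNER@");
+
+	/* derive the file masks from a mode */
+	acl = nfs4acl_chmod(acl, 0644);
+
+	/* later, e.g. in ->permission(), with @mask being MAY_* flags */
+	error = nfs4acl_permission(inode, acl, nfs4acl_want_to_mask(mask));
+
+	nfs4acl_put(acl);
+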
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/Kconfig | 4
+ fs/Makefile | 4
+ fs/nfs4acl_base.c | 565 +++++++++++++++++++++++++++++++
+ fs/nfs4acl_compat.c | 757 ++++++++++++++++++++++++++++++++++++++++++
+ fs/nfs4acl_xattr.c | 146 ++++++++
+ include/linux/nfs4acl.h | 205 +++++++++++
+ include/linux/nfs4acl_xattr.h | 32 +
+ 7 files changed, 1713 insertions(+)
+
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -419,6 +419,10 @@ config FS_POSIX_ACL
+ bool
+ default n
+
++config FS_NFS4ACL
++ bool
++ default n
++
+ source "fs/xfs/Kconfig"
+ source "fs/gfs2/Kconfig"
+
+--- a/fs/Makefile
++++ b/fs/Makefile
+@@ -50,6 +50,10 @@ obj-$(CONFIG_FS_POSIX_ACL) += posix_acl.
+ obj-$(CONFIG_NFS_COMMON) += nfs_common/
+ obj-$(CONFIG_GENERIC_ACL) += generic_acl.o
+
++obj-$(CONFIG_FS_NFS4ACL) += nfs4acl.o
++nfs4acl-y := nfs4acl_base.o nfs4acl_xattr.o \
++ nfs4acl_compat.o
++
+ obj-$(CONFIG_QUOTA) += dquot.o
+ obj-$(CONFIG_QFMT_V1) += quota_v1.o
+ obj-$(CONFIG_QFMT_V2) += quota_v2.o
+--- /dev/null
++++ b/fs/nfs4acl_base.c
+@@ -0,0 +1,565 @@
++/*
++ * Copyright (C) 2006 Andreas Gruenbacher <a.gruenbacher@computer.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2, or (at your option) any
++ * later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ */
++
++#include <linux/sched.h>
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/nfs4acl.h>
++
++MODULE_LICENSE("GPL");
++
++/*
++ * ACL entries that have ACE4_SPECIAL_WHO set in ace->e_flags use the
++ * pointer values of these constants in ace->u.e_who to avoid massive
++ * amounts of string comparisons.
++ */
++
++const char nfs4ace_owner_who[] = "OWNER@";
++const char nfs4ace_group_who[] = "GROUP@";
++const char nfs4ace_everyone_who[] = "EVERYONE@";
++
++EXPORT_SYMBOL(nfs4ace_owner_who);
++EXPORT_SYMBOL(nfs4ace_group_who);
++EXPORT_SYMBOL(nfs4ace_everyone_who);
++
++/**
++ * nfs4acl_alloc - allocate an acl
++ * @count: number of entries
++ */
++struct nfs4acl *
++nfs4acl_alloc(int count)
++{
++ size_t size = sizeof(struct nfs4acl) + count * sizeof(struct nfs4ace);
++ struct nfs4acl *acl = kmalloc(size, GFP_KERNEL);
++
++ if (acl) {
++ memset(acl, 0, size);
++ atomic_set(&acl->a_refcount, 1);
++ acl->a_count = count;
++ }
++ return acl;
++}
++EXPORT_SYMBOL(nfs4acl_alloc);
++
++/**
++ * nfs4acl_clone - create a copy of an acl
++ */
++struct nfs4acl *
++nfs4acl_clone(const struct nfs4acl *acl)
++{
++ int count = acl->a_count;
++ size_t size = sizeof(struct nfs4acl) + count * sizeof(struct nfs4ace);
++ struct nfs4acl *dup = kmalloc(size, GFP_KERNEL);
++
++ if (dup) {
++ memcpy(dup, acl, size);
++ atomic_set(&dup->a_refcount, 1);
++ }
++ return dup;
++}
++
++/*
++ * The POSIX permissions are supersets of the below mask flags.
++ *
++ * The ACE4_READ_ATTRIBUTES and ACE4_READ_ACL flags are always granted
++ * in POSIX. The ACE4_SYNCHRONIZE flag has no meaning under POSIX. We
++ * make sure that we do not mask them if they are set, so that users who
++ * rely on these flags won't get confused.
++ */
++#define ACE4_POSIX_MODE_READ ( \
++ ACE4_READ_DATA | ACE4_LIST_DIRECTORY )
++#define ACE4_POSIX_MODE_WRITE ( \
++ ACE4_WRITE_DATA | ACE4_ADD_FILE | \
++ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \
++ ACE4_DELETE_CHILD )
++#define ACE4_POSIX_MODE_EXEC ( \
++ ACE4_EXECUTE)
++
++static int
++nfs4acl_mask_to_mode(unsigned int mask)
++{
++ int mode = 0;
++
++ if (mask & ACE4_POSIX_MODE_READ)
++ mode |= MAY_READ;
++ if (mask & ACE4_POSIX_MODE_WRITE)
++ mode |= MAY_WRITE;
++ if (mask & ACE4_POSIX_MODE_EXEC)
++ mode |= MAY_EXEC;
++
++ return mode;
++}
++
++/**
++ * nfs4acl_masks_to_mode - compute file mode permission bits from file masks
++ *
++ * Compute the file mode permission bits from the file masks in the acl.
++ */
++int
++nfs4acl_masks_to_mode(const struct nfs4acl *acl)
++{
++ return nfs4acl_mask_to_mode(acl->a_owner_mask) << 6 |
++ nfs4acl_mask_to_mode(acl->a_group_mask) << 3 |
++ nfs4acl_mask_to_mode(acl->a_other_mask);
++}
++EXPORT_SYMBOL(nfs4acl_masks_to_mode);
++
++static unsigned int
++nfs4acl_mode_to_mask(mode_t mode)
++{
++ unsigned int mask = ACE4_POSIX_ALWAYS_ALLOWED;
++
++ if (mode & MAY_READ)
++ mask |= ACE4_POSIX_MODE_READ;
++ if (mode & MAY_WRITE)
++ mask |= ACE4_POSIX_MODE_WRITE;
++ if (mode & MAY_EXEC)
++ mask |= ACE4_POSIX_MODE_EXEC;
++
++ return mask;
++}
++
++/**
++ * nfs4acl_chmod - update the file masks to reflect the new mode
++ * @mode: file mode permission bits to apply to the @acl
++ *
++ * Converts the mask flags corresponding to the owner, group, and other file
++ * permissions and computes the file masks. Returns @acl if it already has the
++ * appropriate file masks, or updates the flags in a copy of @acl. Takes over
++ * @acl.
++ */
++struct nfs4acl *
++nfs4acl_chmod(struct nfs4acl *acl, mode_t mode)
++{
++ unsigned int owner_mask, group_mask, other_mask;
++ struct nfs4acl *clone;
++
++ owner_mask = nfs4acl_mode_to_mask(mode >> 6);
++ group_mask = nfs4acl_mode_to_mask(mode >> 3);
++ other_mask = nfs4acl_mode_to_mask(mode);
++
++ if (acl->a_owner_mask == owner_mask &&
++ acl->a_group_mask == group_mask &&
++ acl->a_other_mask == other_mask)
++ return acl;
++
++ clone = nfs4acl_clone(acl);
++ nfs4acl_put(acl);
++ if (!clone)
++ return ERR_PTR(-ENOMEM);
++
++ clone->a_owner_mask = owner_mask;
++ clone->a_group_mask = group_mask;
++ clone->a_other_mask = other_mask;
++
++ if (nfs4acl_write_through(&clone)) {
++ nfs4acl_put(clone);
++ clone = ERR_PTR(-ENOMEM);
++ }
++ return clone;
++}
++EXPORT_SYMBOL(nfs4acl_chmod);
++
++/**
++ * nfs4acl_want_to_mask - convert permission want argument to a mask
++ * @want: @want argument of the permission inode operation
++ *
++ * When checking for append, @want is (MAY_WRITE | MAY_APPEND).
++ */
++unsigned int
++nfs4acl_want_to_mask(int want)
++{
++ unsigned int mask = 0;
++
++ if (want & MAY_READ)
++ mask |= ACE4_READ_DATA;
++ if (want & MAY_APPEND)
++ mask |= ACE4_APPEND_DATA;
++ else if (want & MAY_WRITE)
++ mask |= ACE4_WRITE_DATA;
++ if (want & MAY_EXEC)
++ mask |= ACE4_EXECUTE;
++
++ return mask;
++}
++EXPORT_SYMBOL(nfs4acl_want_to_mask);
++
++/**
++ * nfs4acl_capability_check - check for capabilities overriding read/write access
++ * @inode: inode to check
++ * @mask: requested access (ACE4_* bitmask)
++ *
++ * Capabilities other than CAP_DAC_OVERRIDE and CAP_DAC_READ_SEARCH must be checked
++ * separately.
++ */
++static inline int nfs4acl_capability_check(struct inode *inode, unsigned int mask)
++{
++ /*
++ * Read/write DACs are always overridable.
++ * Executable DACs are overridable if at least one exec bit is set.
++ */
++ if (!(mask & (ACE4_WRITE_ACL | ACE4_WRITE_OWNER)) &&
++ (!(mask & ACE4_EXECUTE) ||
++ (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode)))
++ if (capable(CAP_DAC_OVERRIDE))
++ return 0;
++
++ /*
++ * Searching includes executable on directories, else just read.
++ */
++ if (!(mask & ~(ACE4_READ_DATA | ACE4_EXECUTE)) &&
++ (S_ISDIR(inode->i_mode) || !(mask & ACE4_EXECUTE)))
++ if (capable(CAP_DAC_READ_SEARCH))
++ return 0;
++
++ return -EACCES;
++}
++
++/**
++ * nfs4acl_permission - permission check algorithm with masking
++ * @inode: inode to check
++ * @acl: nfs4 acl of the inode
++ * @mask: requested access (ACE4_* bitmask)
++ *
++ * Checks if the current process is granted @mask flags in @acl. With
++ * write-through, the OWNER@ is always granted the owner file mask, the
++ * GROUP@ is always granted the group file mask, and EVERYONE@ is always
++ * granted the other file mask. Otherwise, processes are only granted
++ * @mask flags which they are granted in the @acl as well as in their
++ * file mask.
++ */
++int nfs4acl_permission(struct inode *inode, const struct nfs4acl *acl,
++ unsigned int mask)
++{
++ const struct nfs4ace *ace;
++ unsigned int file_mask, requested = mask, denied = 0;
++ int in_owning_group = in_group_p(inode->i_gid);
++ int owner_or_group_class = in_owning_group;
++
++ /*
++ * A process is in the
++ * - owner file class if it owns the file, in the
++ * - group file class if it is in the file's owning group or
++ * it matches any of the user or group entries, and in the
++ * - other file class otherwise.
++ */
++
++ nfs4acl_for_each_entry(ace, acl) {
++ unsigned int ace_mask = ace->e_mask;
++
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_owner(ace)) {
++ if (current->fsuid != inode->i_uid)
++ continue;
++ goto is_owner;
++ } else if (nfs4ace_is_group(ace)) {
++ if (!in_owning_group)
++ continue;
++ } else if (nfs4ace_is_unix_id(ace)) {
++ if (ace->e_flags & ACE4_IDENTIFIER_GROUP) {
++ if (!in_group_p(ace->u.e_id))
++ continue;
++ } else {
++ if (current->fsuid != ace->u.e_id)
++ continue;
++ }
++ } else
++ goto is_everyone;
++
++ /*
++ * Apply the group file mask to entries other than OWNER@ and
++ * EVERYONE@. This is not required for correct access checking
++ * but ensures that we grant the same permissions as the acl
++ * computed by nfs4acl_apply_masks().
++ *
++ * For example, without this restriction, 'group@:rw::allow'
++ * with mode 0600 would grant rw access to owner processes
++ * which are also in the owning group. This cannot be expressed
++ * in an acl.
++ */
++ if (nfs4ace_is_allow(ace))
++ ace_mask &= acl->a_group_mask;
++
++ is_owner:
++ /* The process is in the owner or group file class. */
++ owner_or_group_class = 1;
++
++ is_everyone:
++ /* Check which mask flags the ACE allows or denies. */
++ if (nfs4ace_is_deny(ace))
++ denied |= ace_mask & mask;
++ mask &= ~ace_mask;
++
++ /* Keep going until we know which file class the process is in. */
++ if (!mask && owner_or_group_class)
++ break;
++ }
++ denied |= mask;
++
++ /*
++ * Figure out which file mask applies.
++ * Clear write-through if the process is in the file group class but
++ * not in the owning group, and so the denied permissions apply.
++ */
++ if (current->fsuid == inode->i_uid)
++ file_mask = acl->a_owner_mask;
++ else if (in_owning_group || owner_or_group_class)
++ file_mask = acl->a_group_mask;
++ else
++ file_mask = acl->a_other_mask;
++
++ denied |= requested & ~file_mask;
++ if (!denied)
++ return 0;
++ return nfs4acl_capability_check(inode, requested);
++}
++EXPORT_SYMBOL(nfs4acl_permission);
++
++/**
++ * nfs4acl_generic_permission - permission check algorithm without explicit acl
++ * @inode: inode to check permissions for
++ * @mask: requested access (ACE4_* bitmask)
++ *
++ * The file mode of a file without ACL corresponds to an ACL with a single
++ * "EVERYONE:~0::ALLOW" entry, with file masks that correspond to the file mode
++ * permissions. Instead of constructing a temporary ACL and applying
++ * nfs4acl_permission() to it, compute the identical result directly from the file
++ * mode.
++ */
++int nfs4acl_generic_permission(struct inode *inode, unsigned int mask)
++{
++ int mode = inode->i_mode;
++
++ if (current->fsuid == inode->i_uid)
++ mode >>= 6;
++ else if (in_group_p(inode->i_gid))
++ mode >>= 3;
++ if (!(mask & ~nfs4acl_mode_to_mask(mode)))
++ return 0;
++ return nfs4acl_capability_check(inode, mask);
++}
++EXPORT_SYMBOL(nfs4acl_generic_permission);
++
++/*
++ * nfs4ace_is_same_who - do both acl entries refer to the same identifier?
++ */
++int
++nfs4ace_is_same_who(const struct nfs4ace *a, const struct nfs4ace *b)
++{
++#define WHO_FLAGS (ACE4_SPECIAL_WHO | ACE4_IDENTIFIER_GROUP)
++ if ((a->e_flags & WHO_FLAGS) != (b->e_flags & WHO_FLAGS))
++ return 0;
++ if (a->e_flags & ACE4_SPECIAL_WHO)
++ return a->u.e_who == b->u.e_who;
++ else
++ return a->u.e_id == b->u.e_id;
++#undef WHO_FLAGS
++}
++
++/**
++ * nfs4ace_set_who - set a special who value
++ * @ace: acl entry
++ * @who: who value to use
++ */
++int
++nfs4ace_set_who(struct nfs4ace *ace, const char *who)
++{
++ if (!strcmp(who, nfs4ace_owner_who))
++ who = nfs4ace_owner_who;
++ else if (!strcmp(who, nfs4ace_group_who))
++ who = nfs4ace_group_who;
++ else if (!strcmp(who, nfs4ace_everyone_who))
++ who = nfs4ace_everyone_who;
++ else
++ return -EINVAL;
++
++ ace->u.e_who = who;
++ ace->e_flags |= ACE4_SPECIAL_WHO;
++ ace->e_flags &= ~ACE4_IDENTIFIER_GROUP;
++ return 0;
++}
++EXPORT_SYMBOL(nfs4ace_set_who);
++
++/**
++ * nfs4acl_allowed_to_who - mask flags allowed to a specific who value
++ *
++ * Computes the mask values allowed to a specific who value, taking
++ * EVERYONE@ entries into account.
++ */
++static unsigned int
++nfs4acl_allowed_to_who(struct nfs4acl *acl, struct nfs4ace *who)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed = 0;
++
++ nfs4acl_for_each_entry_reverse(ace, acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_same_who(ace, who) ||
++ nfs4ace_is_everyone(ace)) {
++ if (nfs4ace_is_allow(ace))
++ allowed |= ace->e_mask;
++ else if (nfs4ace_is_deny(ace))
++ allowed &= ~ace->e_mask;
++ }
++ }
++ return allowed;
++}
++
++/**
++ * nfs4acl_compute_max_masks - compute upper bound masks
++ *
++ * Computes upper bound owner, group, and other masks so that none of
++ * the mask flags allowed by the acl are disabled (for any choice of the
++ * file owner or group membership).
++ */
++static void
++nfs4acl_compute_max_masks(struct nfs4acl *acl)
++{
++ struct nfs4ace *ace;
++
++ acl->a_owner_mask = 0;
++ acl->a_group_mask = 0;
++ acl->a_other_mask = 0;
++
++ nfs4acl_for_each_entry_reverse(ace, acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++
++ if (nfs4ace_is_owner(ace)) {
++ if (nfs4ace_is_allow(ace))
++ acl->a_owner_mask |= ace->e_mask;
++ else if (nfs4ace_is_deny(ace))
++ acl->a_owner_mask &= ~ace->e_mask;
++ } else if (nfs4ace_is_everyone(ace)) {
++ if (nfs4ace_is_allow(ace)) {
++ struct nfs4ace who = {
++ .e_flags = ACE4_SPECIAL_WHO,
++ .u.e_who = nfs4ace_group_who,
++ };
++
++ acl->a_other_mask |= ace->e_mask;
++ acl->a_group_mask |=
++ nfs4acl_allowed_to_who(acl, &who);
++ acl->a_owner_mask |= ace->e_mask;
++ } else if (nfs4ace_is_deny(ace)) {
++ acl->a_other_mask &= ~ace->e_mask;
++ acl->a_group_mask &= ~ace->e_mask;
++ acl->a_owner_mask &= ~ace->e_mask;
++ }
++ } else {
++ if (nfs4ace_is_allow(ace)) {
++ unsigned int mask =
++ nfs4acl_allowed_to_who(acl, ace);
++
++ acl->a_group_mask |= mask;
++ acl->a_owner_mask |= mask;
++ }
++ }
++ }
++}
++
++/**
++ * nfs4acl_inherit - compute the acl a new file will inherit
++ * @dir_acl: acl of the containing directory
++ * @mode: file type and create mode of the new file
++ *
++ * Given the containing directory's acl, this function will compute the
++ * acl that new files in that directory will inherit, or %NULL if
++ * @dir_acl does not contain acl entries inheritable by this file.
++ *
++ * Without write-through, the file masks in the returned acl are set to
++ * the intersection of the create mode and the maximum permissions
++ * allowed to each file class. With write-through, the file masks are
++ * set to the create mode.
++ */
++struct nfs4acl *
++nfs4acl_inherit(const struct nfs4acl *dir_acl, mode_t mode)
++{
++ const struct nfs4ace *dir_ace;
++ struct nfs4acl *acl;
++ struct nfs4ace *ace;
++ int count = 0;
++
++ if (S_ISDIR(mode)) {
++ nfs4acl_for_each_entry(dir_ace, dir_acl) {
++ if (!nfs4ace_is_inheritable(dir_ace))
++ continue;
++ count++;
++ }
++ if (!count)
++ return NULL;
++ acl = nfs4acl_alloc(count);
++ if (!acl)
++ return ERR_PTR(-ENOMEM);
++ ace = acl->a_entries;
++ nfs4acl_for_each_entry(dir_ace, dir_acl) {
++ if (!nfs4ace_is_inheritable(dir_ace))
++ continue;
++ memcpy(ace, dir_ace, sizeof(struct nfs4ace));
++ if (dir_ace->e_flags & ACE4_NO_PROPAGATE_INHERIT_ACE)
++ nfs4ace_clear_inheritance_flags(ace);
++ if ((dir_ace->e_flags & ACE4_FILE_INHERIT_ACE) &&
++ !(dir_ace->e_flags & ACE4_DIRECTORY_INHERIT_ACE))
++ ace->e_flags |= ACE4_INHERIT_ONLY_ACE;
++ ace++;
++ }
++ } else {
++ nfs4acl_for_each_entry(dir_ace, dir_acl) {
++ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE))
++ continue;
++ count++;
++ }
++ if (!count)
++ return NULL;
++ acl = nfs4acl_alloc(count);
++ if (!acl)
++ return ERR_PTR(-ENOMEM);
++ ace = acl->a_entries;
++ nfs4acl_for_each_entry(dir_ace, dir_acl) {
++ if (!(dir_ace->e_flags & ACE4_FILE_INHERIT_ACE))
++ continue;
++ memcpy(ace, dir_ace, sizeof(struct nfs4ace));
++ nfs4ace_clear_inheritance_flags(ace);
++ ace++;
++ }
++ }
++
++ /* The maximum mask flags that the owner, group, and other classes
++ are allowed. */
++ if (dir_acl->a_flags & ACL4_WRITE_THROUGH) {
++ acl->a_owner_mask = ACE4_VALID_MASK;
++ acl->a_group_mask = ACE4_VALID_MASK;
++ acl->a_other_mask = ACE4_VALID_MASK;
++
++ mode &= ~current->fs->umask;
++ } else
++ nfs4acl_compute_max_masks(acl);
++
++ /* Apply the create mode. */
++ acl->a_owner_mask &= nfs4acl_mode_to_mask(mode >> 6);
++ acl->a_group_mask &= nfs4acl_mode_to_mask(mode >> 3);
++ acl->a_other_mask &= nfs4acl_mode_to_mask(mode);
++
++ if (nfs4acl_write_through(&acl)) {
++ nfs4acl_put(acl);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ acl->a_flags = (dir_acl->a_flags & ACL4_WRITE_THROUGH);
++
++ return acl;
++}
++EXPORT_SYMBOL(nfs4acl_inherit);
+--- /dev/null
++++ b/fs/nfs4acl_compat.c
+@@ -0,0 +1,757 @@
++/*
++ * Copyright (C) 2006 Andreas Gruenbacher <a.gruenbacher@computer.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2, or (at your option) any
++ * later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ */
++
++#include <linux/module.h>
++#include <linux/fs.h>
++#include <linux/nfs4acl.h>
++
++/**
++ * struct nfs4acl_alloc - remember how many entries are actually allocated
++ * @acl: acl with a_count <= @count
++ * @count: the actual number of entries allocated in @acl
++ *
++ * We pass around this structure while modifying an acl, so that we do
++ * not have to reallocate when we remove existing entries followed by
++ * adding new entries.
++ */
++struct nfs4acl_alloc {
++ struct nfs4acl *acl;
++ unsigned int count;
++};
++
++/**
++ * nfs4acl_delete_entry - delete an entry in an acl
++ * @x: acl and number of allocated entries
++ * @ace: an entry in @x->acl
++ *
++ * Updates @ace so that it points to the entry before the deleted entry
++ * on return. (When deleting the first entry, @ace will point to the
++ * (non-existent) entry before the first entry). This behavior is the
++ * expected behavior when deleting entries while forward iterating over
++ * an acl.
++ */
++static void
++nfs4acl_delete_entry(struct nfs4acl_alloc *x, struct nfs4ace **ace)
++{
++ void *end = x->acl->a_entries + x->acl->a_count;
++
++ memmove(*ace, *ace + 1, end - (void *)(*ace + 1));
++ (*ace)--;
++ x->acl->a_count--;
++}
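++
++/*
++ * Illustrative use while forward iterating (should_drop() stands for
++ * any hypothetical predicate; it is not part of this code):
++ *
++ *     nfs4acl_for_each_entry(ace, x->acl) {
++ *         if (should_drop(ace))
++ *             nfs4acl_delete_entry(x, &ace);
++ *     }
++ *
++ * Decrementing *ace above makes the loop's ace++ revisit the slot into
++ * which the following entries were shifted, so no entry is skipped.
++ */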
++
++/**
++ * nfs4acl_insert_entry - insert an entry in an acl
++ * @x: acl and number of allocated entries
++ * @ace: entry before which the new entry shall be inserted
++ *
++ * Insert a new entry in @x->acl at position @ace, and zero-initialize
++ * it. This may require reallocating @x->acl.
++ */
++static int
++nfs4acl_insert_entry(struct nfs4acl_alloc *x, struct nfs4ace **ace)
++{
++ if (x->count == x->acl->a_count) {
++ int n = *ace - x->acl->a_entries;
++ struct nfs4acl *acl2;
++
++ acl2 = nfs4acl_alloc(x->acl->a_count + 1);
++ if (!acl2)
++ return -1;
++ acl2->a_flags = x->acl->a_flags;
++ acl2->a_owner_mask = x->acl->a_owner_mask;
++ acl2->a_group_mask = x->acl->a_group_mask;
++ acl2->a_other_mask = x->acl->a_other_mask;
++ memcpy(acl2->a_entries, x->acl->a_entries,
++ n * sizeof(struct nfs4ace));
++ memcpy(acl2->a_entries + n + 1, *ace,
++ (x->acl->a_count - n) * sizeof(struct nfs4ace));
++ kfree(x->acl);
++ x->acl = acl2;
++ x->count = acl2->a_count;
++ *ace = acl2->a_entries + n;
++ } else {
++ void *end = x->acl->a_entries + x->acl->a_count;
++
++ memmove(*ace + 1, *ace, end - (void *)*ace);
++ x->acl->a_count++;
++ }
++ memset(*ace, 0, sizeof(struct nfs4ace));
++ return 0;
++}
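++
++/*
++ * Note (illustrative): the reallocation above grows the acl by exactly
++ * one entry, and x->count remembers the allocated size, so a slot freed
++ * by nfs4acl_delete_entry() is reused before any further reallocation.
++ */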
++
++/**
++ * nfs4ace_change_mask - change the mask in @ace to @mask
++ * @x: acl and number of allocated entries
++ * @ace: entry to modify
++ * @mask: new mask for @ace
++ *
++ * Set the effective mask of @ace to @mask. This will require splitting
++ * off a separate acl entry if @ace is inheritable. In that case, the
++ * effective-only acl entry is inserted after the inheritable acl
++ * entry, and the inheritable acl entry is set to inheritable-only. If
++ * @mask is 0, either set the original acl entry to inheritable-only if
++ * it was inheritable, or remove it otherwise. The returned @ace points
++ * to the modified or inserted effective-only acl entry if that entry
++ * exists, to the entry that has become inheritable-only, or else to the
++ * previous entry in the acl. This is the expected behavior when
++ * modifying masks while forward iterating over an acl.
++ */
++static int
++nfs4ace_change_mask(struct nfs4acl_alloc *x, struct nfs4ace **ace,
++ unsigned int mask)
++{
++ if (mask && (*ace)->e_mask == mask)
++ return 0;
++ if (mask & ~ACE4_POSIX_ALWAYS_ALLOWED) {
++ if (nfs4ace_is_inheritable(*ace)) {
++ if (nfs4acl_insert_entry(x, ace))
++ return -1;
++ memcpy(*ace, *ace + 1, sizeof(struct nfs4ace));
++ (*ace)->e_flags |= ACE4_INHERIT_ONLY_ACE;
++ (*ace)++;
++ nfs4ace_clear_inheritance_flags(*ace);
++ }
++ (*ace)->e_mask = mask;
++ } else {
++ if (nfs4ace_is_inheritable(*ace))
++ (*ace)->e_flags |= ACE4_INHERIT_ONLY_ACE;
++ else
++ nfs4acl_delete_entry(x, ace);
++ }
++ return 0;
++}
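++
++/*
++ * Example (illustrative): changing the mask of an entry that has
++ * ACE4_FILE_INHERIT_ACE set leaves an inherit-only copy of the entry in
++ * place and inserts an effective-only entry with the new mask directly
++ * after it, so the inheritable permissions remain unchanged.
++ */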
++
++/**
++ * nfs4acl_move_everyone_aces_down - move everyone@ acl entries to the end
++ * @x: acl and number of allocated entries
++ *
++ * Move all everyone@ acl entries to the bottom of the acl so that only a
++ * single everyone@ allow acl entry remains at the end, and update the
++ * mask fields of all acl entries on the way. If everyone@ is not
++ * granted any permissions, no empty everyone@ acl entry is inserted.
++ *
++ * This transformation does not modify the permissions that the acl
++ * grants, but we need it to simplify successive transformations.
++ */
++static int
++nfs4acl_move_everyone_aces_down(struct nfs4acl_alloc *x)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed = 0, denied = 0;
++
++ nfs4acl_for_each_entry(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_everyone(ace)) {
++ if (nfs4ace_is_allow(ace))
++ allowed |= (ace->e_mask & ~denied);
++ else if (nfs4ace_is_deny(ace))
++ denied |= (ace->e_mask & ~allowed);
++ else
++ continue;
++ if (nfs4ace_change_mask(x, &ace, 0))
++ return -1;
++ } else {
++ if (nfs4ace_is_allow(ace)) {
++ if (nfs4ace_change_mask(x, &ace, allowed |
++ (ace->e_mask & ~denied)))
++ return -1;
++ } else if (nfs4ace_is_deny(ace)) {
++ if (nfs4ace_change_mask(x, &ace, denied |
++ (ace->e_mask & ~allowed)))
++ return -1;
++ }
++ }
++ }
++ if (allowed & ~ACE4_POSIX_ALWAYS_ALLOWED) {
++ struct nfs4ace *last_ace = ace - 1;
++
++ if (nfs4ace_is_everyone(last_ace) &&
++ nfs4ace_is_allow(last_ace) &&
++ nfs4ace_is_inherit_only(last_ace) &&
++ last_ace->e_mask == allowed)
++ last_ace->e_flags &= ~ACE4_INHERIT_ONLY_ACE;
++ else {
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = allowed;
++ ace->u.e_who = nfs4ace_everyone_who;
++ }
++ }
++ return 0;
++}
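++
++/*
++ * Worked example (illustrative): the acl
++ *
++ *     everyone@:r::allow, joe:w::allow, everyone@:x::allow
++ *
++ * becomes
++ *
++ *     joe:rw::allow, everyone@:rx::allow
++ *
++ * The interleaved everyone@ entries are folded into a single trailing
++ * allow entry, and joe's entry picks up the r flag that everyone@ had
++ * already been granted before it.
++ */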
++
++/**
++ * __nfs4acl_propagate_everyone - propagate everyone@ mask flags up for @who
++ * @x: acl and number of allocated entries
++ * @who: identifier to propagate mask flags for
++ * @allow: mask flags to propagate up
++ *
++ * Propagate mask flags from the trailing everyone@ allow acl entry up
++ * for the specified @who.
++ *
++ * The idea here is to precede the trailing EVERYONE@ ALLOW entry with an
++ * additional @who ALLOW entry, but with the following optimizations:
++ * (1) we don't bother setting any flags in the new @who ALLOW entry
++ * that have already been allowed or denied by a previous @who entry, (2)
++ * we merge the new @who entry with a previous @who entry if there is
++ * such a previous @who entry and there are no intervening DENY entries
++ * with mask flags that overlap the flags we care about.
++ */
++static int
++__nfs4acl_propagate_everyone(struct nfs4acl_alloc *x, struct nfs4ace *who,
++ unsigned int allow)
++{
++ struct nfs4ace *allow_last = NULL, *ace;
++
++ /* Remove the mask flags from allow that are already determined for
++ this who value, and figure out if there is an ALLOW entry for
++ this who value that is "reachable" from the trailing EVERYONE@
++ ALLOW ACE. */
++ nfs4acl_for_each_entry(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_allow(ace)) {
++ if (nfs4ace_is_same_who(ace, who)) {
++ allow &= ~ace->e_mask;
++ allow_last = ace;
++ }
++ } else if (nfs4ace_is_deny(ace)) {
++ if (nfs4ace_is_same_who(ace, who))
++ allow &= ~ace->e_mask;
++ if (allow & ace->e_mask)
++ allow_last = NULL;
++ }
++ }
++
++ if (allow) {
++ if (allow_last)
++ return nfs4ace_change_mask(x, &allow_last,
++ allow_last->e_mask | allow);
++ else {
++ struct nfs4ace who_copy;
++
++ ace = x->acl->a_entries + x->acl->a_count - 1;
++ memcpy(&who_copy, who, sizeof(struct nfs4ace));
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ memcpy(ace, &who_copy, sizeof(struct nfs4ace));
++ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
++ nfs4ace_clear_inheritance_flags(ace);
++ ace->e_mask = allow;
++ }
++ }
++ return 0;
++}
++
++/**
++ * nfs4acl_propagate_everyone - propagate everyone@ mask flags up the acl
++ * @x: acl and number of allocated entries
++ *
++ * Make sure for owner@, group@, and all other users, groups, and
++ * special identifiers that they are allowed or denied all permissions
++ * that are granted by the trailing everyone@ acl entry. If they are
++ * not, try to add the missing permissions to existing allow acl entries
++ * for those users, or introduce additional acl entries if that is not
++ * possible.
++ *
++ * We do this so that no mask flags will get lost when finally applying
++ * the file masks to the acl entries: otherwise, with an other file mask
++ * that is more restrictive than the owner and/or group file mask, mask
++ * flags that were allowed to processes in the owner and group classes
++ * and that the other mask denies would be lost. For example, the
++ * following two acls show the problem when mode 0664 is applied to
++ * them:
++ *
++ * masking without propagation (wrong)
++ * ===========================================================
++ * joe:r::allow => joe:r::allow
++ * everyone@:rwx::allow => everyone@:r::allow
++ * -----------------------------------------------------------
++ * joe:w::deny => joe:w::deny
++ * everyone@:rwx::allow everyone@:r::allow
++ *
++ * Note that the permissions of joe end up being more restrictive than
++ * what the acl would allow when first computing the allowed flags and
++ * then applying the respective mask. With propagation of permissions,
++ * we get:
++ *
++ * masking after propagation (correct)
++ * ===========================================================
++ * joe:r::allow => joe:rw::allow
++ * owner@:rw::allow
++ * group@:rw::allow
++ * everyone@:rwx::allow everyone@:r::allow
++ * -----------------------------------------------------------
++ * joe:w::deny => owner@:x::deny
++ * joe:w::deny
++ * owner@:rw::allow
++ * group@:rw::allow
++ * joe:r::allow
++ * everyone@:rwx::allow everyone@:r::allow
++ *
++ * The examples show the acls that would result from propagation with no
++ * masking performed. In fact, we do apply the respective mask to the
++ * acl entries before computing the propagation because this will save
++ * us from adding acl entries that would end up with empty mask fields
++ * after applying the masks.
++ *
++ * It is ensured that no more than one entry will be inserted for each
++ * who value, no matter how many entries each who value already has.
++ */
++static int
++nfs4acl_propagate_everyone(struct nfs4acl_alloc *x)
++{
++ int write_through = (x->acl->a_flags & ACL4_WRITE_THROUGH);
++ struct nfs4ace who = { .e_flags = ACE4_SPECIAL_WHO };
++ struct nfs4ace *ace;
++ unsigned int owner_allow, group_allow;
++ int retval;
++
++ if (!((x->acl->a_owner_mask | x->acl->a_group_mask) &
++ ~x->acl->a_other_mask))
++ return 0;
++ if (!x->acl->a_count)
++ return 0;
++ ace = x->acl->a_entries + x->acl->a_count - 1;
++ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_everyone(ace))
++ return 0;
++ if (!(ace->e_mask & ~x->acl->a_other_mask)) {
++ /* None of the allowed permissions will get masked. */
++ return 0;
++ }
++ owner_allow = ace->e_mask & x->acl->a_owner_mask;
++ group_allow = ace->e_mask & x->acl->a_group_mask;
++
++ /* Propagate everyone@ permissions through to owner@. */
++ if (owner_allow && !write_through &&
++ (x->acl->a_owner_mask & ~x->acl->a_other_mask)) {
++ who.u.e_who = nfs4ace_owner_who;
++ retval = __nfs4acl_propagate_everyone(x, &who, owner_allow);
++ if (retval)
++ return -1;
++ }
++
++ if (group_allow && (x->acl->a_group_mask & ~x->acl->a_other_mask)) {
++ int n;
++
++ if (!write_through) {
++ /* Propagate everyone@ permissions through to group@. */
++ who.u.e_who = nfs4ace_group_who;
++ retval = __nfs4acl_propagate_everyone(x, &who,
++ group_allow);
++ if (retval)
++ return -1;
++ }
++
++ /* Start from the entry before the trailing EVERYONE@ ALLOW
++ entry. We will not hit EVERYONE@ entries in the loop. */
++ for (n = x->acl->a_count - 2; n != -1; n--) {
++ ace = x->acl->a_entries + n;
++
++ if (nfs4ace_is_inherit_only(ace) ||
++ nfs4ace_is_owner(ace) ||
++ nfs4ace_is_group(ace))
++ continue;
++ if (nfs4ace_is_allow(ace) || nfs4ace_is_deny(ace)) {
++ /* Any inserted entry will end up below the
++ current entry. */
++ retval = __nfs4acl_propagate_everyone(x, ace,
++ group_allow);
++ if (retval)
++ return -1;
++ }
++ }
++ }
++ return 0;
++}
++
++/**
++ * __nfs4acl_apply_masks - apply the masks to the acl entries
++ * @x: acl and number of allocated entries
++ *
++ * Apply the owner file mask to owner@ entries, the intersection of the
++ * group and other file masks to everyone@ entries, and the group file
++ * mask to all other entries.
++ */
++static int
++__nfs4acl_apply_masks(struct nfs4acl_alloc *x)
++{
++ struct nfs4ace *ace;
++
++ nfs4acl_for_each_entry(ace, x->acl) {
++ unsigned int mask;
++
++ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_allow(ace))
++ continue;
++ if (nfs4ace_is_owner(ace))
++ mask = x->acl->a_owner_mask;
++ else if (nfs4ace_is_everyone(ace))
++ mask = x->acl->a_other_mask;
++ else
++ mask = x->acl->a_group_mask;
++ if (nfs4ace_change_mask(x, &ace, ace->e_mask & mask))
++ return -1;
++ }
++ return 0;
++}
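++
++/*
++ * Example (illustrative): with a_owner_mask = rw, a_group_mask = r, and
++ * a_other_mask = r, an entry "joe:rwx::allow" is reduced to
++ * "joe:r::allow", since entries other than owner@ and everyone@ fall
++ * into the group file class.
++ */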
++
++/**
++ * nfs4acl_max_allowed - maximum mask flags that anybody is allowed
++ */
++static unsigned int
++nfs4acl_max_allowed(struct nfs4acl *acl)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed = 0;
++
++ nfs4acl_for_each_entry_reverse(ace, acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_allow(ace))
++ allowed |= ace->e_mask;
++ else if (nfs4ace_is_deny(ace)) {
++ if (nfs4ace_is_everyone(ace))
++ allowed &= ~ace->e_mask;
++ }
++ }
++ return allowed;
++}
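++
++/*
++ * Note (illustrative): only everyone@ deny entries subtract from the
++ * upper bound here, because a deny entry for a specific who value still
++ * leaves the denied flags reachable by processes matching other
++ * entries.
++ */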
++
++/**
++ * nfs4acl_isolate_owner_class - limit the owner class to the owner file mask
++ * @x: acl and number of allocated entries
++ *
++ * Make sure the owner class (owner@) is granted no more than the owner
++ * mask by first checking which permissions anyone is granted, and then
++ * denying owner@ all permissions beyond that.
++ */
++static int
++nfs4acl_isolate_owner_class(struct nfs4acl_alloc *x)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed = 0;
++
++ allowed = nfs4acl_max_allowed(x->acl);
++ if (allowed & ~x->acl->a_owner_mask) {
++ /* Figure out if we can update an existing OWNER@ DENY entry. */
++ nfs4acl_for_each_entry(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_deny(ace)) {
++ if (nfs4ace_is_owner(ace))
++ break;
++ } else if (nfs4ace_is_allow(ace)) {
++ ace = x->acl->a_entries + x->acl->a_count;
++ break;
++ }
++ }
++ if (ace != x->acl->a_entries + x->acl->a_count) {
++ if (nfs4ace_change_mask(x, &ace, ace->e_mask |
++ (allowed & ~x->acl->a_owner_mask)))
++ return -1;
++ } else {
++ /* Insert an owner@ deny entry at the front. */
++ ace = x->acl->a_entries;
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = allowed & ~x->acl->a_owner_mask;
++ ace->u.e_who = nfs4ace_owner_who;
++ }
++ }
++ return 0;
++}
++
++/**
++ * __nfs4acl_isolate_who - isolate entry from EVERYONE@ ALLOW entry
++ * @x: acl and number of allocated entries
++ * @who: identifier to isolate
++ * @deny: mask flags this identifier should not be allowed
++ *
++ * Make sure that @who is not allowed any mask flags in @deny by checking
++ * which mask flags this identifier is allowed, and adding excess allowed
++ * mask flags to an existing DENY entry before the trailing EVERYONE@ ALLOW
++ * entry, or inserting such an entry.
++ */
++static int
++__nfs4acl_isolate_who(struct nfs4acl_alloc *x, struct nfs4ace *who,
++ unsigned int deny)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed = 0, n;
++
++ /* Compute the mask flags granted to this who value. */
++ nfs4acl_for_each_entry_reverse(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_same_who(ace, who)) {
++ if (nfs4ace_is_allow(ace))
++ allowed |= ace->e_mask;
++ else if (nfs4ace_is_deny(ace))
++ allowed &= ~ace->e_mask;
++ deny &= ~ace->e_mask;
++ }
++ }
++ if (!deny)
++ return 0;
++
++ /* Figure out if we can update an existing DENY entry. Start
++ from the entry before the trailing EVERYONE@ ALLOW entry. We
++ will not hit EVERYONE@ entries in the loop. */
++ for (n = x->acl->a_count - 2; n != -1; n--) {
++ ace = x->acl->a_entries + n;
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_deny(ace)) {
++ if (nfs4ace_is_same_who(ace, who))
++ break;
++ } else if (nfs4ace_is_allow(ace) &&
++ (ace->e_mask & deny)) {
++ n = -1;
++ break;
++ }
++ }
++ if (n != -1) {
++ if (nfs4ace_change_mask(x, &ace, ace->e_mask | deny))
++ return -1;
++ } else {
++ /* Insert a deny entry before the trailing EVERYONE@ ALLOW
++ entry. */
++ struct nfs4ace who_copy;
++
++ ace = x->acl->a_entries + x->acl->a_count - 1;
++ memcpy(&who_copy, who, sizeof(struct nfs4ace));
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ memcpy(ace, &who_copy, sizeof(struct nfs4ace));
++ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE;
++ nfs4ace_clear_inheritance_flags(ace);
++ ace->e_mask = deny;
++ }
++ return 0;
++}
++
++/**
++ * nfs4acl_isolate_group_class - limit the group class to the group file mask
++ * @x: acl and number of allocated entries
++ *
++ * Make sure the group class (all entries except owner@ and everyone@) is
++ * granted no more than the group mask by inserting DENY entries for group
++ * class entries where necessary.
++ */
++static int
++nfs4acl_isolate_group_class(struct nfs4acl_alloc *x)
++{
++ struct nfs4ace who = {
++ .e_flags = ACE4_SPECIAL_WHO,
++ .u.e_who = nfs4ace_group_who,
++ };
++ struct nfs4ace *ace;
++ unsigned int deny;
++
++ if (!x->acl->a_count)
++ return 0;
++ ace = x->acl->a_entries + x->acl->a_count - 1;
++ if (nfs4ace_is_inherit_only(ace) || !nfs4ace_is_everyone(ace))
++ return 0;
++ deny = ace->e_mask & ~x->acl->a_group_mask;
++
++ if (deny) {
++ unsigned int n;
++
++ if (__nfs4acl_isolate_who(x, &who, deny))
++ return -1;
++
++ /* Start from the entry before the trailing EVERYONE@ ALLOW
++ entry. We will not hit EVERYONE@ entries in the loop. */
++ for (n = x->acl->a_count - 2; n != -1; n--) {
++ ace = x->acl->a_entries + n;
++
++ if (nfs4ace_is_inherit_only(ace) ||
++ nfs4ace_is_owner(ace) ||
++ nfs4ace_is_group(ace))
++ continue;
++ if (__nfs4acl_isolate_who(x, ace, deny))
++ return -1;
++ }
++ }
++ return 0;
++}
++
++/**
++ * __nfs4acl_write_through - grant the full masks to owner@, group@, everyone@
++ *
++ * Make sure that owner@, group@, and everyone@ are allowed the full mask
++ * permissions, and not only the permissions granted both by the acl and
++ * the masks.
++ */
++static int
++__nfs4acl_write_through(struct nfs4acl_alloc *x)
++{
++ struct nfs4ace *ace;
++ unsigned int allowed;
++
++ /* Remove all owner@ and group@ ACEs: we re-insert them at the
++ top. */
++ nfs4acl_for_each_entry(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if ((nfs4ace_is_owner(ace) || nfs4ace_is_group(ace)) &&
++ nfs4ace_change_mask(x, &ace, 0))
++ return -1;
++ }
++
++ /* Insert the everyone@ allow entry at the end, or update the
++ existing entry. */
++ allowed = x->acl->a_other_mask;
++ if (allowed & ~ACE4_POSIX_ALWAYS_ALLOWED) {
++ ace = x->acl->a_entries + x->acl->a_count - 1;
++ if (x->acl->a_count && nfs4ace_is_everyone(ace) &&
++ !nfs4ace_is_inherit_only(ace)) {
++ if (nfs4ace_change_mask(x, &ace, allowed))
++ return -1;
++ } else {
++ ace = x->acl->a_entries + x->acl->a_count;
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = allowed;
++ ace->u.e_who = nfs4ace_everyone_who;
++ }
++ }
++
++ /* Compute the permissions that owner@ and group@ are already granted
++ through the everyone@ allow entry at the end. Note that the acl
++ contains no owner@ or group@ entries at this point. */
++ allowed = 0;
++ nfs4acl_for_each_entry_reverse(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_allow(ace)) {
++ if (nfs4ace_is_everyone(ace))
++ allowed |= ace->e_mask;
++ } else if (nfs4ace_is_deny(ace))
++ allowed &= ~ace->e_mask;
++ }
++
++ /* Insert the appropriate group@ allow entry at the front. */
++ if (x->acl->a_group_mask & ~allowed) {
++ ace = x->acl->a_entries;
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = x->acl->a_group_mask /*& ~allowed*/;
++ ace->u.e_who = nfs4ace_group_who;
++ }
++
++ /* Insert the appropriate owner@ allow entry at the front. */
++ if (x->acl->a_owner_mask & ~allowed) {
++ ace = x->acl->a_entries;
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_ALLOWED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = x->acl->a_owner_mask /*& ~allowed*/;
++ ace->u.e_who = nfs4ace_owner_who;
++ }
++
++ /* Insert the appropriate owner@ deny entry at the front. */
++ allowed = nfs4acl_max_allowed(x->acl);
++ if (allowed & ~x->acl->a_owner_mask) {
++ nfs4acl_for_each_entry(ace, x->acl) {
++ if (nfs4ace_is_inherit_only(ace))
++ continue;
++ if (nfs4ace_is_allow(ace)) {
++ ace = x->acl->a_entries + x->acl->a_count;
++ break;
++ }
++ if (nfs4ace_is_deny(ace) && nfs4ace_is_owner(ace))
++ break;
++ }
++ if (ace != x->acl->a_entries + x->acl->a_count) {
++ if (nfs4ace_change_mask(x, &ace, ace->e_mask |
++ (allowed & ~x->acl->a_owner_mask)))
++ return -1;
++ } else {
++ ace = x->acl->a_entries;
++ if (nfs4acl_insert_entry(x, &ace))
++ return -1;
++ ace->e_type = ACE4_ACCESS_DENIED_ACE_TYPE;
++ ace->e_flags = ACE4_SPECIAL_WHO;
++ ace->e_mask = allowed & ~x->acl->a_owner_mask;
++ ace->u.e_who = nfs4ace_owner_who;
++ }
++ }
++
++ return 0;
++}
++
++/**
++ * nfs4acl_apply_masks - apply the masks to the acl
++ *
++ * Apply the masks so that the acl allows no more flags than the
++ * intersection between the flags that the original acl allows and the
++ * mask matching the process.
++ *
++ * Note: this algorithm may push the number of entries in the acl above
++ * ACL4_XATTR_MAX_COUNT, so a read-modify-write cycle would fail.
++ */
++int
++nfs4acl_apply_masks(struct nfs4acl **acl)
++{
++ struct nfs4acl_alloc x = {
++ .acl = *acl,
++ .count = (*acl)->a_count,
++ };
++ int retval = 0;
++
++ if (nfs4acl_move_everyone_aces_down(&x) ||
++ nfs4acl_propagate_everyone(&x) ||
++ __nfs4acl_apply_masks(&x) ||
++ nfs4acl_isolate_owner_class(&x) ||
++ nfs4acl_isolate_group_class(&x))
++ retval = -ENOMEM;
++
++ *acl = x.acl;
++ return retval;
++}
++EXPORT_SYMBOL(nfs4acl_apply_masks);
++
++int nfs4acl_write_through(struct nfs4acl **acl)
++{
++ struct nfs4acl_alloc x = {
++ .acl = *acl,
++ .count = (*acl)->a_count,
++ };
++ int retval = 0;
++
++ if (!((*acl)->a_flags & ACL4_WRITE_THROUGH))
++ goto out;
++
++ if (nfs4acl_move_everyone_aces_down(&x) ||
++ nfs4acl_propagate_everyone(&x) ||
++ __nfs4acl_write_through(&x))
++ retval = -ENOMEM;
++
++ *acl = x.acl;
++out:
++ return retval;
++}
+--- /dev/null
++++ b/fs/nfs4acl_xattr.c
+@@ -0,0 +1,146 @@
++/*
++ * Copyright (C) 2006 Andreas Gruenbacher <a.gruenbacher@computer.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2, or (at your option) any
++ * later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <linux/nfs4acl_xattr.h>
++
++MODULE_LICENSE("GPL");
++
++struct nfs4acl *
++nfs4acl_from_xattr(const void *value, size_t size)
++{
++ const struct nfs4acl_xattr *xattr_acl = value;
++ const struct nfs4ace_xattr *xattr_ace = (void *)(xattr_acl + 1);
++ struct nfs4acl *acl;
++ struct nfs4ace *ace;
++ int count;
++
++ if (size < sizeof(struct nfs4acl_xattr) ||
++ xattr_acl->a_version != ACL4_XATTR_VERSION ||
++ (xattr_acl->a_flags & ~ACL4_VALID_FLAGS))
++ return ERR_PTR(-EINVAL);
++
++ count = be16_to_cpu(xattr_acl->a_count);
++ if (count > ACL4_XATTR_MAX_COUNT)
++ return ERR_PTR(-EINVAL);
++
++ acl = nfs4acl_alloc(count);
++ if (!acl)
++ return ERR_PTR(-ENOMEM);
++
++ acl->a_flags = xattr_acl->a_flags;
++ acl->a_owner_mask = be32_to_cpu(xattr_acl->a_owner_mask);
++ if (acl->a_owner_mask & ~ACE4_VALID_MASK)
++ goto fail_einval;
++ acl->a_group_mask = be32_to_cpu(xattr_acl->a_group_mask);
++ if (acl->a_group_mask & ~ACE4_VALID_MASK)
++ goto fail_einval;
++ acl->a_other_mask = be32_to_cpu(xattr_acl->a_other_mask);
++ if (acl->a_other_mask & ~ACE4_VALID_MASK)
++ goto fail_einval;
++
++ nfs4acl_for_each_entry(ace, acl) {
++ const char *who = (void *)(xattr_ace + 1), *end;
++ ssize_t used = (void *)who - value;
++
++ if (used > size)
++ goto fail_einval;
++ end = memchr(who, 0, size - used);
++ if (!end)
++ goto fail_einval;
++
++ ace->e_type = be16_to_cpu(xattr_ace->e_type);
++ ace->e_flags = be16_to_cpu(xattr_ace->e_flags);
++ ace->e_mask = be32_to_cpu(xattr_ace->e_mask);
++ ace->u.e_id = be32_to_cpu(xattr_ace->e_id);
++
++ if (ace->e_flags & ~ACE4_VALID_FLAGS) {
++ memset(ace, 0, sizeof(struct nfs4ace));
++ goto fail_einval;
++ }
++ if (ace->e_type > ACE4_ACCESS_DENIED_ACE_TYPE ||
++ (ace->e_mask & ~ACE4_VALID_MASK))
++ goto fail_einval;
++
++ if (who == end) {
++ if (ace->u.e_id == -1)
++ goto fail_einval; /* uid/gid needed */
++ } else if (nfs4ace_set_who(ace, who))
++ goto fail_einval;
++
++ xattr_ace = (void *)who + ALIGN(end - who + 1, 4);
++ }
++
++ return acl;
++
++fail_einval:
++ nfs4acl_put(acl);
++ return ERR_PTR(-EINVAL);
++}
++EXPORT_SYMBOL(nfs4acl_from_xattr);
++
++size_t
++nfs4acl_xattr_size(const struct nfs4acl *acl)
++{
++ size_t size = sizeof(struct nfs4acl_xattr);
++ const struct nfs4ace *ace;
++
++ nfs4acl_for_each_entry(ace, acl) {
++ size += sizeof(struct nfs4ace_xattr) +
++ (nfs4ace_is_unix_id(ace) ? 4 :
++ ALIGN(strlen(ace->u.e_who) + 1, 4));
++ }
++ return size;
++}
++EXPORT_SYMBOL(nfs4acl_xattr_size);
++
++void
++nfs4acl_to_xattr(const struct nfs4acl *acl, void *buffer)
++{
++ struct nfs4acl_xattr *xattr_acl = buffer;
++ struct nfs4ace_xattr *xattr_ace;
++ const struct nfs4ace *ace;
++
++ xattr_acl->a_version = ACL4_XATTR_VERSION;
++ xattr_acl->a_flags = acl->a_flags;
++ xattr_acl->a_count = cpu_to_be16(acl->a_count);
++
++ xattr_acl->a_owner_mask = cpu_to_be32(acl->a_owner_mask);
++ xattr_acl->a_group_mask = cpu_to_be32(acl->a_group_mask);
++ xattr_acl->a_other_mask = cpu_to_be32(acl->a_other_mask);
++
++ xattr_ace = (void *)(xattr_acl + 1);
++ nfs4acl_for_each_entry(ace, acl) {
++ xattr_ace->e_type = cpu_to_be16(ace->e_type);
++ xattr_ace->e_flags = cpu_to_be16(ace->e_flags &
++ ACE4_VALID_FLAGS);
++ xattr_ace->e_mask = cpu_to_be32(ace->e_mask);
++ if (nfs4ace_is_unix_id(ace)) {
++ xattr_ace->e_id = cpu_to_be32(ace->u.e_id);
++ memset(xattr_ace->e_who, 0, 4);
++ xattr_ace = (void *)xattr_ace->e_who + 4;
++ } else {
++ int sz = ALIGN(strlen(ace->u.e_who) + 1, 4);
++
++ xattr_ace->e_id = cpu_to_be32(-1);
++ memset(xattr_ace->e_who + sz - 4, 0, 4);
++ strcpy(xattr_ace->e_who, ace->u.e_who);
++ xattr_ace = (void *)xattr_ace->e_who + sz;
++ }
++ }
++}
++EXPORT_SYMBOL(nfs4acl_to_xattr);
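++
++/*
++ * Illustrative layout (derived from the code above): an acl with the
++ * single entry "owner@:rw::allow" is encoded as one struct
++ * nfs4acl_xattr header followed by one struct nfs4ace_xattr with
++ * e_id = -1 and the special who string (e.g. "OWNER@") appended,
++ * NUL-terminated and padded to a multiple of four bytes.
++ */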
+--- /dev/null
++++ b/include/linux/nfs4acl.h
+@@ -0,0 +1,205 @@
++#ifndef __NFS4ACL_H
++#define __NFS4ACL_H
++
++struct nfs4ace {
++ unsigned short e_type;
++ unsigned short e_flags;
++ unsigned int e_mask;
++ union {
++ unsigned int e_id;
++ const char *e_who;
++ } u;
++};
++
++struct nfs4acl {
++ atomic_t a_refcount;
++ unsigned int a_owner_mask;
++ unsigned int a_group_mask;
++ unsigned int a_other_mask;
++ unsigned short a_count;
++ unsigned short a_flags;
++ struct nfs4ace a_entries[0];
++};
++
++#define nfs4acl_for_each_entry(_ace, _acl) \
++ for (_ace = _acl->a_entries; \
++ _ace != _acl->a_entries + _acl->a_count; \
++ _ace++)
++
++#define nfs4acl_for_each_entry_reverse(_ace, _acl) \
++ for (_ace = _acl->a_entries + _acl->a_count - 1; \
++ _ace != _acl->a_entries - 1; \
++ _ace--)
++
++/* a_flags values */
++#define ACL4_WRITE_THROUGH 0x40
++
++#define ACL4_VALID_FLAGS \
++ ACL4_WRITE_THROUGH
++
++/* e_type values */
++#define ACE4_ACCESS_ALLOWED_ACE_TYPE 0x0000
++#define ACE4_ACCESS_DENIED_ACE_TYPE 0x0001
++/*#define ACE4_SYSTEM_AUDIT_ACE_TYPE 0x0002*/
++/*#define ACE4_SYSTEM_ALARM_ACE_TYPE 0x0003*/
++
++/* e_flags bitflags */
++#define ACE4_FILE_INHERIT_ACE 0x0001
++#define ACE4_DIRECTORY_INHERIT_ACE 0x0002
++#define ACE4_NO_PROPAGATE_INHERIT_ACE 0x0004
++#define ACE4_INHERIT_ONLY_ACE 0x0008
++/*#define ACE4_SUCCESSFUL_ACCESS_ACE_FLAG 0x0010*/
++/*#define ACE4_FAILED_ACCESS_ACE_FLAG 0x0020*/
++#define ACE4_IDENTIFIER_GROUP 0x0040
++#define ACE4_SPECIAL_WHO 0x4000 /* in-memory representation only */
++
++#define ACE4_VALID_FLAGS ( \
++ ACE4_FILE_INHERIT_ACE | \
++ ACE4_DIRECTORY_INHERIT_ACE | \
++ ACE4_NO_PROPAGATE_INHERIT_ACE | \
++ ACE4_INHERIT_ONLY_ACE | \
++ ACE4_IDENTIFIER_GROUP )
++
++/* e_mask bitflags */
++#define ACE4_READ_DATA 0x00000001
++#define ACE4_LIST_DIRECTORY 0x00000001
++#define ACE4_WRITE_DATA 0x00000002
++#define ACE4_ADD_FILE 0x00000002
++#define ACE4_APPEND_DATA 0x00000004
++#define ACE4_ADD_SUBDIRECTORY 0x00000004
++#define ACE4_READ_NAMED_ATTRS 0x00000008
++#define ACE4_WRITE_NAMED_ATTRS 0x00000010
++#define ACE4_EXECUTE 0x00000020
++#define ACE4_DELETE_CHILD 0x00000040
++#define ACE4_READ_ATTRIBUTES 0x00000080
++#define ACE4_WRITE_ATTRIBUTES 0x00000100
++#define ACE4_DELETE 0x00010000
++#define ACE4_READ_ACL 0x00020000
++#define ACE4_WRITE_ACL 0x00040000
++#define ACE4_WRITE_OWNER 0x00080000
++#define ACE4_SYNCHRONIZE 0x00100000
++
++#define ACE4_VALID_MASK ( \
++ ACE4_READ_DATA | ACE4_LIST_DIRECTORY | \
++ ACE4_WRITE_DATA | ACE4_ADD_FILE | \
++ ACE4_APPEND_DATA | ACE4_ADD_SUBDIRECTORY | \
++ ACE4_READ_NAMED_ATTRS | \
++ ACE4_WRITE_NAMED_ATTRS | \
++ ACE4_EXECUTE | \
++ ACE4_DELETE_CHILD | \
++ ACE4_READ_ATTRIBUTES | \
++ ACE4_WRITE_ATTRIBUTES | \
++ ACE4_DELETE | \
++ ACE4_READ_ACL | \
++ ACE4_WRITE_ACL | \
++ ACE4_WRITE_OWNER | \
++ ACE4_SYNCHRONIZE )
++
++#define ACE4_POSIX_ALWAYS_ALLOWED ( \
++ ACE4_SYNCHRONIZE | \
++ ACE4_READ_ATTRIBUTES | \
++ ACE4_READ_ACL )
++
++/*
++ * Duplicate an NFS4ACL handle.
++ */
++static inline struct nfs4acl *
++nfs4acl_get(struct nfs4acl *acl)
++{
++ if (acl)
++ atomic_inc(&acl->a_refcount);
++ return acl;
++}
++
++/*
++ * Free an NFS4ACL handle
++ */
++static inline void
++nfs4acl_put(struct nfs4acl *acl)
++{
++ if (acl && atomic_dec_and_test(&acl->a_refcount))
++ kfree(acl);
++}
++
++/* Special e_who identifiers: we use these pointer values in comparisons
++ instead of strcmp for efficiency. */
++
++extern const char nfs4ace_owner_who[];
++extern const char nfs4ace_group_who[];
++extern const char nfs4ace_everyone_who[];
++
++static inline int
++nfs4ace_is_owner(const struct nfs4ace *ace)
++{
++ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
++ ace->u.e_who == nfs4ace_owner_who;
++}
++
++static inline int
++nfs4ace_is_group(const struct nfs4ace *ace)
++{
++ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
++ ace->u.e_who == nfs4ace_group_who;
++}
++
++static inline int
++nfs4ace_is_everyone(const struct nfs4ace *ace)
++{
++ return (ace->e_flags & ACE4_SPECIAL_WHO) &&
++ ace->u.e_who == nfs4ace_everyone_who;
++}
++
++static inline int
++nfs4ace_is_unix_id(const struct nfs4ace *ace)
++{
++ return !(ace->e_flags & ACE4_SPECIAL_WHO);
++}
++
++static inline int
++nfs4ace_is_inherit_only(const struct nfs4ace *ace)
++{
++ return ace->e_flags & ACE4_INHERIT_ONLY_ACE;
++}
++
++static inline int
++nfs4ace_is_inheritable(const struct nfs4ace *ace)
++{
++ return ace->e_flags & (ACE4_FILE_INHERIT_ACE |
++ ACE4_DIRECTORY_INHERIT_ACE);
++}
++
++static inline void
++nfs4ace_clear_inheritance_flags(struct nfs4ace *ace)
++{
++ ace->e_flags &= ~(ACE4_FILE_INHERIT_ACE |
++ ACE4_DIRECTORY_INHERIT_ACE |
++ ACE4_NO_PROPAGATE_INHERIT_ACE |
++ ACE4_INHERIT_ONLY_ACE);
++}
++
++static inline int
++nfs4ace_is_allow(const struct nfs4ace *ace)
++{
++ return ace->e_type == ACE4_ACCESS_ALLOWED_ACE_TYPE;
++}
++
++static inline int
++nfs4ace_is_deny(const struct nfs4ace *ace)
++{
++ return ace->e_type == ACE4_ACCESS_DENIED_ACE_TYPE;
++}
++
++extern struct nfs4acl *nfs4acl_alloc(int count);
++extern struct nfs4acl *nfs4acl_clone(const struct nfs4acl *acl);
++
++extern unsigned int nfs4acl_want_to_mask(int want);
++extern int nfs4acl_permission(struct inode *, const struct nfs4acl *, unsigned int);
++extern int nfs4acl_generic_permission(struct inode *, unsigned int);
++extern int nfs4ace_is_same_who(const struct nfs4ace *, const struct nfs4ace *);
++extern int nfs4ace_set_who(struct nfs4ace *ace, const char *who);
++extern struct nfs4acl *nfs4acl_inherit(const struct nfs4acl *, mode_t);
++extern int nfs4acl_masks_to_mode(const struct nfs4acl *);
++extern struct nfs4acl *nfs4acl_chmod(struct nfs4acl *, mode_t);
++extern int nfs4acl_apply_masks(struct nfs4acl **acl);
++extern int nfs4acl_write_through(struct nfs4acl **acl);
++
++#endif /* __NFS4ACL_H */
+--- /dev/null
++++ b/include/linux/nfs4acl_xattr.h
+@@ -0,0 +1,32 @@
++#ifndef __NFS4ACL_XATTR_H
++#define __NFS4ACL_XATTR_H
++
++#include <linux/nfs4acl.h>
++
++#define NFS4ACL_XATTR "system.nfs4acl"
++
++struct nfs4ace_xattr {
++ __be16 e_type;
++ __be16 e_flags;
++ __be32 e_mask;
++ __be32 e_id;
++ char e_who[0];
++};
++
++struct nfs4acl_xattr {
++ unsigned char a_version;
++ unsigned char a_flags;
++ __be16 a_count;
++ __be32 a_owner_mask;
++ __be32 a_group_mask;
++ __be32 a_other_mask;
++};
++
++#define ACL4_XATTR_VERSION 0
++#define ACL4_XATTR_MAX_COUNT 1024
++
++extern struct nfs4acl *nfs4acl_from_xattr(const void *, size_t);
++extern size_t nfs4acl_xattr_size(const struct nfs4acl *acl);
++extern void nfs4acl_to_xattr(const struct nfs4acl *, void *);
++
++#endif /* __NFS4ACL_XATTR_H */
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: NFSv4 ACLs for ext3
+
+With the acl=nfs4 mount option, ext3 will use NFSv4 ACLs instead of
+POSIX ACLs. See http://www.suse.de/~agruen/nfs4acl/ for some
+documentation and examples.
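+
+To try it out (illustrative; the device and mount point are
+placeholders):
+
+    # mount -t ext3 -o acl=nfs4 /dev/sdb1 /mnt
+
+The ACLs are stored in the "system.nfs4acl" extended attribute of each
+inode and can be inspected and modified with the userspace tools from
+the page above.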
+
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ fs/Kconfig | 7
+ fs/ext3/Makefile | 1
+ fs/ext3/acl.c | 8
+ fs/ext3/acl.h | 4
+ fs/ext3/file.c | 4
+ fs/ext3/ialloc.c | 6
+ fs/ext3/inode.c | 73 ++++++++-
+ fs/ext3/namei.c | 15 +
+ fs/ext3/namei.h | 1
+ fs/ext3/nfs4acl.c | 370 ++++++++++++++++++++++++++++++++++++++++++++++
+ fs/ext3/nfs4acl.h | 36 ++++
+ fs/ext3/super.c | 60 +++++--
+ fs/ext3/xattr.c | 9 +
+ fs/ext3/xattr.h | 5
+ include/linux/ext3_fs.h | 1
+ include/linux/ext3_fs_i.h | 3
+ 16 files changed, 577 insertions(+), 26 deletions(-)
+
+--- a/fs/Kconfig
++++ b/fs/Kconfig
+@@ -124,6 +124,13 @@ config EXT3_FS_POSIX_ACL
+
+ If you don't know what Access Control Lists are, say N
+
++config EXT3_FS_NFS4ACL
++ bool "Native NFSv4 ACLs (EXPERIMENTAL)"
++ depends on EXT3_FS_XATTR && EXPERIMENTAL
++ select FS_NFS4ACL
++ help
++ Allow the use of NFSv4 ACLs instead of POSIX ACLs.
++
+ config EXT3_FS_SECURITY
+ bool "Ext3 Security Labels"
+ depends on EXT3_FS_XATTR
+--- a/fs/ext3/Makefile
++++ b/fs/ext3/Makefile
+@@ -10,3 +10,4 @@ ext3-y := balloc.o bitmap.o dir.o file.o
+ ext3-$(CONFIG_EXT3_FS_XATTR) += xattr.o xattr_user.o xattr_trusted.o
+ ext3-$(CONFIG_EXT3_FS_POSIX_ACL) += acl.o
+ ext3-$(CONFIG_EXT3_FS_SECURITY) += xattr_security.o
++ext3-$(CONFIG_EXT3_FS_NFS4ACL) += nfs4acl.o
+--- a/fs/ext3/acl.c
++++ b/fs/ext3/acl.c
+@@ -282,7 +282,7 @@ ext3_set_acl(handle_t *handle, struct in
+ return error;
+ }
+
+-static int
++int
+ ext3_check_acl(struct inode *inode, int mask)
+ {
+ struct posix_acl *acl = ext3_get_acl(inode, ACL_TYPE_ACCESS);
+@@ -298,12 +298,6 @@ ext3_check_acl(struct inode *inode, int
+ return -EAGAIN;
+ }
+
+-int
+-ext3_permission(struct inode *inode, int mask)
+-{
+- return generic_permission(inode, mask, ext3_check_acl);
+-}
+-
+ /*
+ * Initialize the ACLs of a new inode. Called from ext3_new_inode.
+ *
+--- a/fs/ext3/acl.h
++++ b/fs/ext3/acl.h
+@@ -58,13 +58,13 @@ static inline int ext3_acl_count(size_t
+ #define EXT3_ACL_NOT_CACHED ((void *)-1)
+
+ /* acl.c */
+-extern int ext3_permission (struct inode *, int);
++extern int ext3_check_acl (struct inode *, int);
+ extern int ext3_acl_chmod (struct inode *);
+ extern int ext3_init_acl (handle_t *, struct inode *, struct inode *);
+
+ #else /* CONFIG_EXT3_FS_POSIX_ACL */
+ #include <linux/sched.h>
+-#define ext3_permission NULL
++#define ext3_check_acl NULL
+
+ static inline int
+ ext3_acl_chmod(struct inode *inode)
+--- a/fs/ext3/file.c
++++ b/fs/ext3/file.c
+@@ -23,8 +23,10 @@
+ #include <linux/jbd.h>
+ #include <linux/ext3_fs.h>
+ #include <linux/ext3_jbd.h>
++#include "namei.h"
+ #include "xattr.h"
+ #include "acl.h"
++#include "nfs4acl.h"
+
+ /*
+ * Called when an inode is released. Note that this is different
+@@ -134,5 +136,7 @@ const struct inode_operations ext3_file_
+ .removexattr = generic_removexattr,
+ #endif
+ .permission = ext3_permission,
++ .may_create = ext3_may_create,
++ .may_delete = ext3_may_delete,
+ };
+
+--- a/fs/ext3/ialloc.c
++++ b/fs/ext3/ialloc.c
+@@ -28,6 +28,7 @@
+
+ #include "xattr.h"
+ #include "acl.h"
++#include "nfs4acl.h"
+
+ /*
+ * ialloc.c contains the inodes allocation and deallocation routines
+@@ -595,7 +596,10 @@ got:
+ goto fail_drop;
+ }
+
+- err = ext3_init_acl(handle, inode, dir);
++ if (test_opt(sb, NFS4ACL))
++ err = ext3_nfs4acl_init(handle, inode, dir);
++ else
++ err = ext3_init_acl(handle, inode, dir);
+ if (err)
+ goto fail_free_drop;
+
+--- a/fs/ext3/inode.c
++++ b/fs/ext3/inode.c
+@@ -38,6 +38,7 @@
+ #include <linux/bio.h>
+ #include "xattr.h"
+ #include "acl.h"
++#include "nfs4acl.h"
+
+ static int ext3_writepage_trans_blocks(struct inode *inode);
+
+@@ -2684,6 +2685,9 @@ struct inode *ext3_iget(struct super_blo
+ ei->i_acl = EXT3_ACL_NOT_CACHED;
+ ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ ei->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED;
++#endif
+ ei->i_block_alloc_info = NULL;
+
+ ret = __ext3_get_inode_loc(inode, &iloc, 0);
+@@ -2983,6 +2987,65 @@ int ext3_write_inode(struct inode *inode
+ return ext3_force_commit(inode->i_sb);
+ }
+
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++static int ext3_inode_change_ok(struct inode *inode, struct iattr *attr)
++{
++ unsigned int ia_valid = attr->ia_valid;
++
++ if (!test_opt(inode->i_sb, NFS4ACL))
++ return inode_change_ok(inode, attr);
++
++ /* If force is set do it anyway. */
++ if (ia_valid & ATTR_FORCE)
++ return 0;
++
++ /* Make sure a caller can chown. */
++ if ((ia_valid & ATTR_UID) &&
++ (current->fsuid != inode->i_uid ||
++ attr->ia_uid != inode->i_uid) &&
++ (current->fsuid != attr->ia_uid ||
++ ext3_nfs4acl_permission(inode, ACE4_WRITE_OWNER)) &&
++ !capable(CAP_CHOWN))
++ goto error;
++
++ /* Make sure caller can chgrp. */
++ if ((ia_valid & ATTR_GID)) {
++ int in_group = in_group_p(attr->ia_gid);
++ if ((current->fsuid != inode->i_uid ||
++ (!in_group && attr->ia_gid != inode->i_gid)) &&
++ (!in_group ||
++ ext3_nfs4acl_permission(inode, ACE4_WRITE_OWNER)) &&
++ !capable(CAP_CHOWN))
++ goto error;
++ }
++
++ /* Make sure a caller can chmod. */
++ if (ia_valid & ATTR_MODE) {
++ if (current->fsuid != inode->i_uid &&
++ ext3_nfs4acl_permission(inode, ACE4_WRITE_ACL) &&
++ !capable(CAP_FOWNER))
++ goto error;
++ /* Also check the setgid bit! */
++ if (!in_group_p((ia_valid & ATTR_GID) ? attr->ia_gid :
++ inode->i_gid) && !capable(CAP_FSETID))
++ attr->ia_mode &= ~S_ISGID;
++ }
++
++ /* Check for setting the inode time. */
++ if (ia_valid & (ATTR_MTIME_SET | ATTR_ATIME_SET)) {
++ if (current->fsuid != inode->i_uid &&
++ ext3_nfs4acl_permission(inode, ACE4_WRITE_ATTRIBUTES) &&
++ !capable(CAP_FOWNER))
++ goto error;
++ }
++ return 0;
++error:
++ return -EPERM;
++}
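++
++/*
++ * Example (illustrative): a process that does not own the file but is
++ * granted ACE4_WRITE_ACL by the file's acl may chmod it here, which
++ * the plain inode_change_ok() would refuse based on ownership alone.
++ */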
++#else
++# define ext3_inode_change_ok inode_change_ok
++#endif
++
+ /*
+ * ext3_setattr()
+ *
+@@ -3006,7 +3069,7 @@ int ext3_setattr(struct dentry *dentry,
+ int error, rc = 0;
+ const unsigned int ia_valid = attr->ia_valid;
+
+- error = inode_change_ok(inode, attr);
++ error = ext3_inode_change_ok(inode, attr);
+ if (error)
+ return error;
+
+@@ -3063,8 +3126,12 @@ int ext3_setattr(struct dentry *dentry,
+ if (inode->i_nlink)
+ ext3_orphan_del(NULL, inode);
+
+- if (!rc && (ia_valid & ATTR_MODE))
+- rc = ext3_acl_chmod(inode);
++ if (!rc && (ia_valid & ATTR_MODE)) {
++ if (test_opt(inode->i_sb, NFS4ACL))
++ rc = ext3_nfs4acl_chmod(inode);
++ else
++ rc = ext3_acl_chmod(inode);
++ }
+
+ err_out:
+ ext3_std_error(inode->i_sb, error);
+--- a/fs/ext3/namei.c
++++ b/fs/ext3/namei.c
+@@ -40,6 +40,7 @@
+ #include "namei.h"
+ #include "xattr.h"
+ #include "acl.h"
++#include "nfs4acl.h"
+
+ /*
+ * define how far ahead to read directories while searching them.
+@@ -2412,6 +2413,16 @@ end_rename:
+ return retval;
+ }
+
++int ext3_permission(struct inode *inode, int mask)
++{
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ if (test_opt(inode->i_sb, NFS4ACL))
++ return ext3_nfs4acl_permission(inode, nfs4acl_want_to_mask(mask));
++ else
++#endif
++ return generic_permission(inode, mask, ext3_check_acl);
++}
++
+ /*
+ * directories can handle most operations...
+ */
+@@ -2433,6 +2444,8 @@ const struct inode_operations ext3_dir_i
+ .removexattr = generic_removexattr,
+ #endif
+ .permission = ext3_permission,
++ .may_create = ext3_may_create,
++ .may_delete = ext3_may_delete,
+ };
+
+ const struct inode_operations ext3_special_inode_operations = {
+@@ -2444,4 +2457,6 @@ const struct inode_operations ext3_speci
+ .removexattr = generic_removexattr,
+ #endif
+ .permission = ext3_permission,
++ .may_create = ext3_may_create,
++ .may_delete = ext3_may_delete,
+ };
+--- a/fs/ext3/namei.h
++++ b/fs/ext3/namei.h
+@@ -5,4 +5,5 @@
+ *
+ */
+
++extern int ext3_permission (struct inode *, int);
+ extern struct dentry *ext3_get_parent(struct dentry *child);
+--- /dev/null
++++ b/fs/ext3/nfs4acl.c
+@@ -0,0 +1,370 @@
++/*
++ * Copyright (C) 2006 Andreas Gruenbacher <a.gruenbacher@computer.org>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License as published by the
++ * Free Software Foundation; either version 2, or (at your option) any
++ * later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ */
++
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/ext3_jbd.h>
++#include <linux/ext3_fs.h>
++#include <linux/nfs4acl_xattr.h>
++#include "namei.h"
++#include "xattr.h"
++#include "nfs4acl.h"
++
++static inline struct nfs4acl *
++ext3_iget_nfs4acl(struct inode *inode)
++{
++ struct nfs4acl *acl = EXT3_NFS4ACL_NOT_CACHED;
++ struct ext3_inode_info *ei = EXT3_I(inode);
++
++ spin_lock(&inode->i_lock);
++ if (ei->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED)
++ acl = nfs4acl_get(ei->i_nfs4acl);
++ spin_unlock(&inode->i_lock);
++
++ return acl;
++}
++
++static inline void
++ext3_iset_nfs4acl(struct inode *inode, struct nfs4acl *acl)
++{
++ struct ext3_inode_info *ei = EXT3_I(inode);
++
++ spin_lock(&inode->i_lock);
++ if (ei->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED)
++ nfs4acl_put(ei->i_nfs4acl);
++ ei->i_nfs4acl = nfs4acl_get(acl);
++ spin_unlock(&inode->i_lock);
++}
++
++static struct nfs4acl *
++ext3_get_nfs4acl(struct inode *inode)
++{
++ const int name_index = EXT3_XATTR_INDEX_NFS4ACL;
++ void *value = NULL;
++ struct nfs4acl *acl;
++ int retval;
++
++ if (!test_opt(inode->i_sb, NFS4ACL))
++ return NULL;
++
++ acl = ext3_iget_nfs4acl(inode);
++ if (acl != EXT3_NFS4ACL_NOT_CACHED)
++ return acl;
++ retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
++ if (retval > 0) {
++ value = kmalloc(retval, GFP_KERNEL);
++ if (!value)
++ return ERR_PTR(-ENOMEM);
++ retval = ext3_xattr_get(inode, name_index, "", value, retval);
++ }
++ if (retval > 0) {
++ acl = nfs4acl_from_xattr(value, retval);
++ if (acl == ERR_PTR(-EINVAL))
++ acl = ERR_PTR(-EIO);
++ } else if (retval == -ENODATA || retval == -ENOSYS)
++ acl = NULL;
++ else
++ acl = ERR_PTR(retval);
++ kfree(value);
++
++ if (!IS_ERR(acl))
++ ext3_iset_nfs4acl(inode, acl);
++
++ return acl;
++}
++
++static int
++ext3_set_nfs4acl(handle_t *handle, struct inode *inode, struct nfs4acl *acl)
++{
++ const int name_index = EXT3_XATTR_INDEX_NFS4ACL;
++ size_t size = 0;
++ void *value = NULL;
++ int retval;
++
++ if (acl) {
++ size = nfs4acl_xattr_size(acl);
++ value = kmalloc(size, GFP_KERNEL);
++ if (!value)
++ return -ENOMEM;
++ nfs4acl_to_xattr(acl, value);
++ }
++ if (handle)
++ retval = ext3_xattr_set_handle(handle, inode, name_index, "",
++ value, size, 0);
++ else
++ retval = ext3_xattr_set(inode, name_index, "", value, size, 0);
++ kfree(value);
++ if (!retval)
++ ext3_iset_nfs4acl(inode, acl);
++
++ return retval;
++}
++
++int
++ext3_nfs4acl_permission(struct inode *inode, unsigned int mask)
++{
++ struct nfs4acl *acl;
++ int retval;
++
++ BUG_ON(!test_opt(inode->i_sb, NFS4ACL));
++
++ acl = ext3_get_nfs4acl(inode);
++ if (!acl)
++ retval = nfs4acl_generic_permission(inode, mask);
++ else if (IS_ERR(acl))
++ retval = PTR_ERR(acl);
++ else {
++ retval = nfs4acl_permission(inode, acl, mask);
++ nfs4acl_put(acl);
++ }
++
++ return retval;
++}
++
++int ext3_may_create(struct inode *dir, int isdir)
++{
++ int error;
++
++ if (test_opt(dir->i_sb, NFS4ACL)) {
++ unsigned int mask = (isdir ? ACE4_ADD_SUBDIRECTORY : ACE4_ADD_FILE) |
++ ACE4_EXECUTE;
++
++ error = ext3_nfs4acl_permission(dir, mask);
++ } else
++ error = ext3_permission(dir, MAY_WRITE | MAY_EXEC);
++
++ return error;
++}
++
++static int check_sticky(struct inode *dir, struct inode *inode)
++{
++ if (!(dir->i_mode & S_ISVTX))
++ return 0;
++ if (inode->i_uid == current->fsuid)
++ return 0;
++ if (dir->i_uid == current->fsuid)
++ return 0;
++ return !capable(CAP_FOWNER);
++}
++
++int ext3_may_delete(struct inode *dir, struct inode *inode)
++{
++ int error;
++
++ if (test_opt(inode->i_sb, NFS4ACL)) {
++ error = ext3_nfs4acl_permission(dir, ACE4_DELETE_CHILD | ACE4_EXECUTE);
++ if (!error && check_sticky(dir, inode))
++ error = -EPERM;
++ if (error && !ext3_nfs4acl_permission(inode, ACE4_DELETE))
++ error = 0;
++ } else {
++ error = ext3_permission(dir, MAY_WRITE | MAY_EXEC);
++ if (!error && check_sticky(dir, inode))
++ error = -EPERM;
++ }
++
++ return error;
++}
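++
++/*
++ * Illustrative consequence of the checks above: a process that is
++ * denied ACE4_DELETE_CHILD on the directory can still delete a file
++ * whose own acl grants it ACE4_DELETE.
++ */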
++
++int
++ext3_nfs4acl_init(handle_t *handle, struct inode *inode, struct inode *dir)
++{
++ struct nfs4acl *dir_acl = NULL, *acl;
++ int retval;
++
++ if (!S_ISLNK(inode->i_mode))
++ dir_acl = ext3_get_nfs4acl(dir);
++ if (!dir_acl || IS_ERR(dir_acl)) {
++ inode->i_mode &= ~current->fs->umask;
++ return PTR_ERR(dir_acl);
++ }
++ acl = nfs4acl_inherit(dir_acl, inode->i_mode);
++ nfs4acl_put(dir_acl);
++
++ retval = PTR_ERR(acl);
++ if (acl && !IS_ERR(acl)) {
++ retval = ext3_set_nfs4acl(handle, inode, acl);
++ inode->i_mode = (inode->i_mode & ~S_IRWXUGO) |
++ nfs4acl_masks_to_mode(acl);
++ nfs4acl_put(acl);
++ }
++ return retval;
++}
++
++int
++ext3_nfs4acl_chmod(struct inode *inode)
++{
++ struct nfs4acl *acl;
++ int retval;
++
++ if (S_ISLNK(inode->i_mode))
++ return -EOPNOTSUPP;
++ acl = ext3_get_nfs4acl(inode);
++ if (!acl || IS_ERR(acl))
++ return PTR_ERR(acl);
++ acl = nfs4acl_chmod(acl, inode->i_mode);
++ if (IS_ERR(acl))
++ return PTR_ERR(acl);
++ retval = ext3_set_nfs4acl(NULL, inode, acl);
++ nfs4acl_put(acl);
++
++ return retval;
++}
++
++static size_t
++ext3_xattr_list_nfs4acl(struct inode *inode, char *list, size_t list_len,
++ const char *name, size_t name_len)
++{
++ const size_t size = sizeof(NFS4ACL_XATTR);
++
++ if (!test_opt(inode->i_sb, NFS4ACL))
++ return 0;
++ if (list && size <= list_len)
++ memcpy(list, NFS4ACL_XATTR, size);
++ return size;
++}
++
++static int
++ext3_xattr_get_nfs4acl(struct inode *inode, const char *name, void *buffer,
++ size_t buffer_size)
++{
++ struct nfs4acl *acl;
++ size_t size;
++
++ if (!test_opt(inode->i_sb, NFS4ACL))
++ return -EOPNOTSUPP;
++ if (strcmp(name, "") != 0)
++ return -EINVAL;
++
++ acl = ext3_get_nfs4acl(inode);
++ if (IS_ERR(acl))
++ return PTR_ERR(acl);
++ if (acl == NULL)
++ return -ENODATA;
++ size = nfs4acl_xattr_size(acl);
++ if (buffer) {
++ if (size > buffer_size) {
++ nfs4acl_put(acl);
++ return -ERANGE;
++ }
++ nfs4acl_to_xattr(acl, buffer);
++ }
++ nfs4acl_put(acl);
++
++ return size;
++}
++
++#ifdef NFS4ACL_DEBUG
++static size_t
++ext3_xattr_list_masked_nfs4acl(struct inode *inode, char *list, size_t list_len,
++ const char *name, size_t name_len)
++{
++ return 0;
++}
++
++static int
++ext3_xattr_get_masked_nfs4acl(struct inode *inode, const char *name,
++ void *buffer, size_t buffer_size)
++{
++ const int name_index = EXT3_XATTR_INDEX_NFS4ACL;
++ struct nfs4acl *acl;
++ void *xattr;
++ size_t size;
++ int retval;
++
++ if (!test_opt(inode->i_sb, NFS4ACL))
++ return -EOPNOTSUPP;
++ if (strcmp(name, "") != 0)
++ return -EINVAL;
++ retval = ext3_xattr_get(inode, name_index, "", NULL, 0);
++ if (retval <= 0)
++ return retval;
++ xattr = kmalloc(retval, GFP_KERNEL);
++ if (!xattr)
++ return -ENOMEM;
++ retval = ext3_xattr_get(inode, name_index, "", xattr, retval);
++ if (retval <= 0) {
++ kfree(xattr);
++ return retval;
++ }
++ acl = nfs4acl_from_xattr(xattr, retval);
++ kfree(xattr);
++ if (IS_ERR(acl))
++ return PTR_ERR(acl);
++ retval = nfs4acl_apply_masks(&acl);
++ if (retval) {
++ nfs4acl_put(acl);
++ return retval;
++ }
++ size = nfs4acl_xattr_size(acl);
++ if (buffer) {
++ if (size > buffer_size) {
++ nfs4acl_put(acl);
++ return -ERANGE;
++ }
++ nfs4acl_to_xattr(acl, buffer);
++ }
++ nfs4acl_put(acl);
++ return size;
++}
++#endif
++
++static int
++ext3_xattr_set_nfs4acl(struct inode *inode, const char *name,
++ const void *value, size_t size, int flags)
++{
++ handle_t *handle;
++ struct nfs4acl *acl = NULL;
++ int retval, retries = 0;
++
++ if (S_ISLNK(inode->i_mode) || !test_opt(inode->i_sb, NFS4ACL))
++ return -EOPNOTSUPP;
++ if (strcmp(name, "") != 0)
++ return -EINVAL;
++ if (current->fsuid != inode->i_uid &&
++ ext3_nfs4acl_permission(inode, ACE4_WRITE_ACL) &&
++ !capable(CAP_FOWNER))
++ return -EPERM;
++ if (value) {
++ acl = nfs4acl_from_xattr(value, size);
++ if (IS_ERR(acl))
++ return PTR_ERR(acl);
++
++ inode->i_mode &= ~S_IRWXUGO;
++ inode->i_mode |= nfs4acl_masks_to_mode(acl);
++ }
++
++retry:
++ handle = ext3_journal_start(inode, EXT3_DATA_TRANS_BLOCKS(inode->i_sb));
++ if (IS_ERR(handle))
++ return PTR_ERR(handle);
++ ext3_mark_inode_dirty(handle, inode);
++ retval = ext3_set_nfs4acl(handle, inode, acl);
++ ext3_journal_stop(handle);
++ if (retval == -ENOSPC && ext3_should_retry_alloc(inode->i_sb, &retries))
++ goto retry;
++ nfs4acl_put(acl);
++ return retval;
++}
++
++struct xattr_handler ext3_nfs4acl_xattr_handler = {
++ .prefix = NFS4ACL_XATTR,
++ .list = ext3_xattr_list_nfs4acl,
++ .get = ext3_xattr_get_nfs4acl,
++ .set = ext3_xattr_set_nfs4acl,
++};
++
++#ifdef NFS4ACL_DEBUG
++struct xattr_handler ext3_masked_nfs4acl_xattr_handler = {
++ .prefix = "system.masked-nfs4acl",
++ .list = ext3_xattr_list_masked_nfs4acl,
++ .get = ext3_xattr_get_masked_nfs4acl,
++ .set = ext3_xattr_set_nfs4acl,
++};
++#endif
+--- /dev/null
++++ b/fs/ext3/nfs4acl.h
+@@ -0,0 +1,36 @@
++#ifndef __FS_EXT3_NFS4ACL_H
++#define __FS_EXT3_NFS4ACL_H
++
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++
++#include <linux/nfs4acl.h>
++
++/* Value for i_nfs4acl if NFS4ACL has not been cached */
++#define EXT3_NFS4ACL_NOT_CACHED ((void *)-1)
++
++extern int ext3_nfs4acl_permission(struct inode *, unsigned int);
++extern int ext3_may_create(struct inode *, int);
++extern int ext3_may_delete(struct inode *, struct inode *);
++extern int ext3_nfs4acl_init(handle_t *, struct inode *, struct inode *);
++extern int ext3_nfs4acl_chmod(struct inode *);
++
++#else /* CONFIG_EXT3_FS_NFS4ACL */
++
++#define ext3_may_create NULL
++#define ext3_may_delete NULL
++
++static inline int
++ext3_nfs4acl_init(handle_t *handle, struct inode *inode, struct inode *dir)
++{
++ return 0;
++}
++
++static inline int
++ext3_nfs4acl_chmod(struct inode *inode)
++{
++ return 0;
++}
++
++#endif /* CONFIG_FS_EXT3_NFS4ACL */
++
++#endif /* __FS_EXT3_NFS4ACL_H */
+--- a/fs/ext3/super.c
++++ b/fs/ext3/super.c
+@@ -36,12 +36,14 @@
+ #include <linux/namei.h>
+ #include <linux/quotaops.h>
+ #include <linux/seq_file.h>
++#include <linux/nfs4acl.h>
+ #include <linux/log2.h>
+
+ #include <asm/uaccess.h>
+
+ #include "xattr.h"
+ #include "acl.h"
++#include "nfs4acl.h"
+ #include "namei.h"
+
+ static int ext3_load_journal(struct super_block *, struct ext3_super_block *,
+@@ -454,6 +456,9 @@ static struct inode *ext3_alloc_inode(st
+ ei->i_acl = EXT3_ACL_NOT_CACHED;
+ ei->i_default_acl = EXT3_ACL_NOT_CACHED;
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ ei->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED;
++#endif
+ ei->i_block_alloc_info = NULL;
+ ei->vfs_inode.i_version = 1;
+ return &ei->vfs_inode;
+@@ -516,6 +521,13 @@ static void ext3_clear_inode(struct inod
+ EXT3_I(inode)->i_default_acl = EXT3_ACL_NOT_CACHED;
+ }
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ if (EXT3_I(inode)->i_nfs4acl &&
++ EXT3_I(inode)->i_nfs4acl != EXT3_NFS4ACL_NOT_CACHED) {
++ nfs4acl_put(EXT3_I(inode)->i_nfs4acl);
++ EXT3_I(inode)->i_nfs4acl = EXT3_NFS4ACL_NOT_CACHED;
++ }
++#endif
+ ext3_discard_reservation(inode);
+ EXT3_I(inode)->i_block_alloc_info = NULL;
+ if (unlikely(rsv))
+@@ -750,7 +762,7 @@ enum {
+ Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
+ Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
+ Opt_nouid32, Opt_nocheck, Opt_debug, Opt_oldalloc, Opt_orlov,
+- Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
++ Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_acl_flavor, Opt_noacl,
+ Opt_reservation, Opt_noreservation, Opt_noload, Opt_nobh, Opt_bh,
+ Opt_commit, Opt_journal_update, Opt_journal_inum, Opt_journal_dev,
+ Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
+@@ -782,6 +794,7 @@ static match_table_t tokens = {
+ {Opt_user_xattr, "user_xattr"},
+ {Opt_nouser_xattr, "nouser_xattr"},
+ {Opt_acl, "acl"},
++ {Opt_acl_flavor, "acl=%s"},
+ {Opt_noacl, "noacl"},
+ {Opt_reservation, "reservation"},
+ {Opt_noreservation, "noreservation"},
+@@ -925,19 +938,33 @@ static int parse_options (char *options,
+ printk("EXT3 (no)user_xattr options not supported\n");
+ break;
+ #endif
+-#ifdef CONFIG_EXT3_FS_POSIX_ACL
+ case Opt_acl:
+- set_opt(sbi->s_mount_opt, POSIX_ACL);
++ args[0].to = args[0].from;
++ /* fall through */
++ case Opt_acl_flavor:
++#ifdef CONFIG_EXT3_FS_POSIX_ACL
++ if (match_string(&args[0], "") ||
++ match_string(&args[0], "posix")) {
++ set_opt(sbi->s_mount_opt, POSIX_ACL);
++ clear_opt(sbi->s_mount_opt, NFS4ACL);
++ } else
++#endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ if (match_string(&args[0], "nfs4")) {
++ clear_opt(sbi->s_mount_opt, POSIX_ACL);
++ set_opt(sbi->s_mount_opt, NFS4ACL);
++ } else
++#endif
++ {
++ printk(KERN_ERR "EXT3-fs: unsupported acl "
++ "flavor\n");
++ return 0;
++ }
+ break;
+ case Opt_noacl:
+ clear_opt(sbi->s_mount_opt, POSIX_ACL);
++ clear_opt(sbi->s_mount_opt, NFS4ACL);
+ break;
+-#else
+- case Opt_acl:
+- case Opt_noacl:
+- printk("EXT3 (no)acl options not supported\n");
+- break;
+-#endif
+ case Opt_reservation:
+ set_opt(sbi->s_mount_opt, RESERVATION);
+ break;
+@@ -1607,8 +1634,11 @@ static int ext3_fill_super (struct super
+ NULL, 0))
+ goto failed_mount;
+
+- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+- ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
++ sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
++ if (sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL)
++ sb->s_flags |= MS_POSIXACL;
++ if (sbi->s_mount_opt & EXT3_MOUNT_NFS4ACL)
++ sb->s_flags |= MS_POSIXACL | MS_WITHAPPEND;
+
+ if (le32_to_cpu(es->s_rev_level) == EXT3_GOOD_OLD_REV &&
+ (EXT3_HAS_COMPAT_FEATURE(sb, ~0U) ||
+@@ -2451,8 +2481,12 @@ static int ext3_remount (struct super_bl
+ if (sbi->s_mount_opt & EXT3_MOUNT_ABORT)
+ ext3_abort(sb, __func__, "Abort forced by user");
+
+- sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
+- ((sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL) ? MS_POSIXACL : 0);
++ sb->s_flags = (sb->s_flags & ~MS_POSIXACL);
++ if (sbi->s_mount_opt & EXT3_MOUNT_POSIX_ACL)
++ sb->s_flags |= MS_POSIXACL;
++ if (sbi->s_mount_opt & EXT3_MOUNT_NFS4ACL)
++ sb->s_flags |= MS_POSIXACL;
++
+
+ es = sbi->s_es;
+
+--- a/fs/ext3/xattr.c
++++ b/fs/ext3/xattr.c
+@@ -114,6 +114,9 @@ static struct xattr_handler *ext3_xattr_
+ #ifdef CONFIG_EXT3_FS_SECURITY
+ [EXT3_XATTR_INDEX_SECURITY] = &ext3_xattr_security_handler,
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ [EXT3_XATTR_INDEX_NFS4ACL] = &ext3_nfs4acl_xattr_handler,
++#endif
+ };
+
+ struct xattr_handler *ext3_xattr_handlers[] = {
+@@ -126,6 +129,12 @@ struct xattr_handler *ext3_xattr_handler
+ #ifdef CONFIG_EXT3_FS_SECURITY
+ &ext3_xattr_security_handler,
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ &ext3_nfs4acl_xattr_handler,
++#ifdef NFS4ACL_DEBUG
++ &ext3_masked_nfs4acl_xattr_handler,
++#endif
++#endif
+ NULL
+ };
+
+--- a/fs/ext3/xattr.h
++++ b/fs/ext3/xattr.h
+@@ -21,6 +21,7 @@
+ #define EXT3_XATTR_INDEX_TRUSTED 4
+ #define EXT3_XATTR_INDEX_LUSTRE 5
+ #define EXT3_XATTR_INDEX_SECURITY 6
++#define EXT3_XATTR_INDEX_NFS4ACL 7
+
+ struct ext3_xattr_header {
+ __le32 h_magic; /* magic number for identification */
+@@ -63,6 +64,10 @@ extern struct xattr_handler ext3_xattr_t
+ extern struct xattr_handler ext3_xattr_acl_access_handler;
+ extern struct xattr_handler ext3_xattr_acl_default_handler;
+ extern struct xattr_handler ext3_xattr_security_handler;
++extern struct xattr_handler ext3_nfs4acl_xattr_handler;
++#ifdef NFS4ACL_DEBUG
++extern struct xattr_handler ext3_masked_nfs4acl_xattr_handler;
++#endif
+
+ extern ssize_t ext3_listxattr(struct dentry *, char *, size_t);
+
+--- a/include/linux/ext3_fs.h
++++ b/include/linux/ext3_fs.h
+@@ -380,6 +380,7 @@ struct ext3_inode {
+ #define EXT3_MOUNT_QUOTA 0x80000 /* Some quota option set */
+ #define EXT3_MOUNT_USRQUOTA 0x100000 /* "old" user quota */
+ #define EXT3_MOUNT_GRPQUOTA 0x200000 /* "old" group quota */
++#define EXT3_MOUNT_NFS4ACL 0x400000 /* NFS version 4 ACLs */
+
+ /* Compatibility, for having both ext2_fs.h and ext3_fs.h included at once */
+ #ifndef _LINUX_EXT2_FS_H
+--- a/include/linux/ext3_fs_i.h
++++ b/include/linux/ext3_fs_i.h
+@@ -107,6 +107,9 @@ struct ext3_inode_info {
+ struct posix_acl *i_acl;
+ struct posix_acl *i_default_acl;
+ #endif
++#ifdef CONFIG_EXT3_FS_NFS4ACL
++ struct nfs4acl *i_nfs4acl;
++#endif
+
+ struct list_head i_orphan; /* unlinked but open inodes */
+
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: nfsacl: improve cache consistency
+
+(This one is currently disabled.)
+
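+The idea, as implemented below: cached NFS ACLs are kept only when the
+server's weak cache consistency (WCC) data shows that the inode's ctime
+did not change unexpectedly; otherwise the cached ACLs are forgotten.
+For example, in nfs3_proc_setacls():
+
+	if ((fattr.valid & NFS_ATTR_WCC) &&
+	    timespec_equal(&inode->i_ctime, &fattr.pre_ctime))
+		nfs_cache_acls(inode, acl, dfacl);
+	else
+		nfs_forget_cached_acls(inode);
+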
+Index: linux-2.6.11-rc2/fs/nfs/inode.c
+===================================================================
+--- linux-2.6.11-rc2.orig/fs/nfs/inode.c
++++ linux-2.6.11-rc2/fs/nfs/inode.c
+@@ -65,13 +65,8 @@ static int nfs_statfs(struct super_bloc
+ static int nfs_show_options(struct seq_file *, struct vfsmount *);
+
+ #ifdef CONFIG_NFS_ACL
+-static void nfs_forget_cached_acls(struct inode *);
+ static void __nfs_forget_cached_acls(struct nfs_inode *nfsi);
+ #else
+-static inline void nfs_forget_cached_acls(struct inode *inode)
+-{
+-}
+-
+ static inline void __nfs_forget_cached_acls(struct nfs_inode *nfsi)
+ {
+ }
+@@ -1188,7 +1183,7 @@ static void __nfs_forget_cached_acls(str
+ #endif /* CONFIG_NFS_ACL */
+
+ #ifdef CONFIG_NFS_ACL
+-static void nfs_forget_cached_acls(struct inode *inode)
++void nfs_forget_cached_acls(struct inode *inode)
+ {
+ dprintk("NFS: nfs_forget_cached_acls(%s/%ld)\n", inode->i_sb->s_id,
+ inode->i_ino);
+@@ -1293,6 +1288,8 @@ int nfs_refresh_inode(struct inode *inod
+ if ((fattr->valid & NFS_ATTR_WCC) != 0) {
+ if (timespec_equal(&inode->i_ctime, &fattr->pre_ctime))
+ memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
++ else
++ nfs_forget_cached_acls(inode);
+ if (timespec_equal(&inode->i_mtime, &fattr->pre_mtime))
+ memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
+ }
+Index: linux-2.6.11-rc2/fs/nfs/nfs3proc.c
+===================================================================
+--- linux-2.6.11-rc2.orig/fs/nfs/nfs3proc.c
++++ linux-2.6.11-rc2/fs/nfs/nfs3proc.c
+@@ -876,7 +876,11 @@ nfs3_proc_setacls(struct inode *inode, s
+ acl = NULL;
+ }
+ }
+- nfs_cache_acls(inode, acl, dfacl);
++ if ((fattr.valid & NFS_ATTR_WCC) &&
++ timespec_equal(&inode->i_ctime, &fattr.pre_ctime))
++ nfs_cache_acls(inode, acl, dfacl);
++ else
++ nfs_forget_cached_acls(inode);
+ status = nfs_refresh_inode(inode, &fattr);
+ }
+
+Index: linux-2.6.11-rc2/include/linux/nfs_fs.h
+===================================================================
+--- linux-2.6.11-rc2.orig/include/linux/nfs_fs.h
++++ linux-2.6.11-rc2/include/linux/nfs_fs.h
+@@ -293,6 +293,13 @@ extern struct inode *nfs_fhget(struct su
+ struct nfs_fattr *);
+ extern struct posix_acl *nfs_get_cached_acl(struct inode *, int);
+ extern void nfs_cache_acls(struct inode *, struct posix_acl *, struct posix_acl *);
++#ifdef CONFIG_NFS_ACL
++void nfs_forget_cached_acls(struct inode *);
++#else
++static inline void nfs_forget_cached_acls(struct inode *inode)
++{
++}
++#endif
+ extern int nfs_refresh_inode(struct inode *, struct nfs_fattr *);
+ extern int nfs_getattr(struct vfsmount *, struct dentry *, struct kstat *);
+ extern int nfs_permission(struct inode *, int, struct nameidata *);
--- /dev/null
+From: Goldwyn Rodrigues <rgoldwyn@suse.de>
+Subject: Fix oops in set_map_drive
+References: bnc#446824, bnc#444469
+
+The oops was caused by an unconditional kfree() introduced by the
+merge changes: the drive map entry was freed even when it had been
+linked into DriveMapList.
+The error occurred because novfs_set_map_drive was not being called
+with the right args, which caused it to request an incorrect memory
+size.
+Some debug messages were also cleaned up and corrected.
+
+
+---
+ fs/novfs/daemon.c | 9 +++++----
+ fs/novfs/inode.c | 11 -----------
+ 2 files changed, 5 insertions(+), 15 deletions(-)
+
+Index: linux-2.6.27/fs/novfs/daemon.c
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/daemon.c 2008-12-02 14:57:29.000000000 +0530
++++ linux-2.6.27/fs/novfs/daemon.c 2008-12-02 15:03:15.000000000 +0530
+@@ -1936,7 +1936,7 @@ static int set_map_drive(struct novfs_xp
+ full_name_hash(drivemap->name,
+ symInfo.linkOffsetLength - 1);
+ drivemap->namelen = symInfo.linkOffsetLength - 1;
+- DbgPrint("NwdSetMapDrive: hash=0x%x path=%s\n",
++ DbgPrint("set_map_drive: hash=0x%lx path=%s\n",
+ drivemap->hash, drivemap->name);
+
+ dm = (struct drive_map *) & DriveMapList.next;
+@@ -1945,8 +1945,8 @@ static int set_map_drive(struct novfs_xp
+
+ list_for_each(list, &DriveMapList) {
+ dm = list_entry(list, struct drive_map, list);
+- DbgPrint("NwdSetMapDrive: dm=0x%p\n"
+- " hash: 0x%x\n"
++ DbgPrint("set_map_drive: dm=0x%p\n"
++ " hash: 0x%lx\n"
+ " namelen: %d\n"
+ " name: %s\n",
+ dm, dm->hash, dm->namelen, dm->name);
+@@ -1971,7 +1971,8 @@ static int set_map_drive(struct novfs_xp
+ &dm->list);
+ }
+ }
+- kfree(drivemap);
++ else
++ kfree(drivemap);
+ up(&DriveMapLock);
+ return (retVal);
+ }
+Index: linux-2.6.27/fs/novfs/inode.c
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/inode.c 2008-12-02 15:01:46.000000000 +0530
++++ linux-2.6.27/fs/novfs/inode.c 2008-12-02 15:01:52.000000000 +0530
+@@ -4055,22 +4055,11 @@ int __init init_novfs(void)
+
+ void __exit exit_novfs(void)
+ {
+- printk(KERN_INFO "exit_novfs\n");
+-
+ novfs_scope_exit();
+- printk(KERN_INFO "exit_novfs after Scope_Uninit\n");
+-
+ novfs_daemon_queue_exit();
+- printk(KERN_INFO "exit_novfs after Uninit_Daemon_Queue\n");
+-
+ novfs_profile_exit();
+- printk(KERN_INFO "exit_novfs after profile_exit\n");
+-
+ novfs_proc_exit();
+- printk(KERN_INFO "exit_novfs Uninit_Procfs_Interface\n");
+-
+ unregister_filesystem(&novfs_fs_type);
+- printk(KERN_INFO "exit_novfs: Exit\n");
+
+ if (novfs_current_mnt) {
+ kfree(novfs_current_mnt);
--- /dev/null
+From: Goldwyn Rodrigues <rgoldwyn@novell.com>
+Subject: Merge changes left out during code pull
+References: 445000
+
+Also contains a fix for clean shutdown during umount: the superblock's
+dcache is shrunk before kill_litter_super() runs.
+
+---
+ fs/novfs/file.c | 55 ++++++++++++++++++++---------------------
+ fs/novfs/inode.c | 71 ++++++++++++++++++++++++++++++++++++------------------
+ fs/novfs/nwcapi.c | 13 ++++++---
+ fs/novfs/vfs.h | 3 +-
+ 4 files changed, 85 insertions(+), 57 deletions(-)
+
+Index: linux-2.6.27/fs/novfs/file.c
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/file.c 2008-12-02 11:57:24.000000000 +0530
++++ linux-2.6.27/fs/novfs/file.c 2008-12-02 11:58:18.000000000 +0530
+@@ -405,7 +405,7 @@ int novfs_setx_file_info(char *Path, con
+
+ cmd->flags = flags;
+ cmd->pathLen = pathlen;
+- memcpy(cmd->data, Path, cmd->pathLen + 1); //+ '\0'
++ memcpy(cmd->data, Path, cmd->pathLen);
+
+ cmd->nameLen = namelen;
+ memcpy(cmd->data + cmd->pathLen + 1, Name, cmd->nameLen + 1);
+@@ -601,7 +601,7 @@ static int begin_directory_enumerate(uns
+ return (retCode);
+ }
+
+-static int end_directory_enumerate(void *EnumHandle, struct novfs_schandle SessionId)
++int novfs_end_directory_enumerate(void *EnumHandle, struct novfs_schandle SessionId)
+ {
+ struct novfs_end_enumerate_directory_request cmd;
+ struct novfs_end_enumerate_directory_reply *reply = NULL;
+@@ -793,11 +793,9 @@ int novfs_get_dir_listex(unsigned char *
+ directory_enumerate_ex(EnumHandle, SessionId, Count, Info,
+ INTERRUPTIBLE);
+ if (retCode) {
+- end_directory_enumerate(*EnumHandle, SessionId);
+- if (-1 == retCode) {
+- retCode = 0;
+- *EnumHandle = Uint32toHandle(-1);
+- }
++ novfs_end_directory_enumerate(*EnumHandle, SessionId);
++ retCode = 0;
++ *EnumHandle = Uint32toHandle(-1);
+ }
+ }
+ return (retCode);
+@@ -915,32 +913,33 @@ int novfs_create(unsigned char * Path, i
+
+ cmdlen = offsetof(struct novfs_create_file_request, path) + pathlen;
+ cmd = kmalloc(cmdlen, GFP_KERNEL);
+- if (cmd) {
+- cmd->Command.CommandType = VFS_COMMAND_CREATE_FILE;
+- if (DirectoryFlag) {
+- cmd->Command.CommandType = VFS_COMMAND_CREATE_DIRECOTRY;
+- }
+- cmd->Command.SequenceNumber = 0;
+- cmd->Command.SessionId = SessionId;
++ if (!cmd)
++ return -ENOMEM;
++ cmd->Command.CommandType = VFS_COMMAND_CREATE_FILE;
++ if (DirectoryFlag) {
++ cmd->Command.CommandType = VFS_COMMAND_CREATE_DIRECOTRY;
++ }
++ cmd->Command.SequenceNumber = 0;
++ cmd->Command.SessionId = SessionId;
+
+- cmd->pathlength = pathlen;
+- memcpy(cmd->path, Path, pathlen);
++ cmd->pathlength = pathlen;
++ memcpy(cmd->path, Path, pathlen);
+
+- retCode =
+- Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply,
+- &replylen, INTERRUPTIBLE);
++ retCode =
++ Queue_Daemon_Command(cmd, cmdlen, NULL, 0, (void *)&reply,
++ &replylen, INTERRUPTIBLE);
++
++ if (reply) {
++ retCode = 0;
++ if (reply->Reply.ErrorCode) {
++ retCode = -EIO;
++ if (reply->Reply.ErrorCode == NWE_ACCESS_DENIED)
++ retCode = -EACCES;
+
+- if (reply) {
+- retCode = 0;
+- if (reply->Reply.ErrorCode) {
+- retCode = -EIO;
+- }
+- kfree(reply);
+ }
+- kfree(cmd);
+- } else {
+- retCode = -ENOMEM;
++ kfree(reply);
+ }
++ kfree(cmd);
+ return (retCode);
+ }
+
+Index: linux-2.6.27/fs/novfs/inode.c
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/inode.c 2008-12-02 11:57:24.000000000 +0530
++++ linux-2.6.27/fs/novfs/inode.c 2008-12-02 11:58:35.000000000 +0530
+@@ -137,11 +137,11 @@ int novfs_i_revalidate(struct dentry *de
+ * Extended attributes operations
+ */
+
+-int novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer,
++ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer,
+ size_t size);
+ int novfs_i_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t value_size, int flags);
+-int novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
++ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size);
+
+ void update_inode(struct inode *Inode, struct novfs_entry_info *Info);
+
+@@ -262,21 +262,17 @@ static struct inode_operations novfs_ino
+ .rename = novfs_i_rename,
+ .setattr = novfs_i_setattr,
+ .getattr = novfs_i_getattr,
+-/*
+ .getxattr = novfs_i_getxattr,
+ .setxattr = novfs_i_setxattr,
+ .listxattr = novfs_i_listxattr,
+-*/
+ };
+
+ static struct inode_operations novfs_file_inode_operations = {
+ .setattr = novfs_i_setattr,
+ .getattr = novfs_i_getattr,
+-/*
+ .getxattr = novfs_i_getxattr,
+ .setxattr = novfs_i_setxattr,
+ .listxattr = novfs_i_listxattr,
+-*/
+ };
+
+ static struct super_operations novfs_ops = {
+@@ -935,14 +931,23 @@ int novfs_dir_open(struct inode *dir, st
+
+ int novfs_dir_release(struct inode *dir, struct file *file)
+ {
+- struct file_private *file_private;
+- file_private = (struct file_private *) file->private_data;
++ struct file_private *file_private = file->private_data;
++ struct inode *inode = file->f_dentry->d_inode;
++ struct novfs_schandle sessionId;
+
+ DbgPrint("novfs_dir_release: Inode 0x%p %d Name %.*s\n", dir,
+ dir->i_ino, file->f_dentry->d_name.len,
+ file->f_dentry->d_name.name);
+
+ if (file_private) {
++ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) {
++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
++ if (SC_PRESENT(sessionId) == 0) {
++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
++ }
++ novfs_end_directory_enumerate(file_private->enumHandle, sessionId);
++ }
+ kfree(file_private);
+ file->private_data = NULL;
+ }
+@@ -966,6 +971,16 @@ loff_t novfs_dir_lseek(struct file * fil
+
+ file_private = (struct file_private *) file->private_data;
+ file_private->listedall = 0;
++ if (file_private->enumHandle && (file_private->enumHandle != ((void *)-1))) {
++ struct novfs_schandle sessionId;
++ struct inode *inode = file->f_dentry->d_inode;
++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
++ if (SC_PRESENT(sessionId) == 0) {
++ ((struct inode_data *)inode->i_private)->Scope = novfs_get_scope(file->f_dentry);
++ sessionId = novfs_scope_get_sessionId(((struct inode_data *)inode->i_private)->Scope);
++ }
++ novfs_end_directory_enumerate(file_private->enumHandle, sessionId);
++ }
+ file_private->enumHandle = NULL;
+
+ return 0;
+@@ -2864,9 +2879,15 @@ int novfs_i_unlink(struct inode *dir, st
+ } else {
+ retCode =
+ novfs_delete(path,
+- S_ISDIR(inode->
+- i_mode),
+- session);
++ S_ISDIR(inode->i_mode), session);
++ if (retCode) {
++ struct iattr ia;
++ memset(&ia, 0, sizeof(ia));
++ ia.ia_valid = ATTR_MODE;
++ ia.ia_mode = S_IRWXU;
++ novfs_set_attr(path, &ia, session);
++ retCode = novfs_delete(path, S_ISDIR(inode->i_mode), session);
++ }
+ }
+ if (!retCode || IS_DEADDIR(inode)) {
+ novfs_remove_inode_entry(dir,
+@@ -3119,13 +3140,16 @@ int novfs_i_rename(struct inode *odir, s
+ }
+
+ retCode =
+- novfs_delete
+- (newpath,
+- S_ISDIR
+- (nd->
+- d_inode->
+- i_mode),
+- session);
++ novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session);
++ if (retCode) {
++ struct iattr ia;
++ memset(&ia, 0, sizeof(ia));
++ ia.ia_valid = ATTR_MODE;
++ ia.ia_mode = S_IRWXU;
++ novfs_set_attr(newpath, &ia, session);
++ retCode = novfs_delete(newpath, S_ISDIR(nd->d_inode->i_mode), session);
++ }
++
+ }
+
+ session = novfs_scope_get_sessionId(((struct inode_data *) ndir->i_private)->Scope);
+@@ -3378,7 +3402,7 @@ int novfs_i_getattr(struct vfsmount *mnt
+ return (retCode);
+ }
+
+-int novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer,
++ssize_t novfs_i_getxattr(struct dentry *dentry, const char *name, void *buffer,
+ size_t buffer_size)
+ {
+ struct inode *inode = dentry->d_inode;
+@@ -3528,7 +3552,7 @@ int novfs_i_setxattr(struct dentry *dent
+ return (retError);
+ }
+
+-int novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
++ssize_t novfs_i_listxattr(struct dentry *dentry, char *buffer, size_t buffer_size)
+ {
+ struct inode *inode = dentry->d_inode;
+ struct novfs_schandle sessionId;
+@@ -3720,6 +3744,9 @@ int novfs_statfs(struct dentry *de, stru
+ DbgPrint("fd=%llu\n", fd);
+ DbgPrint("te=%llu\n", te);
+ DbgPrint("fe=%llu\n", fd);
++ /* fix for Nautilus */
++ if (sb->s_blocksize == 0)
++ sb->s_blocksize = 4096;
+
+ buf->f_type = sb->s_magic;
+ buf->f_bsize = sb->s_blocksize;
+@@ -3762,7 +3789,6 @@ struct inode *novfs_get_inode(struct sup
+ inode->i_mode = mode;
+ inode->i_uid = Uid;
+ inode->i_gid = 0;
+- inode->i_sb->s_blocksize = sb->s_blocksize;
+ inode->i_blkbits = sb->s_blocksize_bits;
+ inode->i_blocks = 0;
+ inode->i_rdev = 0;
+@@ -3826,8 +3852,6 @@ struct inode *novfs_get_inode(struct sup
+ case S_IFDIR:
+ inode->i_op = &novfs_inode_operations;
+ inode->i_fop = &novfs_dir_operations;
+-
+- inode->i_sb->s_blocksize = 0;
+ inode->i_blkbits = 0;
+ break;
+
+@@ -3957,6 +3981,7 @@ static int novfs_get_sb(struct file_syst
+
+ static void novfs_kill_sb(struct super_block *super)
+ {
++ shrink_dcache_sb(super);
+ kill_litter_super(super);
+ }
+
+Index: linux-2.6.27/fs/novfs/vfs.h
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/vfs.h 2008-12-02 11:57:24.000000000 +0530
++++ linux-2.6.27/fs/novfs/vfs.h 2008-12-02 11:58:18.000000000 +0530
+@@ -344,7 +344,8 @@ extern int novfs_close_stream(void * Con
+ struct novfs_schandle SessionId);
+
+ extern int novfs_add_to_root(char *);
+-
++extern int novfs_end_directory_enumerate(void *EnumHandle,
++ struct novfs_schandle SessionId);
+
+ /*
+ * scope.c functions
+Index: linux-2.6.27/fs/novfs/nwcapi.c
+===================================================================
+--- linux-2.6.27.orig/fs/novfs/nwcapi.c 2008-12-02 11:57:24.000000000 +0530
++++ linux-2.6.27/fs/novfs/nwcapi.c 2008-12-02 11:58:18.000000000 +0530
+@@ -1845,14 +1845,14 @@ int novfs_set_map_drive(struct novfs_xpl
+
+ struct novfs_xplat_call_request *cmd;
+ struct novfs_xplat_call_reply *reply;
+- unsigned long status = 0, datalen, cmdlen, replylen, cpylen;
++ unsigned long status = 0, datalen, cmdlen, replylen;
+ struct nwc_map_drive_ex symInfo;
+
+ DbgPrint("Call to NwcSetMapDrive\n");
+- cpylen = copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo));
+ cmdlen = sizeof(*cmd);
+- datalen =
+- sizeof(symInfo) + symInfo.dirPathOffsetLength +
++ if (copy_from_user(&symInfo, pdata->reqData, sizeof(symInfo)))
++ return -EFAULT;
++ datalen = sizeof(symInfo) + symInfo.dirPathOffsetLength +
+ symInfo.linkOffsetLength;
+
+ DbgPrint(" cmdlen = %d\n", cmdlen);
+@@ -1876,7 +1876,10 @@ int novfs_set_map_drive(struct novfs_xpl
+ cmd->Command.SessionId = Session;
+ cmd->NwcCommand = NWC_MAP_DRIVE;
+
+- cpylen = copy_from_user(cmd->data, pdata->reqData, datalen);
++ if (copy_from_user(cmd->data, pdata->reqData, datalen)) {
++ kfree(cmd);
++ return -EFAULT;
++ }
+ status =
+ Queue_Daemon_Command((void *)cmd, cmdlen, NULL, 0,
+ (void **)&reply, &replylen,
--- /dev/null
+From: Kurt Garloff <garloff@suse.de>
+Subject: [PATCH] X86: sysctl to allow panic on IOCK NMI error
+References: bnc#427979
+
+This patch introduces a sysctl, /proc/sys/kernel/panic_on_io_nmi,
+which defaults to 0 (off).
+When enabled, the kernel panics when it receives an NMI caused by an
+IO error.
+
+An IO-error-triggered NMI indicates a serious system condition, which
+could result in IO data corruption. Rather than continuing, panicking
+and dumping might be a better choice, so one can figure out what's
+causing the IO error.
+This could be especially important for companies running IO-intensive
+applications where corruption must be avoided, e.g. a bank's databases.
+
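+The sysctl can be enabled at runtime, for example:
+
+	echo 1 > /proc/sys/kernel/panic_on_io_nmi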
+
+Signed-off-by: Roberto Angelino <robertangelino@gmail.com>
+
+
+---
+ arch/x86/kernel/traps_32.c | 4 ++++
+ arch/x86/kernel/traps_64.c | 4 ++++
+ include/linux/kernel.h | 1 +
+ include/linux/sysctl.h | 1 +
+ kernel/sysctl.c | 8 ++++++++
+ kernel/sysctl_check.c | 1 +
+ 6 files changed, 19 insertions(+)
+
+--- a/arch/x86/kernel/traps_32.c
++++ b/arch/x86/kernel/traps_32.c
+@@ -83,6 +83,7 @@ gate_desc idt_table[256]
+ __attribute__((__section__(".data.idt"))) = { { { { 0, 0 } } }, };
+
+ int panic_on_unrecovered_nmi;
++int panic_on_io_nmi;
+ int kstack_depth_to_print = 24;
+ static unsigned int code_bytes = 64;
+ #ifdef CONFIG_STACK_UNWIND
+@@ -779,6 +780,9 @@ io_check_error(unsigned char reason, str
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
++ if (panic_on_io_nmi)
++ panic("NMI IOCK error: Not continuing");
++
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+--- a/arch/x86/kernel/traps_64.c
++++ b/arch/x86/kernel/traps_64.c
+@@ -56,6 +56,7 @@
+ #include <mach_traps.h>
+
+ int panic_on_unrecovered_nmi;
++int panic_on_io_nmi;
+ int kstack_depth_to_print = 12;
+ static unsigned int code_bytes = 64;
+ #ifdef CONFIG_STACK_UNWIND
+@@ -840,6 +841,9 @@ io_check_error(unsigned char reason, str
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
++ if (panic_on_io_nmi)
++ panic("NMI IOCK error: Not continuing");
++
+ /* Re-enable the IOCK line, wait for a few seconds */
+ reason = (reason & 0xf) | 8;
+ outb(reason, 0x61);
+--- a/include/linux/kernel.h
++++ b/include/linux/kernel.h
+@@ -236,6 +236,7 @@ extern int oops_in_progress; /* If set,
+ extern int panic_timeout;
+ extern int panic_on_oops;
+ extern int panic_on_unrecovered_nmi;
++extern int panic_on_io_nmi;
+ extern int tainted;
+ extern int unsupported;
+ extern const char *print_tainted(void);
+--- a/include/linux/sysctl.h
++++ b/include/linux/sysctl.h
+@@ -165,6 +165,7 @@ enum
+ KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
+ KERN_KDB=77, /* int: kdb on/off */
+ KERN_DUMP_AFTER_NOTIFIER=78, /* int: kdump after panic_notifier (SUSE only) */
++ KERN_PANIC_ON_IO_NMI=79, /* int: whether we will panic on an io NMI */
+ };
+
+
+--- a/kernel/sysctl.c
++++ b/kernel/sysctl.c
+@@ -691,6 +691,14 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = &proc_dointvec,
+ },
+ {
++ .ctl_name = KERN_PANIC_ON_IO_NMI,
++ .procname = "panic_on_io_nmi",
++ .data = &panic_on_io_nmi,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec,
++ },
++ {
+ .ctl_name = KERN_BOOTLOADER_TYPE,
+ .procname = "bootloader_type",
+ .data = &bootloader_type,
+--- a/kernel/sysctl_check.c
++++ b/kernel/sysctl_check.c
+@@ -104,6 +104,7 @@ static const struct trans_ctl_table tran
+ { KERN_MAX_LOCK_DEPTH, "max_lock_depth" },
+ { KERN_NMI_WATCHDOG, "nmi_watchdog" },
+ { KERN_PANIC_ON_NMI, "panic_on_unrecovered_nmi" },
++ { KERN_PANIC_ON_IO_NMI, "panic_on_io_nmi" },
+ { KERN_SETUID_DUMPABLE, "suid_dumpable" },
+ { KERN_KDB, "kdb" },
+ { KERN_DUMP_AFTER_NOTIFIER, "dump_after_notifier" },
--- /dev/null
+From: Andreas Gruenbacher <agruen@suse.de>
+Subject: Add match_string() for mount option parsing
+References: FATE301275
+Patch-mainline: no
+
+The match_string() function makes it possible to match string constants
+in mount options.
+
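+A sketch of a typical use, matching the flavor of an "acl=" mount
+option after match_token() has filled in args[] (the option constants
+are illustrative; compare the ext3 patch earlier in this series):
+
+	case Opt_acl_flavor:
+		if (match_string(&args[0], "posix"))
+			set_opt(sbi->s_mount_opt, POSIX_ACL);
+		else if (match_string(&args[0], "nfs4"))
+			set_opt(sbi->s_mount_opt, NFS4ACL);
+		else
+			return 0;
+		break;
+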
+Signed-off-by: Andreas Gruenbacher <agruen@suse.de>
+
+---
+ include/linux/parser.h | 1 +
+ lib/parser.c | 14 ++++++++++++++
+ 2 files changed, 15 insertions(+)
+
+--- a/include/linux/parser.h
++++ b/include/linux/parser.h
+@@ -26,6 +26,7 @@ typedef struct {
+ } substring_t;
+
+ int match_token(char *, match_table_t table, substring_t args[]);
++int match_string(substring_t *s, const char *str);
+ int match_int(substring_t *, int *result);
+ int match_octal(substring_t *, int *result);
+ int match_hex(substring_t *, int *result);
+--- a/lib/parser.c
++++ b/lib/parser.c
+@@ -111,6 +111,19 @@ int match_token(char *s, match_table_t t
+ }
+
+ /**
++ * match_string: check for a particular parameter
++ * @s: substring to be scanned
++ * @str: string to scan for
++ *
++ * Description: Returns non-zero if the &substring_t @s equals @str.
++ */
++int match_string(substring_t *s, const char *str)
++{
++ return strlen(str) == s->to - s->from &&
++ !memcmp(str, s->from, s->to - s->from);
++}
++
++/**
+ * match_number: scan a number in the given base from a substring_t
+ * @s: substring to be scanned
+ * @result: resulting integer on success
+@@ -221,6 +234,7 @@ char *match_strdup(const substring_t *s)
+ }
+
+ EXPORT_SYMBOL(match_token);
++EXPORT_SYMBOL(match_string);
+ EXPORT_SYMBOL(match_int);
+ EXPORT_SYMBOL(match_octal);
+ EXPORT_SYMBOL(match_hex);
--- /dev/null
+From: Jan Kara <jack@suse.cz>
+Subject: Allow setting of number of raw devices as a module parameter
+References: FATE 302178
+Patch-mainline: never
+
+Allow setting the maximal number of raw devices as a module parameter.
+This requires changing the static array into a vmalloced one (the array
+would be too large for kmalloc).
+
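+With the driver built as a module, the limit can be raised at load
+time, for example:
+
+	modprobe raw max_raw_minors=8192
+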
+Signed-off-by: Jan Kara <jack@suse.cz>
+
+---
+ drivers/char/Kconfig | 2 +-
+ drivers/char/raw.c | 33 +++++++++++++++++++++++++++------
+ 2 files changed, 28 insertions(+), 7 deletions(-)
+
+--- a/drivers/char/Kconfig
++++ b/drivers/char/Kconfig
+@@ -1026,7 +1026,7 @@ config RAW_DRIVER
+ with the O_DIRECT flag.
+
+ config MAX_RAW_DEVS
+- int "Maximum number of RAW devices to support (1-8192)"
++ int "Maximum number of RAW devices to support (1-65536)"
+ depends on RAW_DRIVER
+ default "256"
+ help
+--- a/drivers/char/raw.c
++++ b/drivers/char/raw.c
+@@ -20,6 +20,7 @@
+ #include <linux/device.h>
+ #include <linux/mutex.h>
+ #include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
+
+ #include <asm/uaccess.h>
+
+@@ -29,10 +30,15 @@ struct raw_device_data {
+ };
+
+ static struct class *raw_class;
+-static struct raw_device_data raw_devices[MAX_RAW_MINORS];
++static struct raw_device_data *raw_devices;
+ static DEFINE_MUTEX(raw_mutex);
+ static const struct file_operations raw_ctl_fops; /* forward declaration */
+
++static int max_raw_minors = MAX_RAW_MINORS;
++
++module_param(max_raw_minors, int, 0);
++MODULE_PARM_DESC(max_raw_minors, "Maximum number of raw devices (1-65536)");
++
+ /*
+ * Open/close code for raw IO.
+ *
+@@ -158,7 +164,7 @@ static int raw_ctl_ioctl(struct inode *i
+ goto out;
+ }
+
+- if (rq.raw_minor <= 0 || rq.raw_minor >= MAX_RAW_MINORS) {
++ if (rq.raw_minor <= 0 || rq.raw_minor >= max_raw_minors) {
+ err = -EINVAL;
+ goto out;
+ }
+@@ -266,12 +272,26 @@ static int __init raw_init(void)
+ dev_t dev = MKDEV(RAW_MAJOR, 0);
+ int ret;
+
+- ret = register_chrdev_region(dev, MAX_RAW_MINORS, "raw");
++ if (max_raw_minors < 1 || max_raw_minors > 65536) {
++ printk(KERN_WARNING "raw: invalid max_raw_minors (must be"
++ " between 1 and 65536), using %d\n", MAX_RAW_MINORS);
++ max_raw_minors = MAX_RAW_MINORS;
++ }
++
++ raw_devices = vmalloc(sizeof(struct raw_device_data) * max_raw_minors);
++ if (!raw_devices) {
++ printk(KERN_ERR "Not enough memory for raw device structures\n");
++ ret = -ENOMEM;
++ goto error;
++ }
++ memset(raw_devices, 0, sizeof(struct raw_device_data) * max_raw_minors);
++
++ ret = register_chrdev_region(dev, max_raw_minors, "raw");
+ if (ret)
+ goto error;
+
+ cdev_init(&raw_cdev, &raw_fops);
+- ret = cdev_add(&raw_cdev, dev, MAX_RAW_MINORS);
++ ret = cdev_add(&raw_cdev, dev, max_raw_minors);
+ if (ret) {
+ kobject_put(&raw_cdev.kobj);
+ goto error_region;
+@@ -290,8 +310,9 @@ static int __init raw_init(void)
+ return 0;
+
+ error_region:
+- unregister_chrdev_region(dev, MAX_RAW_MINORS);
++ unregister_chrdev_region(dev, max_raw_minors);
+ error:
++ vfree(raw_devices);
+ return ret;
+ }
+
+@@ -300,7 +321,7 @@ static void __exit raw_exit(void)
+ device_destroy(raw_class, MKDEV(RAW_MAJOR, 0));
+ class_destroy(raw_class);
+ cdev_del(&raw_cdev);
+- unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), MAX_RAW_MINORS);
++ unregister_chrdev_region(MKDEV(RAW_MAJOR, 0), max_raw_minors);
+ }
+
+ module_init(raw_init);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: introduce reiserfs_error()
+
+ Although reiserfs can currently handle severe errors such as journal failure,
+ it cannot handle less severe errors like metadata i/o failure. The following
+ patch adds a reiserfs_error() function akin to the one in ext3.
+
+ Subsequent patches will use this new error handler to handle errors more
+ gracefully in general.
+
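+ A hedged sketch of a typical caller (the message id and block number
+ handling are illustrative):
+
+	if (!buffer_uptodate(bh)) {
+		reiserfs_error(sb, "vs-5150", "failed to read block %lu",
+			       (unsigned long)bh->b_blocknr);
+		return -EIO;
+	}
+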
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+--
+ fs/reiserfs/prints.c | 25 +++++++++++++++++++++++++
+ include/linux/reiserfs_fs.h | 4 ++++
+ 2 files changed, 29 insertions(+)
+
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -373,6 +373,31 @@ void __reiserfs_panic(struct super_block
+ id ? id : "", id ? " " : "", function, error_buf);
+ }
+
++void __reiserfs_error(struct super_block *sb, const char *id,
++ const char *function, const char *fmt, ...)
++{
++ do_reiserfs_warning(fmt);
++
++ BUG_ON(sb == NULL);
++
++ if (reiserfs_error_panic(sb))
++ __reiserfs_panic(sb, id, function, error_buf);
++
++ if (id && id[0])
++ printk(KERN_CRIT "REISERFS error (device %s): %s %s: %s\n",
++ sb->s_id, id, function, error_buf);
++ else
++ printk(KERN_CRIT "REISERFS error (device %s): %s: %s\n",
++ sb->s_id, function, error_buf);
++
++ if (sb->s_flags & MS_RDONLY)
++ return;
++
++ reiserfs_info(sb, "Remounting filesystem read-only\n");
++ sb->s_flags |= MS_RDONLY;
++ reiserfs_abort_journal(sb, -EIO);
++}
++
+ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
+ {
+ do_reiserfs_warning(fmt);
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -2006,6 +2006,10 @@ void __reiserfs_panic(struct super_block
+ __attribute__ ((noreturn));
+ #define reiserfs_panic(s, id, fmt, args...) \
+ __reiserfs_panic(s, id, __func__, fmt, ##args)
++void __reiserfs_error(struct super_block *s, const char *id,
++ const char *function, const char *fmt, ...);
++#define reiserfs_error(s, id, fmt, args...) \
++ __reiserfs_error(s, id, __func__, fmt, ##args)
+ void reiserfs_info(struct super_block *s, const char *fmt, ...);
+ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
+ void print_indirect_item(struct buffer_head *bh, int item_num);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: use buffer_info for leaf_paste_entries
+
+ This patch makes leaf_paste_entries more consistent with respect to the
+ other leaf operations. Using buffer_info instead of buffer_head directly
+ allows us to get a superblock pointer for use in error handling.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/do_balan.c | 17 +++++++----------
+ fs/reiserfs/lbalance.c | 5 +++--
+ include/linux/reiserfs_fs.h | 2 +-
+ 3 files changed, 11 insertions(+), 13 deletions(-)
+
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -449,8 +449,7 @@ static int balance_leaf(struct tree_bala
+ /* when we have merge directory item, pos_in_item has been changed too */
+
+ /* paste new directory entry. 1 is entry number */
+- leaf_paste_entries(bi.
+- bi_bh,
++ leaf_paste_entries(&bi,
+ n +
+ item_pos
+ -
+@@ -699,7 +698,7 @@ static int balance_leaf(struct tree_bala
+ n + item_pos -
+ ret_val);
+ if (is_direntry_le_ih(pasted))
+- leaf_paste_entries(bi.bi_bh,
++ leaf_paste_entries(&bi,
+ n +
+ item_pos -
+ ret_val,
+@@ -894,8 +893,7 @@ static int balance_leaf(struct tree_bala
+ tb->insert_size[0],
+ body, zeros_num);
+ /* paste entry */
+- leaf_paste_entries(bi.
+- bi_bh,
++ leaf_paste_entries(&bi,
+ 0,
+ paste_entry_position,
+ 1,
+@@ -1096,7 +1094,7 @@ static int balance_leaf(struct tree_bala
+ tb->rnum[0]);
+ if (is_direntry_le_ih(pasted)
+ && pos_in_item >= 0) {
+- leaf_paste_entries(bi.bi_bh,
++ leaf_paste_entries(&bi,
+ item_pos -
+ n +
+ tb->rnum[0],
+@@ -1339,8 +1337,7 @@ static int balance_leaf(struct tree_bala
+ tb->insert_size[0],
+ body, zeros_num);
+ /* paste new directory entry */
+- leaf_paste_entries(bi.
+- bi_bh,
++ leaf_paste_entries(&bi,
+ 0,
+ pos_in_item
+ -
+@@ -1505,7 +1502,7 @@ static int balance_leaf(struct tree_bala
+ item_pos - n +
+ snum[i]);
+ if (is_direntry_le_ih(pasted)) {
+- leaf_paste_entries(bi.bi_bh,
++ leaf_paste_entries(&bi,
+ item_pos -
+ n + snum[i],
+ pos_in_item,
+@@ -1606,7 +1603,7 @@ static int balance_leaf(struct tree_bala
+ zeros_num);
+
+ /* paste entry */
+- leaf_paste_entries(bi.bi_bh,
++ leaf_paste_entries(&bi,
+ item_pos,
+ pos_in_item,
+ 1,
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -111,7 +111,7 @@ static void leaf_copy_dir_entries(struct
+ item_num_in_dest =
+ (last_first == FIRST_TO_LAST) ? (B_NR_ITEMS(dest) - 1) : 0;
+
+- leaf_paste_entries(dest_bi->bi_bh, item_num_in_dest,
++ leaf_paste_entries(dest_bi, item_num_in_dest,
+ (last_first ==
+ FIRST_TO_LAST) ? I_ENTRY_COUNT(B_N_PITEM_HEAD(dest,
+ item_num_in_dest))
+@@ -1191,7 +1191,7 @@ static void leaf_delete_items_entirely(s
+ }
+
+ /* paste new_entry_count entries (new_dehs, records) into position before to item_num-th item */
+-void leaf_paste_entries(struct buffer_head *bh,
++void leaf_paste_entries(struct buffer_info *bi,
+ int item_num,
+ int before,
+ int new_entry_count,
+@@ -1203,6 +1203,7 @@ void leaf_paste_entries(struct buffer_he
+ struct reiserfs_de_head *deh;
+ char *insert_point;
+ int i, old_entry_num;
++ struct buffer_head *bh = bi->bi_bh;
+
+ if (new_entry_count == 0)
+ return;
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -2026,7 +2026,7 @@ void leaf_paste_in_buffer(struct buffer_
+ int zeros_number);
+ void leaf_cut_from_buffer(struct buffer_info *bi, int cut_item_num,
+ int pos_in_item, int cut_size);
+-void leaf_paste_entries(struct buffer_head *bh, int item_num, int before,
++void leaf_paste_entries(struct buffer_info *bi, int item_num, int before,
+ int new_entry_count, struct reiserfs_de_head *new_dehs,
+ const char *records, int paste_size);
+ /* ibalance.c */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: cleanup path functions
+
+ This patch cleans up some redundancies in the reiserfs tree path code.
+
+ decrement_bcount() is essentially the same function as brelse(), so we use
+ that instead.
+
+ decrement_counters_in_path() is exactly the same function as pathrelse(), so
+ we kill that and use pathrelse() instead.
+
+ There's also some cleanup that makes the code a bit more readable.
+
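+ For reference, decrement_bcount() (removed below) dropped a reference
+ just as brelse() does, with an extra sanity check, roughly:
+
+	if (bh && atomic_read(&bh->b_count))
+		put_bh(bh);
+	else if (bh)
+		reiserfs_panic(NULL, "PAP-5070",
+			       "trying to free free buffer %b", bh);
+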
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/fix_node.c | 58 ++++++++++++++++++++++++------------------------
+ fs/reiserfs/stree.c | 59 ++++++++++---------------------------------------
+ 2 files changed, 43 insertions(+), 74 deletions(-)
+
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -753,20 +753,21 @@ static void free_buffers_in_tb(struct tr
+ {
+ int n_counter;
+
+- decrement_counters_in_path(p_s_tb->tb_path);
++ pathrelse(p_s_tb->tb_path);
+
+ for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) {
+- decrement_bcount(p_s_tb->L[n_counter]);
++ brelse(p_s_tb->L[n_counter]);
++ brelse(p_s_tb->R[n_counter]);
++ brelse(p_s_tb->FL[n_counter]);
++ brelse(p_s_tb->FR[n_counter]);
++ brelse(p_s_tb->CFL[n_counter]);
++ brelse(p_s_tb->CFR[n_counter]);
++
+ p_s_tb->L[n_counter] = NULL;
+- decrement_bcount(p_s_tb->R[n_counter]);
+ p_s_tb->R[n_counter] = NULL;
+- decrement_bcount(p_s_tb->FL[n_counter]);
+ p_s_tb->FL[n_counter] = NULL;
+- decrement_bcount(p_s_tb->FR[n_counter]);
+ p_s_tb->FR[n_counter] = NULL;
+- decrement_bcount(p_s_tb->CFL[n_counter]);
+ p_s_tb->CFL[n_counter] = NULL;
+- decrement_bcount(p_s_tb->CFR[n_counter]);
+ p_s_tb->CFR[n_counter] = NULL;
+ }
+ }
+@@ -1022,7 +1023,7 @@ static int get_far_parent(struct tree_ba
+ if (buffer_locked(*pp_s_com_father)) {
+ __wait_on_buffer(*pp_s_com_father);
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- decrement_bcount(*pp_s_com_father);
++ brelse(*pp_s_com_father);
+ return REPEAT_SEARCH;
+ }
+ }
+@@ -1050,8 +1051,8 @@ static int get_far_parent(struct tree_ba
+ return IO_ERROR;
+
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- decrement_counters_in_path(&s_path_to_neighbor_father);
+- decrement_bcount(*pp_s_com_father);
++ pathrelse(&s_path_to_neighbor_father);
++ brelse(*pp_s_com_father);
+ return REPEAT_SEARCH;
+ }
+
+@@ -1063,7 +1064,7 @@ static int get_far_parent(struct tree_ba
+ FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
+
+ s_path_to_neighbor_father.path_length--;
+- decrement_counters_in_path(&s_path_to_neighbor_father);
++ pathrelse(&s_path_to_neighbor_father);
+ return CARRY_ON;
+ }
+
+@@ -1086,10 +1087,10 @@ static int get_parents(struct tree_balan
+ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+ /* The root can not have parents.
+ Release nodes which previously were obtained as parents of the current node neighbors. */
+- decrement_bcount(p_s_tb->FL[n_h]);
+- decrement_bcount(p_s_tb->CFL[n_h]);
+- decrement_bcount(p_s_tb->FR[n_h]);
+- decrement_bcount(p_s_tb->CFR[n_h]);
++ brelse(p_s_tb->FL[n_h]);
++ brelse(p_s_tb->CFL[n_h]);
++ brelse(p_s_tb->FR[n_h]);
++ brelse(p_s_tb->CFR[n_h]);
+ p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] =
+ p_s_tb->CFR[n_h] = NULL;
+ return CARRY_ON;
+@@ -1115,9 +1116,9 @@ static int get_parents(struct tree_balan
+ return n_ret_value;
+ }
+
+- decrement_bcount(p_s_tb->FL[n_h]);
++ brelse(p_s_tb->FL[n_h]);
+ p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
+- decrement_bcount(p_s_tb->CFL[n_h]);
++ brelse(p_s_tb->CFL[n_h]);
+ p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */
+
+ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+@@ -1145,10 +1146,10 @@ static int get_parents(struct tree_balan
+ p_s_tb->rkey[n_h] = n_position;
+ }
+
+- decrement_bcount(p_s_tb->FR[n_h]);
++ brelse(p_s_tb->FR[n_h]);
+ p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. */
+
+- decrement_bcount(p_s_tb->CFR[n_h]);
++ brelse(p_s_tb->CFR[n_h]);
+ p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */
+
+ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+@@ -1964,7 +1965,7 @@ static int get_neighbors(struct tree_bal
+ if (!p_s_bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- decrement_bcount(p_s_bh);
++ brelse(p_s_bh);
+ PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+@@ -1980,7 +1981,7 @@ static int get_neighbors(struct tree_bal
+ dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)),
+ "PAP-8290: invalid child size of left neighbor");
+
+- decrement_bcount(p_s_tb->L[n_h]);
++ brelse(p_s_tb->L[n_h]);
+ p_s_tb->L[n_h] = p_s_bh;
+ }
+
+@@ -2001,11 +2002,11 @@ static int get_neighbors(struct tree_bal
+ if (!p_s_bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- decrement_bcount(p_s_bh);
++ brelse(p_s_bh);
+ PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+- decrement_bcount(p_s_tb->R[n_h]);
++ brelse(p_s_tb->R[n_h]);
+ p_s_tb->R[n_h] = p_s_bh;
+
+ RFALSE(!n_h
+@@ -2511,16 +2512,17 @@ int fix_nodes(int n_op_mode, struct tree
+ }
+
+ brelse(p_s_tb->L[i]);
+- p_s_tb->L[i] = NULL;
+ brelse(p_s_tb->R[i]);
+- p_s_tb->R[i] = NULL;
+ brelse(p_s_tb->FL[i]);
+- p_s_tb->FL[i] = NULL;
+ brelse(p_s_tb->FR[i]);
+- p_s_tb->FR[i] = NULL;
+ brelse(p_s_tb->CFL[i]);
+- p_s_tb->CFL[i] = NULL;
+ brelse(p_s_tb->CFR[i]);
++
++ p_s_tb->L[i] = NULL;
++ p_s_tb->R[i] = NULL;
++ p_s_tb->FL[i] = NULL;
++ p_s_tb->FR[i] = NULL;
++ p_s_tb->CFL[i] = NULL;
+ p_s_tb->CFR[i] = NULL;
+ }
+
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -23,7 +23,6 @@
+ * get_rkey
+ * key_in_buffer
+ * decrement_bcount
+- * decrement_counters_in_path
+ * reiserfs_check_path
+ * pathrelse_and_restore
+ * pathrelse
+@@ -359,36 +358,6 @@ static inline int key_in_buffer(struct t
+ return 1;
+ }
+
+-inline void decrement_bcount(struct buffer_head *p_s_bh)
+-{
+- if (p_s_bh) {
+- if (atomic_read(&(p_s_bh->b_count))) {
+- put_bh(p_s_bh);
+- return;
+- }
+- reiserfs_panic(NULL, "PAP-5070",
+- "trying to free free buffer %b", p_s_bh);
+- }
+-}
+-
+-/* Decrement b_count field of the all buffers in the path. */
+-void decrement_counters_in_path(struct treepath *p_s_search_path)
+-{
+- int n_path_offset = p_s_search_path->path_length;
+-
+- RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET ||
+- n_path_offset > EXTENDED_MAX_HEIGHT - 1,
+- "PAP-5080: invalid path offset of %d", n_path_offset);
+-
+- while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
+- struct buffer_head *bh;
+-
+- bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
+- decrement_bcount(bh);
+- }
+- p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+-}
+-
+ int reiserfs_check_path(struct treepath *p)
+ {
+ RFALSE(p->path_length != ILLEGAL_PATH_ELEMENT_OFFSET,
+@@ -396,12 +365,11 @@ int reiserfs_check_path(struct treepath
+ return 0;
+ }
+
+-/* Release all buffers in the path. Restore dirty bits clean
+-** when preparing the buffer for the log
+-**
+-** only called from fix_nodes()
+-*/
+-void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path)
++/* Drop the reference to each buffer in a path and restore
++ * dirty bits clean when preparing the buffer for the log.
++ * This version should only be called from fix_nodes() */
++void pathrelse_and_restore(struct super_block *sb,
++ struct treepath *p_s_search_path)
+ {
+ int n_path_offset = p_s_search_path->path_length;
+
+@@ -409,16 +377,15 @@ void pathrelse_and_restore(struct super_
+ "clm-4000: invalid path offset");
+
+ while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
+- reiserfs_restore_prepared_buffer(s,
+- PATH_OFFSET_PBUFFER
+- (p_s_search_path,
+- n_path_offset));
+- brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
++ struct buffer_head *bh;
++ bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
++ reiserfs_restore_prepared_buffer(sb, bh);
++ brelse(bh);
+ }
+ p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+ }
+
+-/* Release all buffers in the path. */
++/* Drop the reference to each buffer in a path */
+ void pathrelse(struct treepath *p_s_search_path)
+ {
+ int n_path_offset = p_s_search_path->path_length;
+@@ -631,7 +598,7 @@ int search_by_key(struct super_block *p_
+ we must be careful to release all nodes in a path before we either
+ discard the path struct or re-use the path struct, as we do here. */
+
+- decrement_counters_in_path(p_s_search_path);
++ pathrelse(p_s_search_path);
+
+ right_neighbor_of_leaf_node = 0;
+
+@@ -691,7 +658,7 @@ int search_by_key(struct super_block *p_
+ PROC_INFO_INC(p_s_sb, search_by_key_restarted);
+ PROC_INFO_INC(p_s_sb,
+ sbk_restarted[expected_level - 1]);
+- decrement_counters_in_path(p_s_search_path);
++ pathrelse(p_s_search_path);
+
+ /* Get the root block number so that we can repeat the search
+ starting from the root. */
+@@ -1868,7 +1835,7 @@ int reiserfs_do_truncate(struct reiserfs
+ if (journal_transaction_should_end(th, 0) ||
+ reiserfs_transaction_free_space(th) <= JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
+ int orig_len_alloc = th->t_blocks_allocated;
+- decrement_counters_in_path(&s_search_path);
++ pathrelse(&s_search_path);
+
+ if (update_timestamps) {
+ p_s_inode->i_mtime = p_s_inode->i_ctime =
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: use more consistent printk formatting
+
+ The output format of reiserfs messages differs depending on whether a
+ warning, error, panic, or info call is used.
+
+ The following patch makes the messages internally consistent, and also
+ more consistent with other Linux filesystems.
+
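+ For example, a warning that previously read (message text hypothetical)
+
+	ReiserFS: sda2: warning: vs-13060: ...
+
+ now reads
+
+	REISERFS warning (device sda2): vs-13060: ...
+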
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+--
+ fs/reiserfs/prints.c | 28 ++++++++++++++--------------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -268,10 +268,10 @@ void reiserfs_warning(struct super_block
+ {
+ do_reiserfs_warning(fmt);
+ if (sb)
+- printk(KERN_WARNING "ReiserFS: %s: warning: %s\n",
+- reiserfs_bdevname(sb), error_buf);
++ printk(KERN_WARNING "REISERFS warning (device %s): %s\n",
++ sb->s_id, error_buf);
+ else
+- printk(KERN_WARNING "ReiserFS: warning: %s\n", error_buf);
++ printk(KERN_WARNING "REISERFS warning: %s\n", error_buf);
+ }
+
+ /* No newline.. reiserfs_info calls can be followed by printk's */
+@@ -279,10 +279,10 @@ void reiserfs_info(struct super_block *s
+ {
+ do_reiserfs_warning(fmt);
+ if (sb)
+- printk(KERN_NOTICE "ReiserFS: %s: %s",
+- reiserfs_bdevname(sb), error_buf);
++ printk(KERN_NOTICE "REISERFS (device %s): %s",
++ sb->s_id, error_buf);
+ else
+- printk(KERN_NOTICE "ReiserFS: %s", error_buf);
++		printk(KERN_NOTICE "REISERFS: %s", error_buf);
+ }
+
+ /* No newline.. reiserfs_printk calls can be followed by printk's */
+@@ -297,10 +297,10 @@ void reiserfs_debug(struct super_block *
+ #ifdef CONFIG_REISERFS_CHECK
+ do_reiserfs_warning(fmt);
+ if (s)
+- printk(KERN_DEBUG "ReiserFS: %s: %s\n",
+- reiserfs_bdevname(s), error_buf);
++ printk(KERN_DEBUG "REISERFS debug (device %s): %s\n",
++ s->s_id, error_buf);
+ else
+- printk(KERN_DEBUG "ReiserFS: %s\n", error_buf);
++ printk(KERN_DEBUG "REISERFS debug: %s\n", error_buf);
+ #endif
+ }
+
+@@ -368,15 +368,15 @@ void reiserfs_abort(struct super_block *
+ do_reiserfs_warning(fmt);
+
+ if (reiserfs_error_panic(sb)) {
+- panic(KERN_CRIT "REISERFS: panic (device %s): %s\n",
+- reiserfs_bdevname(sb), error_buf);
++ panic(KERN_CRIT "REISERFS panic (device %s): %s\n", sb->s_id,
++ error_buf);
+ }
+
+- if (sb->s_flags & MS_RDONLY)
++ if (reiserfs_is_journal_aborted(SB_JOURNAL(sb)))
+ return;
+
+- printk(KERN_CRIT "REISERFS: abort (device %s): %s\n",
+- reiserfs_bdevname(sb), error_buf);
++ printk(KERN_CRIT "REISERFS abort (device %s): %s\n", sb->s_id,
++ error_buf);
+
+ sb->s_flags |= MS_RDONLY;
+ reiserfs_journal_abort(sb, errno);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: eliminate per-super xattr lock
+
+ With the switch to using inode->i_mutex locking during lookups/creation in
+ the xattr root, the per-super xattr lock is no longer needed.
+
+ This patch removes it.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+--
+ fs/reiserfs/inode.c | 14 -------
+ fs/reiserfs/namei.c | 29 ----------------
+ fs/reiserfs/super.c | 4 --
+ fs/reiserfs/xattr.c | 70 +++++++++++++++++++-------------------
+ fs/reiserfs/xattr_acl.c | 74 ++++++++++++++++++-----------------------
+ include/linux/reiserfs_fs.h | 3 -
+ include/linux/reiserfs_fs_sb.h | 3 -
+ include/linux/reiserfs_xattr.h | 28 ++-------------
+ 8 files changed, 74 insertions(+), 151 deletions(-)
+
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1962,19 +1962,7 @@ int reiserfs_new_inode(struct reiserfs_t
+ out_inserted_sd:
+ inode->i_nlink = 0;
+ th->t_trans_id = 0; /* so the caller can't use this handle later */
+-
+- /* If we were inheriting an ACL, we need to release the lock so that
+- * iput doesn't deadlock in reiserfs_delete_xattrs. The locking
+- * code really needs to be reworked, but this will take care of it
+- * for now. -jeffm */
+-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+- if (REISERFS_I(dir)->i_acl_default && !IS_ERR(REISERFS_I(dir)->i_acl_default)) {
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+- iput(inode);
+- reiserfs_write_lock_xattrs(dir->i_sb);
+- } else
+-#endif
+- iput(inode);
++ iput(inode);
+ return err;
+ }
+
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -618,9 +618,6 @@ static int reiserfs_create(struct inode
+
+ reiserfs_write_lock(dir->i_sb);
+
+- if (locked)
+- reiserfs_write_lock_xattrs(dir->i_sb);
+-
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+ if (retval) {
+ drop_new_inode(inode);
+@@ -633,11 +630,6 @@ static int reiserfs_create(struct inode
+ if (retval)
+ goto out_failed;
+
+- if (locked) {
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+- locked = 0;
+- }
+-
+ inode->i_op = &reiserfs_file_inode_operations;
+ inode->i_fop = &reiserfs_file_operations;
+ inode->i_mapping->a_ops = &reiserfs_address_space_operations;
+@@ -662,8 +654,6 @@ static int reiserfs_create(struct inode
+ retval = journal_end(&th, dir->i_sb, jbegin_count);
+
+ out_failed:
+- if (locked)
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+ reiserfs_write_unlock(dir->i_sb);
+ return retval;
+ }
+@@ -693,9 +683,6 @@ static int reiserfs_mknod(struct inode *
+
+ reiserfs_write_lock(dir->i_sb);
+
+- if (locked)
+- reiserfs_write_lock_xattrs(dir->i_sb);
+-
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+ if (retval) {
+ drop_new_inode(inode);
+@@ -709,11 +696,6 @@ static int reiserfs_mknod(struct inode *
+ goto out_failed;
+ }
+
+- if (locked) {
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+- locked = 0;
+- }
+-
+ inode->i_op = &reiserfs_special_inode_operations;
+ init_special_inode(inode, inode->i_mode, rdev);
+
+@@ -741,8 +723,6 @@ static int reiserfs_mknod(struct inode *
+ retval = journal_end(&th, dir->i_sb, jbegin_count);
+
+ out_failed:
+- if (locked)
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+ reiserfs_write_unlock(dir->i_sb);
+ return retval;
+ }
+@@ -772,8 +752,6 @@ static int reiserfs_mkdir(struct inode *
+ locked = reiserfs_cache_default_acl(dir);
+
+ reiserfs_write_lock(dir->i_sb);
+- if (locked)
+- reiserfs_write_lock_xattrs(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+ if (retval) {
+@@ -795,11 +773,6 @@ static int reiserfs_mkdir(struct inode *
+ goto out_failed;
+ }
+
+- if (locked) {
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+- locked = 0;
+- }
+-
+ reiserfs_update_inode_transaction(inode);
+ reiserfs_update_inode_transaction(dir);
+
+@@ -827,8 +800,6 @@ static int reiserfs_mkdir(struct inode *
+ d_instantiate(dentry, inode);
+ retval = journal_end(&th, dir->i_sb, jbegin_count);
+ out_failed:
+- if (locked)
+- reiserfs_write_unlock_xattrs(dir->i_sb);
+ reiserfs_write_unlock(dir->i_sb);
+ return retval;
+ }
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1644,10 +1644,6 @@ static int reiserfs_fill_super(struct su
+ REISERFS_SB(s)->s_alloc_options.preallocmin = 0;
+ /* Preallocate by 16 blocks (17-1) at once */
+ REISERFS_SB(s)->s_alloc_options.preallocsize = 17;
+-#ifdef CONFIG_REISERFS_FS_XATTR
+- /* Initialize the rwsem for xattr dir */
+- init_rwsem(&REISERFS_SB(s)->xattr_dir_sem);
+-#endif
+ /* setup default block allocator options */
+ reiserfs_init_alloc_options(s);
+
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -27,6 +27,12 @@
+ * these are special cases for filesystem ACLs, they are interpreted by the
+ * kernel, in addition, they are negatively and positively cached and attached
+ * to the inode so that unnecessary lookups are avoided.
++ *
++ * Locking works like so:
++ * The xattr root (/.reiserfs_priv/xattrs) is protected by its i_mutex.
++ * The xattr dir (/.reiserfs_priv/xattrs/<oid>.<gen>) is protected by
++ * inode->xattr_sem.
++ * The xattrs themselves are likewise protected by the xattr_sem.
+ */
+
+ #include <linux/reiserfs_fs.h>
+@@ -392,16 +398,17 @@ reiserfs_delete_xattrs_filler(void *buf,
+ /* This is called w/ inode->i_mutex downed */
+ int reiserfs_delete_xattrs(struct inode *inode)
+ {
+- struct dentry *dir, *root;
+ int err = 0;
++ struct dentry *dir, *root;
++ struct reiserfs_transaction_handle th;
++ int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
++ 4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
+
+ /* Skip out, an xattr has no xattrs associated with it */
+ if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
+ return 0;
+
+- reiserfs_read_lock_xattrs(inode->i_sb);
+ dir = open_xa_dir(inode, XATTR_REPLACE);
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ goto out;
+@@ -416,18 +423,26 @@ int reiserfs_delete_xattrs(struct inode
+ if (err)
+ goto out_dir;
+
+- /* Leftovers besides . and .. -- that's not good. */
+- if (dir->d_inode->i_nlink <= 2) {
+- root = open_xa_root(inode->i_sb, XATTR_REPLACE);
+- reiserfs_write_lock_xattrs(inode->i_sb);
++	/* We start a transaction here to avoid an ABBA situation
++ * between the xattr root's i_mutex and the journal lock.
++ * Inode creation will inherit an ACL, which requires a
++ * lookup. The lookup locks the xattr root i_mutex with a
++	 * transaction open. Inode deletion takes the xattr root
++ * i_mutex to delete the directory and then starts a
++ * transaction inside it. Boom. This doesn't incur much
++ * additional overhead since the reiserfs_rmdir transaction
++ * will just nest inside the outer transaction. */
++ err = journal_begin(&th, inode->i_sb, blocks);
++ if (!err) {
++ int jerror;
++ root = dget(dir->d_parent);
+ mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_rmdir(root->d_inode, dir);
++ jerror = journal_end(&th, inode->i_sb, blocks);
+ mutex_unlock(&root->d_inode->i_mutex);
+- reiserfs_write_unlock_xattrs(inode->i_sb);
+ dput(root);
+- } else {
+- reiserfs_warning(inode->i_sb, "jdm-20006",
+- "Couldn't remove all entries in directory");
++
++ err = jerror ?: err;
+ }
+
+ out_dir:
+@@ -437,6 +452,9 @@ out:
+ if (!err)
+ REISERFS_I(inode)->i_flags =
+ REISERFS_I(inode)->i_flags & ~i_has_xattr_dir;
++ else
++ reiserfs_warning(inode->i_sb, "jdm-20004",
++ "Couldn't remove all xattrs (%d)\n", err);
+ return err;
+ }
+
+@@ -485,9 +503,7 @@ int reiserfs_chown_xattrs(struct inode *
+ if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
+ return 0;
+
+- reiserfs_read_lock_xattrs(inode->i_sb);
+ dir = open_xa_dir(inode, XATTR_REPLACE);
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ if (IS_ERR(dir)) {
+ if (PTR_ERR(dir) != -ENODATA)
+ err = PTR_ERR(dir);
+@@ -731,6 +747,11 @@ reiserfs_xattr_get(const struct inode *i
+ goto out;
+ }
+
++ /* protect against concurrent access. xattrs are backed by
++ * regular files, but they're not regular files. The updates
++ * must be atomic from the perspective of the user. */
++ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
++
+ isize = i_size_read(dentry->d_inode);
+ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+@@ -798,6 +819,7 @@ reiserfs_xattr_get(const struct inode *i
+ }
+
+ out_dput:
++ mutex_unlock(&dentry->d_inode->i_mutex);
+ dput(dentry);
+
+ out:
+@@ -834,7 +856,6 @@ int reiserfs_xattr_del(struct inode *ino
+ static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char *);
+ /*
+ * Inode operation getxattr()
+- * Preliminary locking: we down dentry->d_inode->i_mutex
+ */
+ ssize_t
+ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
+@@ -848,9 +869,7 @@ reiserfs_getxattr(struct dentry * dentry
+ return -EOPNOTSUPP;
+
+ reiserfs_read_lock_xattr_i(dentry->d_inode);
+- reiserfs_read_lock_xattrs(dentry->d_sb);
+ err = xah->get(dentry->d_inode, name, buffer, size);
+- reiserfs_read_unlock_xattrs(dentry->d_sb);
+ reiserfs_read_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+@@ -866,23 +885,13 @@ reiserfs_setxattr(struct dentry *dentry,
+ {
+ struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix(name);
+ int err;
+- int lock;
+
+ if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+ get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+ reiserfs_write_lock_xattr_i(dentry->d_inode);
+- lock = !has_xattr_dir(dentry->d_inode);
+- if (lock)
+- reiserfs_write_lock_xattrs(dentry->d_sb);
+- else
+- reiserfs_read_lock_xattrs(dentry->d_sb);
+ err = xah->set(dentry->d_inode, name, value, size, flags);
+- if (lock)
+- reiserfs_write_unlock_xattrs(dentry->d_sb);
+- else
+- reiserfs_read_unlock_xattrs(dentry->d_sb);
+ reiserfs_write_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+@@ -902,8 +911,6 @@ int reiserfs_removexattr(struct dentry *
+ return -EOPNOTSUPP;
+
+ reiserfs_write_lock_xattr_i(dentry->d_inode);
+- reiserfs_read_lock_xattrs(dentry->d_sb);
+-
+ /* Deletion pre-operation */
+ if (xah->del) {
+ err = xah->del(dentry->d_inode, name);
+@@ -917,7 +924,6 @@ int reiserfs_removexattr(struct dentry *
+ mark_inode_dirty(dentry->d_inode);
+
+ out:
+- reiserfs_read_unlock_xattrs(dentry->d_sb);
+ reiserfs_write_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+@@ -966,8 +972,6 @@ reiserfs_listxattr_filler(void *buf, con
+
+ /*
+ * Inode operation listxattr()
+- *
+- * Preliminary locking: we down dentry->d_inode->i_mutex
+ */
+ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
+ {
+@@ -983,9 +987,7 @@ ssize_t reiserfs_listxattr(struct dentry
+ return -EOPNOTSUPP;
+
+ reiserfs_read_lock_xattr_i(dentry->d_inode);
+- reiserfs_read_lock_xattrs(dentry->d_sb);
+ dir = open_xa_dir(dentry->d_inode, XATTR_REPLACE);
+- reiserfs_read_unlock_xattrs(dentry->d_sb);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ if (err == -ENODATA)
+@@ -1114,11 +1116,9 @@ static int reiserfs_check_acl(struct ino
+ int error = -EAGAIN; /* do regular unix permission checks by default */
+
+ reiserfs_read_lock_xattr_i(inode);
+- reiserfs_read_lock_xattrs(inode->i_sb);
+
+ acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ reiserfs_read_unlock_xattr_i(inode);
+
+ if (acl) {
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -172,6 +172,29 @@ static void *posix_acl_to_disk(const str
+ return ERR_PTR(-EINVAL);
+ }
+
++static inline void iset_acl(struct inode *inode, struct posix_acl **i_acl,
++ struct posix_acl *acl)
++{
++ spin_lock(&inode->i_lock);
++ if (*i_acl != ERR_PTR(-ENODATA))
++ posix_acl_release(*i_acl);
++ *i_acl = posix_acl_dup(acl);
++ spin_unlock(&inode->i_lock);
++}
++
++static inline struct posix_acl *iget_acl(struct inode *inode,
++ struct posix_acl **i_acl)
++{
++ struct posix_acl *acl = ERR_PTR(-ENODATA);
++
++ spin_lock(&inode->i_lock);
++ if (*i_acl != ERR_PTR(-ENODATA))
++ acl = posix_acl_dup(*i_acl);
++ spin_unlock(&inode->i_lock);
++
++ return acl;
++}
++
+ /*
+ * Inode operation get_posix_acl().
+ *
+@@ -199,11 +222,11 @@ struct posix_acl *reiserfs_get_acl(struc
+ return ERR_PTR(-EINVAL);
+ }
+
+- if (IS_ERR(*p_acl)) {
+- if (PTR_ERR(*p_acl) == -ENODATA)
+- return NULL;
+- } else if (*p_acl != NULL)
+- return posix_acl_dup(*p_acl);
++ acl = iget_acl(inode, p_acl);
++ if (acl && !IS_ERR(acl))
++ return acl;
++ else if (PTR_ERR(acl) == -ENODATA)
++ return NULL;
+
+ size = reiserfs_xattr_get(inode, name, NULL, 0);
+ if (size < 0) {
+@@ -229,7 +252,7 @@ struct posix_acl *reiserfs_get_acl(struc
+ } else {
+ acl = posix_acl_from_disk(value, retval);
+ if (!IS_ERR(acl))
+- *p_acl = posix_acl_dup(acl);
++ iset_acl(inode, p_acl, acl);
+ }
+
+ kfree(value);
+@@ -300,16 +323,8 @@ reiserfs_set_acl(struct inode *inode, in
+
+ kfree(value);
+
+- if (!error) {
+- /* Release the old one */
+- if (!IS_ERR(*p_acl) && *p_acl)
+- posix_acl_release(*p_acl);
+-
+- if (acl == NULL)
+- *p_acl = ERR_PTR(-ENODATA);
+- else
+- *p_acl = posix_acl_dup(acl);
+- }
++ if (!error)
++ iset_acl(inode, p_acl, acl);
+
+ return error;
+ }
+@@ -404,9 +419,7 @@ int reiserfs_cache_default_acl(struct in
+ if (reiserfs_posixacl(inode->i_sb) && !IS_PRIVATE(inode)) {
+ struct posix_acl *acl;
+ reiserfs_read_lock_xattr_i(inode);
+- reiserfs_read_lock_xattrs(inode->i_sb);
+ acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT);
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ reiserfs_read_unlock_xattr_i(inode);
+ ret = (acl && !IS_ERR(acl));
+ if (ret)
+@@ -429,9 +442,7 @@ int reiserfs_acl_chmod(struct inode *ino
+ return 0;
+ }
+
+- reiserfs_read_lock_xattrs(inode->i_sb);
+ acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ if (!acl)
+ return 0;
+ if (IS_ERR(acl))
+@@ -442,17 +453,8 @@ int reiserfs_acl_chmod(struct inode *ino
+ return -ENOMEM;
+ error = posix_acl_chmod_masq(clone, inode->i_mode);
+ if (!error) {
+- int lock = !has_xattr_dir(inode);
+ reiserfs_write_lock_xattr_i(inode);
+- if (lock)
+- reiserfs_write_lock_xattrs(inode->i_sb);
+- else
+- reiserfs_read_lock_xattrs(inode->i_sb);
+ error = reiserfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+- if (lock)
+- reiserfs_write_unlock_xattrs(inode->i_sb);
+- else
+- reiserfs_read_unlock_xattrs(inode->i_sb);
+ reiserfs_write_unlock_xattr_i(inode);
+ }
+ posix_acl_release(clone);
+@@ -480,14 +482,9 @@ posix_acl_access_set(struct inode *inode
+ static int posix_acl_access_del(struct inode *inode, const char *name)
+ {
+ struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+- struct posix_acl **acl = &reiserfs_i->i_acl_access;
+ if (strlen(name) != sizeof(POSIX_ACL_XATTR_ACCESS) - 1)
+ return -EINVAL;
+- if (!IS_ERR(*acl) && *acl) {
+- posix_acl_release(*acl);
+- *acl = ERR_PTR(-ENODATA);
+- }
+-
++ iset_acl(inode, &reiserfs_i->i_acl_access, ERR_PTR(-ENODATA));
+ return 0;
+ }
+
+@@ -533,14 +530,9 @@ posix_acl_default_set(struct inode *inod
+ static int posix_acl_default_del(struct inode *inode, const char *name)
+ {
+ struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+- struct posix_acl **acl = &reiserfs_i->i_acl_default;
+ if (strlen(name) != sizeof(POSIX_ACL_XATTR_DEFAULT) - 1)
+ return -EINVAL;
+- if (!IS_ERR(*acl) && *acl) {
+- posix_acl_release(*acl);
+- *acl = ERR_PTR(-ENODATA);
+- }
+-
++ iset_acl(inode, &reiserfs_i->i_acl_default, ERR_PTR(-ENODATA));
+ return 0;
+ }
+
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -2224,7 +2224,4 @@ int reiserfs_unpack(struct inode *inode,
+ #define reiserfs_write_lock( sb ) lock_kernel()
+ #define reiserfs_write_unlock( sb ) unlock_kernel()
+
+-/* xattr stuff */
+-#define REISERFS_XATTR_DIR_SEM(s) (REISERFS_SB(s)->xattr_dir_sem)
+-
+ #endif /* _LINUX_REISER_FS_H */
+--- a/include/linux/reiserfs_fs_sb.h
++++ b/include/linux/reiserfs_fs_sb.h
+@@ -402,9 +402,6 @@ struct reiserfs_sb_info {
+ spinlock_t bitmap_lock; /* this lock on now only used to protect reserved_blocks variable */
+ struct dentry *priv_root; /* root of /.reiserfs_priv */
+ struct dentry *xattr_root; /* root of /.reiserfs_priv/.xa */
+-#ifdef CONFIG_REISERFS_FS_XATTR
+- struct rw_semaphore xattr_dir_sem;
+-#endif
+ int j_errno;
+ #ifdef CONFIG_QUOTA
+ char *s_qf_names[MAXQUOTAS];
+--- a/include/linux/reiserfs_xattr.h
++++ b/include/linux/reiserfs_xattr.h
+@@ -67,45 +67,27 @@ extern struct reiserfs_xattr_handler use
+ extern struct reiserfs_xattr_handler trusted_handler;
+ extern struct reiserfs_xattr_handler security_handler;
+
+-static inline void reiserfs_write_lock_xattrs(struct super_block *sb)
+-{
+- down_write(&REISERFS_XATTR_DIR_SEM(sb));
+-}
+-static inline void reiserfs_write_unlock_xattrs(struct super_block *sb)
+-{
+- up_write(&REISERFS_XATTR_DIR_SEM(sb));
+-}
+-static inline void reiserfs_read_lock_xattrs(struct super_block *sb)
+-{
+- down_read(&REISERFS_XATTR_DIR_SEM(sb));
+-}
+-
+-static inline void reiserfs_read_unlock_xattrs(struct super_block *sb)
+-{
+- up_read(&REISERFS_XATTR_DIR_SEM(sb));
+-}
+-
+ static inline void reiserfs_write_lock_xattr_i(struct inode *inode)
+ {
+- down_write(&REISERFS_I(inode)->xattr_sem);
++ down_write(&REISERFS_I(inode)->i_xattr_sem);
+ }
+ static inline void reiserfs_write_unlock_xattr_i(struct inode *inode)
+ {
+- up_write(&REISERFS_I(inode)->xattr_sem);
++ up_write(&REISERFS_I(inode)->i_xattr_sem);
+ }
+ static inline void reiserfs_read_lock_xattr_i(struct inode *inode)
+ {
+- down_read(&REISERFS_I(inode)->xattr_sem);
++ down_read(&REISERFS_I(inode)->i_xattr_sem);
+ }
+
+ static inline void reiserfs_read_unlock_xattr_i(struct inode *inode)
+ {
+- up_read(&REISERFS_I(inode)->xattr_sem);
++ up_read(&REISERFS_I(inode)->i_xattr_sem);
+ }
+
+ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
+ {
+- init_rwsem(&REISERFS_I(inode)->xattr_sem);
++ init_rwsem(&REISERFS_I(inode)->i_xattr_sem);
+ }
+
+ #else
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: journaled xattrs
+
+ Deadlocks are possible in the xattr code between the journal lock and the
+ xattr sems.
+
+ This patch implements journaling for xattr operations. The benefit is
+ twofold:
+ * It gets rid of the deadlock possibility by always ensuring that xattr
+ write operations are initiated inside a transaction.
+ * It corrects the problem where xattr backing files aren't considered any
+ differently from normal files, despite the fact that they are metadata.
+
+ I discussed the added journal load with Chris Mason, and we decided that
+ since xattr operations (versus other journal activity) are fairly rare, the
+ introduction of larger transactions to support journaled xattrs wouldn't be
+ too big a deal.
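+
+ As a sketch of the resulting pattern (based on the reiserfs_xattr_set
+ wrapper below, with error handling trimmed), every xattr write is now
+ wrapped in a transaction under the superblock write lock:
+
+     reiserfs_write_lock(inode->i_sb);
+     error = journal_begin(&th, inode->i_sb, jbegin_count);
+     if (!error) {
+         error = reiserfs_xattr_set_handle(&th, inode, name,
+                                           buffer, buffer_size, flags);
+         error2 = journal_end(&th, inode->i_sb, jbegin_count);
+         if (!error)
+             error = error2;
+     }
+     reiserfs_write_unlock(inode->i_sb);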
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+--
+ fs/reiserfs/inode.c | 3 -
+ fs/reiserfs/namei.c | 14 +----
+ fs/reiserfs/xattr.c | 39 +++++++++++----
+ fs/reiserfs/xattr_acl.c | 105 +++++++++++++++++++++++++++++++----------
+ include/linux/reiserfs_acl.h | 3 -
+ include/linux/reiserfs_fs.h | 4 +
+ include/linux/reiserfs_xattr.h | 40 ++++++++++++++-
+ 7 files changed, 159 insertions(+), 49 deletions(-)
+
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1919,9 +1919,8 @@ int reiserfs_new_inode(struct reiserfs_t
+ goto out_inserted_sd;
+ }
+
+- /* XXX CHECK THIS */
+ if (reiserfs_posixacl(inode->i_sb)) {
+- retval = reiserfs_inherit_default_acl(dir, dentry, inode);
++ retval = reiserfs_inherit_default_acl(th, dir, dentry, inode);
+ if (retval) {
+ err = retval;
+ reiserfs_check_path(&path_to_key);
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -607,15 +607,13 @@ static int reiserfs_create(struct inode
+ 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
+ REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
+ struct reiserfs_transaction_handle th;
+- int locked;
+
+ if (!(inode = new_inode(dir->i_sb))) {
+ return -ENOMEM;
+ }
+ new_inode_init(inode, dir, mode);
+
+- locked = reiserfs_cache_default_acl(dir);
+-
++ jbegin_count += reiserfs_cache_default_acl(dir);
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -669,7 +667,6 @@ static int reiserfs_mknod(struct inode *
+ JOURNAL_PER_BALANCE_CNT * 3 +
+ 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
+ REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
+- int locked;
+
+ if (!new_valid_dev(rdev))
+ return -EINVAL;
+@@ -679,8 +676,7 @@ static int reiserfs_mknod(struct inode *
+ }
+ new_inode_init(inode, dir, mode);
+
+- locked = reiserfs_cache_default_acl(dir);
+-
++ jbegin_count += reiserfs_cache_default_acl(dir);
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -737,7 +733,6 @@ static int reiserfs_mkdir(struct inode *
+ JOURNAL_PER_BALANCE_CNT * 3 +
+ 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
+ REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
+- int locked;
+
+ #ifdef DISPLACE_NEW_PACKING_LOCALITIES
+ /* set flag that new packing locality created and new blocks for the content * of that directory are not displaced yet */
+@@ -749,8 +744,7 @@ static int reiserfs_mkdir(struct inode *
+ }
+ new_inode_init(inode, dir, mode);
+
+- locked = reiserfs_cache_default_acl(dir);
+-
++ jbegin_count += reiserfs_cache_default_acl(dir);
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -1037,8 +1031,6 @@ static int reiserfs_symlink(struct inode
+ memcpy(name, symname, strlen(symname));
+ padd_item(name, item_len, strlen(symname));
+
+- /* We would inherit the default ACL here, but symlinks don't get ACLs */
+-
+ retval = journal_begin(&th, parent_dir->i_sb, jbegin_count);
+ if (retval) {
+ drop_new_inode(inode);
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -10,15 +10,17 @@
+ #include <linux/reiserfs_acl.h>
+ #include <asm/uaccess.h>
+
+-static int reiserfs_set_acl(struct inode *inode, int type,
++static int reiserfs_set_acl(struct reiserfs_transaction_handle *th,
++ struct inode *inode, int type,
+ struct posix_acl *acl);
+
+ static int
+ xattr_set_acl(struct inode *inode, int type, const void *value, size_t size)
+ {
+ struct posix_acl *acl;
+- int error;
+-
++ int error, error2;
++ struct reiserfs_transaction_handle th;
++ size_t jcreate_blocks;
+ if (!reiserfs_posixacl(inode->i_sb))
+ return -EOPNOTSUPP;
+ if (!is_owner_or_cap(inode))
+@@ -36,7 +38,21 @@ xattr_set_acl(struct inode *inode, int t
+ } else
+ acl = NULL;
+
+- error = reiserfs_set_acl(inode, type, acl);
++ /* Pessimism: We can't assume that anything from the xattr root up
++ * has been created. */
++
++ jcreate_blocks = reiserfs_xattr_jcreate_nblocks(inode) +
++ reiserfs_xattr_nblocks(inode, size) * 2;
++
++ reiserfs_write_lock(inode->i_sb);
++ error = journal_begin(&th, inode->i_sb, jcreate_blocks);
++ if (error == 0) {
++ error = reiserfs_set_acl(&th, inode, type, acl);
++ error2 = journal_end(&th, inode->i_sb, jcreate_blocks);
++ if (error2)
++ error = error2;
++ }
++ reiserfs_write_unlock(inode->i_sb);
+
+ release_and_out:
+ posix_acl_release(acl);
+@@ -266,7 +282,8 @@ struct posix_acl *reiserfs_get_acl(struc
+ * BKL held [before 2.5.x]
+ */
+ static int
+-reiserfs_set_acl(struct inode *inode, int type, struct posix_acl *acl)
++reiserfs_set_acl(struct reiserfs_transaction_handle *th, struct inode *inode,
++ int type, struct posix_acl *acl)
+ {
+ char *name;
+ void *value = NULL;
+@@ -310,7 +327,7 @@ reiserfs_set_acl(struct inode *inode, in
+ return (int)PTR_ERR(value);
+ }
+
+- error = __reiserfs_xattr_set(inode, name, value, size, 0);
++ error = reiserfs_xattr_set_handle(th, inode, name, value, size, 0);
+
+ /*
+ * Ensure that the inode gets dirtied if we're only using
+@@ -337,7 +354,8 @@ reiserfs_set_acl(struct inode *inode, in
+ /* dir->i_mutex: locked,
+ * inode is new and not released into the wild yet */
+ int
+-reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
++reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
++ struct inode *dir, struct dentry *dentry,
+ struct inode *inode)
+ {
+ struct posix_acl *acl;
+@@ -374,7 +392,8 @@ reiserfs_inherit_default_acl(struct inod
+
+ /* Copy the default ACL to the default ACL of a new directory */
+ if (S_ISDIR(inode->i_mode)) {
+- err = reiserfs_set_acl(inode, ACL_TYPE_DEFAULT, acl);
++ err = reiserfs_set_acl(th, inode, ACL_TYPE_DEFAULT,
++ acl);
+ if (err)
+ goto cleanup;
+ }
+@@ -395,9 +414,9 @@ reiserfs_inherit_default_acl(struct inod
+
+ /* If we need an ACL.. */
+ if (need_acl > 0) {
+- err =
+- reiserfs_set_acl(inode, ACL_TYPE_ACCESS,
+- acl_copy);
++ err = reiserfs_set_acl(th, inode,
++ ACL_TYPE_ACCESS,
++ acl_copy);
+ if (err)
+ goto cleanup_copy;
+ }
+@@ -415,21 +434,45 @@ reiserfs_inherit_default_acl(struct inod
+ return err;
+ }
+
+-/* Looks up and caches the result of the default ACL.
+- * We do this so that we don't need to carry the xattr_sem into
+- * reiserfs_new_inode if we don't need to */
++/* This is used to cache the default acl before a new object is created.
++ * The biggest reason for this is to get an idea of how many blocks will
++ * actually be required for the create operation if we must inherit an ACL.
++ * An ACL write can add up to 3 object creations and an additional file write
++ * so we'd prefer not to reserve that many blocks in the journal if we can.
++ * It also has the advantage of not loading the ACL with a transaction open,
++ * this may seem silly, but if the owner of the directory is doing the
++ * creation, the ACL may not be loaded since the permissions wouldn't require
++ * it.
++ * We return the number of blocks required for the transaction.
++ */
+ int reiserfs_cache_default_acl(struct inode *inode)
+ {
+- int ret = 0;
+- if (reiserfs_posixacl(inode->i_sb) && !IS_PRIVATE(inode)) {
+- struct posix_acl *acl;
+- acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT);
+- ret = (acl && !IS_ERR(acl));
+- if (ret)
+- posix_acl_release(acl);
++ struct posix_acl *acl;
++ int nblocks = 0;
++
++ if (IS_PRIVATE(inode))
++ return 0;
++
++ acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT);
++
++ if (acl && !IS_ERR(acl)) {
++ int size = reiserfs_acl_size(acl->a_count);
++
++ /* Other xattrs can be created during inode creation. We don't
++ * want to claim too many blocks, so we check to see if we
++		 * need to create the tree to the xattrs, and then we
++ * just want two files. */
++ nblocks = reiserfs_xattr_jcreate_nblocks(inode);
++ nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
++
++ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
++
++ /* We need to account for writes + bitmaps for two files */
++ nblocks += reiserfs_xattr_nblocks(inode, size) * 4;
++ posix_acl_release(acl);
+ }
+
+- return ret;
++ return nblocks;
+ }
+
+ int reiserfs_acl_chmod(struct inode *inode)
+@@ -455,8 +498,22 @@ int reiserfs_acl_chmod(struct inode *ino
+ if (!clone)
+ return -ENOMEM;
+ error = posix_acl_chmod_masq(clone, inode->i_mode);
+- if (!error)
+- error = reiserfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
++ if (!error) {
++ struct reiserfs_transaction_handle th;
++ size_t size = reiserfs_xattr_nblocks(inode,
++ reiserfs_acl_size(clone->a_count));
++ reiserfs_write_lock(inode->i_sb);
++ error = journal_begin(&th, inode->i_sb, size * 2);
++ if (!error) {
++ int error2;
++ error = reiserfs_set_acl(&th, inode, ACL_TYPE_ACCESS,
++ clone);
++ error2 = journal_end(&th, inode->i_sb, size * 2);
++ if (error2)
++ error = error2;
++ }
++ reiserfs_write_unlock(inode->i_sb);
++ }
+ posix_acl_release(clone);
+ return error;
+ }
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -632,8 +632,9 @@ out_dput:
+ * inode->i_mutex: down
+ */
+ int
+-__reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
+- size_t buffer_size, int flags)
++reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *th,
++ struct inode *inode, const char *name,
++ const void *buffer, size_t buffer_size, int flags)
+ {
+ int err = 0;
+ struct dentry *dentry;
+@@ -723,14 +724,34 @@ out_unlock:
+ return err;
+ }
+
+-int
+-reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
+- size_t buffer_size, int flags)
++/* We need to start a transaction to maintain lock ordering */
++int reiserfs_xattr_set(struct inode *inode, const char *name,
++ const void *buffer, size_t buffer_size, int flags)
+ {
+- int err = __reiserfs_xattr_set(inode, name, buffer, buffer_size, flags);
+- if (err == -ENODATA)
+- err = 0;
+- return err;
++
++ struct reiserfs_transaction_handle th;
++ int error, error2;
++ size_t jbegin_count = reiserfs_xattr_nblocks(inode, buffer_size);
++
++ if (!(flags & XATTR_REPLACE))
++ jbegin_count += reiserfs_xattr_jcreate_nblocks(inode);
++
++ reiserfs_write_lock(inode->i_sb);
++ error = journal_begin(&th, inode->i_sb, jbegin_count);
++ if (error) {
++ reiserfs_write_unlock(inode->i_sb);
++ return error;
++ }
++
++ error = reiserfs_xattr_set_handle(&th, inode, name,
++ buffer, buffer_size, flags);
++
++ error2 = journal_end(&th, inode->i_sb, jbegin_count);
++ if (error == 0)
++ error = error2;
++ reiserfs_write_unlock(inode->i_sb);
++
++ return error;
+ }
+
+ /*
+--- a/include/linux/reiserfs_acl.h
++++ b/include/linux/reiserfs_acl.h
+@@ -49,7 +49,8 @@ static inline int reiserfs_acl_count(siz
+ #ifdef CONFIG_REISERFS_FS_POSIX_ACL
+ struct posix_acl *reiserfs_get_acl(struct inode *inode, int type);
+ int reiserfs_acl_chmod(struct inode *inode);
+-int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
++int reiserfs_inherit_default_acl(struct reiserfs_transaction_handle *th,
++ struct inode *dir, struct dentry *dentry,
+ struct inode *inode);
+ int reiserfs_cache_default_acl(struct inode *dir);
+ extern struct xattr_handler reiserfs_posix_acl_default_handler;
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1615,6 +1615,10 @@ struct reiserfs_journal_header {
+ #define JOURNAL_MAX_COMMIT_AGE 30
+ #define JOURNAL_MAX_TRANS_AGE 30
+ #define JOURNAL_PER_BALANCE_CNT (3 * (MAX_HEIGHT-2) + 9)
++#define JOURNAL_BLOCKS_PER_OBJECT(sb) (JOURNAL_PER_BALANCE_CNT * 3 + \
++ 2 * (REISERFS_QUOTA_INIT_BLOCKS(sb) + \
++ REISERFS_QUOTA_TRANS_BLOCKS(sb)))
++
+ #ifdef CONFIG_QUOTA
+ /* We need to update data and inode (atime) */
+ #define REISERFS_QUOTA_TRANS_BLOCKS(s) (REISERFS_SB(s)->s_mount_opt & (1<<REISERFS_QUOTA) ? 2 : 0)
+--- a/include/linux/reiserfs_xattr.h
++++ b/include/linux/reiserfs_xattr.h
+@@ -46,14 +46,50 @@ int reiserfs_removexattr(struct dentry *
+ int reiserfs_permission(struct inode *inode, int mask);
+
+ int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
+-int __reiserfs_xattr_set(struct inode *, const char *, const void *,
+- size_t, int);
+ int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
++int reiserfs_xattr_set_handle(struct reiserfs_transaction_handle *,
++ struct inode *, const char *, const void *,
++ size_t, int);
+
+ extern struct xattr_handler reiserfs_xattr_user_handler;
+ extern struct xattr_handler reiserfs_xattr_trusted_handler;
+ extern struct xattr_handler reiserfs_xattr_security_handler;
+
++#define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
++static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
++{
++ loff_t ret = 0;
++ if (reiserfs_file_data_log(inode)) {
++ ret = _ROUND_UP(xattr_size(size), inode->i_sb->s_blocksize);
++ ret >>= inode->i_sb->s_blocksize_bits;
++ }
++ return ret;
++}
++
++/* We may have to create up to 3 objects: xattr root, xattr dir, xattr file.
++ * Let's try to be smart about it.
++ * xattr root: We cache it. If it's not cached, we may need to create it.
++ * xattr dir: If anything has been loaded for this inode, we can set a flag
++ * saying so.
++ * xattr file: Since we don't cache xattrs, we can't tell. We always include
++ * blocks for it.
++ *
++ * However, since root and dir can be created between calls - YOU MUST SAVE
++ * THIS VALUE.
++ */
++static inline size_t reiserfs_xattr_jcreate_nblocks(struct inode *inode)
++{
++ size_t nblocks = JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
++
++ if ((REISERFS_I(inode)->i_flags & i_has_xattr_dir) == 0) {
++ nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
++ if (REISERFS_SB(inode->i_sb)->xattr_root == NULL)
++ nblocks += JOURNAL_BLOCKS_PER_OBJECT(inode->i_sb);
++ }
++
++ return nblocks;
++}
++
+ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
+ {
+ init_rwsem(&REISERFS_I(inode)->i_xattr_sem);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: use generic readdir for operations across all xattrs
+
+ The current reiserfs xattr implementation open codes reiserfs_readdir and
+ frees the path before calling the filldir function. Typically, the filldir
+ function is something that modifies the file system, such as a chown or
+ an inode deletion that also require reading of an inode associated with each
+ direntry. Since the file system is modified, the path retained becomes
+ invalid for the next run. In addition, it runs backwards in attempt to
+ minimize activity.
+
+ This is clearly suboptimal, both in terms of code cleanliness and
+ performance.
+
+ This patch implements a generic reiserfs_for_each_xattr that uses the generic
+ readdir and a specific filldir routine that simply populates an array of
+ dentries and then performs a specific operation on them. When all files have
+ been operated on, it then calls the operation on the directory itself.
+
+ The result is a noticeable code reduction and better performance.
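+
+ With the helper in place, the side-effect operations reduce to single
+ calls into reiserfs_for_each_xattr, as in the code below:
+
+     err = reiserfs_for_each_xattr(inode, delete_one_xattr, NULL);
+     err = reiserfs_for_each_xattr(inode, chown_one_xattr, attrs);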
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+--
+ fs/reiserfs/dir.c | 28 +--
+ fs/reiserfs/xattr.c | 402 ++++++++++++--------------------------------
+ include/linux/reiserfs_fs.h | 1
+ 3 files changed, 131 insertions(+), 300 deletions(-)
+
+--- a/fs/reiserfs/dir.c
++++ b/fs/reiserfs/dir.c
+@@ -41,10 +41,10 @@ static int reiserfs_dir_fsync(struct fil
+
+ #define store_ih(where,what) copy_item_head (where, what)
+
+-//
+-static int reiserfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
++int reiserfs_readdir_dentry(struct dentry *dentry, void *dirent,
++ filldir_t filldir, loff_t *pos)
+ {
+- struct inode *inode = filp->f_path.dentry->d_inode;
++ struct inode *inode = dentry->d_inode;
+ struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
+ INITIALIZE_PATH(path_to_entry);
+ struct buffer_head *bh;
+@@ -64,13 +64,9 @@ static int reiserfs_readdir(struct file
+
+ /* form key for search the next directory entry using f_pos field of
+ file structure */
+- make_cpu_key(&pos_key, inode,
+- (filp->f_pos) ? (filp->f_pos) : DOT_OFFSET, TYPE_DIRENTRY,
+- 3);
++ make_cpu_key(&pos_key, inode, *pos ?: DOT_OFFSET, TYPE_DIRENTRY, 3);
+ next_pos = cpu_key_k_offset(&pos_key);
+
+- /* reiserfs_warning (inode->i_sb, "reiserfs_readdir 1: f_pos = %Ld", filp->f_pos); */
+-
+ path_to_entry.reada = PATH_READA;
+ while (1) {
+ research:
+@@ -144,7 +140,7 @@ static int reiserfs_readdir(struct file
+ /* Ignore the .reiserfs_priv entry */
+ if (reiserfs_xattrs(inode->i_sb) &&
+ !old_format_only(inode->i_sb) &&
+- filp->f_path.dentry == inode->i_sb->s_root &&
++ dentry == inode->i_sb->s_root &&
+ REISERFS_SB(inode->i_sb)->priv_root &&
+ REISERFS_SB(inode->i_sb)->priv_root->d_inode
+ && deh_objectid(deh) ==
+@@ -156,7 +152,7 @@ static int reiserfs_readdir(struct file
+ }
+
+ d_off = deh_offset(deh);
+- filp->f_pos = d_off;
++ *pos = d_off;
+ d_ino = deh_objectid(deh);
+ if (d_reclen <= 32) {
+ local_buf = small_buf;
+@@ -223,15 +219,21 @@ static int reiserfs_readdir(struct file
+
+ } /* while */
+
+- end:
+- filp->f_pos = next_pos;
++end:
++ *pos = next_pos;
+ pathrelse(&path_to_entry);
+ reiserfs_check_path(&path_to_entry);
+- out:
++out:
+ reiserfs_write_unlock(inode->i_sb);
+ return ret;
+ }
+
++static int reiserfs_readdir(struct file *file, void *dirent, filldir_t filldir)
++{
++ struct dentry *dentry = file->f_path.dentry;
++ return reiserfs_readdir_dentry(dentry, dirent, filldir, &file->f_pos);
++}
++
+ /* compose directory item containing "." and ".." entries (entries are
+ not aligned to 4 byte boundary) */
+ /* the last four params are LE */
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -167,218 +167,65 @@ static struct dentry *open_xa_dir(const
+
+ }
+
+-/*
+- * this is very similar to fs/reiserfs/dir.c:reiserfs_readdir, but
+- * we need to drop the path before calling the filldir struct. That
+- * would be a big performance hit to the non-xattr case, so I've copied
+- * the whole thing for now. --clm
+- *
+- * the big difference is that I go backwards through the directory,
+- * and don't mess with f->f_pos, but the idea is the same. Do some
+- * action on each and every entry in the directory.
+- *
+- * we're called with i_mutex held, so there are no worries about the directory
+- * changing underneath us.
+- */
+-static int __xattr_readdir(struct inode *inode, void *dirent, filldir_t filldir)
+-{
+- struct cpu_key pos_key; /* key of current position in the directory (key of directory entry) */
+- INITIALIZE_PATH(path_to_entry);
+- struct buffer_head *bh;
+- int entry_num;
+- struct item_head *ih, tmp_ih;
+- int search_res;
+- char *local_buf;
+- loff_t next_pos;
+- char small_buf[32]; /* avoid kmalloc if we can */
+- struct reiserfs_de_head *deh;
+- int d_reclen;
+- char *d_name;
+- off_t d_off;
+- ino_t d_ino;
+- struct reiserfs_dir_entry de;
+-
+- /* form key for search the next directory entry using f_pos field of
+- file structure */
+- next_pos = max_reiserfs_offset(inode);
+-
+- while (1) {
+- research:
+- if (next_pos <= DOT_DOT_OFFSET)
+- break;
+- make_cpu_key(&pos_key, inode, next_pos, TYPE_DIRENTRY, 3);
+-
+- search_res =
+- search_by_entry_key(inode->i_sb, &pos_key, &path_to_entry,
+- &de);
+- if (search_res == IO_ERROR) {
+- // FIXME: we could just skip part of directory which could
+- // not be read
+- pathrelse(&path_to_entry);
+- return -EIO;
+- }
+-
+- if (search_res == NAME_NOT_FOUND)
+- de.de_entry_num--;
+-
+- set_de_name_and_namelen(&de);
+- entry_num = de.de_entry_num;
+- deh = &(de.de_deh[entry_num]);
+-
+- bh = de.de_bh;
+- ih = de.de_ih;
+-
+- if (!is_direntry_le_ih(ih)) {
+- reiserfs_error(inode->i_sb, "jdm-20000",
+- "not direntry %h", ih);
+- break;
+- }
+- copy_item_head(&tmp_ih, ih);
+-
+- /* we must have found item, that is item of this directory, */
+- RFALSE(COMP_SHORT_KEYS(&(ih->ih_key), &pos_key),
+- "vs-9000: found item %h does not match to dir we readdir %K",
+- ih, &pos_key);
+-
+- if (deh_offset(deh) <= DOT_DOT_OFFSET) {
+- break;
+- }
+-
+- /* look for the previous entry in the directory */
+- next_pos = deh_offset(deh) - 1;
+-
+- if (!de_visible(deh))
+- /* it is hidden entry */
+- continue;
+-
+- d_reclen = entry_length(bh, ih, entry_num);
+- d_name = B_I_DEH_ENTRY_FILE_NAME(bh, ih, deh);
+- d_off = deh_offset(deh);
+- d_ino = deh_objectid(deh);
+-
+- if (!d_name[d_reclen - 1])
+- d_reclen = strlen(d_name);
+-
+- if (d_reclen > REISERFS_MAX_NAME(inode->i_sb->s_blocksize)) {
+- /* too big to send back to VFS */
+- continue;
+- }
+-
+- /* Ignore the .reiserfs_priv entry */
+- if (reiserfs_xattrs(inode->i_sb) &&
+- !old_format_only(inode->i_sb) &&
+- deh_objectid(deh) ==
+- le32_to_cpu(INODE_PKEY
+- (REISERFS_SB(inode->i_sb)->priv_root->d_inode)->
+- k_objectid))
+- continue;
+-
+- if (d_reclen <= 32) {
+- local_buf = small_buf;
+- } else {
+- local_buf = kmalloc(d_reclen, GFP_NOFS);
+- if (!local_buf) {
+- pathrelse(&path_to_entry);
+- return -ENOMEM;
+- }
+- if (item_moved(&tmp_ih, &path_to_entry)) {
+- kfree(local_buf);
+-
+- /* sigh, must retry. Do this same offset again */
+- next_pos = d_off;
+- goto research;
+- }
+- }
+-
+- // Note, that we copy name to user space via temporary
+- // buffer (local_buf) because filldir will block if
+- // user space buffer is swapped out. At that time
+- // entry can move to somewhere else
+- memcpy(local_buf, d_name, d_reclen);
+-
+- /* the filldir function might need to start transactions,
+- * or do who knows what. Release the path now that we've
+- * copied all the important stuff out of the deh
+- */
+- pathrelse(&path_to_entry);
+-
+- if (filldir(dirent, local_buf, d_reclen, d_off, d_ino,
+- DT_UNKNOWN) < 0) {
+- if (local_buf != small_buf) {
+- kfree(local_buf);
+- }
+- goto end;
+- }
+- if (local_buf != small_buf) {
+- kfree(local_buf);
+- }
+- } /* while */
+-
+- end:
+- pathrelse(&path_to_entry);
+- return 0;
+-}
+-
+-/*
+- * this could be done with dedicated readdir ops for the xattr files,
+- * but I want to get something working asap
+- * this is stolen from vfs_readdir
+- *
+- */
+-static
+-int xattr_readdir(struct inode *inode, filldir_t filler, void *buf)
+-{
+- int res = -ENOENT;
+- if (!IS_DEADDIR(inode)) {
+- lock_kernel();
+- res = __xattr_readdir(inode, buf, filler);
+- unlock_kernel();
+- }
+- return res;
+-}
+-
+ /* The following are side effects of other operations that aren't explicitly
+ * modifying extended attributes. This includes operations such as permissions
+ * or ownership changes, object deletions, etc. */
++struct reiserfs_dentry_buf {
++ struct dentry *xadir;
++ int count;
++ struct dentry *dentries[8];
++};
+
+ static int
+-reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,
+- loff_t offset, u64 ino, unsigned int d_type)
++fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset,
++ u64 ino, unsigned int d_type)
+ {
+- struct dentry *xadir = (struct dentry *)buf;
++ struct reiserfs_dentry_buf *dbuf = buf;
+ struct dentry *dentry;
+- int err = 0;
+
+- dentry = lookup_one_len(name, xadir, namelen);
++ if (dbuf->count == ARRAY_SIZE(dbuf->dentries))
++ return -ENOSPC;
++
++ if (name[0] == '.' && (name[1] == '\0' ||
++ (name[1] == '.' && name[2] == '\0')))
++ return 0;
++
++ dentry = lookup_one_len(name, dbuf->xadir, namelen);
+ if (IS_ERR(dentry)) {
+- err = PTR_ERR(dentry);
+- goto out;
++ return PTR_ERR(dentry);
+ } else if (!dentry->d_inode) {
+- err = -ENODATA;
+- goto out_file;
++ /* A directory entry exists, but no file? */
++ reiserfs_error(dentry->d_sb, "xattr-20003",
++ "Corrupted directory: xattr %s listed but "
++ "not found for file %s.\n",
++ dentry->d_name.name, dbuf->xadir->d_name.name);
++ dput(dentry);
++ return -EIO;
+ }
+
+- /* Skip directories.. */
+- if (S_ISDIR(dentry->d_inode->i_mode))
+- goto out_file;
+-
+- err = xattr_unlink(xadir->d_inode, dentry);
+-
+-out_file:
+- dput(dentry);
++ dbuf->dentries[dbuf->count++] = dentry;
++ return 0;
++}
+
+-out:
+- return err;
++static void
++cleanup_dentry_buf(struct reiserfs_dentry_buf *buf)
++{
++ int i;
++ for (i = 0; i < buf->count; i++)
++ if (buf->dentries[i])
++ dput(buf->dentries[i]);
+ }
+
+-/* This is called w/ inode->i_mutex downed */
+-int reiserfs_delete_xattrs(struct inode *inode)
++static int reiserfs_for_each_xattr(struct inode *inode,
++ int (*action)(struct dentry *, void *),
++ void *data)
+ {
+- int err = -ENODATA;
+- struct dentry *dir, *root;
+- struct reiserfs_transaction_handle th;
+- int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
+- 4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
++ struct dentry *dir;
++ int i, err = 0;
++ loff_t pos = 0;
++ struct reiserfs_dentry_buf buf = {
++ .count = 0,
++ };
+
+ /* Skip out, an xattr has no xattrs associated with it */
+ if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
+@@ -389,117 +236,97 @@ int reiserfs_delete_xattrs(struct inode
+ err = PTR_ERR(dir);
+ goto out;
+ } else if (!dir->d_inode) {
+- dput(dir);
+- goto out;
++ err = 0;
++ goto out_dir;
+ }
+
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = xattr_readdir(dir->d_inode, reiserfs_delete_xattrs_filler, dir);
+- mutex_unlock(&dir->d_inode->i_mutex);
+- if (err) {
+- dput(dir);
+- goto out;
++ buf.xadir = dir;
++ err = reiserfs_readdir_dentry(dir, &buf, fill_with_dentries, &pos);
++ while ((err == 0 || err == -ENOSPC) && buf.count) {
++ err = 0;
++
++ for (i = 0; i < buf.count && buf.dentries[i]; i++) {
++ int lerr = 0;
++ struct dentry *dentry = buf.dentries[i];
++
++ if (err == 0 && !S_ISDIR(dentry->d_inode->i_mode))
++ lerr = action(dentry, data);
++
++ dput(dentry);
++ buf.dentries[i] = NULL;
++ err = lerr ?: err;
++ }
++ buf.count = 0;
++ if (!err)
++ err = reiserfs_readdir_dentry(dir, &buf,
++ fill_with_dentries, &pos);
+ }
++ mutex_unlock(&dir->d_inode->i_mutex);
+
+- root = dget(dir->d_parent);
+- dput(dir);
++ /* Clean up after a failed readdir */
++ cleanup_dentry_buf(&buf);
+
+-	/* We start a transaction here to avoid an ABBA situation
+- * between the xattr root's i_mutex and the journal lock.
+- * Inode creation will inherit an ACL, which requires a
+- * lookup. The lookup locks the xattr root i_mutex with a
+-	 * transaction open.  Inode deletion takes the xattr root
+- * i_mutex to delete the directory and then starts a
+- * transaction inside it. Boom. This doesn't incur much
+- * additional overhead since the reiserfs_rmdir transaction
+- * will just nest inside the outer transaction. */
+- err = journal_begin(&th, inode->i_sb, blocks);
+ if (!err) {
+- int jerror;
+- mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = xattr_rmdir(root->d_inode, dir);
+- jerror = journal_end(&th, inode->i_sb, blocks);
+- mutex_unlock(&root->d_inode->i_mutex);
+- err = jerror ?: err;
++		/* We start a transaction here to avoid an ABBA situation
++ * between the xattr root's i_mutex and the journal lock.
++ * This doesn't incur much additional overhead since the
++ * new transaction will just nest inside the
++ * outer transaction. */
++ int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
++ 4 * REISERFS_QUOTA_TRANS_BLOCKS(inode->i_sb);
++ struct reiserfs_transaction_handle th;
++ err = journal_begin(&th, inode->i_sb, blocks);
++ if (!err) {
++ int jerror;
++ mutex_lock_nested(&dir->d_parent->d_inode->i_mutex,
++ I_MUTEX_XATTR);
++ err = action(dir, data);
++ jerror = journal_end(&th, inode->i_sb, blocks);
++ mutex_unlock(&dir->d_parent->d_inode->i_mutex);
++ err = jerror ?: err;
++ }
+ }
+-
+- dput(root);
++out_dir:
++ dput(dir);
+ out:
+- if (err)
+- reiserfs_warning(inode->i_sb, "jdm-20004",
+- "Couldn't remove all xattrs (%d)\n", err);
++ /* -ENODATA isn't an error */
++ if (err == -ENODATA)
++ err = 0;
+ return err;
+ }
+
+-struct reiserfs_chown_buf {
+- struct inode *inode;
+- struct dentry *xadir;
+- struct iattr *attrs;
+-};
+-
+-/* XXX: If there is a better way to do this, I'd love to hear about it */
+-static int
+-reiserfs_chown_xattrs_filler(void *buf, const char *name, int namelen,
+- loff_t offset, u64 ino, unsigned int d_type)
++static int delete_one_xattr(struct dentry *dentry, void *data)
+ {
+- struct reiserfs_chown_buf *chown_buf = (struct reiserfs_chown_buf *)buf;
+- struct dentry *xafile, *xadir = chown_buf->xadir;
+- struct iattr *attrs = chown_buf->attrs;
+- int err = 0;
++ struct inode *dir = dentry->d_parent->d_inode;
+
+- xafile = lookup_one_len(name, xadir, namelen);
+- if (IS_ERR(xafile))
+- return PTR_ERR(xafile);
+- else if (!xafile->d_inode) {
+- dput(xafile);
+- return -ENODATA;
+- }
++ /* This is the xattr dir, handle specially. */
++ if (S_ISDIR(dentry->d_inode->i_mode))
++ return xattr_rmdir(dir, dentry);
+
+- if (!S_ISDIR(xafile->d_inode->i_mode)) {
+- mutex_lock_nested(&xafile->d_inode->i_mutex, I_MUTEX_CHILD);
+- err = reiserfs_setattr(xafile, attrs);
+- mutex_unlock(&xafile->d_inode->i_mutex);
+- }
+- dput(xafile);
++ return xattr_unlink(dir, dentry);
++}
++
++static int chown_one_xattr(struct dentry *dentry, void *data)
++{
++ struct iattr *attrs = data;
++ return reiserfs_setattr(dentry, attrs);
++}
+
++/* No i_mutex, but the inode is unconnected. */
++int reiserfs_delete_xattrs(struct inode *inode)
++{
++ int err = reiserfs_for_each_xattr(inode, delete_one_xattr, NULL);
++ if (err)
++ reiserfs_warning(inode->i_sb, "jdm-20004",
++ "Couldn't delete all xattrs (%d)\n", err);
+ return err;
+ }
+
++/* inode->i_mutex: down */
+ int reiserfs_chown_xattrs(struct inode *inode, struct iattr *attrs)
+ {
+- struct dentry *dir;
+- int err = 0;
+- struct reiserfs_chown_buf buf;
+- unsigned int ia_valid = attrs->ia_valid;
+-
+- /* Skip out, an xattr has no xattrs associated with it */
+- if (IS_PRIVATE(inode) || get_inode_sd_version(inode) == STAT_DATA_V1)
+- return 0;
+-
+- dir = open_xa_dir(inode, XATTR_REPLACE);
+- if (IS_ERR(dir)) {
+- if (PTR_ERR(dir) != -ENODATA)
+- err = PTR_ERR(dir);
+- goto out;
+- } else if (!dir->d_inode)
+- goto out_dir;
+-
+- attrs->ia_valid &= (ATTR_UID | ATTR_GID | ATTR_CTIME);
+- buf.xadir = dir;
+- buf.attrs = attrs;
+- buf.inode = inode;
+-
+- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = xattr_readdir(dir->d_inode, reiserfs_chown_xattrs_filler, &buf);
+-
+- if (!err)
+- err = reiserfs_setattr(dir, attrs);
+- mutex_unlock(&dir->d_inode->i_mutex);
+-
+- attrs->ia_valid = ia_valid;
+-out_dir:
+- dput(dir);
+-out:
++ int err = reiserfs_for_each_xattr(inode, chown_one_xattr, attrs);
+ if (err)
+ reiserfs_warning(inode->i_sb, "jdm-20007",
+ "Couldn't chown all xattrs (%d)\n", err);
+@@ -1004,6 +831,7 @@ ssize_t reiserfs_listxattr(struct dentry
+ {
+ struct dentry *dir;
+ int err = 0;
++ loff_t pos = 0;
+ struct listxattr_buf buf = {
+ .inode = dentry->d_inode,
+ .buf = buffer,
+@@ -1026,7 +854,7 @@ ssize_t reiserfs_listxattr(struct dentry
+ }
+
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = xattr_readdir(dir->d_inode, listxattr_filler, &buf);
++ err = reiserfs_readdir_dentry(dir, &buf, listxattr_filler, &pos);
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+ if (!err)
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1984,6 +1984,7 @@ extern const struct inode_operations rei
+ extern const struct inode_operations reiserfs_symlink_inode_operations;
+ extern const struct inode_operations reiserfs_special_inode_operations;
+ extern const struct file_operations reiserfs_dir_operations;
++int reiserfs_readdir_dentry(struct dentry *, void *, filldir_t, loff_t *);
+
+ /* tail_conversion.c */
+ int direct2indirect(struct reiserfs_transaction_handle *, struct inode *,
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: make per-inode xattr locking more fine grained
+
+ The per-inode locking can be made more fine-grained to surround just the
+ interaction with the filesystem itself. This really only applies to protecting
+ reads during a write, since concurrent writes are barred with inode->i_mutex
+ at the vfs level.
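+
+ Concretely (a sketch of the xattr.c changes below), a reader now
+ serializes against a writer only around the file I/O itself:
+
+     down_read(&REISERFS_I(inode)->i_xattr_sem);
+     /* ... read the xattr file contents ... */
+     up_read(&REISERFS_I(inode)->i_xattr_sem);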
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+--
+ fs/reiserfs/xattr.c | 114 +++++++++++++++++++----------------------
+ fs/reiserfs/xattr_acl.c | 7 --
+ include/linux/reiserfs_fs_i.h | 2
+ include/linux/reiserfs_xattr.h | 22 -------
+ 4 files changed, 55 insertions(+), 90 deletions(-)
+
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -418,9 +418,7 @@ int reiserfs_cache_default_acl(struct in
+ int ret = 0;
+ if (reiserfs_posixacl(inode->i_sb) && !IS_PRIVATE(inode)) {
+ struct posix_acl *acl;
+- reiserfs_read_lock_xattr_i(inode);
+ acl = reiserfs_get_acl(inode, ACL_TYPE_DEFAULT);
+- reiserfs_read_unlock_xattr_i(inode);
+ ret = (acl && !IS_ERR(acl));
+ if (ret)
+ posix_acl_release(acl);
+@@ -452,11 +450,8 @@ int reiserfs_acl_chmod(struct inode *ino
+ if (!clone)
+ return -ENOMEM;
+ error = posix_acl_chmod_masq(clone, inode->i_mode);
+- if (!error) {
+- reiserfs_write_lock_xattr_i(inode);
++ if (!error)
+ error = reiserfs_set_acl(inode, ACL_TYPE_ACCESS, clone);
+- reiserfs_write_unlock_xattr_i(inode);
+- }
+ posix_acl_release(clone);
+ return error;
+ }
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -29,10 +29,8 @@
+ * to the inode so that unnecessary lookups are avoided.
+ *
+ * Locking works like so:
+- * The xattr root (/.reiserfs_priv/xattrs) is protected by its i_mutex.
+- * The xattr dir (/.reiserfs_priv/xattrs/<oid>.<gen>) is protected by
+- * inode->xattr_sem.
+- * The xattrs themselves are likewise protected by the xattr_sem.
++ * Directory components (xattr root, xattr dir) are protected by their i_mutex.
++ * The xattrs themselves are protected by the xattr_sem.
+ */
+
+ #include <linux/reiserfs_fs.h>
+@@ -55,6 +53,8 @@
+ #define PRIVROOT_NAME ".reiserfs_priv"
+ #define XAROOT_NAME "xattrs"
+
++static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char *);
++
+ /* Helpers for inode ops. We do this so that we don't have all the VFS
+ * overhead and also for proper i_mutex annotation.
+ * dir->i_mutex must be held for all of them. */
+@@ -339,12 +339,14 @@ int xattr_readdir(struct inode *inode, f
+ return res;
+ }
+
++/* expects xadir->d_inode->i_mutex to be locked */
+ static int
+ __reiserfs_xattr_del(struct dentry *xadir, const char *name, int namelen)
+ {
+ struct dentry *dentry;
+ struct inode *dir = xadir->d_inode;
+ int err = 0;
++ struct reiserfs_xattr_handler *xah;
+
+ dentry = lookup_one_len(name, xadir, namelen);
+ if (IS_ERR(dentry)) {
+@@ -372,6 +374,14 @@ __reiserfs_xattr_del(struct dentry *xadi
+ return -EIO;
+ }
+
++ /* Deletion pre-operation */
++ xah = find_xattr_handler_prefix(name);
++ if (xah && xah->del) {
++ err = xah->del(dentry->d_inode, name);
++ if (err)
++ goto out;
++ }
++
+ err = xattr_unlink(dir, dentry);
+
+ out_file:
+@@ -398,7 +408,7 @@ reiserfs_delete_xattrs_filler(void *buf,
+ /* This is called w/ inode->i_mutex downed */
+ int reiserfs_delete_xattrs(struct inode *inode)
+ {
+- int err = 0;
++ int err = -ENODATA;
+ struct dentry *dir, *root;
+ struct reiserfs_transaction_handle th;
+ int blocks = JOURNAL_PER_BALANCE_CNT * 2 + 2 +
+@@ -414,14 +424,19 @@ int reiserfs_delete_xattrs(struct inode
+ goto out;
+ } else if (!dir->d_inode) {
+ dput(dir);
+- return 0;
++ goto out;
+ }
+
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_delete_xattrs_filler, dir);
+ mutex_unlock(&dir->d_inode->i_mutex);
+- if (err)
+- goto out_dir;
++ if (err) {
++ dput(dir);
++ goto out;
++ }
++
++ root = dget(dir->d_parent);
++ dput(dir);
+
+ 	/* We start a transaction here to avoid an ABBA situation
+ * between the xattr root's i_mutex and the journal lock.
+@@ -435,19 +450,14 @@ int reiserfs_delete_xattrs(struct inode
+ err = journal_begin(&th, inode->i_sb, blocks);
+ if (!err) {
+ int jerror;
+- root = dget(dir->d_parent);
+ mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_rmdir(root->d_inode, dir);
+ jerror = journal_end(&th, inode->i_sb, blocks);
+ mutex_unlock(&root->d_inode->i_mutex);
+- dput(root);
+-
+ err = jerror ?: err;
+ }
+
+-out_dir:
+- dput(dir);
+-
++ dput(root);
+ out:
+ if (!err)
+ REISERFS_I(inode)->i_flags =
+@@ -484,7 +494,7 @@ reiserfs_chown_xattrs_filler(void *buf,
+
+ if (!S_ISDIR(xafile->d_inode->i_mode)) {
+ mutex_lock_nested(&xafile->d_inode->i_mutex, I_MUTEX_CHILD);
+- err = notify_change(xafile, attrs);
++ err = reiserfs_setattr(xafile, attrs);
+ mutex_unlock(&xafile->d_inode->i_mutex);
+ }
+ dput(xafile);
+@@ -520,13 +530,16 @@ int reiserfs_chown_xattrs(struct inode *
+ err = xattr_readdir(dir->d_inode, reiserfs_chown_xattrs_filler, &buf);
+
+ if (!err)
+- err = notify_change(dir, attrs);
++ err = reiserfs_setattr(dir, attrs);
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+ attrs->ia_valid = ia_valid;
+ out_dir:
+ dput(dir);
+ out:
++ if (err)
++ reiserfs_warning(inode->i_sb, "jdm-20007",
++ "Couldn't chown all xattrs (%d)\n", err);
+ return err;
+ }
+
+@@ -635,9 +648,8 @@ reiserfs_xattr_set(struct inode *inode,
+ if (get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- /* Empty xattrs are ok, they're just empty files, no hash */
+- if (buffer && buffer_size)
+- xahash = xattr_hash(buffer, buffer_size);
++ if (!buffer)
++ return reiserfs_xattr_del(inode, name);
+
+ dentry = get_xa_file_dentry(inode, name, flags);
+ if (IS_ERR(dentry)) {
+@@ -645,13 +657,19 @@ reiserfs_xattr_set(struct inode *inode,
+ goto out;
+ }
+
++ down_write(&REISERFS_I(inode)->i_xattr_sem);
++
++ xahash = xattr_hash(buffer, buffer_size);
+ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+ /* Resize it so we're ok to write there */
+ newattrs.ia_size = buffer_size;
++ newattrs.ia_ctime = current_fs_time(inode->i_sb);
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = notify_change(dentry, &newattrs);
++ down_write(&dentry->d_inode->i_alloc_sem);
++ err = reiserfs_setattr(dentry, &newattrs);
++ up_write(&dentry->d_inode->i_alloc_sem);
+ mutex_unlock(&dentry->d_inode->i_mutex);
+ if (err)
+ goto out_filp;
+@@ -712,6 +730,7 @@ reiserfs_xattr_set(struct inode *inode,
+ }
+
+ out_filp:
++ up_write(&REISERFS_I(inode)->i_xattr_sem);
+ dput(dentry);
+
+ out:
+@@ -747,10 +766,7 @@ reiserfs_xattr_get(const struct inode *i
+ goto out;
+ }
+
+- /* protect against concurrent access. xattrs are backed by
+- * regular files, but they're not regular files. The updates
+- * must be atomic from the perspective of the user. */
+- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
++ down_read(&REISERFS_I(inode)->i_xattr_sem);
+
+ isize = i_size_read(dentry->d_inode);
+ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+@@ -758,12 +774,12 @@ reiserfs_xattr_get(const struct inode *i
+ /* Just return the size needed */
+ if (buffer == NULL) {
+ err = isize - sizeof(struct reiserfs_xattr_header);
+- goto out_dput;
++ goto out_unlock;
+ }
+
+ if (buffer_size < isize - sizeof(struct reiserfs_xattr_header)) {
+ err = -ERANGE;
+- goto out_dput;
++ goto out_unlock;
+ }
+
+ while (file_pos < isize) {
+@@ -778,7 +794,7 @@ reiserfs_xattr_get(const struct inode *i
+ page = reiserfs_get_page(dentry->d_inode, file_pos);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+- goto out_dput;
++ goto out_unlock;
+ }
+
+ lock_page(page);
+@@ -797,7 +813,7 @@ reiserfs_xattr_get(const struct inode *i
+ "associated with %k", name,
+ INODE_PKEY(inode));
+ err = -EIO;
+- goto out_dput;
++ goto out_unlock;
+ }
+ hash = le32_to_cpu(rxh->h_hash);
+ }
+@@ -818,8 +834,8 @@ reiserfs_xattr_get(const struct inode *i
+ err = -EIO;
+ }
+
+-out_dput:
+- mutex_unlock(&dentry->d_inode->i_mutex);
++out_unlock:
++ up_read(&REISERFS_I(inode)->i_xattr_sem);
+ dput(dentry);
+
+ out:
+@@ -852,8 +868,6 @@ int reiserfs_xattr_del(struct inode *ino
+ }
+
+ /* Actual operations that are exported to VFS-land */
+-
+-static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char *);
+ /*
+ * Inode operation getxattr()
+ */
+@@ -868,9 +882,7 @@ reiserfs_getxattr(struct dentry * dentry
+ get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- reiserfs_read_lock_xattr_i(dentry->d_inode);
+ err = xah->get(dentry->d_inode, name, buffer, size);
+- reiserfs_read_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+
+@@ -890,9 +902,7 @@ reiserfs_setxattr(struct dentry *dentry,
+ get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- reiserfs_write_lock_xattr_i(dentry->d_inode);
+ err = xah->set(dentry->d_inode, name, value, size, flags);
+- reiserfs_write_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+
+@@ -910,21 +920,11 @@ int reiserfs_removexattr(struct dentry *
+ get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- reiserfs_write_lock_xattr_i(dentry->d_inode);
+- /* Deletion pre-operation */
+- if (xah->del) {
+- err = xah->del(dentry->d_inode, name);
+- if (err)
+- goto out;
+- }
+-
+ err = reiserfs_xattr_del(dentry->d_inode, name);
+
+ dentry->d_inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(dentry->d_inode);
+
+- out:
+- reiserfs_write_unlock_xattr_i(dentry->d_inode);
+ return err;
+ }
+
+@@ -986,7 +986,6 @@ ssize_t reiserfs_listxattr(struct dentry
+ get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- reiserfs_read_lock_xattr_i(dentry->d_inode);
+ dir = open_xa_dir(dentry->d_inode, XATTR_REPLACE);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+@@ -1005,19 +1004,16 @@ ssize_t reiserfs_listxattr(struct dentry
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_listxattr_filler, &buf);
+ mutex_unlock(&dir->d_inode->i_mutex);
+- if (err)
+- goto out_dir;
+
+- if (buf.r_pos > buf.r_size && buffer != NULL)
+- err = -ERANGE;
+- else
+- err = buf.r_pos;
++ if (!err) {
++ if (buf.r_pos > buf.r_size && buffer != NULL)
++ err = -ERANGE;
++ else
++ err = buf.r_pos;
++ }
+
+- out_dir:
+ dput(dir);
+-
+- out:
+- reiserfs_read_unlock_xattr_i(dentry->d_inode);
++out:
+ return err;
+ }
+
+@@ -1115,12 +1111,8 @@ static int reiserfs_check_acl(struct ino
+ struct posix_acl *acl;
+ int error = -EAGAIN; /* do regular unix permission checks by default */
+
+- reiserfs_read_lock_xattr_i(inode);
+-
+ acl = reiserfs_get_acl(inode, ACL_TYPE_ACCESS);
+
+- reiserfs_read_unlock_xattr_i(inode);
+-
+ if (acl) {
+ if (!IS_ERR(acl)) {
+ error = posix_acl_permission(inode, acl, mask);
+--- a/include/linux/reiserfs_fs_i.h
++++ b/include/linux/reiserfs_fs_i.h
+@@ -59,7 +59,7 @@ struct reiserfs_inode_info {
+ struct posix_acl *i_acl_default;
+ #endif
+ #ifdef CONFIG_REISERFS_FS_XATTR
+- struct rw_semaphore xattr_sem;
++ struct rw_semaphore i_xattr_sem;
+ #endif
+ struct inode vfs_inode;
+ };
+--- a/include/linux/reiserfs_xattr.h
++++ b/include/linux/reiserfs_xattr.h
+@@ -67,24 +67,6 @@ extern struct reiserfs_xattr_handler use
+ extern struct reiserfs_xattr_handler trusted_handler;
+ extern struct reiserfs_xattr_handler security_handler;
+
+-static inline void reiserfs_write_lock_xattr_i(struct inode *inode)
+-{
+- down_write(&REISERFS_I(inode)->i_xattr_sem);
+-}
+-static inline void reiserfs_write_unlock_xattr_i(struct inode *inode)
+-{
+- up_write(&REISERFS_I(inode)->i_xattr_sem);
+-}
+-static inline void reiserfs_read_lock_xattr_i(struct inode *inode)
+-{
+- down_read(&REISERFS_I(inode)->i_xattr_sem);
+-}
+-
+-static inline void reiserfs_read_unlock_xattr_i(struct inode *inode)
+-{
+- up_read(&REISERFS_I(inode)->i_xattr_sem);
+-}
+-
+ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
+ {
+ init_rwsem(&REISERFS_I(inode)->i_xattr_sem);
+@@ -96,10 +78,6 @@ static inline void reiserfs_init_xattr_r
+ #define reiserfs_setxattr NULL
+ #define reiserfs_listxattr NULL
+ #define reiserfs_removexattr NULL
+-#define reiserfs_write_lock_xattrs(sb) do {;} while(0)
+-#define reiserfs_write_unlock_xattrs(sb) do {;} while(0)
+-#define reiserfs_read_lock_xattrs(sb)
+-#define reiserfs_read_unlock_xattrs(sb)
+
+ #define reiserfs_permission NULL
+
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rearrange journal abort
+
+ This patch kills off reiserfs_journal_abort as it is never called, and
+ combines __reiserfs_journal_abort_{soft,hard} into one function called
+ reiserfs_abort_journal, which performs the same work. Unlike the old
+ version, it is silent, since the message was always issued
+ after a regular 'abort' message.
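+
+ Callers such as reiserfs_abort() now use the single entry point, as in
+ the prints.c hunk below:
+
+     sb->s_flags |= MS_RDONLY;
+     reiserfs_abort_journal(sb, errno);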
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/journal.c | 23 ++++-------------------
+ fs/reiserfs/prints.c | 2 +-
+ include/linux/reiserfs_fs.h | 2 +-
+ 3 files changed, 6 insertions(+), 21 deletions(-)
+
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -4292,14 +4292,15 @@ static int do_journal_end(struct reiserf
+ return journal->j_errno;
+ }
+
+-static void __reiserfs_journal_abort_hard(struct super_block *sb)
++/* Send the file system read only and refuse new transactions */
++void reiserfs_abort_journal(struct super_block *sb, int errno)
+ {
+ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ if (test_bit(J_ABORTED, &journal->j_state))
+ return;
+
+- printk(KERN_CRIT "REISERFS: Aborting journal for filesystem on %s\n",
+- reiserfs_bdevname(sb));
++ if (!journal->j_errno)
++ journal->j_errno = errno;
+
+ sb->s_flags |= MS_RDONLY;
+ set_bit(J_ABORTED, &journal->j_state);
+@@ -4309,19 +4310,3 @@ static void __reiserfs_journal_abort_har
+ #endif
+ }
+
+-static void __reiserfs_journal_abort_soft(struct super_block *sb, int errno)
+-{
+- struct reiserfs_journal *journal = SB_JOURNAL(sb);
+- if (test_bit(J_ABORTED, &journal->j_state))
+- return;
+-
+- if (!journal->j_errno)
+- journal->j_errno = errno;
+-
+- __reiserfs_journal_abort_hard(sb);
+-}
+-
+-void reiserfs_journal_abort(struct super_block *sb, int errno)
+-{
+- __reiserfs_journal_abort_soft(sb, errno);
+-}
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -389,7 +389,7 @@ void reiserfs_abort(struct super_block *
+ error_buf);
+
+ sb->s_flags |= MS_RDONLY;
+- reiserfs_journal_abort(sb, errno);
++ reiserfs_abort_journal(sb, errno);
+ }
+
+ /* this prints internal nodes (4 keys/items in line) (dc_number,
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1771,7 +1771,7 @@ int journal_begin(struct reiserfs_transa
+ struct super_block *p_s_sb, unsigned long);
+ int journal_join_abort(struct reiserfs_transaction_handle *,
+ struct super_block *p_s_sb, unsigned long);
+-void reiserfs_journal_abort(struct super_block *sb, int errno);
++void reiserfs_abort_journal(struct super_block *sb, int errno);
+ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
+ int reiserfs_allocate_list_bitmaps(struct super_block *s,
+ struct reiserfs_list_bitmap *, unsigned int);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rework reiserfs_warning
+
+ ReiserFS warnings can be somewhat inconsistent.
+ In some cases:
+ * a unique identifier may be associated with the message
+ * the function name may be included
+ * the device may be printed separately
+
+ This patch aims to make warnings more consistent. reiserfs_warning() prints
+ the device name, so printing it a second time is not required. The function
+ name for a warning is always helpful in debugging, so it is now automatically
+ inserted into the output. Hans has stated that every warning should have
+ a unique identifier. Some cases lack them, others really shouldn't have them.
+ reiserfs_warning() now expects an id associated with each message. In the
+ rare case where one isn't needed, "" will suffice.
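+
+ The include/linux/reiserfs_fs.h side of this change is only summarized
+ in the diffstat below; presumably reiserfs_warning() becomes a wrapper
+ macro along these lines, so that __func__ is supplied automatically (a
+ sketch of the assumed header change, not the literal hunk):
+
+	void __reiserfs_warning(struct super_block *sb, const char *id,
+				const char *function, const char *fmt, ...);
+	#define reiserfs_warning(sb, id, fmt, args...) \
+		__reiserfs_warning(sb, id, __func__, fmt, ##args)
+
+ With that, a call such as
+
+	reiserfs_warning(s, "vs-4010",
+			 "block number is out of range %lu (%u)",
+			 block, SB_BLOCK_COUNT(s));
+
+ prints, per the prints.c change below:
+
+	REISERFS warning (device sda1): vs-4010 is_reusable: block number is out of range 12345 (8192)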
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/bitmap.c | 52 +++-----
+ fs/reiserfs/do_balan.c | 40 +++---
+ fs/reiserfs/file.c | 2
+ fs/reiserfs/fix_node.c | 14 +-
+ fs/reiserfs/inode.c | 60 ++++-----
+ fs/reiserfs/item_ops.c | 60 +++++----
+ fs/reiserfs/journal.c | 174 +++++++++++++++-------------
+ fs/reiserfs/lbalance.c | 12 +
+ fs/reiserfs/namei.c | 45 +++----
+ fs/reiserfs/objectid.c | 5
+ fs/reiserfs/prints.c | 11 +
+ fs/reiserfs/procfs.c | 5
+ fs/reiserfs/stree.c | 107 ++++++++---------
+ fs/reiserfs/super.c | 257 ++++++++++++++++++++++--------------------
+ fs/reiserfs/tail_conversion.c | 6
+ fs/reiserfs/xattr.c | 21 ++-
+ include/linux/reiserfs_fs.h | 9 -
+ 17 files changed, 454 insertions(+), 426 deletions(-)
+
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -64,8 +64,8 @@ int is_reusable(struct super_block *s, b
+ unsigned int bmap_count = reiserfs_bmap_count(s);
+
+ if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
+- reiserfs_warning(s,
+- "vs-4010: is_reusable: block number is out of range %lu (%u)",
++ reiserfs_warning(s, "vs-4010",
++ "block number is out of range %lu (%u)",
+ block, SB_BLOCK_COUNT(s));
+ return 0;
+ }
+@@ -79,30 +79,29 @@ int is_reusable(struct super_block *s, b
+ b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
+ if (block >= bmap1 &&
+ block <= bmap1 + bmap_count) {
+- reiserfs_warning(s, "vs: 4019: is_reusable: "
+- "bitmap block %lu(%u) can't be freed or reused",
++ reiserfs_warning(s, "vs-4019", "bitmap block %lu(%u) "
++ "can't be freed or reused",
+ block, bmap_count);
+ return 0;
+ }
+ } else {
+ if (offset == 0) {
+- reiserfs_warning(s, "vs: 4020: is_reusable: "
+- "bitmap block %lu(%u) can't be freed or reused",
++ reiserfs_warning(s, "vs-4020", "bitmap block %lu(%u) "
++ "can't be freed or reused",
+ block, bmap_count);
+ return 0;
+ }
+ }
+
+ if (bmap >= bmap_count) {
+- reiserfs_warning(s,
+- "vs-4030: is_reusable: there is no so many bitmap blocks: "
+- "block=%lu, bitmap_nr=%u", block, bmap);
++ reiserfs_warning(s, "vs-4030", "bitmap for requested block "
++ "is out of range: block=%lu, bitmap_nr=%u",
++ block, bmap);
+ return 0;
+ }
+
+ if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
+- reiserfs_warning(s,
+- "vs-4050: is_reusable: this is root block (%u), "
++ reiserfs_warning(s, "vs-4050", "this is root block (%u), "
+ "it must be busy", SB_ROOT_BLOCK(s));
+ return 0;
+ }
+@@ -154,8 +153,8 @@ static int scan_bitmap_block(struct reis
+ /* - I mean `a window of zero bits' as in description of this function - Zam. */
+
+ if (!bi) {
+- reiserfs_warning(s, "NULL bitmap info pointer for bitmap %d",
+- bmap_n);
++ reiserfs_warning(s, "jdm-4055", "NULL bitmap info pointer "
++ "for bitmap %d", bmap_n);
+ return 0;
+ }
+
+@@ -400,11 +399,8 @@ static void _reiserfs_free_block(struct
+ get_bit_address(s, block, &nr, &offset);
+
+ if (nr >= reiserfs_bmap_count(s)) {
+- reiserfs_warning(s, "vs-4075: reiserfs_free_block: "
+- "block %lu is out of range on %s "
+- "(nr=%u,max=%u)", block,
+- reiserfs_bdevname(s), nr,
+- reiserfs_bmap_count(s));
++ reiserfs_warning(s, "vs-4075", "block %lu is out of range",
++ block);
+ return;
+ }
+
+@@ -416,9 +412,8 @@ static void _reiserfs_free_block(struct
+
+ /* clear bit for the given block in bit map */
+ if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
+- reiserfs_warning(s, "vs-4080: reiserfs_free_block: "
+- "free_block (%s:%lu)[dev:blocknr]: bit already cleared",
+- reiserfs_bdevname(s), block);
++ reiserfs_warning(s, "vs-4080",
++ "block %lu: bit already cleared", block);
+ }
+ apbi[nr].free_count++;
+ journal_mark_dirty(th, s, bmbh);
+@@ -477,9 +472,8 @@ static void __discard_prealloc(struct re
+ BUG_ON(!th->t_trans_id);
+ #ifdef CONFIG_REISERFS_CHECK
+ if (ei->i_prealloc_count < 0)
+- reiserfs_warning(th->t_super,
+- "zam-4001:%s: inode has negative prealloc blocks count.",
+- __func__);
++ reiserfs_warning(th->t_super, "zam-4001",
++ "inode has negative prealloc blocks count.");
+ #endif
+ while (ei->i_prealloc_count > 0) {
+ reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+@@ -515,9 +509,9 @@ void reiserfs_discard_all_prealloc(struc
+ i_prealloc_list);
+ #ifdef CONFIG_REISERFS_CHECK
+ if (!ei->i_prealloc_count) {
+- reiserfs_warning(th->t_super,
+- "zam-4001:%s: inode is in prealloc list but has no preallocated blocks.",
+- __func__);
++ reiserfs_warning(th->t_super, "zam-4001",
++ "inode is in prealloc list but has "
++ "no preallocated blocks.");
+ }
+ #endif
+ __discard_prealloc(th, ei);
+@@ -631,8 +625,8 @@ int reiserfs_parse_alloc_options(struct
+ continue;
+ }
+
+- reiserfs_warning(s, "zam-4001: %s : unknown option - %s",
+- __func__, this_char);
++ reiserfs_warning(s, "zam-4001", "unknown option - %s",
++ this_char);
+ return 1;
+ }
+
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -1752,15 +1752,16 @@ static void store_thrown(struct tree_bal
+ int i;
+
+ if (buffer_dirty(bh))
+- reiserfs_warning(tb->tb_sb,
+- "store_thrown deals with dirty buffer");
++ reiserfs_warning(tb->tb_sb, "reiserfs-12320",
++ "called with dirty buffer");
+ for (i = 0; i < ARRAY_SIZE(tb->thrown); i++)
+ if (!tb->thrown[i]) {
+ tb->thrown[i] = bh;
+ get_bh(bh); /* free_thrown puts this */
+ return;
+ }
+- reiserfs_warning(tb->tb_sb, "store_thrown: too many thrown buffers");
++ reiserfs_warning(tb->tb_sb, "reiserfs-12321",
++ "too many thrown buffers");
+ }
+
+ static void free_thrown(struct tree_balance *tb)
+@@ -1771,8 +1772,8 @@ static void free_thrown(struct tree_bala
+ if (tb->thrown[i]) {
+ blocknr = tb->thrown[i]->b_blocknr;
+ if (buffer_dirty(tb->thrown[i]))
+- reiserfs_warning(tb->tb_sb,
+- "free_thrown deals with dirty buffer %d",
++ reiserfs_warning(tb->tb_sb, "reiserfs-12322",
++ "called with dirty buffer %d",
+ blocknr);
+ brelse(tb->thrown[i]); /* incremented in store_thrown */
+ reiserfs_free_block(tb->transaction_handle, NULL,
+@@ -1877,13 +1878,12 @@ static void check_internal_node(struct s
+ }
+ }
+
+-static int locked_or_not_in_tree(struct buffer_head *bh, char *which)
++static int locked_or_not_in_tree(struct tree_balance *tb,
++ struct buffer_head *bh, char *which)
+ {
+ if ((!buffer_journal_prepared(bh) && buffer_locked(bh)) ||
+ !B_IS_IN_TREE(bh)) {
+- reiserfs_warning(NULL,
+- "vs-12339: locked_or_not_in_tree: %s (%b)",
+- which, bh);
++ reiserfs_warning(tb->tb_sb, "vs-12339", "%s (%b)", which, bh);
+ return 1;
+ }
+ return 0;
+@@ -1902,18 +1902,19 @@ static int check_before_balancing(struct
+ /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
+ prepped all of these for us). */
+ if (tb->lnum[0]) {
+- retval |= locked_or_not_in_tree(tb->L[0], "L[0]");
+- retval |= locked_or_not_in_tree(tb->FL[0], "FL[0]");
+- retval |= locked_or_not_in_tree(tb->CFL[0], "CFL[0]");
++ retval |= locked_or_not_in_tree(tb, tb->L[0], "L[0]");
++ retval |= locked_or_not_in_tree(tb, tb->FL[0], "FL[0]");
++ retval |= locked_or_not_in_tree(tb, tb->CFL[0], "CFL[0]");
+ check_leaf(tb->L[0]);
+ }
+ if (tb->rnum[0]) {
+- retval |= locked_or_not_in_tree(tb->R[0], "R[0]");
+- retval |= locked_or_not_in_tree(tb->FR[0], "FR[0]");
+- retval |= locked_or_not_in_tree(tb->CFR[0], "CFR[0]");
++ retval |= locked_or_not_in_tree(tb, tb->R[0], "R[0]");
++ retval |= locked_or_not_in_tree(tb, tb->FR[0], "FR[0]");
++ retval |= locked_or_not_in_tree(tb, tb->CFR[0], "CFR[0]");
+ check_leaf(tb->R[0]);
+ }
+- retval |= locked_or_not_in_tree(PATH_PLAST_BUFFER(tb->tb_path), "S[0]");
++ retval |= locked_or_not_in_tree(tb, PATH_PLAST_BUFFER(tb->tb_path),
++ "S[0]");
+ check_leaf(PATH_PLAST_BUFFER(tb->tb_path));
+
+ return retval;
+@@ -1952,7 +1953,7 @@ static void check_after_balance_leaf(str
+ PATH_H_POSITION(tb->tb_path,
+ 1))));
+ print_cur_tb("12223");
+- reiserfs_warning(tb->tb_sb,
++ reiserfs_warning(tb->tb_sb, "reiserfs-12363",
+ "B_FREE_SPACE (PATH_H_PBUFFER(tb->tb_path,0)) = %d; "
+ "MAX_CHILD_SIZE (%d) - dc_size( %y, %d ) [%d] = %d",
+ left,
+@@ -2104,9 +2105,8 @@ void do_balance(struct tree_balance *tb,
+ }
+ /* if we have no real work to do */
+ if (!tb->insert_size[0]) {
+- reiserfs_warning(tb->tb_sb,
+- "PAP-12350: do_balance: insert_size == 0, mode == %c",
+- flag);
++ reiserfs_warning(tb->tb_sb, "PAP-12350",
++ "insert_size == 0, mode == %c", flag);
+ unfix_nodes(tb);
+ return;
+ }
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -76,7 +76,7 @@ static int reiserfs_file_release(struct
+ * and let the admin know what is going on.
+ */
+ igrab(inode);
+- reiserfs_warning(inode->i_sb,
++ reiserfs_warning(inode->i_sb, "clm-9001",
+ "pinning inode %lu because the "
+ "preallocation can't be freed",
+ inode->i_ino);
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -496,8 +496,8 @@ static int get_num_ver(int mode, struct
+ snum012[needed_nodes - 1 + 3] = units;
+
+ if (needed_nodes > 2)
+- reiserfs_warning(tb->tb_sb, "vs-8111: get_num_ver: "
+- "split_item_position is out of boundary");
++ reiserfs_warning(tb->tb_sb, "vs-8111",
++ "split_item_position is out of range");
+ snum012[needed_nodes - 1]++;
+ split_item_positions[needed_nodes - 1] = i;
+ needed_nodes++;
+@@ -533,8 +533,8 @@ static int get_num_ver(int mode, struct
+
+ if (vn->vn_vi[split_item_num].vi_index != TYPE_DIRENTRY &&
+ vn->vn_vi[split_item_num].vi_index != TYPE_INDIRECT)
+- reiserfs_warning(tb->tb_sb, "vs-8115: get_num_ver: not "
+- "directory or indirect item");
++ reiserfs_warning(tb->tb_sb, "vs-8115",
++ "not directory or indirect item");
+ }
+
+ /* now we know S2bytes, calculate S1bytes */
+@@ -2268,9 +2268,9 @@ static int wait_tb_buffers_until_unlocke
+ #ifdef CONFIG_REISERFS_CHECK
+ repeat_counter++;
+ if ((repeat_counter % 10000) == 0) {
+- reiserfs_warning(p_s_tb->tb_sb,
+- "wait_tb_buffers_until_released(): too many "
+- "iterations waiting for buffer to unlock "
++ reiserfs_warning(p_s_tb->tb_sb, "reiserfs-8200",
++ "too many iterations waiting "
++ "for buffer to unlock "
+ "(%b)", locked);
+
+ /* Don't loop forever. Try to recover from possible error. */
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -842,7 +842,9 @@ int reiserfs_get_block(struct inode *ino
+ if (retval) {
+ if (retval != -ENOSPC)
+ reiserfs_warning(inode->i_sb,
+- "clm-6004: convert tail failed inode %lu, error %d",
++ "clm-6004",
++ "convert tail failed "
++ "inode %lu, error %d",
+ inode->i_ino,
+ retval);
+ if (allocated_block_nr) {
+@@ -1006,8 +1008,7 @@ int reiserfs_get_block(struct inode *ino
+ goto failure;
+ }
+ if (retval == POSITION_FOUND) {
+- reiserfs_warning(inode->i_sb,
+- "vs-825: reiserfs_get_block: "
++ reiserfs_warning(inode->i_sb, "vs-825",
+ "%K should not be found", &key);
+ retval = -EEXIST;
+ if (allocated_block_nr)
+@@ -1332,9 +1333,9 @@ void reiserfs_update_sd_size(struct reis
+ /* look for the object's stat data */
+ retval = search_item(inode->i_sb, &key, &path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(inode->i_sb,
+- "vs-13050: reiserfs_update_sd: "
+- "i/o failure occurred trying to update %K stat data",
++ reiserfs_warning(inode->i_sb, "vs-13050",
++ "i/o failure occurred trying to "
++ "update %K stat data",
+ &key);
+ return;
+ }
+@@ -1345,9 +1346,9 @@ void reiserfs_update_sd_size(struct reis
+ /*reiserfs_warning (inode->i_sb, "vs-13050: reiserfs_update_sd: i_nlink == 0, stat data not found"); */
+ return;
+ }
+- reiserfs_warning(inode->i_sb,
+- "vs-13060: reiserfs_update_sd: "
+- "stat data of object %k (nlink == %d) not found (pos %d)",
++ reiserfs_warning(inode->i_sb, "vs-13060",
++ "stat data of object %k (nlink == %d) "
++ "not found (pos %d)",
+ INODE_PKEY(inode), inode->i_nlink,
+ pos);
+ reiserfs_check_path(&path);
+@@ -1424,10 +1425,9 @@ void reiserfs_read_locked_inode(struct i
+ /* look for the object's stat data */
+ retval = search_item(inode->i_sb, &key, &path_to_sd);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(inode->i_sb,
+- "vs-13070: reiserfs_read_locked_inode: "
+- "i/o failure occurred trying to find stat data of %K",
+- &key);
++ reiserfs_warning(inode->i_sb, "vs-13070",
++ "i/o failure occurred trying to find "
++ "stat data of %K", &key);
+ reiserfs_make_bad_inode(inode);
+ return;
+ }
+@@ -1457,8 +1457,7 @@ void reiserfs_read_locked_inode(struct i
+ during mount (fs/reiserfs/super.c:finish_unfinished()). */
+ if ((inode->i_nlink == 0) &&
+ !REISERFS_SB(inode->i_sb)->s_is_unlinked_ok) {
+- reiserfs_warning(inode->i_sb,
+- "vs-13075: reiserfs_read_locked_inode: "
++ reiserfs_warning(inode->i_sb, "vs-13075",
+ "dead inode read from disk %K. "
+ "This is likely to be race with knfsd. Ignore",
+ &key);
+@@ -1564,7 +1563,7 @@ struct dentry *reiserfs_fh_to_dentry(str
+ */
+ if (fh_type > fh_len) {
+ if (fh_type != 6 || fh_len != 5)
+- reiserfs_warning(sb,
++ reiserfs_warning(sb, "reiserfs-13077",
+ "nfsd/reiserfs, fhtype=%d, len=%d - odd",
+ fh_type, fh_len);
+ fh_type = 5;
+@@ -1689,13 +1688,13 @@ static int reiserfs_new_directory(struct
+ /* look for place in the tree for new item */
+ retval = search_item(sb, &key, path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(sb, "vs-13080: reiserfs_new_directory: "
++ reiserfs_warning(sb, "vs-13080",
+ "i/o failure occurred creating new directory");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+ pathrelse(path);
+- reiserfs_warning(sb, "vs-13070: reiserfs_new_directory: "
++ reiserfs_warning(sb, "vs-13070",
+ "object with this key exists (%k)",
+ &(ih->ih_key));
+ return -EEXIST;
+@@ -1729,13 +1728,13 @@ static int reiserfs_new_symlink(struct r
+ /* look for place in the tree for new item */
+ retval = search_item(sb, &key, path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(sb, "vs-13080: reiserfs_new_symlinik: "
++ reiserfs_warning(sb, "vs-13080",
+ "i/o failure occurred creating new symlink");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+ pathrelse(path);
+- reiserfs_warning(sb, "vs-13080: reiserfs_new_symlink: "
++ reiserfs_warning(sb, "vs-13080",
+ "object with this key exists (%k)",
+ &(ih->ih_key));
+ return -EEXIST;
+@@ -1932,7 +1931,8 @@ int reiserfs_new_inode(struct reiserfs_t
+ goto out_inserted_sd;
+ }
+ } else if (inode->i_sb->s_flags & MS_POSIXACL) {
+- reiserfs_warning(inode->i_sb, "ACLs aren't enabled in the fs, "
++ reiserfs_warning(inode->i_sb, "jdm-13090",
++ "ACLs aren't enabled in the fs, "
+ "but vfs thinks they are!");
+ } else if (is_reiserfs_priv_object(dir)) {
+ reiserfs_mark_inode_private(inode);
+@@ -2049,8 +2049,8 @@ static int grab_tail_page(struct inode *
+ ** I've screwed up the code to find the buffer, or the code to
+ ** call prepare_write
+ */
+- reiserfs_warning(p_s_inode->i_sb,
+- "clm-6000: error reading block %lu on dev %s",
++ reiserfs_warning(p_s_inode->i_sb, "clm-6000",
++ "error reading block %lu on dev %s",
+ bh->b_blocknr,
+ reiserfs_bdevname(p_s_inode->i_sb));
+ error = -EIO;
+@@ -2094,8 +2094,8 @@ int reiserfs_truncate_file(struct inode
+ // and get_block_create_0 could not find a block to read in,
+ // which is ok.
+ if (error != -ENOENT)
+- reiserfs_warning(p_s_inode->i_sb,
+- "clm-6001: grab_tail_page failed %d",
++ reiserfs_warning(p_s_inode->i_sb, "clm-6001",
++ "grab_tail_page failed %d",
+ error);
+ page = NULL;
+ bh = NULL;
+@@ -2213,9 +2213,8 @@ static int map_block_for_writepage(struc
+ /* we've found an unformatted node */
+ if (indirect_item_found(retval, ih)) {
+ if (bytes_copied > 0) {
+- reiserfs_warning(inode->i_sb,
+- "clm-6002: bytes_copied %d",
+- bytes_copied);
++ reiserfs_warning(inode->i_sb, "clm-6002",
++ "bytes_copied %d", bytes_copied);
+ }
+ if (!get_block_num(item, pos_in_item)) {
+ /* crap, we are writing to a hole */
+@@ -2272,9 +2271,8 @@ static int map_block_for_writepage(struc
+ goto research;
+ }
+ } else {
+- reiserfs_warning(inode->i_sb,
+- "clm-6003: bad item inode %lu, device %s",
+- inode->i_ino, reiserfs_bdevname(inode->i_sb));
++ reiserfs_warning(inode->i_sb, "clm-6003",
++ "bad item inode %lu", inode->i_ino);
+ retval = -EIO;
+ goto out;
+ }
+--- a/fs/reiserfs/item_ops.c
++++ b/fs/reiserfs/item_ops.c
+@@ -97,7 +97,8 @@ static int sd_unit_num(struct virtual_it
+
+ static void sd_print_vi(struct virtual_item *vi)
+ {
+- reiserfs_warning(NULL, "STATDATA, index %d, type 0x%x, %h",
++ reiserfs_warning(NULL, "reiserfs-16100",
++ "STATDATA, index %d, type 0x%x, %h",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+@@ -190,7 +191,8 @@ static int direct_unit_num(struct virtua
+
+ static void direct_print_vi(struct virtual_item *vi)
+ {
+- reiserfs_warning(NULL, "DIRECT, index %d, type 0x%x, %h",
++ reiserfs_warning(NULL, "reiserfs-16101",
++ "DIRECT, index %d, type 0x%x, %h",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+@@ -278,7 +280,7 @@ static void indirect_print_item(struct i
+ unp = (__le32 *) item;
+
+ if (ih_item_len(ih) % UNFM_P_SIZE)
+- reiserfs_warning(NULL, "indirect_print_item: invalid item len");
++ reiserfs_warning(NULL, "reiserfs-16102", "invalid item len");
+
+ printk("%d pointers\n[ ", (int)I_UNFM_NUM(ih));
+ for (j = 0; j < I_UNFM_NUM(ih); j++) {
+@@ -334,7 +336,8 @@ static int indirect_unit_num(struct virt
+
+ static void indirect_print_vi(struct virtual_item *vi)
+ {
+- reiserfs_warning(NULL, "INDIRECT, index %d, type 0x%x, %h",
++ reiserfs_warning(NULL, "reiserfs-16103",
++ "INDIRECT, index %d, type 0x%x, %h",
+ vi->vi_index, vi->vi_type, vi->vi_ih);
+ }
+
+@@ -359,7 +362,7 @@ static struct item_operations indirect_o
+
+ static int direntry_bytes_number(struct item_head *ih, int block_size)
+ {
+- reiserfs_warning(NULL, "vs-16090: direntry_bytes_number: "
++ reiserfs_warning(NULL, "vs-16090",
+ "bytes number is asked for direntry");
+ return 0;
+ }
+@@ -614,7 +617,8 @@ static void direntry_print_vi(struct vir
+ int i;
+ struct direntry_uarea *dir_u = vi->vi_uarea;
+
+- reiserfs_warning(NULL, "DIRENTRY, index %d, type 0x%x, %h, flags 0x%x",
++ reiserfs_warning(NULL, "reiserfs-16104",
++ "DIRENTRY, index %d, type 0x%x, %h, flags 0x%x",
+ vi->vi_index, vi->vi_type, vi->vi_ih, dir_u->flags);
+ printk("%d entries: ", dir_u->entry_count);
+ for (i = 0; i < dir_u->entry_count; i++)
+@@ -642,43 +646,43 @@ static struct item_operations direntry_o
+ //
+ static int errcatch_bytes_number(struct item_head *ih, int block_size)
+ {
+- reiserfs_warning(NULL,
+- "green-16001: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16001",
++ "Invalid item type observed, run fsck ASAP");
+ return 0;
+ }
+
+ static void errcatch_decrement_key(struct cpu_key *key)
+ {
+- reiserfs_warning(NULL,
+- "green-16002: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16002",
++ "Invalid item type observed, run fsck ASAP");
+ }
+
+ static int errcatch_is_left_mergeable(struct reiserfs_key *key,
+ unsigned long bsize)
+ {
+- reiserfs_warning(NULL,
+- "green-16003: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16003",
++ "Invalid item type observed, run fsck ASAP");
+ return 0;
+ }
+
+ static void errcatch_print_item(struct item_head *ih, char *item)
+ {
+- reiserfs_warning(NULL,
+- "green-16004: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16004",
++ "Invalid item type observed, run fsck ASAP");
+ }
+
+ static void errcatch_check_item(struct item_head *ih, char *item)
+ {
+- reiserfs_warning(NULL,
+- "green-16005: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16005",
++ "Invalid item type observed, run fsck ASAP");
+ }
+
+ static int errcatch_create_vi(struct virtual_node *vn,
+ struct virtual_item *vi,
+ int is_affected, int insert_size)
+ {
+- reiserfs_warning(NULL,
+- "green-16006: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16006",
++ "Invalid item type observed, run fsck ASAP");
+ return 0; // We might return -1 here as well, but it won't help as create_virtual_node() from where
+ // this operation is called from is of return type void.
+ }
+@@ -686,36 +690,36 @@ static int errcatch_create_vi(struct vir
+ static int errcatch_check_left(struct virtual_item *vi, int free,
+ int start_skip, int end_skip)
+ {
+- reiserfs_warning(NULL,
+- "green-16007: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16007",
++ "Invalid item type observed, run fsck ASAP");
+ return -1;
+ }
+
+ static int errcatch_check_right(struct virtual_item *vi, int free)
+ {
+- reiserfs_warning(NULL,
+- "green-16008: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16008",
++ "Invalid item type observed, run fsck ASAP");
+ return -1;
+ }
+
+ static int errcatch_part_size(struct virtual_item *vi, int first, int count)
+ {
+- reiserfs_warning(NULL,
+- "green-16009: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16009",
++ "Invalid item type observed, run fsck ASAP");
+ return 0;
+ }
+
+ static int errcatch_unit_num(struct virtual_item *vi)
+ {
+- reiserfs_warning(NULL,
+- "green-16010: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16010",
++ "Invalid item type observed, run fsck ASAP");
+ return 0;
+ }
+
+ static void errcatch_print_vi(struct virtual_item *vi)
+ {
+- reiserfs_warning(NULL,
+- "green-16011: Invalid item type observed, run fsck ASAP");
++ reiserfs_warning(NULL, "green-16011",
++ "Invalid item type observed, run fsck ASAP");
+ }
+
+ static struct item_operations errcatch_ops = {
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -300,8 +300,8 @@ int reiserfs_allocate_list_bitmaps(struc
+ jb->journal_list = NULL;
+ jb->bitmaps = vmalloc(mem);
+ if (!jb->bitmaps) {
+- reiserfs_warning(p_s_sb,
+- "clm-2000, unable to allocate bitmaps for journal lists");
++ reiserfs_warning(p_s_sb, "clm-2000", "unable to "
++ "allocate bitmaps for journal lists");
+ failed = 1;
+ break;
+ }
+@@ -644,8 +644,8 @@ static void reiserfs_end_buffer_io_sync(
+ char b[BDEVNAME_SIZE];
+
+ if (buffer_journaled(bh)) {
+- reiserfs_warning(NULL,
+- "clm-2084: pinned buffer %lu:%s sent to disk",
++ reiserfs_warning(NULL, "clm-2084",
++ "pinned buffer %lu:%s sent to disk",
+ bh->b_blocknr, bdevname(bh->b_bdev, b));
+ }
+ if (uptodate)
+@@ -1122,7 +1122,8 @@ static int flush_commit_list(struct supe
+ sync_dirty_buffer(tbh);
+ if (unlikely(!buffer_uptodate(tbh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+- reiserfs_warning(s, "journal-601, buffer write failed");
++ reiserfs_warning(s, "journal-601",
++ "buffer write failed");
+ #endif
+ retval = -EIO;
+ }
+@@ -1154,14 +1155,14 @@ static int flush_commit_list(struct supe
+ * up propagating the write error out to the filesystem. */
+ if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+- reiserfs_warning(s, "journal-615: buffer write failed");
++ reiserfs_warning(s, "journal-615", "buffer write failed");
+ #endif
+ retval = -EIO;
+ }
+ bforget(jl->j_commit_bh);
+ if (journal->j_last_commit_id != 0 &&
+ (jl->j_trans_id - journal->j_last_commit_id) != 1) {
+- reiserfs_warning(s, "clm-2200: last commit %lu, current %lu",
++ reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
+ journal->j_last_commit_id, jl->j_trans_id);
+ }
+ journal->j_last_commit_id = jl->j_trans_id;
+@@ -1250,7 +1251,7 @@ static void remove_all_from_journal_list
+ while (cn) {
+ if (cn->blocknr != 0) {
+ if (debug) {
+- reiserfs_warning(p_s_sb,
++ reiserfs_warning(p_s_sb, "reiserfs-2201",
+ "block %u, bh is %d, state %ld",
+ cn->blocknr, cn->bh ? 1 : 0,
+ cn->state);
+@@ -1288,8 +1289,8 @@ static int _update_journal_header_block(
+ wait_on_buffer((journal->j_header_bh));
+ if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+- reiserfs_warning(p_s_sb,
+- "journal-699: buffer write failed");
++ reiserfs_warning(p_s_sb, "journal-699",
++ "buffer write failed");
+ #endif
+ return -EIO;
+ }
+@@ -1319,8 +1320,8 @@ static int _update_journal_header_block(
+ sync_dirty_buffer(journal->j_header_bh);
+ }
+ if (!buffer_uptodate(journal->j_header_bh)) {
+- reiserfs_warning(p_s_sb,
+- "journal-837: IO error during journal replay");
++ reiserfs_warning(p_s_sb, "journal-837",
++ "IO error during journal replay");
+ return -EIO;
+ }
+ }
+@@ -1401,8 +1402,7 @@ static int flush_journal_list(struct sup
+ BUG_ON(j_len_saved <= 0);
+
+ if (atomic_read(&journal->j_wcount) != 0) {
+- reiserfs_warning(s,
+- "clm-2048: flush_journal_list called with wcount %d",
++ reiserfs_warning(s, "clm-2048", "called with wcount %d",
+ atomic_read(&journal->j_wcount));
+ }
+ BUG_ON(jl->j_trans_id == 0);
+@@ -1510,8 +1510,8 @@ static int flush_journal_list(struct sup
+ ** is not marked JDirty_wait
+ */
+ if ((!was_jwait) && !buffer_locked(saved_bh)) {
+- reiserfs_warning(s,
+- "journal-813: BAD! buffer %llu %cdirty %cjwait, "
++ reiserfs_warning(s, "journal-813",
++ "BAD! buffer %llu %cdirty %cjwait, "
+ "not in a newer tranasction",
+ (unsigned long long)saved_bh->
+ b_blocknr, was_dirty ? ' ' : '!',
+@@ -1529,8 +1529,8 @@ static int flush_journal_list(struct sup
+ unlock_buffer(saved_bh);
+ count++;
+ } else {
+- reiserfs_warning(s,
+- "clm-2082: Unable to flush buffer %llu in %s",
++ reiserfs_warning(s, "clm-2082",
++ "Unable to flush buffer %llu in %s",
+ (unsigned long long)saved_bh->
+ b_blocknr, __func__);
+ }
+@@ -1541,8 +1541,8 @@ static int flush_journal_list(struct sup
+ /* we incremented this to keep others from taking the buffer head away */
+ put_bh(saved_bh);
+ if (atomic_read(&(saved_bh->b_count)) < 0) {
+- reiserfs_warning(s,
+- "journal-945: saved_bh->b_count < 0");
++ reiserfs_warning(s, "journal-945",
++ "saved_bh->b_count < 0");
+ }
+ }
+ }
+@@ -1561,8 +1561,8 @@ static int flush_journal_list(struct sup
+ }
+ if (unlikely(!buffer_uptodate(cn->bh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+- reiserfs_warning(s,
+- "journal-949: buffer write failed\n");
++ reiserfs_warning(s, "journal-949",
++ "buffer write failed");
+ #endif
+ err = -EIO;
+ }
+@@ -1623,7 +1623,7 @@ static int flush_journal_list(struct sup
+
+ if (journal->j_last_flush_id != 0 &&
+ (jl->j_trans_id - journal->j_last_flush_id) != 1) {
+- reiserfs_warning(s, "clm-2201: last flush %lu, current %lu",
++ reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
+ journal->j_last_flush_id, jl->j_trans_id);
+ }
+ journal->j_last_flush_id = jl->j_trans_id;
+@@ -2058,8 +2058,9 @@ static int journal_transaction_is_valid(
+ return -1;
+ }
+ if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
+- reiserfs_warning(p_s_sb,
+- "journal-2018: Bad transaction length %d encountered, ignoring transaction",
++ reiserfs_warning(p_s_sb, "journal-2018",
++ "Bad transaction length %d "
++ "encountered, ignoring transaction",
+ get_desc_trans_len(desc));
+ return -1;
+ }
+@@ -2195,8 +2196,8 @@ static int journal_read_transaction(stru
+ brelse(d_bh);
+ kfree(log_blocks);
+ kfree(real_blocks);
+- reiserfs_warning(p_s_sb,
+- "journal-1169: kmalloc failed, unable to mount FS");
++ reiserfs_warning(p_s_sb, "journal-1169",
++ "kmalloc failed, unable to mount FS");
+ return -1;
+ }
+ /* get all the buffer heads */
+@@ -2218,15 +2219,18 @@ static int journal_read_transaction(stru
+ j_realblock[i - trans_half]));
+ }
+ if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
+- reiserfs_warning(p_s_sb,
+- "journal-1207: REPLAY FAILURE fsck required! Block to replay is outside of filesystem");
++ reiserfs_warning(p_s_sb, "journal-1207",
++ "REPLAY FAILURE fsck required! "
++ "Block to replay is outside of "
++ "filesystem");
+ goto abort_replay;
+ }
+ /* make sure we don't try to replay onto log or reserved area */
+ if (is_block_in_log_or_reserved_area
+ (p_s_sb, real_blocks[i]->b_blocknr)) {
+- reiserfs_warning(p_s_sb,
+- "journal-1204: REPLAY FAILURE fsck required! Trying to replay onto a log block");
++ reiserfs_warning(p_s_sb, "journal-1204",
++ "REPLAY FAILURE fsck required! "
++ "Trying to replay onto a log block");
+ abort_replay:
+ brelse_array(log_blocks, i);
+ brelse_array(real_blocks, i);
+@@ -2242,8 +2246,9 @@ static int journal_read_transaction(stru
+ for (i = 0; i < get_desc_trans_len(desc); i++) {
+ wait_on_buffer(log_blocks[i]);
+ if (!buffer_uptodate(log_blocks[i])) {
+- reiserfs_warning(p_s_sb,
+- "journal-1212: REPLAY FAILURE fsck required! buffer write failed");
++ reiserfs_warning(p_s_sb, "journal-1212",
++ "REPLAY FAILURE fsck required! "
++ "buffer write failed");
+ brelse_array(log_blocks + i,
+ get_desc_trans_len(desc) - i);
+ brelse_array(real_blocks, get_desc_trans_len(desc));
+@@ -2266,8 +2271,9 @@ static int journal_read_transaction(stru
+ for (i = 0; i < get_desc_trans_len(desc); i++) {
+ wait_on_buffer(real_blocks[i]);
+ if (!buffer_uptodate(real_blocks[i])) {
+- reiserfs_warning(p_s_sb,
+- "journal-1226: REPLAY FAILURE, fsck required! buffer write failed");
++ reiserfs_warning(p_s_sb, "journal-1226",
++ "REPLAY FAILURE, fsck required! "
++ "buffer write failed");
+ brelse_array(real_blocks + i,
+ get_desc_trans_len(desc) - i);
+ brelse(c_bh);
+@@ -2418,8 +2424,8 @@ static int journal_read(struct super_blo
+ }
+
+ if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
+- reiserfs_warning(p_s_sb,
+- "clm-2076: device is readonly, unable to replay log");
++ reiserfs_warning(p_s_sb, "clm-2076",
++ "device is readonly, unable to replay log");
+ return -1;
+ }
+
+@@ -2580,9 +2586,8 @@ static int release_journal_dev(struct su
+ }
+
+ if (result != 0) {
+- reiserfs_warning(super,
+- "sh-457: release_journal_dev: Cannot release journal device: %i",
+- result);
++ reiserfs_warning(super, "sh-457",
++ "Cannot release journal device: %i", result);
+ }
+ return result;
+ }
+@@ -2611,7 +2616,7 @@ static int journal_init_dev(struct super
+ if (IS_ERR(journal->j_dev_bd)) {
+ result = PTR_ERR(journal->j_dev_bd);
+ journal->j_dev_bd = NULL;
+- reiserfs_warning(super, "sh-458: journal_init_dev: "
++ reiserfs_warning(super, "sh-458",
+ "cannot init journal device '%s': %i",
+ __bdevname(jdev, b), result);
+ return result;
+@@ -2673,16 +2678,16 @@ static int check_advise_trans_params(str
+ journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
+ JOURNAL_MIN_RATIO) {
+- reiserfs_warning(p_s_sb,
+- "sh-462: bad transaction max size (%u). FSCK?",
+- journal->j_trans_max);
++ reiserfs_warning(p_s_sb, "sh-462",
++ "bad transaction max size (%u). "
++ "FSCK?", journal->j_trans_max);
+ return 1;
+ }
+ if (journal->j_max_batch != (journal->j_trans_max) *
+ JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
+- reiserfs_warning(p_s_sb,
+- "sh-463: bad transaction max batch (%u). FSCK?",
+- journal->j_max_batch);
++ reiserfs_warning(p_s_sb, "sh-463",
++ "bad transaction max batch (%u). "
++ "FSCK?", journal->j_max_batch);
+ return 1;
+ }
+ } else {
+@@ -2690,9 +2695,11 @@ static int check_advise_trans_params(str
+ The file system was created by old version
+ of mkreiserfs, so some fields contain zeros,
+ and we need to advise proper values for them */
+- if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE)
+- reiserfs_panic(p_s_sb, "sh-464: bad blocksize (%u)",
+- p_s_sb->s_blocksize);
++ if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
++ reiserfs_warning(p_s_sb, "sh-464", "bad blocksize (%u)",
++ p_s_sb->s_blocksize);
++ return 1;
++ }
+ journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
+ journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
+ journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
+@@ -2716,8 +2723,8 @@ int journal_init(struct super_block *p_s
+
+ journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
+ if (!journal) {
+- reiserfs_warning(p_s_sb,
+- "journal-1256: unable to get memory for journal structure");
++ reiserfs_warning(p_s_sb, "journal-1256",
++ "unable to get memory for journal structure");
+ return 1;
+ }
+ memset(journal, 0, sizeof(struct reiserfs_journal));
+@@ -2746,9 +2753,9 @@ int journal_init(struct super_block *p_s
+ if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
+ (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
+- reiserfs_warning(p_s_sb,
+- "journal-1393: journal does not fit for area "
+- "addressed by first of bitmap blocks. It starts at "
++ reiserfs_warning(p_s_sb, "journal-1393",
++ "journal does not fit for area addressed "
++ "by first of bitmap blocks. It starts at "
+ "%u and its size is %u. Block size %ld",
+ SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+@@ -2757,8 +2764,8 @@ int journal_init(struct super_block *p_s
+ }
+
+ if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
+- reiserfs_warning(p_s_sb,
+- "sh-462: unable to initialize jornal device");
++ reiserfs_warning(p_s_sb, "sh-462",
++ "unable to initialize jornal device");
+ goto free_and_return;
+ }
+
+@@ -2769,8 +2776,8 @@ int journal_init(struct super_block *p_s
+ SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+ SB_ONDISK_JOURNAL_SIZE(p_s_sb));
+ if (!bhjh) {
+- reiserfs_warning(p_s_sb,
+- "sh-459: unable to read journal header");
++ reiserfs_warning(p_s_sb, "sh-459",
++ "unable to read journal header");
+ goto free_and_return;
+ }
+ jh = (struct reiserfs_journal_header *)(bhjh->b_data);
+@@ -2779,10 +2786,10 @@ int journal_init(struct super_block *p_s
+ if (is_reiserfs_jr(rs)
+ && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
+ sb_jp_journal_magic(rs))) {
+- reiserfs_warning(p_s_sb,
+- "sh-460: journal header magic %x "
+- "(device %s) does not match to magic found in super "
+- "block %x", jh->jh_journal.jp_journal_magic,
++ reiserfs_warning(p_s_sb, "sh-460",
++ "journal header magic %x (device %s) does "
++ "not match to magic found in super block %x",
++ jh->jh_journal.jp_journal_magic,
+ bdevname(journal->j_dev_bd, b),
+ sb_jp_journal_magic(rs));
+ brelse(bhjh);
+@@ -2849,7 +2856,7 @@ int journal_init(struct super_block *p_s
+ journal->j_must_wait = 0;
+
+ if (journal->j_cnode_free == 0) {
+- reiserfs_warning(p_s_sb, "journal-2004: Journal cnode memory "
++ reiserfs_warning(p_s_sb, "journal-2004", "Journal cnode memory "
+ "allocation failed (%ld bytes). Journal is "
+ "too large for available memory. Usually "
+ "this is due to a journal that is too large.",
+@@ -2861,12 +2868,13 @@ int journal_init(struct super_block *p_s
+ jl = journal->j_current_jl;
+ jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
+ if (!jl->j_list_bitmap) {
+- reiserfs_warning(p_s_sb,
+- "journal-2005, get_list_bitmap failed for journal list 0");
++ reiserfs_warning(p_s_sb, "journal-2005",
++ "get_list_bitmap failed for journal list 0");
+ goto free_and_return;
+ }
+ if (journal_read(p_s_sb) < 0) {
+- reiserfs_warning(p_s_sb, "Replay Failure, unable to mount");
++ reiserfs_warning(p_s_sb, "reiserfs-2006",
++ "Replay Failure, unable to mount");
+ goto free_and_return;
+ }
+
+@@ -3193,16 +3201,17 @@ int journal_begin(struct reiserfs_transa
+ cur_th->t_refcount++;
+ memcpy(th, cur_th, sizeof(*th));
+ if (th->t_refcount <= 1)
+- reiserfs_warning(p_s_sb,
+- "BAD: refcount <= 1, but journal_info != 0");
++ reiserfs_warning(p_s_sb, "reiserfs-2005",
++ "BAD: refcount <= 1, but "
++ "journal_info != 0");
+ return 0;
+ } else {
+ /* we've ended up with a handle from a different filesystem.
+ ** save it and restore on journal_end. This should never
+ ** really happen...
+ */
+- reiserfs_warning(p_s_sb,
+- "clm-2100: nesting info a different FS");
++ reiserfs_warning(p_s_sb, "clm-2100",
++ "nesting info a different FS");
+ th->t_handle_save = current->journal_info;
+ current->journal_info = th;
+ }
+@@ -3263,7 +3272,8 @@ int journal_mark_dirty(struct reiserfs_t
+ ** could get to disk too early. NOT GOOD.
+ */
+ if (!prepared || buffer_dirty(bh)) {
+- reiserfs_warning(p_s_sb, "journal-1777: buffer %llu bad state "
++ reiserfs_warning(p_s_sb, "journal-1777",
++ "buffer %llu bad state "
+ "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
+ (unsigned long long)bh->b_blocknr,
+ prepared ? ' ' : '!',
+@@ -3273,8 +3283,8 @@ int journal_mark_dirty(struct reiserfs_t
+ }
+
+ if (atomic_read(&(journal->j_wcount)) <= 0) {
+- reiserfs_warning(p_s_sb,
+- "journal-1409: journal_mark_dirty returning because j_wcount was %d",
++ reiserfs_warning(p_s_sb, "journal-1409",
++ "returning because j_wcount was %d",
+ atomic_read(&(journal->j_wcount)));
+ return 1;
+ }
+@@ -3339,8 +3349,8 @@ int journal_end(struct reiserfs_transact
+ struct super_block *p_s_sb, unsigned long nblocks)
+ {
+ if (!current->journal_info && th->t_refcount > 1)
+- reiserfs_warning(p_s_sb, "REISER-NESTING: th NULL, refcount %d",
+- th->t_refcount);
++ reiserfs_warning(p_s_sb, "REISER-NESTING",
++ "th NULL, refcount %d", th->t_refcount);
+
+ if (!th->t_trans_id) {
+ WARN_ON(1);
+@@ -3410,8 +3420,8 @@ static int remove_from_transaction(struc
+ clear_buffer_journal_test(bh);
+ put_bh(bh);
+ if (atomic_read(&(bh->b_count)) < 0) {
+- reiserfs_warning(p_s_sb,
+- "journal-1752: remove from trans, b_count < 0");
++ reiserfs_warning(p_s_sb, "journal-1752",
++ "b_count < 0");
+ }
+ ret = 1;
+ }
+@@ -3731,7 +3741,8 @@ int journal_mark_freed(struct reiserfs_t
+ if (atomic_read
+ (&(cn->bh->b_count)) < 0) {
+ reiserfs_warning(p_s_sb,
+- "journal-2138: cn->bh->b_count < 0");
++ "journal-2138",
++ "cn->bh->b_count < 0");
+ }
+ }
+ if (cn->jlist) { /* since we are clearing the bh, we MUST dec nonzerolen */
+@@ -4134,8 +4145,9 @@ static int do_journal_end(struct reiserf
+ clear_buffer_journaled(cn->bh);
+ } else {
+ /* JDirty cleared sometime during transaction. don't log this one */
+- reiserfs_warning(p_s_sb,
+- "journal-2048: do_journal_end: BAD, buffer in journal hash, but not JDirty!");
++ reiserfs_warning(p_s_sb, "journal-2048",
++ "BAD, buffer in journal hash, "
++ "but not JDirty!");
+ brelse(cn->bh);
+ }
+ next = cn->next;
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -1288,12 +1288,16 @@ void leaf_paste_entries(struct buffer_in
+ prev = (i != 0) ? deh_location(&(deh[i - 1])) : 0;
+
+ if (prev && prev <= deh_location(&(deh[i])))
+- reiserfs_warning(NULL,
+- "vs-10240: leaf_paste_entries: directory item (%h) corrupted (prev %a, cur(%d) %a)",
++ reiserfs_warning(NULL, "vs-10240",
++ "directory item (%h) "
++ "corrupted (prev %a, "
++ "cur(%d) %a)",
+ ih, deh + i - 1, i, deh + i);
+ if (next && next >= deh_location(&(deh[i])))
+- reiserfs_warning(NULL,
+- "vs-10250: leaf_paste_entries: directory item (%h) corrupted (cur(%d) %a, next %a)",
++ reiserfs_warning(NULL, "vs-10250",
++ "directory item (%h) "
++ "corrupted (cur(%d) %a, "
++ "next %a)",
+ ih, i, deh + i, deh + i + 1);
+ }
+ }
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -120,8 +120,8 @@ int search_by_entry_key(struct super_blo
+ switch (retval) {
+ case ITEM_NOT_FOUND:
+ if (!PATH_LAST_POSITION(path)) {
+- reiserfs_warning(sb,
+- "vs-7000: search_by_entry_key: search_by_key returned item position == 0");
++ reiserfs_warning(sb, "vs-7000", "search_by_key "
++ "returned item position == 0");
+ pathrelse(path);
+ return IO_ERROR;
+ }
+@@ -135,8 +135,7 @@ int search_by_entry_key(struct super_blo
+
+ default:
+ pathrelse(path);
+- reiserfs_warning(sb,
+- "vs-7002: search_by_entry_key: no path to here");
++ reiserfs_warning(sb, "vs-7002", "no path to here");
+ return IO_ERROR;
+ }
+
+@@ -300,8 +299,7 @@ static int reiserfs_find_entry(struct in
+ search_by_entry_key(dir->i_sb, &key_to_search,
+ path_to_entry, de);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(dir->i_sb, "zam-7001: io error in %s",
+- __func__);
++ reiserfs_warning(dir->i_sb, "zam-7001", "io error");
+ return IO_ERROR;
+ }
+
+@@ -493,10 +491,9 @@ static int reiserfs_add_entry(struct rei
+ }
+
+ if (retval != NAME_FOUND) {
+- reiserfs_warning(dir->i_sb,
+- "zam-7002:%s: \"reiserfs_find_entry\" "
+- "has returned unexpected value (%d)",
+- __func__, retval);
++ reiserfs_warning(dir->i_sb, "zam-7002",
++ "reiserfs_find_entry() returned "
++ "unexpected value (%d)", retval);
+ }
+
+ return -EEXIST;
+@@ -507,8 +504,9 @@ static int reiserfs_add_entry(struct rei
+ MAX_GENERATION_NUMBER + 1);
+ if (gen_number > MAX_GENERATION_NUMBER) {
+ /* there is no free generation number */
+- reiserfs_warning(dir->i_sb,
+- "reiserfs_add_entry: Congratulations! we have got hash function screwed up");
++ reiserfs_warning(dir->i_sb, "reiserfs-7010",
++ "Congratulations! we have got hash function "
++ "screwed up");
+ if (buffer != small_buf)
+ kfree(buffer);
+ pathrelse(&path);
+@@ -524,10 +522,9 @@ static int reiserfs_add_entry(struct rei
+ if (gen_number != 0) { /* we need to re-search for the insertion point */
+ if (search_by_entry_key(dir->i_sb, &entry_key, &path, &de) !=
+ NAME_NOT_FOUND) {
+- reiserfs_warning(dir->i_sb,
+- "vs-7032: reiserfs_add_entry: "
+- "entry with this key (%K) already exists",
+- &entry_key);
++ reiserfs_warning(dir->i_sb, "vs-7032",
++ "entry with this key (%K) already "
++ "exists", &entry_key);
+
+ if (buffer != small_buf)
+ kfree(buffer);
+@@ -906,8 +903,9 @@ static int reiserfs_rmdir(struct inode *
+ goto end_rmdir;
+
+ if (inode->i_nlink != 2 && inode->i_nlink != 1)
+- reiserfs_warning(inode->i_sb, "%s: empty directory has nlink "
+- "!= 2 (%d)", __func__, inode->i_nlink);
++ reiserfs_warning(inode->i_sb, "reiserfs-7040",
++ "empty directory has nlink != 2 (%d)",
++ inode->i_nlink);
+
+ clear_nlink(inode);
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+@@ -983,10 +981,9 @@ static int reiserfs_unlink(struct inode
+ }
+
+ if (!inode->i_nlink) {
+- reiserfs_warning(inode->i_sb, "%s: deleting nonexistent file "
+- "(%s:%lu), %d", __func__,
+- reiserfs_bdevname(inode->i_sb), inode->i_ino,
+- inode->i_nlink);
++ reiserfs_warning(inode->i_sb, "reiserfs-7042",
++ "deleting nonexistent file (%lu), %d",
++ inode->i_ino, inode->i_nlink);
+ inode->i_nlink = 1;
+ }
+
+@@ -1500,8 +1497,8 @@ static int reiserfs_rename(struct inode
+ if (reiserfs_cut_from_item
+ (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
+ 0) < 0)
+- reiserfs_warning(old_dir->i_sb,
+- "vs-7060: reiserfs_rename: couldn't not cut old name. Fsck later?");
++ reiserfs_warning(old_dir->i_sb, "vs-7060",
++ "couldn't not cut old name. Fsck later?");
+
+ old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;
+
+--- a/fs/reiserfs/objectid.c
++++ b/fs/reiserfs/objectid.c
+@@ -61,7 +61,7 @@ __u32 reiserfs_get_unused_objectid(struc
+ /* comment needed -Hans */
+ unused_objectid = le32_to_cpu(map[1]);
+ if (unused_objectid == U32_MAX) {
+- reiserfs_warning(s, "%s: no more object ids", __func__);
++ reiserfs_warning(s, "reiserfs-15100", "no more object ids");
+ reiserfs_restore_prepared_buffer(s, SB_BUFFER_WITH_SB(s));
+ return 0;
+ }
+@@ -160,8 +160,7 @@ void reiserfs_release_objectid(struct re
+ i += 2;
+ }
+
+- reiserfs_warning(s,
+- "vs-15011: reiserfs_release_objectid: tried to free free object id (%lu)",
++ reiserfs_warning(s, "vs-15011", "tried to free free object id (%lu)",
+ (long unsigned)objectid_to_release);
+ }
+
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -264,14 +264,17 @@ static void prepare_error_buf(const char
+ va_end( args );\
+ }
+
+-void reiserfs_warning(struct super_block *sb, const char *fmt, ...)
++void __reiserfs_warning(struct super_block *sb, const char *id,
++ const char *function, const char *fmt, ...)
+ {
+ do_reiserfs_warning(fmt);
+ if (sb)
+- printk(KERN_WARNING "REISERFS warning (device %s): %s\n",
+- sb->s_id, error_buf);
++ printk(KERN_WARNING "REISERFS warning (device %s): %s%s%s: "
++ "%s\n", sb->s_id, id ? id : "", id ? " " : "",
++ function, error_buf);
+ else
+- printk(KERN_WARNING "REISERFS warning: %s\n", error_buf);
++ printk(KERN_WARNING "REISERFS warning: %s%s%s: %s\n",
++ id ? id : "", id ? " " : "", function, error_buf);
+ }
+
+ /* No newline.. reiserfs_info calls can be followed by printk's */
+--- a/fs/reiserfs/procfs.c
++++ b/fs/reiserfs/procfs.c
+@@ -505,7 +505,7 @@ int reiserfs_proc_info_init(struct super
+ add_file(sb, "journal", show_journal);
+ return 0;
+ }
+- reiserfs_warning(sb, "reiserfs: cannot create /proc/%s/%s",
++ reiserfs_warning(sb, "cannot create /proc/%s/%s",
+ proc_info_root_name, b);
+ return 1;
+ }
+@@ -561,8 +561,7 @@ int reiserfs_proc_info_global_init(void)
+ if (proc_info_root) {
+ proc_info_root->owner = THIS_MODULE;
+ } else {
+- reiserfs_warning(NULL,
+- "reiserfs: cannot create /proc/%s",
++ reiserfs_warning(NULL, "cannot create /proc/%s",
+ proc_info_root_name);
+ return 1;
+ }
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -444,23 +444,24 @@ static int is_leaf(char *buf, int blocks
+
+ blkh = (struct block_head *)buf;
+ if (blkh_level(blkh) != DISK_LEAF_NODE_LEVEL) {
+- reiserfs_warning(NULL,
+- "is_leaf: this should be caught earlier");
++ reiserfs_warning(NULL, "reiserfs-5080",
++ "this should be caught earlier");
+ return 0;
+ }
+
+ nr = blkh_nr_item(blkh);
+ if (nr < 1 || nr > ((blocksize - BLKH_SIZE) / (IH_SIZE + MIN_ITEM_LEN))) {
+ /* item number is too big or too small */
+- reiserfs_warning(NULL, "is_leaf: nr_item seems wrong: %z", bh);
++ reiserfs_warning(NULL, "reiserfs-5081",
++ "nr_item seems wrong: %z", bh);
+ return 0;
+ }
+ ih = (struct item_head *)(buf + BLKH_SIZE) + nr - 1;
+ used_space = BLKH_SIZE + IH_SIZE * nr + (blocksize - ih_location(ih));
+ if (used_space != blocksize - blkh_free_space(blkh)) {
+ /* free space does not match to calculated amount of use space */
+- reiserfs_warning(NULL, "is_leaf: free space seems wrong: %z",
+- bh);
++ reiserfs_warning(NULL, "reiserfs-5082",
++ "free space seems wrong: %z", bh);
+ return 0;
+ }
+ // FIXME: it is_leaf will hit performance too much - we may have
+@@ -471,29 +472,29 @@ static int is_leaf(char *buf, int blocks
+ prev_location = blocksize;
+ for (i = 0; i < nr; i++, ih++) {
+ if (le_ih_k_type(ih) == TYPE_ANY) {
+- reiserfs_warning(NULL,
+- "is_leaf: wrong item type for item %h",
++ reiserfs_warning(NULL, "reiserfs-5083",
++ "wrong item type for item %h",
+ ih);
+ return 0;
+ }
+ if (ih_location(ih) >= blocksize
+ || ih_location(ih) < IH_SIZE * nr) {
+- reiserfs_warning(NULL,
+- "is_leaf: item location seems wrong: %h",
++ reiserfs_warning(NULL, "reiserfs-5084",
++ "item location seems wrong: %h",
+ ih);
+ return 0;
+ }
+ if (ih_item_len(ih) < 1
+ || ih_item_len(ih) > MAX_ITEM_LEN(blocksize)) {
+- reiserfs_warning(NULL,
+- "is_leaf: item length seems wrong: %h",
++ reiserfs_warning(NULL, "reiserfs-5085",
++ "item length seems wrong: %h",
+ ih);
+ return 0;
+ }
+ if (prev_location - ih_location(ih) != ih_item_len(ih)) {
+- reiserfs_warning(NULL,
+- "is_leaf: item location seems wrong (second one): %h",
+- ih);
++ reiserfs_warning(NULL, "reiserfs-5086",
++ "item location seems wrong "
++ "(second one): %h", ih);
+ return 0;
+ }
+ prev_location = ih_location(ih);
+@@ -514,24 +515,23 @@ static int is_internal(char *buf, int bl
+ nr = blkh_level(blkh);
+ if (nr <= DISK_LEAF_NODE_LEVEL || nr > MAX_HEIGHT) {
+ /* this level is not possible for internal nodes */
+- reiserfs_warning(NULL,
+- "is_internal: this should be caught earlier");
++ reiserfs_warning(NULL, "reiserfs-5087",
++ "this should be caught earlier");
+ return 0;
+ }
+
+ nr = blkh_nr_item(blkh);
+ if (nr > (blocksize - BLKH_SIZE - DC_SIZE) / (KEY_SIZE + DC_SIZE)) {
+ /* for internal which is not root we might check min number of keys */
+- reiserfs_warning(NULL,
+- "is_internal: number of key seems wrong: %z",
+- bh);
++ reiserfs_warning(NULL, "reiserfs-5088",
++ "number of key seems wrong: %z", bh);
+ return 0;
+ }
+
+ used_space = BLKH_SIZE + KEY_SIZE * nr + DC_SIZE * (nr + 1);
+ if (used_space != blocksize - blkh_free_space(blkh)) {
+- reiserfs_warning(NULL,
+- "is_internal: free space seems wrong: %z", bh);
++ reiserfs_warning(NULL, "reiserfs-5089",
++ "free space seems wrong: %z", bh);
+ return 0;
+ }
+ // one may imagine much more checks
+@@ -543,8 +543,8 @@ static int is_internal(char *buf, int bl
+ static int is_tree_node(struct buffer_head *bh, int level)
+ {
+ if (B_LEVEL(bh) != level) {
+- reiserfs_warning(NULL,
+- "is_tree_node: node level %d does not match to the expected one %d",
++ reiserfs_warning(NULL, "reiserfs-5090", "node level %d does "
++ "not match to the expected one %d",
+ B_LEVEL(bh), level);
+ return 0;
+ }
+@@ -645,9 +645,9 @@ int search_by_key(struct super_block *p_
+
+ #ifdef CONFIG_REISERFS_CHECK
+ if (!(++n_repeat_counter % 50000))
+- reiserfs_warning(p_s_sb, "PAP-5100: search_by_key: %s:"
+- "there were %d iterations of while loop "
+- "looking for key %K",
++ reiserfs_warning(p_s_sb, "PAP-5100",
++ "%s: there were %d iterations of "
++ "while loop looking for key %K",
+ current->comm, n_repeat_counter,
+ p_s_key);
+ #endif
+@@ -721,9 +721,9 @@ int search_by_key(struct super_block *p_
+ // make sure, that the node contents look like a node of
+ // certain level
+ if (!is_tree_node(p_s_bh, expected_level)) {
+- reiserfs_warning(p_s_sb, "vs-5150: search_by_key: "
+- "invalid format found in block %ld. Fsck?",
+- p_s_bh->b_blocknr);
++ reiserfs_warning(p_s_sb, "vs-5150",
++ "invalid format found in block %ld. "
++ "Fsck?", p_s_bh->b_blocknr);
+ pathrelse(p_s_search_path);
+ return IO_ERROR;
+ }
+@@ -1227,8 +1227,7 @@ int reiserfs_delete_item(struct reiserfs
+ if (n_ret_value == IO_ERROR)
+ break;
+ if (n_ret_value == FILE_NOT_FOUND) {
+- reiserfs_warning(p_s_sb,
+- "vs-5340: reiserfs_delete_item: "
++ reiserfs_warning(p_s_sb, "vs-5340",
+ "no items of the file %K found",
+ p_s_item_key);
+ break;
+@@ -1338,10 +1337,9 @@ void reiserfs_delete_solid_item(struct r
+ while (1) {
+ retval = search_item(th->t_super, &cpu_key, &path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(th->t_super,
+- "vs-5350: reiserfs_delete_solid_item: "
+- "i/o failure occurred trying to delete %K",
+- &cpu_key);
++ reiserfs_warning(th->t_super, "vs-5350",
++ "i/o failure occurred trying "
++ "to delete %K", &cpu_key);
+ break;
+ }
+ if (retval != ITEM_FOUND) {
+@@ -1355,9 +1353,8 @@ void reiserfs_delete_solid_item(struct r
+ GET_GENERATION_NUMBER(le_key_k_offset
+ (le_key_version(key),
+ key)) == 1))
+- reiserfs_warning(th->t_super,
+- "vs-5355: reiserfs_delete_solid_item: %k not found",
+- key);
++ reiserfs_warning(th->t_super, "vs-5355",
++ "%k not found", key);
+ break;
+ }
+ if (!tb_init) {
+@@ -1389,8 +1386,7 @@ void reiserfs_delete_solid_item(struct r
+ break;
+ }
+ // IO_ERROR, NO_DISK_SPACE, etc
+- reiserfs_warning(th->t_super,
+- "vs-5360: reiserfs_delete_solid_item: "
++ reiserfs_warning(th->t_super, "vs-5360",
+ "could not delete %K due to fix_nodes failure",
+ &cpu_key);
+ unfix_nodes(&tb);
+@@ -1533,8 +1529,9 @@ static void indirect_to_direct_roll_back
+ set_cpu_key_k_offset(&tail_key,
+ cpu_key_k_offset(&tail_key) - removed);
+ }
+- reiserfs_warning(inode->i_sb,
+- "indirect_to_direct_roll_back: indirect_to_direct conversion has been rolled back due to lack of disk space");
++ reiserfs_warning(inode->i_sb, "reiserfs-5091", "indirect_to_direct "
++ "conversion has been rolled back due to "
++ "lack of disk space");
+ //mark_file_without_tail (inode);
+ mark_inode_dirty(inode);
+ }
+@@ -1639,8 +1636,7 @@ int reiserfs_cut_from_item(struct reiser
+ if (n_ret_value == POSITION_FOUND)
+ continue;
+
+- reiserfs_warning(p_s_sb,
+- "PAP-5610: reiserfs_cut_from_item: item %K not found",
++ reiserfs_warning(p_s_sb, "PAP-5610", "item %K not found",
+ p_s_item_key);
+ unfix_nodes(&s_cut_balance);
+ return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
+@@ -1654,7 +1650,8 @@ int reiserfs_cut_from_item(struct reiser
+ indirect_to_direct_roll_back(th, p_s_inode, p_s_path);
+ }
+ if (n_ret_value == NO_DISK_SPACE)
+- reiserfs_warning(p_s_sb, "NO_DISK_SPACE");
++ reiserfs_warning(p_s_sb, "reiserfs-5092",
++ "NO_DISK_SPACE");
+ unfix_nodes(&s_cut_balance);
+ return -EIO;
+ }
+@@ -1743,8 +1740,7 @@ static void truncate_directory(struct re
+ {
+ BUG_ON(!th->t_trans_id);
+ if (inode->i_nlink)
+- reiserfs_warning(inode->i_sb,
+- "vs-5655: truncate_directory: link count != 0");
++ reiserfs_warning(inode->i_sb, "vs-5655", "link count != 0");
+
+ set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), DOT_OFFSET);
+ set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_DIRENTRY);
+@@ -1797,16 +1793,14 @@ int reiserfs_do_truncate(struct reiserfs
+ search_for_position_by_key(p_s_inode->i_sb, &s_item_key,
+ &s_search_path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(p_s_inode->i_sb,
+- "vs-5657: reiserfs_do_truncate: "
++ reiserfs_warning(p_s_inode->i_sb, "vs-5657",
+ "i/o failure occurred trying to truncate %K",
+ &s_item_key);
+ err = -EIO;
+ goto out;
+ }
+ if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
+- reiserfs_warning(p_s_inode->i_sb,
+- "PAP-5660: reiserfs_do_truncate: "
++ reiserfs_warning(p_s_inode->i_sb, "PAP-5660",
+ "wrong result %d of search for %K", retval,
+ &s_item_key);
+
+@@ -1850,8 +1844,8 @@ int reiserfs_do_truncate(struct reiserfs
+ reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
+ p_s_inode, page, n_new_file_size);
+ if (n_deleted < 0) {
+- reiserfs_warning(p_s_inode->i_sb,
+- "vs-5665: reiserfs_do_truncate: reiserfs_cut_from_item failed");
++ reiserfs_warning(p_s_inode->i_sb, "vs-5665",
++ "reiserfs_cut_from_item failed");
+ reiserfs_check_path(&s_search_path);
+ return 0;
+ }
+@@ -2000,8 +1994,8 @@ int reiserfs_paste_into_item(struct reis
+ goto error_out;
+ }
+ if (retval == POSITION_FOUND) {
+- reiserfs_warning(inode->i_sb,
+- "PAP-5710: reiserfs_paste_into_item: entry or pasted byte (%K) exists",
++ reiserfs_warning(inode->i_sb, "PAP-5710",
++ "entry or pasted byte (%K) exists",
+ p_s_key);
+ retval = -EEXIST;
+ goto error_out;
+@@ -2087,8 +2081,7 @@ int reiserfs_insert_item(struct reiserfs
+ goto error_out;
+ }
+ if (retval == ITEM_FOUND) {
+- reiserfs_warning(th->t_super,
+- "PAP-5760: reiserfs_insert_item: "
++ reiserfs_warning(th->t_super, "PAP-5760",
+ "key %K already exists in the tree",
+ key);
+ retval = -EEXIST;
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -181,9 +181,9 @@ static int finish_unfinished(struct supe
+ if (REISERFS_SB(s)->s_qf_names[i]) {
+ int ret = reiserfs_quota_on_mount(s, i);
+ if (ret < 0)
+- reiserfs_warning(s,
+- "reiserfs: cannot turn on journaled quota: error %d",
+- ret);
++ reiserfs_warning(s, "reiserfs-2500",
++ "cannot turn on journaled "
++ "quota: error %d", ret);
+ }
+ }
+ #endif
+@@ -193,8 +193,8 @@ static int finish_unfinished(struct supe
+ while (!retval) {
+ retval = search_item(s, &max_cpu_key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+- reiserfs_warning(s,
+- "vs-2140: finish_unfinished: search_by_key returned %d",
++ reiserfs_warning(s, "vs-2140",
++ "search_by_key returned %d",
+ retval);
+ break;
+ }
+@@ -202,8 +202,8 @@ static int finish_unfinished(struct supe
+ bh = get_last_bh(&path);
+ item_pos = get_item_pos(&path);
+ if (item_pos != B_NR_ITEMS(bh)) {
+- reiserfs_warning(s,
+- "vs-2060: finish_unfinished: wrong position found");
++ reiserfs_warning(s, "vs-2060",
++ "wrong position found");
+ break;
+ }
+ item_pos--;
+@@ -233,8 +233,7 @@ static int finish_unfinished(struct supe
+ if (!inode) {
+ /* the unlink almost completed, it just did not manage to remove
+ "save" link and release objectid */
+- reiserfs_warning(s,
+- "vs-2180: finish_unfinished: iget failed for %K",
++ reiserfs_warning(s, "vs-2180", "iget failed for %K",
+ &obj_key);
+ retval = remove_save_link_only(s, &save_link_key, 1);
+ continue;
+@@ -242,8 +241,8 @@ static int finish_unfinished(struct supe
+
+ if (!truncate && inode->i_nlink) {
+ /* file is not unlinked */
+- reiserfs_warning(s,
+- "vs-2185: finish_unfinished: file %K is not unlinked",
++ reiserfs_warning(s, "vs-2185",
++ "file %K is not unlinked",
+ &obj_key);
+ retval = remove_save_link_only(s, &save_link_key, 0);
+ continue;
+@@ -255,8 +254,9 @@ static int finish_unfinished(struct supe
+ The only imaginable way is to execute unfinished truncate request
+ then boot into old kernel, remove the file and create dir with
+ the same key. */
+- reiserfs_warning(s,
+- "green-2101: impossible truncate on a directory %k. Please report",
++ reiserfs_warning(s, "green-2101",
++ "impossible truncate on a "
++ "directory %k. Please report",
+ INODE_PKEY(inode));
+ retval = remove_save_link_only(s, &save_link_key, 0);
+ truncate = 0;
+@@ -286,9 +286,10 @@ static int finish_unfinished(struct supe
+ /* removal gets completed in iput */
+ retval = 0;
+ } else {
+- reiserfs_warning(s, "Dead loop in "
+- "finish_unfinished detected, "
+- "just remove save link\n");
++ reiserfs_warning(s, "super-2189", "Dead loop "
++ "in finish_unfinished "
++ "detected, just remove "
++ "save link\n");
+ retval = remove_save_link_only(s,
+ &save_link_key, 0);
+ }
+@@ -358,8 +359,9 @@ void add_save_link(struct reiserfs_trans
+ } else {
+ /* truncate */
+ if (S_ISDIR(inode->i_mode))
+- reiserfs_warning(inode->i_sb,
+- "green-2102: Adding a truncate savelink for a directory %k! Please report",
++ reiserfs_warning(inode->i_sb, "green-2102",
++ "Adding a truncate savelink for "
++ "a directory %k! Please report",
+ INODE_PKEY(inode));
+ set_cpu_key_k_offset(&key, 1);
+ set_cpu_key_k_type(&key, TYPE_INDIRECT);
+@@ -374,7 +376,7 @@ void add_save_link(struct reiserfs_trans
+ retval = search_item(inode->i_sb, &key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+ if (retval != -ENOSPC)
+- reiserfs_warning(inode->i_sb, "vs-2100: add_save_link:"
++ reiserfs_warning(inode->i_sb, "vs-2100",
+ "search_by_key (%K) returned %d", &key,
+ retval);
+ pathrelse(&path);
+@@ -389,9 +391,8 @@ void add_save_link(struct reiserfs_trans
+ reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
+ if (retval) {
+ if (retval != -ENOSPC)
+- reiserfs_warning(inode->i_sb,
+- "vs-2120: add_save_link: insert_item returned %d",
+- retval);
++ reiserfs_warning(inode->i_sb, "vs-2120",
++ "insert_item returned %d", retval);
+ } else {
+ if (truncate)
+ REISERFS_I(inode)->i_flags |=
+@@ -490,8 +491,7 @@ static void reiserfs_put_super(struct su
+ print_statistics(s);
+
+ if (REISERFS_SB(s)->reserved_blocks != 0) {
+- reiserfs_warning(s,
+- "green-2005: reiserfs_put_super: reserved blocks left %d",
++ reiserfs_warning(s, "green-2005", "reserved blocks left %d",
+ REISERFS_SB(s)->reserved_blocks);
+ }
+
+@@ -557,8 +557,8 @@ static void reiserfs_dirty_inode(struct
+
+ int err = 0;
+ if (inode->i_sb->s_flags & MS_RDONLY) {
+- reiserfs_warning(inode->i_sb,
+- "clm-6006: writing inode %lu on readonly FS",
++ reiserfs_warning(inode->i_sb, "clm-6006",
++ "writing inode %lu on readonly FS",
+ inode->i_ino);
+ return;
+ }
+@@ -790,13 +790,15 @@ static int reiserfs_getopt(struct super_
+ if (bit_flags) {
+ if (opt->clrmask ==
+ (1 << REISERFS_UNSUPPORTED_OPT))
+- reiserfs_warning(s, "%s not supported.",
++ reiserfs_warning(s, "super-6500",
++ "%s not supported.\n",
+ p);
+ else
+ *bit_flags &= ~opt->clrmask;
+ if (opt->setmask ==
+ (1 << REISERFS_UNSUPPORTED_OPT))
+- reiserfs_warning(s, "%s not supported.",
++ reiserfs_warning(s, "super-6501",
++ "%s not supported.\n",
+ p);
+ else
+ *bit_flags |= opt->setmask;
+@@ -805,7 +807,8 @@ static int reiserfs_getopt(struct super_
+ }
+ }
+ if (!opt->option_name) {
+- reiserfs_warning(s, "unknown mount option \"%s\"", p);
++ reiserfs_warning(s, "super-6502",
++ "unknown mount option \"%s\"", p);
+ return -1;
+ }
+
+@@ -813,8 +816,9 @@ static int reiserfs_getopt(struct super_
+ switch (*p) {
+ case '=':
+ if (!opt->arg_required) {
+- reiserfs_warning(s,
+- "the option \"%s\" does not require an argument",
++ reiserfs_warning(s, "super-6503",
++ "the option \"%s\" does not "
++ "require an argument\n",
+ opt->option_name);
+ return -1;
+ }
+@@ -822,14 +826,15 @@ static int reiserfs_getopt(struct super_
+
+ case 0:
+ if (opt->arg_required) {
+- reiserfs_warning(s,
+- "the option \"%s\" requires an argument",
+- opt->option_name);
++ reiserfs_warning(s, "super-6504",
++ "the option \"%s\" requires an "
++ "argument\n", opt->option_name);
+ return -1;
+ }
+ break;
+ default:
+- reiserfs_warning(s, "head of option \"%s\" is only correct",
++ reiserfs_warning(s, "super-6505",
++ "head of option \"%s\" is only correct\n",
+ opt->option_name);
+ return -1;
+ }
+@@ -841,7 +846,8 @@ static int reiserfs_getopt(struct super_
+ && !(opt->arg_required & (1 << REISERFS_OPT_ALLOWEMPTY))
+ && !strlen(p)) {
+ /* this catches "option=," if not allowed */
+- reiserfs_warning(s, "empty argument for \"%s\"",
++ reiserfs_warning(s, "super-6506",
++ "empty argument for \"%s\"\n",
+ opt->option_name);
+ return -1;
+ }
+@@ -863,7 +869,8 @@ static int reiserfs_getopt(struct super_
+ }
+ }
+
+- reiserfs_warning(s, "bad value \"%s\" for option \"%s\"", p,
++ reiserfs_warning(s, "super-6506",
++ "bad value \"%s\" for option \"%s\"\n", p,
+ opt->option_name);
+ return -1;
+ }
+@@ -953,9 +960,9 @@ static int reiserfs_parse_options(struct
+ *blocks = simple_strtoul(arg, &p, 0);
+ if (*p != '\0') {
+ /* NNN does not look like a number */
+- reiserfs_warning(s,
+- "reiserfs_parse_options: bad value %s",
+- arg);
++ reiserfs_warning(s, "super-6507",
++ "bad value %s for "
++ "-oresize\n", arg);
+ return 0;
+ }
+ }
+@@ -966,8 +973,8 @@ static int reiserfs_parse_options(struct
+ unsigned long val = simple_strtoul(arg, &p, 0);
+ /* commit=NNN (time in seconds) */
+ if (*p != '\0' || val >= (unsigned int)-1) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: bad value %s",
++ reiserfs_warning(s, "super-6508",
++ "bad value %s for -ocommit\n",
+ arg);
+ return 0;
+ }
+@@ -975,16 +982,18 @@ static int reiserfs_parse_options(struct
+ }
+
+ if (c == 'w') {
+- reiserfs_warning(s, "reiserfs: nolargeio option is no longer supported");
++ reiserfs_warning(s, "super-6509", "nolargeio option "
++ "is no longer supported");
+ return 0;
+ }
+
+ if (c == 'j') {
+ if (arg && *arg && jdev_name) {
+ if (*jdev_name) { //Hm, already assigned?
+- reiserfs_warning(s,
+- "reiserfs_parse_options: journal device was already specified to be %s",
+- *jdev_name);
++ reiserfs_warning(s, "super-6510",
++ "journal device was "
++ "already specified to "
++ "be %s", *jdev_name);
+ return 0;
+ }
+ *jdev_name = arg;
+@@ -997,29 +1006,35 @@ static int reiserfs_parse_options(struct
+ if ((sb_any_quota_enabled(s) ||
+ sb_any_quota_suspended(s)) &&
+ (!*arg != !REISERFS_SB(s)->s_qf_names[qtype])) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: cannot change journaled quota options when quota turned on.");
++ reiserfs_warning(s, "super-6511",
++ "cannot change journaled "
++ "quota options when quota "
++ "turned on.");
+ return 0;
+ }
+ if (*arg) { /* Some filename specified? */
+ if (REISERFS_SB(s)->s_qf_names[qtype]
+ && strcmp(REISERFS_SB(s)->s_qf_names[qtype],
+ arg)) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: %s quota file already specified.",
++ reiserfs_warning(s, "super-6512",
++ "%s quota file "
++ "already specified.",
+ QTYPE2NAME(qtype));
+ return 0;
+ }
+ if (strchr(arg, '/')) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: quotafile must be on filesystem root.");
++ reiserfs_warning(s, "super-6513",
++ "quotafile must be "
++ "on filesystem root.");
+ return 0;
+ }
+ qf_names[qtype] =
+ kmalloc(strlen(arg) + 1, GFP_KERNEL);
+ if (!qf_names[qtype]) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: not enough memory for storing quotafile name.");
++ reiserfs_warning(s, "reiserfs-2502",
++ "not enough memory "
++ "for storing "
++ "quotafile name.");
+ return 0;
+ }
+ strcpy(qf_names[qtype], arg);
+@@ -1037,22 +1052,25 @@ static int reiserfs_parse_options(struct
+ else if (!strcmp(arg, "vfsv0"))
+ *qfmt = QFMT_VFS_V0;
+ else {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: unknown quota format specified.");
++ reiserfs_warning(s, "super-6514",
++ "unknown quota format "
++ "specified.");
+ return 0;
+ }
+ if ((sb_any_quota_enabled(s) ||
+ sb_any_quota_suspended(s)) &&
+ *qfmt != REISERFS_SB(s)->s_jquota_fmt) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: cannot change journaled quota options when quota turned on.");
++ reiserfs_warning(s, "super-6515",
++ "cannot change journaled "
++ "quota options when quota "
++ "turned on.");
+ return 0;
+ }
+ }
+ #else
+ if (c == 'u' || c == 'g' || c == 'f') {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: journaled quota options not supported.");
++ reiserfs_warning(s, "reiserfs-2503", "journaled "
++ "quota options not supported.");
+ return 0;
+ }
+ #endif
+@@ -1061,15 +1079,15 @@ static int reiserfs_parse_options(struct
+ #ifdef CONFIG_QUOTA
+ if (!REISERFS_SB(s)->s_jquota_fmt && !*qfmt
+ && (qf_names[USRQUOTA] || qf_names[GRPQUOTA])) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: journaled quota format not specified.");
++ reiserfs_warning(s, "super-6515",
++ "journaled quota format not specified.");
+ return 0;
+ }
+ /* This checking is not precise wrt the quota type but for our purposes it is sufficient */
+ if (!(*mount_options & (1 << REISERFS_QUOTA))
+ && sb_any_quota_enabled(s)) {
+- reiserfs_warning(s,
+- "reiserfs_parse_options: quota options must be present when quota is turned on.");
++ reiserfs_warning(s, "super-6516", "quota options must "
++ "be present when quota is turned on.");
+ return 0;
+ }
+ #endif
+@@ -1129,14 +1147,15 @@ static void handle_attrs(struct super_bl
+
+ if (reiserfs_attrs(s)) {
+ if (old_format_only(s)) {
+- reiserfs_warning(s,
+- "reiserfs: cannot support attributes on 3.5.x disk format");
++ reiserfs_warning(s, "super-6517", "cannot support "
++ "attributes on 3.5.x disk format");
+ REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
+ return;
+ }
+ if (!(le32_to_cpu(rs->s_flags) & reiserfs_attrs_cleared)) {
+- reiserfs_warning(s,
+- "reiserfs: cannot support attributes until flag is set in super-block");
++ reiserfs_warning(s, "super-6518", "cannot support "
++ "attributes until flag is set in "
++ "super-block");
+ REISERFS_SB(s)->s_mount_opt &= ~(1 << REISERFS_ATTRS);
+ }
+ }
+@@ -1314,7 +1333,7 @@ static int read_super_block(struct super
+
+ bh = sb_bread(s, offset / s->s_blocksize);
+ if (!bh) {
+- reiserfs_warning(s, "sh-2006: read_super_block: "
++ reiserfs_warning(s, "sh-2006",
+ "bread failed (dev %s, block %lu, size %lu)",
+ reiserfs_bdevname(s), offset / s->s_blocksize,
+ s->s_blocksize);
+@@ -1335,8 +1354,8 @@ static int read_super_block(struct super
+
+ bh = sb_bread(s, offset / s->s_blocksize);
+ if (!bh) {
+- reiserfs_warning(s, "sh-2007: read_super_block: "
+- "bread failed (dev %s, block %lu, size %lu)\n",
++ reiserfs_warning(s, "sh-2007",
++ "bread failed (dev %s, block %lu, size %lu)",
+ reiserfs_bdevname(s), offset / s->s_blocksize,
+ s->s_blocksize);
+ return 1;
+@@ -1344,8 +1363,8 @@ static int read_super_block(struct super
+
+ rs = (struct reiserfs_super_block *)bh->b_data;
+ if (sb_blocksize(rs) != s->s_blocksize) {
+- reiserfs_warning(s, "sh-2011: read_super_block: "
+- "can't find a reiserfs filesystem on (dev %s, block %Lu, size %lu)\n",
++ reiserfs_warning(s, "sh-2011", "can't find a reiserfs "
++ "filesystem on (dev %s, block %Lu, size %lu)",
+ reiserfs_bdevname(s),
+ (unsigned long long)bh->b_blocknr,
+ s->s_blocksize);
+@@ -1355,9 +1374,10 @@ static int read_super_block(struct super
+
+ if (rs->s_v1.s_root_block == cpu_to_le32(-1)) {
+ brelse(bh);
+- reiserfs_warning(s,
+- "Unfinished reiserfsck --rebuild-tree run detected. Please run\n"
+- "reiserfsck --rebuild-tree and wait for a completion. If that fails\n"
++ reiserfs_warning(s, "super-6519", "Unfinished reiserfsck "
++ "--rebuild-tree run detected. Please run\n"
++ "reiserfsck --rebuild-tree and wait for a "
++ "completion. If that fails\n"
+ "get newer reiserfsprogs package");
+ return 1;
+ }
+@@ -1375,10 +1395,9 @@ static int read_super_block(struct super
+ reiserfs_info(s, "found reiserfs format \"3.5\""
+ " with non-standard journal\n");
+ else {
+- reiserfs_warning(s,
+- "sh-2012: read_super_block: found unknown "
+- "format \"%u\" of reiserfs with non-standard magic",
+- sb_version(rs));
++ reiserfs_warning(s, "sh-2012", "found unknown "
++ "format \"%u\" of reiserfs with "
++ "non-standard magic", sb_version(rs));
+ return 1;
+ }
+ } else
+@@ -1408,8 +1427,7 @@ static int reread_meta_blocks(struct sup
+ ll_rw_block(READ, 1, &(SB_BUFFER_WITH_SB(s)));
+ wait_on_buffer(SB_BUFFER_WITH_SB(s));
+ if (!buffer_uptodate(SB_BUFFER_WITH_SB(s))) {
+- reiserfs_warning(s,
+- "reread_meta_blocks, error reading the super");
++ reiserfs_warning(s, "reiserfs-2504", "error reading the super");
+ return 1;
+ }
+
+@@ -1473,10 +1491,10 @@ static __u32 find_hash_out(struct super_
+ && (yurahash ==
+ GET_HASH_VALUE(deh_offset
+ (&(de.de_deh[de.de_entry_num])))))) {
+- reiserfs_warning(s,
+- "Unable to automatically detect hash function. "
+- "Please mount with -o hash={tea,rupasov,r5}",
+- reiserfs_bdevname(s));
++ reiserfs_warning(s, "reiserfs-2506", "Unable to "
++ "automatically detect hash function. "
++ "Please mount with -o "
++ "hash={tea,rupasov,r5}");
+ hash = UNSET_HASH;
+ break;
+ }
+@@ -1490,7 +1508,8 @@ static __u32 find_hash_out(struct super_
+ (deh_offset(&(de.de_deh[de.de_entry_num]))) == r5hash)
+ hash = R5_HASH;
+ else {
+- reiserfs_warning(s, "Unrecognised hash function");
++ reiserfs_warning(s, "reiserfs-2506",
++ "Unrecognised hash function");
+ hash = UNSET_HASH;
+ }
+ } while (0);
+@@ -1518,17 +1537,20 @@ static int what_hash(struct super_block
+ ** mount options
+ */
+ if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
+- reiserfs_warning(s, "Error, %s hash detected, "
++ reiserfs_warning(s, "reiserfs-2507",
++ "Error, %s hash detected, "
+ "unable to force rupasov hash",
+ reiserfs_hashname(code));
+ code = UNSET_HASH;
+ } else if (reiserfs_tea_hash(s) && code != TEA_HASH) {
+- reiserfs_warning(s, "Error, %s hash detected, "
++ reiserfs_warning(s, "reiserfs-2508",
++ "Error, %s hash detected, "
+ "unable to force tea hash",
+ reiserfs_hashname(code));
+ code = UNSET_HASH;
+ } else if (reiserfs_r5_hash(s) && code != R5_HASH) {
+- reiserfs_warning(s, "Error, %s hash detected, "
++ reiserfs_warning(s, "reiserfs-2509",
++ "Error, %s hash detected, "
+ "unable to force r5 hash",
+ reiserfs_hashname(code));
+ code = UNSET_HASH;
+@@ -1587,9 +1609,9 @@ static int function2code(hashf_t func)
+ return 0;
+ }
+
+-#define SWARN(silent, s, ...) \
++#define SWARN(silent, s, id, ...) \
+ if (!(silent)) \
+- reiserfs_warning (s, __VA_ARGS__)
++ reiserfs_warning(s, id, __VA_ARGS__)
+
+ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
+ {
+@@ -1641,8 +1663,7 @@ static int reiserfs_fill_super(struct su
+ #endif
+
+ if (blocks) {
+- SWARN(silent, s, "jmacd-7: reiserfs_fill_super: resize option "
+- "for remount only");
++ SWARN(silent, s, "jmacd-7", "resize option for remount only");
+ goto error;
+ }
+
+@@ -1651,8 +1672,7 @@ static int reiserfs_fill_super(struct su
+ old_format = 1;
+ /* try new format (64-th 1k block), which can contain reiserfs super block */
+ else if (read_super_block(s, REISERFS_DISK_OFFSET_IN_BYTES)) {
+- SWARN(silent, s,
+- "sh-2021: reiserfs_fill_super: can not find reiserfs on %s",
++ SWARN(silent, s, "sh-2021", "can not find reiserfs on %s",
+ reiserfs_bdevname(s));
+ goto error;
+ }
+@@ -1664,13 +1684,12 @@ static int reiserfs_fill_super(struct su
+ if (s->s_bdev && s->s_bdev->bd_inode
+ && i_size_read(s->s_bdev->bd_inode) <
+ sb_block_count(rs) * sb_blocksize(rs)) {
+- SWARN(silent, s,
+- "Filesystem on %s cannot be mounted because it is bigger than the device",
+- reiserfs_bdevname(s));
+- SWARN(silent, s,
+- "You may need to run fsck or increase size of your LVM partition");
+- SWARN(silent, s,
+- "Or may be you forgot to reboot after fdisk when it told you to");
++ SWARN(silent, s, "", "Filesystem cannot be "
++ "mounted because it is bigger than the device");
++ SWARN(silent, s, "", "You may need to run fsck "
++ "or increase size of your LVM partition");
++ SWARN(silent, s, "", "Or may be you forgot to "
++ "reboot after fdisk when it told you to");
+ goto error;
+ }
+
+@@ -1678,14 +1697,13 @@ static int reiserfs_fill_super(struct su
+ sbi->s_mount_state = REISERFS_VALID_FS;
+
+ if ((errval = reiserfs_init_bitmap_cache(s))) {
+- SWARN(silent, s,
+- "jmacd-8: reiserfs_fill_super: unable to read bitmap");
++ SWARN(silent, s, "jmacd-8", "unable to read bitmap");
+ goto error;
+ }
+ errval = -EINVAL;
+ #ifdef CONFIG_REISERFS_CHECK
+- SWARN(silent, s, "CONFIG_REISERFS_CHECK is set ON");
+- SWARN(silent, s, "- it is slow mode for debugging.");
++ SWARN(silent, s, "", "CONFIG_REISERFS_CHECK is set ON");
++ SWARN(silent, s, "", "- it is slow mode for debugging.");
+ #endif
+
+ /* make data=ordered the default */
+@@ -1706,8 +1724,8 @@ static int reiserfs_fill_super(struct su
+ }
+ // set_device_ro(s->s_dev, 1) ;
+ if (journal_init(s, jdev_name, old_format, commit_max_age)) {
+- SWARN(silent, s,
+- "sh-2022: reiserfs_fill_super: unable to initialize journal space");
++ SWARN(silent, s, "sh-2022",
++ "unable to initialize journal space");
+ goto error;
+ } else {
+ jinit_done = 1; /* once this is set, journal_release must be called
+@@ -1715,8 +1733,8 @@ static int reiserfs_fill_super(struct su
+ */
+ }
+ if (reread_meta_blocks(s)) {
+- SWARN(silent, s,
+- "jmacd-9: reiserfs_fill_super: unable to reread meta blocks after journal init");
++ SWARN(silent, s, "jmacd-9",
++ "unable to reread meta blocks after journal init");
+ goto error;
+ }
+
+@@ -1724,8 +1742,8 @@ static int reiserfs_fill_super(struct su
+ goto error;
+
+ if (bdev_read_only(s->s_bdev) && !(s->s_flags & MS_RDONLY)) {
+- SWARN(silent, s,
+- "clm-7000: Detected readonly device, marking FS readonly");
++ SWARN(silent, s, "clm-7000",
++ "Detected readonly device, marking FS readonly");
+ s->s_flags |= MS_RDONLY;
+ }
+ args.objectid = REISERFS_ROOT_OBJECTID;
+@@ -1734,8 +1752,7 @@ static int reiserfs_fill_super(struct su
+ iget5_locked(s, REISERFS_ROOT_OBJECTID, reiserfs_find_actor,
+ reiserfs_init_locked_inode, (void *)(&args));
+ if (!root_inode) {
+- SWARN(silent, s,
+- "jmacd-10: reiserfs_fill_super: get root inode failed");
++ SWARN(silent, s, "jmacd-10", "get root inode failed");
+ goto error;
+ }
+
+@@ -1784,7 +1801,7 @@ static int reiserfs_fill_super(struct su
+ * avoiding corruption. -jeffm */
+ if (bmap_would_wrap(reiserfs_bmap_count(s)) &&
+ sb_bmap_nr(rs) != 0) {
+- reiserfs_warning(s, "super-2030: This file system "
++ reiserfs_warning(s, "super-2030", "This file system "
+ "claims to use %u bitmap blocks in "
+ "its super block, but requires %u. "
+ "Clearing to zero.", sb_bmap_nr(rs),
+@@ -2085,8 +2102,8 @@ static int reiserfs_quota_on(struct supe
+ if (!(REISERFS_I(inode)->i_flags & i_nopack_mask)) {
+ err = reiserfs_unpack(inode, NULL);
+ if (err) {
+- reiserfs_warning(sb,
+- "reiserfs: Unpacking tail of quota file failed"
++ reiserfs_warning(sb, "super-6520",
++ "Unpacking tail of quota file failed"
+ " (%d). Cannot turn on quotas.", err);
+ err = -EINVAL;
+ goto out;
+@@ -2097,8 +2114,8 @@ static int reiserfs_quota_on(struct supe
+ if (REISERFS_SB(sb)->s_qf_names[type]) {
+ /* Quotafile not of fs root? */
+ if (nd.path.dentry->d_parent->d_inode != sb->s_root->d_inode)
+- reiserfs_warning(sb,
+- "reiserfs: Quota file not on filesystem root. "
++ reiserfs_warning(sb, "super-6521",
++ "Quota file not on filesystem root. "
+ "Journalled quota will not work.");
+ }
+
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -48,9 +48,9 @@ int direct2indirect(struct reiserfs_tran
+
+ // FIXME: we could avoid this
+ if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
+- reiserfs_warning(sb, "PAP-14030: direct2indirect: "
+- "pasted or inserted byte exists in the tree %K. "
+- "Use fsck to repair.", &end_key);
++ reiserfs_warning(sb, "PAP-14030",
++ "pasted or inserted byte exists in "
++ "the tree %K. Use fsck to repair.", &end_key);
+ pathrelse(path);
+ return -EIO;
+ }
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -259,7 +259,8 @@ static int __xattr_readdir(struct inode
+ ih = de.de_ih;
+
+ if (!is_direntry_le_ih(ih)) {
+- reiserfs_warning(inode->i_sb, "not direntry %h", ih);
++ reiserfs_warning(inode->i_sb, "jdm-20000",
++ "not direntry %h", ih);
+ break;
+ }
+ copy_item_head(&tmp_ih, ih);
+@@ -598,7 +599,7 @@ reiserfs_xattr_get(const struct inode *i
+ if (rxh->h_magic != cpu_to_le32(REISERFS_XATTR_MAGIC)) {
+ unlock_page(page);
+ reiserfs_put_page(page);
+- reiserfs_warning(inode->i_sb,
++ reiserfs_warning(inode->i_sb, "jdm-20001",
+ "Invalid magic for xattr (%s) "
+ "associated with %k", name,
+ INODE_PKEY(inode));
+@@ -618,7 +619,7 @@ reiserfs_xattr_get(const struct inode *i
+
+ if (xattr_hash(buffer, isize - sizeof(struct reiserfs_xattr_header)) !=
+ hash) {
+- reiserfs_warning(inode->i_sb,
++ reiserfs_warning(inode->i_sb, "jdm-20002",
+ "Invalid hash for xattr (%s) associated "
+ "with %k", name, INODE_PKEY(inode));
+ err = -EIO;
+@@ -652,7 +653,8 @@ __reiserfs_xattr_del(struct dentry *xadi
+ goto out_file;
+
+ if (!is_reiserfs_priv_object(dentry->d_inode)) {
+- reiserfs_warning(dir->i_sb, "OID %08x [%.*s/%.*s] doesn't have "
++ reiserfs_warning(dir->i_sb, "jdm-20003",
++ "OID %08x [%.*s/%.*s] doesn't have "
+ "priv flag set [parent is %sset].",
+ le32_to_cpu(INODE_PKEY(dentry->d_inode)->
+ k_objectid), xadir->d_name.len,
+@@ -750,7 +752,7 @@ int reiserfs_delete_xattrs(struct inode
+ reiserfs_write_unlock_xattrs(inode->i_sb);
+ dput(root);
+ } else {
+- reiserfs_warning(inode->i_sb,
++ reiserfs_warning(inode->i_sb, "jdm-20006",
+ "Couldn't remove all entries in directory");
+ }
+ unlock_kernel();
+@@ -1154,7 +1156,8 @@ int reiserfs_xattr_init(struct super_blo
+ } else if (reiserfs_xattrs_optional(s)) {
+ /* Old format filesystem, but optional xattrs have been enabled
+ * at mount time. Error out. */
+- reiserfs_warning(s, "xattrs/ACLs not supported on pre v3.6 "
++ reiserfs_warning(s, "jdm-20005",
++ "xattrs/ACLs not supported on pre v3.6 "
+ "format filesystem. Failing mount.");
+ err = -EOPNOTSUPP;
+ goto error;
+@@ -1201,8 +1204,10 @@ int reiserfs_xattr_init(struct super_blo
+ /* If we're read-only it just means that the dir hasn't been
+ * created. Not an error -- just no xattrs on the fs. We'll
+ * check again if we go read-write */
+- reiserfs_warning(s, "xattrs/ACLs enabled and couldn't "
+- "find/create .reiserfs_priv. Failing mount.");
++ reiserfs_warning(s, "jdm-20006",
++ "xattrs/ACLs enabled and couldn't "
++ "find/create .reiserfs_priv. "
++ "Failing mount.");
+ err = -EOPNOTSUPP;
+ }
+ }
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -79,7 +79,10 @@ struct fid;
+ */
+ #define REISERFS_DEBUG_CODE 5 /* extra messages to help find/debug errors */
+
+-void reiserfs_warning(struct super_block *s, const char *fmt, ...);
++void __reiserfs_warning(struct super_block *s, const char *id,
++ const char *func, const char *fmt, ...);
++#define reiserfs_warning(s, id, fmt, args...) \
++ __reiserfs_warning(s, id, __func__, fmt, ##args)
+ /* assertions handling */
+
+ /** always check a condition and panic if it's false. */
+@@ -558,7 +561,7 @@ static inline int uniqueness2type(__u32
+ case V1_DIRENTRY_UNIQUENESS:
+ return TYPE_DIRENTRY;
+ default:
+- reiserfs_warning(NULL, "vs-500: unknown uniqueness %d",
++ reiserfs_warning(NULL, "vs-500", "unknown uniqueness %d",
+ uniqueness);
+ case V1_ANY_UNIQUENESS:
+ return TYPE_ANY;
+@@ -578,7 +581,7 @@ static inline __u32 type2uniqueness(int
+ case TYPE_DIRENTRY:
+ return V1_DIRENTRY_UNIQUENESS;
+ default:
+- reiserfs_warning(NULL, "vs-501: unknown type %d", type);
++ reiserfs_warning(NULL, "vs-501", "unknown type %d", type);
+ case TYPE_ANY:
+ return V1_ANY_UNIQUENESS;
+ }
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: make some warnings informational
+
+ In several places, reiserfs_warning is used when there is no warning, just
+ a notice. This patch changes some of them to indicate that the message
+ is merely informational.
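+
+ For example (an illustrative before/after drawn from the hunks below), a
+ message that merely reports allocator state:
+
+	reiserfs_warning(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
+
+ becomes:
+
+	reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
+
+ so it no longer reads as a problem report.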
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/bitmap.c | 6 +++---
+ fs/reiserfs/super.c | 14 ++++++--------
+ fs/reiserfs/xattr.c | 10 ++++------
+ 3 files changed, 13 insertions(+), 17 deletions(-)
+
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -40,8 +40,8 @@
+
+ #define SET_OPTION(optname) \
+ do { \
+- reiserfs_warning(s, "reiserfs: option \"%s\" is set", #optname); \
+- set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
++ reiserfs_info(s, "block allocator option \"%s\" is set", #optname); \
++ set_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s)); \
+ } while(0)
+ #define TEST_OPTION(optname, s) \
+ test_bit(_ALLOC_ ## optname , &SB_ALLOC_OPTS(s))
+@@ -636,7 +636,7 @@ int reiserfs_parse_alloc_options(struct
+ return 1;
+ }
+
+- reiserfs_warning(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
++ reiserfs_info(s, "allocator options = [%08x]\n", SB_ALLOC_OPTS(s));
+ return 0;
+ }
+
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -1369,13 +1369,11 @@ static int read_super_block(struct super
+ /* magic is of non-standard journal filesystem, look at s_version to
+ find which format is in use */
+ if (sb_version(rs) == REISERFS_VERSION_2)
+- reiserfs_warning(s,
+- "read_super_block: found reiserfs format \"3.6\""
+- " with non-standard journal");
++ reiserfs_info(s, "found reiserfs format \"3.6\""
++ " with non-standard journal\n");
+ else if (sb_version(rs) == REISERFS_VERSION_1)
+- reiserfs_warning(s,
+- "read_super_block: found reiserfs format \"3.5\""
+- " with non-standard journal");
++ reiserfs_info(s, "found reiserfs format \"3.5\""
++ " with non-standard journal\n");
+ else {
+ reiserfs_warning(s,
+ "sh-2012: read_super_block: found unknown "
+@@ -1454,8 +1452,8 @@ static __u32 find_hash_out(struct super_
+ if (reiserfs_rupasov_hash(s)) {
+ hash = YURA_HASH;
+ }
+- reiserfs_warning(s, "FS seems to be empty, autodetect "
+- "is using the default hash");
++ reiserfs_info(s, "FS seems to be empty, autodetect "
++ "is using the default hash\n");
+ break;
+ }
+ r5hash = GET_HASH_VALUE(r5_hash(de.de_name, de.de_namelen));
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -1182,12 +1182,10 @@ int reiserfs_xattr_init(struct super_blo
+ }
+
+ if (dentry && dentry->d_inode)
+- reiserfs_warning(s,
+- "Created %s on %s - reserved for "
+- "xattr storage.",
+- PRIVROOT_NAME,
+- reiserfs_bdevname
+- (inode->i_sb));
++ reiserfs_info(s, "Created %s - "
++ "reserved for xattr "
++ "storage.\n",
++ PRIVROOT_NAME);
+ } else if (!dentry->d_inode) {
+ dput(dentry);
+ dentry = NULL;
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rework reiserfs_panic
+
+ ReiserFS panic messages can be somewhat inconsistent.
+ In some cases:
+ * a unique identifier may be associated with the message
+ * the function name may be included
+ * the device may be printed separately
+
+ This patch aims to make panics more consistent. reiserfs_panic() prints
+ the device name, so printing it a second time is not required. The function
+ name is always helpful in debugging, so it is now automatically inserted
+ into the panic output. Hans has stated that every such message should have
+ a unique identifier. Some cases lack them, others really shouldn't have them.
+ reiserfs_panic() now expects an id associated with each message. In the
+ rare case where one isn't needed, "" will suffice.
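+
+ As a sketch of the resulting convention (drawn from a call site in this
+ patch), a panic that used to embed the id and function name in its format
+ string:
+
+	reiserfs_panic(tb->tb_sb,
+		       "PAP-12040: balance_leaf_when_delete: "
+		       "unexpectable mode: %s(%d)", ...);
+
+ now passes the id separately and lets the macro supply __func__:
+
+	reiserfs_panic(tb->tb_sb, "PAP-12040", "unexpected mode: %s(%d)", ...);
+
+ which expands to __reiserfs_panic(tb->tb_sb, "PAP-12040", __func__, ...).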
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/do_balan.c | 67 +++++++++++++++++++++--------------------
+ fs/reiserfs/fix_node.c | 68 +++++++++++++++++++++---------------------
+ fs/reiserfs/ibalance.c | 12 +++----
+ fs/reiserfs/inode.c | 3 -
+ fs/reiserfs/item_ops.c | 8 +++-
+ fs/reiserfs/journal.c | 57 +++++++++++++++++------------------
+ fs/reiserfs/lbalance.c | 27 +++++++++-------
+ fs/reiserfs/namei.c | 18 ++++-------
+ fs/reiserfs/objectid.c | 3 -
+ fs/reiserfs/prints.c | 33 +++++++++-----------
+ fs/reiserfs/stree.c | 49 ++++++++++++++----------------
+ fs/reiserfs/tail_conversion.c | 10 ++----
+ include/linux/reiserfs_fs.h | 28 +++++++++++++----
+ 13 files changed, 200 insertions(+), 183 deletions(-)
+
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -153,8 +153,8 @@ static int balance_leaf_when_delete(stru
+
+ default:
+ print_cur_tb("12040");
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12040: balance_leaf_when_delete: unexpectable mode: %s(%d)",
++ reiserfs_panic(tb->tb_sb, "PAP-12040",
++ "unexpected mode: %s(%d)",
+ (flag ==
+ M_PASTE) ? "PASTE" : ((flag ==
+ M_INSERT) ? "INSERT" :
+@@ -721,8 +721,9 @@ static int balance_leaf(struct tree_bala
+ }
+ break;
+ default: /* cases d and t */
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12130: balance_leaf: lnum > 0: unexpectable mode: %s(%d)",
++ reiserfs_panic(tb->tb_sb, "PAP-12130",
++ "lnum > 0: unexpected mode: "
++ " %s(%d)",
+ (flag ==
+ M_DELETE) ? "DELETE" : ((flag ==
+ M_CUT)
+@@ -1134,8 +1135,8 @@ static int balance_leaf(struct tree_bala
+ }
+ break;
+ default: /* cases d and t */
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12175: balance_leaf: rnum > 0: unexpectable mode: %s(%d)",
++ reiserfs_panic(tb->tb_sb, "PAP-12175",
++ "rnum > 0: unexpected mode: %s(%d)",
+ (flag ==
+ M_DELETE) ? "DELETE" : ((flag ==
+ M_CUT) ? "CUT"
+@@ -1165,8 +1166,8 @@ static int balance_leaf(struct tree_bala
+ not set correctly */
+ if (tb->CFL[0]) {
+ if (!tb->CFR[0])
+- reiserfs_panic(tb->tb_sb,
+- "vs-12195: balance_leaf: CFR not initialized");
++ reiserfs_panic(tb->tb_sb, "vs-12195",
++ "CFR not initialized");
+ copy_key(B_N_PDELIM_KEY(tb->CFL[0], tb->lkey[0]),
+ B_N_PDELIM_KEY(tb->CFR[0], tb->rkey[0]));
+ do_balance_mark_internal_dirty(tb, tb->CFL[0], 0);
+@@ -1472,7 +1473,10 @@ static int balance_leaf(struct tree_bala
+ && (pos_in_item != ih_item_len(ih_check)
+ || tb->insert_size[0] <= 0))
+ reiserfs_panic(tb->tb_sb,
+- "PAP-12235: balance_leaf: pos_in_item must be equal to ih_item_len");
++ "PAP-12235",
++ "pos_in_item "
++ "must be equal "
++ "to ih_item_len");
+ #endif /* CONFIG_REISERFS_CHECK */
+
+ leaf_mi =
+@@ -1532,8 +1536,8 @@ static int balance_leaf(struct tree_bala
+ }
+ break;
+ default: /* cases d and t */
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12245: balance_leaf: blknum > 2: unexpectable mode: %s(%d)",
++ reiserfs_panic(tb->tb_sb, "PAP-12245",
++ "blknum > 2: unexpected mode: %s(%d)",
+ (flag ==
+ M_DELETE) ? "DELETE" : ((flag ==
+ M_CUT) ? "CUT"
+@@ -1678,10 +1682,11 @@ static int balance_leaf(struct tree_bala
+ print_cur_tb("12285");
+ reiserfs_panic(tb->
+ tb_sb,
+- "PAP-12285: balance_leaf: insert_size must be 0 (%d)",
+- tb->
+- insert_size
+- [0]);
++ "PAP-12285",
++ "insert_size "
++ "must be 0 "
++ "(%d)",
++ tb->insert_size[0]);
+ }
+ }
+ #endif /* CONFIG_REISERFS_CHECK */
+@@ -1694,11 +1699,10 @@ static int balance_leaf(struct tree_bala
+ if (flag == M_PASTE && tb->insert_size[0]) {
+ print_cur_tb("12290");
+ reiserfs_panic(tb->tb_sb,
+- "PAP-12290: balance_leaf: insert_size is still not 0 (%d)",
++ "PAP-12290", "insert_size is still not 0 (%d)",
+ tb->insert_size[0]);
+ }
+ #endif /* CONFIG_REISERFS_CHECK */
+-
+ return 0;
+ } /* Leaf level of the tree is balanced (end of balance_leaf) */
+
+@@ -1729,8 +1733,7 @@ struct buffer_head *get_FEB(struct tree_
+ break;
+
+ if (i == MAX_FEB_SIZE)
+- reiserfs_panic(tb->tb_sb,
+- "vs-12300: get_FEB: FEB list is empty");
++ reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");
+
+ bi.tb = tb;
+ bi.bi_bh = first_b = tb->FEB[i];
+@@ -1871,8 +1874,8 @@ static void check_internal_node(struct s
+ for (i = 0; i <= B_NR_ITEMS(bh); i++, dc++) {
+ if (!is_reusable(s, dc_block_number(dc), 1)) {
+ print_cur_tb(mes);
+- reiserfs_panic(s,
+- "PAP-12338: check_internal_node: invalid child pointer %y in %b",
++ reiserfs_panic(s, "PAP-12338",
++ "invalid child pointer %y in %b",
+ dc, bh);
+ }
+ }
+@@ -1894,9 +1897,10 @@ static int check_before_balancing(struct
+ int retval = 0;
+
+ if (cur_tb) {
+- reiserfs_panic(tb->tb_sb, "vs-12335: check_before_balancing: "
+- "suspect that schedule occurred based on cur_tb not being null at this point in code. "
+- "do_balance cannot properly handle schedule occurring while it runs.");
++ reiserfs_panic(tb->tb_sb, "vs-12335", "suspect that schedule "
++ "occurred based on cur_tb not being null at "
++ "this point in code. do_balance cannot properly "
++ "handle schedule occurring while it runs.");
+ }
+
+ /* double check that buffers that we will modify are unlocked. (fix_nodes should already have
+@@ -1928,8 +1932,8 @@ static void check_after_balance_leaf(str
+ dc_size(B_N_CHILD
+ (tb->FL[0], get_left_neighbor_position(tb, 0)))) {
+ print_cur_tb("12221");
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12355: check_after_balance_leaf: shift to left was incorrect");
++ reiserfs_panic(tb->tb_sb, "PAP-12355",
++ "shift to left was incorrect");
+ }
+ }
+ if (tb->rnum[0]) {
+@@ -1938,8 +1942,8 @@ static void check_after_balance_leaf(str
+ dc_size(B_N_CHILD
+ (tb->FR[0], get_right_neighbor_position(tb, 0)))) {
+ print_cur_tb("12222");
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12360: check_after_balance_leaf: shift to right was incorrect");
++ reiserfs_panic(tb->tb_sb, "PAP-12360",
++ "shift to right was incorrect");
+ }
+ }
+ if (PATH_H_PBUFFER(tb->tb_path, 1) &&
+@@ -1964,8 +1968,7 @@ static void check_after_balance_leaf(str
+ (PATH_H_PBUFFER(tb->tb_path, 1),
+ PATH_H_POSITION(tb->tb_path, 1))),
+ right);
+- reiserfs_panic(tb->tb_sb,
+- "PAP-12365: check_after_balance_leaf: S is incorrect");
++ reiserfs_panic(tb->tb_sb, "PAP-12365", "S is incorrect");
+ }
+ }
+
+@@ -2100,8 +2103,8 @@ void do_balance(struct tree_balance *tb,
+ tb->need_balance_dirty = 0;
+
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+- reiserfs_panic(tb->tb_sb,
+- "clm-6000: do_balance, fs generation has changed\n");
++ reiserfs_panic(tb->tb_sb, "clm-6000", "fs generation has "
++ "changed");
+ }
+ /* if we have no real work to do */
+ if (!tb->insert_size[0]) {
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -135,8 +135,7 @@ static void create_virtual_node(struct t
+ vn->vn_free_ptr +=
+ op_create_vi(vn, vi, is_affected, tb->insert_size[0]);
+ if (tb->vn_buf + tb->vn_buf_size < vn->vn_free_ptr)
+- reiserfs_panic(tb->tb_sb,
+- "vs-8030: create_virtual_node: "
++ reiserfs_panic(tb->tb_sb, "vs-8030",
+ "virtual node space consumed");
+
+ if (!is_affected)
+@@ -186,8 +185,9 @@ static void create_virtual_node(struct t
+ && I_ENTRY_COUNT(B_N_PITEM_HEAD(Sh, 0)) == 1)) {
+ /* node contains more than 1 item, or item is not directory item, or this item contains more than 1 entry */
+ print_block(Sh, 0, -1, -1);
+- reiserfs_panic(tb->tb_sb,
+- "vs-8045: create_virtual_node: rdkey %k, affected item==%d (mode==%c) Must be %c",
++ reiserfs_panic(tb->tb_sb, "vs-8045",
++ "rdkey %k, affected item==%d "
++ "(mode==%c) Must be %c",
+ key, vn->vn_affected_item_num,
+ vn->vn_mode, M_DELETE);
+ }
+@@ -1255,8 +1255,8 @@ static int ip_check_balance(struct tree_
+ /* Calculate balance parameters for creating new root. */
+ if (!Sh) {
+ if (!h)
+- reiserfs_panic(tb->tb_sb,
+- "vs-8210: ip_check_balance: S[0] can not be 0");
++ reiserfs_panic(tb->tb_sb, "vs-8210",
++ "S[0] can not be 0");
+ switch (n_ret_value = get_empty_nodes(tb, h)) {
+ case CARRY_ON:
+ set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
+@@ -1266,8 +1266,8 @@ static int ip_check_balance(struct tree_
+ case REPEAT_SEARCH:
+ return n_ret_value;
+ default:
+- reiserfs_panic(tb->tb_sb,
+- "vs-8215: ip_check_balance: incorrect return value of get_empty_nodes");
++ reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
++ "return value of get_empty_nodes");
+ }
+ }
+
+@@ -2095,38 +2095,38 @@ static void tb_buffer_sanity_check(struc
+ if (p_s_bh) {
+ if (atomic_read(&(p_s_bh->b_count)) <= 0) {
+
+- reiserfs_panic(p_s_sb,
+- "jmacd-1: tb_buffer_sanity_check(): negative or zero reference counter for buffer %s[%d] (%b)\n",
+- descr, level, p_s_bh);
++ reiserfs_panic(p_s_sb, "jmacd-1", "negative or zero "
++ "reference counter for buffer %s[%d] "
++ "(%b)", descr, level, p_s_bh);
+ }
+
+ if (!buffer_uptodate(p_s_bh)) {
+- reiserfs_panic(p_s_sb,
+- "jmacd-2: tb_buffer_sanity_check(): buffer is not up to date %s[%d] (%b)\n",
++ reiserfs_panic(p_s_sb, "jmacd-2", "buffer is not up "
++ "to date %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+ if (!B_IS_IN_TREE(p_s_bh)) {
+- reiserfs_panic(p_s_sb,
+- "jmacd-3: tb_buffer_sanity_check(): buffer is not in tree %s[%d] (%b)\n",
++ reiserfs_panic(p_s_sb, "jmacd-3", "buffer is not "
++ "in tree %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+ if (p_s_bh->b_bdev != p_s_sb->s_bdev) {
+- reiserfs_panic(p_s_sb,
+- "jmacd-4: tb_buffer_sanity_check(): buffer has wrong device %s[%d] (%b)\n",
++ reiserfs_panic(p_s_sb, "jmacd-4", "buffer has wrong "
++ "device %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+ if (p_s_bh->b_size != p_s_sb->s_blocksize) {
+- reiserfs_panic(p_s_sb,
+- "jmacd-5: tb_buffer_sanity_check(): buffer has wrong blocksize %s[%d] (%b)\n",
++ reiserfs_panic(p_s_sb, "jmacd-5", "buffer has wrong "
++ "blocksize %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+ if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
+- reiserfs_panic(p_s_sb,
+- "jmacd-6: tb_buffer_sanity_check(): buffer block number too high %s[%d] (%b)\n",
++ reiserfs_panic(p_s_sb, "jmacd-6", "buffer block "
++ "number too high %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+ }
+@@ -2358,14 +2358,14 @@ int fix_nodes(int n_op_mode, struct tree
+ #ifdef CONFIG_REISERFS_CHECK
+ if (cur_tb) {
+ print_cur_tb("fix_nodes");
+- reiserfs_panic(p_s_tb->tb_sb,
+- "PAP-8305: fix_nodes: there is pending do_balance");
++ reiserfs_panic(p_s_tb->tb_sb, "PAP-8305",
++ "there is pending do_balance");
+ }
+
+ if (!buffer_uptodate(p_s_tbS0) || !B_IS_IN_TREE(p_s_tbS0)) {
+- reiserfs_panic(p_s_tb->tb_sb,
+- "PAP-8320: fix_nodes: S[0] (%b %z) is not uptodate "
+- "at the beginning of fix_nodes or not in tree (mode %c)",
++ reiserfs_panic(p_s_tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
++ "not uptodate at the beginning of fix_nodes "
++ "or not in tree (mode %c)",
+ p_s_tbS0, p_s_tbS0, n_op_mode);
+ }
+
+@@ -2373,24 +2373,26 @@ int fix_nodes(int n_op_mode, struct tree
+ switch (n_op_mode) {
+ case M_INSERT:
+ if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0))
+- reiserfs_panic(p_s_tb->tb_sb,
+- "PAP-8330: fix_nodes: Incorrect item number %d (in S0 - %d) in case of insert",
+- n_item_num, B_NR_ITEMS(p_s_tbS0));
++ reiserfs_panic(p_s_tb->tb_sb, "PAP-8330", "Incorrect "
++ "item number %d (in S0 - %d) in case "
++ "of insert", n_item_num,
++ B_NR_ITEMS(p_s_tbS0));
+ break;
+ case M_PASTE:
+ case M_DELETE:
+ case M_CUT:
+ if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0)) {
+ print_block(p_s_tbS0, 0, -1, -1);
+- reiserfs_panic(p_s_tb->tb_sb,
+- "PAP-8335: fix_nodes: Incorrect item number(%d); mode = %c insert_size = %d\n",
++ reiserfs_panic(p_s_tb->tb_sb, "PAP-8335", "Incorrect "
++ "item number(%d); mode = %c "
++ "insert_size = %d",
+ n_item_num, n_op_mode,
+ p_s_tb->insert_size[0]);
+ }
+ break;
+ default:
+- reiserfs_panic(p_s_tb->tb_sb,
+- "PAP-8340: fix_nodes: Incorrect mode of operation");
++ reiserfs_panic(p_s_tb->tb_sb, "PAP-8340", "Incorrect mode "
++ "of operation");
+ }
+ #endif
+
+--- a/fs/reiserfs/ibalance.c
++++ b/fs/reiserfs/ibalance.c
+@@ -105,8 +105,8 @@ static void internal_define_dest_src_inf
+ break;
+
+ default:
+- reiserfs_panic(tb->tb_sb,
+- "internal_define_dest_src_infos: shift type is unknown (%d)",
++ reiserfs_panic(tb->tb_sb, "ibalance-1",
++ "shift type is unknown (%d)",
+ shift_mode);
+ }
+ }
+@@ -702,8 +702,8 @@ static void balance_internal_when_delete
+
+ return;
+ }
+- reiserfs_panic(tb->tb_sb,
+- "balance_internal_when_delete: unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
++ reiserfs_panic(tb->tb_sb, "ibalance-2",
++ "unexpected tb->lnum[%d]==%d or tb->rnum[%d]==%d",
+ h, tb->lnum[h], h, tb->rnum[h]);
+ }
+
+@@ -940,8 +940,8 @@ int balance_internal(struct tree_balance
+ struct block_head *blkh;
+
+ if (tb->blknum[h] != 1)
+- reiserfs_panic(NULL,
+- "balance_internal: One new node required for creating the new root");
++ reiserfs_panic(NULL, "ibalance-3", "One new node "
++ "required for creating the new root");
+ /* S[h] = empty buffer from the list FEB. */
+ tbSh = get_FEB(tb);
+ blkh = B_BLK_HEAD(tbSh);
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1300,8 +1300,7 @@ static void update_stat_data(struct tree
+ ih = PATH_PITEM_HEAD(path);
+
+ if (!is_statdata_le_ih(ih))
+- reiserfs_panic(inode->i_sb,
+- "vs-13065: update_stat_data: key %k, found item %h",
++ reiserfs_panic(inode->i_sb, "vs-13065", "key %k, found item %h",
+ INODE_PKEY(inode), ih);
+
+ if (stat_data_v1(ih)) {
+--- a/fs/reiserfs/item_ops.c
++++ b/fs/reiserfs/item_ops.c
+@@ -517,8 +517,9 @@ static int direntry_create_vi(struct vir
+ ((is_affected
+ && (vn->vn_mode == M_PASTE
+ || vn->vn_mode == M_CUT)) ? insert_size : 0)) {
+- reiserfs_panic(NULL,
+- "vs-8025: set_entry_sizes: (mode==%c, insert_size==%d), invalid length of directory item",
++ reiserfs_panic(NULL, "vs-8025", "(mode==%c, "
++ "insert_size==%d), invalid length of "
++ "directory item",
+ vn->vn_mode, insert_size);
+ }
+ }
+@@ -549,7 +550,8 @@ static int direntry_check_left(struct vi
+ }
+
+ if (entries == dir_u->entry_count) {
+- reiserfs_panic(NULL, "free space %d, entry_count %d\n", free,
++ reiserfs_panic(NULL, "item_ops-1",
++ "free space %d, entry_count %d", free,
+ dir_u->entry_count);
+ }
+
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -436,8 +436,8 @@ void reiserfs_check_lock_depth(struct su
+ {
+ #ifdef CONFIG_SMP
+ if (current->lock_depth < 0) {
+- reiserfs_panic(sb, "%s called without kernel lock held",
+- caller);
++ reiserfs_panic(sb, "journal-1", "%s called without kernel "
++ "lock held", caller);
+ }
+ #else
+ ;
+@@ -574,7 +574,7 @@ static inline void put_journal_list(stru
+ struct reiserfs_journal_list *jl)
+ {
+ if (jl->j_refcount < 1) {
+- reiserfs_panic(s, "trans id %u, refcount at %d",
++ reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d",
+ jl->j_trans_id, jl->j_refcount);
+ }
+ if (--jl->j_refcount == 0)
+@@ -1416,8 +1416,7 @@ static int flush_journal_list(struct sup
+
+ count = 0;
+ if (j_len_saved > journal->j_trans_max) {
+- reiserfs_panic(s,
+- "journal-715: flush_journal_list, length is %lu, trans id %lu\n",
++ reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
+ j_len_saved, jl->j_trans_id);
+ return 0;
+ }
+@@ -1449,8 +1448,8 @@ static int flush_journal_list(struct sup
+ ** or wait on a more recent transaction, or just ignore it
+ */
+ if (atomic_read(&(journal->j_wcount)) != 0) {
+- reiserfs_panic(s,
+- "journal-844: panic journal list is flushing, wcount is not 0\n");
++ reiserfs_panic(s, "journal-844", "journal list is flushing, "
++ "wcount is not 0");
+ }
+ cn = jl->j_realblock;
+ while (cn) {
+@@ -1551,13 +1550,13 @@ static int flush_journal_list(struct sup
+ while (cn) {
+ if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
+ if (!cn->bh) {
+- reiserfs_panic(s,
+- "journal-1011: cn->bh is NULL\n");
++ reiserfs_panic(s, "journal-1011",
++ "cn->bh is NULL");
+ }
+ wait_on_buffer(cn->bh);
+ if (!cn->bh) {
+- reiserfs_panic(s,
+- "journal-1012: cn->bh is NULL\n");
++ reiserfs_panic(s, "journal-1012",
++ "cn->bh is NULL");
+ }
+ if (unlikely(!buffer_uptodate(cn->bh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+@@ -3252,8 +3251,8 @@ int journal_mark_dirty(struct reiserfs_t
+
+ PROC_INFO_INC(p_s_sb, journal.mark_dirty);
+ if (th->t_trans_id != journal->j_trans_id) {
+- reiserfs_panic(th->t_super,
+- "journal-1577: handle trans id %ld != current trans id %ld\n",
++ reiserfs_panic(th->t_super, "journal-1577",
++ "handle trans id %ld != current trans id %ld",
+ th->t_trans_id, journal->j_trans_id);
+ }
+
+@@ -3292,8 +3291,8 @@ int journal_mark_dirty(struct reiserfs_t
+ ** Nothing can be done here, except make the FS readonly or panic.
+ */
+ if (journal->j_len >= journal->j_trans_max) {
+- reiserfs_panic(th->t_super,
+- "journal-1413: journal_mark_dirty: j_len (%lu) is too big\n",
++ reiserfs_panic(th->t_super, "journal-1413",
++ "j_len (%lu) is too big",
+ journal->j_len);
+ }
+
+@@ -3313,7 +3312,8 @@ int journal_mark_dirty(struct reiserfs_t
+ if (!cn) {
+ cn = get_cnode(p_s_sb);
+ if (!cn) {
+- reiserfs_panic(p_s_sb, "get_cnode failed!\n");
++ reiserfs_panic(p_s_sb, "journal-4",
++ "get_cnode failed!");
+ }
+
+ if (th->t_blocks_logged == th->t_blocks_allocated) {
+@@ -3581,8 +3581,8 @@ static int check_journal_end(struct reis
+ BUG_ON(!th->t_trans_id);
+
+ if (th->t_trans_id != journal->j_trans_id) {
+- reiserfs_panic(th->t_super,
+- "journal-1577: handle trans id %ld != current trans id %ld\n",
++ reiserfs_panic(th->t_super, "journal-1577",
++ "handle trans id %ld != current trans id %ld",
+ th->t_trans_id, journal->j_trans_id);
+ }
+
+@@ -3661,8 +3661,8 @@ static int check_journal_end(struct reis
+ }
+
+ if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
+- reiserfs_panic(p_s_sb,
+- "journal-003: journal_end: j_start (%ld) is too high\n",
++ reiserfs_panic(p_s_sb, "journal-003",
++ "j_start (%ld) is too high",
+ journal->j_start);
+ }
+ return 1;
+@@ -3707,8 +3707,8 @@ int journal_mark_freed(struct reiserfs_t
+ /* set the bit for this block in the journal bitmap for this transaction */
+ jb = journal->j_current_jl->j_list_bitmap;
+ if (!jb) {
+- reiserfs_panic(p_s_sb,
+- "journal-1702: journal_mark_freed, journal_list_bitmap is NULL\n");
++ reiserfs_panic(p_s_sb, "journal-1702",
++ "journal_list_bitmap is NULL");
+ }
+ set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
+
+@@ -4063,8 +4063,8 @@ static int do_journal_end(struct reiserf
+ if (buffer_journaled(cn->bh)) {
+ jl_cn = get_cnode(p_s_sb);
+ if (!jl_cn) {
+- reiserfs_panic(p_s_sb,
+- "journal-1676, get_cnode returned NULL\n");
++ reiserfs_panic(p_s_sb, "journal-1676",
++ "get_cnode returned NULL");
+ }
+ if (i == 0) {
+ jl->j_realblock = jl_cn;
+@@ -4080,8 +4080,9 @@ static int do_journal_end(struct reiserf
+
+ if (is_block_in_log_or_reserved_area
+ (p_s_sb, cn->bh->b_blocknr)) {
+- reiserfs_panic(p_s_sb,
+- "journal-2332: Trying to log block %lu, which is a log block\n",
++ reiserfs_panic(p_s_sb, "journal-2332",
++ "Trying to log block %lu, "
++ "which is a log block",
+ cn->bh->b_blocknr);
+ }
+ jl_cn->blocknr = cn->bh->b_blocknr;
+@@ -4265,8 +4266,8 @@ static int do_journal_end(struct reiserf
+ get_list_bitmap(p_s_sb, journal->j_current_jl);
+
+ if (!(journal->j_current_jl->j_list_bitmap)) {
+- reiserfs_panic(p_s_sb,
+- "journal-1996: do_journal_end, could not get a list bitmap\n");
++ reiserfs_panic(p_s_sb, "journal-1996",
++ "could not get a list bitmap");
+ }
+
+ atomic_set(&(journal->j_jlock), 0);
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -168,10 +168,11 @@ static int leaf_copy_boundary_item(struc
+ if (bytes_or_entries == ih_item_len(ih)
+ && is_indirect_le_ih(ih))
+ if (get_ih_free_space(ih))
+- reiserfs_panic(NULL,
+- "vs-10020: leaf_copy_boundary_item: "
+- "last unformatted node must be filled entirely (%h)",
+- ih);
++ reiserfs_panic(sb_from_bi(dest_bi),
++ "vs-10020",
++ "last unformatted node "
++ "must be filled "
++ "entirely (%h)", ih);
+ }
+ #endif
+
+@@ -622,9 +623,8 @@ static void leaf_define_dest_src_infos(i
+ break;
+
+ default:
+- reiserfs_panic(NULL,
+- "vs-10250: leaf_define_dest_src_infos: shift type is unknown (%d)",
+- shift_mode);
++ reiserfs_panic(sb_from_bi(src_bi), "vs-10250",
++ "shift type is unknown (%d)", shift_mode);
+ }
+ RFALSE(!src_bi->bi_bh || !dest_bi->bi_bh,
+ "vs-10260: mode==%d, source (%p) or dest (%p) buffer is initialized incorrectly",
+@@ -674,9 +674,9 @@ int leaf_shift_left(struct tree_balance
+ #ifdef CONFIG_REISERFS_CHECK
+ if (tb->tb_mode == M_PASTE || tb->tb_mode == M_INSERT) {
+ print_cur_tb("vs-10275");
+- reiserfs_panic(tb->tb_sb,
+- "vs-10275: leaf_shift_left: balance condition corrupted (%c)",
+- tb->tb_mode);
++ reiserfs_panic(tb->tb_sb, "vs-10275",
++ "balance condition corrupted "
++ "(%c)", tb->tb_mode);
+ }
+ #endif
+
+@@ -889,9 +889,12 @@ void leaf_paste_in_buffer(struct buffer_
+
+ #ifdef CONFIG_REISERFS_CHECK
+ if (zeros_number > paste_size) {
++ struct super_block *sb = NULL;
++ if (bi && bi->tb)
++ sb = bi->tb->tb_sb;
+ print_cur_tb("10177");
+- reiserfs_panic(NULL,
+- "vs-10177: leaf_paste_in_buffer: ero number == %d, paste_size == %d",
++ reiserfs_panic(sb, "vs-10177",
++ "zeros_number == %d, paste_size == %d",
+ zeros_number, paste_size);
+ }
+ #endif /* CONFIG_REISERFS_CHECK */
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -145,10 +145,9 @@ int search_by_entry_key(struct super_blo
+ if (!is_direntry_le_ih(de->de_ih) ||
+ COMP_SHORT_KEYS(&(de->de_ih->ih_key), key)) {
+ print_block(de->de_bh, 0, -1, -1);
+- reiserfs_panic(sb,
+- "vs-7005: search_by_entry_key: found item %h is not directory item or "
+- "does not belong to the same directory as key %K",
+- de->de_ih, key);
++ reiserfs_panic(sb, "vs-7005", "found item %h is not directory "
++ "item or does not belong to the same directory "
++ "as key %K", de->de_ih, key);
+ }
+ #endif /* CONFIG_REISERFS_CHECK */
+
+@@ -1194,15 +1193,14 @@ static int entry_points_to_object(const
+
+ if (inode) {
+ if (!de_visible(de->de_deh + de->de_entry_num))
+- reiserfs_panic(NULL,
+- "vs-7042: entry_points_to_object: entry must be visible");
++ reiserfs_panic(inode->i_sb, "vs-7042",
++ "entry must be visible");
+ return (de->de_objectid == inode->i_ino) ? 1 : 0;
+ }
+
+ /* this must be added hidden entry */
+ if (de_visible(de->de_deh + de->de_entry_num))
+- reiserfs_panic(NULL,
+- "vs-7043: entry_points_to_object: entry must be visible");
++ reiserfs_panic(NULL, "vs-7043", "entry must be visible");
+
+ return 1;
+ }
+@@ -1316,8 +1314,8 @@ static int reiserfs_rename(struct inode
+ new_dentry->d_name.len, old_inode, 0);
+ if (retval == -EEXIST) {
+ if (!new_dentry_inode) {
+- reiserfs_panic(old_dir->i_sb,
+- "vs-7050: new entry is found, new inode == 0\n");
++ reiserfs_panic(old_dir->i_sb, "vs-7050",
++ "new entry is found, new inode == 0");
+ }
+ } else if (retval) {
+ int err = journal_end(&th, old_dir->i_sb, jbegin_count);
+--- a/fs/reiserfs/objectid.c
++++ b/fs/reiserfs/objectid.c
+@@ -18,8 +18,7 @@
+ static void check_objectid_map(struct super_block *s, __le32 * map)
+ {
+ if (le32_to_cpu(map[0]) != 1)
+- reiserfs_panic(s,
+- "vs-15010: check_objectid_map: map corrupted: %lx",
++ reiserfs_panic(s, "vs-15010", "map corrupted: %lx",
+ (long unsigned int)le32_to_cpu(map[0]));
+
+ // FIXME: add something else here
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -356,14 +356,21 @@ void reiserfs_debug(struct super_block *
+ extern struct tree_balance *cur_tb;
+ #endif
+
+-void reiserfs_panic(struct super_block *sb, const char *fmt, ...)
++void __reiserfs_panic(struct super_block *sb, const char *id,
++ const char *function, const char *fmt, ...)
+ {
+ do_reiserfs_warning(fmt);
+
++#ifdef CONFIG_REISERFS_CHECK
+ dump_stack();
+-
+- panic(KERN_EMERG "REISERFS: panic (device %s): %s\n",
+- reiserfs_bdevname(sb), error_buf);
++#endif
++ if (sb)
++ panic(KERN_WARNING "REISERFS panic (device %s): %s%s%s: %s\n",
++ sb->s_id, id ? id : "", id ? " " : "",
++ function, error_buf);
++ else
++ panic(KERN_WARNING "REISERFS panic: %s%s%s: %s\n",
++ id ? id : "", id ? " " : "", function, error_buf);
+ }
+
+ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...)
+@@ -684,12 +691,10 @@ static void check_leaf_block_head(struct
+ blkh = B_BLK_HEAD(bh);
+ nr = blkh_nr_item(blkh);
+ if (nr > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+- reiserfs_panic(NULL,
+- "vs-6010: check_leaf_block_head: invalid item number %z",
++ reiserfs_panic(NULL, "vs-6010", "invalid item number %z",
+ bh);
+ if (blkh_free_space(blkh) > bh->b_size - BLKH_SIZE - IH_SIZE * nr)
+- reiserfs_panic(NULL,
+- "vs-6020: check_leaf_block_head: invalid free space %z",
++ reiserfs_panic(NULL, "vs-6020", "invalid free space %z",
+ bh);
+
+ }
+@@ -700,21 +705,15 @@ static void check_internal_block_head(st
+
+ blkh = B_BLK_HEAD(bh);
+ if (!(B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL && B_LEVEL(bh) <= MAX_HEIGHT))
+- reiserfs_panic(NULL,
+- "vs-6025: check_internal_block_head: invalid level %z",
+- bh);
++ reiserfs_panic(NULL, "vs-6025", "invalid level %z", bh);
+
+ if (B_NR_ITEMS(bh) > (bh->b_size - BLKH_SIZE) / IH_SIZE)
+- reiserfs_panic(NULL,
+- "vs-6030: check_internal_block_head: invalid item number %z",
+- bh);
++ reiserfs_panic(NULL, "vs-6030", "invalid item number %z", bh);
+
+ if (B_FREE_SPACE(bh) !=
+ bh->b_size - BLKH_SIZE - KEY_SIZE * B_NR_ITEMS(bh) -
+ DC_SIZE * (B_NR_ITEMS(bh) + 1))
+- reiserfs_panic(NULL,
+- "vs-6040: check_internal_block_head: invalid free space %z",
+- bh);
++ reiserfs_panic(NULL, "vs-6040", "invalid free space %z", bh);
+
+ }
+
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -366,9 +366,8 @@ inline void decrement_bcount(struct buff
+ put_bh(p_s_bh);
+ return;
+ }
+- reiserfs_panic(NULL,
+- "PAP-5070: decrement_bcount: trying to free free buffer %b",
+- p_s_bh);
++ reiserfs_panic(NULL, "PAP-5070",
++ "trying to free free buffer %b", p_s_bh);
+ }
+ }
+
+@@ -713,8 +712,8 @@ int search_by_key(struct super_block *p_
+ #ifdef CONFIG_REISERFS_CHECK
+ if (cur_tb) {
+ print_cur_tb("5140");
+- reiserfs_panic(p_s_sb,
+- "PAP-5140: search_by_key: schedule occurred in do_balance!");
++ reiserfs_panic(p_s_sb, "PAP-5140",
++ "schedule occurred in do_balance!");
+ }
+ #endif
+
+@@ -1511,8 +1510,8 @@ static void indirect_to_direct_roll_back
+ /* look for the last byte of the tail */
+ if (search_for_position_by_key(inode->i_sb, &tail_key, path) ==
+ POSITION_NOT_FOUND)
+- reiserfs_panic(inode->i_sb,
+- "vs-5615: indirect_to_direct_roll_back: found invalid item");
++ reiserfs_panic(inode->i_sb, "vs-5615",
++ "found invalid item");
+ RFALSE(path->pos_in_item !=
+ ih_item_len(PATH_PITEM_HEAD(path)) - 1,
+ "vs-5616: appended bytes found");
+@@ -1612,8 +1611,8 @@ int reiserfs_cut_from_item(struct reiser
+ print_block(PATH_PLAST_BUFFER(p_s_path), 3,
+ PATH_LAST_POSITION(p_s_path) - 1,
+ PATH_LAST_POSITION(p_s_path) + 1);
+- reiserfs_panic(p_s_sb,
+- "PAP-5580: reiserfs_cut_from_item: item to convert does not exist (%K)",
++ reiserfs_panic(p_s_sb, "PAP-5580", "item to "
++ "convert does not exist (%K)",
+ p_s_item_key);
+ }
+ continue;
+@@ -1693,22 +1692,20 @@ int reiserfs_cut_from_item(struct reiser
+ sure, that we exactly remove last unformatted node pointer
+ of the item */
+ if (!is_indirect_le_ih(le_ih))
+- reiserfs_panic(p_s_sb,
+- "vs-5652: reiserfs_cut_from_item: "
++ reiserfs_panic(p_s_sb, "vs-5652",
+ "item must be indirect %h", le_ih);
+
+ if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
+- reiserfs_panic(p_s_sb,
+- "vs-5653: reiserfs_cut_from_item: "
+- "completing indirect2direct conversion indirect item %h "
+- "being deleted must be of 4 byte long",
+- le_ih);
++ reiserfs_panic(p_s_sb, "vs-5653", "completing "
++ "indirect2direct conversion indirect "
++ "item %h being deleted must be of "
++ "4 byte long", le_ih);
+
+ if (c_mode == M_CUT
+ && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
+- reiserfs_panic(p_s_sb,
+- "vs-5654: reiserfs_cut_from_item: "
+- "can not complete indirect2direct conversion of %h (CUT, insert_size==%d)",
++ reiserfs_panic(p_s_sb, "vs-5654", "can not complete "
++ "indirect2direct conversion of %h "
++ "(CUT, insert_size==%d)",
+ le_ih, s_cut_balance.insert_size[0]);
+ }
+ /* it would be useful to make sure, that right neighboring
+@@ -1923,10 +1920,10 @@ static void check_research_for_paste(str
+ || op_bytes_number(found_ih,
+ get_last_bh(path)->b_size) !=
+ pos_in_item(path))
+- reiserfs_panic(NULL,
+- "PAP-5720: check_research_for_paste: "
+- "found direct item %h or position (%d) does not match to key %K",
+- found_ih, pos_in_item(path), p_s_key);
++ reiserfs_panic(NULL, "PAP-5720", "found direct item "
++ "%h or position (%d) does not match "
++ "to key %K", found_ih,
++ pos_in_item(path), p_s_key);
+ }
+ if (is_indirect_le_ih(found_ih)) {
+ if (le_ih_k_offset(found_ih) +
+@@ -1935,9 +1932,9 @@ static void check_research_for_paste(str
+ cpu_key_k_offset(p_s_key)
+ || I_UNFM_NUM(found_ih) != pos_in_item(path)
+ || get_ih_free_space(found_ih) != 0)
+- reiserfs_panic(NULL,
+- "PAP-5730: check_research_for_paste: "
+- "found indirect item (%h) or position (%d) does not match to key (%K)",
++ reiserfs_panic(NULL, "PAP-5730", "found indirect "
++ "item (%h) or position (%d) does not "
++ "match to key (%K)",
+ found_ih, pos_in_item(path), p_s_key);
+ }
+ }
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -92,8 +92,7 @@ int direct2indirect(struct reiserfs_tran
+ last item of the file */
+ if (search_for_position_by_key(sb, &end_key, path) ==
+ POSITION_FOUND)
+- reiserfs_panic(sb,
+- "PAP-14050: direct2indirect: "
++ reiserfs_panic(sb, "PAP-14050",
+ "direct item (%K) not found", &end_key);
+ p_le_ih = PATH_PITEM_HEAD(path);
+ RFALSE(!is_direct_le_ih(p_le_ih),
+@@ -214,8 +213,7 @@ int indirect2direct(struct reiserfs_tran
+ /* re-search indirect item */
+ if (search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path)
+ == POSITION_NOT_FOUND)
+- reiserfs_panic(p_s_sb,
+- "PAP-5520: indirect2direct: "
++ reiserfs_panic(p_s_sb, "PAP-5520",
+ "item to be converted %K does not exist",
+ p_s_item_key);
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+@@ -224,8 +222,8 @@ int indirect2direct(struct reiserfs_tran
+ (ih_item_len(&s_ih) / UNFM_P_SIZE -
+ 1) * p_s_sb->s_blocksize;
+ if (pos != pos1)
+- reiserfs_panic(p_s_sb, "vs-5530: indirect2direct: "
+- "tail position changed while we were reading it");
++ reiserfs_panic(p_s_sb, "vs-5530", "tail position "
++ "changed while we were reading it");
+ #endif
+ }
+
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -86,11 +86,14 @@ void __reiserfs_warning(struct super_blo
+ /* assertions handling */
+
+ /** always check a condition and panic if it's false. */
+-#define __RASSERT( cond, scond, format, args... ) \
+-if( !( cond ) ) \
+- reiserfs_panic( NULL, "reiserfs[%i]: assertion " scond " failed at " \
+- __FILE__ ":%i:%s: " format "\n", \
+- in_interrupt() ? -1 : task_pid_nr(current), __LINE__ , __FUNCTION__ , ##args )
++#define __RASSERT(cond, scond, format, args...) \
++do { \
++ if (!(cond)) \
++ reiserfs_panic(NULL, "assertion failure", "(" #cond ") at " \
++ __FILE__ ":%i:%s: " format "\n", \
++ in_interrupt() ? -1 : task_pid_nr(current), \
++ __LINE__, __func__ , ##args); \
++} while (0)
+
+ #define RASSERT(cond, format, args...) __RASSERT(cond, #cond, format, ##args)
+
+@@ -1448,6 +1451,16 @@ struct buffer_info {
+ int bi_position;
+ };
+
++static inline struct super_block *sb_from_tb(struct tree_balance *tb)
++{
++ return tb ? tb->tb_sb : NULL;
++}
++
++static inline struct super_block *sb_from_bi(struct buffer_info *bi)
++{
++ return bi ? sb_from_tb(bi->tb) : NULL;
++}
++
+ /* there are 4 types of items: stat data, directory item, indirect, direct.
+ +-------------------+------------+--------------+------------+
+ | | k_offset | k_uniqueness | mergeable? |
+@@ -1988,8 +2001,11 @@ int fix_nodes(int n_op_mode, struct tree
+ void unfix_nodes(struct tree_balance *);
+
+ /* prints.c */
+-void reiserfs_panic(struct super_block *s, const char *fmt, ...)
++void __reiserfs_panic(struct super_block *s, const char *id,
++ const char *function, const char *fmt, ...)
+ __attribute__ ((noreturn));
++#define reiserfs_panic(s, id, fmt, args...) \
++ __reiserfs_panic(s, id, __func__, fmt, ##args)
+ void reiserfs_info(struct super_block *s, const char *fmt, ...);
+ void reiserfs_debug(struct super_block *s, int level, const char *fmt, ...);
+ void print_indirect_item(struct buffer_head *bh, int item_num);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: remove i_has_xattr_dir
+
+ With the changes to xattr root locking, the i_has_xattr_dir flag
+ is no longer needed. This patch removes it.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/xattr.c | 9 +--------
+ 1 file changed, 1 insertion(+), 8 deletions(-)
+
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -459,10 +459,7 @@ int reiserfs_delete_xattrs(struct inode
+
+ dput(root);
+ out:
+- if (!err)
+- REISERFS_I(inode)->i_flags =
+- REISERFS_I(inode)->i_flags & ~i_has_xattr_dir;
+- else
++ if (err)
+ reiserfs_warning(inode->i_sb, "jdm-20004",
+ "Couldn't remove all xattrs (%d)\n", err);
+ return err;
+@@ -660,7 +657,6 @@ reiserfs_xattr_set(struct inode *inode,
+ down_write(&REISERFS_I(inode)->i_xattr_sem);
+
+ xahash = xattr_hash(buffer, buffer_size);
+- REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+ /* Resize it so we're ok to write there */
+ newattrs.ia_size = buffer_size;
+@@ -769,7 +765,6 @@ reiserfs_xattr_get(const struct inode *i
+ down_read(&REISERFS_I(inode)->i_xattr_sem);
+
+ isize = i_size_read(dentry->d_inode);
+- REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+ /* Just return the size needed */
+ if (buffer == NULL) {
+@@ -999,8 +994,6 @@ ssize_t reiserfs_listxattr(struct dentry
+ buf.r_pos = 0;
+ buf.r_inode = dentry->d_inode;
+
+- REISERFS_I(dentry->d_inode)->i_flags |= i_has_xattr_dir;
+-
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_listxattr_filler, &buf);
+ mutex_unlock(&dir->d_inode->i_mutex);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: remove link detection code
+
+ Early in the reiserfs xattr development, there was a plan to use hardlinks
+ to save disk space for identical xattrs. That code never materialized and
+ isn't going to, so this patch removes the detection code.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/xattr.c | 13 -------------
+ 1 file changed, 13 deletions(-)
+
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -432,7 +432,6 @@ reiserfs_xattr_set(struct inode *inode,
+ if (buffer && buffer_size)
+ xahash = xattr_hash(buffer, buffer_size);
+
+- open_file:
+ dentry = get_xa_file_dentry(inode, name, flags);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+@@ -441,18 +440,6 @@ reiserfs_xattr_set(struct inode *inode,
+
+ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
+
+- /* we need to copy it off.. */
+- if (dentry->d_inode->i_nlink > 1) {
+- dput(dentry);
+- err = reiserfs_xattr_del(inode, name);
+- if (err < 0)
+- goto out;
+- /* We just killed the old one, we're not replacing anymore */
+- if (flags & XATTR_REPLACE)
+- flags &= ~XATTR_REPLACE;
+- goto open_file;
+- }
+-
+ /* Resize it so we're ok to write there */
+ newattrs.ia_size = buffer_size;
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rename [cn]_* variables
+
+ This patch renames n_, c_, etc. variables to something more sane. This is
+ the sixth in a series of patches to rip out some of the awful variable
+ naming in reiserfs.
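+
+ For example, from the fix_nodes() hunks below (collapsed here purely as
+ an illustration of the convention, not a literal hunk):
+
+	int n_ret_value, n_h, n_item_num;	/* before */
+	int ret, h, item_num;			/* after */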
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/file.c | 6
+ fs/reiserfs/fix_node.c | 474 +++++++++++++++++++++---------------------
+ fs/reiserfs/stree.c | 370 ++++++++++++++++----------------
+ fs/reiserfs/tail_conversion.c | 30 +-
+ 4 files changed, 438 insertions(+), 442 deletions(-)
+
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -138,11 +138,11 @@ static int reiserfs_sync_file(struct fil
+ struct dentry *dentry, int datasync)
+ {
+ struct inode *inode = dentry->d_inode;
+- int n_err;
++ int err;
+ int barrier_done;
+
+ BUG_ON(!S_ISREG(inode->i_mode));
+- n_err = sync_mapping_buffers(inode->i_mapping);
++ err = sync_mapping_buffers(inode->i_mapping);
+ reiserfs_write_lock(inode->i_sb);
+ barrier_done = reiserfs_commit_for_inode(inode);
+ reiserfs_write_unlock(inode->i_sb);
+@@ -150,7 +150,7 @@ static int reiserfs_sync_file(struct fil
+ blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+ if (barrier_done < 0)
+ return barrier_done;
+- return (n_err < 0) ? -EIO : 0;
++ return (err < 0) ? -EIO : 0;
+ }
+
+ /* taken fs/buffer.c:__block_commit_write */
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -751,24 +751,24 @@ else \
+
+ static void free_buffers_in_tb(struct tree_balance *tb)
+ {
+- int n_counter;
++ int i;
+
+ pathrelse(tb->tb_path);
+
+- for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) {
+- brelse(tb->L[n_counter]);
+- brelse(tb->R[n_counter]);
+- brelse(tb->FL[n_counter]);
+- brelse(tb->FR[n_counter]);
+- brelse(tb->CFL[n_counter]);
+- brelse(tb->CFR[n_counter]);
+-
+- tb->L[n_counter] = NULL;
+- tb->R[n_counter] = NULL;
+- tb->FL[n_counter] = NULL;
+- tb->FR[n_counter] = NULL;
+- tb->CFL[n_counter] = NULL;
+- tb->CFR[n_counter] = NULL;
++ for (i = 0; i < MAX_HEIGHT; i++) {
++ brelse(tb->L[i]);
++ brelse(tb->R[i]);
++ brelse(tb->FL[i]);
++ brelse(tb->FR[i]);
++ brelse(tb->CFL[i]);
++ brelse(tb->CFR[i]);
++
++ tb->L[i] = NULL;
++ tb->R[i] = NULL;
++ tb->FL[i] = NULL;
++ tb->FR[i] = NULL;
++ tb->CFL[i] = NULL;
++ tb->CFR[i] = NULL;
+ }
+ }
+
+@@ -778,13 +778,13 @@ static void free_buffers_in_tb(struct tr
+ * NO_DISK_SPACE - no disk space.
+ */
+ /* The function is NOT SCHEDULE-SAFE! */
+-static int get_empty_nodes(struct tree_balance *tb, int n_h)
++static int get_empty_nodes(struct tree_balance *tb, int h)
+ {
+ struct buffer_head *new_bh,
+- *Sh = PATH_H_PBUFFER(tb->tb_path, n_h);
+- b_blocknr_t *blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
+- int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */
+- n_retval = CARRY_ON;
++ *Sh = PATH_H_PBUFFER(tb->tb_path, h);
++ b_blocknr_t *blocknr, blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
++ int counter, number_of_freeblk, amount_needed, /* number of needed empty blocks */
++ retval = CARRY_ON;
+ struct super_block *sb = tb->tb_sb;
+
+ /* number_of_freeblk is the number of empty blocks which have been
+@@ -793,7 +793,7 @@ static int get_empty_nodes(struct tree_b
+ number_of_freeblk = tb->cur_blknum can be non-zero if a schedule occurs
+ after empty blocks are acquired, and the balancing analysis is
+ then restarted, amount_needed is the number needed by this level
+- (n_h) of the balancing analysis.
++ (h) of the balancing analysis.
+
+ Note that for systems with many processes writing, it would be
+ more layout optimal to calculate the total number needed by all
+@@ -801,31 +801,31 @@ static int get_empty_nodes(struct tree_b
+
+ /* Initiate number_of_freeblk to the amount acquired prior to the restart of
+ the analysis or 0 if not restarted, then subtract the amount needed
+- by all of the levels of the tree below n_h. */
+- /* blknum includes S[n_h], so we subtract 1 in this calculation */
+- for (n_counter = 0, n_number_of_freeblk = tb->cur_blknum;
+- n_counter < n_h; n_counter++)
+- n_number_of_freeblk -=
+- (tb->blknum[n_counter]) ? (tb->blknum[n_counter] -
++ by all of the levels of the tree below h. */
++ /* blknum includes S[h], so we subtract 1 in this calculation */
++ for (counter = 0, number_of_freeblk = tb->cur_blknum;
++ counter < h; counter++)
++ number_of_freeblk -=
++ (tb->blknum[counter]) ? (tb->blknum[counter] -
+ 1) : 0;
+
+ /* Allocate missing empty blocks. */
+ /* if Sh == 0 then we are getting a new root */
+- n_amount_needed = (Sh) ? (tb->blknum[n_h] - 1) : 1;
++ amount_needed = (Sh) ? (tb->blknum[h] - 1) : 1;
+ /* Amount_needed = the amount that we need more than the amount that we have. */
+- if (n_amount_needed > n_number_of_freeblk)
+- n_amount_needed -= n_number_of_freeblk;
++ if (amount_needed > number_of_freeblk)
++ amount_needed -= number_of_freeblk;
+ else /* If we have enough already then there is nothing to do. */
+ return CARRY_ON;
+
+ /* No need to check quota - is not allocated for blocks used for formatted nodes */
+- if (reiserfs_new_form_blocknrs(tb, a_n_blocknrs,
+- n_amount_needed) == NO_DISK_SPACE)
++ if (reiserfs_new_form_blocknrs(tb, blocknrs,
++ amount_needed) == NO_DISK_SPACE)
+ return NO_DISK_SPACE;
+
+ /* for each blocknumber we just got, get a buffer and stick it on FEB */
+- for (blocknr = a_n_blocknrs, n_counter = 0;
+- n_counter < n_amount_needed; blocknr++, n_counter++) {
++ for (blocknr = blocknrs, counter = 0;
++ counter < amount_needed; blocknr++, counter++) {
+
+ RFALSE(!*blocknr,
+ "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
+@@ -845,10 +845,10 @@ static int get_empty_nodes(struct tree_b
+ tb->FEB[tb->cur_blknum++] = new_bh;
+ }
+
+- if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
+- n_retval = REPEAT_SEARCH;
++ if (retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
++ retval = REPEAT_SEARCH;
+
+- return n_retval;
++ return retval;
+ }
+
+ /* Get free space of the left neighbor, which is stored in the parent
+@@ -896,36 +896,36 @@ static int get_rfree(struct tree_balance
+ }
+
+ /* Check whether left neighbor is in memory. */
+-static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h)
++static int is_left_neighbor_in_cache(struct tree_balance *tb, int h)
+ {
+ struct buffer_head *father, *left;
+ struct super_block *sb = tb->tb_sb;
+- b_blocknr_t n_left_neighbor_blocknr;
+- int n_left_neighbor_position;
++ b_blocknr_t left_neighbor_blocknr;
++ int left_neighbor_position;
+
+ /* Father of the left neighbor does not exist. */
+- if (!tb->FL[n_h])
++ if (!tb->FL[h])
+ return 0;
+
+ /* Calculate father of the node to be balanced. */
+- father = PATH_H_PBUFFER(tb->tb_path, n_h + 1);
++ father = PATH_H_PBUFFER(tb->tb_path, h + 1);
+
+ RFALSE(!father ||
+ !B_IS_IN_TREE(father) ||
+- !B_IS_IN_TREE(tb->FL[n_h]) ||
++ !B_IS_IN_TREE(tb->FL[h]) ||
+ !buffer_uptodate(father) ||
+- !buffer_uptodate(tb->FL[n_h]),
++ !buffer_uptodate(tb->FL[h]),
+ "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
+- father, tb->FL[n_h]);
++ father, tb->FL[h]);
+
+ /* Get position of the pointer to the left neighbor into the left father. */
+- n_left_neighbor_position = (father == tb->FL[n_h]) ?
+- tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]);
++ left_neighbor_position = (father == tb->FL[h]) ?
++ tb->lkey[h] : B_NR_ITEMS(tb->FL[h]);
+ /* Get left neighbor block number. */
+- n_left_neighbor_blocknr =
+- B_N_CHILD_NUM(tb->FL[n_h], n_left_neighbor_position);
++ left_neighbor_blocknr =
++ B_N_CHILD_NUM(tb->FL[h], left_neighbor_position);
+ /* Look for the left neighbor in the cache. */
+- if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) {
++ if ((left = sb_find_get_block(sb, left_neighbor_blocknr))) {
+
+ RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
+ "vs-8170: left neighbor (%b %z) is not in the tree",
+@@ -955,7 +955,7 @@ static void decrement_key(struct cpu_key
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+ static int get_far_parent(struct tree_balance *tb,
+- int n_h,
++ int h,
+ struct buffer_head **pfather,
+ struct buffer_head **pcom_father, char c_lr_par)
+ {
+@@ -963,38 +963,38 @@ static int get_far_parent(struct tree_ba
+ INITIALIZE_PATH(s_path_to_neighbor_father);
+ struct treepath *path = tb->tb_path;
+ struct cpu_key s_lr_father_key;
+- int n_counter,
+- n_position = INT_MAX,
+- n_first_last_position = 0,
+- n_path_offset = PATH_H_PATH_OFFSET(path, n_h);
++ int counter,
++ position = INT_MAX,
++ first_last_position = 0,
++ path_offset = PATH_H_PATH_OFFSET(path, h);
+
+- /* Starting from F[n_h] go upwards in the tree, and look for the common
+- ancestor of F[n_h], and its neighbor l/r, that should be obtained. */
++ /* Starting from F[h] go upwards in the tree, and look for the common
++ ancestor of F[h], and its neighbor l/r, that should be obtained. */
+
+- n_counter = n_path_offset;
++ counter = path_offset;
+
+- RFALSE(n_counter < FIRST_PATH_ELEMENT_OFFSET,
++ RFALSE(counter < FIRST_PATH_ELEMENT_OFFSET,
+ "PAP-8180: invalid path length");
+
+- for (; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--) {
++ for (; counter > FIRST_PATH_ELEMENT_OFFSET; counter--) {
+ /* Check whether parent of the current buffer in the path is really parent in the tree. */
+ if (!B_IS_IN_TREE
+- (parent = PATH_OFFSET_PBUFFER(path, n_counter - 1)))
++ (parent = PATH_OFFSET_PBUFFER(path, counter - 1)))
+ return REPEAT_SEARCH;
+ /* Check whether position in the parent is correct. */
+- if ((n_position =
++ if ((position =
+ PATH_OFFSET_POSITION(path,
+- n_counter - 1)) >
++ counter - 1)) >
+ B_NR_ITEMS(parent))
+ return REPEAT_SEARCH;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(parent, n_position) !=
+- PATH_OFFSET_PBUFFER(path, n_counter)->b_blocknr)
++ if (B_N_CHILD_NUM(parent, position) !=
++ PATH_OFFSET_PBUFFER(path, counter)->b_blocknr)
+ return REPEAT_SEARCH;
+ /* Return delimiting key if position in the parent is not equal to first/last one. */
+ if (c_lr_par == RIGHT_PARENTS)
+- n_first_last_position = B_NR_ITEMS(parent);
+- if (n_position != n_first_last_position) {
++ first_last_position = B_NR_ITEMS(parent);
++ if (position != first_last_position) {
+ *pcom_father = parent;
+ get_bh(*pcom_father);
+ /*(*pcom_father = parent)->b_count++; */
+@@ -1003,7 +1003,7 @@ static int get_far_parent(struct tree_ba
+ }
+
+ /* if we are in the root of the tree, then there is no common father */
+- if (n_counter == FIRST_PATH_ELEMENT_OFFSET) {
++ if (counter == FIRST_PATH_ELEMENT_OFFSET) {
+ /* Check whether first buffer in the path is the root of the tree. */
+ if (PATH_OFFSET_PBUFFER
+ (tb->tb_path,
+@@ -1036,18 +1036,18 @@ static int get_far_parent(struct tree_ba
+ le_key2cpu_key(&s_lr_father_key,
+ B_N_PDELIM_KEY(*pcom_father,
+ (c_lr_par ==
+- LEFT_PARENTS) ? (tb->lkey[n_h - 1] =
+- n_position -
+- 1) : (tb->rkey[n_h -
++ LEFT_PARENTS) ? (tb->lkey[h - 1] =
++ position -
++ 1) : (tb->rkey[h -
+ 1] =
+- n_position)));
++ position)));
+
+ if (c_lr_par == LEFT_PARENTS)
+ decrement_key(&s_lr_father_key);
+
+ if (search_by_key
+ (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
+- n_h + 1) == IO_ERROR)
++ h + 1) == IO_ERROR)
+ // path is released
+ return IO_ERROR;
+
+@@ -1059,7 +1059,7 @@ static int get_far_parent(struct tree_ba
+
+ *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
+
+- RFALSE(B_LEVEL(*pfather) != n_h + 1,
++ RFALSE(B_LEVEL(*pfather) != h + 1,
+ "PAP-8190: (%b %z) level too small", *pfather, *pfather);
+ RFALSE(s_path_to_neighbor_father.path_length <
+ FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
+@@ -1069,92 +1069,92 @@ static int get_far_parent(struct tree_ba
+ return CARRY_ON;
+ }
+
+-/* Get parents of neighbors of node in the path(S[n_path_offset]) and common parents of
+- * S[n_path_offset] and L[n_path_offset]/R[n_path_offset]: F[n_path_offset], FL[n_path_offset],
+- * FR[n_path_offset], CFL[n_path_offset], CFR[n_path_offset].
+- * Calculate numbers of left and right delimiting keys position: lkey[n_path_offset], rkey[n_path_offset].
++/* Get parents of neighbors of node in the path(S[path_offset]) and common parents of
++ * S[path_offset] and L[path_offset]/R[path_offset]: F[path_offset], FL[path_offset],
++ * FR[path_offset], CFL[path_offset], CFR[path_offset].
++ * Calculate numbers of left and right delimiting keys position: lkey[path_offset], rkey[path_offset].
+ * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+-static int get_parents(struct tree_balance *tb, int n_h)
++static int get_parents(struct tree_balance *tb, int h)
+ {
+ struct treepath *path = tb->tb_path;
+- int n_position,
+- n_ret_value,
+- n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
++ int position,
++ ret,
++ path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
+ struct buffer_head *curf, *curcf;
+
+ /* Current node is the root of the tree or will be root of the tree */
+- if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
++ if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+ /* The root can not have parents.
+ Release nodes which previously were obtained as parents of the current node neighbors. */
+- brelse(tb->FL[n_h]);
+- brelse(tb->CFL[n_h]);
+- brelse(tb->FR[n_h]);
+- brelse(tb->CFR[n_h]);
+- tb->FL[n_h] = NULL;
+- tb->CFL[n_h] = NULL;
+- tb->FR[n_h] = NULL;
+- tb->CFR[n_h] = NULL;
++ brelse(tb->FL[h]);
++ brelse(tb->CFL[h]);
++ brelse(tb->FR[h]);
++ brelse(tb->CFR[h]);
++ tb->FL[h] = NULL;
++ tb->CFL[h] = NULL;
++ tb->FR[h] = NULL;
++ tb->CFR[h] = NULL;
+ return CARRY_ON;
+ }
+
+- /* Get parent FL[n_path_offset] of L[n_path_offset]. */
+- n_position = PATH_OFFSET_POSITION(path, n_path_offset - 1);
+- if (n_position) {
++ /* Get parent FL[path_offset] of L[path_offset]. */
++ position = PATH_OFFSET_POSITION(path, path_offset - 1);
++ if (position) {
+ /* Current node is not the first child of its parent. */
+- curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
+- curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++ curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
++ curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
+ get_bh(curf);
+ get_bh(curf);
+- tb->lkey[n_h] = n_position - 1;
++ tb->lkey[h] = position - 1;
+ } else {
+- /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node.
+- Calculate current common parent of L[n_path_offset] and the current node. Note that
+- CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset].
+- Calculate lkey[n_path_offset]. */
+- if ((n_ret_value = get_far_parent(tb, n_h + 1, &curf,
++ /* Calculate current parent of L[path_offset], which is the left neighbor of the current node.
++ Calculate current common parent of L[path_offset] and the current node. Note that
++ CFL[path_offset] not equal FL[path_offset] and CFL[path_offset] not equal F[path_offset].
++ Calculate lkey[path_offset]. */
++ if ((ret = get_far_parent(tb, h + 1, &curf,
+ &curcf,
+ LEFT_PARENTS)) != CARRY_ON)
+- return n_ret_value;
++ return ret;
+ }
+
+- brelse(tb->FL[n_h]);
+- tb->FL[n_h] = curf; /* New initialization of FL[n_h]. */
+- brelse(tb->CFL[n_h]);
+- tb->CFL[n_h] = curcf; /* New initialization of CFL[n_h]. */
++ brelse(tb->FL[h]);
++ tb->FL[h] = curf; /* New initialization of FL[h]. */
++ brelse(tb->CFL[h]);
++ tb->CFL[h] = curcf; /* New initialization of CFL[h]. */
+
+ RFALSE((curf && !B_IS_IN_TREE(curf)) ||
+ (curcf && !B_IS_IN_TREE(curcf)),
+ "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
+
+-/* Get parent FR[n_h] of R[n_h]. */
++/* Get parent FR[h] of R[h]. */
+
+-/* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */
+- if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(path, n_h + 1))) {
+-/* Calculate current parent of R[n_h], which is the right neighbor of F[n_h].
+- Calculate current common parent of R[n_h] and current node. Note that CFR[n_h]
+- not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */
+- if ((n_ret_value =
+- get_far_parent(tb, n_h + 1, &curf, &curcf,
++/* Current node is the last child of F[h]. FR[h] != F[h]. */
++ if (position == B_NR_ITEMS(PATH_H_PBUFFER(path, h + 1))) {
++/* Calculate current parent of R[h], which is the right neighbor of F[h].
++ Calculate current common parent of R[h] and current node. Note that CFR[h]
++ not equal FR[path_offset] and CFR[h] not equal F[h]. */
++ if ((ret =
++ get_far_parent(tb, h + 1, &curf, &curcf,
+ RIGHT_PARENTS)) != CARRY_ON)
+- return n_ret_value;
++ return ret;
+ } else {
+-/* Current node is not the last child of its parent F[n_h]. */
+- curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
+- curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++/* Current node is not the last child of its parent F[h]. */
++ curf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
++ curcf = PATH_OFFSET_PBUFFER(path, path_offset - 1);
+ get_bh(curf);
+ get_bh(curf);
+- tb->rkey[n_h] = n_position;
++ tb->rkey[h] = position;
+ }
+
+- brelse(tb->FR[n_h]);
+- /* New initialization of FR[n_path_offset]. */
+- tb->FR[n_h] = curf;
++ brelse(tb->FR[h]);
++ /* New initialization of FR[path_offset]. */
++ tb->FR[h] = curf;
+
+- brelse(tb->CFR[n_h]);
+- /* New initialization of CFR[n_path_offset]. */
+- tb->CFR[n_h] = curcf;
++ brelse(tb->CFR[h]);
++ /* New initialization of CFR[path_offset]. */
++ tb->CFR[h] = curcf;
+
+ RFALSE((curf && !B_IS_IN_TREE(curf)) ||
+ (curcf && !B_IS_IN_TREE(curcf)),
+@@ -1222,7 +1222,7 @@ static int ip_check_balance(struct tree_
+ contains node being balanced. The mnemonic is
+ that the attempted change in node space used level
+ is levbytes bytes. */
+- n_ret_value;
++ ret;
+
+ int lfree, sfree, rfree /* free space in L, S and R */ ;
+
+@@ -1262,22 +1262,22 @@ static int ip_check_balance(struct tree_
+ if (!h)
+ reiserfs_panic(tb->tb_sb, "vs-8210",
+ "S[0] can not be 0");
+- switch (n_ret_value = get_empty_nodes(tb, h)) {
++ switch (ret = get_empty_nodes(tb, h)) {
+ case CARRY_ON:
+ set_parameters(tb, h, 0, 0, 1, NULL, -1, -1);
+ return NO_BALANCING_NEEDED; /* no balancing for higher levels needed */
+
+ case NO_DISK_SPACE:
+ case REPEAT_SEARCH:
+- return n_ret_value;
++ return ret;
+ default:
+ reiserfs_panic(tb->tb_sb, "vs-8215", "incorrect "
+ "return value of get_empty_nodes");
+ }
+ }
+
+- if ((n_ret_value = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
+- return n_ret_value;
++ if ((ret = get_parents(tb, h)) != CARRY_ON) /* get parents of S[h] neighbors. */
++ return ret;
+
+ sfree = B_FREE_SPACE(Sh);
+
+@@ -1564,7 +1564,7 @@ static int dc_check_balance_internal(str
+ /* Sh is the node whose balance is currently being checked,
+ and Fh is its father. */
+ struct buffer_head *Sh, *Fh;
+- int maxsize, n_ret_value;
++ int maxsize, ret;
+ int lfree, rfree /* free space in L and R */ ;
+
+ Sh = PATH_H_PBUFFER(tb->tb_path, h);
+@@ -1589,8 +1589,8 @@ static int dc_check_balance_internal(str
+ return CARRY_ON;
+ }
+
+- if ((n_ret_value = get_parents(tb, h)) != CARRY_ON)
+- return n_ret_value;
++ if ((ret = get_parents(tb, h)) != CARRY_ON)
++ return ret;
+
+ /* get free space of neighbors */
+ rfree = get_rfree(tb, h);
+@@ -1747,7 +1747,7 @@ static int dc_check_balance_leaf(struct
+ attempted change in node space used level is levbytes bytes. */
+ int levbytes;
+ /* the maximal item size */
+- int maxsize, n_ret_value;
++ int maxsize, ret;
+ /* S0 is the node whose balance is currently being checked,
+ and F0 is its father. */
+ struct buffer_head *S0, *F0;
+@@ -1769,8 +1769,8 @@ static int dc_check_balance_leaf(struct
+ return NO_BALANCING_NEEDED;
+ }
+
+- if ((n_ret_value = get_parents(tb, h)) != CARRY_ON)
+- return n_ret_value;
++ if ((ret = get_parents(tb, h)) != CARRY_ON)
++ return ret;
+
+ /* get free space of neighbors */
+ rfree = get_rfree(tb, h);
+@@ -1889,40 +1889,40 @@ static int check_balance(int mode,
+ }
+
+ /* Check whether parent at the path is the really parent of the current node.*/
+-static int get_direct_parent(struct tree_balance *tb, int n_h)
++static int get_direct_parent(struct tree_balance *tb, int h)
+ {
+ struct buffer_head *bh;
+ struct treepath *path = tb->tb_path;
+- int n_position,
+- n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
++ int position,
++ path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h);
+
+ /* We are in the root or in the new root. */
+- if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
++ if (path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+
+- RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
++ RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
+ "PAP-8260: invalid offset in the path");
+
+ if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
+ b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
+ /* Root is not changed. */
+- PATH_OFFSET_PBUFFER(path, n_path_offset - 1) = NULL;
+- PATH_OFFSET_POSITION(path, n_path_offset - 1) = 0;
++ PATH_OFFSET_PBUFFER(path, path_offset - 1) = NULL;
++ PATH_OFFSET_POSITION(path, path_offset - 1) = 0;
+ return CARRY_ON;
+ }
+ return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
+ }
+
+ if (!B_IS_IN_TREE
+- (bh = PATH_OFFSET_PBUFFER(path, n_path_offset - 1)))
++ (bh = PATH_OFFSET_PBUFFER(path, path_offset - 1)))
+ return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+
+- if ((n_position =
++ if ((position =
+ PATH_OFFSET_POSITION(path,
+- n_path_offset - 1)) > B_NR_ITEMS(bh))
++ path_offset - 1)) > B_NR_ITEMS(bh))
+ return REPEAT_SEARCH;
+
+- if (B_N_CHILD_NUM(bh, n_position) !=
+- PATH_OFFSET_PBUFFER(path, n_path_offset)->b_blocknr)
++ if (B_N_CHILD_NUM(bh, position) !=
++ PATH_OFFSET_PBUFFER(path, path_offset)->b_blocknr)
+ /* Parent in the path is not parent of the current node in the tree. */
+ return REPEAT_SEARCH;
+
+@@ -1935,92 +1935,92 @@ static int get_direct_parent(struct tree
+ return CARRY_ON; /* Parent in the path is unlocked and really parent of the current node. */
+ }
+
+-/* Using lnum[n_h] and rnum[n_h] we should determine what neighbors
+- * of S[n_h] we
+- * need in order to balance S[n_h], and get them if necessary.
++/* Using lnum[h] and rnum[h] we should determine what neighbors
++ * of S[h] we
++ * need in order to balance S[h], and get them if necessary.
+ * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+-static int get_neighbors(struct tree_balance *tb, int n_h)
++static int get_neighbors(struct tree_balance *tb, int h)
+ {
+- int n_child_position,
+- n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h + 1);
+- unsigned long n_son_number;
++ int child_position,
++ path_offset = PATH_H_PATH_OFFSET(tb->tb_path, h + 1);
++ unsigned long son_number;
+ struct super_block *sb = tb->tb_sb;
+ struct buffer_head *bh;
+
+- PROC_INFO_INC(sb, get_neighbors[n_h]);
++ PROC_INFO_INC(sb, get_neighbors[h]);
+
+- if (tb->lnum[n_h]) {
+- /* We need left neighbor to balance S[n_h]. */
+- PROC_INFO_INC(sb, need_l_neighbor[n_h]);
+- bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset);
++ if (tb->lnum[h]) {
++ /* We need left neighbor to balance S[h]. */
++ PROC_INFO_INC(sb, need_l_neighbor[h]);
++ bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
+
+- RFALSE(bh == tb->FL[n_h] &&
+- !PATH_OFFSET_POSITION(tb->tb_path, n_path_offset),
++ RFALSE(bh == tb->FL[h] &&
++ !PATH_OFFSET_POSITION(tb->tb_path, path_offset),
+ "PAP-8270: invalid position in the parent");
+
+- n_child_position =
++ child_position =
+ (bh ==
+- tb->FL[n_h]) ? tb->lkey[n_h] : B_NR_ITEMS(tb->
+- FL[n_h]);
+- n_son_number = B_N_CHILD_NUM(tb->FL[n_h], n_child_position);
+- bh = sb_bread(sb, n_son_number);
++ tb->FL[h]) ? tb->lkey[h] : B_NR_ITEMS(tb->
++ FL[h]);
++ son_number = B_N_CHILD_NUM(tb->FL[h], child_position);
++ bh = sb_bread(sb, son_number);
+ if (!bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+ brelse(bh);
+- PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
++ PROC_INFO_INC(sb, get_neighbors_restart[h]);
+ return REPEAT_SEARCH;
+ }
+
+- RFALSE(!B_IS_IN_TREE(tb->FL[n_h]) ||
+- n_child_position > B_NR_ITEMS(tb->FL[n_h]) ||
+- B_N_CHILD_NUM(tb->FL[n_h], n_child_position) !=
++ RFALSE(!B_IS_IN_TREE(tb->FL[h]) ||
++ child_position > B_NR_ITEMS(tb->FL[h]) ||
++ B_N_CHILD_NUM(tb->FL[h], child_position) !=
+ bh->b_blocknr, "PAP-8275: invalid parent");
+ RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
+- RFALSE(!n_h &&
++ RFALSE(!h &&
+ B_FREE_SPACE(bh) !=
+ MAX_CHILD_SIZE(bh) -
+- dc_size(B_N_CHILD(tb->FL[0], n_child_position)),
++ dc_size(B_N_CHILD(tb->FL[0], child_position)),
+ "PAP-8290: invalid child size of left neighbor");
+
+- brelse(tb->L[n_h]);
+- tb->L[n_h] = bh;
++ brelse(tb->L[h]);
++ tb->L[h] = bh;
+ }
+
+- /* We need right neighbor to balance S[n_path_offset]. */
+- if (tb->rnum[n_h]) {
+- PROC_INFO_INC(sb, need_r_neighbor[n_h]);
+- bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset);
++ /* We need right neighbor to balance S[path_offset]. */
++	if (tb->rnum[h]) {
++ PROC_INFO_INC(sb, need_r_neighbor[h]);
++ bh = PATH_OFFSET_PBUFFER(tb->tb_path, path_offset);
+
+- RFALSE(bh == tb->FR[n_h] &&
++ RFALSE(bh == tb->FR[h] &&
+ PATH_OFFSET_POSITION(tb->tb_path,
+- n_path_offset) >=
++ path_offset) >=
+ B_NR_ITEMS(bh),
+ "PAP-8295: invalid position in the parent");
+
+- n_child_position =
+- (bh == tb->FR[n_h]) ? tb->rkey[n_h] + 1 : 0;
+- n_son_number = B_N_CHILD_NUM(tb->FR[n_h], n_child_position);
+- bh = sb_bread(sb, n_son_number);
++ child_position =
++ (bh == tb->FR[h]) ? tb->rkey[h] + 1 : 0;
++ son_number = B_N_CHILD_NUM(tb->FR[h], child_position);
++ bh = sb_bread(sb, son_number);
+ if (!bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+ brelse(bh);
+- PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
++ PROC_INFO_INC(sb, get_neighbors_restart[h]);
+ return REPEAT_SEARCH;
+ }
+- brelse(tb->R[n_h]);
+- tb->R[n_h] = bh;
++ brelse(tb->R[h]);
++ tb->R[h] = bh;
+
+- RFALSE(!n_h
++ RFALSE(!h
+ && B_FREE_SPACE(bh) !=
+ MAX_CHILD_SIZE(bh) -
+- dc_size(B_N_CHILD(tb->FR[0], n_child_position)),
++ dc_size(B_N_CHILD(tb->FR[0], child_position)),
+ "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
+ B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
+- dc_size(B_N_CHILD(tb->FR[0], n_child_position)));
++ dc_size(B_N_CHILD(tb->FR[0], child_position)));
+
+ }
+ return CARRY_ON;
+@@ -2317,11 +2317,11 @@ static int wait_tb_buffers_until_unlocke
+ * -1 - if no_disk_space
+ */
+
+-int fix_nodes(int n_op_mode, struct tree_balance *tb,
++int fix_nodes(int op_mode, struct tree_balance *tb,
+ struct item_head *ins_ih, const void *data)
+ {
+- int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path);
+- int n_pos_in_item;
++ int ret, h, item_num = PATH_LAST_POSITION(tb->tb_path);
++ int pos_in_item;
+
+ /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
+ ** during wait_tb_buffers_run
+@@ -2331,7 +2331,7 @@ int fix_nodes(int n_op_mode, struct tree
+
+ ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
+
+- n_pos_in_item = tb->tb_path->pos_in_item;
++ pos_in_item = tb->tb_path->pos_in_item;
+
+ tb->fs_gen = get_generation(tb->tb_sb);
+
+@@ -2364,26 +2364,26 @@ int fix_nodes(int n_op_mode, struct tree
+ reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
+ "not uptodate at the beginning of fix_nodes "
+ "or not in tree (mode %c)",
+- tbS0, tbS0, n_op_mode);
++ tbS0, tbS0, op_mode);
+
+ /* Check parameters. */
+- switch (n_op_mode) {
++ switch (op_mode) {
+ case M_INSERT:
+- if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(tbS0))
++ if (item_num <= 0 || item_num > B_NR_ITEMS(tbS0))
+ reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
+ "item number %d (in S0 - %d) in case "
+- "of insert", n_item_num,
++ "of insert", item_num,
+ B_NR_ITEMS(tbS0));
+ break;
+ case M_PASTE:
+ case M_DELETE:
+ case M_CUT:
+- if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(tbS0)) {
++ if (item_num < 0 || item_num >= B_NR_ITEMS(tbS0)) {
+ print_block(tbS0, 0, -1, -1);
+ reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
+ "item number(%d); mode = %c "
+ "insert_size = %d",
+- n_item_num, n_op_mode,
++ item_num, op_mode,
+ tb->insert_size[0]);
+ }
+ break;
+@@ -2397,73 +2397,73 @@ int fix_nodes(int n_op_mode, struct tree
+ // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+ return REPEAT_SEARCH;
+
+- /* Starting from the leaf level; for all levels n_h of the tree. */
+- for (n_h = 0; n_h < MAX_HEIGHT && tb->insert_size[n_h]; n_h++) {
+- n_ret_value = get_direct_parent(tb, n_h);
+- if (n_ret_value != CARRY_ON)
++ /* Starting from the leaf level; for all levels h of the tree. */
++ for (h = 0; h < MAX_HEIGHT && tb->insert_size[h]; h++) {
++ ret = get_direct_parent(tb, h);
++ if (ret != CARRY_ON)
+ goto repeat;
+
+- n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num,
+- n_pos_in_item, ins_ih, data);
+- if (n_ret_value != CARRY_ON) {
+- if (n_ret_value == NO_BALANCING_NEEDED) {
++ ret = check_balance(op_mode, tb, h, item_num,
++ pos_in_item, ins_ih, data);
++ if (ret != CARRY_ON) {
++ if (ret == NO_BALANCING_NEEDED) {
+ /* No balancing for higher levels needed. */
+- n_ret_value = get_neighbors(tb, n_h);
+- if (n_ret_value != CARRY_ON)
++ ret = get_neighbors(tb, h);
++ if (ret != CARRY_ON)
+ goto repeat;
+- if (n_h != MAX_HEIGHT - 1)
+- tb->insert_size[n_h + 1] = 0;
++ if (h != MAX_HEIGHT - 1)
++ tb->insert_size[h + 1] = 0;
+ /* ok, analysis and resource gathering are complete */
+ break;
+ }
+ goto repeat;
+ }
+
+- n_ret_value = get_neighbors(tb, n_h);
+- if (n_ret_value != CARRY_ON)
++ ret = get_neighbors(tb, h);
++ if (ret != CARRY_ON)
+ goto repeat;
+
+ /* No disk space, or schedule occurred and analysis may be
+ * invalid and needs to be redone. */
+- n_ret_value = get_empty_nodes(tb, n_h);
+- if (n_ret_value != CARRY_ON)
++ ret = get_empty_nodes(tb, h);
++ if (ret != CARRY_ON)
+ goto repeat;
+
+- if (!PATH_H_PBUFFER(tb->tb_path, n_h)) {
++ if (!PATH_H_PBUFFER(tb->tb_path, h)) {
+ /* We have a positive insert size but no nodes exist on this
+ level, this means that we are creating a new root. */
+
+- RFALSE(tb->blknum[n_h] != 1,
++ RFALSE(tb->blknum[h] != 1,
+ "PAP-8350: creating new empty root");
+
+- if (n_h < MAX_HEIGHT - 1)
+- tb->insert_size[n_h + 1] = 0;
+- } else if (!PATH_H_PBUFFER(tb->tb_path, n_h + 1)) {
+- if (tb->blknum[n_h] > 1) {
+- /* The tree needs to be grown, so this node S[n_h]
++ if (h < MAX_HEIGHT - 1)
++ tb->insert_size[h + 1] = 0;
++ } else if (!PATH_H_PBUFFER(tb->tb_path, h + 1)) {
++ if (tb->blknum[h] > 1) {
++ /* The tree needs to be grown, so this node S[h]
+ which is the root node is split into two nodes,
+- and a new node (S[n_h+1]) will be created to
++ and a new node (S[h+1]) will be created to
+ become the root node. */
+
+- RFALSE(n_h == MAX_HEIGHT - 1,
++ RFALSE(h == MAX_HEIGHT - 1,
+ "PAP-8355: attempt to create too high of a tree");
+
+- tb->insert_size[n_h + 1] =
++ tb->insert_size[h + 1] =
+ (DC_SIZE +
+- KEY_SIZE) * (tb->blknum[n_h] - 1) +
++ KEY_SIZE) * (tb->blknum[h] - 1) +
+ DC_SIZE;
+- } else if (n_h < MAX_HEIGHT - 1)
+- tb->insert_size[n_h + 1] = 0;
++ } else if (h < MAX_HEIGHT - 1)
++ tb->insert_size[h + 1] = 0;
+ } else
+- tb->insert_size[n_h + 1] =
+- (DC_SIZE + KEY_SIZE) * (tb->blknum[n_h] - 1);
++ tb->insert_size[h + 1] =
++ (DC_SIZE + KEY_SIZE) * (tb->blknum[h] - 1);
+ }
+
+- n_ret_value = wait_tb_buffers_until_unlocked(tb);
+- if (n_ret_value == CARRY_ON) {
++ ret = wait_tb_buffers_until_unlocked(tb);
++ if (ret == CARRY_ON) {
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+ wait_tb_buffers_run = 1;
+- n_ret_value = REPEAT_SEARCH;
++ ret = REPEAT_SEARCH;
+ goto repeat;
+ } else {
+ return CARRY_ON;
+@@ -2529,7 +2529,7 @@ int fix_nodes(int n_op_mode, struct tree
+ (tb->tb_sb, tb->FEB[i]);
+ }
+ }
+- return n_ret_value;
++ return ret;
+ }
+
+ }
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -136,11 +136,11 @@ inline int comp_short_le_keys(const stru
+ const struct reiserfs_key *key2)
+ {
+ __u32 *k1_u32, *k2_u32;
+- int n_key_length = REISERFS_SHORT_KEY_LEN;
++ int key_length = REISERFS_SHORT_KEY_LEN;
+
+ k1_u32 = (__u32 *) key1;
+ k2_u32 = (__u32 *) key2;
+- for (; n_key_length--; ++k1_u32, ++k2_u32) {
++ for (; key_length--; ++k1_u32, ++k2_u32) {
+ if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
+ return -1;
+ if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
+@@ -177,10 +177,10 @@ inline int comp_le_keys(const struct rei
+ * *pos = number of the searched element if found, else the *
+ * number of the first element that is larger than key. *
+ **************************************************************************/
+-/* For those not familiar with binary search: n_lbound is the leftmost item that it
+- could be, n_rbound the rightmost item that it could be. We examine the item
+- halfway between n_lbound and n_rbound, and that tells us either that we can increase
+- n_lbound, or decrease n_rbound, or that we have found it, or if n_lbound <= n_rbound that
++/* For those not familiar with binary search: lbound is the leftmost item that it
++ could be, rbound the rightmost item that it could be. We examine the item
++ halfway between lbound and rbound, and that tells us either that we can increase
++   lbound, or decrease rbound, or that we have found it, or if lbound > rbound that
+ there are no possible items, and we have not found it. With each examination we
+ cut the number of possible items it could be by one more than half rounded down,
+ or we find it. */
+@@ -198,28 +198,27 @@ static inline int bin_search(const void
+ int *pos /* Number of the searched for element. */
+ )
+ {
+- int n_rbound, n_lbound, n_j;
++ int rbound, lbound, j;
+
+- for (n_j = ((n_rbound = num - 1) + (n_lbound = 0)) / 2;
+- n_lbound <= n_rbound; n_j = (n_rbound + n_lbound) / 2)
++ for (j = ((rbound = num - 1) + (lbound = 0)) / 2;
++ lbound <= rbound; j = (rbound + lbound) / 2)
+ switch (comp_keys
+- ((struct reiserfs_key *)((char *)base +
+- n_j * width),
++ ((struct reiserfs_key *)((char *)base + j * width),
+ (struct cpu_key *)key)) {
+ case -1:
+- n_lbound = n_j + 1;
++ lbound = j + 1;
+ continue;
+ case 1:
+- n_rbound = n_j - 1;
++ rbound = j - 1;
+ continue;
+ case 0:
+- *pos = n_j;
++ *pos = j;
+ return ITEM_FOUND; /* Key found in the array. */
+ }
+
+ /* bin_search did not find given key, it returns position of key,
+ that is minimal and greater than the given one. */
+- *pos = n_lbound;
++ *pos = lbound;
+ return ITEM_NOT_FOUND;
+ }
+
+@@ -242,43 +241,41 @@ static const struct reiserfs_key MAX_KEY
+ of the path, and going upwards. We must check the path's validity at each step. If the key is not in
+ the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
+ case we return a special key, either MIN_KEY or MAX_KEY. */
+-static inline const struct reiserfs_key *get_lkey(const struct treepath
+- *chk_path,
+- const struct super_block
+- *sb)
++static inline const struct reiserfs_key *get_lkey(const struct treepath *chk_path,
++ const struct super_block *sb)
+ {
+- int n_position, n_path_offset = chk_path->path_length;
++ int position, path_offset = chk_path->path_length;
+ struct buffer_head *parent;
+
+- RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
++ RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
+ "PAP-5010: invalid offset in the path");
+
+ /* While not higher in path than first element. */
+- while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
++ while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
+
+ RFALSE(!buffer_uptodate
+- (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)),
++ (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
+ "PAP-5020: parent is not uptodate");
+
+ /* Parent at the path is not in the tree now. */
+ if (!B_IS_IN_TREE
+ (parent =
+- PATH_OFFSET_PBUFFER(chk_path, n_path_offset)))
++ PATH_OFFSET_PBUFFER(chk_path, path_offset)))
+ return &MAX_KEY;
+ /* Check whether position in the parent is correct. */
+- if ((n_position =
++ if ((position =
+ PATH_OFFSET_POSITION(chk_path,
+- n_path_offset)) >
++ path_offset)) >
+ B_NR_ITEMS(parent))
+ return &MAX_KEY;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(parent, n_position) !=
++ if (B_N_CHILD_NUM(parent, position) !=
+ PATH_OFFSET_PBUFFER(chk_path,
+- n_path_offset + 1)->b_blocknr)
++ path_offset + 1)->b_blocknr)
+ return &MAX_KEY;
+ /* Return delimiting key if position in the parent is not equal to zero. */
+- if (n_position)
+- return B_N_PDELIM_KEY(parent, n_position - 1);
++ if (position)
++ return B_N_PDELIM_KEY(parent, position - 1);
+ }
+ /* Return MIN_KEY if we are in the root of the buffer tree. */
+ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+@@ -291,37 +288,37 @@ static inline const struct reiserfs_key
+ inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
+ const struct super_block *sb)
+ {
+- int n_position, n_path_offset = chk_path->path_length;
++ int position, path_offset = chk_path->path_length;
+ struct buffer_head *parent;
+
+- RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
++ RFALSE(path_offset < FIRST_PATH_ELEMENT_OFFSET,
+ "PAP-5030: invalid offset in the path");
+
+- while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
++ while (path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
+
+ RFALSE(!buffer_uptodate
+- (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)),
++ (PATH_OFFSET_PBUFFER(chk_path, path_offset)),
+ "PAP-5040: parent is not uptodate");
+
+ /* Parent at the path is not in the tree now. */
+ if (!B_IS_IN_TREE
+ (parent =
+- PATH_OFFSET_PBUFFER(chk_path, n_path_offset)))
++ PATH_OFFSET_PBUFFER(chk_path, path_offset)))
+ return &MIN_KEY;
+ /* Check whether position in the parent is correct. */
+- if ((n_position =
++ if ((position =
+ PATH_OFFSET_POSITION(chk_path,
+- n_path_offset)) >
++ path_offset)) >
+ B_NR_ITEMS(parent))
+ return &MIN_KEY;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(parent, n_position) !=
++ if (B_N_CHILD_NUM(parent, position) !=
+ PATH_OFFSET_PBUFFER(chk_path,
+- n_path_offset + 1)->b_blocknr)
++ path_offset + 1)->b_blocknr)
+ return &MIN_KEY;
+ /* Return delimiting key if position in the parent is not the last one. */
+- if (n_position != B_NR_ITEMS(parent))
+- return B_N_PDELIM_KEY(parent, n_position);
++ if (position != B_NR_ITEMS(parent))
++ return B_N_PDELIM_KEY(parent, position);
+ }
+ /* Return MAX_KEY if we are in the root of the buffer tree. */
+ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+@@ -371,14 +368,14 @@ int reiserfs_check_path(struct treepath
+ void pathrelse_and_restore(struct super_block *sb,
+ struct treepath *search_path)
+ {
+- int n_path_offset = search_path->path_length;
++ int path_offset = search_path->path_length;
+
+- RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
++ RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
+ "clm-4000: invalid path offset");
+
+- while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
++ while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
+ struct buffer_head *bh;
+- bh = PATH_OFFSET_PBUFFER(search_path, n_path_offset--);
++ bh = PATH_OFFSET_PBUFFER(search_path, path_offset--);
+ reiserfs_restore_prepared_buffer(sb, bh);
+ brelse(bh);
+ }
+@@ -388,13 +385,13 @@ void pathrelse_and_restore(struct super_
+ /* Drop the reference to each buffer in a path */
+ void pathrelse(struct treepath *search_path)
+ {
+- int n_path_offset = search_path->path_length;
++ int path_offset = search_path->path_length;
+
+- RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
++ RFALSE(path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
+ "PAP-5090: invalid path offset");
+
+- while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET)
+- brelse(PATH_OFFSET_PBUFFER(search_path, n_path_offset--));
++ while (path_offset > ILLEGAL_PATH_ELEMENT_OFFSET)
++ brelse(PATH_OFFSET_PBUFFER(search_path, path_offset--));
+
+ search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+ }
+@@ -572,16 +569,16 @@ int search_by_key(struct super_block *sb
+ by the calling
+ function. It is filled up
+ by this function. */
+- int n_stop_level /* How far down the tree to search. To
++ int stop_level /* How far down the tree to search. To
+ stop at leaf level - set to
+ DISK_LEAF_NODE_LEVEL */
+ )
+ {
+- b_blocknr_t n_block_number;
++ b_blocknr_t block_number;
+ int expected_level;
+ struct buffer_head *bh;
+ struct path_element *last_element;
+- int n_node_level, n_retval;
++ int node_level, retval;
+ int right_neighbor_of_leaf_node;
+ int fs_gen;
+ struct buffer_head *reada_bh[SEARCH_BY_KEY_READA];
+@@ -589,7 +586,7 @@ int search_by_key(struct super_block *sb
+ int reada_count = 0;
+
+ #ifdef CONFIG_REISERFS_CHECK
+- int n_repeat_counter = 0;
++ int repeat_counter = 0;
+ #endif
+
+ PROC_INFO_INC(sb, search_by_key);
+@@ -605,16 +602,16 @@ int search_by_key(struct super_block *sb
+ /* With each iteration of this loop we search through the items in the
+ current node, and calculate the next current node(next path element)
+ for the next iteration of this loop.. */
+- n_block_number = SB_ROOT_BLOCK(sb);
++ block_number = SB_ROOT_BLOCK(sb);
+ expected_level = -1;
+ while (1) {
+
+ #ifdef CONFIG_REISERFS_CHECK
+- if (!(++n_repeat_counter % 50000))
++ if (!(++repeat_counter % 50000))
+ reiserfs_warning(sb, "PAP-5100",
+ "%s: there were %d iterations of "
+ "while loop looking for key %K",
+- current->comm, n_repeat_counter,
++ current->comm, repeat_counter,
+ key);
+ #endif
+
+@@ -627,7 +624,7 @@ int search_by_key(struct super_block *sb
+ /* Read the next tree node, and set the last element in the path to
+ have a pointer to it. */
+ if ((bh = last_element->pe_buffer =
+- sb_getblk(sb, n_block_number))) {
++ sb_getblk(sb, block_number))) {
+ if (!buffer_uptodate(bh) && reada_count > 1)
+ search_by_key_reada(sb, reada_bh,
+ reada_blocks, reada_count);
+@@ -661,7 +658,7 @@ int search_by_key(struct super_block *sb
+
+ /* Get the root block number so that we can repeat the search
+ starting from the root. */
+- n_block_number = SB_ROOT_BLOCK(sb);
++ block_number = SB_ROOT_BLOCK(sb);
+ expected_level = -1;
+ right_neighbor_of_leaf_node = 0;
+
+@@ -694,26 +691,26 @@ int search_by_key(struct super_block *sb
+ }
+
+ /* ok, we have acquired next formatted node in the tree */
+- n_node_level = B_LEVEL(bh);
++ node_level = B_LEVEL(bh);
+
+- PROC_INFO_BH_STAT(sb, bh, n_node_level - 1);
++ PROC_INFO_BH_STAT(sb, bh, node_level - 1);
+
+- RFALSE(n_node_level < n_stop_level,
++ RFALSE(node_level < stop_level,
+ "vs-5152: tree level (%d) is less than stop level (%d)",
+- n_node_level, n_stop_level);
++ node_level, stop_level);
+
+- n_retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
++ retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
+ B_NR_ITEMS(bh),
+- (n_node_level ==
++ (node_level ==
+ DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
+ KEY_SIZE,
+ &(last_element->pe_position));
+- if (n_node_level == n_stop_level) {
+- return n_retval;
++ if (node_level == stop_level) {
++ return retval;
+ }
+
+ /* we are not in the stop level */
+- if (n_retval == ITEM_FOUND)
++ if (retval == ITEM_FOUND)
+ /* item has been found, so we choose the pointer which is to the right of the found one */
+ last_element->pe_position++;
+
+@@ -724,12 +721,12 @@ int search_by_key(struct super_block *sb
+ /* So we have chosen a position in the current node which is
+ an internal node. Now we calculate child block number by
+ position in the node. */
+- n_block_number =
++ block_number =
+ B_N_CHILD_NUM(bh, last_element->pe_position);
+
+ /* if we are going to read leaf nodes, try for read ahead as well */
+ if ((search_path->reada & PATH_READA) &&
+- n_node_level == DISK_LEAF_NODE_LEVEL + 1) {
++ node_level == DISK_LEAF_NODE_LEVEL + 1) {
+ int pos = last_element->pe_position;
+ int limit = B_NR_ITEMS(bh);
+ struct reiserfs_key *le_key;
+@@ -781,7 +778,7 @@ int search_for_position_by_key(struct su
+ )
+ {
+ struct item_head *p_le_ih; /* pointer to on-disk structure */
+- int n_blk_size;
++ int blk_size;
+ loff_t item_offset, offset;
+ struct reiserfs_dir_entry de;
+ int retval;
+@@ -816,7 +813,7 @@ int search_for_position_by_key(struct su
+ p_le_ih =
+ B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
+ --PATH_LAST_POSITION(search_path));
+- n_blk_size = sb->s_blocksize;
++ blk_size = sb->s_blocksize;
+
+ if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
+ return FILE_NOT_FOUND;
+@@ -828,10 +825,10 @@ int search_for_position_by_key(struct su
+
+ /* Needed byte is contained in the item pointed to by the path. */
+ if (item_offset <= offset &&
+- item_offset + op_bytes_number(p_le_ih, n_blk_size) > offset) {
++ item_offset + op_bytes_number(p_le_ih, blk_size) > offset) {
+ pos_in_item(search_path) = offset - item_offset;
+ if (is_indirect_le_ih(p_le_ih)) {
+- pos_in_item(search_path) /= n_blk_size;
++ pos_in_item(search_path) /= blk_size;
+ }
+ return POSITION_FOUND;
+ }
+@@ -891,7 +888,7 @@ static inline int prepare_for_direct_ite
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
+ //
+ round_len = ROUND_UP(new_file_length);
+- /* this was n_new_file_length < le_ih ... */
++ /* this was new_file_length < le_ih ... */
+ if (round_len < le_ih_k_offset(le_ih)) {
+ *cut_size = -(IH_SIZE + ih_item_len(le_ih));
+ return M_DELETE; /* Delete this item. */
+@@ -953,7 +950,7 @@ static inline int prepare_for_direntry_i
+ This function returns a determination of what balance mode the calling function should employ. */
+ static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, /* Number of unformatted nodes which were removed
+ from end of the file. */
+- int *cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
++ int *cut_size, unsigned long long new_file_length /* MAX_KEY_OFFSET in case of delete. */
+ )
+ {
+ struct super_block *sb = inode->i_sb;
+@@ -965,7 +962,7 @@ static char prepare_for_delete_or_cut(st
+ /* Stat_data item. */
+ if (is_statdata_le_ih(p_le_ih)) {
+
+- RFALSE(n_new_file_length != max_reiserfs_offset(inode),
++ RFALSE(new_file_length != max_reiserfs_offset(inode),
+ "PAP-5210: mode must be M_DELETE");
+
+ *cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
+@@ -975,13 +972,13 @@ static char prepare_for_delete_or_cut(st
+ /* Directory item. */
+ if (is_direntry_le_ih(p_le_ih))
+ return prepare_for_direntry_item(path, p_le_ih, inode,
+- n_new_file_length,
++ new_file_length,
+ cut_size);
+
+ /* Direct item. */
+ if (is_direct_le_ih(p_le_ih))
+ return prepare_for_direct_item(path, p_le_ih, inode,
+- n_new_file_length, cut_size);
++ new_file_length, cut_size);
+
+ /* Case of an indirect item. */
+ {
+@@ -992,10 +989,10 @@ static char prepare_for_delete_or_cut(st
+ int result = M_CUT;
+ int pos = 0;
+
+- if ( n_new_file_length == max_reiserfs_offset (inode) ) {
++ if ( new_file_length == max_reiserfs_offset (inode) ) {
+ /* prepare_for_delete_or_cut() is called by
+ * reiserfs_delete_item() */
+- n_new_file_length = 0;
++ new_file_length = 0;
+ delete = 1;
+ }
+
+@@ -1006,7 +1003,7 @@ static char prepare_for_delete_or_cut(st
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ pos = I_UNFM_NUM(&s_ih);
+
+- while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) {
++ while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > new_file_length) {
+ __le32 *unfm;
+ __u32 block;
+
+@@ -1062,35 +1059,34 @@ static char prepare_for_delete_or_cut(st
+ }
+
+ /* Calculate number of bytes which will be deleted or cut during balance */
+-static int calc_deleted_bytes_number(struct tree_balance *tb, char c_mode)
++static int calc_deleted_bytes_number(struct tree_balance *tb, char mode)
+ {
+- int n_del_size;
++ int del_size;
+ struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);
+
+ if (is_statdata_le_ih(p_le_ih))
+ return 0;
+
+- n_del_size =
+- (c_mode ==
++ del_size =
++ (mode ==
+ M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
+ if (is_direntry_le_ih(p_le_ih)) {
+- // return EMPTY_DIR_SIZE; /* We delete emty directoris only. */
+- // we can't use EMPTY_DIR_SIZE, as old format dirs have a different
+- // empty size. ick. FIXME, is this right?
+- //
+- return n_del_size;
++		/* return EMPTY_DIR_SIZE; We delete empty directories only.
++ * we can't use EMPTY_DIR_SIZE, as old format dirs have a different
++ * empty size. ick. FIXME, is this right? */
++ return del_size;
+ }
+
+ if (is_indirect_le_ih(p_le_ih))
+- n_del_size = (n_del_size / UNFM_P_SIZE) *
++ del_size = (del_size / UNFM_P_SIZE) *
+ (PATH_PLAST_BUFFER(tb->tb_path)->b_size);
+- return n_del_size;
++ return del_size;
+ }
+
+ static void init_tb_struct(struct reiserfs_transaction_handle *th,
+ struct tree_balance *tb,
+ struct super_block *sb,
+- struct treepath *path, int n_size)
++ struct treepath *path, int size)
+ {
+
+ BUG_ON(!th->t_trans_id);
+@@ -1101,7 +1097,7 @@ static void init_tb_struct(struct reiser
+ tb->tb_path = path;
+ PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
+ PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
+- tb->insert_size[0] = n_size;
++ tb->insert_size[0] = size;
+ }
+
+ void padd_item(char *item, int total_length, int length)
+@@ -1156,11 +1152,11 @@ int reiserfs_delete_item(struct reiserfs
+ struct item_head s_ih;
+ struct item_head *q_ih;
+ int quota_cut_bytes;
+- int n_ret_value, n_del_size, n_removed;
++ int ret_value, del_size, removed;
+
+ #ifdef CONFIG_REISERFS_CHECK
+- char c_mode;
+- int n_iter = 0;
++ char mode;
++ int iter = 0;
+ #endif
+
+ BUG_ON(!th->t_trans_id);
+@@ -1169,34 +1165,34 @@ int reiserfs_delete_item(struct reiserfs
+ 0 /*size is unknown */ );
+
+ while (1) {
+- n_removed = 0;
++ removed = 0;
+
+ #ifdef CONFIG_REISERFS_CHECK
+- n_iter++;
+- c_mode =
++ iter++;
++ mode =
+ #endif
+ prepare_for_delete_or_cut(th, inode, path,
+- item_key, &n_removed,
+- &n_del_size,
++ item_key, &removed,
++ &del_size,
+ max_reiserfs_offset(inode));
+
+- RFALSE(c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
++ RFALSE(mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
+
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+- s_del_balance.insert_size[0] = n_del_size;
++ s_del_balance.insert_size[0] = del_size;
+
+- n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
+- if (n_ret_value != REPEAT_SEARCH)
++ ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
++ if (ret_value != REPEAT_SEARCH)
+ break;
+
+ PROC_INFO_INC(sb, delete_item_restarted);
+
+ // file system changed, repeat search
+- n_ret_value =
++ ret_value =
+ search_for_position_by_key(sb, item_key, path);
+- if (n_ret_value == IO_ERROR)
++ if (ret_value == IO_ERROR)
+ break;
+- if (n_ret_value == FILE_NOT_FOUND) {
++ if (ret_value == FILE_NOT_FOUND) {
+ reiserfs_warning(sb, "vs-5340",
+ "no items of the file %K found",
+ item_key);
+@@ -1204,12 +1200,12 @@ int reiserfs_delete_item(struct reiserfs
+ }
+ } /* while (1) */
+
+- if (n_ret_value != CARRY_ON) {
++ if (ret_value != CARRY_ON) {
+ unfix_nodes(&s_del_balance);
+ return 0;
+ }
+ // reiserfs_delete_item returns item length when success
+- n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
++ ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
+ q_ih = get_ih(path);
+ quota_cut_bytes = ih_item_len(q_ih);
+
+@@ -1255,7 +1251,7 @@ int reiserfs_delete_item(struct reiserfs
+ off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+ memcpy(data + off,
+ B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
+- n_ret_value);
++ ret_value);
+ kunmap_atomic(data, KM_USER0);
+ }
+ /* Perform balancing after all resources have been collected at once. */
+@@ -1269,7 +1265,7 @@ int reiserfs_delete_item(struct reiserfs
+ DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes);
+
+ /* Return deleted body length */
+- return n_ret_value;
++ return ret_value;
+ }
+
+ /* Summary Of Mechanisms For Handling Collisions Between Processes:
+@@ -1432,13 +1428,13 @@ static int maybe_indirect_to_direct(stru
+ struct page *page,
+ struct treepath *path,
+ const struct cpu_key *item_key,
+- loff_t n_new_file_size, char *mode)
++ loff_t new_file_size, char *mode)
+ {
+ struct super_block *sb = inode->i_sb;
+- int n_block_size = sb->s_blocksize;
++ int block_size = sb->s_blocksize;
+ int cut_bytes;
+ BUG_ON(!th->t_trans_id);
+- BUG_ON(n_new_file_size != inode->i_size);
++ BUG_ON(new_file_size != inode->i_size);
+
+ /* the page being sent in could be NULL if there was an i/o error
+ ** reading in the last block. The user will hit problems trying to
+@@ -1450,15 +1446,15 @@ static int maybe_indirect_to_direct(stru
+ /* leave tail in an unformatted node */
+ *mode = M_SKIP_BALANCING;
+ cut_bytes =
+- n_block_size - (n_new_file_size & (n_block_size - 1));
++ block_size - (new_file_size & (block_size - 1));
+ pathrelse(path);
+ return cut_bytes;
+ }
+ /* Perform the conversion to a direct_item. */
+ /* return indirect_to_direct(inode, path, item_key,
+- n_new_file_size, mode); */
++ new_file_size, mode); */
+ return indirect2direct(th, inode, page, path, item_key,
+- n_new_file_size, mode);
++ new_file_size, mode);
+ }
+
+ /* we did indirect_to_direct conversion. And we have inserted direct
+@@ -1512,7 +1508,7 @@ int reiserfs_cut_from_item(struct reiser
+ struct treepath *path,
+ struct cpu_key *item_key,
+ struct inode *inode,
+- struct page *page, loff_t n_new_file_size)
++ struct page *page, loff_t new_file_size)
+ {
+ struct super_block *sb = inode->i_sb;
+ /* Every function which is going to call do_balance must first
+@@ -1521,10 +1517,10 @@ int reiserfs_cut_from_item(struct reiser
+ After that we can make tree balancing. */
+ struct tree_balance s_cut_balance;
+ struct item_head *p_le_ih;
+- int n_cut_size = 0, /* Amount to be cut. */
+- n_ret_value = CARRY_ON, n_removed = 0, /* Number of the removed unformatted nodes. */
+- n_is_inode_locked = 0;
+- char c_mode; /* Mode of the balance. */
++ int cut_size = 0, /* Amount to be cut. */
++ ret_value = CARRY_ON, removed = 0, /* Number of the removed unformatted nodes. */
++ is_inode_locked = 0;
++ char mode; /* Mode of the balance. */
+ int retval2 = -1;
+ int quota_cut_bytes;
+ loff_t tail_pos = 0;
+@@ -1532,7 +1528,7 @@ int reiserfs_cut_from_item(struct reiser
+ BUG_ON(!th->t_trans_id);
+
+ init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
+- n_cut_size);
++ cut_size);
+
+ /* Repeat this loop until we either cut the item without needing
+ to balance, or we fix_nodes without schedule occurring */
+@@ -1542,30 +1538,30 @@ int reiserfs_cut_from_item(struct reiser
+ free unformatted nodes which are pointed to by the cut
+ pointers. */
+
+- c_mode =
++ mode =
+ prepare_for_delete_or_cut(th, inode, path,
+- item_key, &n_removed,
+- &n_cut_size, n_new_file_size);
+- if (c_mode == M_CONVERT) {
++ item_key, &removed,
++ &cut_size, new_file_size);
++ if (mode == M_CONVERT) {
+ /* convert last unformatted node to direct item or leave
+ tail in the unformatted node */
+- RFALSE(n_ret_value != CARRY_ON,
++ RFALSE(ret_value != CARRY_ON,
+ "PAP-5570: can not convert twice");
+
+- n_ret_value =
++ ret_value =
+ maybe_indirect_to_direct(th, inode, page,
+ path, item_key,
+- n_new_file_size, &c_mode);
+- if (c_mode == M_SKIP_BALANCING)
++ new_file_size, &mode);
++ if (mode == M_SKIP_BALANCING)
+ /* tail has been left in the unformatted node */
+- return n_ret_value;
++ return ret_value;
+
+- n_is_inode_locked = 1;
++ is_inode_locked = 1;
+
+ /* removing of last unformatted node will change value we
+ have to return to truncate. Save it */
+- retval2 = n_ret_value;
+- /*retval2 = sb->s_blocksize - (n_new_file_size & (sb->s_blocksize - 1)); */
++ retval2 = ret_value;
++ /*retval2 = sb->s_blocksize - (new_file_size & (sb->s_blocksize - 1)); */
+
+ /* So, we have performed the first part of the conversion:
+ inserting the new direct item. Now we are removing the
+@@ -1573,10 +1569,10 @@ int reiserfs_cut_from_item(struct reiser
+ it. */
+ set_cpu_key_k_type(item_key, TYPE_INDIRECT);
+ item_key->key_length = 4;
+- n_new_file_size -=
+- (n_new_file_size & (sb->s_blocksize - 1));
+- tail_pos = n_new_file_size;
+- set_cpu_key_k_offset(item_key, n_new_file_size + 1);
++ new_file_size -=
++ (new_file_size & (sb->s_blocksize - 1));
++ tail_pos = new_file_size;
++ set_cpu_key_k_offset(item_key, new_file_size + 1);
+ if (search_for_position_by_key
+ (sb, item_key,
+ path) == POSITION_NOT_FOUND) {
+@@ -1589,38 +1585,38 @@ int reiserfs_cut_from_item(struct reiser
+ }
+ continue;
+ }
+- if (n_cut_size == 0) {
++ if (cut_size == 0) {
+ pathrelse(path);
+ return 0;
+ }
+
+- s_cut_balance.insert_size[0] = n_cut_size;
++ s_cut_balance.insert_size[0] = cut_size;
+
+- n_ret_value = fix_nodes(c_mode, &s_cut_balance, NULL, NULL);
+- if (n_ret_value != REPEAT_SEARCH)
++ ret_value = fix_nodes(mode, &s_cut_balance, NULL, NULL);
++ if (ret_value != REPEAT_SEARCH)
+ break;
+
+ PROC_INFO_INC(sb, cut_from_item_restarted);
+
+- n_ret_value =
++ ret_value =
+ search_for_position_by_key(sb, item_key, path);
+- if (n_ret_value == POSITION_FOUND)
++ if (ret_value == POSITION_FOUND)
+ continue;
+
+ reiserfs_warning(sb, "PAP-5610", "item %K not found",
+ item_key);
+ unfix_nodes(&s_cut_balance);
+- return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
++ return (ret_value == IO_ERROR) ? -EIO : -ENOENT;
+ } /* while */
+
+ // check fix_nodes results (IO_ERROR or NO_DISK_SPACE)
+- if (n_ret_value != CARRY_ON) {
+- if (n_is_inode_locked) {
++ if (ret_value != CARRY_ON) {
++ if (is_inode_locked) {
+ // FIXME: this seems to be not needed: we are always able
+ // to cut item
+ indirect_to_direct_roll_back(th, inode, path);
+ }
+- if (n_ret_value == NO_DISK_SPACE)
++ if (ret_value == NO_DISK_SPACE)
+ reiserfs_warning(sb, "reiserfs-5092",
+ "NO_DISK_SPACE");
+ unfix_nodes(&s_cut_balance);
+@@ -1629,24 +1625,24 @@ int reiserfs_cut_from_item(struct reiser
+
+ /* go ahead and perform balancing */
+
+- RFALSE(c_mode == M_PASTE || c_mode == M_INSERT, "invalid mode");
++ RFALSE(mode == M_PASTE || mode == M_INSERT, "invalid mode");
+
+ /* Calculate number of bytes that need to be cut from the item. */
+ quota_cut_bytes =
+- (c_mode ==
++ (mode ==
+ M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance.
+ insert_size[0];
+ if (retval2 == -1)
+- n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode);
++ ret_value = calc_deleted_bytes_number(&s_cut_balance, mode);
+ else
+- n_ret_value = retval2;
++ ret_value = retval2;
+
+ /* For direct items, we only change the quota when deleting the last
+ ** item.
+ */
+ p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
+ if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
+- if (c_mode == M_DELETE &&
++ if (mode == M_DELETE &&
+ (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
+ 1) {
+ // FIXME: this is to keep 3.5 happy
+@@ -1657,7 +1653,7 @@ int reiserfs_cut_from_item(struct reiser
+ }
+ }
+ #ifdef CONFIG_REISERFS_CHECK
+- if (n_is_inode_locked) {
++ if (is_inode_locked) {
+ struct item_head *le_ih =
+ PATH_PITEM_HEAD(s_cut_balance.tb_path);
+ /* we are going to complete indirect2direct conversion. Make
+@@ -1667,13 +1663,13 @@ int reiserfs_cut_from_item(struct reiser
+ reiserfs_panic(sb, "vs-5652",
+ "item must be indirect %h", le_ih);
+
+- if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
++ if (mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
+ reiserfs_panic(sb, "vs-5653", "completing "
+ "indirect2direct conversion indirect "
+ "item %h being deleted must be of "
+ "4 byte long", le_ih);
+
+- if (c_mode == M_CUT
++ if (mode == M_CUT
+ && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
+ reiserfs_panic(sb, "vs-5654", "can not complete "
+ "indirect2direct conversion of %h "
+@@ -1685,8 +1681,8 @@ int reiserfs_cut_from_item(struct reiser
+ }
+ #endif
+
+- do_balance(&s_cut_balance, NULL, NULL, c_mode);
+- if (n_is_inode_locked) {
++ do_balance(&s_cut_balance, NULL, NULL, mode);
++ if (is_inode_locked) {
+ /* we've done an indirect->direct conversion. when the data block
+ ** was freed, it was removed from the list of blocks that must
+ ** be flushed before the transaction commits, make sure to
+@@ -1701,7 +1697,7 @@ int reiserfs_cut_from_item(struct reiser
+ quota_cut_bytes, inode->i_uid, '?');
+ #endif
+ DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes);
+- return n_ret_value;
++ return ret_value;
+ }
+
+ static void truncate_directory(struct reiserfs_transaction_handle *th,
+@@ -1733,9 +1729,9 @@ int reiserfs_do_truncate(struct reiserfs
+ INITIALIZE_PATH(s_search_path); /* Path to the current object item. */
+ struct item_head *p_le_ih; /* Pointer to an item header. */
+ struct cpu_key s_item_key; /* Key to search for a previous file item. */
+- loff_t n_file_size, /* Old file size. */
+- n_new_file_size; /* New file size. */
+- int n_deleted; /* Number of deleted or truncated bytes. */
++ loff_t file_size, /* Old file size. */
++ new_file_size; /* New file size. */
++ int deleted; /* Number of deleted or truncated bytes. */
+ int retval;
+ int err = 0;
+
+@@ -1752,7 +1748,7 @@ int reiserfs_do_truncate(struct reiserfs
+ }
+
+ /* Get new file size. */
+- n_new_file_size = inode->i_size;
++ new_file_size = inode->i_size;
+
+ // FIXME: note, that key type is unimportant here
+ make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
+@@ -1782,7 +1778,7 @@ int reiserfs_do_truncate(struct reiserfs
+ /* Get real file size (total length of all file items) */
+ p_le_ih = PATH_PITEM_HEAD(&s_search_path);
+ if (is_statdata_le_ih(p_le_ih))
+- n_file_size = 0;
++ file_size = 0;
+ else {
+ loff_t offset = le_ih_k_offset(p_le_ih);
+ int bytes =
+@@ -1791,42 +1787,42 @@ int reiserfs_do_truncate(struct reiserfs
+ /* this may mismatch with real file size: if last direct item
+ had no padding zeros and last unformatted node had no free
+ space, this file would have this file size */
+- n_file_size = offset + bytes - 1;
++ file_size = offset + bytes - 1;
+ }
+ /*
+ * are we doing a full truncate or delete, if so
+ * kick in the reada code
+ */
+- if (n_new_file_size == 0)
++ if (new_file_size == 0)
+ s_search_path.reada = PATH_READA | PATH_READA_BACK;
+
+- if (n_file_size == 0 || n_file_size < n_new_file_size) {
++ if (file_size == 0 || file_size < new_file_size) {
+ goto update_and_out;
+ }
+
+ /* Update key to search for the last file item. */
+- set_cpu_key_k_offset(&s_item_key, n_file_size);
++ set_cpu_key_k_offset(&s_item_key, file_size);
+
+ do {
+ /* Cut or delete file item. */
+- n_deleted =
++ deleted =
+ reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
+- inode, page, n_new_file_size);
+- if (n_deleted < 0) {
++ inode, page, new_file_size);
++ if (deleted < 0) {
+ reiserfs_warning(inode->i_sb, "vs-5665",
+ "reiserfs_cut_from_item failed");
+ reiserfs_check_path(&s_search_path);
+ return 0;
+ }
+
+- RFALSE(n_deleted > n_file_size,
++ RFALSE(deleted > file_size,
+ "PAP-5670: reiserfs_cut_from_item: too many bytes deleted: deleted %d, file_size %lu, item_key %K",
+- n_deleted, n_file_size, &s_item_key);
++ deleted, file_size, &s_item_key);
+
+ /* Change key to search the last file item. */
+- n_file_size -= n_deleted;
++ file_size -= deleted;
+
+- set_cpu_key_k_offset(&s_item_key, n_file_size);
++ set_cpu_key_k_offset(&s_item_key, file_size);
+
+ /* While there are bytes to truncate and previous file item is presented in the tree. */
+
+@@ -1857,13 +1853,13 @@ int reiserfs_do_truncate(struct reiserfs
+ goto out;
+ reiserfs_update_inode_transaction(inode);
+ }
+- } while (n_file_size > ROUND_UP(n_new_file_size) &&
++ } while (file_size > ROUND_UP(new_file_size) &&
+ search_for_position_by_key(inode->i_sb, &s_item_key,
+ &s_search_path) == POSITION_FOUND);
+
+- RFALSE(n_file_size > ROUND_UP(n_new_file_size),
++ RFALSE(file_size > ROUND_UP(new_file_size),
+ "PAP-5680: truncate did not finish: new_file_size %Ld, current %Ld, oid %d",
+- n_new_file_size, n_file_size, s_item_key.on_disk_key.k_objectid);
++ new_file_size, file_size, s_item_key.on_disk_key.k_objectid);
+
+ update_and_out:
+ if (update_timestamps) {
+@@ -1918,7 +1914,7 @@ int reiserfs_paste_into_item(struct reis
+ const struct cpu_key *key, /* Key to search for the needed item. */
+ struct inode *inode, /* Inode item belongs to */
+ const char *body, /* Pointer to the bytes to paste. */
+- int n_pasted_size)
++ int pasted_size)
+ { /* Size of pasted bytes. */
+ struct tree_balance s_paste_balance;
+ int retval;
+@@ -1931,16 +1927,16 @@ int reiserfs_paste_into_item(struct reis
+ #ifdef REISERQUOTA_DEBUG
+ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota paste_into_item(): allocating %u id=%u type=%c",
+- n_pasted_size, inode->i_uid,
++ pasted_size, inode->i_uid,
+ key2type(&(key->on_disk_key)));
+ #endif
+
+- if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
++ if (DQUOT_ALLOC_SPACE_NODIRTY(inode, pasted_size)) {
+ pathrelse(search_path);
+ return -EDQUOT;
+ }
+ init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
+- n_pasted_size);
++ pasted_size);
+ #ifdef DISPLACE_NEW_PACKING_LOCALITIES
+ s_paste_balance.key = key->on_disk_key;
+ #endif
+@@ -1988,10 +1984,10 @@ int reiserfs_paste_into_item(struct reis
+ #ifdef REISERQUOTA_DEBUG
+ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota paste_into_item(): freeing %u id=%u type=%c",
+- n_pasted_size, inode->i_uid,
++ pasted_size, inode->i_uid,
+ key2type(&(key->on_disk_key)));
+ #endif
+- DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
++ DQUOT_FREE_SPACE_NODIRTY(inode, pasted_size);
+ return retval;
+ }
+
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -26,7 +26,7 @@ int direct2indirect(struct reiserfs_tran
+ converted item. */
+ struct item_head ind_ih; /* new indirect item to be inserted or
+ key of unfm pointer to be pasted */
+- int n_blk_size, n_retval; /* returned value for reiserfs_insert_item and clones */
++ int blk_size, retval; /* returned value for reiserfs_insert_item and clones */
+ unp_t unfm_ptr; /* Handle on an unformatted node
+ that will be inserted in the
+ tree. */
+@@ -35,7 +35,7 @@ int direct2indirect(struct reiserfs_tran
+
+ REISERFS_SB(sb)->s_direct2indirect++;
+
+- n_blk_size = sb->s_blocksize;
++ blk_size = sb->s_blocksize;
+
+ /* and key to search for append or insert pointer to the new
+ unformatted node. */
+@@ -64,17 +64,17 @@ int direct2indirect(struct reiserfs_tran
+ set_ih_free_space(&ind_ih, 0); /* delete at nearest future */
+ put_ih_item_len(&ind_ih, UNFM_P_SIZE);
+ PATH_LAST_POSITION(path)++;
+- n_retval =
++ retval =
+ reiserfs_insert_item(th, path, &end_key, &ind_ih, inode,
+ (char *)&unfm_ptr);
+ } else {
+ /* Paste into last indirect item of an object. */
+- n_retval = reiserfs_paste_into_item(th, path, &end_key, inode,
++ retval = reiserfs_paste_into_item(th, path, &end_key, inode,
+ (char *)&unfm_ptr,
+ UNFM_P_SIZE);
+ }
+- if (n_retval) {
+- return n_retval;
++ if (retval) {
++ return retval;
+ }
+ // note: from here there are two keys which have matching first
+ // three key components. They only differ by the fourth one.
+@@ -98,7 +98,7 @@ int direct2indirect(struct reiserfs_tran
+ RFALSE(!is_direct_le_ih(p_le_ih),
+ "vs-14055: direct item expected(%K), found %h",
+ &end_key, p_le_ih);
+- tail_size = (le_ih_k_offset(p_le_ih) & (n_blk_size - 1))
++ tail_size = (le_ih_k_offset(p_le_ih) & (blk_size - 1))
+ + ih_item_len(p_le_ih) - 1;
+
+ /* we only send the unbh pointer if the buffer is not up to date.
+@@ -113,11 +113,11 @@ int direct2indirect(struct reiserfs_tran
+ } else {
+ up_to_date_bh = unbh;
+ }
+- n_retval = reiserfs_delete_item(th, path, &end_key, inode,
++ retval = reiserfs_delete_item(th, path, &end_key, inode,
+ up_to_date_bh);
+
+- total_tail += n_retval;
+- if (tail_size == n_retval)
++ total_tail += retval;
++ if (tail_size == retval)
+ // done: file does not have direct items anymore
+ break;
+
+@@ -129,7 +129,7 @@ int direct2indirect(struct reiserfs_tran
+ unsigned pgoff =
+ (tail_offset + total_tail - 1) & (PAGE_CACHE_SIZE - 1);
+ char *kaddr = kmap_atomic(up_to_date_bh->b_page, KM_USER0);
+- memset(kaddr + pgoff, 0, n_blk_size - total_tail);
++ memset(kaddr + pgoff, 0, blk_size - total_tail);
+ kunmap_atomic(kaddr, KM_USER0);
+ }
+
+@@ -181,7 +181,7 @@ int indirect2direct(struct reiserfs_tran
+ {
+ struct super_block *sb = inode->i_sb;
+ struct item_head s_ih;
+- unsigned long n_block_size = sb->s_blocksize;
++ unsigned long block_size = sb->s_blocksize;
+ char *tail;
+ int tail_len, round_tail_len;
+ loff_t pos, pos1; /* position of first byte of the tail */
+@@ -196,7 +196,7 @@ int indirect2direct(struct reiserfs_tran
+ /* store item head path points to. */
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+
+- tail_len = (n_new_file_size & (n_block_size - 1));
++ tail_len = (n_new_file_size & (block_size - 1));
+ if (get_inode_sd_version(inode) == STAT_DATA_V2)
+ round_tail_len = ROUND_UP(tail_len);
+ else
+@@ -257,7 +257,7 @@ int indirect2direct(struct reiserfs_tran
+ unformatted node. For now i_size is considered as guard for
+ going out of file size */
+ kunmap(page);
+- return n_block_size - round_tail_len;
++ return block_size - round_tail_len;
+ }
+ kunmap(page);
+
+@@ -276,5 +276,5 @@ int indirect2direct(struct reiserfs_tran
+ /* mark_file_with_tail (inode, pos1 + 1); */
+ REISERFS_I(inode)->i_first_direct_byte = pos1 + 1;
+
+- return n_block_size - round_tail_len;
++ return block_size - round_tail_len;
+ }
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rename p_._ variables
+
+ This patch is a simple s/p_._//g to the reiserfs code. This is the fifth
+ in a series of patches to rip out some of the awful variable naming in
+ reiserfs.
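+
+ The prefixes appear to be Hungarian-style type annotations (p_s_ for
+ a pointer to struct, p_n_ for a pointer to a number, p_v_ for a
+ pointer to void, and so on), often applied inaccurately, and they
+ carry no information the declaration itself doesn't already provide.
+ As a representative before/after from this patch (one signature shown
+ without its inline comments, as a sample rather than the full list):
+
+	static inline int key_in_buffer(struct treepath *p_s_chk_path,
+					const struct cpu_key *p_s_key,
+					struct super_block *sb);
+
+ becomes:
+
+	static inline int key_in_buffer(struct treepath *chk_path,
+					const struct cpu_key *key,
+					struct super_block *sb);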
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/file.c | 6
+ fs/reiserfs/fix_node.c | 169 +++++++--------
+ fs/reiserfs/stree.c | 472 +++++++++++++++++++++---------------------
+ fs/reiserfs/tail_conversion.c | 28 +-
+ include/linux/reiserfs_fs.h | 46 ++--
+ 5 files changed, 365 insertions(+), 356 deletions(-)
+
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -134,10 +134,10 @@ static void reiserfs_vfs_truncate_file(s
+ * be removed...
+ */
+
+-static int reiserfs_sync_file(struct file *p_s_filp,
+- struct dentry *p_s_dentry, int datasync)
++static int reiserfs_sync_file(struct file *filp,
++ struct dentry *dentry, int datasync)
+ {
+- struct inode *inode = p_s_dentry->d_inode;
++ struct inode *inode = dentry->d_inode;
+ int n_err;
+ int barrier_done;
+
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -780,9 +780,9 @@ static void free_buffers_in_tb(struct tr
+ /* The function is NOT SCHEDULE-SAFE! */
+ static int get_empty_nodes(struct tree_balance *tb, int n_h)
+ {
+- struct buffer_head *p_s_new_bh,
+- *p_s_Sh = PATH_H_PBUFFER(tb->tb_path, n_h);
+- b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
++ struct buffer_head *new_bh,
++ *Sh = PATH_H_PBUFFER(tb->tb_path, n_h);
++ b_blocknr_t *blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
+ int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */
+ n_retval = CARRY_ON;
+ struct super_block *sb = tb->tb_sb;
+@@ -810,8 +810,8 @@ static int get_empty_nodes(struct tree_b
+ 1) : 0;
+
+ /* Allocate missing empty blocks. */
+- /* if p_s_Sh == 0 then we are getting a new root */
+- n_amount_needed = (p_s_Sh) ? (tb->blknum[n_h] - 1) : 1;
++ /* if Sh == 0 then we are getting a new root */
++ n_amount_needed = (Sh) ? (tb->blknum[n_h] - 1) : 1;
+ /* Amount_needed = the amount that we need more than the amount that we have. */
+ if (n_amount_needed > n_number_of_freeblk)
+ n_amount_needed -= n_number_of_freeblk;
+@@ -824,25 +824,25 @@ static int get_empty_nodes(struct tree_b
+ return NO_DISK_SPACE;
+
+ /* for each blocknumber we just got, get a buffer and stick it on FEB */
+- for (p_n_blocknr = a_n_blocknrs, n_counter = 0;
+- n_counter < n_amount_needed; p_n_blocknr++, n_counter++) {
++ for (blocknr = a_n_blocknrs, n_counter = 0;
++ n_counter < n_amount_needed; blocknr++, n_counter++) {
+
+- RFALSE(!*p_n_blocknr,
++ RFALSE(!*blocknr,
+ "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
+
+- p_s_new_bh = sb_getblk(sb, *p_n_blocknr);
+- RFALSE(buffer_dirty(p_s_new_bh) ||
+- buffer_journaled(p_s_new_bh) ||
+- buffer_journal_dirty(p_s_new_bh),
++ new_bh = sb_getblk(sb, *blocknr);
++ RFALSE(buffer_dirty(new_bh) ||
++ buffer_journaled(new_bh) ||
++ buffer_journal_dirty(new_bh),
+ "PAP-8140: journlaled or dirty buffer %b for the new block",
+- p_s_new_bh);
++ new_bh);
+
+ /* Put empty buffers into the array. */
+ RFALSE(tb->FEB[tb->cur_blknum],
+ "PAP-8141: busy slot for new buffer");
+
+- set_buffer_journal_new(p_s_new_bh);
+- tb->FEB[tb->cur_blknum++] = p_s_new_bh;
++ set_buffer_journal_new(new_bh);
++ tb->FEB[tb->cur_blknum++] = new_bh;
+ }
+
+ if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
+@@ -898,7 +898,7 @@ static int get_rfree(struct tree_balance
+ /* Check whether left neighbor is in memory. */
+ static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h)
+ {
+- struct buffer_head *p_s_father, *left;
++ struct buffer_head *father, *left;
+ struct super_block *sb = tb->tb_sb;
+ b_blocknr_t n_left_neighbor_blocknr;
+ int n_left_neighbor_position;
+@@ -908,18 +908,18 @@ static int is_left_neighbor_in_cache(str
+ return 0;
+
+ /* Calculate father of the node to be balanced. */
+- p_s_father = PATH_H_PBUFFER(tb->tb_path, n_h + 1);
++ father = PATH_H_PBUFFER(tb->tb_path, n_h + 1);
+
+- RFALSE(!p_s_father ||
+- !B_IS_IN_TREE(p_s_father) ||
++ RFALSE(!father ||
++ !B_IS_IN_TREE(father) ||
+ !B_IS_IN_TREE(tb->FL[n_h]) ||
+- !buffer_uptodate(p_s_father) ||
++ !buffer_uptodate(father) ||
+ !buffer_uptodate(tb->FL[n_h]),
+ "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
+- p_s_father, tb->FL[n_h]);
++ father, tb->FL[n_h]);
+
+ /* Get position of the pointer to the left neighbor into the left father. */
+- n_left_neighbor_position = (p_s_father == tb->FL[n_h]) ?
++ n_left_neighbor_position = (father == tb->FL[n_h]) ?
+ tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]);
+ /* Get left neighbor block number. */
+ n_left_neighbor_blocknr =
+@@ -940,10 +940,10 @@ static int is_left_neighbor_in_cache(str
+ #define LEFT_PARENTS 'l'
+ #define RIGHT_PARENTS 'r'
+
+-static void decrement_key(struct cpu_key *p_s_key)
++static void decrement_key(struct cpu_key *key)
+ {
+ // call item specific function for this key
+- item_ops[cpu_key_k_type(p_s_key)]->decrement_key(p_s_key);
++ item_ops[cpu_key_k_type(key)]->decrement_key(key);
+ }
+
+ /* Calculate far left/right parent of the left/right neighbor of the current node, that
+@@ -956,17 +956,17 @@ static void decrement_key(struct cpu_key
+ */
+ static int get_far_parent(struct tree_balance *tb,
+ int n_h,
+- struct buffer_head **pp_s_father,
+- struct buffer_head **pp_s_com_father, char c_lr_par)
++ struct buffer_head **pfather,
++ struct buffer_head **pcom_father, char c_lr_par)
+ {
+- struct buffer_head *p_s_parent;
++ struct buffer_head *parent;
+ INITIALIZE_PATH(s_path_to_neighbor_father);
+- struct treepath *p_s_path = tb->tb_path;
++ struct treepath *path = tb->tb_path;
+ struct cpu_key s_lr_father_key;
+ int n_counter,
+ n_position = INT_MAX,
+ n_first_last_position = 0,
+- n_path_offset = PATH_H_PATH_OFFSET(p_s_path, n_h);
++ n_path_offset = PATH_H_PATH_OFFSET(path, n_h);
+
+ /* Starting from F[n_h] go upwards in the tree, and look for the common
+ ancestor of F[n_h], and its neighbor l/r, that should be obtained. */
+@@ -979,25 +979,25 @@ static int get_far_parent(struct tree_ba
+ for (; n_counter > FIRST_PATH_ELEMENT_OFFSET; n_counter--) {
+ /* Check whether parent of the current buffer in the path is really parent in the tree. */
+ if (!B_IS_IN_TREE
+- (p_s_parent = PATH_OFFSET_PBUFFER(p_s_path, n_counter - 1)))
++ (parent = PATH_OFFSET_PBUFFER(path, n_counter - 1)))
+ return REPEAT_SEARCH;
+ /* Check whether position in the parent is correct. */
+ if ((n_position =
+- PATH_OFFSET_POSITION(p_s_path,
++ PATH_OFFSET_POSITION(path,
+ n_counter - 1)) >
+- B_NR_ITEMS(p_s_parent))
++ B_NR_ITEMS(parent))
+ return REPEAT_SEARCH;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(p_s_parent, n_position) !=
+- PATH_OFFSET_PBUFFER(p_s_path, n_counter)->b_blocknr)
++ if (B_N_CHILD_NUM(parent, n_position) !=
++ PATH_OFFSET_PBUFFER(path, n_counter)->b_blocknr)
+ return REPEAT_SEARCH;
+ /* Return delimiting key if position in the parent is not equal to first/last one. */
+ if (c_lr_par == RIGHT_PARENTS)
+- n_first_last_position = B_NR_ITEMS(p_s_parent);
++ n_first_last_position = B_NR_ITEMS(parent);
+ if (n_position != n_first_last_position) {
+- *pp_s_com_father = p_s_parent;
+- get_bh(*pp_s_com_father);
+- /*(*pp_s_com_father = p_s_parent)->b_count++; */
++ *pcom_father = parent;
++ get_bh(*pcom_father);
++ /*(*pcom_father = parent)->b_count++; */
+ break;
+ }
+ }
+@@ -1009,22 +1009,22 @@ static int get_far_parent(struct tree_ba
+ (tb->tb_path,
+ FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+ SB_ROOT_BLOCK(tb->tb_sb)) {
+- *pp_s_father = *pp_s_com_father = NULL;
++ *pfather = *pcom_father = NULL;
+ return CARRY_ON;
+ }
+ return REPEAT_SEARCH;
+ }
+
+- RFALSE(B_LEVEL(*pp_s_com_father) <= DISK_LEAF_NODE_LEVEL,
++ RFALSE(B_LEVEL(*pcom_father) <= DISK_LEAF_NODE_LEVEL,
+ "PAP-8185: (%b %z) level too small",
+- *pp_s_com_father, *pp_s_com_father);
++ *pcom_father, *pcom_father);
+
+ /* Check whether the common parent is locked. */
+
+- if (buffer_locked(*pp_s_com_father)) {
+- __wait_on_buffer(*pp_s_com_father);
++ if (buffer_locked(*pcom_father)) {
++ __wait_on_buffer(*pcom_father);
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+- brelse(*pp_s_com_father);
++ brelse(*pcom_father);
+ return REPEAT_SEARCH;
+ }
+ }
+@@ -1034,7 +1034,7 @@ static int get_far_parent(struct tree_ba
+
+ /* Form key to get parent of the left/right neighbor. */
+ le_key2cpu_key(&s_lr_father_key,
+- B_N_PDELIM_KEY(*pp_s_com_father,
++ B_N_PDELIM_KEY(*pcom_father,
+ (c_lr_par ==
+ LEFT_PARENTS) ? (tb->lkey[n_h - 1] =
+ n_position -
+@@ -1053,14 +1053,14 @@ static int get_far_parent(struct tree_ba
+
+ if (FILESYSTEM_CHANGED_TB(tb)) {
+ pathrelse(&s_path_to_neighbor_father);
+- brelse(*pp_s_com_father);
++ brelse(*pcom_father);
+ return REPEAT_SEARCH;
+ }
+
+- *pp_s_father = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
++ *pfather = PATH_PLAST_BUFFER(&s_path_to_neighbor_father);
+
+- RFALSE(B_LEVEL(*pp_s_father) != n_h + 1,
+- "PAP-8190: (%b %z) level too small", *pp_s_father, *pp_s_father);
++ RFALSE(B_LEVEL(*pfather) != n_h + 1,
++ "PAP-8190: (%b %z) level too small", *pfather, *pfather);
+ RFALSE(s_path_to_neighbor_father.path_length <
+ FIRST_PATH_ELEMENT_OFFSET, "PAP-8192: path length is too small");
+
+@@ -1078,11 +1078,11 @@ static int get_far_parent(struct tree_ba
+ */
+ static int get_parents(struct tree_balance *tb, int n_h)
+ {
+- struct treepath *p_s_path = tb->tb_path;
++ struct treepath *path = tb->tb_path;
+ int n_position,
+ n_ret_value,
+ n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
+- struct buffer_head *p_s_curf, *p_s_curcf;
++ struct buffer_head *curf, *curcf;
+
+ /* Current node is the root of the tree or will be root of the tree */
+ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+@@ -1100,66 +1100,65 @@ static int get_parents(struct tree_balan
+ }
+
+ /* Get parent FL[n_path_offset] of L[n_path_offset]. */
+- if ((n_position = PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1))) {
++ n_position = PATH_OFFSET_POSITION(path, n_path_offset - 1);
++ if (n_position) {
+ /* Current node is not the first child of its parent. */
+- /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2; */
+- p_s_curf = p_s_curcf =
+- PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+- get_bh(p_s_curf);
+- get_bh(p_s_curf);
++ curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++ curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++ get_bh(curf);
++ get_bh(curf);
+ tb->lkey[n_h] = n_position - 1;
+ } else {
+ /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node.
+ Calculate current common parent of L[n_path_offset] and the current node. Note that
+ CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset].
+ Calculate lkey[n_path_offset]. */
+- if ((n_ret_value = get_far_parent(tb, n_h + 1, &p_s_curf,
+- &p_s_curcf,
++ if ((n_ret_value = get_far_parent(tb, n_h + 1, &curf,
++ &curcf,
+ LEFT_PARENTS)) != CARRY_ON)
+ return n_ret_value;
+ }
+
+ brelse(tb->FL[n_h]);
+- tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
++ tb->FL[n_h] = curf; /* New initialization of FL[n_h]. */
+ brelse(tb->CFL[n_h]);
+- tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */
++ tb->CFL[n_h] = curcf; /* New initialization of CFL[n_h]. */
+
+- RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+- (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)),
+- "PAP-8195: FL (%b) or CFL (%b) is invalid", p_s_curf, p_s_curcf);
++ RFALSE((curf && !B_IS_IN_TREE(curf)) ||
++ (curcf && !B_IS_IN_TREE(curcf)),
++ "PAP-8195: FL (%b) or CFL (%b) is invalid", curf, curcf);
+
+ /* Get parent FR[n_h] of R[n_h]. */
+
+ /* Current node is the last child of F[n_h]. FR[n_h] != F[n_h]. */
+- if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(p_s_path, n_h + 1))) {
++ if (n_position == B_NR_ITEMS(PATH_H_PBUFFER(path, n_h + 1))) {
+ /* Calculate current parent of R[n_h], which is the right neighbor of F[n_h].
+ Calculate current common parent of R[n_h] and current node. Note that CFR[n_h]
+ not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */
+ if ((n_ret_value =
+- get_far_parent(tb, n_h + 1, &p_s_curf, &p_s_curcf,
++ get_far_parent(tb, n_h + 1, &curf, &curcf,
+ RIGHT_PARENTS)) != CARRY_ON)
+ return n_ret_value;
+ } else {
+ /* Current node is not the last child of its parent F[n_h]. */
+- /*(p_s_curf = p_s_curcf = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1))->b_count += 2; */
+- p_s_curf = p_s_curcf =
+- PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+- get_bh(p_s_curf);
+- get_bh(p_s_curf);
++ curf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++ curcf = PATH_OFFSET_PBUFFER(path, n_path_offset - 1);
++ get_bh(curf);
++ get_bh(curf);
+ tb->rkey[n_h] = n_position;
+ }
+
+ brelse(tb->FR[n_h]);
+ /* New initialization of FR[n_path_offset]. */
+- tb->FR[n_h] = p_s_curf;
++ tb->FR[n_h] = curf;
+
+ brelse(tb->CFR[n_h]);
+ /* New initialization of CFR[n_path_offset]. */
+- tb->CFR[n_h] = p_s_curcf;
++ tb->CFR[n_h] = curcf;
+
+- RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+- (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)),
+- "PAP-8205: FR (%b) or CFR (%b) is invalid", p_s_curf, p_s_curcf);
++ RFALSE((curf && !B_IS_IN_TREE(curf)) ||
++ (curcf && !B_IS_IN_TREE(curcf)),
++ "PAP-8205: FR (%b) or CFR (%b) is invalid", curf, curcf);
+
+ return CARRY_ON;
+ }
+@@ -1893,7 +1892,7 @@ static int check_balance(int mode,
+ static int get_direct_parent(struct tree_balance *tb, int n_h)
+ {
+ struct buffer_head *bh;
+- struct treepath *p_s_path = tb->tb_path;
++ struct treepath *path = tb->tb_path;
+ int n_position,
+ n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
+
+@@ -1903,27 +1902,27 @@ static int get_direct_parent(struct tree
+ RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET - 1,
+ "PAP-8260: invalid offset in the path");
+
+- if (PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)->
++ if (PATH_OFFSET_PBUFFER(path, FIRST_PATH_ELEMENT_OFFSET)->
+ b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
+ /* Root is not changed. */
+- PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL;
+- PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0;
++ PATH_OFFSET_PBUFFER(path, n_path_offset - 1) = NULL;
++ PATH_OFFSET_POSITION(path, n_path_offset - 1) = 0;
+ return CARRY_ON;
+ }
+ return REPEAT_SEARCH; /* Root is changed and we must recalculate the path. */
+ }
+
+ if (!B_IS_IN_TREE
+- (bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1)))
++ (bh = PATH_OFFSET_PBUFFER(path, n_path_offset - 1)))
+ return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+
+ if ((n_position =
+- PATH_OFFSET_POSITION(p_s_path,
++ PATH_OFFSET_POSITION(path,
+ n_path_offset - 1)) > B_NR_ITEMS(bh))
+ return REPEAT_SEARCH;
+
+ if (B_N_CHILD_NUM(bh, n_position) !=
+- PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr)
++ PATH_OFFSET_PBUFFER(path, n_path_offset)->b_blocknr)
+ /* Parent in the path is not parent of the current node in the tree. */
+ return REPEAT_SEARCH;
+
+@@ -2319,7 +2318,7 @@ static int wait_tb_buffers_until_unlocke
+ */
+
+ int fix_nodes(int n_op_mode, struct tree_balance *tb,
+- struct item_head *p_s_ins_ih, const void *data)
++ struct item_head *ins_ih, const void *data)
+ {
+ int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path);
+ int n_pos_in_item;
+@@ -2405,7 +2404,7 @@ int fix_nodes(int n_op_mode, struct tree
+ goto repeat;
+
+ n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num,
+- n_pos_in_item, p_s_ins_ih, data);
++ n_pos_in_item, ins_ih, data);
+ if (n_ret_value != CARRY_ON) {
+ if (n_ret_value == NO_BALANCING_NEEDED) {
+ /* No balancing for higher levels needed. */
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -68,10 +68,10 @@ inline int B_IS_IN_TREE(const struct buf
+ //
+ // to gets item head in le form
+ //
+-inline void copy_item_head(struct item_head *p_v_to,
+- const struct item_head *p_v_from)
++inline void copy_item_head(struct item_head *to,
++ const struct item_head *from)
+ {
+- memcpy(p_v_to, p_v_from, IH_SIZE);
++ memcpy(to, from, IH_SIZE);
+ }
+
+ /* k1 is pointer to on-disk structure which is stored in little-endian
+@@ -135,15 +135,15 @@ static inline int comp_keys(const struct
+ inline int comp_short_le_keys(const struct reiserfs_key *key1,
+ const struct reiserfs_key *key2)
+ {
+- __u32 *p_s_1_u32, *p_s_2_u32;
++ __u32 *k1_u32, *k2_u32;
+ int n_key_length = REISERFS_SHORT_KEY_LEN;
+
+- p_s_1_u32 = (__u32 *) key1;
+- p_s_2_u32 = (__u32 *) key2;
+- for (; n_key_length--; ++p_s_1_u32, ++p_s_2_u32) {
+- if (le32_to_cpu(*p_s_1_u32) < le32_to_cpu(*p_s_2_u32))
++ k1_u32 = (__u32 *) key1;
++ k2_u32 = (__u32 *) key2;
++ for (; n_key_length--; ++k1_u32, ++k2_u32) {
++ if (le32_to_cpu(*k1_u32) < le32_to_cpu(*k2_u32))
+ return -1;
+- if (le32_to_cpu(*p_s_1_u32) > le32_to_cpu(*p_s_2_u32))
++ if (le32_to_cpu(*k1_u32) > le32_to_cpu(*k2_u32))
+ return 1;
+ }
+ return 0;
+@@ -174,8 +174,8 @@ inline int comp_le_keys(const struct rei
+ * Binary search toolkit function *
+ * Search for an item in the array by the item key *
+ * Returns: 1 if found, 0 if not found; *
+- * *p_n_pos = number of the searched element if found, else the *
+- * number of the first element that is larger than p_v_key. *
++ * *pos = number of the searched element if found, else the *
++ * number of the first element that is larger than key. *
+ **************************************************************************/
+ /* For those not familiar with binary search: n_lbound is the leftmost item that it
+ could be, n_rbound the rightmost item that it could be. We examine the item
+@@ -184,28 +184,28 @@ inline int comp_le_keys(const struct rei
+ there are no possible items, and we have not found it. With each examination we
+ cut the number of possible items it could be by one more than half rounded down,
+ or we find it. */
+-static inline int bin_search(const void *p_v_key, /* Key to search for. */
+- const void *p_v_base, /* First item in the array. */
+- int p_n_num, /* Number of items in the array. */
+- int p_n_width, /* Item size in the array.
+- searched. Lest the reader be
+- confused, note that this is crafted
+- as a general function, and when it
+- is applied specifically to the array
+- of item headers in a node, p_n_width
+- is actually the item header size not
+- the item size. */
+- int *p_n_pos /* Number of the searched for element. */
++static inline int bin_search(const void *key, /* Key to search for. */
++ const void *base, /* First item in the array. */
++ int num, /* Number of items in the array. */
++ int width, /* Item size in the array
++ searched. Lest the reader be
++ confused, note that this is crafted
++ as a general function, and when it
++ is applied specifically to the array
++ of item headers in a node, width
++ is actually the item header size not
++ the item size. */
++ int *pos /* Number of the searched for element. */
+ )
+ {
+ int n_rbound, n_lbound, n_j;
+
+- for (n_j = ((n_rbound = p_n_num - 1) + (n_lbound = 0)) / 2;
++ for (n_j = ((n_rbound = num - 1) + (n_lbound = 0)) / 2;
+ n_lbound <= n_rbound; n_j = (n_rbound + n_lbound) / 2)
+ switch (comp_keys
+- ((struct reiserfs_key *)((char *)p_v_base +
+- n_j * p_n_width),
+- (struct cpu_key *)p_v_key)) {
++ ((struct reiserfs_key *)((char *)base +
++ n_j * width),
++ (struct cpu_key *)key)) {
+ case -1:
+ n_lbound = n_j + 1;
+ continue;
+@@ -213,13 +213,13 @@ static inline int bin_search(const void
+ n_rbound = n_j - 1;
+ continue;
+ case 0:
+- *p_n_pos = n_j;
++ *pos = n_j;
+ return ITEM_FOUND; /* Key found in the array. */
+ }
+
+ /* bin_search did not find given key, it returns position of key,
+ that is minimal and greater than the given one. */
+- *p_n_pos = n_lbound;
++ *pos = n_lbound;
+ return ITEM_NOT_FOUND;
+ }
+
+@@ -243,12 +243,12 @@ static const struct reiserfs_key MAX_KEY
+ the path, there is no delimiting key in the tree (buffer is first or last buffer in tree), and in this
+ case we return a special key, either MIN_KEY or MAX_KEY. */
+ static inline const struct reiserfs_key *get_lkey(const struct treepath
+- *p_s_chk_path,
++ *chk_path,
+ const struct super_block
+ *sb)
+ {
+- int n_position, n_path_offset = p_s_chk_path->path_length;
+- struct buffer_head *p_s_parent;
++ int n_position, n_path_offset = chk_path->path_length;
++ struct buffer_head *parent;
+
+ RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
+ "PAP-5010: invalid offset in the path");
+@@ -257,42 +257,42 @@ static inline const struct reiserfs_key
+ while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
+
+ RFALSE(!buffer_uptodate
+- (PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
++ (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)),
+ "PAP-5020: parent is not uptodate");
+
+ /* Parent at the path is not in the tree now. */
+ if (!B_IS_IN_TREE
+- (p_s_parent =
+- PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)))
++ (parent =
++ PATH_OFFSET_PBUFFER(chk_path, n_path_offset)))
+ return &MAX_KEY;
+ /* Check whether position in the parent is correct. */
+ if ((n_position =
+- PATH_OFFSET_POSITION(p_s_chk_path,
++ PATH_OFFSET_POSITION(chk_path,
+ n_path_offset)) >
+- B_NR_ITEMS(p_s_parent))
++ B_NR_ITEMS(parent))
+ return &MAX_KEY;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(p_s_parent, n_position) !=
+- PATH_OFFSET_PBUFFER(p_s_chk_path,
++ if (B_N_CHILD_NUM(parent, n_position) !=
++ PATH_OFFSET_PBUFFER(chk_path,
+ n_path_offset + 1)->b_blocknr)
+ return &MAX_KEY;
+ /* Return delimiting key if position in the parent is not equal to zero. */
+ if (n_position)
+- return B_N_PDELIM_KEY(p_s_parent, n_position - 1);
++ return B_N_PDELIM_KEY(parent, n_position - 1);
+ }
+ /* Return MIN_KEY if we are in the root of the buffer tree. */
+- if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->
++ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+ b_blocknr == SB_ROOT_BLOCK(sb))
+ return &MIN_KEY;
+ return &MAX_KEY;
+ }
+
+ /* Get delimiting key of the buffer at the path and its right neighbor. */
+-inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
++inline const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
+ const struct super_block *sb)
+ {
+- int n_position, n_path_offset = p_s_chk_path->path_length;
+- struct buffer_head *p_s_parent;
++ int n_position, n_path_offset = chk_path->path_length;
++ struct buffer_head *parent;
+
+ RFALSE(n_path_offset < FIRST_PATH_ELEMENT_OFFSET,
+ "PAP-5030: invalid offset in the path");
+@@ -300,31 +300,31 @@ inline const struct reiserfs_key *get_rk
+ while (n_path_offset-- > FIRST_PATH_ELEMENT_OFFSET) {
+
+ RFALSE(!buffer_uptodate
+- (PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)),
++ (PATH_OFFSET_PBUFFER(chk_path, n_path_offset)),
+ "PAP-5040: parent is not uptodate");
+
+ /* Parent at the path is not in the tree now. */
+ if (!B_IS_IN_TREE
+- (p_s_parent =
+- PATH_OFFSET_PBUFFER(p_s_chk_path, n_path_offset)))
++ (parent =
++ PATH_OFFSET_PBUFFER(chk_path, n_path_offset)))
+ return &MIN_KEY;
+ /* Check whether position in the parent is correct. */
+ if ((n_position =
+- PATH_OFFSET_POSITION(p_s_chk_path,
++ PATH_OFFSET_POSITION(chk_path,
+ n_path_offset)) >
+- B_NR_ITEMS(p_s_parent))
++ B_NR_ITEMS(parent))
+ return &MIN_KEY;
+ /* Check whether parent at the path really points to the child. */
+- if (B_N_CHILD_NUM(p_s_parent, n_position) !=
+- PATH_OFFSET_PBUFFER(p_s_chk_path,
++ if (B_N_CHILD_NUM(parent, n_position) !=
++ PATH_OFFSET_PBUFFER(chk_path,
+ n_path_offset + 1)->b_blocknr)
+ return &MIN_KEY;
+ /* Return delimiting key if position in the parent is not the last one. */
+- if (n_position != B_NR_ITEMS(p_s_parent))
+- return B_N_PDELIM_KEY(p_s_parent, n_position);
++ if (n_position != B_NR_ITEMS(parent))
++ return B_N_PDELIM_KEY(parent, n_position);
+ }
+ /* Return MAX_KEY if we are in the root of the buffer tree. */
+- if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->
++ if (PATH_OFFSET_PBUFFER(chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+ b_blocknr == SB_ROOT_BLOCK(sb))
+ return &MAX_KEY;
+ return &MIN_KEY;
+@@ -335,25 +335,25 @@ inline const struct reiserfs_key *get_rk
+ the path. These delimiting keys are stored at least one level above that buffer in the tree. If the
+ buffer is the first or last node in the tree order then one of the delimiting keys may be absent, and in
+ this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
+-static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which should be checked. */
+- const struct cpu_key *p_s_key, /* Key which should be checked. */
+- struct super_block *sb /* Super block pointer. */
++static inline int key_in_buffer(struct treepath *chk_path, /* Path which should be checked. */
++ const struct cpu_key *key, /* Key which should be checked. */
++ struct super_block *sb
+ )
+ {
+
+- RFALSE(!p_s_key || p_s_chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET
+- || p_s_chk_path->path_length > MAX_HEIGHT,
++ RFALSE(!key || chk_path->path_length < FIRST_PATH_ELEMENT_OFFSET
++ || chk_path->path_length > MAX_HEIGHT,
+ "PAP-5050: pointer to the key(%p) is NULL or invalid path length(%d)",
+- p_s_key, p_s_chk_path->path_length);
+- RFALSE(!PATH_PLAST_BUFFER(p_s_chk_path)->b_bdev,
++ key, chk_path->path_length);
++ RFALSE(!PATH_PLAST_BUFFER(chk_path)->b_bdev,
+ "PAP-5060: device must not be NODEV");
+
+- if (comp_keys(get_lkey(p_s_chk_path, sb), p_s_key) == 1)
++ if (comp_keys(get_lkey(chk_path, sb), key) == 1)
+ /* left delimiting key is bigger, that the key we look for */
+ return 0;
+- // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, sb)) != -1 )
+- if (comp_keys(get_rkey(p_s_chk_path, sb), p_s_key) != 1)
+- /* p_s_key must be less than right delimitiing key */
++ /* if ( comp_keys(key, get_rkey(chk_path, sb)) != -1 ) */
++ if (comp_keys(get_rkey(chk_path, sb), key) != 1)
++ /* key must be less than right delimiting key */
+ return 0;
+ return 1;
+ }
+@@ -369,34 +369,34 @@ int reiserfs_check_path(struct treepath
+ * dirty bits clean when preparing the buffer for the log.
+ * This version should only be called from fix_nodes() */
+ void pathrelse_and_restore(struct super_block *sb,
+- struct treepath *p_s_search_path)
++ struct treepath *search_path)
+ {
+- int n_path_offset = p_s_search_path->path_length;
++ int n_path_offset = search_path->path_length;
+
+ RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
+ "clm-4000: invalid path offset");
+
+ while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET) {
+ struct buffer_head *bh;
+- bh = PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--);
++ bh = PATH_OFFSET_PBUFFER(search_path, n_path_offset--);
+ reiserfs_restore_prepared_buffer(sb, bh);
+ brelse(bh);
+ }
+- p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
++ search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+ }
+
+ /* Drop the reference to each buffer in a path */
+-void pathrelse(struct treepath *p_s_search_path)
++void pathrelse(struct treepath *search_path)
+ {
+- int n_path_offset = p_s_search_path->path_length;
++ int n_path_offset = search_path->path_length;
+
+ RFALSE(n_path_offset < ILLEGAL_PATH_ELEMENT_OFFSET,
+ "PAP-5090: invalid path offset");
+
+ while (n_path_offset > ILLEGAL_PATH_ELEMENT_OFFSET)
+- brelse(PATH_OFFSET_PBUFFER(p_s_search_path, n_path_offset--));
++ brelse(PATH_OFFSET_PBUFFER(search_path, n_path_offset--));
+
+- p_s_search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
++ search_path->path_length = ILLEGAL_PATH_ELEMENT_OFFSET;
+ }
+
+ static int is_leaf(char *buf, int blocksize, struct buffer_head *bh)
+@@ -547,9 +547,9 @@ static void search_by_key_reada(struct s
+ * Algorithm SearchByKey *
+ * look for item in the Disk S+Tree by its key *
+ * Input: sb - super block *
+- * p_s_key - pointer to the key to search *
++ * key - pointer to the key to search *
+ * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR *
+- * p_s_search_path - path from the root to the needed leaf *
++ * search_path - path from the root to the needed leaf *
+ **************************************************************************/
+
+ /* This function fills up the path from the root to the leaf as it
+@@ -566,8 +566,8 @@ static void search_by_key_reada(struct s
+ correctness of the top of the path but need not be checked for the
+ correctness of the bottom of the path */
+ /* The function is NOT SCHEDULE-SAFE! */
+-int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key to search. */
+- struct treepath *p_s_search_path,/* This structure was
++int search_by_key(struct super_block *sb, const struct cpu_key *key, /* Key to search. */
++ struct treepath *search_path,/* This structure was
+ allocated and initialized
+ by the calling
+ function. It is filled up
+@@ -580,7 +580,7 @@ int search_by_key(struct super_block *sb
+ b_blocknr_t n_block_number;
+ int expected_level;
+ struct buffer_head *bh;
+- struct path_element *p_s_last_element;
++ struct path_element *last_element;
+ int n_node_level, n_retval;
+ int right_neighbor_of_leaf_node;
+ int fs_gen;
+@@ -598,7 +598,7 @@ int search_by_key(struct super_block *sb
+ we must be careful to release all nodes in a path before we either
+ discard the path struct or re-use the path struct, as we do here. */
+
+- pathrelse(p_s_search_path);
++ pathrelse(search_path);
+
+ right_neighbor_of_leaf_node = 0;
+
+@@ -615,18 +615,18 @@ int search_by_key(struct super_block *sb
+ "%s: there were %d iterations of "
+ "while loop looking for key %K",
+ current->comm, n_repeat_counter,
+- p_s_key);
++ key);
+ #endif
+
+ /* prep path to have another element added to it. */
+- p_s_last_element =
+- PATH_OFFSET_PELEMENT(p_s_search_path,
+- ++p_s_search_path->path_length);
++ last_element =
++ PATH_OFFSET_PELEMENT(search_path,
++ ++search_path->path_length);
+ fs_gen = get_generation(sb);
+
+ /* Read the next tree node, and set the last element in the path to
+ have a pointer to it. */
+- if ((bh = p_s_last_element->pe_buffer =
++ if ((bh = last_element->pe_buffer =
+ sb_getblk(sb, n_block_number))) {
+ if (!buffer_uptodate(bh) && reada_count > 1)
+ search_by_key_reada(sb, reada_bh,
+@@ -637,8 +637,8 @@ int search_by_key(struct super_block *sb
+ goto io_error;
+ } else {
+ io_error:
+- p_s_search_path->path_length--;
+- pathrelse(p_s_search_path);
++ search_path->path_length--;
++ pathrelse(search_path);
+ return IO_ERROR;
+ }
+ reada_count = 0;
+@@ -652,12 +652,12 @@ int search_by_key(struct super_block *sb
+ if (fs_changed(fs_gen, sb) &&
+ (!B_IS_IN_TREE(bh) ||
+ B_LEVEL(bh) != expected_level ||
+- !key_in_buffer(p_s_search_path, p_s_key, sb))) {
++ !key_in_buffer(search_path, key, sb))) {
+ PROC_INFO_INC(sb, search_by_key_fs_changed);
+ PROC_INFO_INC(sb, search_by_key_restarted);
+ PROC_INFO_INC(sb,
+ sbk_restarted[expected_level - 1]);
+- pathrelse(p_s_search_path);
++ pathrelse(search_path);
+
+ /* Get the root block number so that we can repeat the search
+ starting from the root. */
+@@ -669,11 +669,11 @@ int search_by_key(struct super_block *sb
+ continue;
+ }
+
+- /* only check that the key is in the buffer if p_s_key is not
++ /* only check that the key is in the buffer if key is not
+ equal to the MAX_KEY. Latter case is only possible in
+ "finish_unfinished()" processing during mount. */
+- RFALSE(comp_keys(&MAX_KEY, p_s_key) &&
+- !key_in_buffer(p_s_search_path, p_s_key, sb),
++ RFALSE(comp_keys(&MAX_KEY, key) &&
++ !key_in_buffer(search_path, key, sb),
+ "PAP-5130: key is not in the buffer");
+ #ifdef CONFIG_REISERFS_CHECK
+ if (cur_tb) {
+@@ -689,7 +689,7 @@ int search_by_key(struct super_block *sb
+ reiserfs_error(sb, "vs-5150",
+ "invalid format found in block %ld. "
+ "Fsck?", bh->b_blocknr);
+- pathrelse(p_s_search_path);
++ pathrelse(search_path);
+ return IO_ERROR;
+ }
+
+@@ -702,12 +702,12 @@ int search_by_key(struct super_block *sb
+ "vs-5152: tree level (%d) is less than stop level (%d)",
+ n_node_level, n_stop_level);
+
+- n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(bh, 0),
++ n_retval = bin_search(key, B_N_PITEM_HEAD(bh, 0),
+ B_NR_ITEMS(bh),
+ (n_node_level ==
+ DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
+ KEY_SIZE,
+- &(p_s_last_element->pe_position));
++ &(last_element->pe_position));
+ if (n_node_level == n_stop_level) {
+ return n_retval;
+ }
+@@ -715,7 +715,7 @@ int search_by_key(struct super_block *sb
+ /* we are not in the stop level */
+ if (n_retval == ITEM_FOUND)
+ /* item has been found, so we choose the pointer which is to the right of the found one */
+- p_s_last_element->pe_position++;
++ last_element->pe_position++;
+
+ /* if item was not found we choose the position which is to
+ the left of the found item. This requires no code,
+@@ -725,23 +725,23 @@ int search_by_key(struct super_block *sb
+ an internal node. Now we calculate child block number by
+ position in the node. */
+ n_block_number =
+- B_N_CHILD_NUM(bh, p_s_last_element->pe_position);
++ B_N_CHILD_NUM(bh, last_element->pe_position);
+
+ /* if we are going to read leaf nodes, try for read ahead as well */
+- if ((p_s_search_path->reada & PATH_READA) &&
++ if ((search_path->reada & PATH_READA) &&
+ n_node_level == DISK_LEAF_NODE_LEVEL + 1) {
+- int pos = p_s_last_element->pe_position;
++ int pos = last_element->pe_position;
+ int limit = B_NR_ITEMS(bh);
+ struct reiserfs_key *le_key;
+
+- if (p_s_search_path->reada & PATH_READA_BACK)
++ if (search_path->reada & PATH_READA_BACK)
+ limit = 0;
+ while (reada_count < SEARCH_BY_KEY_READA) {
+ if (pos == limit)
+ break;
+ reada_blocks[reada_count++] =
+ B_N_CHILD_NUM(bh, pos);
+- if (p_s_search_path->reada & PATH_READA_BACK)
++ if (search_path->reada & PATH_READA_BACK)
+ pos--;
+ else
+ pos++;
+@@ -751,7 +751,7 @@ int search_by_key(struct super_block *sb
+ */
+ le_key = B_N_PDELIM_KEY(bh, pos);
+ if (le32_to_cpu(le_key->k_objectid) !=
+- p_s_key->on_disk_key.k_objectid) {
++ key->on_disk_key.k_objectid) {
+ break;
+ }
+ }
+@@ -760,11 +760,11 @@ int search_by_key(struct super_block *sb
+ }
+
+ /* Form the path to an item and position in this item which contains
+- file byte defined by p_s_key. If there is no such item
++ file byte defined by key. If there is no such item
+ corresponding to the key, we point the path to the item with
+- maximal key less than p_s_key, and *p_n_pos_in_item is set to one
++ maximal key less than key, and *pos_in_item is set to one
+ past the last entry/byte in the item. If searching for entry in a
+- directory item, and it is not found, *p_n_pos_in_item is set to one
++ directory item, and it is not found, *pos_in_item is set to one
+ entry more than the entry with maximal key which is less than the
+ sought key.
+
+@@ -777,7 +777,7 @@ int search_by_key(struct super_block *sb
+ /* The function is NOT SCHEDULE-SAFE! */
+ int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */
+ const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */
+- struct treepath *p_s_search_path /* Filled up by this function. */
++ struct treepath *search_path /* Filled up by this function. */
+ )
+ {
+ struct item_head *p_le_ih; /* pointer to on-disk structure */
+@@ -788,34 +788,34 @@ int search_for_position_by_key(struct su
+
+ /* If searching for directory entry. */
+ if (is_direntry_cpu_key(p_cpu_key))
+- return search_by_entry_key(sb, p_cpu_key, p_s_search_path,
++ return search_by_entry_key(sb, p_cpu_key, search_path,
+ &de);
+
+ /* If not searching for directory entry. */
+
+ /* If item is found. */
+- retval = search_item(sb, p_cpu_key, p_s_search_path);
++ retval = search_item(sb, p_cpu_key, search_path);
+ if (retval == IO_ERROR)
+ return retval;
+ if (retval == ITEM_FOUND) {
+
+ RFALSE(!ih_item_len
+ (B_N_PITEM_HEAD
+- (PATH_PLAST_BUFFER(p_s_search_path),
+- PATH_LAST_POSITION(p_s_search_path))),
++ (PATH_PLAST_BUFFER(search_path),
++ PATH_LAST_POSITION(search_path))),
+ "PAP-5165: item length equals zero");
+
+- pos_in_item(p_s_search_path) = 0;
++ pos_in_item(search_path) = 0;
+ return POSITION_FOUND;
+ }
+
+- RFALSE(!PATH_LAST_POSITION(p_s_search_path),
++ RFALSE(!PATH_LAST_POSITION(search_path),
+ "PAP-5170: position equals zero");
+
+ /* Item is not found. Set path to the previous item. */
+ p_le_ih =
+- B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path),
+- --PATH_LAST_POSITION(p_s_search_path));
++ B_N_PITEM_HEAD(PATH_PLAST_BUFFER(search_path),
++ --PATH_LAST_POSITION(search_path));
+ n_blk_size = sb->s_blocksize;
+
+ if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
+@@ -829,9 +829,9 @@ int search_for_position_by_key(struct su
+ /* Needed byte is contained in the item pointed to by the path. */
+ if (item_offset <= offset &&
+ item_offset + op_bytes_number(p_le_ih, n_blk_size) > offset) {
+- pos_in_item(p_s_search_path) = offset - item_offset;
++ pos_in_item(search_path) = offset - item_offset;
+ if (is_indirect_le_ih(p_le_ih)) {
+- pos_in_item(p_s_search_path) /= n_blk_size;
++ pos_in_item(search_path) /= n_blk_size;
+ }
+ return POSITION_FOUND;
+ }
+@@ -839,18 +839,18 @@ int search_for_position_by_key(struct su
+ /* Needed byte is not contained in the item pointed to by the
+ path. Set pos_in_item out of the item. */
+ if (is_indirect_le_ih(p_le_ih))
+- pos_in_item(p_s_search_path) =
++ pos_in_item(search_path) =
+ ih_item_len(p_le_ih) / UNFM_P_SIZE;
+ else
+- pos_in_item(p_s_search_path) = ih_item_len(p_le_ih);
++ pos_in_item(search_path) = ih_item_len(p_le_ih);
+
+ return POSITION_NOT_FOUND;
+ }
+
+ /* Compare given item and item pointed to by the path. */
+-int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path)
++int comp_items(const struct item_head *stored_ih, const struct treepath *path)
+ {
+- struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path);
++ struct buffer_head *bh = PATH_PLAST_BUFFER(path);
+ struct item_head *ih;
+
+ /* Last buffer at the path is not in the tree. */
+@@ -858,11 +858,11 @@ int comp_items(const struct item_head *s
+ return 1;
+
+ /* Last path position is invalid. */
+- if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(bh))
++ if (PATH_LAST_POSITION(path) >= B_NR_ITEMS(bh))
+ return 1;
+
+ /* we need only to know, whether it is the same item */
+- ih = get_ih(p_s_path);
++ ih = get_ih(path);
+ return memcmp(stored_ih, ih, IH_SIZE);
+ }
+
+@@ -951,14 +951,14 @@ static inline int prepare_for_direntry_i
+ In case of file truncate calculate whether this item must be deleted/truncated or last
+ unformatted node of this item will be converted to a direct item.
+ This function returns a determination of what balance mode the calling function should employ. */
+-static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *p_s_path, const struct cpu_key *p_s_item_key, int *p_n_removed, /* Number of unformatted nodes which were removed
++static char prepare_for_delete_or_cut(struct reiserfs_transaction_handle *th, struct inode *inode, struct treepath *path, const struct cpu_key *item_key, int *removed, /* Number of unformatted nodes which were removed
+ from end of the file. */
+- int *p_n_cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
++ int *cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
+ )
+ {
+ struct super_block *sb = inode->i_sb;
+- struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path);
+- struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path);
++ struct item_head *p_le_ih = PATH_PITEM_HEAD(path);
++ struct buffer_head *bh = PATH_PLAST_BUFFER(path);
+
+ BUG_ON(!th->t_trans_id);
+
+@@ -968,20 +968,20 @@ static char prepare_for_delete_or_cut(st
+ RFALSE(n_new_file_length != max_reiserfs_offset(inode),
+ "PAP-5210: mode must be M_DELETE");
+
+- *p_n_cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
++ *cut_size = -(IH_SIZE + ih_item_len(p_le_ih));
+ return M_DELETE;
+ }
+
+ /* Directory item. */
+ if (is_direntry_le_ih(p_le_ih))
+- return prepare_for_direntry_item(p_s_path, p_le_ih, inode,
++ return prepare_for_direntry_item(path, p_le_ih, inode,
+ n_new_file_length,
+- p_n_cut_size);
++ cut_size);
+
+ /* Direct item. */
+ if (is_direct_le_ih(p_le_ih))
+- return prepare_for_direct_item(p_s_path, p_le_ih, inode,
+- n_new_file_length, p_n_cut_size);
++ return prepare_for_direct_item(path, p_le_ih, inode,
++ n_new_file_length, cut_size);
+
+ /* Case of an indirect item. */
+ {
+@@ -1001,9 +1001,9 @@ static char prepare_for_delete_or_cut(st
+
+ do {
+ need_re_search = 0;
+- *p_n_cut_size = 0;
+- bh = PATH_PLAST_BUFFER(p_s_path);
+- copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
++ *cut_size = 0;
++ bh = PATH_PLAST_BUFFER(path);
++ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ pos = I_UNFM_NUM(&s_ih);
+
+ while (le_ih_k_offset (&s_ih) + (pos - 1) * blk_size > n_new_file_length) {
+@@ -1013,10 +1013,9 @@ static char prepare_for_delete_or_cut(st
+ /* Each unformatted block deletion may involve one additional
+ * bitmap block into the transaction, thereby the initial
+ * journal space reservation might not be enough. */
+- if (!delete && (*p_n_cut_size) != 0 &&
+- reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD) {
++ if (!delete && (*cut_size) != 0 &&
++ reiserfs_transaction_free_space(th) < JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD)
+ break;
+- }
+
+ unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
+ block = get_block_num(unfm, 0);
+@@ -1030,17 +1029,17 @@ static char prepare_for_delete_or_cut(st
+
+ cond_resched();
+
+- if (item_moved (&s_ih, p_s_path)) {
++ if (item_moved (&s_ih, path)) {
+ need_re_search = 1;
+ break;
+ }
+
+ pos --;
+- (*p_n_removed) ++;
+- (*p_n_cut_size) -= UNFM_P_SIZE;
++ (*removed)++;
++ (*cut_size) -= UNFM_P_SIZE;
+
+ if (pos == 0) {
+- (*p_n_cut_size) -= IH_SIZE;
++ (*cut_size) -= IH_SIZE;
+ result = M_DELETE;
+ break;
+ }
+@@ -1050,10 +1049,10 @@ static char prepare_for_delete_or_cut(st
+ ** buffer */
+ reiserfs_restore_prepared_buffer(sb, bh);
+ } while (need_re_search &&
+- search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND);
+- pos_in_item(p_s_path) = pos * UNFM_P_SIZE;
++ search_for_position_by_key(sb, item_key, path) == POSITION_FOUND);
++ pos_in_item(path) = pos * UNFM_P_SIZE;
+
+- if (*p_n_cut_size == 0) {
++ if (*cut_size == 0) {
+ /* Nothing were cut. maybe convert last unformatted node to the
+ * direct item? */
+ result = M_CONVERT;
+@@ -1091,7 +1090,7 @@ static int calc_deleted_bytes_number(str
+ static void init_tb_struct(struct reiserfs_transaction_handle *th,
+ struct tree_balance *tb,
+ struct super_block *sb,
+- struct treepath *p_s_path, int n_size)
++ struct treepath *path, int n_size)
+ {
+
+ BUG_ON(!th->t_trans_id);
+@@ -1099,9 +1098,9 @@ static void init_tb_struct(struct reiser
+ memset(tb, '\0', sizeof(struct tree_balance));
+ tb->transaction_handle = th;
+ tb->tb_sb = sb;
+- tb->tb_path = p_s_path;
+- PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
+- PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
++ tb->tb_path = path;
++ PATH_OFFSET_PBUFFER(path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
++ PATH_OFFSET_POSITION(path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
+ tb->insert_size[0] = n_size;
+ }
+
+@@ -1141,13 +1140,17 @@ char head2type(struct item_head *ih)
+ }
+ #endif
+
+-/* Delete object item. */
+-int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_path, /* Path to the deleted item. */
+- const struct cpu_key *p_s_item_key, /* Key to search for the deleted item. */
+- struct inode *inode, /* inode is here just to update
+- * i_blocks and quotas */
+- struct buffer_head *p_s_un_bh)
+-{ /* NULL or unformatted node pointer. */
++/* Delete object item.
++ * th - active transaction handle
++ * path - path to the deleted item
++ * item_key - key to search for the deleted item
++ * inode - used for updating i_blocks and quotas
++ * un_bh - NULL or unformatted node pointer
++ */
++int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
++ struct treepath *path, const struct cpu_key *item_key,
++ struct inode *inode, struct buffer_head *un_bh)
++{
+ struct super_block *sb = inode->i_sb;
+ struct tree_balance s_del_balance;
+ struct item_head s_ih;
+@@ -1162,7 +1165,7 @@ int reiserfs_delete_item(struct reiserfs
+
+ BUG_ON(!th->t_trans_id);
+
+- init_tb_struct(th, &s_del_balance, sb, p_s_path,
++ init_tb_struct(th, &s_del_balance, sb, path,
+ 0 /*size is unknown */ );
+
+ while (1) {
+@@ -1172,14 +1175,14 @@ int reiserfs_delete_item(struct reiserfs
+ n_iter++;
+ c_mode =
+ #endif
+- prepare_for_delete_or_cut(th, inode, p_s_path,
+- p_s_item_key, &n_removed,
++ prepare_for_delete_or_cut(th, inode, path,
++ item_key, &n_removed,
+ &n_del_size,
+ max_reiserfs_offset(inode));
+
+ RFALSE(c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
+
+- copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
++ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ s_del_balance.insert_size[0] = n_del_size;
+
+ n_ret_value = fix_nodes(M_DELETE, &s_del_balance, NULL, NULL);
+@@ -1190,13 +1193,13 @@ int reiserfs_delete_item(struct reiserfs
+
+ // file system changed, repeat search
+ n_ret_value =
+- search_for_position_by_key(sb, p_s_item_key, p_s_path);
++ search_for_position_by_key(sb, item_key, path);
+ if (n_ret_value == IO_ERROR)
+ break;
+ if (n_ret_value == FILE_NOT_FOUND) {
+ reiserfs_warning(sb, "vs-5340",
+ "no items of the file %K found",
+- p_s_item_key);
++ item_key);
+ break;
+ }
+ } /* while (1) */
+@@ -1207,7 +1210,7 @@ int reiserfs_delete_item(struct reiserfs
+ }
+ // reiserfs_delete_item returns item length when success
+ n_ret_value = calc_deleted_bytes_number(&s_del_balance, M_DELETE);
+- q_ih = get_ih(p_s_path);
++ q_ih = get_ih(path);
+ quota_cut_bytes = ih_item_len(q_ih);
+
+ /* hack so the quota code doesn't have to guess if the file
+@@ -1224,7 +1227,7 @@ int reiserfs_delete_item(struct reiserfs
+ }
+ }
+
+- if (p_s_un_bh) {
++ if (un_bh) {
+ int off;
+ char *data;
+
+@@ -1242,16 +1245,16 @@ int reiserfs_delete_item(struct reiserfs
+ ** The unformatted node must be dirtied later on. We can't be
+ ** sure here if the entire tail has been deleted yet.
+ **
+- ** p_s_un_bh is from the page cache (all unformatted nodes are
++ ** un_bh is from the page cache (all unformatted nodes are
+ ** from the page cache) and might be a highmem page. So, we
+- ** can't use p_s_un_bh->b_data.
++ ** can't use un_bh->b_data.
+ ** -clm
+ */
+
+- data = kmap_atomic(p_s_un_bh->b_page, KM_USER0);
++ data = kmap_atomic(un_bh->b_page, KM_USER0);
+ off = ((le_ih_k_offset(&s_ih) - 1) & (PAGE_CACHE_SIZE - 1));
+ memcpy(data + off,
+- B_I_PITEM(PATH_PLAST_BUFFER(p_s_path), &s_ih),
++ B_I_PITEM(PATH_PLAST_BUFFER(path), &s_ih),
+ n_ret_value);
+ kunmap_atomic(data, KM_USER0);
+ }
+@@ -1427,9 +1430,9 @@ static void unmap_buffers(struct page *p
+ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
+ struct inode *inode,
+ struct page *page,
+- struct treepath *p_s_path,
+- const struct cpu_key *p_s_item_key,
+- loff_t n_new_file_size, char *p_c_mode)
++ struct treepath *path,
++ const struct cpu_key *item_key,
++ loff_t n_new_file_size, char *mode)
+ {
+ struct super_block *sb = inode->i_sb;
+ int n_block_size = sb->s_blocksize;
+@@ -1445,17 +1448,17 @@ static int maybe_indirect_to_direct(stru
+ !tail_has_to_be_packed(inode) ||
+ !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
+ /* leave tail in an unformatted node */
+- *p_c_mode = M_SKIP_BALANCING;
++ *mode = M_SKIP_BALANCING;
+ cut_bytes =
+ n_block_size - (n_new_file_size & (n_block_size - 1));
+- pathrelse(p_s_path);
++ pathrelse(path);
+ return cut_bytes;
+ }
+- /* Permorm the conversion to a direct_item. */
+- /* return indirect_to_direct(inode, p_s_path, p_s_item_key,
+- n_new_file_size, p_c_mode); */
+- return indirect2direct(th, inode, page, p_s_path, p_s_item_key,
+- n_new_file_size, p_c_mode);
++ /* Perform the conversion to a direct_item. */
++ /* return indirect_to_direct(inode, path, item_key,
++ n_new_file_size, mode); */
++ return indirect2direct(th, inode, page, path, item_key,
++ n_new_file_size, mode);
+ }
+
+ /* we did indirect_to_direct conversion. And we have inserted direct
+@@ -1506,8 +1509,8 @@ static void indirect_to_direct_roll_back
+
+ /* (Truncate or cut entry) or delete object item. Returns < 0 on failure */
+ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
+- struct treepath *p_s_path,
+- struct cpu_key *p_s_item_key,
++ struct treepath *path,
++ struct cpu_key *item_key,
+ struct inode *inode,
+ struct page *page, loff_t n_new_file_size)
+ {
+@@ -1528,7 +1531,7 @@ int reiserfs_cut_from_item(struct reiser
+
+ BUG_ON(!th->t_trans_id);
+
+- init_tb_struct(th, &s_cut_balance, inode->i_sb, p_s_path,
++ init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
+ n_cut_size);
+
+ /* Repeat this loop until we either cut the item without needing
+@@ -1540,8 +1543,8 @@ int reiserfs_cut_from_item(struct reiser
+ pointers. */
+
+ c_mode =
+- prepare_for_delete_or_cut(th, inode, p_s_path,
+- p_s_item_key, &n_removed,
++ prepare_for_delete_or_cut(th, inode, path,
++ item_key, &n_removed,
+ &n_cut_size, n_new_file_size);
+ if (c_mode == M_CONVERT) {
+ /* convert last unformatted node to direct item or leave
+@@ -1551,7 +1554,7 @@ int reiserfs_cut_from_item(struct reiser
+
+ n_ret_value =
+ maybe_indirect_to_direct(th, inode, page,
+- p_s_path, p_s_item_key,
++ path, item_key,
+ n_new_file_size, &c_mode);
+ if (c_mode == M_SKIP_BALANCING)
+ /* tail has been left in the unformatted node */
+@@ -1568,26 +1571,26 @@ int reiserfs_cut_from_item(struct reiser
+ inserting the new direct item. Now we are removing the
+ last unformatted node pointer. Set key to search for
+ it. */
+- set_cpu_key_k_type(p_s_item_key, TYPE_INDIRECT);
+- p_s_item_key->key_length = 4;
++ set_cpu_key_k_type(item_key, TYPE_INDIRECT);
++ item_key->key_length = 4;
+ n_new_file_size -=
+ (n_new_file_size & (sb->s_blocksize - 1));
+ tail_pos = n_new_file_size;
+- set_cpu_key_k_offset(p_s_item_key, n_new_file_size + 1);
++ set_cpu_key_k_offset(item_key, n_new_file_size + 1);
+ if (search_for_position_by_key
+- (sb, p_s_item_key,
+- p_s_path) == POSITION_NOT_FOUND) {
+- print_block(PATH_PLAST_BUFFER(p_s_path), 3,
+- PATH_LAST_POSITION(p_s_path) - 1,
+- PATH_LAST_POSITION(p_s_path) + 1);
++ (sb, item_key,
++ path) == POSITION_NOT_FOUND) {
++ print_block(PATH_PLAST_BUFFER(path), 3,
++ PATH_LAST_POSITION(path) - 1,
++ PATH_LAST_POSITION(path) + 1);
+ reiserfs_panic(sb, "PAP-5580", "item to "
+ "convert does not exist (%K)",
+- p_s_item_key);
++ item_key);
+ }
+ continue;
+ }
+ if (n_cut_size == 0) {
+- pathrelse(p_s_path);
++ pathrelse(path);
+ return 0;
+ }
+
+@@ -1600,12 +1603,12 @@ int reiserfs_cut_from_item(struct reiser
+ PROC_INFO_INC(sb, cut_from_item_restarted);
+
+ n_ret_value =
+- search_for_position_by_key(sb, p_s_item_key, p_s_path);
++ search_for_position_by_key(sb, item_key, path);
+ if (n_ret_value == POSITION_FOUND)
+ continue;
+
+ reiserfs_warning(sb, "PAP-5610", "item %K not found",
+- p_s_item_key);
++ item_key);
+ unfix_nodes(&s_cut_balance);
+ return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
+ } /* while */
+@@ -1615,7 +1618,7 @@ int reiserfs_cut_from_item(struct reiser
+ if (n_is_inode_locked) {
+ // FIXME: this seems to be not needed: we are always able
+ // to cut item
+- indirect_to_direct_roll_back(th, inode, p_s_path);
++ indirect_to_direct_roll_back(th, inode, path);
+ }
+ if (n_ret_value == NO_DISK_SPACE)
+ reiserfs_warning(sb, "reiserfs-5092",
+@@ -1631,7 +1634,7 @@ int reiserfs_cut_from_item(struct reiser
+ /* Calculate number of bytes that need to be cut from the item. */
+ quota_cut_bytes =
+ (c_mode ==
+- M_DELETE) ? ih_item_len(get_ih(p_s_path)) : -s_cut_balance.
++ M_DELETE) ? ih_item_len(get_ih(path)) : -s_cut_balance.
+ insert_size[0];
+ if (retval2 == -1)
+ n_ret_value = calc_deleted_bytes_number(&s_cut_balance, c_mode);
+@@ -1878,7 +1881,7 @@ int reiserfs_do_truncate(struct reiserfs
+ #ifdef CONFIG_REISERFS_CHECK
+ // this makes sure, that we __append__, not overwrite or add holes
+ static void check_research_for_paste(struct treepath *path,
+- const struct cpu_key *p_s_key)
++ const struct cpu_key *key)
+ {
+ struct item_head *found_ih = get_ih(path);
+
+@@ -1886,35 +1889,35 @@ static void check_research_for_paste(str
+ if (le_ih_k_offset(found_ih) +
+ op_bytes_number(found_ih,
+ get_last_bh(path)->b_size) !=
+- cpu_key_k_offset(p_s_key)
++ cpu_key_k_offset(key)
+ || op_bytes_number(found_ih,
+ get_last_bh(path)->b_size) !=
+ pos_in_item(path))
+ reiserfs_panic(NULL, "PAP-5720", "found direct item "
+ "%h or position (%d) does not match "
+ "to key %K", found_ih,
+- pos_in_item(path), p_s_key);
++ pos_in_item(path), key);
+ }
+ if (is_indirect_le_ih(found_ih)) {
+ if (le_ih_k_offset(found_ih) +
+ op_bytes_number(found_ih,
+ get_last_bh(path)->b_size) !=
+- cpu_key_k_offset(p_s_key)
++ cpu_key_k_offset(key)
+ || I_UNFM_NUM(found_ih) != pos_in_item(path)
+ || get_ih_free_space(found_ih) != 0)
+ reiserfs_panic(NULL, "PAP-5730", "found indirect "
+ "item (%h) or position (%d) does not "
+ "match to key (%K)",
+- found_ih, pos_in_item(path), p_s_key);
++ found_ih, pos_in_item(path), key);
+ }
+ }
+ #endif /* config reiserfs check */
+
+ /* Paste bytes to the existing item. Returns bytes number pasted into the item. */
+-int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_search_path, /* Path to the pasted item. */
+- const struct cpu_key *p_s_key, /* Key to search for the needed item. */
++int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct treepath *search_path, /* Path to the pasted item. */
++ const struct cpu_key *key, /* Key to search for the needed item. */
+ struct inode *inode, /* Inode item belongs to */
+- const char *p_c_body, /* Pointer to the bytes to paste. */
++ const char *body, /* Pointer to the bytes to paste. */
+ int n_pasted_size)
+ { /* Size of pasted bytes. */
+ struct tree_balance s_paste_balance;
+@@ -1929,17 +1932,17 @@ int reiserfs_paste_into_item(struct reis
+ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota paste_into_item(): allocating %u id=%u type=%c",
+ n_pasted_size, inode->i_uid,
+- key2type(&(p_s_key->on_disk_key)));
++ key2type(&(key->on_disk_key)));
+ #endif
+
+ if (DQUOT_ALLOC_SPACE_NODIRTY(inode, n_pasted_size)) {
+- pathrelse(p_s_search_path);
++ pathrelse(search_path);
+ return -EDQUOT;
+ }
+- init_tb_struct(th, &s_paste_balance, th->t_super, p_s_search_path,
++ init_tb_struct(th, &s_paste_balance, th->t_super, search_path,
+ n_pasted_size);
+ #ifdef DISPLACE_NEW_PACKING_LOCALITIES
+- s_paste_balance.key = p_s_key->on_disk_key;
++ s_paste_balance.key = key->on_disk_key;
+ #endif
+
+ /* DQUOT_* can schedule, must check before the fix_nodes */
+@@ -1949,13 +1952,13 @@ int reiserfs_paste_into_item(struct reis
+
+ while ((retval =
+ fix_nodes(M_PASTE, &s_paste_balance, NULL,
+- p_c_body)) == REPEAT_SEARCH) {
++ body)) == REPEAT_SEARCH) {
+ search_again:
+ /* file system changed while we were in the fix_nodes */
+ PROC_INFO_INC(th->t_super, paste_into_item_restarted);
+ retval =
+- search_for_position_by_key(th->t_super, p_s_key,
+- p_s_search_path);
++ search_for_position_by_key(th->t_super, key,
++ search_path);
+ if (retval == IO_ERROR) {
+ retval = -EIO;
+ goto error_out;
+@@ -1963,19 +1966,19 @@ int reiserfs_paste_into_item(struct reis
+ if (retval == POSITION_FOUND) {
+ reiserfs_warning(inode->i_sb, "PAP-5710",
+ "entry or pasted byte (%K) exists",
+- p_s_key);
++ key);
+ retval = -EEXIST;
+ goto error_out;
+ }
+ #ifdef CONFIG_REISERFS_CHECK
+- check_research_for_paste(p_s_search_path, p_s_key);
++ check_research_for_paste(search_path, key);
+ #endif
+ }
+
+ /* Perform balancing after all resources are collected by fix_nodes, and
+ accessing them will not risk triggering schedule. */
+ if (retval == CARRY_ON) {
+- do_balance(&s_paste_balance, NULL /*ih */ , p_c_body, M_PASTE);
++ do_balance(&s_paste_balance, NULL /*ih */ , body, M_PASTE);
+ return 0;
+ }
+ retval = (retval == NO_DISK_SPACE) ? -ENOSPC : -EIO;
+@@ -1986,17 +1989,23 @@ int reiserfs_paste_into_item(struct reis
+ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota paste_into_item(): freeing %u id=%u type=%c",
+ n_pasted_size, inode->i_uid,
+- key2type(&(p_s_key->on_disk_key)));
++ key2type(&(key->on_disk_key)));
+ #endif
+ DQUOT_FREE_SPACE_NODIRTY(inode, n_pasted_size);
+ return retval;
+ }
+
+-/* Insert new item into the buffer at the path. */
+-int reiserfs_insert_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_path, /* Path to the inserteded item. */
+- const struct cpu_key *key, struct item_head *p_s_ih, /* Pointer to the item header to insert. */
+- struct inode *inode, const char *p_c_body)
+-{ /* Pointer to the bytes to insert. */
++/* Insert new item into the buffer at the path.
++ * th - active transaction handle
++ * path - path to the inserted item
++ * ih - pointer to the item header to insert
++ * body - pointer to the bytes to insert
++ */
++int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
++ struct treepath *path, const struct cpu_key *key,
++ struct item_head *ih, struct inode *inode,
++ const char *body)
++{
+ struct tree_balance s_ins_balance;
+ int retval;
+ int fs_gen = 0;
+@@ -2006,28 +2015,27 @@ int reiserfs_insert_item(struct reiserfs
+
+ if (inode) { /* Do we count quotas for item? */
+ fs_gen = get_generation(inode->i_sb);
+- quota_bytes = ih_item_len(p_s_ih);
++ quota_bytes = ih_item_len(ih);
+
+ /* hack so the quota code doesn't have to guess if the file has
+ ** a tail, links are always tails, so there's no guessing needed
+ */
+- if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_s_ih)) {
++ if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(ih))
+ quota_bytes = inode->i_sb->s_blocksize + UNFM_P_SIZE;
+- }
+ #ifdef REISERQUOTA_DEBUG
+ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota insert_item(): allocating %u id=%u type=%c",
+- quota_bytes, inode->i_uid, head2type(p_s_ih));
++ quota_bytes, inode->i_uid, head2type(ih));
+ #endif
+ /* We can't dirty inode here. It would be immediately written but
+ * appropriate stat item isn't inserted yet... */
+ if (DQUOT_ALLOC_SPACE_NODIRTY(inode, quota_bytes)) {
+- pathrelse(p_s_path);
++ pathrelse(path);
+ return -EDQUOT;
+ }
+ }
+- init_tb_struct(th, &s_ins_balance, th->t_super, p_s_path,
+- IH_SIZE + ih_item_len(p_s_ih));
++ init_tb_struct(th, &s_ins_balance, th->t_super, path,
++ IH_SIZE + ih_item_len(ih));
+ #ifdef DISPLACE_NEW_PACKING_LOCALITIES
+ s_ins_balance.key = key->on_disk_key;
+ #endif
+@@ -2037,12 +2045,12 @@ int reiserfs_insert_item(struct reiserfs
+ }
+
+ while ((retval =
+- fix_nodes(M_INSERT, &s_ins_balance, p_s_ih,
+- p_c_body)) == REPEAT_SEARCH) {
++ fix_nodes(M_INSERT, &s_ins_balance, ih,
++ body)) == REPEAT_SEARCH) {
+ search_again:
+ /* file system changed while we were in the fix_nodes */
+ PROC_INFO_INC(th->t_super, insert_item_restarted);
+- retval = search_item(th->t_super, key, p_s_path);
++ retval = search_item(th->t_super, key, path);
+ if (retval == IO_ERROR) {
+ retval = -EIO;
+ goto error_out;
+@@ -2058,7 +2066,7 @@ int reiserfs_insert_item(struct reiserfs
+
+ /* make balancing after all resources will be collected at a time */
+ if (retval == CARRY_ON) {
+- do_balance(&s_ins_balance, p_s_ih, p_c_body, M_INSERT);
++ do_balance(&s_ins_balance, ih, body, M_INSERT);
+ return 0;
+ }
+
+@@ -2069,7 +2077,7 @@ int reiserfs_insert_item(struct reiserfs
+ #ifdef REISERQUOTA_DEBUG
+ reiserfs_debug(th->t_super, REISERFS_DEBUG_CODE,
+ "reiserquota insert_item(): freeing %u id=%u type=%c",
+- quota_bytes, inode->i_uid, head2type(p_s_ih));
++ quota_bytes, inode->i_uid, head2type(ih));
+ #endif
+ if (inode)
+ DQUOT_FREE_SPACE_NODIRTY(inode, quota_bytes);
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -172,10 +172,12 @@ void reiserfs_unmap_buffer(struct buffer
+ inode */
+ int indirect2direct(struct reiserfs_transaction_handle *th,
+ struct inode *inode, struct page *page,
+- struct treepath *p_s_path, /* path to the indirect item. */
+- const struct cpu_key *p_s_item_key, /* Key to look for unformatted node pointer to be cut. */
++ struct treepath *path, /* path to the indirect item. */
++ const struct cpu_key *item_key, /* Key to look for
++ * unformatted node
++ * pointer to be cut. */
+ loff_t n_new_file_size, /* New file size. */
+- char *p_c_mode)
++ char *mode)
+ {
+ struct super_block *sb = inode->i_sb;
+ struct item_head s_ih;
+@@ -189,10 +191,10 @@ int indirect2direct(struct reiserfs_tran
+
+ REISERFS_SB(sb)->s_indirect2direct++;
+
+- *p_c_mode = M_SKIP_BALANCING;
++ *mode = M_SKIP_BALANCING;
+
+ /* store item head path points to. */
+- copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
++ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+
+ tail_len = (n_new_file_size & (n_block_size - 1));
+ if (get_inode_sd_version(inode) == STAT_DATA_V2)
+@@ -211,14 +213,14 @@ int indirect2direct(struct reiserfs_tran
+
+ tail = (char *)kmap(page); /* this can schedule */
+
+- if (path_changed(&s_ih, p_s_path)) {
++ if (path_changed(&s_ih, path)) {
+ /* re-search indirect item */
+- if (search_for_position_by_key(sb, p_s_item_key, p_s_path)
++ if (search_for_position_by_key(sb, item_key, path)
+ == POSITION_NOT_FOUND)
+ reiserfs_panic(sb, "PAP-5520",
+ "item to be converted %K does not exist",
+- p_s_item_key);
+- copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
++ item_key);
++ copy_item_head(&s_ih, PATH_PITEM_HEAD(path));
+ #ifdef CONFIG_REISERFS_CHECK
+ pos = le_ih_k_offset(&s_ih) - 1 +
+ (ih_item_len(&s_ih) / UNFM_P_SIZE -
+@@ -240,13 +242,13 @@ int indirect2direct(struct reiserfs_tran
+ */
+ tail = tail + (pos & (PAGE_CACHE_SIZE - 1));
+
+- PATH_LAST_POSITION(p_s_path)++;
++ PATH_LAST_POSITION(path)++;
+
+- key = *p_s_item_key;
++ key = *item_key;
+ set_cpu_key_k_type(&key, TYPE_DIRECT);
+ key.key_length = 4;
+ /* Insert tail as new direct item in the tree */
+- if (reiserfs_insert_item(th, p_s_path, &key, &s_ih, inode,
++ if (reiserfs_insert_item(th, path, &key, &s_ih, inode,
+ tail ? tail : NULL) < 0) {
+ /* No disk memory. So we can not convert last unformatted node
+ to the direct item. In this case we used to adjust
+@@ -268,7 +270,7 @@ int indirect2direct(struct reiserfs_tran
+
+ /* We have inserted new direct item and must remove last
+ unformatted node. */
+- *p_c_mode = M_CUT;
++ *mode = M_CUT;
+
+ /* we store position of first direct item in the in-core inode */
+ /* mark_file_with_tail (inode, pos1 + 1); */
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -694,9 +694,9 @@ static inline void cpu_key_k_offset_dec(
+ #define is_indirect_cpu_ih(ih) (is_indirect_cpu_key (&((ih)->ih_key)))
+ #define is_statdata_cpu_ih(ih) (is_statdata_cpu_key (&((ih)->ih_key)))
+
+-#define I_K_KEY_IN_ITEM(p_s_ih, p_s_key, n_blocksize) \
+- ( ! COMP_SHORT_KEYS(p_s_ih, p_s_key) && \
+- I_OFF_BYTE_IN_ITEM(p_s_ih, k_offset (p_s_key), n_blocksize) )
++#define I_K_KEY_IN_ITEM(ih, key, n_blocksize) \
++ (!COMP_SHORT_KEYS(ih, key) && \
++ I_OFF_BYTE_IN_ITEM(ih, k_offset(key), n_blocksize))
+
+ /* maximal length of item */
+ #define MAX_ITEM_LEN(block_size) (block_size - BLKH_SIZE - IH_SIZE)
+@@ -1196,33 +1196,33 @@ struct treepath {
+ struct treepath var = {.path_length = ILLEGAL_PATH_ELEMENT_OFFSET, .reada = 0,}
+
+ /* Get path element by path and path position. */
+-#define PATH_OFFSET_PELEMENT(p_s_path,n_offset) ((p_s_path)->path_elements +(n_offset))
++#define PATH_OFFSET_PELEMENT(path, n_offset) ((path)->path_elements + (n_offset))
+
+ /* Get buffer header at the path by path and path position. */
+-#define PATH_OFFSET_PBUFFER(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_buffer)
++#define PATH_OFFSET_PBUFFER(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_buffer)
+
+ /* Get position in the element at the path by path and path position. */
+-#define PATH_OFFSET_POSITION(p_s_path,n_offset) (PATH_OFFSET_PELEMENT(p_s_path,n_offset)->pe_position)
++#define PATH_OFFSET_POSITION(path, n_offset) (PATH_OFFSET_PELEMENT(path, n_offset)->pe_position)
+
+-#define PATH_PLAST_BUFFER(p_s_path) (PATH_OFFSET_PBUFFER((p_s_path), (p_s_path)->path_length))
++#define PATH_PLAST_BUFFER(path) (PATH_OFFSET_PBUFFER((path), (path)->path_length))
+ /* you know, to the person who didn't
+ write this the macro name does not
+ at first suggest what it does.
+ Maybe POSITION_FROM_PATH_END? Or
+ maybe we should just focus on
+ dumping paths... -Hans */
+-#define PATH_LAST_POSITION(p_s_path) (PATH_OFFSET_POSITION((p_s_path), (p_s_path)->path_length))
++#define PATH_LAST_POSITION(path) (PATH_OFFSET_POSITION((path), (path)->path_length))
+
+-#define PATH_PITEM_HEAD(p_s_path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_path),PATH_LAST_POSITION(p_s_path))
++#define PATH_PITEM_HEAD(path) B_N_PITEM_HEAD(PATH_PLAST_BUFFER(path), PATH_LAST_POSITION(path))
+
+ /* in do_balance leaf has h == 0 in contrast with path structure,
+ where root has level == 0. That is why we need these defines */
+-#define PATH_H_PBUFFER(p_s_path, h) PATH_OFFSET_PBUFFER (p_s_path, p_s_path->path_length - (h)) /* tb->S[h] */
++#define PATH_H_PBUFFER(path, h) PATH_OFFSET_PBUFFER (path, path->path_length - (h)) /* tb->S[h] */
+ #define PATH_H_PPARENT(path, h) PATH_H_PBUFFER (path, (h) + 1) /* tb->F[h] or tb->S[0]->b_parent */
+ #define PATH_H_POSITION(path, h) PATH_OFFSET_POSITION (path, path->path_length - (h))
+ #define PATH_H_B_ITEM_ORDER(path, h) PATH_H_POSITION(path, h + 1) /* tb->S[h]->b_item_order */
+
+-#define PATH_H_PATH_OFFSET(p_s_path, n_h) ((p_s_path)->path_length - (n_h))
++#define PATH_H_PATH_OFFSET(path, n_h) ((path)->path_length - (n_h))
+
+ #define get_last_bh(path) PATH_PLAST_BUFFER(path)
+ #define get_ih(path) PATH_PITEM_HEAD(path)
+@@ -1512,7 +1512,7 @@ extern struct item_operations *item_ops[
+ #define COMP_SHORT_KEYS comp_short_keys
+
+ /* number of blocks pointed to by the indirect item */
+-#define I_UNFM_NUM(p_s_ih) ( ih_item_len(p_s_ih) / UNFM_P_SIZE )
++#define I_UNFM_NUM(ih) (ih_item_len(ih) / UNFM_P_SIZE)
+
+ /* the used space within the unformatted node corresponding to pos within the item pointed to by ih */
+ #define I_POS_UNFM_SIZE(ih,pos,size) (((pos) == I_UNFM_NUM(ih) - 1 ) ? (size) - ih_free_space(ih) : (size))
+@@ -1793,8 +1793,8 @@ int reiserfs_convert_objectid_map_v1(str
+
+ /* stree.c */
+ int B_IS_IN_TREE(const struct buffer_head *);
+-extern void copy_item_head(struct item_head *p_v_to,
+- const struct item_head *p_v_from);
++extern void copy_item_head(struct item_head *to,
++ const struct item_head *from);
+
+ // first key is in cpu form, second - le
+ extern int comp_short_keys(const struct reiserfs_key *le_key,
+@@ -1829,20 +1829,20 @@ static inline void copy_key(struct reise
+ memcpy(to, from, KEY_SIZE);
+ }
+
+-int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path);
+-const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
++int comp_items(const struct item_head *stored_ih, const struct treepath *path);
++const struct reiserfs_key *get_rkey(const struct treepath *chk_path,
+ const struct super_block *sb);
+ int search_by_key(struct super_block *, const struct cpu_key *,
+ struct treepath *, int);
+ #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
+ int search_for_position_by_key(struct super_block *sb,
+- const struct cpu_key *p_s_cpu_key,
+- struct treepath *p_s_search_path);
++ const struct cpu_key *cpu_key,
++ struct treepath *search_path);
+ extern void decrement_bcount(struct buffer_head *bh);
+-void decrement_counters_in_path(struct treepath *p_s_search_path);
+-void pathrelse(struct treepath *p_s_search_path);
++void decrement_counters_in_path(struct treepath *search_path);
++void pathrelse(struct treepath *search_path);
+ int reiserfs_check_path(struct treepath *p);
+-void pathrelse_and_restore(struct super_block *s, struct treepath *p_s_search_path);
++void pathrelse_and_restore(struct super_block *s, struct treepath *search_path);
+
+ int reiserfs_insert_item(struct reiserfs_transaction_handle *th,
+ struct treepath *path,
+@@ -1865,7 +1865,7 @@ int reiserfs_cut_from_item(struct reiser
+ int reiserfs_delete_item(struct reiserfs_transaction_handle *th,
+ struct treepath *path,
+ const struct cpu_key *key,
+- struct inode *inode, struct buffer_head *p_s_un_bh);
++ struct inode *inode, struct buffer_head *un_bh);
+
+ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
+ struct inode *inode, struct reiserfs_key *key);
+@@ -2005,7 +2005,7 @@ extern const struct address_space_operat
+ /* fix_nodes.c */
+
+ int fix_nodes(int n_op_mode, struct tree_balance *tb,
+- struct item_head *p_s_ins_ih, const void *);
++ struct item_head *ins_ih, const void *);
+ void unfix_nodes(struct tree_balance *);
+
+ /* prints.c */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rename p_s_bh to bh
+
+ This patch is a simple s/p_s_bh/bh/g to the reiserfs code. This is the second
+ in a series of patches to rip out some of the awful variable naming in
+ reiserfs.
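+
+ For reference, a rename of this shape can be approximated mechanically.
+ A sketch (illustrative only, not necessarily the command actually used;
+ the posted patch also re-wraps long lines and drops braces around
+ single-statement branches, which no plain substitution produces):
+
+   # \b keeps the match on whole identifiers only
+   sed -i 's/\bp_s_bh\b/bh/g' fs/reiserfs/fix_node.c \
+       fs/reiserfs/stree.c include/linux/reiserfs_fs.h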
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/fix_node.c | 94 ++++++++++++++++++++------------------------
+ fs/reiserfs/stree.c | 63 ++++++++++++++---------------
+ include/linux/reiserfs_fs.h | 37 ++++++++---------
+ 3 files changed, 94 insertions(+), 100 deletions(-)
+
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -1887,7 +1887,7 @@ static int check_balance(int mode,
+ /* Check whether parent at the path is the really parent of the current node.*/
+ static int get_direct_parent(struct tree_balance *p_s_tb, int n_h)
+ {
+- struct buffer_head *p_s_bh;
++ struct buffer_head *bh;
+ struct treepath *p_s_path = p_s_tb->tb_path;
+ int n_position,
+ n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
+@@ -1909,21 +1909,21 @@ static int get_direct_parent(struct tree
+ }
+
+ if (!B_IS_IN_TREE
+- (p_s_bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1)))
++ (bh = PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1)))
+ return REPEAT_SEARCH; /* Parent in the path is not in the tree. */
+
+ if ((n_position =
+ PATH_OFFSET_POSITION(p_s_path,
+- n_path_offset - 1)) > B_NR_ITEMS(p_s_bh))
++ n_path_offset - 1)) > B_NR_ITEMS(bh))
+ return REPEAT_SEARCH;
+
+- if (B_N_CHILD_NUM(p_s_bh, n_position) !=
++ if (B_N_CHILD_NUM(bh, n_position) !=
+ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset)->b_blocknr)
+ /* Parent in the path is not parent of the current node in the tree. */
+ return REPEAT_SEARCH;
+
+- if (buffer_locked(p_s_bh)) {
+- __wait_on_buffer(p_s_bh);
++ if (buffer_locked(bh)) {
++ __wait_on_buffer(bh);
+ if (FILESYSTEM_CHANGED_TB(p_s_tb))
+ return REPEAT_SEARCH;
+ }
+@@ -1943,29 +1943,29 @@ static int get_neighbors(struct tree_bal
+ n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1);
+ unsigned long n_son_number;
+ struct super_block *sb = p_s_tb->tb_sb;
+- struct buffer_head *p_s_bh;
++ struct buffer_head *bh;
+
+ PROC_INFO_INC(sb, get_neighbors[n_h]);
+
+ if (p_s_tb->lnum[n_h]) {
+ /* We need left neighbor to balance S[n_h]. */
+ PROC_INFO_INC(sb, need_l_neighbor[n_h]);
+- p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
++ bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+- RFALSE(p_s_bh == p_s_tb->FL[n_h] &&
++ RFALSE(bh == p_s_tb->FL[n_h] &&
+ !PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset),
+ "PAP-8270: invalid position in the parent");
+
+ n_child_position =
+- (p_s_bh ==
++ (bh ==
+ p_s_tb->FL[n_h]) ? p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb->
+ FL[n_h]);
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position);
+- p_s_bh = sb_bread(sb, n_son_number);
+- if (!p_s_bh)
++ bh = sb_bread(sb, n_son_number);
++ if (!bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- brelse(p_s_bh);
++ brelse(bh);
+ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+@@ -1973,48 +1973,48 @@ static int get_neighbors(struct tree_bal
+ RFALSE(!B_IS_IN_TREE(p_s_tb->FL[n_h]) ||
+ n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) ||
+ B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) !=
+- p_s_bh->b_blocknr, "PAP-8275: invalid parent");
+- RFALSE(!B_IS_IN_TREE(p_s_bh), "PAP-8280: invalid child");
++ bh->b_blocknr, "PAP-8275: invalid parent");
++ RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
+ RFALSE(!n_h &&
+- B_FREE_SPACE(p_s_bh) !=
+- MAX_CHILD_SIZE(p_s_bh) -
++ B_FREE_SPACE(bh) !=
++ MAX_CHILD_SIZE(bh) -
+ dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)),
+ "PAP-8290: invalid child size of left neighbor");
+
+ brelse(p_s_tb->L[n_h]);
+- p_s_tb->L[n_h] = p_s_bh;
++ p_s_tb->L[n_h] = bh;
+ }
+
+ if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. */
+ PROC_INFO_INC(sb, need_r_neighbor[n_h]);
+- p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
++ bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+- RFALSE(p_s_bh == p_s_tb->FR[n_h] &&
++ RFALSE(bh == p_s_tb->FR[n_h] &&
+ PATH_OFFSET_POSITION(p_s_tb->tb_path,
+ n_path_offset) >=
+- B_NR_ITEMS(p_s_bh),
++ B_NR_ITEMS(bh),
+ "PAP-8295: invalid position in the parent");
+
+ n_child_position =
+- (p_s_bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0;
++ (bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0;
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position);
+- p_s_bh = sb_bread(sb, n_son_number);
+- if (!p_s_bh)
++ bh = sb_bread(sb, n_son_number);
++ if (!bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+- brelse(p_s_bh);
++ brelse(bh);
+ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+ brelse(p_s_tb->R[n_h]);
+- p_s_tb->R[n_h] = p_s_bh;
++ p_s_tb->R[n_h] = bh;
+
+ RFALSE(!n_h
+- && B_FREE_SPACE(p_s_bh) !=
+- MAX_CHILD_SIZE(p_s_bh) -
++ && B_FREE_SPACE(bh) !=
++ MAX_CHILD_SIZE(bh) -
+ dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)),
+ "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
+- B_FREE_SPACE(p_s_bh), MAX_CHILD_SIZE(p_s_bh),
++ B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
+ dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)));
+
+ }
+@@ -2090,51 +2090,45 @@ static int get_mem_for_virtual_node(stru
+
+ #ifdef CONFIG_REISERFS_CHECK
+ static void tb_buffer_sanity_check(struct super_block *sb,
+- struct buffer_head *p_s_bh,
++ struct buffer_head *bh,
+ const char *descr, int level)
+ {
+- if (p_s_bh) {
+- if (atomic_read(&(p_s_bh->b_count)) <= 0) {
++ if (bh) {
++ if (atomic_read(&(bh->b_count)) <= 0)
+
+ reiserfs_panic(sb, "jmacd-1", "negative or zero "
+ "reference counter for buffer %s[%d] "
+- "(%b)", descr, level, p_s_bh);
+- }
++ "(%b)", descr, level, bh);
+
+- if (!buffer_uptodate(p_s_bh)) {
++ if (!buffer_uptodate(bh))
+ reiserfs_panic(sb, "jmacd-2", "buffer is not up "
+ "to date %s[%d] (%b)",
+- descr, level, p_s_bh);
+- }
++ descr, level, bh);
+
+- if (!B_IS_IN_TREE(p_s_bh)) {
++ if (!B_IS_IN_TREE(bh))
+ reiserfs_panic(sb, "jmacd-3", "buffer is not "
+ "in tree %s[%d] (%b)",
+- descr, level, p_s_bh);
+- }
++ descr, level, bh);
+
+- if (p_s_bh->b_bdev != sb->s_bdev) {
++ if (bh->b_bdev != sb->s_bdev)
+ reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
+ "device %s[%d] (%b)",
+- descr, level, p_s_bh);
+- }
++ descr, level, bh);
+
+- if (p_s_bh->b_size != sb->s_blocksize) {
++ if (bh->b_size != sb->s_blocksize)
+ reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
+ "blocksize %s[%d] (%b)",
+- descr, level, p_s_bh);
+- }
++ descr, level, bh);
+
+- if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(sb)) {
++ if (bh->b_blocknr > SB_BLOCK_COUNT(sb))
+ reiserfs_panic(sb, "jmacd-6", "buffer block "
+ "number too high %s[%d] (%b)",
+- descr, level, p_s_bh);
+- }
++ descr, level, bh);
+ }
+ }
+ #else
+ static void tb_buffer_sanity_check(struct super_block *sb,
+- struct buffer_head *p_s_bh,
++ struct buffer_head *bh,
+ const char *descr, int level)
+ {;
+ }
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -56,13 +56,13 @@
+ #include <linux/quotaops.h>
+
+ /* Does the buffer contain a disk block which is in the tree. */
+-inline int B_IS_IN_TREE(const struct buffer_head *p_s_bh)
++inline int B_IS_IN_TREE(const struct buffer_head *bh)
+ {
+
+- RFALSE(B_LEVEL(p_s_bh) > MAX_HEIGHT,
+- "PAP-1010: block (%b) has too big level (%z)", p_s_bh, p_s_bh);
++ RFALSE(B_LEVEL(bh) > MAX_HEIGHT,
++ "PAP-1010: block (%b) has too big level (%z)", bh, bh);
+
+- return (B_LEVEL(p_s_bh) != FREE_LEVEL);
++ return (B_LEVEL(bh) != FREE_LEVEL);
+ }
+
+ //
+@@ -579,7 +579,7 @@ int search_by_key(struct super_block *sb
+ {
+ b_blocknr_t n_block_number;
+ int expected_level;
+- struct buffer_head *p_s_bh;
++ struct buffer_head *bh;
+ struct path_element *p_s_last_element;
+ int n_node_level, n_retval;
+ int right_neighbor_of_leaf_node;
+@@ -626,15 +626,14 @@ int search_by_key(struct super_block *sb
+
+ /* Read the next tree node, and set the last element in the path to
+ have a pointer to it. */
+- if ((p_s_bh = p_s_last_element->pe_buffer =
++ if ((bh = p_s_last_element->pe_buffer =
+ sb_getblk(sb, n_block_number))) {
+- if (!buffer_uptodate(p_s_bh) && reada_count > 1) {
++ if (!buffer_uptodate(bh) && reada_count > 1)
+ search_by_key_reada(sb, reada_bh,
+ reada_blocks, reada_count);
+- }
+- ll_rw_block(READ, 1, &p_s_bh);
+- wait_on_buffer(p_s_bh);
+- if (!buffer_uptodate(p_s_bh))
++ ll_rw_block(READ, 1, &bh);
++ wait_on_buffer(bh);
++ if (!buffer_uptodate(bh))
+ goto io_error;
+ } else {
+ io_error:
+@@ -651,8 +650,8 @@ int search_by_key(struct super_block *sb
+ to search is still in the tree rooted from the current buffer. If
+ not then repeat search from the root. */
+ if (fs_changed(fs_gen, sb) &&
+- (!B_IS_IN_TREE(p_s_bh) ||
+- B_LEVEL(p_s_bh) != expected_level ||
++ (!B_IS_IN_TREE(bh) ||
++ B_LEVEL(bh) != expected_level ||
+ !key_in_buffer(p_s_search_path, p_s_key, sb))) {
+ PROC_INFO_INC(sb, search_by_key_fs_changed);
+ PROC_INFO_INC(sb, search_by_key_restarted);
+@@ -686,25 +685,25 @@ int search_by_key(struct super_block *sb
+
+ // make sure, that the node contents look like a node of
+ // certain level
+- if (!is_tree_node(p_s_bh, expected_level)) {
++ if (!is_tree_node(bh, expected_level)) {
+ reiserfs_error(sb, "vs-5150",
+ "invalid format found in block %ld. "
+- "Fsck?", p_s_bh->b_blocknr);
++ "Fsck?", bh->b_blocknr);
+ pathrelse(p_s_search_path);
+ return IO_ERROR;
+ }
+
+ /* ok, we have acquired next formatted node in the tree */
+- n_node_level = B_LEVEL(p_s_bh);
++ n_node_level = B_LEVEL(bh);
+
+- PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level - 1);
++ PROC_INFO_BH_STAT(sb, bh, n_node_level - 1);
+
+ RFALSE(n_node_level < n_stop_level,
+ "vs-5152: tree level (%d) is less than stop level (%d)",
+ n_node_level, n_stop_level);
+
+- n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(p_s_bh, 0),
+- B_NR_ITEMS(p_s_bh),
++ n_retval = bin_search(p_s_key, B_N_PITEM_HEAD(bh, 0),
++ B_NR_ITEMS(bh),
+ (n_node_level ==
+ DISK_LEAF_NODE_LEVEL) ? IH_SIZE :
+ KEY_SIZE,
+@@ -726,13 +725,13 @@ int search_by_key(struct super_block *sb
+ an internal node. Now we calculate child block number by
+ position in the node. */
+ n_block_number =
+- B_N_CHILD_NUM(p_s_bh, p_s_last_element->pe_position);
++ B_N_CHILD_NUM(bh, p_s_last_element->pe_position);
+
+ /* if we are going to read leaf nodes, try for read ahead as well */
+ if ((p_s_search_path->reada & PATH_READA) &&
+ n_node_level == DISK_LEAF_NODE_LEVEL + 1) {
+ int pos = p_s_last_element->pe_position;
+- int limit = B_NR_ITEMS(p_s_bh);
++ int limit = B_NR_ITEMS(bh);
+ struct reiserfs_key *le_key;
+
+ if (p_s_search_path->reada & PATH_READA_BACK)
+@@ -741,7 +740,7 @@ int search_by_key(struct super_block *sb
+ if (pos == limit)
+ break;
+ reada_blocks[reada_count++] =
+- B_N_CHILD_NUM(p_s_bh, pos);
++ B_N_CHILD_NUM(bh, pos);
+ if (p_s_search_path->reada & PATH_READA_BACK)
+ pos--;
+ else
+@@ -750,7 +749,7 @@ int search_by_key(struct super_block *sb
+ /*
+ * check to make sure we're in the same object
+ */
+- le_key = B_N_PDELIM_KEY(p_s_bh, pos);
++ le_key = B_N_PDELIM_KEY(bh, pos);
+ if (le32_to_cpu(le_key->k_objectid) !=
+ p_s_key->on_disk_key.k_objectid) {
+ break;
+@@ -851,15 +850,15 @@ int search_for_position_by_key(struct su
+ /* Compare given item and item pointed to by the path. */
+ int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path)
+ {
+- struct buffer_head *p_s_bh;
++ struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path);
+ struct item_head *ih;
+
+ /* Last buffer at the path is not in the tree. */
+- if (!B_IS_IN_TREE(p_s_bh = PATH_PLAST_BUFFER(p_s_path)))
++ if (!B_IS_IN_TREE(bh))
+ return 1;
+
+ /* Last path position is invalid. */
+- if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(p_s_bh))
++ if (PATH_LAST_POSITION(p_s_path) >= B_NR_ITEMS(bh))
+ return 1;
+
+ /* we need only to know, whether it is the same item */
+@@ -959,7 +958,7 @@ static char prepare_for_delete_or_cut(st
+ {
+ struct super_block *sb = inode->i_sb;
+ struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path);
+- struct buffer_head *p_s_bh = PATH_PLAST_BUFFER(p_s_path);
++ struct buffer_head *bh = PATH_PLAST_BUFFER(p_s_path);
+
+ BUG_ON(!th->t_trans_id);
+
+@@ -1003,7 +1002,7 @@ static char prepare_for_delete_or_cut(st
+ do {
+ need_re_search = 0;
+ *p_n_cut_size = 0;
+- p_s_bh = PATH_PLAST_BUFFER(p_s_path);
++ bh = PATH_PLAST_BUFFER(p_s_path);
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+ pos = I_UNFM_NUM(&s_ih);
+
+@@ -1019,13 +1018,13 @@ static char prepare_for_delete_or_cut(st
+ break;
+ }
+
+- unfm = (__le32 *)B_I_PITEM(p_s_bh, &s_ih) + pos - 1;
++ unfm = (__le32 *)B_I_PITEM(bh, &s_ih) + pos - 1;
+ block = get_block_num(unfm, 0);
+
+ if (block != 0) {
+- reiserfs_prepare_for_journal(sb, p_s_bh, 1);
++ reiserfs_prepare_for_journal(sb, bh, 1);
+ put_block_num(unfm, 0, 0);
+- journal_mark_dirty (th, sb, p_s_bh);
++ journal_mark_dirty(th, sb, bh);
+ reiserfs_free_block(th, inode, block, 1);
+ }
+
+@@ -1049,7 +1048,7 @@ static char prepare_for_delete_or_cut(st
+ /* a trick. If the buffer has been logged, this will do nothing. If
+ ** we've broken the loop without logging it, it will restore the
+ ** buffer */
+- reiserfs_restore_prepared_buffer(sb, p_s_bh);
++ reiserfs_restore_prepared_buffer(sb, bh);
+ } while (need_re_search &&
+ search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND);
+ pos_in_item(p_s_path) = pos * UNFM_P_SIZE;
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -751,25 +751,25 @@ struct block_head {
+ #define DISK_LEAF_NODE_LEVEL 1 /* Leaf node level. */
+
+ /* Given the buffer head of a formatted node, resolve to the block head of that node. */
+-#define B_BLK_HEAD(p_s_bh) ((struct block_head *)((p_s_bh)->b_data))
++#define B_BLK_HEAD(bh) ((struct block_head *)((bh)->b_data))
+ /* Number of items that are in buffer. */
+-#define B_NR_ITEMS(p_s_bh) (blkh_nr_item(B_BLK_HEAD(p_s_bh)))
+-#define B_LEVEL(p_s_bh) (blkh_level(B_BLK_HEAD(p_s_bh)))
+-#define B_FREE_SPACE(p_s_bh) (blkh_free_space(B_BLK_HEAD(p_s_bh)))
+-
+-#define PUT_B_NR_ITEMS(p_s_bh,val) do { set_blkh_nr_item(B_BLK_HEAD(p_s_bh),val); } while (0)
+-#define PUT_B_LEVEL(p_s_bh,val) do { set_blkh_level(B_BLK_HEAD(p_s_bh),val); } while (0)
+-#define PUT_B_FREE_SPACE(p_s_bh,val) do { set_blkh_free_space(B_BLK_HEAD(p_s_bh),val); } while (0)
++#define B_NR_ITEMS(bh) (blkh_nr_item(B_BLK_HEAD(bh)))
++#define B_LEVEL(bh) (blkh_level(B_BLK_HEAD(bh)))
++#define B_FREE_SPACE(bh) (blkh_free_space(B_BLK_HEAD(bh)))
++
++#define PUT_B_NR_ITEMS(bh, val) do { set_blkh_nr_item(B_BLK_HEAD(bh), val); } while (0)
++#define PUT_B_LEVEL(bh, val) do { set_blkh_level(B_BLK_HEAD(bh), val); } while (0)
++#define PUT_B_FREE_SPACE(bh, val) do { set_blkh_free_space(B_BLK_HEAD(bh), val); } while (0)
+
+ /* Get right delimiting key. -- little endian */
+-#define B_PRIGHT_DELIM_KEY(p_s_bh) (&(blk_right_delim_key(B_BLK_HEAD(p_s_bh))))
++#define B_PRIGHT_DELIM_KEY(bh) (&(blk_right_delim_key(B_BLK_HEAD(bh))))
+
+ /* Does the buffer contain a disk leaf. */
+-#define B_IS_ITEMS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) == DISK_LEAF_NODE_LEVEL)
++#define B_IS_ITEMS_LEVEL(bh) (B_LEVEL(bh) == DISK_LEAF_NODE_LEVEL)
+
+ /* Does the buffer contain a disk internal node */
+-#define B_IS_KEYS_LEVEL(p_s_bh) (B_LEVEL(p_s_bh) > DISK_LEAF_NODE_LEVEL \
+- && B_LEVEL(p_s_bh) <= MAX_HEIGHT)
++#define B_IS_KEYS_LEVEL(bh) (B_LEVEL(bh) > DISK_LEAF_NODE_LEVEL \
++ && B_LEVEL(bh) <= MAX_HEIGHT)
+
+ /***************************************************************************/
+ /* STAT DATA */
+@@ -1119,12 +1119,13 @@ struct disk_child {
+ #define put_dc_size(dc_p, val) do { (dc_p)->dc_size = cpu_to_le16(val); } while(0)
+
+ /* Get disk child by buffer header and position in the tree node. */
+-#define B_N_CHILD(p_s_bh,n_pos) ((struct disk_child *)\
+-((p_s_bh)->b_data+BLKH_SIZE+B_NR_ITEMS(p_s_bh)*KEY_SIZE+DC_SIZE*(n_pos)))
++#define B_N_CHILD(bh, n_pos) ((struct disk_child *)\
++((bh)->b_data + BLKH_SIZE + B_NR_ITEMS(bh) * KEY_SIZE + DC_SIZE * (n_pos)))
+
+ /* Get disk child number by buffer header and position in the tree node. */
+-#define B_N_CHILD_NUM(p_s_bh,n_pos) (dc_block_number(B_N_CHILD(p_s_bh,n_pos)))
+-#define PUT_B_N_CHILD_NUM(p_s_bh,n_pos, val) (put_dc_block_number(B_N_CHILD(p_s_bh,n_pos), val ))
++#define B_N_CHILD_NUM(bh, n_pos) (dc_block_number(B_N_CHILD(bh, n_pos)))
++#define PUT_B_N_CHILD_NUM(bh, n_pos, val) \
++ (put_dc_block_number(B_N_CHILD(bh, n_pos), val))
+
+ /* maximal value of field child_size in structure disk_child */
+ /* child size is the combined size of all items and their headers */
+@@ -1837,7 +1838,7 @@ int search_by_key(struct super_block *,
+ int search_for_position_by_key(struct super_block *sb,
+ const struct cpu_key *p_s_cpu_key,
+ struct treepath *p_s_search_path);
+-extern void decrement_bcount(struct buffer_head *p_s_bh);
++extern void decrement_bcount(struct buffer_head *bh);
+ void decrement_counters_in_path(struct treepath *p_s_search_path);
+ void pathrelse(struct treepath *p_s_search_path);
+ int reiserfs_check_path(struct treepath *p);
+@@ -1978,7 +1979,7 @@ int reiserfs_global_version_in_proc(char
+ #define PROC_INFO_MAX( sb, field, value ) VOID_V
+ #define PROC_INFO_INC( sb, field ) VOID_V
+ #define PROC_INFO_ADD( sb, field, val ) VOID_V
+-#define PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level) VOID_V
++#define PROC_INFO_BH_STAT(sb, bh, n_node_level) VOID_V
+ #endif
+
+ /* dir.c */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rename p_s_inode to inode
+
+ This patch is a simple s/p_s_inode/inode/g to the reiserfs code. This is the
+ third in a series of patches to rip out some of the awful variable naming in
+ reiserfs.
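+
+ The same mechanical approach sketched under the p_s_bh rename applies
+ here as well, e.g. (again illustrative only):
+
+   sed -i 's/\bp_s_inode\b/inode/g' fs/reiserfs/file.c fs/reiserfs/inode.c \
+       fs/reiserfs/stree.c fs/reiserfs/tail_conversion.c \
+       include/linux/reiserfs_fs.h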
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/file.c | 16 +++---
+ fs/reiserfs/inode.c | 43 ++++++++---------
+ fs/reiserfs/stree.c | 103 +++++++++++++++++++++---------------------
+ fs/reiserfs/tail_conversion.c | 18 ++++---
+ include/linux/reiserfs_fs.h | 4 -
+ 5 files changed, 95 insertions(+), 89 deletions(-)
+
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -137,17 +137,17 @@ static void reiserfs_vfs_truncate_file(s
+ static int reiserfs_sync_file(struct file *p_s_filp,
+ struct dentry *p_s_dentry, int datasync)
+ {
+- struct inode *p_s_inode = p_s_dentry->d_inode;
++ struct inode *inode = p_s_dentry->d_inode;
+ int n_err;
+ int barrier_done;
+
+- BUG_ON(!S_ISREG(p_s_inode->i_mode));
+- n_err = sync_mapping_buffers(p_s_inode->i_mapping);
+- reiserfs_write_lock(p_s_inode->i_sb);
+- barrier_done = reiserfs_commit_for_inode(p_s_inode);
+- reiserfs_write_unlock(p_s_inode->i_sb);
+- if (barrier_done != 1 && reiserfs_barrier_flush(p_s_inode->i_sb))
+- blkdev_issue_flush(p_s_inode->i_sb->s_bdev, NULL);
++ BUG_ON(!S_ISREG(inode->i_mode));
++ n_err = sync_mapping_buffers(inode->i_mapping);
++ reiserfs_write_lock(inode->i_sb);
++ barrier_done = reiserfs_commit_for_inode(inode);
++ reiserfs_write_unlock(inode->i_sb);
++ if (barrier_done != 1 && reiserfs_barrier_flush(inode->i_sb))
++ blkdev_issue_flush(inode->i_sb->s_bdev, NULL);
+ if (barrier_done < 0)
+ return barrier_done;
+ return (n_err < 0) ? -EIO : 0;
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1992,7 +1992,7 @@ int reiserfs_new_inode(struct reiserfs_t
+ **
+ ** on failure, nonzero is returned, page_result and bh_result are untouched.
+ */
+-static int grab_tail_page(struct inode *p_s_inode,
++static int grab_tail_page(struct inode *inode,
+ struct page **page_result,
+ struct buffer_head **bh_result)
+ {
+@@ -2000,11 +2000,11 @@ static int grab_tail_page(struct inode *
+ /* we want the page with the last byte in the file,
+ ** not the page that will hold the next byte for appending
+ */
+- unsigned long index = (p_s_inode->i_size - 1) >> PAGE_CACHE_SHIFT;
++ unsigned long index = (inode->i_size - 1) >> PAGE_CACHE_SHIFT;
+ unsigned long pos = 0;
+ unsigned long start = 0;
+- unsigned long blocksize = p_s_inode->i_sb->s_blocksize;
+- unsigned long offset = (p_s_inode->i_size) & (PAGE_CACHE_SIZE - 1);
++ unsigned long blocksize = inode->i_sb->s_blocksize;
++ unsigned long offset = (inode->i_size) & (PAGE_CACHE_SIZE - 1);
+ struct buffer_head *bh;
+ struct buffer_head *head;
+ struct page *page;
+@@ -2018,7 +2018,7 @@ static int grab_tail_page(struct inode *
+ if ((offset & (blocksize - 1)) == 0) {
+ return -ENOENT;
+ }
+- page = grab_cache_page(p_s_inode->i_mapping, index);
++ page = grab_cache_page(inode->i_mapping, index);
+ error = -ENOMEM;
+ if (!page) {
+ goto out;
+@@ -2047,7 +2047,7 @@ static int grab_tail_page(struct inode *
+ ** I've screwed up the code to find the buffer, or the code to
+ ** call prepare_write
+ */
+- reiserfs_error(p_s_inode->i_sb, "clm-6000",
++ reiserfs_error(inode->i_sb, "clm-6000",
+ "error reading block %lu", bh->b_blocknr);
+ error = -EIO;
+ goto unlock;
+@@ -2070,27 +2070,28 @@ static int grab_tail_page(struct inode *
+ **
+ ** some code taken from block_truncate_page
+ */
+-int reiserfs_truncate_file(struct inode *p_s_inode, int update_timestamps)
++int reiserfs_truncate_file(struct inode *inode, int update_timestamps)
+ {
+ struct reiserfs_transaction_handle th;
+ /* we want the offset for the first byte after the end of the file */
+- unsigned long offset = p_s_inode->i_size & (PAGE_CACHE_SIZE - 1);
+- unsigned blocksize = p_s_inode->i_sb->s_blocksize;
++ unsigned long offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
++ unsigned blocksize = inode->i_sb->s_blocksize;
+ unsigned length;
+ struct page *page = NULL;
+ int error;
+ struct buffer_head *bh = NULL;
+ int err2;
+
+- reiserfs_write_lock(p_s_inode->i_sb);
++ reiserfs_write_lock(inode->i_sb);
+
+- if (p_s_inode->i_size > 0) {
+- if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
++ if (inode->i_size > 0) {
++ error = grab_tail_page(inode, &page, &bh);
++ if (error) {
+ // -ENOENT means we truncated past the end of the file,
+ // and get_block_create_0 could not find a block to read in,
+ // which is ok.
+ if (error != -ENOENT)
+- reiserfs_error(p_s_inode->i_sb, "clm-6001",
++ reiserfs_error(inode->i_sb, "clm-6001",
+ "grab_tail_page failed %d",
+ error);
+ page = NULL;
+@@ -2108,19 +2109,19 @@ int reiserfs_truncate_file(struct inode
+ /* it is enough to reserve space in transaction for 2 balancings:
+ one for "save" link adding and another for the first
+ cut_from_item. 1 is for update_sd */
+- error = journal_begin(&th, p_s_inode->i_sb,
++ error = journal_begin(&th, inode->i_sb,
+ JOURNAL_PER_BALANCE_CNT * 2 + 1);
+ if (error)
+ goto out;
+- reiserfs_update_inode_transaction(p_s_inode);
++ reiserfs_update_inode_transaction(inode);
+ if (update_timestamps)
+ /* we are doing real truncate: if the system crashes before the last
+ transaction of truncating gets committed - on reboot the file
+ either appears truncated properly or not truncated at all */
+- add_save_link(&th, p_s_inode, 1);
+- err2 = reiserfs_do_truncate(&th, p_s_inode, page, update_timestamps);
++ add_save_link(&th, inode, 1);
++ err2 = reiserfs_do_truncate(&th, inode, page, update_timestamps);
+ error =
+- journal_end(&th, p_s_inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
++ journal_end(&th, inode->i_sb, JOURNAL_PER_BALANCE_CNT * 2 + 1);
+ if (error)
+ goto out;
+
+@@ -2131,7 +2132,7 @@ int reiserfs_truncate_file(struct inode
+ }
+
+ if (update_timestamps) {
+- error = remove_save_link(p_s_inode, 1 /* truncate */ );
++ error = remove_save_link(inode, 1 /* truncate */);
+ if (error)
+ goto out;
+ }
+@@ -2150,14 +2151,14 @@ int reiserfs_truncate_file(struct inode
+ page_cache_release(page);
+ }
+
+- reiserfs_write_unlock(p_s_inode->i_sb);
++ reiserfs_write_unlock(inode->i_sb);
+ return 0;
+ out:
+ if (page) {
+ unlock_page(page);
+ page_cache_release(page);
+ }
+- reiserfs_write_unlock(p_s_inode->i_sb);
++ reiserfs_write_unlock(inode->i_sb);
+ return error;
+ }
+
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1143,10 +1143,11 @@ char head2type(struct item_head *ih)
+ /* Delete object item. */
+ int reiserfs_delete_item(struct reiserfs_transaction_handle *th, struct treepath *p_s_path, /* Path to the deleted item. */
+ const struct cpu_key *p_s_item_key, /* Key to search for the deleted item. */
+- struct inode *p_s_inode, /* inode is here just to update i_blocks and quotas */
++ struct inode *inode, /* inode is here just to update
++ * i_blocks and quotas */
+ struct buffer_head *p_s_un_bh)
+ { /* NULL or unformatted node pointer. */
+- struct super_block *sb = p_s_inode->i_sb;
++ struct super_block *sb = inode->i_sb;
+ struct tree_balance s_del_balance;
+ struct item_head s_ih;
+ struct item_head *q_ih;
+@@ -1170,10 +1171,10 @@ int reiserfs_delete_item(struct reiserfs
+ n_iter++;
+ c_mode =
+ #endif
+- prepare_for_delete_or_cut(th, p_s_inode, p_s_path,
++ prepare_for_delete_or_cut(th, inode, p_s_path,
+ p_s_item_key, &n_removed,
+ &n_del_size,
+- max_reiserfs_offset(p_s_inode));
++ max_reiserfs_offset(inode));
+
+ RFALSE(c_mode != M_DELETE, "PAP-5320: mode must be M_DELETE");
+
+@@ -1214,7 +1215,7 @@ int reiserfs_delete_item(struct reiserfs
+ ** split into multiple items, and we only want to decrement for
+ ** the unfm node once
+ */
+- if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(q_ih)) {
++ if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(q_ih)) {
+ if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
+ quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
+ } else {
+@@ -1259,9 +1260,9 @@ int reiserfs_delete_item(struct reiserfs
+ #ifdef REISERQUOTA_DEBUG
+ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "reiserquota delete_item(): freeing %u, id=%u type=%c",
+- quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
++ quota_cut_bytes, inode->i_uid, head2type(&s_ih));
+ #endif
+- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
++ DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes);
+
+ /* Return deleted body length */
+ return n_ret_value;
+@@ -1423,25 +1424,25 @@ static void unmap_buffers(struct page *p
+ }
+
+ static int maybe_indirect_to_direct(struct reiserfs_transaction_handle *th,
+- struct inode *p_s_inode,
++ struct inode *inode,
+ struct page *page,
+ struct treepath *p_s_path,
+ const struct cpu_key *p_s_item_key,
+ loff_t n_new_file_size, char *p_c_mode)
+ {
+- struct super_block *sb = p_s_inode->i_sb;
++ struct super_block *sb = inode->i_sb;
+ int n_block_size = sb->s_blocksize;
+ int cut_bytes;
+ BUG_ON(!th->t_trans_id);
+- BUG_ON(n_new_file_size != p_s_inode->i_size);
++ BUG_ON(n_new_file_size != inode->i_size);
+
+ /* the page being sent in could be NULL if there was an i/o error
+ ** reading in the last block. The user will hit problems trying to
+ ** read the file, but for now we just skip the indirect2direct
+ */
+- if (atomic_read(&p_s_inode->i_count) > 1 ||
+- !tail_has_to_be_packed(p_s_inode) ||
+- !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
++ if (atomic_read(&inode->i_count) > 1 ||
++ !tail_has_to_be_packed(inode) ||
++ !page || (REISERFS_I(inode)->i_flags & i_nopack_mask)) {
+ /* leave tail in an unformatted node */
+ *p_c_mode = M_SKIP_BALANCING;
+ cut_bytes =
+@@ -1450,8 +1451,9 @@ static int maybe_indirect_to_direct(stru
+ return cut_bytes;
+ }
+ /* Permorm the conversion to a direct_item. */
+- /*return indirect_to_direct (p_s_inode, p_s_path, p_s_item_key, n_new_file_size, p_c_mode); */
+- return indirect2direct(th, p_s_inode, page, p_s_path, p_s_item_key,
++ /* return indirect_to_direct(inode, p_s_path, p_s_item_key,
++ n_new_file_size, p_c_mode); */
++ return indirect2direct(th, inode, page, p_s_path, p_s_item_key,
+ n_new_file_size, p_c_mode);
+ }
+
+@@ -1505,10 +1507,10 @@ static void indirect_to_direct_roll_back
+ int reiserfs_cut_from_item(struct reiserfs_transaction_handle *th,
+ struct treepath *p_s_path,
+ struct cpu_key *p_s_item_key,
+- struct inode *p_s_inode,
++ struct inode *inode,
+ struct page *page, loff_t n_new_file_size)
+ {
+- struct super_block *sb = p_s_inode->i_sb;
++ struct super_block *sb = inode->i_sb;
+ /* Every function which is going to call do_balance must first
+ create a tree_balance structure. Then it must fill up this
+ structure by using the init_tb_struct and fix_nodes functions.
+@@ -1525,7 +1527,7 @@ int reiserfs_cut_from_item(struct reiser
+
+ BUG_ON(!th->t_trans_id);
+
+- init_tb_struct(th, &s_cut_balance, p_s_inode->i_sb, p_s_path,
++ init_tb_struct(th, &s_cut_balance, inode->i_sb, p_s_path,
+ n_cut_size);
+
+ /* Repeat this loop until we either cut the item without needing
+@@ -1537,7 +1539,7 @@ int reiserfs_cut_from_item(struct reiser
+ pointers. */
+
+ c_mode =
+- prepare_for_delete_or_cut(th, p_s_inode, p_s_path,
++ prepare_for_delete_or_cut(th, inode, p_s_path,
+ p_s_item_key, &n_removed,
+ &n_cut_size, n_new_file_size);
+ if (c_mode == M_CONVERT) {
+@@ -1547,7 +1549,7 @@ int reiserfs_cut_from_item(struct reiser
+ "PAP-5570: can not convert twice");
+
+ n_ret_value =
+- maybe_indirect_to_direct(th, p_s_inode, page,
++ maybe_indirect_to_direct(th, inode, page,
+ p_s_path, p_s_item_key,
+ n_new_file_size, &c_mode);
+ if (c_mode == M_SKIP_BALANCING)
+@@ -1612,7 +1614,7 @@ int reiserfs_cut_from_item(struct reiser
+ if (n_is_inode_locked) {
+ // FIXME: this seems to be not needed: we are always able
+ // to cut item
+- indirect_to_direct_roll_back(th, p_s_inode, p_s_path);
++ indirect_to_direct_roll_back(th, inode, p_s_path);
+ }
+ if (n_ret_value == NO_DISK_SPACE)
+ reiserfs_warning(sb, "reiserfs-5092",
+@@ -1639,12 +1641,12 @@ int reiserfs_cut_from_item(struct reiser
+ ** item.
+ */
+ p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
+- if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(p_le_ih)) {
++ if (!S_ISLNK(inode->i_mode) && is_direct_le_ih(p_le_ih)) {
+ if (c_mode == M_DELETE &&
+ (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
+ 1) {
+ // FIXME: this is to keep 3.5 happy
+- REISERFS_I(p_s_inode)->i_first_direct_byte = U32_MAX;
++ REISERFS_I(inode)->i_first_direct_byte = U32_MAX;
+ quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
+ } else {
+ quota_cut_bytes = 0;
+@@ -1687,14 +1689,14 @@ int reiserfs_cut_from_item(struct reiser
+ ** unmap and invalidate it
+ */
+ unmap_buffers(page, tail_pos);
+- REISERFS_I(p_s_inode)->i_flags &= ~i_pack_on_close_mask;
++ REISERFS_I(inode)->i_flags &= ~i_pack_on_close_mask;
+ }
+ #ifdef REISERQUOTA_DEBUG
+- reiserfs_debug(p_s_inode->i_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(inode->i_sb, REISERFS_DEBUG_CODE,
+ "reiserquota cut_from_item(): freeing %u id=%u type=%c",
+- quota_cut_bytes, p_s_inode->i_uid, '?');
++ quota_cut_bytes, inode->i_uid, '?');
+ #endif
+- DQUOT_FREE_SPACE_NODIRTY(p_s_inode, quota_cut_bytes);
++ DQUOT_FREE_SPACE_NODIRTY(inode, quota_cut_bytes);
+ return n_ret_value;
+ }
+
+@@ -1715,8 +1717,8 @@ static void truncate_directory(struct re
+
+ /* Truncate file to the new size. Note, this must be called with a transaction
+ already started */
+-int reiserfs_do_truncate(struct reiserfs_transaction_handle *th, struct inode *p_s_inode, /* ->i_size contains new
+- size */
++int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
++ struct inode *inode, /* ->i_size contains new size */
+ struct page *page, /* up to date for last block */
+ int update_timestamps /* when it is called by
+ file_release to convert
+@@ -1735,35 +1737,35 @@ int reiserfs_do_truncate(struct reiserfs
+
+ BUG_ON(!th->t_trans_id);
+ if (!
+- (S_ISREG(p_s_inode->i_mode) || S_ISDIR(p_s_inode->i_mode)
+- || S_ISLNK(p_s_inode->i_mode)))
++ (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode)
++ || S_ISLNK(inode->i_mode)))
+ return 0;
+
+- if (S_ISDIR(p_s_inode->i_mode)) {
++ if (S_ISDIR(inode->i_mode)) {
+ // deletion of directory - no need to update timestamps
+- truncate_directory(th, p_s_inode);
++ truncate_directory(th, inode);
+ return 0;
+ }
+
+ /* Get new file size. */
+- n_new_file_size = p_s_inode->i_size;
++ n_new_file_size = inode->i_size;
+
+ // FIXME: note, that key type is unimportant here
+- make_cpu_key(&s_item_key, p_s_inode, max_reiserfs_offset(p_s_inode),
++ make_cpu_key(&s_item_key, inode, max_reiserfs_offset(inode),
+ TYPE_DIRECT, 3);
+
+ retval =
+- search_for_position_by_key(p_s_inode->i_sb, &s_item_key,
++ search_for_position_by_key(inode->i_sb, &s_item_key,
+ &s_search_path);
+ if (retval == IO_ERROR) {
+- reiserfs_error(p_s_inode->i_sb, "vs-5657",
++ reiserfs_error(inode->i_sb, "vs-5657",
+ "i/o failure occurred trying to truncate %K",
+ &s_item_key);
+ err = -EIO;
+ goto out;
+ }
+ if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
+- reiserfs_error(p_s_inode->i_sb, "PAP-5660",
++ reiserfs_error(inode->i_sb, "PAP-5660",
+ "wrong result %d of search for %K", retval,
+ &s_item_key);
+
+@@ -1780,7 +1782,7 @@ int reiserfs_do_truncate(struct reiserfs
+ else {
+ loff_t offset = le_ih_k_offset(p_le_ih);
+ int bytes =
+- op_bytes_number(p_le_ih, p_s_inode->i_sb->s_blocksize);
++ op_bytes_number(p_le_ih, inode->i_sb->s_blocksize);
+
+ /* this may mismatch with real file size: if last direct item
+ had no padding zeros and last unformatted node had no free
+@@ -1805,9 +1807,9 @@ int reiserfs_do_truncate(struct reiserfs
+ /* Cut or delete file item. */
+ n_deleted =
+ reiserfs_cut_from_item(th, &s_search_path, &s_item_key,
+- p_s_inode, page, n_new_file_size);
++ inode, page, n_new_file_size);
+ if (n_deleted < 0) {
+- reiserfs_warning(p_s_inode->i_sb, "vs-5665",
++ reiserfs_warning(inode->i_sb, "vs-5665",
+ "reiserfs_cut_from_item failed");
+ reiserfs_check_path(&s_search_path);
+ return 0;
+@@ -1837,22 +1839,22 @@ int reiserfs_do_truncate(struct reiserfs
+ pathrelse(&s_search_path);
+
+ if (update_timestamps) {
+- p_s_inode->i_mtime = p_s_inode->i_ctime =
+- CURRENT_TIME_SEC;
++ inode->i_mtime = CURRENT_TIME_SEC;
++ inode->i_ctime = CURRENT_TIME_SEC;
+ }
+- reiserfs_update_sd(th, p_s_inode);
++ reiserfs_update_sd(th, inode);
+
+- err = journal_end(th, p_s_inode->i_sb, orig_len_alloc);
++ err = journal_end(th, inode->i_sb, orig_len_alloc);
+ if (err)
+ goto out;
+- err = journal_begin(th, p_s_inode->i_sb,
++ err = journal_begin(th, inode->i_sb,
+ JOURNAL_FOR_FREE_BLOCK_AND_UPDATE_SD + JOURNAL_PER_BALANCE_CNT * 4) ;
+ if (err)
+ goto out;
+- reiserfs_update_inode_transaction(p_s_inode);
++ reiserfs_update_inode_transaction(inode);
+ }
+ } while (n_file_size > ROUND_UP(n_new_file_size) &&
+- search_for_position_by_key(p_s_inode->i_sb, &s_item_key,
++ search_for_position_by_key(inode->i_sb, &s_item_key,
+ &s_search_path) == POSITION_FOUND);
+
+ RFALSE(n_file_size > ROUND_UP(n_new_file_size),
+@@ -1862,9 +1864,10 @@ int reiserfs_do_truncate(struct reiserfs
+ update_and_out:
+ if (update_timestamps) {
+ // this is truncate, not file closing
+- p_s_inode->i_mtime = p_s_inode->i_ctime = CURRENT_TIME_SEC;
++ inode->i_mtime = CURRENT_TIME_SEC;
++ inode->i_ctime = CURRENT_TIME_SEC;
+ }
+- reiserfs_update_sd(th, p_s_inode);
++ reiserfs_update_sd(th, inode);
+
+ out:
+ pathrelse(&s_search_path);
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -170,12 +170,14 @@ void reiserfs_unmap_buffer(struct buffer
+ what we expect from it (number of cut bytes). But when tail remains
+ in the unformatted node, we set mode to SKIP_BALANCING and unlock
+ inode */
+-int indirect2direct(struct reiserfs_transaction_handle *th, struct inode *p_s_inode, struct page *page, struct treepath *p_s_path, /* path to the indirect item. */
++int indirect2direct(struct reiserfs_transaction_handle *th,
++ struct inode *inode, struct page *page,
++ struct treepath *p_s_path, /* path to the indirect item. */
+ const struct cpu_key *p_s_item_key, /* Key to look for unformatted node pointer to be cut. */
+ loff_t n_new_file_size, /* New file size. */
+ char *p_c_mode)
+ {
+- struct super_block *sb = p_s_inode->i_sb;
++ struct super_block *sb = inode->i_sb;
+ struct item_head s_ih;
+ unsigned long n_block_size = sb->s_blocksize;
+ char *tail;
+@@ -193,7 +195,7 @@ int indirect2direct(struct reiserfs_tran
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+
+ tail_len = (n_new_file_size & (n_block_size - 1));
+- if (get_inode_sd_version(p_s_inode) == STAT_DATA_V2)
++ if (get_inode_sd_version(inode) == STAT_DATA_V2)
+ round_tail_len = ROUND_UP(tail_len);
+ else
+ round_tail_len = tail_len;
+@@ -228,7 +230,7 @@ int indirect2direct(struct reiserfs_tran
+ }
+
+ /* Set direct item header to insert. */
+- make_le_item_head(&s_ih, NULL, get_inode_item_key_version(p_s_inode),
++ make_le_item_head(&s_ih, NULL, get_inode_item_key_version(inode),
+ pos1 + 1, TYPE_DIRECT, round_tail_len,
+ 0xffff /*ih_free_space */ );
+
+@@ -244,7 +246,7 @@ int indirect2direct(struct reiserfs_tran
+ set_cpu_key_k_type(&key, TYPE_DIRECT);
+ key.key_length = 4;
+ /* Insert tail as new direct item in the tree */
+- if (reiserfs_insert_item(th, p_s_path, &key, &s_ih, p_s_inode,
++ if (reiserfs_insert_item(th, p_s_path, &key, &s_ih, inode,
+ tail ? tail : NULL) < 0) {
+ /* No disk memory. So we can not convert last unformatted node
+ to the direct item. In this case we used to adjust
+@@ -258,7 +260,7 @@ int indirect2direct(struct reiserfs_tran
+ kunmap(page);
+
+ /* make sure to get the i_blocks changes from reiserfs_insert_item */
+- reiserfs_update_sd(th, p_s_inode);
++ reiserfs_update_sd(th, inode);
+
+ // note: we have now the same as in above direct2indirect
+ // conversion: there are two keys which have matching first three
+@@ -269,8 +271,8 @@ int indirect2direct(struct reiserfs_tran
+ *p_c_mode = M_CUT;
+
+ /* we store position of first direct item in the in-core inode */
+- //mark_file_with_tail (p_s_inode, pos1 + 1);
+- REISERFS_I(p_s_inode)->i_first_direct_byte = pos1 + 1;
++ /* mark_file_with_tail (inode, pos1 + 1); */
++ REISERFS_I(inode)->i_first_direct_byte = pos1 + 1;
+
+ return n_block_size - round_tail_len;
+ }
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1870,9 +1870,9 @@ int reiserfs_delete_item(struct reiserfs
+ void reiserfs_delete_solid_item(struct reiserfs_transaction_handle *th,
+ struct inode *inode, struct reiserfs_key *key);
+ int reiserfs_delete_object(struct reiserfs_transaction_handle *th,
+- struct inode *p_s_inode);
++ struct inode *inode);
+ int reiserfs_do_truncate(struct reiserfs_transaction_handle *th,
+- struct inode *p_s_inode, struct page *,
++ struct inode *inode, struct page *,
+ int update_timestamps);
+
+ #define i_block_size(inode) ((inode)->i_sb->s_blocksize)
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: rename p_s_sb to sb
+
+ This patch applies a simple s/p_s_sb/sb/g across the reiserfs code. It is
+ the first in a series of patches to rip out some of the awful variable
+ naming in reiserfs.
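+
+ For reference, the rename is mechanical; it is roughly what the following
+ would produce (an illustrative sketch only, not necessarily the exact
+ command used to generate this patch):
+
+   # illustrative only -- GNU sed, word-boundary match so e.g. p_s_sb_foo is untouched
+   sed -i 's/\bp_s_sb\b/sb/g' fs/reiserfs/*.c include/linux/reiserfs_fs.h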
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/fix_node.c | 46 +-
+ fs/reiserfs/journal.c | 735 ++++++++++++++++++++----------------------
+ fs/reiserfs/stree.c | 126 +++----
+ fs/reiserfs/tail_conversion.c | 16
+ include/linux/reiserfs_fs.h | 14
+ 5 files changed, 468 insertions(+), 469 deletions(-)
+
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -785,7 +785,7 @@ static int get_empty_nodes(struct tree_b
+ b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
+ int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */
+ n_retval = CARRY_ON;
+- struct super_block *p_s_sb = p_s_tb->tb_sb;
++ struct super_block *sb = p_s_tb->tb_sb;
+
+ /* number_of_freeblk is the number of empty blocks which have been
+ acquired for use by the balancing algorithm minus the number of
+@@ -830,7 +830,7 @@ static int get_empty_nodes(struct tree_b
+ RFALSE(!*p_n_blocknr,
+ "PAP-8135: reiserfs_new_blocknrs failed when got new blocks");
+
+- p_s_new_bh = sb_getblk(p_s_sb, *p_n_blocknr);
++ p_s_new_bh = sb_getblk(sb, *p_n_blocknr);
+ RFALSE(buffer_dirty(p_s_new_bh) ||
+ buffer_journaled(p_s_new_bh) ||
+ buffer_journal_dirty(p_s_new_bh),
+@@ -899,7 +899,7 @@ static int get_rfree(struct tree_balance
+ static int is_left_neighbor_in_cache(struct tree_balance *p_s_tb, int n_h)
+ {
+ struct buffer_head *p_s_father, *left;
+- struct super_block *p_s_sb = p_s_tb->tb_sb;
++ struct super_block *sb = p_s_tb->tb_sb;
+ b_blocknr_t n_left_neighbor_blocknr;
+ int n_left_neighbor_position;
+
+@@ -924,7 +924,7 @@ static int is_left_neighbor_in_cache(str
+ n_left_neighbor_blocknr =
+ B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
+ /* Look for the left neighbor in the cache. */
+- if ((left = sb_find_get_block(p_s_sb, n_left_neighbor_blocknr))) {
++ if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) {
+
+ RFALSE(buffer_uptodate(left) && !B_IS_IN_TREE(left),
+ "vs-8170: left neighbor (%b %z) is not in the tree",
+@@ -1942,14 +1942,14 @@ static int get_neighbors(struct tree_bal
+ int n_child_position,
+ n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1);
+ unsigned long n_son_number;
+- struct super_block *p_s_sb = p_s_tb->tb_sb;
++ struct super_block *sb = p_s_tb->tb_sb;
+ struct buffer_head *p_s_bh;
+
+- PROC_INFO_INC(p_s_sb, get_neighbors[n_h]);
++ PROC_INFO_INC(sb, get_neighbors[n_h]);
+
+ if (p_s_tb->lnum[n_h]) {
+ /* We need left neighbor to balance S[n_h]. */
+- PROC_INFO_INC(p_s_sb, need_l_neighbor[n_h]);
++ PROC_INFO_INC(sb, need_l_neighbor[n_h]);
+ p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+ RFALSE(p_s_bh == p_s_tb->FL[n_h] &&
+@@ -1961,12 +1961,12 @@ static int get_neighbors(struct tree_bal
+ p_s_tb->FL[n_h]) ? p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb->
+ FL[n_h]);
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position);
+- p_s_bh = sb_bread(p_s_sb, n_son_number);
++ p_s_bh = sb_bread(sb, n_son_number);
+ if (!p_s_bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+ brelse(p_s_bh);
+- PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]);
++ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+
+@@ -1986,7 +1986,7 @@ static int get_neighbors(struct tree_bal
+ }
+
+ if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. */
+- PROC_INFO_INC(p_s_sb, need_r_neighbor[n_h]);
++ PROC_INFO_INC(sb, need_r_neighbor[n_h]);
+ p_s_bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
+
+ RFALSE(p_s_bh == p_s_tb->FR[n_h] &&
+@@ -1998,12 +1998,12 @@ static int get_neighbors(struct tree_bal
+ n_child_position =
+ (p_s_bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0;
+ n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position);
+- p_s_bh = sb_bread(p_s_sb, n_son_number);
++ p_s_bh = sb_bread(sb, n_son_number);
+ if (!p_s_bh)
+ return IO_ERROR;
+ if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
+ brelse(p_s_bh);
+- PROC_INFO_INC(p_s_sb, get_neighbors_restart[n_h]);
++ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+ brelse(p_s_tb->R[n_h]);
+@@ -2089,51 +2089,51 @@ static int get_mem_for_virtual_node(stru
+ }
+
+ #ifdef CONFIG_REISERFS_CHECK
+-static void tb_buffer_sanity_check(struct super_block *p_s_sb,
++static void tb_buffer_sanity_check(struct super_block *sb,
+ struct buffer_head *p_s_bh,
+ const char *descr, int level)
+ {
+ if (p_s_bh) {
+ if (atomic_read(&(p_s_bh->b_count)) <= 0) {
+
+- reiserfs_panic(p_s_sb, "jmacd-1", "negative or zero "
++ reiserfs_panic(sb, "jmacd-1", "negative or zero "
+ "reference counter for buffer %s[%d] "
+ "(%b)", descr, level, p_s_bh);
+ }
+
+ if (!buffer_uptodate(p_s_bh)) {
+- reiserfs_panic(p_s_sb, "jmacd-2", "buffer is not up "
++ reiserfs_panic(sb, "jmacd-2", "buffer is not up "
+ "to date %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+ if (!B_IS_IN_TREE(p_s_bh)) {
+- reiserfs_panic(p_s_sb, "jmacd-3", "buffer is not "
++ reiserfs_panic(sb, "jmacd-3", "buffer is not "
+ "in tree %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+- if (p_s_bh->b_bdev != p_s_sb->s_bdev) {
+- reiserfs_panic(p_s_sb, "jmacd-4", "buffer has wrong "
++ if (p_s_bh->b_bdev != sb->s_bdev) {
++ reiserfs_panic(sb, "jmacd-4", "buffer has wrong "
+ "device %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+- if (p_s_bh->b_size != p_s_sb->s_blocksize) {
+- reiserfs_panic(p_s_sb, "jmacd-5", "buffer has wrong "
++ if (p_s_bh->b_size != sb->s_blocksize) {
++ reiserfs_panic(sb, "jmacd-5", "buffer has wrong "
+ "blocksize %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+
+- if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
+- reiserfs_panic(p_s_sb, "jmacd-6", "buffer block "
++ if (p_s_bh->b_blocknr > SB_BLOCK_COUNT(sb)) {
++ reiserfs_panic(sb, "jmacd-6", "buffer block "
+ "number too high %s[%d] (%b)",
+ descr, level, p_s_bh);
+ }
+ }
+ }
+ #else
+-static void tb_buffer_sanity_check(struct super_block *p_s_sb,
++static void tb_buffer_sanity_check(struct super_block *sb,
+ struct buffer_head *p_s_bh,
+ const char *descr, int level)
+ {;
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -97,7 +97,7 @@ static int flush_commit_list(struct supe
+ struct reiserfs_journal_list *jl, int flushall);
+ static int can_dirty(struct reiserfs_journal_cnode *cn);
+ static int journal_join(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks);
++ struct super_block *sb, unsigned long nblocks);
+ static int release_journal_dev(struct super_block *super,
+ struct reiserfs_journal *journal);
+ static int dirty_one_transaction(struct super_block *s,
+@@ -113,12 +113,12 @@ enum {
+ };
+
+ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb,
++ struct super_block *sb,
+ unsigned long nblocks, int join);
+
+-static void init_journal_hash(struct super_block *p_s_sb)
++static void init_journal_hash(struct super_block *sb)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ memset(journal->j_hash_table, 0,
+ JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
+ }
+@@ -145,7 +145,7 @@ static void disable_barrier(struct super
+ }
+
+ static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block
+- *p_s_sb)
++ *sb)
+ {
+ struct reiserfs_bitmap_node *bn;
+ static int id;
+@@ -154,7 +154,7 @@ static struct reiserfs_bitmap_node *allo
+ if (!bn) {
+ return NULL;
+ }
+- bn->data = kzalloc(p_s_sb->s_blocksize, GFP_NOFS);
++ bn->data = kzalloc(sb->s_blocksize, GFP_NOFS);
+ if (!bn->data) {
+ kfree(bn);
+ return NULL;
+@@ -164,9 +164,9 @@ static struct reiserfs_bitmap_node *allo
+ return bn;
+ }
+
+-static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *p_s_sb)
++static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_bitmap_node *bn = NULL;
+ struct list_head *entry = journal->j_bitmap_nodes.next;
+
+@@ -176,21 +176,21 @@ static struct reiserfs_bitmap_node *get_
+ if (entry != &journal->j_bitmap_nodes) {
+ bn = list_entry(entry, struct reiserfs_bitmap_node, list);
+ list_del(entry);
+- memset(bn->data, 0, p_s_sb->s_blocksize);
++ memset(bn->data, 0, sb->s_blocksize);
+ journal->j_free_bitmap_nodes--;
+ return bn;
+ }
+- bn = allocate_bitmap_node(p_s_sb);
++ bn = allocate_bitmap_node(sb);
+ if (!bn) {
+ yield();
+ goto repeat;
+ }
+ return bn;
+ }
+-static inline void free_bitmap_node(struct super_block *p_s_sb,
++static inline void free_bitmap_node(struct super_block *sb,
+ struct reiserfs_bitmap_node *bn)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ journal->j_used_bitmap_nodes--;
+ if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) {
+ kfree(bn->data);
+@@ -201,13 +201,13 @@ static inline void free_bitmap_node(stru
+ }
+ }
+
+-static void allocate_bitmap_nodes(struct super_block *p_s_sb)
++static void allocate_bitmap_nodes(struct super_block *sb)
+ {
+ int i;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_bitmap_node *bn = NULL;
+ for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) {
+- bn = allocate_bitmap_node(p_s_sb);
++ bn = allocate_bitmap_node(sb);
+ if (bn) {
+ list_add(&bn->list, &journal->j_bitmap_nodes);
+ journal->j_free_bitmap_nodes++;
+@@ -217,30 +217,30 @@ static void allocate_bitmap_nodes(struct
+ }
+ }
+
+-static int set_bit_in_list_bitmap(struct super_block *p_s_sb,
++static int set_bit_in_list_bitmap(struct super_block *sb,
+ b_blocknr_t block,
+ struct reiserfs_list_bitmap *jb)
+ {
+- unsigned int bmap_nr = block / (p_s_sb->s_blocksize << 3);
+- unsigned int bit_nr = block % (p_s_sb->s_blocksize << 3);
++ unsigned int bmap_nr = block / (sb->s_blocksize << 3);
++ unsigned int bit_nr = block % (sb->s_blocksize << 3);
+
+ if (!jb->bitmaps[bmap_nr]) {
+- jb->bitmaps[bmap_nr] = get_bitmap_node(p_s_sb);
++ jb->bitmaps[bmap_nr] = get_bitmap_node(sb);
+ }
+ set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data);
+ return 0;
+ }
+
+-static void cleanup_bitmap_list(struct super_block *p_s_sb,
++static void cleanup_bitmap_list(struct super_block *sb,
+ struct reiserfs_list_bitmap *jb)
+ {
+ int i;
+ if (jb->bitmaps == NULL)
+ return;
+
+- for (i = 0; i < reiserfs_bmap_count(p_s_sb); i++) {
++ for (i = 0; i < reiserfs_bmap_count(sb); i++) {
+ if (jb->bitmaps[i]) {
+- free_bitmap_node(p_s_sb, jb->bitmaps[i]);
++ free_bitmap_node(sb, jb->bitmaps[i]);
+ jb->bitmaps[i] = NULL;
+ }
+ }
+@@ -249,7 +249,7 @@ static void cleanup_bitmap_list(struct s
+ /*
+ ** only call this on FS unmount.
+ */
+-static int free_list_bitmaps(struct super_block *p_s_sb,
++static int free_list_bitmaps(struct super_block *sb,
+ struct reiserfs_list_bitmap *jb_array)
+ {
+ int i;
+@@ -257,16 +257,16 @@ static int free_list_bitmaps(struct supe
+ for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
+ jb = jb_array + i;
+ jb->journal_list = NULL;
+- cleanup_bitmap_list(p_s_sb, jb);
++ cleanup_bitmap_list(sb, jb);
+ vfree(jb->bitmaps);
+ jb->bitmaps = NULL;
+ }
+ return 0;
+ }
+
+-static int free_bitmap_nodes(struct super_block *p_s_sb)
++static int free_bitmap_nodes(struct super_block *sb)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct list_head *next = journal->j_bitmap_nodes.next;
+ struct reiserfs_bitmap_node *bn;
+
+@@ -286,7 +286,7 @@ static int free_bitmap_nodes(struct supe
+ ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+ ** jb_array is the array to be filled in.
+ */
+-int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
++int reiserfs_allocate_list_bitmaps(struct super_block *sb,
+ struct reiserfs_list_bitmap *jb_array,
+ unsigned int bmap_nr)
+ {
+@@ -300,7 +300,7 @@ int reiserfs_allocate_list_bitmaps(struc
+ jb->journal_list = NULL;
+ jb->bitmaps = vmalloc(mem);
+ if (!jb->bitmaps) {
+- reiserfs_warning(p_s_sb, "clm-2000", "unable to "
++ reiserfs_warning(sb, "clm-2000", "unable to "
+ "allocate bitmaps for journal lists");
+ failed = 1;
+ break;
+@@ -308,7 +308,7 @@ int reiserfs_allocate_list_bitmaps(struc
+ memset(jb->bitmaps, 0, mem);
+ }
+ if (failed) {
+- free_list_bitmaps(p_s_sb, jb_array);
++ free_list_bitmaps(sb, jb_array);
+ return -1;
+ }
+ return 0;
+@@ -318,12 +318,12 @@ int reiserfs_allocate_list_bitmaps(struc
+ ** find an available list bitmap. If you can't find one, flush a commit list
+ ** and try again
+ */
+-static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
++static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb,
+ struct reiserfs_journal_list
+ *jl)
+ {
+ int i, j;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_list_bitmap *jb = NULL;
+
+ for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) {
+@@ -331,7 +331,7 @@ static struct reiserfs_list_bitmap *get_
+ journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS;
+ jb = journal->j_list_bitmap + i;
+ if (journal->j_list_bitmap[i].journal_list) {
+- flush_commit_list(p_s_sb,
++ flush_commit_list(sb,
+ journal->j_list_bitmap[i].
+ journal_list, 1);
+ if (!journal->j_list_bitmap[i].journal_list) {
+@@ -378,12 +378,12 @@ static struct reiserfs_journal_cnode *al
+ /*
+ ** pulls a cnode off the free list, or returns NULL on failure
+ */
+-static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
++static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb)
+ {
+ struct reiserfs_journal_cnode *cn;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+- reiserfs_check_lock_depth(p_s_sb, "get_cnode");
++ reiserfs_check_lock_depth(sb, "get_cnode");
+
+ if (journal->j_cnode_free <= 0) {
+ return NULL;
+@@ -405,12 +405,12 @@ static struct reiserfs_journal_cnode *ge
+ /*
+ ** returns a cnode to the free list
+ */
+-static void free_cnode(struct super_block *p_s_sb,
++static void free_cnode(struct super_block *sb,
+ struct reiserfs_journal_cnode *cn)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+- reiserfs_check_lock_depth(p_s_sb, "free_cnode");
++ reiserfs_check_lock_depth(sb, "free_cnode");
+
+ journal->j_cnode_used--;
+ journal->j_cnode_free++;
+@@ -481,11 +481,11 @@ static inline struct reiserfs_journal_cn
+ ** reject it on the next call to reiserfs_in_journal
+ **
+ */
+-int reiserfs_in_journal(struct super_block *p_s_sb,
++int reiserfs_in_journal(struct super_block *sb,
+ unsigned int bmap_nr, int bit_nr, int search_all,
+ b_blocknr_t * next_zero_bit)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_cnode *cn;
+ struct reiserfs_list_bitmap *jb;
+ int i;
+@@ -493,14 +493,14 @@ int reiserfs_in_journal(struct super_blo
+
+ *next_zero_bit = 0; /* always start this at zero. */
+
+- PROC_INFO_INC(p_s_sb, journal.in_journal);
++ PROC_INFO_INC(sb, journal.in_journal);
+ /* If we aren't doing a search_all, this is a metablock, and it will be logged before use.
+ ** if we crash before the transaction that freed it commits, this transaction won't
+ ** have committed either, and the block will never be written
+ */
+ if (search_all) {
+ for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) {
+- PROC_INFO_INC(p_s_sb, journal.in_journal_bitmap);
++ PROC_INFO_INC(sb, journal.in_journal_bitmap);
+ jb = journal->j_list_bitmap + i;
+ if (jb->journal_list && jb->bitmaps[bmap_nr] &&
+ test_bit(bit_nr,
+@@ -510,28 +510,28 @@ int reiserfs_in_journal(struct super_blo
+ find_next_zero_bit((unsigned long *)
+ (jb->bitmaps[bmap_nr]->
+ data),
+- p_s_sb->s_blocksize << 3,
++ sb->s_blocksize << 3,
+ bit_nr + 1);
+ return 1;
+ }
+ }
+ }
+
+- bl = bmap_nr * (p_s_sb->s_blocksize << 3) + bit_nr;
++ bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr;
+ /* is it in any old transactions? */
+ if (search_all
+ && (cn =
+- get_journal_hash_dev(p_s_sb, journal->j_list_hash_table, bl))) {
++ get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) {
+ return 1;
+ }
+
+ /* is it in the current transaction. This should never happen */
+- if ((cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, bl))) {
++ if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) {
+ BUG();
+ return 1;
+ }
+
+- PROC_INFO_INC(p_s_sb, journal.in_journal_reusable);
++ PROC_INFO_INC(sb, journal.in_journal_reusable);
+ /* safe for reuse */
+ return 0;
+ }
+@@ -553,16 +553,16 @@ static inline void insert_journal_hash(s
+ }
+
+ /* lock the current transaction */
+-static inline void lock_journal(struct super_block *p_s_sb)
++static inline void lock_journal(struct super_block *sb)
+ {
+- PROC_INFO_INC(p_s_sb, journal.lock_journal);
+- mutex_lock(&SB_JOURNAL(p_s_sb)->j_mutex);
++ PROC_INFO_INC(sb, journal.lock_journal);
++ mutex_lock(&SB_JOURNAL(sb)->j_mutex);
+ }
+
+ /* unlock the current transaction */
+-static inline void unlock_journal(struct super_block *p_s_sb)
++static inline void unlock_journal(struct super_block *sb)
+ {
+- mutex_unlock(&SB_JOURNAL(p_s_sb)->j_mutex);
++ mutex_unlock(&SB_JOURNAL(sb)->j_mutex);
+ }
+
+ static inline void get_journal_list(struct reiserfs_journal_list *jl)
+@@ -586,13 +586,13 @@ static inline void put_journal_list(stru
+ ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a
+ ** transaction.
+ */
+-static void cleanup_freed_for_journal_list(struct super_block *p_s_sb,
++static void cleanup_freed_for_journal_list(struct super_block *sb,
+ struct reiserfs_journal_list *jl)
+ {
+
+ struct reiserfs_list_bitmap *jb = jl->j_list_bitmap;
+ if (jb) {
+- cleanup_bitmap_list(p_s_sb, jb);
++ cleanup_bitmap_list(sb, jb);
+ }
+ jl->j_list_bitmap->journal_list = NULL;
+ jl->j_list_bitmap = NULL;
+@@ -1237,11 +1237,11 @@ static void remove_journal_hash(struct s
+ ** journal list for this transaction. Aside from freeing the cnode, this also allows the
+ ** block to be reallocated for data blocks if it had been deleted.
+ */
+-static void remove_all_from_journal_list(struct super_block *p_s_sb,
++static void remove_all_from_journal_list(struct super_block *sb,
+ struct reiserfs_journal_list *jl,
+ int debug)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_cnode *cn, *last;
+ cn = jl->j_realblock;
+
+@@ -1251,18 +1251,18 @@ static void remove_all_from_journal_list
+ while (cn) {
+ if (cn->blocknr != 0) {
+ if (debug) {
+- reiserfs_warning(p_s_sb, "reiserfs-2201",
++ reiserfs_warning(sb, "reiserfs-2201",
+ "block %u, bh is %d, state %ld",
+ cn->blocknr, cn->bh ? 1 : 0,
+ cn->state);
+ }
+ cn->state = 0;
+- remove_journal_hash(p_s_sb, journal->j_list_hash_table,
++ remove_journal_hash(sb, journal->j_list_hash_table,
+ jl, cn->blocknr, 1);
+ }
+ last = cn;
+ cn = cn->next;
+- free_cnode(p_s_sb, last);
++ free_cnode(sb, last);
+ }
+ jl->j_realblock = NULL;
+ }
+@@ -1274,12 +1274,12 @@ static void remove_all_from_journal_list
+ ** called by flush_journal_list, before it calls remove_all_from_journal_list
+ **
+ */
+-static int _update_journal_header_block(struct super_block *p_s_sb,
++static int _update_journal_header_block(struct super_block *sb,
+ unsigned long offset,
+ unsigned int trans_id)
+ {
+ struct reiserfs_journal_header *jh;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+ if (reiserfs_is_journal_aborted(journal))
+ return -EIO;
+@@ -1289,7 +1289,7 @@ static int _update_journal_header_block(
+ wait_on_buffer((journal->j_header_bh));
+ if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
+ #ifdef CONFIG_REISERFS_CHECK
+- reiserfs_warning(p_s_sb, "journal-699",
++ reiserfs_warning(sb, "journal-699",
+ "buffer write failed");
+ #endif
+ return -EIO;
+@@ -1303,24 +1303,24 @@ static int _update_journal_header_block(
+ jh->j_first_unflushed_offset = cpu_to_le32(offset);
+ jh->j_mount_id = cpu_to_le32(journal->j_mount_id);
+
+- if (reiserfs_barrier_flush(p_s_sb)) {
++ if (reiserfs_barrier_flush(sb)) {
+ int ret;
+ lock_buffer(journal->j_header_bh);
+ ret = submit_barrier_buffer(journal->j_header_bh);
+ if (ret == -EOPNOTSUPP) {
+ set_buffer_uptodate(journal->j_header_bh);
+- disable_barrier(p_s_sb);
++ disable_barrier(sb);
+ goto sync;
+ }
+ wait_on_buffer(journal->j_header_bh);
+- check_barrier_completion(p_s_sb, journal->j_header_bh);
++ check_barrier_completion(sb, journal->j_header_bh);
+ } else {
+ sync:
+ set_buffer_dirty(journal->j_header_bh);
+ sync_dirty_buffer(journal->j_header_bh);
+ }
+ if (!buffer_uptodate(journal->j_header_bh)) {
+- reiserfs_warning(p_s_sb, "journal-837",
++ reiserfs_warning(sb, "journal-837",
+ "IO error during journal replay");
+ return -EIO;
+ }
+@@ -1328,23 +1328,23 @@ static int _update_journal_header_block(
+ return 0;
+ }
+
+-static int update_journal_header_block(struct super_block *p_s_sb,
++static int update_journal_header_block(struct super_block *sb,
+ unsigned long offset,
+ unsigned int trans_id)
+ {
+- return _update_journal_header_block(p_s_sb, offset, trans_id);
++ return _update_journal_header_block(sb, offset, trans_id);
+ }
+
+ /*
+ ** flush any and all journal lists older than you are
+ ** can only be called from flush_journal_list
+ */
+-static int flush_older_journal_lists(struct super_block *p_s_sb,
++static int flush_older_journal_lists(struct super_block *sb,
+ struct reiserfs_journal_list *jl)
+ {
+ struct list_head *entry;
+ struct reiserfs_journal_list *other_jl;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ unsigned int trans_id = jl->j_trans_id;
+
+ /* we know we are the only ones flushing things, no extra race
+@@ -1359,7 +1359,7 @@ static int flush_older_journal_lists(str
+ if (other_jl->j_trans_id < trans_id) {
+ BUG_ON(other_jl->j_refcount <= 0);
+ /* do not flush all */
+- flush_journal_list(p_s_sb, other_jl, 0);
++ flush_journal_list(sb, other_jl, 0);
+
+ /* other_jl is now deleted from the list */
+ goto restart;
+@@ -1908,22 +1908,22 @@ void remove_journal_hash(struct super_bl
+ }
+ }
+
+-static void free_journal_ram(struct super_block *p_s_sb)
++static void free_journal_ram(struct super_block *sb)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ kfree(journal->j_current_jl);
+ journal->j_num_lists--;
+
+ vfree(journal->j_cnode_free_orig);
+- free_list_bitmaps(p_s_sb, journal->j_list_bitmap);
+- free_bitmap_nodes(p_s_sb); /* must be after free_list_bitmaps */
++ free_list_bitmaps(sb, journal->j_list_bitmap);
++ free_bitmap_nodes(sb); /* must be after free_list_bitmaps */
+ if (journal->j_header_bh) {
+ brelse(journal->j_header_bh);
+ }
+ /* j_header_bh is on the journal dev, make sure not to release the journal
+ * dev until we brelse j_header_bh
+ */
+- release_journal_dev(p_s_sb, journal);
++ release_journal_dev(sb, journal);
+ vfree(journal);
+ }
+
+@@ -1932,27 +1932,27 @@ static void free_journal_ram(struct supe
+ ** of read_super() yet. Any other caller must keep error at 0.
+ */
+ static int do_journal_release(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, int error)
++ struct super_block *sb, int error)
+ {
+ struct reiserfs_transaction_handle myth;
+ int flushed = 0;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+ /* we only want to flush out transactions if we were called with error == 0
+ */
+- if (!error && !(p_s_sb->s_flags & MS_RDONLY)) {
++ if (!error && !(sb->s_flags & MS_RDONLY)) {
+ /* end the current trans */
+ BUG_ON(!th->t_trans_id);
+- do_journal_end(th, p_s_sb, 10, FLUSH_ALL);
++ do_journal_end(th, sb, 10, FLUSH_ALL);
+
+ /* make sure something gets logged to force our way into the flush code */
+- if (!journal_join(&myth, p_s_sb, 1)) {
+- reiserfs_prepare_for_journal(p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb),
++ if (!journal_join(&myth, sb, 1)) {
++ reiserfs_prepare_for_journal(sb,
++ SB_BUFFER_WITH_SB(sb),
+ 1);
+- journal_mark_dirty(&myth, p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb));
+- do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
++ journal_mark_dirty(&myth, sb,
++ SB_BUFFER_WITH_SB(sb));
++ do_journal_end(&myth, sb, 1, FLUSH_ALL);
+ flushed = 1;
+ }
+ }
+@@ -1960,26 +1960,26 @@ static int do_journal_release(struct rei
+ /* this also catches errors during the do_journal_end above */
+ if (!error && reiserfs_is_journal_aborted(journal)) {
+ memset(&myth, 0, sizeof(myth));
+- if (!journal_join_abort(&myth, p_s_sb, 1)) {
+- reiserfs_prepare_for_journal(p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb),
++ if (!journal_join_abort(&myth, sb, 1)) {
++ reiserfs_prepare_for_journal(sb,
++ SB_BUFFER_WITH_SB(sb),
+ 1);
+- journal_mark_dirty(&myth, p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb));
+- do_journal_end(&myth, p_s_sb, 1, FLUSH_ALL);
++ journal_mark_dirty(&myth, sb,
++ SB_BUFFER_WITH_SB(sb));
++ do_journal_end(&myth, sb, 1, FLUSH_ALL);
+ }
+ }
+
+ reiserfs_mounted_fs_count--;
+ /* wait for all commits to finish */
+- cancel_delayed_work(&SB_JOURNAL(p_s_sb)->j_work);
++ cancel_delayed_work(&SB_JOURNAL(sb)->j_work);
+ flush_workqueue(commit_wq);
+ if (!reiserfs_mounted_fs_count) {
+ destroy_workqueue(commit_wq);
+ commit_wq = NULL;
+ }
+
+- free_journal_ram(p_s_sb);
++ free_journal_ram(sb);
+
+ return 0;
+ }
+@@ -1988,28 +1988,28 @@ static int do_journal_release(struct rei
+ ** call on unmount. flush all journal trans, release all alloc'd ram
+ */
+ int journal_release(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb)
++ struct super_block *sb)
+ {
+- return do_journal_release(th, p_s_sb, 0);
++ return do_journal_release(th, sb, 0);
+ }
+
+ /*
+ ** only call from an error condition inside reiserfs_read_super!
+ */
+ int journal_release_error(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb)
++ struct super_block *sb)
+ {
+- return do_journal_release(th, p_s_sb, 1);
++ return do_journal_release(th, sb, 1);
+ }
+
+ /* compares description block with commit block. returns 1 if they differ, 0 if they are the same */
+-static int journal_compare_desc_commit(struct super_block *p_s_sb,
++static int journal_compare_desc_commit(struct super_block *sb,
+ struct reiserfs_journal_desc *desc,
+ struct reiserfs_journal_commit *commit)
+ {
+ if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
+ get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
+- get_commit_trans_len(commit) > SB_JOURNAL(p_s_sb)->j_trans_max ||
++ get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
+ get_commit_trans_len(commit) <= 0) {
+ return 1;
+ }
+@@ -2020,7 +2020,7 @@ static int journal_compare_desc_commit(s
+ ** returns -1 if it found a corrupt commit block
+ ** returns 1 if both desc and commit were valid
+ */
+-static int journal_transaction_is_valid(struct super_block *p_s_sb,
++static int journal_transaction_is_valid(struct super_block *sb,
+ struct buffer_head *d_bh,
+ unsigned int *oldest_invalid_trans_id,
+ unsigned long *newest_mount_id)
+@@ -2038,7 +2038,7 @@ static int journal_transaction_is_valid(
+ && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
+ if (oldest_invalid_trans_id && *oldest_invalid_trans_id
+ && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-986: transaction "
+ "is valid returning because trans_id %d is greater than "
+ "oldest_invalid %lu",
+@@ -2048,7 +2048,7 @@ static int journal_transaction_is_valid(
+ }
+ if (newest_mount_id
+ && *newest_mount_id > get_desc_mount_id(desc)) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1087: transaction "
+ "is valid returning because mount_id %d is less than "
+ "newest_mount_id %lu",
+@@ -2056,37 +2056,37 @@ static int journal_transaction_is_valid(
+ *newest_mount_id);
+ return -1;
+ }
+- if (get_desc_trans_len(desc) > SB_JOURNAL(p_s_sb)->j_trans_max) {
+- reiserfs_warning(p_s_sb, "journal-2018",
++ if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
++ reiserfs_warning(sb, "journal-2018",
+ "Bad transaction length %d "
+ "encountered, ignoring transaction",
+ get_desc_trans_len(desc));
+ return -1;
+ }
+- offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
++ offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
+
+ /* ok, we have a journal description block, lets see if the transaction was valid */
+ c_bh =
+- journal_bread(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ journal_bread(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ ((offset + get_desc_trans_len(desc) +
+- 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
++ 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
+ if (!c_bh)
+ return 0;
+ commit = (struct reiserfs_journal_commit *)c_bh->b_data;
+- if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ if (journal_compare_desc_commit(sb, desc, commit)) {
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal_transaction_is_valid, commit offset %ld had bad "
+ "time %d or length %d",
+ c_bh->b_blocknr -
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ get_commit_trans_id(commit),
+ get_commit_trans_len(commit));
+ brelse(c_bh);
+ if (oldest_invalid_trans_id) {
+ *oldest_invalid_trans_id =
+ get_desc_trans_id(desc);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1004: "
+ "transaction_is_valid setting oldest invalid trans_id "
+ "to %d",
+@@ -2095,11 +2095,11 @@ static int journal_transaction_is_valid(
+ return -1;
+ }
+ brelse(c_bh);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1006: found valid "
+ "transaction start offset %llu, len %d id %d",
+ d_bh->b_blocknr -
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ get_desc_trans_len(desc),
+ get_desc_trans_id(desc));
+ return 1;
+@@ -2121,13 +2121,13 @@ static void brelse_array(struct buffer_h
+ ** this either reads in a replays a transaction, or returns because the transaction
+ ** is invalid, or too old.
+ */
+-static int journal_read_transaction(struct super_block *p_s_sb,
++static int journal_read_transaction(struct super_block *sb,
+ unsigned long cur_dblock,
+ unsigned long oldest_start,
+ unsigned int oldest_trans_id,
+ unsigned long newest_mount_id)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_desc *desc;
+ struct reiserfs_journal_commit *commit;
+ unsigned int trans_id = 0;
+@@ -2139,45 +2139,45 @@ static int journal_read_transaction(stru
+ int i;
+ int trans_half;
+
+- d_bh = journal_bread(p_s_sb, cur_dblock);
++ d_bh = journal_bread(sb, cur_dblock);
+ if (!d_bh)
+ return 1;
+ desc = (struct reiserfs_journal_desc *)d_bh->b_data;
+- trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1037: "
++ trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: "
+ "journal_read_transaction, offset %llu, len %d mount_id %d",
+- d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ get_desc_trans_len(desc), get_desc_mount_id(desc));
+ if (get_desc_trans_id(desc) < oldest_trans_id) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1039: "
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: "
+ "journal_read_trans skipping because %lu is too old",
+ cur_dblock -
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb));
+ brelse(d_bh);
+ return 1;
+ }
+ if (get_desc_mount_id(desc) != newest_mount_id) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1146: "
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: "
+ "journal_read_trans skipping because %d is != "
+ "newest_mount_id %lu", get_desc_mount_id(desc),
+ newest_mount_id);
+ brelse(d_bh);
+ return 1;
+ }
+- c_bh = journal_bread(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ ((trans_offset + get_desc_trans_len(desc) + 1) %
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
++ SB_ONDISK_JOURNAL_SIZE(sb)));
+ if (!c_bh) {
+ brelse(d_bh);
+ return 1;
+ }
+ commit = (struct reiserfs_journal_commit *)c_bh->b_data;
+- if (journal_compare_desc_commit(p_s_sb, desc, commit)) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ if (journal_compare_desc_commit(sb, desc, commit)) {
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal_read_transaction, "
+ "commit offset %llu had bad time %d or length %d",
+ c_bh->b_blocknr -
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ get_commit_trans_id(commit),
+ get_commit_trans_len(commit));
+ brelse(c_bh);
+@@ -2195,30 +2195,30 @@ static int journal_read_transaction(stru
+ brelse(d_bh);
+ kfree(log_blocks);
+ kfree(real_blocks);
+- reiserfs_warning(p_s_sb, "journal-1169",
++ reiserfs_warning(sb, "journal-1169",
+ "kmalloc failed, unable to mount FS");
+ return -1;
+ }
+ /* get all the buffer heads */
+- trans_half = journal_trans_half(p_s_sb->s_blocksize);
++ trans_half = journal_trans_half(sb->s_blocksize);
+ for (i = 0; i < get_desc_trans_len(desc); i++) {
+ log_blocks[i] =
+- journal_getblk(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ journal_getblk(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ (trans_offset + 1 +
+- i) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
++ i) % SB_ONDISK_JOURNAL_SIZE(sb));
+ if (i < trans_half) {
+ real_blocks[i] =
+- sb_getblk(p_s_sb,
++ sb_getblk(sb,
+ le32_to_cpu(desc->j_realblock[i]));
+ } else {
+ real_blocks[i] =
+- sb_getblk(p_s_sb,
++ sb_getblk(sb,
+ le32_to_cpu(commit->
+ j_realblock[i - trans_half]));
+ }
+- if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(p_s_sb)) {
+- reiserfs_warning(p_s_sb, "journal-1207",
++ if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) {
++ reiserfs_warning(sb, "journal-1207",
+ "REPLAY FAILURE fsck required! "
+ "Block to replay is outside of "
+ "filesystem");
+@@ -2226,8 +2226,8 @@ static int journal_read_transaction(stru
+ }
+ /* make sure we don't try to replay onto log or reserved area */
+ if (is_block_in_log_or_reserved_area
+- (p_s_sb, real_blocks[i]->b_blocknr)) {
+- reiserfs_warning(p_s_sb, "journal-1204",
++ (sb, real_blocks[i]->b_blocknr)) {
++ reiserfs_warning(sb, "journal-1204",
+ "REPLAY FAILURE fsck required! "
+ "Trying to replay onto a log block");
+ abort_replay:
+@@ -2245,7 +2245,7 @@ static int journal_read_transaction(stru
+ for (i = 0; i < get_desc_trans_len(desc); i++) {
+ wait_on_buffer(log_blocks[i]);
+ if (!buffer_uptodate(log_blocks[i])) {
+- reiserfs_warning(p_s_sb, "journal-1212",
++ reiserfs_warning(sb, "journal-1212",
+ "REPLAY FAILURE fsck required! "
+ "buffer write failed");
+ brelse_array(log_blocks + i,
+@@ -2270,7 +2270,7 @@ static int journal_read_transaction(stru
+ for (i = 0; i < get_desc_trans_len(desc); i++) {
+ wait_on_buffer(real_blocks[i]);
+ if (!buffer_uptodate(real_blocks[i])) {
+- reiserfs_warning(p_s_sb, "journal-1226",
++ reiserfs_warning(sb, "journal-1226",
+ "REPLAY FAILURE, fsck required! "
+ "buffer write failed");
+ brelse_array(real_blocks + i,
+@@ -2284,15 +2284,15 @@ static int journal_read_transaction(stru
+ brelse(real_blocks[i]);
+ }
+ cur_dblock =
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ ((trans_offset + get_desc_trans_len(desc) +
+- 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb));
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ 2) % SB_ONDISK_JOURNAL_SIZE(sb));
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1095: setting journal " "start to offset %ld",
+- cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb));
++ cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb));
+
+ /* init starting values for the first transaction, in case this is the last transaction to be replayed. */
+- journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
++ journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
+ journal->j_last_flush_trans_id = trans_id;
+ journal->j_trans_id = trans_id + 1;
+ /* check for trans_id overflow */
+@@ -2357,9 +2357,9 @@ static struct buffer_head *reiserfs_brea
+ **
+ ** On exit, it sets things up so the first transaction will work correctly.
+ */
+-static int journal_read(struct super_block *p_s_sb)
++static int journal_read(struct super_block *sb)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_desc *desc;
+ unsigned int oldest_trans_id = 0;
+ unsigned int oldest_invalid_trans_id = 0;
+@@ -2375,8 +2375,8 @@ static int journal_read(struct super_blo
+ int ret;
+ char b[BDEVNAME_SIZE];
+
+- cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb);
+- reiserfs_info(p_s_sb, "checking transaction log (%s)\n",
++ cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb);
++ reiserfs_info(sb, "checking transaction log (%s)\n",
+ bdevname(journal->j_dev_bd, b));
+ start = get_seconds();
+
+@@ -2384,22 +2384,22 @@ static int journal_read(struct super_blo
+ ** is the first unflushed, and if that transaction is not valid,
+ ** replay is done
+ */
+- journal->j_header_bh = journal_bread(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb)
+- + SB_ONDISK_JOURNAL_SIZE(p_s_sb));
++ journal->j_header_bh = journal_bread(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb)
++ + SB_ONDISK_JOURNAL_SIZE(sb));
+ if (!journal->j_header_bh) {
+ return 1;
+ }
+ jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data);
+ if (le32_to_cpu(jh->j_first_unflushed_offset) <
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)
++ SB_ONDISK_JOURNAL_SIZE(sb)
+ && le32_to_cpu(jh->j_last_flush_trans_id) > 0) {
+ oldest_start =
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ le32_to_cpu(jh->j_first_unflushed_offset);
+ oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1;
+ newest_mount_id = le32_to_cpu(jh->j_mount_id);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1153: found in "
+ "header: first_unflushed_offset %d, last_flushed_trans_id "
+ "%lu", le32_to_cpu(jh->j_first_unflushed_offset),
+@@ -2411,10 +2411,10 @@ static int journal_read(struct super_blo
+ ** through the whole log.
+ */
+ d_bh =
+- journal_bread(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ journal_bread(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ le32_to_cpu(jh->j_first_unflushed_offset));
+- ret = journal_transaction_is_valid(p_s_sb, d_bh, NULL, NULL);
++ ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL);
+ if (!ret) {
+ continue_replay = 0;
+ }
+@@ -2422,8 +2422,8 @@ static int journal_read(struct super_blo
+ goto start_log_replay;
+ }
+
+- if (continue_replay && bdev_read_only(p_s_sb->s_bdev)) {
+- reiserfs_warning(p_s_sb, "clm-2076",
++ if (continue_replay && bdev_read_only(sb->s_bdev)) {
++ reiserfs_warning(sb, "clm-2076",
+ "device is readonly, unable to replay log");
+ return -1;
+ }
+@@ -2433,17 +2433,17 @@ static int journal_read(struct super_blo
+ */
+ while (continue_replay
+ && cur_dblock <
+- (SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb))) {
++ (SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
++ SB_ONDISK_JOURNAL_SIZE(sb))) {
+ /* Note that it is required for blocksize of primary fs device and journal
+ device to be the same */
+ d_bh =
+ reiserfs_breada(journal->j_dev_bd, cur_dblock,
+- p_s_sb->s_blocksize,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb));
++ sb->s_blocksize,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
++ SB_ONDISK_JOURNAL_SIZE(sb));
+ ret =
+- journal_transaction_is_valid(p_s_sb, d_bh,
++ journal_transaction_is_valid(sb, d_bh,
+ &oldest_invalid_trans_id,
+ &newest_mount_id);
+ if (ret == 1) {
+@@ -2452,26 +2452,26 @@ static int journal_read(struct super_blo
+ oldest_trans_id = get_desc_trans_id(desc);
+ oldest_start = d_bh->b_blocknr;
+ newest_mount_id = get_desc_mount_id(desc);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1179: Setting "
+ "oldest_start to offset %llu, trans_id %lu",
+ oldest_start -
+ SB_ONDISK_JOURNAL_1st_BLOCK
+- (p_s_sb), oldest_trans_id);
++ (sb), oldest_trans_id);
+ } else if (oldest_trans_id > get_desc_trans_id(desc)) {
+ /* one we just read was older */
+ oldest_trans_id = get_desc_trans_id(desc);
+ oldest_start = d_bh->b_blocknr;
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1180: Resetting "
+ "oldest_start to offset %lu, trans_id %lu",
+ oldest_start -
+ SB_ONDISK_JOURNAL_1st_BLOCK
+- (p_s_sb), oldest_trans_id);
++ (sb), oldest_trans_id);
+ }
+ if (newest_mount_id < get_desc_mount_id(desc)) {
+ newest_mount_id = get_desc_mount_id(desc);
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1299: Setting "
+ "newest_mount_id to %d",
+ get_desc_mount_id(desc));
+@@ -2486,17 +2486,17 @@ static int journal_read(struct super_blo
+ start_log_replay:
+ cur_dblock = oldest_start;
+ if (oldest_trans_id) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1206: Starting replay "
+ "from offset %llu, trans_id %lu",
+- cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ oldest_trans_id);
+
+ }
+ replay_count = 0;
+ while (continue_replay && oldest_trans_id > 0) {
+ ret =
+- journal_read_transaction(p_s_sb, cur_dblock, oldest_start,
++ journal_read_transaction(sb, cur_dblock, oldest_start,
+ oldest_trans_id, newest_mount_id);
+ if (ret < 0) {
+ return ret;
+@@ -2504,14 +2504,14 @@ static int journal_read(struct super_blo
+ break;
+ }
+ cur_dblock =
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + journal->j_start;
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start;
+ replay_count++;
+ if (cur_dblock == oldest_start)
+ break;
+ }
+
+ if (oldest_trans_id == 0) {
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "journal-1225: No valid " "transactions found");
+ }
+ /* j_start does not get set correctly if we don't replay any transactions.
+@@ -2531,16 +2531,16 @@ static int journal_read(struct super_blo
+ } else {
+ journal->j_mount_id = newest_mount_id + 1;
+ }
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting "
+ "newest_mount_id to %lu", journal->j_mount_id);
+ journal->j_first_unflushed_offset = journal->j_start;
+ if (replay_count > 0) {
+- reiserfs_info(p_s_sb,
++ reiserfs_info(sb,
+ "replayed %d transactions in %lu seconds\n",
+ replay_count, get_seconds() - start);
+ }
+- if (!bdev_read_only(p_s_sb->s_bdev) &&
+- _update_journal_header_block(p_s_sb, journal->j_start,
++ if (!bdev_read_only(sb->s_bdev) &&
++ _update_journal_header_block(sb, journal->j_start,
+ journal->j_last_flush_trans_id)) {
+ /* replay failed, caller must call free_journal_ram and abort
+ ** the mount
+@@ -2565,9 +2565,9 @@ static struct reiserfs_journal_list *all
+ return jl;
+ }
+
+-static void journal_list_init(struct super_block *p_s_sb)
++static void journal_list_init(struct super_block *sb)
+ {
+- SB_JOURNAL(p_s_sb)->j_current_jl = alloc_journal_list(p_s_sb);
++ SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb);
+ }
+
+ static int release_journal_dev(struct super_block *super,
+@@ -2663,28 +2663,28 @@ static int journal_init_dev(struct super
+ */
+ #define REISERFS_STANDARD_BLKSIZE (4096)
+
+-static int check_advise_trans_params(struct super_block *p_s_sb,
++static int check_advise_trans_params(struct super_block *sb,
+ struct reiserfs_journal *journal)
+ {
+ if (journal->j_trans_max) {
+ /* Non-default journal params.
+ Do sanity check for them. */
+ int ratio = 1;
+- if (p_s_sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
+- ratio = REISERFS_STANDARD_BLKSIZE / p_s_sb->s_blocksize;
++ if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
++ ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;
+
+ if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
+ journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb) / journal->j_trans_max <
++ SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
+ JOURNAL_MIN_RATIO) {
+- reiserfs_warning(p_s_sb, "sh-462",
++ reiserfs_warning(sb, "sh-462",
+ "bad transaction max size (%u). "
+ "FSCK?", journal->j_trans_max);
+ return 1;
+ }
+ if (journal->j_max_batch != (journal->j_trans_max) *
+ JOURNAL_MAX_BATCH_DEFAULT/JOURNAL_TRANS_MAX_DEFAULT) {
+- reiserfs_warning(p_s_sb, "sh-463",
++ reiserfs_warning(sb, "sh-463",
+ "bad transaction max batch (%u). "
+ "FSCK?", journal->j_max_batch);
+ return 1;
+@@ -2694,9 +2694,9 @@ static int check_advise_trans_params(str
+ The file system was created by old version
+ of mkreiserfs, so some fields contain zeros,
+ and we need to advise proper values for them */
+- if (p_s_sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
+- reiserfs_warning(p_s_sb, "sh-464", "bad blocksize (%u)",
+- p_s_sb->s_blocksize);
++ if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
++ reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
++ sb->s_blocksize);
+ return 1;
+ }
+ journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
+@@ -2709,10 +2709,10 @@ static int check_advise_trans_params(str
+ /*
+ ** must be called once on fs mount. calls journal_read for you
+ */
+-int journal_init(struct super_block *p_s_sb, const char *j_dev_name,
++int journal_init(struct super_block *sb, const char *j_dev_name,
+ int old_format, unsigned int commit_max_age)
+ {
+- int num_cnodes = SB_ONDISK_JOURNAL_SIZE(p_s_sb) * 2;
++ int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2;
+ struct buffer_head *bhjh;
+ struct reiserfs_super_block *rs;
+ struct reiserfs_journal_header *jh;
+@@ -2720,9 +2720,9 @@ int journal_init(struct super_block *p_s
+ struct reiserfs_journal_list *jl;
+ char b[BDEVNAME_SIZE];
+
+- journal = SB_JOURNAL(p_s_sb) = vmalloc(sizeof(struct reiserfs_journal));
++ journal = SB_JOURNAL(sb) = vmalloc(sizeof(struct reiserfs_journal));
+ if (!journal) {
+- reiserfs_warning(p_s_sb, "journal-1256",
++ reiserfs_warning(sb, "journal-1256",
+ "unable to get memory for journal structure");
+ return 1;
+ }
+@@ -2732,50 +2732,50 @@ int journal_init(struct super_block *p_s
+ INIT_LIST_HEAD(&journal->j_working_list);
+ INIT_LIST_HEAD(&journal->j_journal_list);
+ journal->j_persistent_trans = 0;
+- if (reiserfs_allocate_list_bitmaps(p_s_sb,
++ if (reiserfs_allocate_list_bitmaps(sb,
+ journal->j_list_bitmap,
+- reiserfs_bmap_count(p_s_sb)))
++ reiserfs_bmap_count(sb)))
+ goto free_and_return;
+- allocate_bitmap_nodes(p_s_sb);
++ allocate_bitmap_nodes(sb);
+
+ /* reserved for journal area support */
+- SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) = (old_format ?
++ SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ?
+ REISERFS_OLD_DISK_OFFSET_IN_BYTES
+- / p_s_sb->s_blocksize +
+- reiserfs_bmap_count(p_s_sb) +
++ / sb->s_blocksize +
++ reiserfs_bmap_count(sb) +
+ 1 :
+ REISERFS_DISK_OFFSET_IN_BYTES /
+- p_s_sb->s_blocksize + 2);
++ sb->s_blocksize + 2);
+
+ /* Sanity check to see is the standard journal fitting withing first bitmap
+ (actual for small blocksizes) */
+- if (!SB_ONDISK_JOURNAL_DEVICE(p_s_sb) &&
+- (SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb) +
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb) > p_s_sb->s_blocksize * 8)) {
+- reiserfs_warning(p_s_sb, "journal-1393",
++ if (!SB_ONDISK_JOURNAL_DEVICE(sb) &&
++ (SB_JOURNAL_1st_RESERVED_BLOCK(sb) +
++ SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) {
++ reiserfs_warning(sb, "journal-1393",
+ "journal does not fit for area addressed "
+ "by first of bitmap blocks. It starts at "
+ "%u and its size is %u. Block size %ld",
+- SB_JOURNAL_1st_RESERVED_BLOCK(p_s_sb),
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+- p_s_sb->s_blocksize);
++ SB_JOURNAL_1st_RESERVED_BLOCK(sb),
++ SB_ONDISK_JOURNAL_SIZE(sb),
++ sb->s_blocksize);
+ goto free_and_return;
+ }
+
+- if (journal_init_dev(p_s_sb, journal, j_dev_name) != 0) {
+- reiserfs_warning(p_s_sb, "sh-462",
++ if (journal_init_dev(sb, journal, j_dev_name) != 0) {
++ reiserfs_warning(sb, "sh-462",
+ "unable to initialize jornal device");
+ goto free_and_return;
+ }
+
+- rs = SB_DISK_SUPER_BLOCK(p_s_sb);
++ rs = SB_DISK_SUPER_BLOCK(sb);
+
+ /* read journal header */
+- bhjh = journal_bread(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb));
++ bhjh = journal_bread(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
++ SB_ONDISK_JOURNAL_SIZE(sb));
+ if (!bhjh) {
+- reiserfs_warning(p_s_sb, "sh-459",
++ reiserfs_warning(sb, "sh-459",
+ "unable to read journal header");
+ goto free_and_return;
+ }
+@@ -2785,7 +2785,7 @@ int journal_init(struct super_block *p_s
+ if (is_reiserfs_jr(rs)
+ && (le32_to_cpu(jh->jh_journal.jp_journal_magic) !=
+ sb_jp_journal_magic(rs))) {
+- reiserfs_warning(p_s_sb, "sh-460",
++ reiserfs_warning(sb, "sh-460",
+ "journal header magic %x (device %s) does "
+ "not match to magic found in super block %x",
+ jh->jh_journal.jp_journal_magic,
+@@ -2801,7 +2801,7 @@ int journal_init(struct super_block *p_s
+ le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age);
+ journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE;
+
+- if (check_advise_trans_params(p_s_sb, journal) != 0)
++ if (check_advise_trans_params(sb, journal) != 0)
+ goto free_and_return;
+ journal->j_default_max_commit_age = journal->j_max_commit_age;
+
+@@ -2810,12 +2810,12 @@ int journal_init(struct super_block *p_s
+ journal->j_max_trans_age = commit_max_age;
+ }
+
+- reiserfs_info(p_s_sb, "journal params: device %s, size %u, "
++ reiserfs_info(sb, "journal params: device %s, size %u, "
+ "journal first block %u, max trans len %u, max batch %u, "
+ "max commit age %u, max trans age %u\n",
+ bdevname(journal->j_dev_bd, b),
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb),
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb),
++ SB_ONDISK_JOURNAL_SIZE(sb),
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb),
+ journal->j_trans_max,
+ journal->j_max_batch,
+ journal->j_max_commit_age, journal->j_max_trans_age);
+@@ -2823,7 +2823,7 @@ int journal_init(struct super_block *p_s
+ brelse(bhjh);
+
+ journal->j_list_bitmap_index = 0;
+- journal_list_init(p_s_sb);
++ journal_list_init(sb);
+
+ memset(journal->j_list_hash_table, 0,
+ JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *));
+@@ -2855,7 +2855,7 @@ int journal_init(struct super_block *p_s
+ journal->j_must_wait = 0;
+
+ if (journal->j_cnode_free == 0) {
+- reiserfs_warning(p_s_sb, "journal-2004", "Journal cnode memory "
++ reiserfs_warning(sb, "journal-2004", "Journal cnode memory "
+ "allocation failed (%ld bytes). Journal is "
+ "too large for available memory. Usually "
+ "this is due to a journal that is too large.",
+@@ -2863,16 +2863,16 @@ int journal_init(struct super_block *p_s
+ goto free_and_return;
+ }
+
+- init_journal_hash(p_s_sb);
++ init_journal_hash(sb);
+ jl = journal->j_current_jl;
+- jl->j_list_bitmap = get_list_bitmap(p_s_sb, jl);
++ jl->j_list_bitmap = get_list_bitmap(sb, jl);
+ if (!jl->j_list_bitmap) {
+- reiserfs_warning(p_s_sb, "journal-2005",
++ reiserfs_warning(sb, "journal-2005",
+ "get_list_bitmap failed for journal list 0");
+ goto free_and_return;
+ }
+- if (journal_read(p_s_sb) < 0) {
+- reiserfs_warning(p_s_sb, "reiserfs-2006",
++ if (journal_read(sb) < 0) {
++ reiserfs_warning(sb, "reiserfs-2006",
+ "Replay Failure, unable to mount");
+ goto free_and_return;
+ }
+@@ -2882,10 +2882,10 @@ int journal_init(struct super_block *p_s
+ commit_wq = create_workqueue("reiserfs");
+
+ INIT_DELAYED_WORK(&journal->j_work, flush_async_commits);
+- journal->j_work_sb = p_s_sb;
++ journal->j_work_sb = sb;
+ return 0;
+ free_and_return:
+- free_journal_ram(p_s_sb);
++ free_journal_ram(sb);
+ return 1;
+ }
+
+@@ -3001,37 +3001,37 @@ static void let_transaction_grow(struct
+ ** expect to use in nblocks.
+ */
+ static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks,
++ struct super_block *sb, unsigned long nblocks,
+ int join)
+ {
+ time_t now = get_seconds();
+ unsigned int old_trans_id;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_transaction_handle myth;
+ int sched_count = 0;
+ int retval;
+
+- reiserfs_check_lock_depth(p_s_sb, "journal_begin");
++ reiserfs_check_lock_depth(sb, "journal_begin");
+ BUG_ON(nblocks > journal->j_trans_max);
+
+- PROC_INFO_INC(p_s_sb, journal.journal_being);
++ PROC_INFO_INC(sb, journal.journal_being);
+ /* set here for journal_join */
+ th->t_refcount = 1;
+- th->t_super = p_s_sb;
++ th->t_super = sb;
+
+ relock:
+- lock_journal(p_s_sb);
++ lock_journal(sb);
+ if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ retval = journal->j_errno;
+ goto out_fail;
+ }
+ journal->j_bcount++;
+
+ if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
+- unlock_journal(p_s_sb);
+- reiserfs_wait_on_write_block(p_s_sb);
+- PROC_INFO_INC(p_s_sb, journal.journal_relock_writers);
++ unlock_journal(sb);
++ reiserfs_wait_on_write_block(sb);
++ PROC_INFO_INC(sb, journal.journal_relock_writers);
+ goto relock;
+ }
+ now = get_seconds();
+@@ -3052,7 +3052,7 @@ static int do_journal_begin_r(struct rei
+ || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {
+
+ old_trans_id = journal->j_trans_id;
+- unlock_journal(p_s_sb); /* allow others to finish this transaction */
++ unlock_journal(sb); /* allow others to finish this transaction */
+
+ if (!join && (journal->j_len_alloc + nblocks + 2) >=
+ journal->j_max_batch &&
+@@ -3060,7 +3060,7 @@ static int do_journal_begin_r(struct rei
+ (journal->j_len_alloc * 75)) {
+ if (atomic_read(&journal->j_wcount) > 10) {
+ sched_count++;
+- queue_log_writer(p_s_sb);
++ queue_log_writer(sb);
+ goto relock;
+ }
+ }
+@@ -3070,25 +3070,25 @@ static int do_journal_begin_r(struct rei
+ if (atomic_read(&journal->j_jlock)) {
+ while (journal->j_trans_id == old_trans_id &&
+ atomic_read(&journal->j_jlock)) {
+- queue_log_writer(p_s_sb);
++ queue_log_writer(sb);
+ }
+ goto relock;
+ }
+- retval = journal_join(&myth, p_s_sb, 1);
++ retval = journal_join(&myth, sb, 1);
+ if (retval)
+ goto out_fail;
+
+ /* someone might have ended the transaction while we joined */
+ if (old_trans_id != journal->j_trans_id) {
+- retval = do_journal_end(&myth, p_s_sb, 1, 0);
++ retval = do_journal_end(&myth, sb, 1, 0);
+ } else {
+- retval = do_journal_end(&myth, p_s_sb, 1, COMMIT_NOW);
++ retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
+ }
+
+ if (retval)
+ goto out_fail;
+
+- PROC_INFO_INC(p_s_sb, journal.journal_relock_wcount);
++ PROC_INFO_INC(sb, journal.journal_relock_wcount);
+ goto relock;
+ }
+ /* we are the first writer, set trans_id */
+@@ -3100,7 +3100,7 @@ static int do_journal_begin_r(struct rei
+ th->t_blocks_logged = 0;
+ th->t_blocks_allocated = nblocks;
+ th->t_trans_id = journal->j_trans_id;
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ INIT_LIST_HEAD(&th->t_list);
+ get_fs_excl();
+ return 0;
+@@ -3110,7 +3110,7 @@ static int do_journal_begin_r(struct rei
+ /* Re-set th->t_super, so we can properly keep track of how many
+ * persistent transactions there are. We need to do this so if this
+ * call is part of a failed restart_transaction, we can free it later */
+- th->t_super = p_s_sb;
++ th->t_super = sb;
+ return retval;
+ }
+
+@@ -3161,7 +3161,7 @@ int reiserfs_end_persistent_transaction(
+ }
+
+ static int journal_join(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks)
++ struct super_block *sb, unsigned long nblocks)
+ {
+ struct reiserfs_transaction_handle *cur_th = current->journal_info;
+
+@@ -3170,11 +3170,11 @@ static int journal_join(struct reiserfs_
+ */
+ th->t_handle_save = cur_th;
+ BUG_ON(cur_th && cur_th->t_refcount > 1);
+- return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_JOIN);
++ return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
+ }
+
+ int journal_join_abort(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks)
++ struct super_block *sb, unsigned long nblocks)
+ {
+ struct reiserfs_transaction_handle *cur_th = current->journal_info;
+
+@@ -3183,11 +3183,11 @@ int journal_join_abort(struct reiserfs_t
+ */
+ th->t_handle_save = cur_th;
+ BUG_ON(cur_th && cur_th->t_refcount > 1);
+- return do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_ABORT);
++ return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
+ }
+
+ int journal_begin(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks)
++ struct super_block *sb, unsigned long nblocks)
+ {
+ struct reiserfs_transaction_handle *cur_th = current->journal_info;
+ int ret;
+@@ -3195,12 +3195,12 @@ int journal_begin(struct reiserfs_transa
+ th->t_handle_save = NULL;
+ if (cur_th) {
+ /* we are nesting into the current transaction */
+- if (cur_th->t_super == p_s_sb) {
++ if (cur_th->t_super == sb) {
+ BUG_ON(!cur_th->t_refcount);
+ cur_th->t_refcount++;
+ memcpy(th, cur_th, sizeof(*th));
+ if (th->t_refcount <= 1)
+- reiserfs_warning(p_s_sb, "reiserfs-2005",
++ reiserfs_warning(sb, "reiserfs-2005",
+ "BAD: refcount <= 1, but "
+ "journal_info != 0");
+ return 0;
+@@ -3209,7 +3209,7 @@ int journal_begin(struct reiserfs_transa
+ ** save it and restore on journal_end. This should never
+ ** really happen...
+ */
+- reiserfs_warning(p_s_sb, "clm-2100",
++ reiserfs_warning(sb, "clm-2100",
+ "nesting info a different FS");
+ th->t_handle_save = current->journal_info;
+ current->journal_info = th;
+@@ -3217,7 +3217,7 @@ int journal_begin(struct reiserfs_transa
+ } else {
+ current->journal_info = th;
+ }
+- ret = do_journal_begin_r(th, p_s_sb, nblocks, JBEGIN_REG);
++ ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
+ BUG_ON(current->journal_info != th);
+
+ /* I guess this boils down to being the reciprocal of clm-2100 above.
+@@ -3241,28 +3241,28 @@ int journal_begin(struct reiserfs_transa
+ ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+ */
+ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, struct buffer_head *bh)
++ struct super_block *sb, struct buffer_head *bh)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_cnode *cn = NULL;
+ int count_already_incd = 0;
+ int prepared = 0;
+ BUG_ON(!th->t_trans_id);
+
+- PROC_INFO_INC(p_s_sb, journal.mark_dirty);
++ PROC_INFO_INC(sb, journal.mark_dirty);
+ if (th->t_trans_id != journal->j_trans_id) {
+ reiserfs_panic(th->t_super, "journal-1577",
+ "handle trans id %ld != current trans id %ld",
+ th->t_trans_id, journal->j_trans_id);
+ }
+
+- p_s_sb->s_dirt = 1;
++ sb->s_dirt = 1;
+
+ prepared = test_clear_buffer_journal_prepared(bh);
+ clear_buffer_journal_restore_dirty(bh);
+ /* already in this transaction, we are done */
+ if (buffer_journaled(bh)) {
+- PROC_INFO_INC(p_s_sb, journal.mark_dirty_already);
++ PROC_INFO_INC(sb, journal.mark_dirty_already);
+ return 0;
+ }
+
+@@ -3271,7 +3271,7 @@ int journal_mark_dirty(struct reiserfs_t
+ ** could get to disk too early. NOT GOOD.
+ */
+ if (!prepared || buffer_dirty(bh)) {
+- reiserfs_warning(p_s_sb, "journal-1777",
++ reiserfs_warning(sb, "journal-1777",
+ "buffer %llu bad state "
+ "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
+ (unsigned long long)bh->b_blocknr,
+@@ -3282,7 +3282,7 @@ int journal_mark_dirty(struct reiserfs_t
+ }
+
+ if (atomic_read(&(journal->j_wcount)) <= 0) {
+- reiserfs_warning(p_s_sb, "journal-1409",
++ reiserfs_warning(sb, "journal-1409",
+ "returning because j_wcount was %d",
+ atomic_read(&(journal->j_wcount)));
+ return 1;
+@@ -3298,7 +3298,7 @@ int journal_mark_dirty(struct reiserfs_t
+
+ if (buffer_journal_dirty(bh)) {
+ count_already_incd = 1;
+- PROC_INFO_INC(p_s_sb, journal.mark_dirty_notjournal);
++ PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
+ clear_buffer_journal_dirty(bh);
+ }
+
+@@ -3310,10 +3310,9 @@ int journal_mark_dirty(struct reiserfs_t
+
+ /* now put this guy on the end */
+ if (!cn) {
+- cn = get_cnode(p_s_sb);
++ cn = get_cnode(sb);
+ if (!cn) {
+- reiserfs_panic(p_s_sb, "journal-4",
+- "get_cnode failed!");
++ reiserfs_panic(sb, "journal-4", "get_cnode failed!");
+ }
+
+ if (th->t_blocks_logged == th->t_blocks_allocated) {
+@@ -3325,7 +3324,7 @@ int journal_mark_dirty(struct reiserfs_t
+
+ cn->bh = bh;
+ cn->blocknr = bh->b_blocknr;
+- cn->sb = p_s_sb;
++ cn->sb = sb;
+ cn->jlist = NULL;
+ insert_journal_hash(journal->j_hash_table, cn);
+ if (!count_already_incd) {
+@@ -3346,10 +3345,10 @@ int journal_mark_dirty(struct reiserfs_t
+ }
+
+ int journal_end(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks)
++ struct super_block *sb, unsigned long nblocks)
+ {
+ if (!current->journal_info && th->t_refcount > 1)
+- reiserfs_warning(p_s_sb, "REISER-NESTING",
++ reiserfs_warning(sb, "REISER-NESTING",
+ "th NULL, refcount %d", th->t_refcount);
+
+ if (!th->t_trans_id) {
+@@ -3373,7 +3372,7 @@ int journal_end(struct reiserfs_transact
+ }
+ return 0;
+ } else {
+- return do_journal_end(th, p_s_sb, nblocks, 0);
++ return do_journal_end(th, sb, nblocks, 0);
+ }
+ }
+
+@@ -3384,15 +3383,15 @@ int journal_end(struct reiserfs_transact
+ **
+ ** returns 1 if it cleaned and relsed the buffer. 0 otherwise
+ */
+-static int remove_from_transaction(struct super_block *p_s_sb,
++static int remove_from_transaction(struct super_block *sb,
+ b_blocknr_t blocknr, int already_cleaned)
+ {
+ struct buffer_head *bh;
+ struct reiserfs_journal_cnode *cn;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ int ret = 0;
+
+- cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
++ cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
+ if (!cn || !cn->bh) {
+ return ret;
+ }
+@@ -3410,7 +3409,7 @@ static int remove_from_transaction(struc
+ journal->j_last = cn->prev;
+ }
+ if (bh)
+- remove_journal_hash(p_s_sb, journal->j_hash_table, NULL,
++ remove_journal_hash(sb, journal->j_hash_table, NULL,
+ bh->b_blocknr, 0);
+ clear_buffer_journaled(bh); /* don't log this one */
+
+@@ -3420,14 +3419,14 @@ static int remove_from_transaction(struc
+ clear_buffer_journal_test(bh);
+ put_bh(bh);
+ if (atomic_read(&(bh->b_count)) < 0) {
+- reiserfs_warning(p_s_sb, "journal-1752",
++ reiserfs_warning(sb, "journal-1752",
+ "b_count < 0");
+ }
+ ret = 1;
+ }
+ journal->j_len--;
+ journal->j_len_alloc--;
+- free_cnode(p_s_sb, cn);
++ free_cnode(sb, cn);
+ return ret;
+ }
+
+@@ -3478,19 +3477,19 @@ static int can_dirty(struct reiserfs_jou
+ ** will wait until the current transaction is done/committed before returning
+ */
+ int journal_end_sync(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks)
++ struct super_block *sb, unsigned long nblocks)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+ BUG_ON(!th->t_trans_id);
+ /* you can sync while nested, very, very bad */
+ BUG_ON(th->t_refcount > 1);
+ if (journal->j_len == 0) {
+- reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
++ reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
+ 1);
+- journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
++ journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
+ }
+- return do_journal_end(th, p_s_sb, nblocks, COMMIT_NOW | WAIT);
++ return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
+ }
+
+ /*
+@@ -3500,7 +3499,7 @@ static void flush_async_commits(struct w
+ {
+ struct reiserfs_journal *journal =
+ container_of(work, struct reiserfs_journal, j_work.work);
+- struct super_block *p_s_sb = journal->j_work_sb;
++ struct super_block *sb = journal->j_work_sb;
+ struct reiserfs_journal_list *jl;
+ struct list_head *entry;
+
+@@ -3509,7 +3508,7 @@ static void flush_async_commits(struct w
+ /* last entry is the youngest, commit it and you get everything */
+ entry = journal->j_journal_list.prev;
+ jl = JOURNAL_LIST_ENTRY(entry);
+- flush_commit_list(p_s_sb, jl, 1);
++ flush_commit_list(sb, jl, 1);
+ }
+ unlock_kernel();
+ }
+@@ -3518,11 +3517,11 @@ static void flush_async_commits(struct w
+ ** flushes any old transactions to disk
+ ** ends the current transaction if it is too old
+ */
+-int reiserfs_flush_old_commits(struct super_block *p_s_sb)
++int reiserfs_flush_old_commits(struct super_block *sb)
+ {
+ time_t now;
+ struct reiserfs_transaction_handle th;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+ now = get_seconds();
+ /* safety check so we don't flush while we are replaying the log during
+@@ -3539,20 +3538,20 @@ int reiserfs_flush_old_commits(struct su
+ journal->j_trans_start_time > 0 &&
+ journal->j_len > 0 &&
+ (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
+- if (!journal_join(&th, p_s_sb, 1)) {
+- reiserfs_prepare_for_journal(p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb),
++ if (!journal_join(&th, sb, 1)) {
++ reiserfs_prepare_for_journal(sb,
++ SB_BUFFER_WITH_SB(sb),
+ 1);
+- journal_mark_dirty(&th, p_s_sb,
+- SB_BUFFER_WITH_SB(p_s_sb));
++ journal_mark_dirty(&th, sb,
++ SB_BUFFER_WITH_SB(sb));
+
+ /* we're only being called from kreiserfsd, it makes no sense to do
+ ** an async commit so that kreiserfsd can do it later
+ */
+- do_journal_end(&th, p_s_sb, 1, COMMIT_NOW | WAIT);
++ do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
+ }
+ }
+- return p_s_sb->s_dirt;
++ return sb->s_dirt;
+ }
+
+ /*
+@@ -3567,7 +3566,7 @@ int reiserfs_flush_old_commits(struct su
+ ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
+ */
+ static int check_journal_end(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks,
++ struct super_block *sb, unsigned long nblocks,
+ int flags)
+ {
+
+@@ -3576,7 +3575,7 @@ static int check_journal_end(struct reis
+ int commit_now = flags & COMMIT_NOW;
+ int wait_on_commit = flags & WAIT;
+ struct reiserfs_journal_list *jl;
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+
+ BUG_ON(!th->t_trans_id);
+
+@@ -3615,31 +3614,31 @@ static int check_journal_end(struct reis
+ if (flush) {
+ journal->j_next_full_flush = 1;
+ }
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+
+ /* sleep while the current transaction is still j_jlocked */
+ while (journal->j_trans_id == trans_id) {
+ if (atomic_read(&journal->j_jlock)) {
+- queue_log_writer(p_s_sb);
++ queue_log_writer(sb);
+ } else {
+- lock_journal(p_s_sb);
++ lock_journal(sb);
+ if (journal->j_trans_id == trans_id) {
+ atomic_set(&(journal->j_jlock),
+ 1);
+ }
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ }
+ }
+ BUG_ON(journal->j_trans_id == trans_id);
+
+ if (commit_now
+- && journal_list_still_alive(p_s_sb, trans_id)
++ && journal_list_still_alive(sb, trans_id)
+ && wait_on_commit) {
+- flush_commit_list(p_s_sb, jl, 1);
++ flush_commit_list(sb, jl, 1);
+ }
+ return 0;
+ }
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ return 0;
+ }
+
+@@ -3656,12 +3655,12 @@ static int check_journal_end(struct reis
+ && journal->j_len_alloc < journal->j_max_batch
+ && journal->j_cnode_free > (journal->j_trans_max * 3)) {
+ journal->j_bcount++;
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ return 0;
+ }
+
+- if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
+- reiserfs_panic(p_s_sb, "journal-003",
++ if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
++ reiserfs_panic(sb, "journal-003",
+ "j_start (%ld) is too high",
+ journal->j_start);
+ }
+@@ -3683,16 +3682,16 @@ static int check_journal_end(struct reis
+ ** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
+ */
+ int journal_mark_freed(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, b_blocknr_t blocknr)
++ struct super_block *sb, b_blocknr_t blocknr)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_cnode *cn = NULL;
+ struct buffer_head *bh = NULL;
+ struct reiserfs_list_bitmap *jb = NULL;
+ int cleaned = 0;
+ BUG_ON(!th->t_trans_id);
+
+- cn = get_journal_hash_dev(p_s_sb, journal->j_hash_table, blocknr);
++ cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
+ if (cn && cn->bh) {
+ bh = cn->bh;
+ get_bh(bh);
+@@ -3702,15 +3701,15 @@ int journal_mark_freed(struct reiserfs_t
+ clear_buffer_journal_new(bh);
+ clear_prepared_bits(bh);
+ reiserfs_clean_and_file_buffer(bh);
+- cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
++ cleaned = remove_from_transaction(sb, blocknr, cleaned);
+ } else {
+ /* set the bit for this block in the journal bitmap for this transaction */
+ jb = journal->j_current_jl->j_list_bitmap;
+ if (!jb) {
+- reiserfs_panic(p_s_sb, "journal-1702",
++ reiserfs_panic(sb, "journal-1702",
+ "journal_list_bitmap is NULL");
+ }
+- set_bit_in_list_bitmap(p_s_sb, blocknr, jb);
++ set_bit_in_list_bitmap(sb, blocknr, jb);
+
+ /* Note, the entire while loop is not allowed to schedule. */
+
+@@ -3718,13 +3717,13 @@ int journal_mark_freed(struct reiserfs_t
+ clear_prepared_bits(bh);
+ reiserfs_clean_and_file_buffer(bh);
+ }
+- cleaned = remove_from_transaction(p_s_sb, blocknr, cleaned);
++ cleaned = remove_from_transaction(sb, blocknr, cleaned);
+
+ /* find all older transactions with this block, make sure they don't try to write it out */
+- cn = get_journal_hash_dev(p_s_sb, journal->j_list_hash_table,
++ cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
+ blocknr);
+ while (cn) {
+- if (p_s_sb == cn->sb && blocknr == cn->blocknr) {
++ if (sb == cn->sb && blocknr == cn->blocknr) {
+ set_bit(BLOCK_FREED, &cn->state);
+ if (cn->bh) {
+ if (!cleaned) {
+@@ -3740,7 +3739,7 @@ int journal_mark_freed(struct reiserfs_t
+ put_bh(cn->bh);
+ if (atomic_read
+ (&(cn->bh->b_count)) < 0) {
+- reiserfs_warning(p_s_sb,
++ reiserfs_warning(sb,
+ "journal-2138",
+ "cn->bh->b_count < 0");
+ }
+@@ -3847,18 +3846,18 @@ int reiserfs_commit_for_inode(struct ino
+ return __commit_trans_jl(inode, id, jl);
+ }
+
+-void reiserfs_restore_prepared_buffer(struct super_block *p_s_sb,
++void reiserfs_restore_prepared_buffer(struct super_block *sb,
+ struct buffer_head *bh)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
+- PROC_INFO_INC(p_s_sb, journal.restore_prepared);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
++ PROC_INFO_INC(sb, journal.restore_prepared);
+ if (!bh) {
+ return;
+ }
+ if (test_clear_buffer_journal_restore_dirty(bh) &&
+ buffer_journal_dirty(bh)) {
+ struct reiserfs_journal_cnode *cn;
+- cn = get_journal_hash_dev(p_s_sb,
++ cn = get_journal_hash_dev(sb,
+ journal->j_list_hash_table,
+ bh->b_blocknr);
+ if (cn && can_dirty(cn)) {
+@@ -3877,10 +3876,10 @@ extern struct tree_balance *cur_tb;
+ ** wait on it.
+ **
+ */
+-int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
++int reiserfs_prepare_for_journal(struct super_block *sb,
+ struct buffer_head *bh, int wait)
+ {
+- PROC_INFO_INC(p_s_sb, journal.prepare);
++ PROC_INFO_INC(sb, journal.prepare);
+
+ if (!trylock_buffer(bh)) {
+ if (!wait)
+@@ -3928,10 +3927,10 @@ static void flush_old_journal_lists(stru
+ ** journal lists, etc just won't happen.
+ */
+ static int do_journal_end(struct reiserfs_transaction_handle *th,
+- struct super_block *p_s_sb, unsigned long nblocks,
++ struct super_block *sb, unsigned long nblocks,
+ int flags)
+ {
+- struct reiserfs_journal *journal = SB_JOURNAL(p_s_sb);
++ struct reiserfs_journal *journal = SB_JOURNAL(sb);
+ struct reiserfs_journal_cnode *cn, *next, *jl_cn;
+ struct reiserfs_journal_cnode *last_cn = NULL;
+ struct reiserfs_journal_desc *desc;
+@@ -3961,14 +3960,14 @@ static int do_journal_end(struct reiserf
+
+ put_fs_excl();
+ current->journal_info = th->t_handle_save;
+- reiserfs_check_lock_depth(p_s_sb, "journal end");
++ reiserfs_check_lock_depth(sb, "journal end");
+ if (journal->j_len == 0) {
+- reiserfs_prepare_for_journal(p_s_sb, SB_BUFFER_WITH_SB(p_s_sb),
++ reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
+ 1);
+- journal_mark_dirty(th, p_s_sb, SB_BUFFER_WITH_SB(p_s_sb));
++ journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
+ }
+
+- lock_journal(p_s_sb);
++ lock_journal(sb);
+ if (journal->j_next_full_flush) {
+ flags |= FLUSH_ALL;
+ flush = 1;
+@@ -3981,10 +3980,10 @@ static int do_journal_end(struct reiserf
+ /* check_journal_end locks the journal, and unlocks if it does not return 1
+ ** it tells us if we should continue with the journal_end, or just return
+ */
+- if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
+- p_s_sb->s_dirt = 1;
+- wake_queued_writers(p_s_sb);
+- reiserfs_async_progress_wait(p_s_sb);
++ if (!check_journal_end(th, sb, nblocks, flags)) {
++ sb->s_dirt = 1;
++ wake_queued_writers(sb);
++ reiserfs_async_progress_wait(sb);
+ goto out;
+ }
+
+@@ -4013,8 +4012,8 @@ static int do_journal_end(struct reiserf
+
+ /* setup description block */
+ d_bh =
+- journal_getblk(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ journal_getblk(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ journal->j_start);
+ set_buffer_uptodate(d_bh);
+ desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
+@@ -4023,9 +4022,9 @@ static int do_journal_end(struct reiserf
+ set_desc_trans_id(desc, journal->j_trans_id);
+
+ /* setup commit block. Don't write (keep it clean too) this one until after everyone else is written */
+- c_bh = journal_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ ((journal->j_start + journal->j_len +
+- 1) % SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
++ 1) % SB_ONDISK_JOURNAL_SIZE(sb)));
+ commit = (struct reiserfs_journal_commit *)c_bh->b_data;
+ memset(c_bh->b_data, 0, c_bh->b_size);
+ set_commit_trans_id(commit, journal->j_trans_id);
+@@ -4058,12 +4057,12 @@ static int do_journal_end(struct reiserf
+ ** for each real block, add it to the journal list hash,
+ ** copy into real block index array in the commit or desc block
+ */
+- trans_half = journal_trans_half(p_s_sb->s_blocksize);
++ trans_half = journal_trans_half(sb->s_blocksize);
+ for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
+ if (buffer_journaled(cn->bh)) {
+- jl_cn = get_cnode(p_s_sb);
++ jl_cn = get_cnode(sb);
+ if (!jl_cn) {
+- reiserfs_panic(p_s_sb, "journal-1676",
++ reiserfs_panic(sb, "journal-1676",
+ "get_cnode returned NULL");
+ }
+ if (i == 0) {
+@@ -4079,15 +4078,15 @@ static int do_journal_end(struct reiserf
+ of journal or reserved area */
+
+ if (is_block_in_log_or_reserved_area
+- (p_s_sb, cn->bh->b_blocknr)) {
+- reiserfs_panic(p_s_sb, "journal-2332",
++ (sb, cn->bh->b_blocknr)) {
++ reiserfs_panic(sb, "journal-2332",
+ "Trying to log block %lu, "
+ "which is a log block",
+ cn->bh->b_blocknr);
+ }
+ jl_cn->blocknr = cn->bh->b_blocknr;
+ jl_cn->state = 0;
+- jl_cn->sb = p_s_sb;
++ jl_cn->sb = sb;
+ jl_cn->bh = cn->bh;
+ jl_cn->jlist = jl;
+ insert_journal_hash(journal->j_list_hash_table, jl_cn);
+@@ -4128,11 +4127,11 @@ static int do_journal_end(struct reiserf
+ char *addr;
+ struct page *page;
+ tmp_bh =
+- journal_getblk(p_s_sb,
+- SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
++ journal_getblk(sb,
++ SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
+ ((cur_write_start +
+ jindex) %
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)));
++ SB_ONDISK_JOURNAL_SIZE(sb)));
+ set_buffer_uptodate(tmp_bh);
+ page = cn->bh->b_page;
+ addr = kmap(page);
+@@ -4146,13 +4145,13 @@ static int do_journal_end(struct reiserf
+ clear_buffer_journaled(cn->bh);
+ } else {
+ /* JDirty cleared sometime during transaction. don't log this one */
+- reiserfs_warning(p_s_sb, "journal-2048",
++ reiserfs_warning(sb, "journal-2048",
+ "BAD, buffer in journal hash, "
+ "but not JDirty!");
+ brelse(cn->bh);
+ }
+ next = cn->next;
+- free_cnode(p_s_sb, cn);
++ free_cnode(sb, cn);
+ cn = next;
+ cond_resched();
+ }
+@@ -4162,7 +4161,7 @@ static int do_journal_end(struct reiserf
+ ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
+ */
+
+- journal->j_current_jl = alloc_journal_list(p_s_sb);
++ journal->j_current_jl = alloc_journal_list(sb);
+
+ /* now it is safe to insert this transaction on the main list */
+ list_add_tail(&jl->j_list, &journal->j_journal_list);
+@@ -4173,7 +4172,7 @@ static int do_journal_end(struct reiserf
+ old_start = journal->j_start;
+ journal->j_start =
+ (journal->j_start + journal->j_len +
+- 2) % SB_ONDISK_JOURNAL_SIZE(p_s_sb);
++ 2) % SB_ONDISK_JOURNAL_SIZE(sb);
+ atomic_set(&(journal->j_wcount), 0);
+ journal->j_bcount = 0;
+ journal->j_last = NULL;
+@@ -4188,7 +4187,7 @@ static int do_journal_end(struct reiserf
+ journal->j_len_alloc = 0;
+ journal->j_next_full_flush = 0;
+ journal->j_next_async_flush = 0;
+- init_journal_hash(p_s_sb);
++ init_journal_hash(sb);
+
+ // make sure reiserfs_add_jh sees the new current_jl before we
+ // write out the tails
+@@ -4217,8 +4216,8 @@ static int do_journal_end(struct reiserf
+ ** queue don't wait for this proc to flush journal lists and such.
+ */
+ if (flush) {
+- flush_commit_list(p_s_sb, jl, 1);
+- flush_journal_list(p_s_sb, jl, 1);
++ flush_commit_list(sb, jl, 1);
++ flush_journal_list(sb, jl, 1);
+ } else if (!(jl->j_state & LIST_COMMIT_PENDING))
+ queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
+
+@@ -4232,11 +4231,11 @@ static int do_journal_end(struct reiserf
+ if (journal->j_start <= temp_jl->j_start) {
+ if ((journal->j_start + journal->j_trans_max + 1) >=
+ temp_jl->j_start) {
+- flush_used_journal_lists(p_s_sb, temp_jl);
++ flush_used_journal_lists(sb, temp_jl);
+ goto first_jl;
+ } else if ((journal->j_start +
+ journal->j_trans_max + 1) <
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
++ SB_ONDISK_JOURNAL_SIZE(sb)) {
+ /* if we don't cross into the next transaction and we don't
+ * wrap, there is no way we can overlap any later transactions
+ * break now
+@@ -4245,11 +4244,11 @@ static int do_journal_end(struct reiserf
+ }
+ } else if ((journal->j_start +
+ journal->j_trans_max + 1) >
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)) {
++ SB_ONDISK_JOURNAL_SIZE(sb)) {
+ if (((journal->j_start + journal->j_trans_max + 1) %
+- SB_ONDISK_JOURNAL_SIZE(p_s_sb)) >=
++ SB_ONDISK_JOURNAL_SIZE(sb)) >=
+ temp_jl->j_start) {
+- flush_used_journal_lists(p_s_sb, temp_jl);
++ flush_used_journal_lists(sb, temp_jl);
+ goto first_jl;
+ } else {
+ /* we don't overlap anything from out start to the end of the
+@@ -4260,34 +4259,34 @@ static int do_journal_end(struct reiserf
+ }
+ }
+ }
+- flush_old_journal_lists(p_s_sb);
++ flush_old_journal_lists(sb);
+
+ journal->j_current_jl->j_list_bitmap =
+- get_list_bitmap(p_s_sb, journal->j_current_jl);
++ get_list_bitmap(sb, journal->j_current_jl);
+
+ if (!(journal->j_current_jl->j_list_bitmap)) {
+- reiserfs_panic(p_s_sb, "journal-1996",
++ reiserfs_panic(sb, "journal-1996",
+ "could not get a list bitmap");
+ }
+
+ atomic_set(&(journal->j_jlock), 0);
+- unlock_journal(p_s_sb);
++ unlock_journal(sb);
+ /* wake up any body waiting to join. */
+ clear_bit(J_WRITERS_QUEUED, &journal->j_state);
+ wake_up(&(journal->j_join_wait));
+
+ if (!flush && wait_on_commit &&
+- journal_list_still_alive(p_s_sb, commit_trans_id)) {
+- flush_commit_list(p_s_sb, jl, 1);
++ journal_list_still_alive(sb, commit_trans_id)) {
++ flush_commit_list(sb, jl, 1);
+ }
+ out:
+- reiserfs_check_lock_depth(p_s_sb, "journal end2");
++ reiserfs_check_lock_depth(sb, "journal end2");
+
+ memset(th, 0, sizeof(*th));
+ /* Re-set th->t_super, so we can properly keep track of how many
+ * persistent transactions there are. We need to do this so if this
+ * call is part of a failed restart_transaction, we can free it later */
+- th->t_super = p_s_sb;
++ th->t_super = sb;
+
+ return journal->j_errno;
+ }
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -245,7 +245,7 @@ static const struct reiserfs_key MAX_KEY
+ static inline const struct reiserfs_key *get_lkey(const struct treepath
+ *p_s_chk_path,
+ const struct super_block
+- *p_s_sb)
++ *sb)
+ {
+ int n_position, n_path_offset = p_s_chk_path->path_length;
+ struct buffer_head *p_s_parent;
+@@ -282,14 +282,14 @@ static inline const struct reiserfs_key
+ }
+ /* Return MIN_KEY if we are in the root of the buffer tree. */
+ if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+- b_blocknr == SB_ROOT_BLOCK(p_s_sb))
++ b_blocknr == SB_ROOT_BLOCK(sb))
+ return &MIN_KEY;
+ return &MAX_KEY;
+ }
+
+ /* Get delimiting key of the buffer at the path and its right neighbor. */
+ inline const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
+- const struct super_block *p_s_sb)
++ const struct super_block *sb)
+ {
+ int n_position, n_path_offset = p_s_chk_path->path_length;
+ struct buffer_head *p_s_parent;
+@@ -325,7 +325,7 @@ inline const struct reiserfs_key *get_rk
+ }
+ /* Return MAX_KEY if we are in the root of the buffer tree. */
+ if (PATH_OFFSET_PBUFFER(p_s_chk_path, FIRST_PATH_ELEMENT_OFFSET)->
+- b_blocknr == SB_ROOT_BLOCK(p_s_sb))
++ b_blocknr == SB_ROOT_BLOCK(sb))
+ return &MAX_KEY;
+ return &MIN_KEY;
+ }
+@@ -337,7 +337,7 @@ inline const struct reiserfs_key *get_rk
+ this case get_lkey and get_rkey return a special key which is MIN_KEY or MAX_KEY. */
+ static inline int key_in_buffer(struct treepath *p_s_chk_path, /* Path which should be checked. */
+ const struct cpu_key *p_s_key, /* Key which should be checked. */
+- struct super_block *p_s_sb /* Super block pointer. */
++ struct super_block *sb /* Super block pointer. */
+ )
+ {
+
+@@ -348,11 +348,11 @@ static inline int key_in_buffer(struct t
+ RFALSE(!PATH_PLAST_BUFFER(p_s_chk_path)->b_bdev,
+ "PAP-5060: device must not be NODEV");
+
+- if (comp_keys(get_lkey(p_s_chk_path, p_s_sb), p_s_key) == 1)
++ if (comp_keys(get_lkey(p_s_chk_path, sb), p_s_key) == 1)
+ /* left delimiting key is bigger, that the key we look for */
+ return 0;
+- // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, p_s_sb)) != -1 )
+- if (comp_keys(get_rkey(p_s_chk_path, p_s_sb), p_s_key) != 1)
++ // if ( comp_keys(p_s_key, get_rkey(p_s_chk_path, sb)) != -1 )
++ if (comp_keys(get_rkey(p_s_chk_path, sb), p_s_key) != 1)
+ /* p_s_key must be less than right delimitiing key */
+ return 0;
+ return 1;
+@@ -546,7 +546,7 @@ static void search_by_key_reada(struct s
+ /**************************************************************************
+ * Algorithm SearchByKey *
+ * look for item in the Disk S+Tree by its key *
+- * Input: p_s_sb - super block *
++ * Input: sb - super block *
+ * p_s_key - pointer to the key to search *
+ * Output: ITEM_FOUND, ITEM_NOT_FOUND or IO_ERROR *
+ * p_s_search_path - path from the root to the needed leaf *
+@@ -566,7 +566,7 @@ static void search_by_key_reada(struct s
+ correctness of the top of the path but need not be checked for the
+ correctness of the bottom of the path */
+ /* The function is NOT SCHEDULE-SAFE! */
+-int search_by_key(struct super_block *p_s_sb, const struct cpu_key *p_s_key, /* Key to search. */
++int search_by_key(struct super_block *sb, const struct cpu_key *p_s_key, /* Key to search. */
+ struct treepath *p_s_search_path,/* This structure was
+ allocated and initialized
+ by the calling
+@@ -592,7 +592,7 @@ int search_by_key(struct super_block *p_
+ int n_repeat_counter = 0;
+ #endif
+
+- PROC_INFO_INC(p_s_sb, search_by_key);
++ PROC_INFO_INC(sb, search_by_key);
+
+ /* As we add each node to a path we increase its count. This means that
+ we must be careful to release all nodes in a path before we either
+@@ -605,13 +605,13 @@ int search_by_key(struct super_block *p_
+ /* With each iteration of this loop we search through the items in the
+ current node, and calculate the next current node(next path element)
+ for the next iteration of this loop.. */
+- n_block_number = SB_ROOT_BLOCK(p_s_sb);
++ n_block_number = SB_ROOT_BLOCK(sb);
+ expected_level = -1;
+ while (1) {
+
+ #ifdef CONFIG_REISERFS_CHECK
+ if (!(++n_repeat_counter % 50000))
+- reiserfs_warning(p_s_sb, "PAP-5100",
++ reiserfs_warning(sb, "PAP-5100",
+ "%s: there were %d iterations of "
+ "while loop looking for key %K",
+ current->comm, n_repeat_counter,
+@@ -622,14 +622,14 @@ int search_by_key(struct super_block *p_
+ p_s_last_element =
+ PATH_OFFSET_PELEMENT(p_s_search_path,
+ ++p_s_search_path->path_length);
+- fs_gen = get_generation(p_s_sb);
++ fs_gen = get_generation(sb);
+
+ /* Read the next tree node, and set the last element in the path to
+ have a pointer to it. */
+ if ((p_s_bh = p_s_last_element->pe_buffer =
+- sb_getblk(p_s_sb, n_block_number))) {
++ sb_getblk(sb, n_block_number))) {
+ if (!buffer_uptodate(p_s_bh) && reada_count > 1) {
+- search_by_key_reada(p_s_sb, reada_bh,
++ search_by_key_reada(sb, reada_bh,
+ reada_blocks, reada_count);
+ }
+ ll_rw_block(READ, 1, &p_s_bh);
+@@ -644,25 +644,25 @@ int search_by_key(struct super_block *p_
+ }
+ reada_count = 0;
+ if (expected_level == -1)
+- expected_level = SB_TREE_HEIGHT(p_s_sb);
++ expected_level = SB_TREE_HEIGHT(sb);
+ expected_level--;
+
+ /* It is possible that schedule occurred. We must check whether the key
+ to search is still in the tree rooted from the current buffer. If
+ not then repeat search from the root. */
+- if (fs_changed(fs_gen, p_s_sb) &&
++ if (fs_changed(fs_gen, sb) &&
+ (!B_IS_IN_TREE(p_s_bh) ||
+ B_LEVEL(p_s_bh) != expected_level ||
+- !key_in_buffer(p_s_search_path, p_s_key, p_s_sb))) {
+- PROC_INFO_INC(p_s_sb, search_by_key_fs_changed);
+- PROC_INFO_INC(p_s_sb, search_by_key_restarted);
+- PROC_INFO_INC(p_s_sb,
++ !key_in_buffer(p_s_search_path, p_s_key, sb))) {
++ PROC_INFO_INC(sb, search_by_key_fs_changed);
++ PROC_INFO_INC(sb, search_by_key_restarted);
++ PROC_INFO_INC(sb,
+ sbk_restarted[expected_level - 1]);
+ pathrelse(p_s_search_path);
+
+ /* Get the root block number so that we can repeat the search
+ starting from the root. */
+- n_block_number = SB_ROOT_BLOCK(p_s_sb);
++ n_block_number = SB_ROOT_BLOCK(sb);
+ expected_level = -1;
+ right_neighbor_of_leaf_node = 0;
+
+@@ -674,12 +674,12 @@ int search_by_key(struct super_block *p_
+ equal to the MAX_KEY. Latter case is only possible in
+ "finish_unfinished()" processing during mount. */
+ RFALSE(comp_keys(&MAX_KEY, p_s_key) &&
+- !key_in_buffer(p_s_search_path, p_s_key, p_s_sb),
++ !key_in_buffer(p_s_search_path, p_s_key, sb),
+ "PAP-5130: key is not in the buffer");
+ #ifdef CONFIG_REISERFS_CHECK
+ if (cur_tb) {
+ print_cur_tb("5140");
+- reiserfs_panic(p_s_sb, "PAP-5140",
++ reiserfs_panic(sb, "PAP-5140",
+ "schedule occurred in do_balance!");
+ }
+ #endif
+@@ -687,7 +687,7 @@ int search_by_key(struct super_block *p_
+ // make sure, that the node contents look like a node of
+ // certain level
+ if (!is_tree_node(p_s_bh, expected_level)) {
+- reiserfs_error(p_s_sb, "vs-5150",
++ reiserfs_error(sb, "vs-5150",
+ "invalid format found in block %ld. "
+ "Fsck?", p_s_bh->b_blocknr);
+ pathrelse(p_s_search_path);
+@@ -697,7 +697,7 @@ int search_by_key(struct super_block *p_
+ /* ok, we have acquired next formatted node in the tree */
+ n_node_level = B_LEVEL(p_s_bh);
+
+- PROC_INFO_BH_STAT(p_s_sb, p_s_bh, n_node_level - 1);
++ PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level - 1);
+
+ RFALSE(n_node_level < n_stop_level,
+ "vs-5152: tree level (%d) is less than stop level (%d)",
+@@ -776,7 +776,7 @@ int search_by_key(struct super_block *p_
+ units of directory entries. */
+
+ /* The function is NOT SCHEDULE-SAFE! */
+-int search_for_position_by_key(struct super_block *p_s_sb, /* Pointer to the super block. */
++int search_for_position_by_key(struct super_block *sb, /* Pointer to the super block. */
+ const struct cpu_key *p_cpu_key, /* Key to search (cpu variable) */
+ struct treepath *p_s_search_path /* Filled up by this function. */
+ )
+@@ -789,13 +789,13 @@ int search_for_position_by_key(struct su
+
+ /* If searching for directory entry. */
+ if (is_direntry_cpu_key(p_cpu_key))
+- return search_by_entry_key(p_s_sb, p_cpu_key, p_s_search_path,
++ return search_by_entry_key(sb, p_cpu_key, p_s_search_path,
+ &de);
+
+ /* If not searching for directory entry. */
+
+ /* If item is found. */
+- retval = search_item(p_s_sb, p_cpu_key, p_s_search_path);
++ retval = search_item(sb, p_cpu_key, p_s_search_path);
+ if (retval == IO_ERROR)
+ return retval;
+ if (retval == ITEM_FOUND) {
+@@ -817,7 +817,7 @@ int search_for_position_by_key(struct su
+ p_le_ih =
+ B_N_PITEM_HEAD(PATH_PLAST_BUFFER(p_s_search_path),
+ --PATH_LAST_POSITION(p_s_search_path));
+- n_blk_size = p_s_sb->s_blocksize;
++ n_blk_size = sb->s_blocksize;
+
+ if (comp_short_keys(&(p_le_ih->ih_key), p_cpu_key)) {
+ return FILE_NOT_FOUND;
+@@ -957,7 +957,7 @@ static char prepare_for_delete_or_cut(st
+ int *p_n_cut_size, unsigned long long n_new_file_length /* MAX_KEY_OFFSET in case of delete. */
+ )
+ {
+- struct super_block *p_s_sb = inode->i_sb;
++ struct super_block *sb = inode->i_sb;
+ struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_path);
+ struct buffer_head *p_s_bh = PATH_PLAST_BUFFER(p_s_path);
+
+@@ -986,7 +986,7 @@ static char prepare_for_delete_or_cut(st
+
+ /* Case of an indirect item. */
+ {
+- int blk_size = p_s_sb->s_blocksize;
++ int blk_size = sb->s_blocksize;
+ struct item_head s_ih;
+ int need_re_search;
+ int delete = 0;
+@@ -1023,9 +1023,9 @@ static char prepare_for_delete_or_cut(st
+ block = get_block_num(unfm, 0);
+
+ if (block != 0) {
+- reiserfs_prepare_for_journal(p_s_sb, p_s_bh, 1);
++ reiserfs_prepare_for_journal(sb, p_s_bh, 1);
+ put_block_num(unfm, 0, 0);
+- journal_mark_dirty (th, p_s_sb, p_s_bh);
++ journal_mark_dirty (th, sb, p_s_bh);
+ reiserfs_free_block(th, inode, block, 1);
+ }
+
+@@ -1049,9 +1049,9 @@ static char prepare_for_delete_or_cut(st
+ /* a trick. If the buffer has been logged, this will do nothing. If
+ ** we've broken the loop without logging it, it will restore the
+ ** buffer */
+- reiserfs_restore_prepared_buffer(p_s_sb, p_s_bh);
++ reiserfs_restore_prepared_buffer(sb, p_s_bh);
+ } while (need_re_search &&
+- search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path) == POSITION_FOUND);
++ search_for_position_by_key(sb, p_s_item_key, p_s_path) == POSITION_FOUND);
+ pos_in_item(p_s_path) = pos * UNFM_P_SIZE;
+
+ if (*p_n_cut_size == 0) {
+@@ -1090,7 +1090,7 @@ static int calc_deleted_bytes_number(str
+
+ static void init_tb_struct(struct reiserfs_transaction_handle *th,
+ struct tree_balance *p_s_tb,
+- struct super_block *p_s_sb,
++ struct super_block *sb,
+ struct treepath *p_s_path, int n_size)
+ {
+
+@@ -1098,7 +1098,7 @@ static void init_tb_struct(struct reiser
+
+ memset(p_s_tb, '\0', sizeof(struct tree_balance));
+ p_s_tb->transaction_handle = th;
+- p_s_tb->tb_sb = p_s_sb;
++ p_s_tb->tb_sb = sb;
+ p_s_tb->tb_path = p_s_path;
+ PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
+ PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
+@@ -1147,7 +1147,7 @@ int reiserfs_delete_item(struct reiserfs
+ struct inode *p_s_inode, /* inode is here just to update i_blocks and quotas */
+ struct buffer_head *p_s_un_bh)
+ { /* NULL or unformatted node pointer. */
+- struct super_block *p_s_sb = p_s_inode->i_sb;
++ struct super_block *sb = p_s_inode->i_sb;
+ struct tree_balance s_del_balance;
+ struct item_head s_ih;
+ struct item_head *q_ih;
+@@ -1161,7 +1161,7 @@ int reiserfs_delete_item(struct reiserfs
+
+ BUG_ON(!th->t_trans_id);
+
+- init_tb_struct(th, &s_del_balance, p_s_sb, p_s_path,
++ init_tb_struct(th, &s_del_balance, sb, p_s_path,
+ 0 /*size is unknown */ );
+
+ while (1) {
+@@ -1185,15 +1185,15 @@ int reiserfs_delete_item(struct reiserfs
+ if (n_ret_value != REPEAT_SEARCH)
+ break;
+
+- PROC_INFO_INC(p_s_sb, delete_item_restarted);
++ PROC_INFO_INC(sb, delete_item_restarted);
+
+ // file system changed, repeat search
+ n_ret_value =
+- search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
++ search_for_position_by_key(sb, p_s_item_key, p_s_path);
+ if (n_ret_value == IO_ERROR)
+ break;
+ if (n_ret_value == FILE_NOT_FOUND) {
+- reiserfs_warning(p_s_sb, "vs-5340",
++ reiserfs_warning(sb, "vs-5340",
+ "no items of the file %K found",
+ p_s_item_key);
+ break;
+@@ -1216,8 +1216,8 @@ int reiserfs_delete_item(struct reiserfs
+ ** the unfm node once
+ */
+ if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(q_ih)) {
+- if ((le_ih_k_offset(q_ih) & (p_s_sb->s_blocksize - 1)) == 1) {
+- quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE;
++ if ((le_ih_k_offset(q_ih) & (sb->s_blocksize - 1)) == 1) {
++ quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
+ } else {
+ quota_cut_bytes = 0;
+ }
+@@ -1258,7 +1258,7 @@ int reiserfs_delete_item(struct reiserfs
+ do_balance(&s_del_balance, NULL, NULL, M_DELETE);
+
+ #ifdef REISERQUOTA_DEBUG
+- reiserfs_debug(p_s_sb, REISERFS_DEBUG_CODE,
++ reiserfs_debug(sb, REISERFS_DEBUG_CODE,
+ "reiserquota delete_item(): freeing %u, id=%u type=%c",
+ quota_cut_bytes, p_s_inode->i_uid, head2type(&s_ih));
+ #endif
+@@ -1430,8 +1430,8 @@ static int maybe_indirect_to_direct(stru
+ const struct cpu_key *p_s_item_key,
+ loff_t n_new_file_size, char *p_c_mode)
+ {
+- struct super_block *p_s_sb = p_s_inode->i_sb;
+- int n_block_size = p_s_sb->s_blocksize;
++ struct super_block *sb = p_s_inode->i_sb;
++ int n_block_size = sb->s_blocksize;
+ int cut_bytes;
+ BUG_ON(!th->t_trans_id);
+ BUG_ON(n_new_file_size != p_s_inode->i_size);
+@@ -1509,7 +1509,7 @@ int reiserfs_cut_from_item(struct reiser
+ struct inode *p_s_inode,
+ struct page *page, loff_t n_new_file_size)
+ {
+- struct super_block *p_s_sb = p_s_inode->i_sb;
++ struct super_block *sb = p_s_inode->i_sb;
+ /* Every function which is going to call do_balance must first
+ create a tree_balance structure. Then it must fill up this
+ structure by using the init_tb_struct and fix_nodes functions.
+@@ -1560,7 +1560,7 @@ int reiserfs_cut_from_item(struct reiser
+ /* removing of last unformatted node will change value we
+ have to return to truncate. Save it */
+ retval2 = n_ret_value;
+- /*retval2 = p_s_sb->s_blocksize - (n_new_file_size & (p_s_sb->s_blocksize - 1)); */
++ /*retval2 = sb->s_blocksize - (n_new_file_size & (sb->s_blocksize - 1)); */
+
+ /* So, we have performed the first part of the conversion:
+ inserting the new direct item. Now we are removing the
+@@ -1569,16 +1569,16 @@ int reiserfs_cut_from_item(struct reiser
+ set_cpu_key_k_type(p_s_item_key, TYPE_INDIRECT);
+ p_s_item_key->key_length = 4;
+ n_new_file_size -=
+- (n_new_file_size & (p_s_sb->s_blocksize - 1));
++ (n_new_file_size & (sb->s_blocksize - 1));
+ tail_pos = n_new_file_size;
+ set_cpu_key_k_offset(p_s_item_key, n_new_file_size + 1);
+ if (search_for_position_by_key
+- (p_s_sb, p_s_item_key,
++ (sb, p_s_item_key,
+ p_s_path) == POSITION_NOT_FOUND) {
+ print_block(PATH_PLAST_BUFFER(p_s_path), 3,
+ PATH_LAST_POSITION(p_s_path) - 1,
+ PATH_LAST_POSITION(p_s_path) + 1);
+- reiserfs_panic(p_s_sb, "PAP-5580", "item to "
++ reiserfs_panic(sb, "PAP-5580", "item to "
+ "convert does not exist (%K)",
+ p_s_item_key);
+ }
+@@ -1595,14 +1595,14 @@ int reiserfs_cut_from_item(struct reiser
+ if (n_ret_value != REPEAT_SEARCH)
+ break;
+
+- PROC_INFO_INC(p_s_sb, cut_from_item_restarted);
++ PROC_INFO_INC(sb, cut_from_item_restarted);
+
+ n_ret_value =
+- search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path);
++ search_for_position_by_key(sb, p_s_item_key, p_s_path);
+ if (n_ret_value == POSITION_FOUND)
+ continue;
+
+- reiserfs_warning(p_s_sb, "PAP-5610", "item %K not found",
++ reiserfs_warning(sb, "PAP-5610", "item %K not found",
+ p_s_item_key);
+ unfix_nodes(&s_cut_balance);
+ return (n_ret_value == IO_ERROR) ? -EIO : -ENOENT;
+@@ -1616,7 +1616,7 @@ int reiserfs_cut_from_item(struct reiser
+ indirect_to_direct_roll_back(th, p_s_inode, p_s_path);
+ }
+ if (n_ret_value == NO_DISK_SPACE)
+- reiserfs_warning(p_s_sb, "reiserfs-5092",
++ reiserfs_warning(sb, "reiserfs-5092",
+ "NO_DISK_SPACE");
+ unfix_nodes(&s_cut_balance);
+ return -EIO;
+@@ -1642,11 +1642,11 @@ int reiserfs_cut_from_item(struct reiser
+ p_le_ih = PATH_PITEM_HEAD(s_cut_balance.tb_path);
+ if (!S_ISLNK(p_s_inode->i_mode) && is_direct_le_ih(p_le_ih)) {
+ if (c_mode == M_DELETE &&
+- (le_ih_k_offset(p_le_ih) & (p_s_sb->s_blocksize - 1)) ==
++ (le_ih_k_offset(p_le_ih) & (sb->s_blocksize - 1)) ==
+ 1) {
+ // FIXME: this is to keep 3.5 happy
+ REISERFS_I(p_s_inode)->i_first_direct_byte = U32_MAX;
+- quota_cut_bytes = p_s_sb->s_blocksize + UNFM_P_SIZE;
++ quota_cut_bytes = sb->s_blocksize + UNFM_P_SIZE;
+ } else {
+ quota_cut_bytes = 0;
+ }
+@@ -1659,18 +1659,18 @@ int reiserfs_cut_from_item(struct reiser
+ sure, that we exactly remove last unformatted node pointer
+ of the item */
+ if (!is_indirect_le_ih(le_ih))
+- reiserfs_panic(p_s_sb, "vs-5652",
++ reiserfs_panic(sb, "vs-5652",
+ "item must be indirect %h", le_ih);
+
+ if (c_mode == M_DELETE && ih_item_len(le_ih) != UNFM_P_SIZE)
+- reiserfs_panic(p_s_sb, "vs-5653", "completing "
++ reiserfs_panic(sb, "vs-5653", "completing "
+ "indirect2direct conversion indirect "
+ "item %h being deleted must be of "
+ "4 byte long", le_ih);
+
+ if (c_mode == M_CUT
+ && s_cut_balance.insert_size[0] != -UNFM_P_SIZE) {
+- reiserfs_panic(p_s_sb, "vs-5654", "can not complete "
++ reiserfs_panic(sb, "vs-5654", "can not complete "
+ "indirect2direct conversion of %h "
+ "(CUT, insert_size==%d)",
+ le_ih, s_cut_balance.insert_size[0]);
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -175,9 +175,9 @@ int indirect2direct(struct reiserfs_tran
+ loff_t n_new_file_size, /* New file size. */
+ char *p_c_mode)
+ {
+- struct super_block *p_s_sb = p_s_inode->i_sb;
++ struct super_block *sb = p_s_inode->i_sb;
+ struct item_head s_ih;
+- unsigned long n_block_size = p_s_sb->s_blocksize;
++ unsigned long n_block_size = sb->s_blocksize;
+ char *tail;
+ int tail_len, round_tail_len;
+ loff_t pos, pos1; /* position of first byte of the tail */
+@@ -185,7 +185,7 @@ int indirect2direct(struct reiserfs_tran
+
+ BUG_ON(!th->t_trans_id);
+
+- REISERFS_SB(p_s_sb)->s_indirect2direct++;
++ REISERFS_SB(sb)->s_indirect2direct++;
+
+ *p_c_mode = M_SKIP_BALANCING;
+
+@@ -200,7 +200,7 @@ int indirect2direct(struct reiserfs_tran
+
+ pos =
+ le_ih_k_offset(&s_ih) - 1 + (ih_item_len(&s_ih) / UNFM_P_SIZE -
+- 1) * p_s_sb->s_blocksize;
++ 1) * sb->s_blocksize;
+ pos1 = pos;
+
+ // we are protected by i_mutex. The tail can not disapper, not
+@@ -211,18 +211,18 @@ int indirect2direct(struct reiserfs_tran
+
+ if (path_changed(&s_ih, p_s_path)) {
+ /* re-search indirect item */
+- if (search_for_position_by_key(p_s_sb, p_s_item_key, p_s_path)
++ if (search_for_position_by_key(sb, p_s_item_key, p_s_path)
+ == POSITION_NOT_FOUND)
+- reiserfs_panic(p_s_sb, "PAP-5520",
++ reiserfs_panic(sb, "PAP-5520",
+ "item to be converted %K does not exist",
+ p_s_item_key);
+ copy_item_head(&s_ih, PATH_PITEM_HEAD(p_s_path));
+ #ifdef CONFIG_REISERFS_CHECK
+ pos = le_ih_k_offset(&s_ih) - 1 +
+ (ih_item_len(&s_ih) / UNFM_P_SIZE -
+- 1) * p_s_sb->s_blocksize;
++ 1) * sb->s_blocksize;
+ if (pos != pos1)
+- reiserfs_panic(p_s_sb, "vs-5530", "tail position "
++ reiserfs_panic(sb, "vs-5530", "tail position "
+ "changed while we were reading it");
+ #endif
+ }
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1769,12 +1769,12 @@ int journal_end_sync(struct reiserfs_tra
+ int journal_mark_freed(struct reiserfs_transaction_handle *,
+ struct super_block *, b_blocknr_t blocknr);
+ int journal_transaction_should_end(struct reiserfs_transaction_handle *, int);
+-int reiserfs_in_journal(struct super_block *p_s_sb, unsigned int bmap_nr,
+- int bit_nr, int searchall, b_blocknr_t *next);
++int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr,
++ int bit_nr, int searchall, b_blocknr_t *next);
+ int journal_begin(struct reiserfs_transaction_handle *,
+- struct super_block *p_s_sb, unsigned long);
++ struct super_block *sb, unsigned long);
+ int journal_join_abort(struct reiserfs_transaction_handle *,
+- struct super_block *p_s_sb, unsigned long);
++ struct super_block *sb, unsigned long);
+ void reiserfs_abort_journal(struct super_block *sb, int errno);
+ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
+ int reiserfs_allocate_list_bitmaps(struct super_block *s,
+@@ -1830,11 +1830,11 @@ static inline void copy_key(struct reise
+
+ int comp_items(const struct item_head *stored_ih, const struct treepath *p_s_path);
+ const struct reiserfs_key *get_rkey(const struct treepath *p_s_chk_path,
+- const struct super_block *p_s_sb);
++ const struct super_block *sb);
+ int search_by_key(struct super_block *, const struct cpu_key *,
+ struct treepath *, int);
+ #define search_item(s,key,path) search_by_key (s, key, path, DISK_LEAF_NODE_LEVEL)
+-int search_for_position_by_key(struct super_block *p_s_sb,
++int search_for_position_by_key(struct super_block *sb,
+ const struct cpu_key *p_s_cpu_key,
+ struct treepath *p_s_search_path);
+ extern void decrement_bcount(struct buffer_head *p_s_bh);
+@@ -1978,7 +1978,7 @@ int reiserfs_global_version_in_proc(char
+ #define PROC_INFO_MAX( sb, field, value ) VOID_V
+ #define PROC_INFO_INC( sb, field ) VOID_V
+ #define PROC_INFO_ADD( sb, field, val ) VOID_V
+-#define PROC_INFO_BH_STAT( p_s_sb, p_s_bh, n_node_level ) VOID_V
++#define PROC_INFO_BH_STAT(sb, p_s_bh, n_node_level) VOID_V
+ #endif
+
+ /* dir.c */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH] reiserfs: rename p_s_tb to tb
+
+ This patch is a simple s/p_s_tb/tb/g to the reiserfs code. It is the
+ fourth in a series of patches to rip out some of the awful variable
+ naming in reiserfs.
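+
+ The rename is purely mechanical. A one-liner along the following lines
+ reproduces it (the exact invocation is an assumption for illustration;
+ it is not recorded in this patch):
+
+	sed -i 's/\bp_s_tb\b/tb/g' fs/reiserfs/fix_node.c \
+		fs/reiserfs/stree.c include/linux/reiserfs_fs.h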
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/fix_node.c | 482 ++++++++++++++++++++++----------------------
+ fs/reiserfs/stree.c | 21 +
+ include/linux/reiserfs_fs.h | 2
+ 3 files changed, 254 insertions(+), 251 deletions(-)
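+
+ As a quick sanity check after applying (suggested here, not part of the
+ change itself), the old name should be gone from the touched files:
+
+	grep -n 'p_s_tb' fs/reiserfs/fix_node.c fs/reiserfs/stree.c \
+		include/linux/reiserfs_fs.h
+
+ which should produce no output.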
+
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -749,26 +749,26 @@ else \
+ -1, -1);\
+ }
+
+-static void free_buffers_in_tb(struct tree_balance *p_s_tb)
++static void free_buffers_in_tb(struct tree_balance *tb)
+ {
+ int n_counter;
+
+- pathrelse(p_s_tb->tb_path);
++ pathrelse(tb->tb_path);
+
+ for (n_counter = 0; n_counter < MAX_HEIGHT; n_counter++) {
+- brelse(p_s_tb->L[n_counter]);
+- brelse(p_s_tb->R[n_counter]);
+- brelse(p_s_tb->FL[n_counter]);
+- brelse(p_s_tb->FR[n_counter]);
+- brelse(p_s_tb->CFL[n_counter]);
+- brelse(p_s_tb->CFR[n_counter]);
+-
+- p_s_tb->L[n_counter] = NULL;
+- p_s_tb->R[n_counter] = NULL;
+- p_s_tb->FL[n_counter] = NULL;
+- p_s_tb->FR[n_counter] = NULL;
+- p_s_tb->CFL[n_counter] = NULL;
+- p_s_tb->CFR[n_counter] = NULL;
++ brelse(tb->L[n_counter]);
++ brelse(tb->R[n_counter]);
++ brelse(tb->FL[n_counter]);
++ brelse(tb->FR[n_counter]);
++ brelse(tb->CFL[n_counter]);
++ brelse(tb->CFR[n_counter]);
++
++ tb->L[n_counter] = NULL;
++ tb->R[n_counter] = NULL;
++ tb->FL[n_counter] = NULL;
++ tb->FR[n_counter] = NULL;
++ tb->CFL[n_counter] = NULL;
++ tb->CFR[n_counter] = NULL;
+ }
+ }
+
+@@ -778,14 +778,14 @@ static void free_buffers_in_tb(struct tr
+ * NO_DISK_SPACE - no disk space.
+ */
+ /* The function is NOT SCHEDULE-SAFE! */
+-static int get_empty_nodes(struct tree_balance *p_s_tb, int n_h)
++static int get_empty_nodes(struct tree_balance *tb, int n_h)
+ {
+ struct buffer_head *p_s_new_bh,
+- *p_s_Sh = PATH_H_PBUFFER(p_s_tb->tb_path, n_h);
++ *p_s_Sh = PATH_H_PBUFFER(tb->tb_path, n_h);
+ b_blocknr_t *p_n_blocknr, a_n_blocknrs[MAX_AMOUNT_NEEDED] = { 0, };
+ int n_counter, n_number_of_freeblk, n_amount_needed, /* number of needed empty blocks */
+ n_retval = CARRY_ON;
+- struct super_block *sb = p_s_tb->tb_sb;
++ struct super_block *sb = tb->tb_sb;
+
+ /* number_of_freeblk is the number of empty blocks which have been
+ acquired for use by the balancing algorithm minus the number of
+@@ -803,15 +803,15 @@ static int get_empty_nodes(struct tree_b
+ the analysis or 0 if not restarted, then subtract the amount needed
+ by all of the levels of the tree below n_h. */
+ /* blknum includes S[n_h], so we subtract 1 in this calculation */
+- for (n_counter = 0, n_number_of_freeblk = p_s_tb->cur_blknum;
++ for (n_counter = 0, n_number_of_freeblk = tb->cur_blknum;
+ n_counter < n_h; n_counter++)
+ n_number_of_freeblk -=
+- (p_s_tb->blknum[n_counter]) ? (p_s_tb->blknum[n_counter] -
++ (tb->blknum[n_counter]) ? (tb->blknum[n_counter] -
+ 1) : 0;
+
+ /* Allocate missing empty blocks. */
+ /* if p_s_Sh == 0 then we are getting a new root */
+- n_amount_needed = (p_s_Sh) ? (p_s_tb->blknum[n_h] - 1) : 1;
++ n_amount_needed = (p_s_Sh) ? (tb->blknum[n_h] - 1) : 1;
+ /* Amount_needed = the amount that we need more than the amount that we have. */
+ if (n_amount_needed > n_number_of_freeblk)
+ n_amount_needed -= n_number_of_freeblk;
+@@ -819,7 +819,7 @@ static int get_empty_nodes(struct tree_b
+ return CARRY_ON;
+
+ /* No need to check quota - is not allocated for blocks used for formatted nodes */
+- if (reiserfs_new_form_blocknrs(p_s_tb, a_n_blocknrs,
++ if (reiserfs_new_form_blocknrs(tb, a_n_blocknrs,
+ n_amount_needed) == NO_DISK_SPACE)
+ return NO_DISK_SPACE;
+
+@@ -838,14 +838,14 @@ static int get_empty_nodes(struct tree_b
+ p_s_new_bh);
+
+ /* Put empty buffers into the array. */
+- RFALSE(p_s_tb->FEB[p_s_tb->cur_blknum],
++ RFALSE(tb->FEB[tb->cur_blknum],
+ "PAP-8141: busy slot for new buffer");
+
+ set_buffer_journal_new(p_s_new_bh);
+- p_s_tb->FEB[p_s_tb->cur_blknum++] = p_s_new_bh;
++ tb->FEB[tb->cur_blknum++] = p_s_new_bh;
+ }
+
+- if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(p_s_tb))
++ if (n_retval == CARRY_ON && FILESYSTEM_CHANGED_TB(tb))
+ n_retval = REPEAT_SEARCH;
+
+ return n_retval;
+@@ -896,33 +896,34 @@ static int get_rfree(struct tree_balance
+ }
+
+ /* Check whether left neighbor is in memory. */
+-static int is_left_neighbor_in_cache(struct tree_balance *p_s_tb, int n_h)
++static int is_left_neighbor_in_cache(struct tree_balance *tb, int n_h)
+ {
+ struct buffer_head *p_s_father, *left;
+- struct super_block *sb = p_s_tb->tb_sb;
++ struct super_block *sb = tb->tb_sb;
+ b_blocknr_t n_left_neighbor_blocknr;
+ int n_left_neighbor_position;
+
+- if (!p_s_tb->FL[n_h]) /* Father of the left neighbor does not exist. */
++ /* Father of the left neighbor does not exist. */
++ if (!tb->FL[n_h])
+ return 0;
+
+ /* Calculate father of the node to be balanced. */
+- p_s_father = PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1);
++ p_s_father = PATH_H_PBUFFER(tb->tb_path, n_h + 1);
+
+ RFALSE(!p_s_father ||
+ !B_IS_IN_TREE(p_s_father) ||
+- !B_IS_IN_TREE(p_s_tb->FL[n_h]) ||
++ !B_IS_IN_TREE(tb->FL[n_h]) ||
+ !buffer_uptodate(p_s_father) ||
+- !buffer_uptodate(p_s_tb->FL[n_h]),
++ !buffer_uptodate(tb->FL[n_h]),
+ "vs-8165: F[h] (%b) or FL[h] (%b) is invalid",
+- p_s_father, p_s_tb->FL[n_h]);
++ p_s_father, tb->FL[n_h]);
+
+ /* Get position of the pointer to the left neighbor into the left father. */
+- n_left_neighbor_position = (p_s_father == p_s_tb->FL[n_h]) ?
+- p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb->FL[n_h]);
++ n_left_neighbor_position = (p_s_father == tb->FL[n_h]) ?
++ tb->lkey[n_h] : B_NR_ITEMS(tb->FL[n_h]);
+ /* Get left neighbor block number. */
+ n_left_neighbor_blocknr =
+- B_N_CHILD_NUM(p_s_tb->FL[n_h], n_left_neighbor_position);
++ B_N_CHILD_NUM(tb->FL[n_h], n_left_neighbor_position);
+ /* Look for the left neighbor in the cache. */
+ if ((left = sb_find_get_block(sb, n_left_neighbor_blocknr))) {
+
+@@ -953,14 +954,14 @@ static void decrement_key(struct cpu_key
+ SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+-static int get_far_parent(struct tree_balance *p_s_tb,
++static int get_far_parent(struct tree_balance *tb,
+ int n_h,
+ struct buffer_head **pp_s_father,
+ struct buffer_head **pp_s_com_father, char c_lr_par)
+ {
+ struct buffer_head *p_s_parent;
+ INITIALIZE_PATH(s_path_to_neighbor_father);
+- struct treepath *p_s_path = p_s_tb->tb_path;
++ struct treepath *p_s_path = tb->tb_path;
+ struct cpu_key s_lr_father_key;
+ int n_counter,
+ n_position = INT_MAX,
+@@ -1005,9 +1006,9 @@ static int get_far_parent(struct tree_ba
+ if (n_counter == FIRST_PATH_ELEMENT_OFFSET) {
+ /* Check whether first buffer in the path is the root of the tree. */
+ if (PATH_OFFSET_PBUFFER
+- (p_s_tb->tb_path,
++ (tb->tb_path,
+ FIRST_PATH_ELEMENT_OFFSET)->b_blocknr ==
+- SB_ROOT_BLOCK(p_s_tb->tb_sb)) {
++ SB_ROOT_BLOCK(tb->tb_sb)) {
+ *pp_s_father = *pp_s_com_father = NULL;
+ return CARRY_ON;
+ }
+@@ -1022,7 +1023,7 @@ static int get_far_parent(struct tree_ba
+
+ if (buffer_locked(*pp_s_com_father)) {
+ __wait_on_buffer(*pp_s_com_father);
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ if (FILESYSTEM_CHANGED_TB(tb)) {
+ brelse(*pp_s_com_father);
+ return REPEAT_SEARCH;
+ }
+@@ -1035,9 +1036,9 @@ static int get_far_parent(struct tree_ba
+ le_key2cpu_key(&s_lr_father_key,
+ B_N_PDELIM_KEY(*pp_s_com_father,
+ (c_lr_par ==
+- LEFT_PARENTS) ? (p_s_tb->lkey[n_h - 1] =
++ LEFT_PARENTS) ? (tb->lkey[n_h - 1] =
+ n_position -
+- 1) : (p_s_tb->rkey[n_h -
++ 1) : (tb->rkey[n_h -
+ 1] =
+ n_position)));
+
+@@ -1045,12 +1046,12 @@ static int get_far_parent(struct tree_ba
+ decrement_key(&s_lr_father_key);
+
+ if (search_by_key
+- (p_s_tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
++ (tb->tb_sb, &s_lr_father_key, &s_path_to_neighbor_father,
+ n_h + 1) == IO_ERROR)
+ // path is released
+ return IO_ERROR;
+
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ if (FILESYSTEM_CHANGED_TB(tb)) {
+ pathrelse(&s_path_to_neighbor_father);
+ brelse(*pp_s_com_father);
+ return REPEAT_SEARCH;
+@@ -1075,24 +1076,26 @@ static int get_far_parent(struct tree_ba
+ * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+-static int get_parents(struct tree_balance *p_s_tb, int n_h)
++static int get_parents(struct tree_balance *tb, int n_h)
+ {
+- struct treepath *p_s_path = p_s_tb->tb_path;
++ struct treepath *p_s_path = tb->tb_path;
+ int n_position,
+ n_ret_value,
+- n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
++ n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
+ struct buffer_head *p_s_curf, *p_s_curcf;
+
+ /* Current node is the root of the tree or will be root of the tree */
+ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+ /* The root can not have parents.
+ Release nodes which previously were obtained as parents of the current node neighbors. */
+- brelse(p_s_tb->FL[n_h]);
+- brelse(p_s_tb->CFL[n_h]);
+- brelse(p_s_tb->FR[n_h]);
+- brelse(p_s_tb->CFR[n_h]);
+- p_s_tb->FL[n_h] = p_s_tb->CFL[n_h] = p_s_tb->FR[n_h] =
+- p_s_tb->CFR[n_h] = NULL;
++ brelse(tb->FL[n_h]);
++ brelse(tb->CFL[n_h]);
++ brelse(tb->FR[n_h]);
++ brelse(tb->CFR[n_h]);
++ tb->FL[n_h] = NULL;
++ tb->CFL[n_h] = NULL;
++ tb->FR[n_h] = NULL;
++ tb->CFR[n_h] = NULL;
+ return CARRY_ON;
+ }
+
+@@ -1104,22 +1107,22 @@ static int get_parents(struct tree_balan
+ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+ get_bh(p_s_curf);
+ get_bh(p_s_curf);
+- p_s_tb->lkey[n_h] = n_position - 1;
++ tb->lkey[n_h] = n_position - 1;
+ } else {
+ /* Calculate current parent of L[n_path_offset], which is the left neighbor of the current node.
+ Calculate current common parent of L[n_path_offset] and the current node. Note that
+ CFL[n_path_offset] not equal FL[n_path_offset] and CFL[n_path_offset] not equal F[n_path_offset].
+ Calculate lkey[n_path_offset]. */
+- if ((n_ret_value = get_far_parent(p_s_tb, n_h + 1, &p_s_curf,
++ if ((n_ret_value = get_far_parent(tb, n_h + 1, &p_s_curf,
+ &p_s_curcf,
+ LEFT_PARENTS)) != CARRY_ON)
+ return n_ret_value;
+ }
+
+- brelse(p_s_tb->FL[n_h]);
+- p_s_tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
+- brelse(p_s_tb->CFL[n_h]);
+- p_s_tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */
++ brelse(tb->FL[n_h]);
++ tb->FL[n_h] = p_s_curf; /* New initialization of FL[n_h]. */
++ brelse(tb->CFL[n_h]);
++ tb->CFL[n_h] = p_s_curcf; /* New initialization of CFL[n_h]. */
+
+ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+ (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)),
+@@ -1133,7 +1136,7 @@ static int get_parents(struct tree_balan
+ Calculate current common parent of R[n_h] and current node. Note that CFR[n_h]
+ not equal FR[n_path_offset] and CFR[n_h] not equal F[n_h]. */
+ if ((n_ret_value =
+- get_far_parent(p_s_tb, n_h + 1, &p_s_curf, &p_s_curcf,
++ get_far_parent(tb, n_h + 1, &p_s_curf, &p_s_curcf,
+ RIGHT_PARENTS)) != CARRY_ON)
+ return n_ret_value;
+ } else {
+@@ -1143,14 +1146,16 @@ static int get_parents(struct tree_balan
+ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1);
+ get_bh(p_s_curf);
+ get_bh(p_s_curf);
+- p_s_tb->rkey[n_h] = n_position;
++ tb->rkey[n_h] = n_position;
+ }
+
+- brelse(p_s_tb->FR[n_h]);
+- p_s_tb->FR[n_h] = p_s_curf; /* New initialization of FR[n_path_offset]. */
++ brelse(tb->FR[n_h]);
++ /* New initialization of FR[n_path_offset]. */
++ tb->FR[n_h] = p_s_curf;
+
+- brelse(p_s_tb->CFR[n_h]);
+- p_s_tb->CFR[n_h] = p_s_curcf; /* New initialization of CFR[n_path_offset]. */
++ brelse(tb->CFR[n_h]);
++ /* New initialization of CFR[n_path_offset]. */
++ tb->CFR[n_h] = p_s_curcf;
+
+ RFALSE((p_s_curf && !B_IS_IN_TREE(p_s_curf)) ||
+ (p_s_curcf && !B_IS_IN_TREE(p_s_curcf)),
+@@ -1885,12 +1890,12 @@ static int check_balance(int mode,
+ }
+
+ /* Check whether parent at the path is the really parent of the current node.*/
+-static int get_direct_parent(struct tree_balance *p_s_tb, int n_h)
++static int get_direct_parent(struct tree_balance *tb, int n_h)
+ {
+ struct buffer_head *bh;
+- struct treepath *p_s_path = p_s_tb->tb_path;
++ struct treepath *p_s_path = tb->tb_path;
+ int n_position,
+- n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h);
++ n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h);
+
+ /* We are in the root or in the new root. */
+ if (n_path_offset <= FIRST_PATH_ELEMENT_OFFSET) {
+@@ -1899,7 +1904,7 @@ static int get_direct_parent(struct tree
+ "PAP-8260: invalid offset in the path");
+
+ if (PATH_OFFSET_PBUFFER(p_s_path, FIRST_PATH_ELEMENT_OFFSET)->
+- b_blocknr == SB_ROOT_BLOCK(p_s_tb->tb_sb)) {
++ b_blocknr == SB_ROOT_BLOCK(tb->tb_sb)) {
+ /* Root is not changed. */
+ PATH_OFFSET_PBUFFER(p_s_path, n_path_offset - 1) = NULL;
+ PATH_OFFSET_POSITION(p_s_path, n_path_offset - 1) = 0;
+@@ -1924,7 +1929,7 @@ static int get_direct_parent(struct tree
+
+ if (buffer_locked(bh)) {
+ __wait_on_buffer(bh);
+- if (FILESYSTEM_CHANGED_TB(p_s_tb))
++ if (FILESYSTEM_CHANGED_TB(tb))
+ return REPEAT_SEARCH;
+ }
+
+@@ -1937,85 +1942,86 @@ static int get_direct_parent(struct tree
+ * Returns: SCHEDULE_OCCURRED - schedule occurred while the function worked;
+ * CARRY_ON - schedule didn't occur while the function worked;
+ */
+-static int get_neighbors(struct tree_balance *p_s_tb, int n_h)
++static int get_neighbors(struct tree_balance *tb, int n_h)
+ {
+ int n_child_position,
+- n_path_offset = PATH_H_PATH_OFFSET(p_s_tb->tb_path, n_h + 1);
++ n_path_offset = PATH_H_PATH_OFFSET(tb->tb_path, n_h + 1);
+ unsigned long n_son_number;
+- struct super_block *sb = p_s_tb->tb_sb;
++ struct super_block *sb = tb->tb_sb;
+ struct buffer_head *bh;
+
+ PROC_INFO_INC(sb, get_neighbors[n_h]);
+
+- if (p_s_tb->lnum[n_h]) {
++ if (tb->lnum[n_h]) {
+ /* We need left neighbor to balance S[n_h]. */
+ PROC_INFO_INC(sb, need_l_neighbor[n_h]);
+- bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
++ bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset);
+
+- RFALSE(bh == p_s_tb->FL[n_h] &&
+- !PATH_OFFSET_POSITION(p_s_tb->tb_path, n_path_offset),
++ RFALSE(bh == tb->FL[n_h] &&
++ !PATH_OFFSET_POSITION(tb->tb_path, n_path_offset),
+ "PAP-8270: invalid position in the parent");
+
+ n_child_position =
+ (bh ==
+- p_s_tb->FL[n_h]) ? p_s_tb->lkey[n_h] : B_NR_ITEMS(p_s_tb->
++ tb->FL[n_h]) ? tb->lkey[n_h] : B_NR_ITEMS(tb->
+ FL[n_h]);
+- n_son_number = B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position);
++ n_son_number = B_N_CHILD_NUM(tb->FL[n_h], n_child_position);
+ bh = sb_bread(sb, n_son_number);
+ if (!bh)
+ return IO_ERROR;
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ if (FILESYSTEM_CHANGED_TB(tb)) {
+ brelse(bh);
+ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+
+- RFALSE(!B_IS_IN_TREE(p_s_tb->FL[n_h]) ||
+- n_child_position > B_NR_ITEMS(p_s_tb->FL[n_h]) ||
+- B_N_CHILD_NUM(p_s_tb->FL[n_h], n_child_position) !=
++ RFALSE(!B_IS_IN_TREE(tb->FL[n_h]) ||
++ n_child_position > B_NR_ITEMS(tb->FL[n_h]) ||
++ B_N_CHILD_NUM(tb->FL[n_h], n_child_position) !=
+ bh->b_blocknr, "PAP-8275: invalid parent");
+ RFALSE(!B_IS_IN_TREE(bh), "PAP-8280: invalid child");
+ RFALSE(!n_h &&
+ B_FREE_SPACE(bh) !=
+ MAX_CHILD_SIZE(bh) -
+- dc_size(B_N_CHILD(p_s_tb->FL[0], n_child_position)),
++ dc_size(B_N_CHILD(tb->FL[0], n_child_position)),
+ "PAP-8290: invalid child size of left neighbor");
+
+- brelse(p_s_tb->L[n_h]);
+- p_s_tb->L[n_h] = bh;
++ brelse(tb->L[n_h]);
++ tb->L[n_h] = bh;
+ }
+
+- if (p_s_tb->rnum[n_h]) { /* We need right neighbor to balance S[n_path_offset]. */
++ /* We need right neighbor to balance S[n_path_offset]. */
++ if (tb->rnum[n_h]) {
+ PROC_INFO_INC(sb, need_r_neighbor[n_h]);
+- bh = PATH_OFFSET_PBUFFER(p_s_tb->tb_path, n_path_offset);
++ bh = PATH_OFFSET_PBUFFER(tb->tb_path, n_path_offset);
+
+- RFALSE(bh == p_s_tb->FR[n_h] &&
+- PATH_OFFSET_POSITION(p_s_tb->tb_path,
++ RFALSE(bh == tb->FR[n_h] &&
++ PATH_OFFSET_POSITION(tb->tb_path,
+ n_path_offset) >=
+ B_NR_ITEMS(bh),
+ "PAP-8295: invalid position in the parent");
+
+ n_child_position =
+- (bh == p_s_tb->FR[n_h]) ? p_s_tb->rkey[n_h] + 1 : 0;
+- n_son_number = B_N_CHILD_NUM(p_s_tb->FR[n_h], n_child_position);
++ (bh == tb->FR[n_h]) ? tb->rkey[n_h] + 1 : 0;
++ n_son_number = B_N_CHILD_NUM(tb->FR[n_h], n_child_position);
+ bh = sb_bread(sb, n_son_number);
+ if (!bh)
+ return IO_ERROR;
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ if (FILESYSTEM_CHANGED_TB(tb)) {
+ brelse(bh);
+ PROC_INFO_INC(sb, get_neighbors_restart[n_h]);
+ return REPEAT_SEARCH;
+ }
+- brelse(p_s_tb->R[n_h]);
+- p_s_tb->R[n_h] = bh;
++ brelse(tb->R[n_h]);
++ tb->R[n_h] = bh;
+
+ RFALSE(!n_h
+ && B_FREE_SPACE(bh) !=
+ MAX_CHILD_SIZE(bh) -
+- dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)),
++ dc_size(B_N_CHILD(tb->FR[0], n_child_position)),
+ "PAP-8300: invalid child size of right neighbor (%d != %d - %d)",
+ B_FREE_SPACE(bh), MAX_CHILD_SIZE(bh),
+- dc_size(B_N_CHILD(p_s_tb->FR[0], n_child_position)));
++ dc_size(B_N_CHILD(tb->FR[0], n_child_position)));
+
+ }
+ return CARRY_ON;
+@@ -2139,7 +2145,7 @@ static int clear_all_dirty_bits(struct s
+ return reiserfs_prepare_for_journal(s, bh, 0);
+ }
+
+-static int wait_tb_buffers_until_unlocked(struct tree_balance *p_s_tb)
++static int wait_tb_buffers_until_unlocked(struct tree_balance *tb)
+ {
+ struct buffer_head *locked;
+ #ifdef CONFIG_REISERFS_CHECK
+@@ -2151,95 +2157,94 @@ static int wait_tb_buffers_until_unlocke
+
+ locked = NULL;
+
+- for (i = p_s_tb->tb_path->path_length;
++ for (i = tb->tb_path->path_length;
+ !locked && i > ILLEGAL_PATH_ELEMENT_OFFSET; i--) {
+- if (PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) {
++ if (PATH_OFFSET_PBUFFER(tb->tb_path, i)) {
+ /* if I understand correctly, we can only be sure the last buffer
+ ** in the path is in the tree --clm
+ */
+ #ifdef CONFIG_REISERFS_CHECK
+- if (PATH_PLAST_BUFFER(p_s_tb->tb_path) ==
+- PATH_OFFSET_PBUFFER(p_s_tb->tb_path, i)) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
++ if (PATH_PLAST_BUFFER(tb->tb_path) ==
++ PATH_OFFSET_PBUFFER(tb->tb_path, i))
++ tb_buffer_sanity_check(tb->tb_sb,
+ PATH_OFFSET_PBUFFER
+- (p_s_tb->tb_path,
++ (tb->tb_path,
+ i), "S",
+- p_s_tb->tb_path->
++ tb->tb_path->
+ path_length - i);
+- }
+ #endif
+- if (!clear_all_dirty_bits(p_s_tb->tb_sb,
++ if (!clear_all_dirty_bits(tb->tb_sb,
+ PATH_OFFSET_PBUFFER
+- (p_s_tb->tb_path,
++ (tb->tb_path,
+ i))) {
+ locked =
+- PATH_OFFSET_PBUFFER(p_s_tb->tb_path,
++ PATH_OFFSET_PBUFFER(tb->tb_path,
+ i);
+ }
+ }
+ }
+
+- for (i = 0; !locked && i < MAX_HEIGHT && p_s_tb->insert_size[i];
++ for (i = 0; !locked && i < MAX_HEIGHT && tb->insert_size[i];
+ i++) {
+
+- if (p_s_tb->lnum[i]) {
++ if (tb->lnum[i]) {
+
+- if (p_s_tb->L[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->L[i],
++ if (tb->L[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->L[i],
+ "L", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->L[i]))
+- locked = p_s_tb->L[i];
++ (tb->tb_sb, tb->L[i]))
++ locked = tb->L[i];
+ }
+
+- if (!locked && p_s_tb->FL[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->FL[i],
++ if (!locked && tb->FL[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->FL[i],
+ "FL", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->FL[i]))
+- locked = p_s_tb->FL[i];
++ (tb->tb_sb, tb->FL[i]))
++ locked = tb->FL[i];
+ }
+
+- if (!locked && p_s_tb->CFL[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->CFL[i],
++ if (!locked && tb->CFL[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->CFL[i],
+ "CFL", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->CFL[i]))
+- locked = p_s_tb->CFL[i];
++ (tb->tb_sb, tb->CFL[i]))
++ locked = tb->CFL[i];
+ }
+
+ }
+
+- if (!locked && (p_s_tb->rnum[i])) {
++ if (!locked && (tb->rnum[i])) {
+
+- if (p_s_tb->R[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->R[i],
++ if (tb->R[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->R[i],
+ "R", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->R[i]))
+- locked = p_s_tb->R[i];
++ (tb->tb_sb, tb->R[i]))
++ locked = tb->R[i];
+ }
+
+- if (!locked && p_s_tb->FR[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->FR[i],
++ if (!locked && tb->FR[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->FR[i],
+ "FR", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->FR[i]))
+- locked = p_s_tb->FR[i];
++ (tb->tb_sb, tb->FR[i]))
++ locked = tb->FR[i];
+ }
+
+- if (!locked && p_s_tb->CFR[i]) {
+- tb_buffer_sanity_check(p_s_tb->tb_sb,
+- p_s_tb->CFR[i],
++ if (!locked && tb->CFR[i]) {
++ tb_buffer_sanity_check(tb->tb_sb,
++ tb->CFR[i],
+ "CFR", i);
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->CFR[i]))
+- locked = p_s_tb->CFR[i];
++ (tb->tb_sb, tb->CFR[i]))
++ locked = tb->CFR[i];
+ }
+ }
+ }
+@@ -2252,10 +2257,10 @@ static int wait_tb_buffers_until_unlocke
+ ** --clm
+ */
+ for (i = 0; !locked && i < MAX_FEB_SIZE; i++) {
+- if (p_s_tb->FEB[i]) {
++ if (tb->FEB[i]) {
+ if (!clear_all_dirty_bits
+- (p_s_tb->tb_sb, p_s_tb->FEB[i]))
+- locked = p_s_tb->FEB[i];
++ (tb->tb_sb, tb->FEB[i]))
++ locked = tb->FEB[i];
+ }
+ }
+
+@@ -2263,21 +2268,20 @@ static int wait_tb_buffers_until_unlocke
+ #ifdef CONFIG_REISERFS_CHECK
+ repeat_counter++;
+ if ((repeat_counter % 10000) == 0) {
+- reiserfs_warning(p_s_tb->tb_sb, "reiserfs-8200",
++ reiserfs_warning(tb->tb_sb, "reiserfs-8200",
+ "too many iterations waiting "
+ "for buffer to unlock "
+ "(%b)", locked);
+
+ /* Don't loop forever. Try to recover from possible error. */
+
+- return (FILESYSTEM_CHANGED_TB(p_s_tb)) ?
++ return (FILESYSTEM_CHANGED_TB(tb)) ?
+ REPEAT_SEARCH : CARRY_ON;
+ }
+ #endif
+ __wait_on_buffer(locked);
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ if (FILESYSTEM_CHANGED_TB(tb))
+ return REPEAT_SEARCH;
+- }
+ }
+
+ } while (locked);
+@@ -2307,138 +2311,136 @@ static int wait_tb_buffers_until_unlocke
+ * tb tree_balance structure;
+ * inum item number in S[h];
+ * pos_in_item - comment this if you can
+- * ins_ih & ins_sd are used when inserting
++ * ins_ih item head of item being inserted
++ * data inserted item or data to be pasted
+ * Returns: 1 - schedule occurred while the function worked;
+ * 0 - schedule didn't occur while the function worked;
+ * -1 - if no_disk_space
+ */
+
+-int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted
+- const void *data // inserted item or data to be pasted
+- )
++int fix_nodes(int n_op_mode, struct tree_balance *tb,
++ struct item_head *p_s_ins_ih, const void *data)
+ {
+- int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(p_s_tb->tb_path);
++ int n_ret_value, n_h, n_item_num = PATH_LAST_POSITION(tb->tb_path);
+ int n_pos_in_item;
+
+ /* we set wait_tb_buffers_run when we have to restore any dirty bits cleared
+ ** during wait_tb_buffers_run
+ */
+ int wait_tb_buffers_run = 0;
+- struct buffer_head *p_s_tbS0 = PATH_PLAST_BUFFER(p_s_tb->tb_path);
++ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+
+- ++REISERFS_SB(p_s_tb->tb_sb)->s_fix_nodes;
++ ++REISERFS_SB(tb->tb_sb)->s_fix_nodes;
+
+- n_pos_in_item = p_s_tb->tb_path->pos_in_item;
++ n_pos_in_item = tb->tb_path->pos_in_item;
+
+- p_s_tb->fs_gen = get_generation(p_s_tb->tb_sb);
++ tb->fs_gen = get_generation(tb->tb_sb);
+
+ /* we prepare and log the super here so it will already be in the
+ ** transaction when do_balance needs to change it.
+ ** This way do_balance won't have to schedule when trying to prepare
+ ** the super for logging
+ */
+- reiserfs_prepare_for_journal(p_s_tb->tb_sb,
+- SB_BUFFER_WITH_SB(p_s_tb->tb_sb), 1);
+- journal_mark_dirty(p_s_tb->transaction_handle, p_s_tb->tb_sb,
+- SB_BUFFER_WITH_SB(p_s_tb->tb_sb));
+- if (FILESYSTEM_CHANGED_TB(p_s_tb))
++ reiserfs_prepare_for_journal(tb->tb_sb,
++ SB_BUFFER_WITH_SB(tb->tb_sb), 1);
++ journal_mark_dirty(tb->transaction_handle, tb->tb_sb,
++ SB_BUFFER_WITH_SB(tb->tb_sb));
++ if (FILESYSTEM_CHANGED_TB(tb))
+ return REPEAT_SEARCH;
+
+ /* this is possible during indirect_to_direct conversion */
+- if (buffer_locked(p_s_tbS0)) {
+- __wait_on_buffer(p_s_tbS0);
+- if (FILESYSTEM_CHANGED_TB(p_s_tb))
++ if (buffer_locked(tbS0)) {
++ __wait_on_buffer(tbS0);
++ if (FILESYSTEM_CHANGED_TB(tb))
+ return REPEAT_SEARCH;
+ }
+ #ifdef CONFIG_REISERFS_CHECK
+ if (cur_tb) {
+ print_cur_tb("fix_nodes");
+- reiserfs_panic(p_s_tb->tb_sb, "PAP-8305",
++ reiserfs_panic(tb->tb_sb, "PAP-8305",
+ "there is pending do_balance");
+ }
+
+- if (!buffer_uptodate(p_s_tbS0) || !B_IS_IN_TREE(p_s_tbS0)) {
+- reiserfs_panic(p_s_tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
++ if (!buffer_uptodate(tbS0) || !B_IS_IN_TREE(tbS0))
++ reiserfs_panic(tb->tb_sb, "PAP-8320", "S[0] (%b %z) is "
+ "not uptodate at the beginning of fix_nodes "
+ "or not in tree (mode %c)",
+- p_s_tbS0, p_s_tbS0, n_op_mode);
+- }
++ tbS0, tbS0, n_op_mode);
+
+ /* Check parameters. */
+ switch (n_op_mode) {
+ case M_INSERT:
+- if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(p_s_tbS0))
+- reiserfs_panic(p_s_tb->tb_sb, "PAP-8330", "Incorrect "
++ if (n_item_num <= 0 || n_item_num > B_NR_ITEMS(tbS0))
++ reiserfs_panic(tb->tb_sb, "PAP-8330", "Incorrect "
+ "item number %d (in S0 - %d) in case "
+ "of insert", n_item_num,
+- B_NR_ITEMS(p_s_tbS0));
++ B_NR_ITEMS(tbS0));
+ break;
+ case M_PASTE:
+ case M_DELETE:
+ case M_CUT:
+- if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(p_s_tbS0)) {
+- print_block(p_s_tbS0, 0, -1, -1);
+- reiserfs_panic(p_s_tb->tb_sb, "PAP-8335", "Incorrect "
++ if (n_item_num < 0 || n_item_num >= B_NR_ITEMS(tbS0)) {
++ print_block(tbS0, 0, -1, -1);
++ reiserfs_panic(tb->tb_sb, "PAP-8335", "Incorrect "
+ "item number(%d); mode = %c "
+ "insert_size = %d",
+ n_item_num, n_op_mode,
+- p_s_tb->insert_size[0]);
++ tb->insert_size[0]);
+ }
+ break;
+ default:
+- reiserfs_panic(p_s_tb->tb_sb, "PAP-8340", "Incorrect mode "
++ reiserfs_panic(tb->tb_sb, "PAP-8340", "Incorrect mode "
+ "of operation");
+ }
+ #endif
+
+- if (get_mem_for_virtual_node(p_s_tb) == REPEAT_SEARCH)
++ if (get_mem_for_virtual_node(tb) == REPEAT_SEARCH)
+ // FIXME: maybe -ENOMEM when tb->vn_buf == 0? Now just repeat
+ return REPEAT_SEARCH;
+
+ /* Starting from the leaf level; for all levels n_h of the tree. */
+- for (n_h = 0; n_h < MAX_HEIGHT && p_s_tb->insert_size[n_h]; n_h++) {
+- if ((n_ret_value = get_direct_parent(p_s_tb, n_h)) != CARRY_ON) {
++ for (n_h = 0; n_h < MAX_HEIGHT && tb->insert_size[n_h]; n_h++) {
++ n_ret_value = get_direct_parent(tb, n_h);
++ if (n_ret_value != CARRY_ON)
+ goto repeat;
+- }
+
+- if ((n_ret_value =
+- check_balance(n_op_mode, p_s_tb, n_h, n_item_num,
+- n_pos_in_item, p_s_ins_ih,
+- data)) != CARRY_ON) {
++ n_ret_value = check_balance(n_op_mode, tb, n_h, n_item_num,
++ n_pos_in_item, p_s_ins_ih, data);
++ if (n_ret_value != CARRY_ON) {
+ if (n_ret_value == NO_BALANCING_NEEDED) {
+ /* No balancing for higher levels needed. */
+- if ((n_ret_value =
+- get_neighbors(p_s_tb, n_h)) != CARRY_ON) {
++ n_ret_value = get_neighbors(tb, n_h);
++ if (n_ret_value != CARRY_ON)
+ goto repeat;
+- }
+ if (n_h != MAX_HEIGHT - 1)
+- p_s_tb->insert_size[n_h + 1] = 0;
++ tb->insert_size[n_h + 1] = 0;
+ /* ok, analysis and resource gathering are complete */
+ break;
+ }
+ goto repeat;
+ }
+
+- if ((n_ret_value = get_neighbors(p_s_tb, n_h)) != CARRY_ON) {
++ n_ret_value = get_neighbors(tb, n_h);
++ if (n_ret_value != CARRY_ON)
+ goto repeat;
+- }
+
+- if ((n_ret_value = get_empty_nodes(p_s_tb, n_h)) != CARRY_ON) {
+- goto repeat; /* No disk space, or schedule occurred and
+- analysis may be invalid and needs to be redone. */
+- }
++ /* No disk space, or schedule occurred and analysis may be
++ * invalid and needs to be redone. */
++ n_ret_value = get_empty_nodes(tb, n_h);
++ if (n_ret_value != CARRY_ON)
++ goto repeat;
+
+- if (!PATH_H_PBUFFER(p_s_tb->tb_path, n_h)) {
++ if (!PATH_H_PBUFFER(tb->tb_path, n_h)) {
+ /* We have a positive insert size but no nodes exist on this
+ level, this means that we are creating a new root. */
+
+- RFALSE(p_s_tb->blknum[n_h] != 1,
++ RFALSE(tb->blknum[n_h] != 1,
+ "PAP-8350: creating new empty root");
+
+ if (n_h < MAX_HEIGHT - 1)
+- p_s_tb->insert_size[n_h + 1] = 0;
+- } else if (!PATH_H_PBUFFER(p_s_tb->tb_path, n_h + 1)) {
+- if (p_s_tb->blknum[n_h] > 1) {
++ tb->insert_size[n_h + 1] = 0;
++ } else if (!PATH_H_PBUFFER(tb->tb_path, n_h + 1)) {
++ if (tb->blknum[n_h] > 1) {
+ /* The tree needs to be grown, so this node S[n_h]
+ which is the root node is split into two nodes,
+ and a new node (S[n_h+1]) will be created to
+@@ -2447,19 +2449,20 @@ int fix_nodes(int n_op_mode, struct tree
+ RFALSE(n_h == MAX_HEIGHT - 1,
+ "PAP-8355: attempt to create too high of a tree");
+
+- p_s_tb->insert_size[n_h + 1] =
++ tb->insert_size[n_h + 1] =
+ (DC_SIZE +
+- KEY_SIZE) * (p_s_tb->blknum[n_h] - 1) +
++ KEY_SIZE) * (tb->blknum[n_h] - 1) +
+ DC_SIZE;
+ } else if (n_h < MAX_HEIGHT - 1)
+- p_s_tb->insert_size[n_h + 1] = 0;
++ tb->insert_size[n_h + 1] = 0;
+ } else
+- p_s_tb->insert_size[n_h + 1] =
+- (DC_SIZE + KEY_SIZE) * (p_s_tb->blknum[n_h] - 1);
++ tb->insert_size[n_h + 1] =
++ (DC_SIZE + KEY_SIZE) * (tb->blknum[n_h] - 1);
+ }
+
+- if ((n_ret_value = wait_tb_buffers_until_unlocked(p_s_tb)) == CARRY_ON) {
+- if (FILESYSTEM_CHANGED_TB(p_s_tb)) {
++ n_ret_value = wait_tb_buffers_until_unlocked(tb);
++ if (n_ret_value == CARRY_ON) {
++ if (FILESYSTEM_CHANGED_TB(tb)) {
+ wait_tb_buffers_run = 1;
+ n_ret_value = REPEAT_SEARCH;
+ goto repeat;
+@@ -2482,50 +2485,49 @@ int fix_nodes(int n_op_mode, struct tree
+
+ /* Release path buffers. */
+ if (wait_tb_buffers_run) {
+- pathrelse_and_restore(p_s_tb->tb_sb, p_s_tb->tb_path);
++ pathrelse_and_restore(tb->tb_sb, tb->tb_path);
+ } else {
+- pathrelse(p_s_tb->tb_path);
++ pathrelse(tb->tb_path);
+ }
+ /* brelse all resources collected for balancing */
+ for (i = 0; i < MAX_HEIGHT; i++) {
+ if (wait_tb_buffers_run) {
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->L[i]);
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->R[i]);
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->FL[i]);
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->FR[i]);
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->L[i]);
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->R[i]);
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->FL[i]);
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->FR[i]);
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->
+ CFL[i]);
+- reiserfs_restore_prepared_buffer(p_s_tb->tb_sb,
+- p_s_tb->
++ reiserfs_restore_prepared_buffer(tb->tb_sb,
++ tb->
+ CFR[i]);
+ }
+
+- brelse(p_s_tb->L[i]);
+- brelse(p_s_tb->R[i]);
+- brelse(p_s_tb->FL[i]);
+- brelse(p_s_tb->FR[i]);
+- brelse(p_s_tb->CFL[i]);
+- brelse(p_s_tb->CFR[i]);
+-
+- p_s_tb->L[i] = NULL;
+- p_s_tb->R[i] = NULL;
+- p_s_tb->FL[i] = NULL;
+- p_s_tb->FR[i] = NULL;
+- p_s_tb->CFL[i] = NULL;
+- p_s_tb->CFR[i] = NULL;
++ brelse(tb->L[i]);
++ brelse(tb->R[i]);
++ brelse(tb->FL[i]);
++ brelse(tb->FR[i]);
++ brelse(tb->CFL[i]);
++ brelse(tb->CFR[i]);
++
++ tb->L[i] = NULL;
++ tb->R[i] = NULL;
++ tb->FL[i] = NULL;
++ tb->FR[i] = NULL;
++ tb->CFL[i] = NULL;
++ tb->CFR[i] = NULL;
+ }
+
+ if (wait_tb_buffers_run) {
+ for (i = 0; i < MAX_FEB_SIZE; i++) {
+- if (p_s_tb->FEB[i]) {
++ if (tb->FEB[i])
+ reiserfs_restore_prepared_buffer
+- (p_s_tb->tb_sb, p_s_tb->FEB[i]);
+- }
++ (tb->tb_sb, tb->FEB[i]);
+ }
+ }
+ return n_ret_value;
+@@ -2533,7 +2535,7 @@ int fix_nodes(int n_op_mode, struct tree
+
+ }
+
+-/* Anatoly will probably forgive me renaming p_s_tb to tb. I just
++/* Anatoly will probably forgive me renaming p_s_tb to tb. I just
+ wanted to make lines shorter */
+ void unfix_nodes(struct tree_balance *tb)
+ {
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -1063,17 +1063,17 @@ static char prepare_for_delete_or_cut(st
+ }
+
+ /* Calculate number of bytes which will be deleted or cut during balance */
+-static int calc_deleted_bytes_number(struct tree_balance *p_s_tb, char c_mode)
++static int calc_deleted_bytes_number(struct tree_balance *tb, char c_mode)
+ {
+ int n_del_size;
+- struct item_head *p_le_ih = PATH_PITEM_HEAD(p_s_tb->tb_path);
++ struct item_head *p_le_ih = PATH_PITEM_HEAD(tb->tb_path);
+
+ if (is_statdata_le_ih(p_le_ih))
+ return 0;
+
+ n_del_size =
+ (c_mode ==
+- M_DELETE) ? ih_item_len(p_le_ih) : -p_s_tb->insert_size[0];
++ M_DELETE) ? ih_item_len(p_le_ih) : -tb->insert_size[0];
+ if (is_direntry_le_ih(p_le_ih)) {
+ // return EMPTY_DIR_SIZE; /* We delete empty directories only. */
+ // we can't use EMPTY_DIR_SIZE, as old format dirs have a different
+@@ -1083,25 +1083,26 @@ static int calc_deleted_bytes_number(str
+ }
+
+ if (is_indirect_le_ih(p_le_ih))
+- n_del_size = (n_del_size / UNFM_P_SIZE) * (PATH_PLAST_BUFFER(p_s_tb->tb_path)->b_size); // - get_ih_free_space (p_le_ih);
++ n_del_size = (n_del_size / UNFM_P_SIZE) *
++ (PATH_PLAST_BUFFER(tb->tb_path)->b_size);
+ return n_del_size;
+ }
+
+ static void init_tb_struct(struct reiserfs_transaction_handle *th,
+- struct tree_balance *p_s_tb,
++ struct tree_balance *tb,
+ struct super_block *sb,
+ struct treepath *p_s_path, int n_size)
+ {
+
+ BUG_ON(!th->t_trans_id);
+
+- memset(p_s_tb, '\0', sizeof(struct tree_balance));
+- p_s_tb->transaction_handle = th;
+- p_s_tb->tb_sb = sb;
+- p_s_tb->tb_path = p_s_path;
++ memset(tb, '\0', sizeof(struct tree_balance));
++ tb->transaction_handle = th;
++ tb->tb_sb = sb;
++ tb->tb_path = p_s_path;
+ PATH_OFFSET_PBUFFER(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = NULL;
+ PATH_OFFSET_POSITION(p_s_path, ILLEGAL_PATH_ELEMENT_OFFSET) = 0;
+- p_s_tb->insert_size[0] = n_size;
++ tb->insert_size[0] = n_size;
+ }
+
+ void padd_item(char *item, int total_length, int length)
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -2004,7 +2004,7 @@ extern const struct address_space_operat
+
+ /* fix_nodes.c */
+
+-int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb,
++int fix_nodes(int n_op_mode, struct tree_balance *tb,
+ struct item_head *p_s_ins_ih, const void *);
+ void unfix_nodes(struct tree_balance *);
+
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: add atomic addition of selinux attributes during inode creation
+
+ Some time ago, changes were made so that security inode attributes are
+ written atomically during inode creation. ReiserFS fell behind in this
+ area, but with the reworking of the xattr code, that support is now
+ fairly easy to add.
+
+ The following patch adds support for writing security attributes
+ automatically during inode creation.
+
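+ As a rough sketch of the calling convention (condensed from the
+ reiserfs_create() hunk below, with error handling abbreviated):
+
+	struct reiserfs_security_handle security;
+
+	/* Ask the LSM for the attribute; the return value is the number
+	 * of journal blocks needed to write it, or a negative error. */
+	retval = reiserfs_security_init(dir, inode, &security);
+	if (retval < 0) {
+		drop_new_inode(inode);
+		return retval;
+	}
+	jbegin_count += retval;
+
+	/* reiserfs_new_inode() then writes the attribute within the same
+	 * transaction via reiserfs_security_write(). */
+	retval = reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */,
+				    dentry, inode, &security);
+
+ Per the comment in xattr_security.c, a successful init must eventually
+ be paired with reiserfs_security_free() to release the handle.
+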
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/inode.c | 16 +++++++++++-
+ fs/reiserfs/namei.c | 37 +++++++++++++++++++++++++---
+ fs/reiserfs/xattr_security.c | 54 +++++++++++++++++++++++++++++++++++++++++
+ include/linux/reiserfs_fs.h | 4 ++-
+ include/linux/reiserfs_xattr.h | 32 ++++++++++++++++++++++++
+ 5 files changed, 137 insertions(+), 6 deletions(-)
+
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -1756,7 +1756,8 @@ int reiserfs_new_inode(struct reiserfs_t
+ /* 0 for regular, EMPTY_DIR_SIZE for dirs,
+ strlen (symname) for symlinks) */
+ loff_t i_size, struct dentry *dentry,
+- struct inode *inode)
++ struct inode *inode,
++ struct reiserfs_security_handle *security)
+ {
+ struct super_block *sb;
+ INITIALIZE_PATH(path_to_key);
+@@ -1934,6 +1935,19 @@ int reiserfs_new_inode(struct reiserfs_t
+ } else if (IS_PRIVATE(dir))
+ inode->i_flags |= S_PRIVATE;
+
++ if (security->name) {
++ retval = reiserfs_security_write(th, inode, security);
++ if (retval) {
++ err = retval;
++ reiserfs_check_path(&path_to_key);
++ retval = journal_end(th, th->t_super,
++ th->t_blocks_allocated);
++ if (retval)
++ err = retval;
++ goto out_inserted_sd;
++ }
++ }
++
+ insert_inode_hash(inode);
+ reiserfs_update_sd(th, inode);
+ reiserfs_check_path(&path_to_key);
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -607,6 +607,7 @@ static int reiserfs_create(struct inode
+ 2 * (REISERFS_QUOTA_INIT_BLOCKS(dir->i_sb) +
+ REISERFS_QUOTA_TRANS_BLOCKS(dir->i_sb));
+ struct reiserfs_transaction_handle th;
++ struct reiserfs_security_handle security;
+
+ if (!(inode = new_inode(dir->i_sb))) {
+ return -ENOMEM;
+@@ -614,6 +615,12 @@ static int reiserfs_create(struct inode
+ new_inode_init(inode, dir, mode);
+
+ jbegin_count += reiserfs_cache_default_acl(dir);
++ retval = reiserfs_security_init(dir, inode, &security);
++ if (retval < 0) {
++ drop_new_inode(inode);
++ return retval;
++ }
++ jbegin_count += retval;
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -624,7 +631,7 @@ static int reiserfs_create(struct inode
+
+ retval =
+ reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry,
+- inode);
++ inode, &security);
+ if (retval)
+ goto out_failed;
+
+@@ -662,6 +669,7 @@ static int reiserfs_mknod(struct inode *
+ int retval;
+ struct inode *inode;
+ struct reiserfs_transaction_handle th;
++ struct reiserfs_security_handle security;
+ /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ int jbegin_count =
+ JOURNAL_PER_BALANCE_CNT * 3 +
+@@ -677,6 +685,12 @@ static int reiserfs_mknod(struct inode *
+ new_inode_init(inode, dir, mode);
+
+ jbegin_count += reiserfs_cache_default_acl(dir);
++ retval = reiserfs_security_init(dir, inode, &security);
++ if (retval < 0) {
++ drop_new_inode(inode);
++ return retval;
++ }
++ jbegin_count += retval;
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -687,7 +701,7 @@ static int reiserfs_mknod(struct inode *
+
+ retval =
+ reiserfs_new_inode(&th, dir, mode, NULL, 0 /*i_size */ , dentry,
+- inode);
++ inode, &security);
+ if (retval) {
+ goto out_failed;
+ }
+@@ -728,6 +742,7 @@ static int reiserfs_mkdir(struct inode *
+ int retval;
+ struct inode *inode;
+ struct reiserfs_transaction_handle th;
++ struct reiserfs_security_handle security;
+ /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ int jbegin_count =
+ JOURNAL_PER_BALANCE_CNT * 3 +
+@@ -745,6 +760,12 @@ static int reiserfs_mkdir(struct inode *
+ new_inode_init(inode, dir, mode);
+
+ jbegin_count += reiserfs_cache_default_acl(dir);
++ retval = reiserfs_security_init(dir, inode, &security);
++ if (retval < 0) {
++ drop_new_inode(inode);
++ return retval;
++ }
++ jbegin_count += retval;
+ reiserfs_write_lock(dir->i_sb);
+
+ retval = journal_begin(&th, dir->i_sb, jbegin_count);
+@@ -761,7 +782,7 @@ static int reiserfs_mkdir(struct inode *
+ retval = reiserfs_new_inode(&th, dir, mode, NULL /*symlink */ ,
+ old_format_only(dir->i_sb) ?
+ EMPTY_DIR_SIZE_V1 : EMPTY_DIR_SIZE,
+- dentry, inode);
++ dentry, inode, &security);
+ if (retval) {
+ dir->i_nlink--;
+ goto out_failed;
+@@ -1002,6 +1023,7 @@ static int reiserfs_symlink(struct inode
+ char *name;
+ int item_len;
+ struct reiserfs_transaction_handle th;
++ struct reiserfs_security_handle security;
+ int mode = S_IFLNK | S_IRWXUGO;
+ /* We need blocks for transaction + (user+group)*(quotas for new inode + update of quota for directory owner) */
+ int jbegin_count =
+@@ -1014,6 +1036,13 @@ static int reiserfs_symlink(struct inode
+ }
+ new_inode_init(inode, parent_dir, mode);
+
++ retval = reiserfs_security_init(parent_dir, inode, &security);
++ if (retval < 0) {
++ drop_new_inode(inode);
++ return retval;
++ }
++ jbegin_count += retval;
++
+ reiserfs_write_lock(parent_dir->i_sb);
+ item_len = ROUND_UP(strlen(symname));
+ if (item_len > MAX_DIRECT_ITEM_LEN(parent_dir->i_sb->s_blocksize)) {
+@@ -1040,7 +1069,7 @@ static int reiserfs_symlink(struct inode
+
+ retval =
+ reiserfs_new_inode(&th, parent_dir, mode, name, strlen(symname),
+- dentry, inode);
++ dentry, inode, &security);
+ kfree(name);
+ if (retval) { /* reiserfs_new_inode iputs for us */
+ goto out_failed;
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -4,6 +4,7 @@
+ #include <linux/pagemap.h>
+ #include <linux/xattr.h>
+ #include <linux/reiserfs_xattr.h>
++#include <linux/security.h>
+ #include <asm/uaccess.h>
+
+ static int
+@@ -47,6 +48,59 @@ static size_t security_list(struct inode
+ return len;
+ }
+
++/* Initializes the security context for a new inode and returns the number
++ * of blocks needed for the transaction. If successful, reiserfs_security
++ * must be released using reiserfs_security_free when the caller is done. */
++int reiserfs_security_init(struct inode *dir, struct inode *inode,
++ struct reiserfs_security_handle *sec)
++{
++ int blocks = 0;
++ int error = security_inode_init_security(inode, dir, &sec->name,
++ &sec->value, &sec->length);
++ if (error) {
++ if (error == -EOPNOTSUPP)
++ error = 0;
++
++ sec->name = NULL;
++ sec->value = NULL;
++ sec->length = 0;
++ return error;
++ }
++
++ if (sec->length) {
++ blocks = reiserfs_xattr_jcreate_nblocks(inode) +
++ reiserfs_xattr_nblocks(inode, sec->length);
++ /* We don't want to count the directories twice if we have
++ * a default ACL. */
++ REISERFS_I(inode)->i_flags |= i_has_xattr_dir;
++ }
++ return blocks;
++}
++
++int reiserfs_security_write(struct reiserfs_transaction_handle *th,
++ struct inode *inode,
++ struct reiserfs_security_handle *sec)
++{
++ int error;
++ if (strlen(sec->name) < sizeof(XATTR_SECURITY_PREFIX))
++ return -EINVAL;
++
++ error = reiserfs_xattr_set_handle(th, inode, sec->name, sec->value,
++ sec->length, XATTR_CREATE);
++ if (error == -ENODATA || error == -EOPNOTSUPP)
++ error = 0;
++
++ return error;
++}
++
++void reiserfs_security_free(struct reiserfs_security_handle *sec)
++{
++ kfree(sec->name);
++ kfree(sec->value);
++ sec->name = NULL;
++ sec->value = NULL;
++}
++
+ struct xattr_handler reiserfs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .get = security_get,
+--- a/include/linux/reiserfs_fs.h
++++ b/include/linux/reiserfs_fs.h
+@@ -1915,10 +1915,12 @@ void make_le_item_head(struct item_head
+ loff_t offset, int type, int length, int entry_count);
+ struct inode *reiserfs_iget(struct super_block *s, const struct cpu_key *key);
+
++struct reiserfs_security_handle;
+ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+ struct inode *dir, int mode,
+ const char *symname, loff_t i_size,
+- struct dentry *dentry, struct inode *inode);
++ struct dentry *dentry, struct inode *inode,
++ struct reiserfs_security_handle *security);
+
+ void reiserfs_update_sd_size(struct reiserfs_transaction_handle *th,
+ struct inode *inode, loff_t size);
+--- a/include/linux/reiserfs_xattr.h
++++ b/include/linux/reiserfs_xattr.h
+@@ -15,6 +15,12 @@ struct reiserfs_xattr_header {
+ __le32 h_hash; /* hash of the value */
+ };
+
++struct reiserfs_security_handle {
++ char *name;
++ void *value;
++ size_t length;
++};
++
+ #ifdef __KERNEL__
+
+ #include <linux/init.h>
+@@ -54,6 +60,14 @@ int reiserfs_xattr_set_handle(struct rei
+ extern struct xattr_handler reiserfs_xattr_user_handler;
+ extern struct xattr_handler reiserfs_xattr_trusted_handler;
+ extern struct xattr_handler reiserfs_xattr_security_handler;
++#ifdef CONFIG_REISERFS_FS_SECURITY
++int reiserfs_security_init(struct inode *dir, struct inode *inode,
++ struct reiserfs_security_handle *sec);
++int reiserfs_security_write(struct reiserfs_transaction_handle *th,
++ struct inode *inode,
++ struct reiserfs_security_handle *sec);
++void reiserfs_security_free(struct reiserfs_security_handle *sec);
++#endif
+
+ #define xattr_size(size) ((size) + sizeof(struct reiserfs_xattr_header))
+ static inline loff_t reiserfs_xattr_nblocks(struct inode *inode, loff_t size)
+@@ -109,6 +123,24 @@ static inline void reiserfs_init_xattr_r
+ }
+ #endif /* CONFIG_REISERFS_FS_XATTR */
+
++#ifndef CONFIG_REISERFS_FS_SECURITY
++static inline int reiserfs_security_init(struct inode *dir,
++ struct inode *inode,
++ struct reiserfs_security_handle *sec)
++{
++ return 0;
++}
++static inline int
++reiserfs_security_write(struct reiserfs_transaction_handle *th,
++ struct inode *inode,
++ struct reiserfs_security_handle *sec)
++{
++ return 0;
++}
++static inline void reiserfs_security_free(struct reiserfs_security_handle *sec)
++{}
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _LINUX_REISERFS_XATTR_H */
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: [PATCH 31/40] reiserfs: factor out buffer_info initialization
+
+ This is the first in a series of patches to make balance_leaf() not quite
+ so insane.
+
+ This patch factors out the open-coded initializations of buffer_info
+ structures and defines initializers for the four cases in which they're
+ used.
+
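+ The effect at a typical call site, schematically (taken from the
+ balance_leaf() hunks below):
+
+	/* before: four open-coded assignments at every site */
+	bi.tb = tb;
+	bi.bi_bh = tb->L[0];
+	bi.bi_parent = tb->FL[0];
+	bi.bi_position = get_left_neighbor_position(tb, 0);
+
+	/* after: one helper naming the case being initialized */
+	buffer_info_init_left(tb, &bi);
+
+ The four helpers cover the left and right neighbors, S[0] itself
+ (buffer_info_init_tbS0), and a caller-supplied buffer head with no
+ parent (buffer_info_init_bh).
+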
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/do_balan.c | 175 ++++++++++++++++---------------------------------
+ 1 file changed, 60 insertions(+), 115 deletions(-)
+
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -29,6 +29,43 @@ struct tree_balance *cur_tb = NULL; /* d
+ is interrupting do_balance */
+ #endif
+
++static inline void buffer_info_init_left(struct tree_balance *tb,
++ struct buffer_info *bi)
++{
++ bi->tb = tb;
++ bi->bi_bh = tb->L[0];
++ bi->bi_parent = tb->FL[0];
++ bi->bi_position = get_left_neighbor_position(tb, 0);
++}
++
++static inline void buffer_info_init_right(struct tree_balance *tb,
++ struct buffer_info *bi)
++{
++ bi->tb = tb;
++ bi->bi_bh = tb->R[0];
++ bi->bi_parent = tb->FR[0];
++ bi->bi_position = get_right_neighbor_position(tb, 0);
++}
++
++static inline void buffer_info_init_tbS0(struct tree_balance *tb,
++ struct buffer_info *bi)
++{
++ bi->tb = tb;
++ bi->bi_bh = PATH_PLAST_BUFFER(tb->tb_path);
++ bi->bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
++ bi->bi_position = PATH_H_POSITION(tb->tb_path, 1);
++}
++
++static inline void buffer_info_init_bh(struct tree_balance *tb,
++ struct buffer_info *bi,
++ struct buffer_head *bh)
++{
++ bi->tb = tb;
++ bi->bi_bh = bh;
++ bi->bi_parent = NULL;
++ bi->bi_position = 0;
++}
++
+ inline void do_balance_mark_leaf_dirty(struct tree_balance *tb,
+ struct buffer_head *bh, int flag)
+ {
+@@ -86,6 +123,7 @@ static int balance_leaf_when_delete(stru
+ "PAP-12010: tree can not be empty");
+
+ ih = B_N_PITEM_HEAD(tbS0, item_pos);
++ buffer_info_init_tbS0(tb, &bi);
+
+ /* Delete or truncate the item */
+
+@@ -96,10 +134,6 @@ static int balance_leaf_when_delete(stru
+ "vs-12013: mode Delete, insert size %d, ih to be deleted %h",
+ -tb->insert_size[0], ih);
+
+- bi.tb = tb;
+- bi.bi_bh = tbS0;
+- bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
+- bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
+ leaf_delete_items(&bi, 0, item_pos, 1, -1);
+
+ if (!item_pos && tb->CFL[0]) {
+@@ -121,10 +155,6 @@ static int balance_leaf_when_delete(stru
+ break;
+
+ case M_CUT:{ /* cut item in S[0] */
+- bi.tb = tb;
+- bi.bi_bh = tbS0;
+- bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
+- bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
+ if (is_direntry_le_ih(ih)) {
+
+ /* UFS unlink semantics are such that you can only delete one directory entry at a time. */
+@@ -325,11 +355,7 @@ static int balance_leaf(struct tree_bala
+ ih_item_len(ih));
+
+ /* Insert new item into L[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->L[0];
+- bi.bi_parent = tb->FL[0];
+- bi.bi_position =
+- get_left_neighbor_position(tb, 0);
++ buffer_info_init_left(tb, &bi);
+ leaf_insert_into_buf(&bi,
+ n + item_pos -
+ ret_val, ih, body,
+@@ -369,11 +395,7 @@ static int balance_leaf(struct tree_bala
+ leaf_shift_left(tb, tb->lnum[0] - 1,
+ tb->lbytes);
+ /* Insert new item into L[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->L[0];
+- bi.bi_parent = tb->FL[0];
+- bi.bi_position =
+- get_left_neighbor_position(tb, 0);
++ buffer_info_init_left(tb, &bi);
+ leaf_insert_into_buf(&bi,
+ n + item_pos -
+ ret_val, ih, body,
+@@ -429,13 +451,7 @@ static int balance_leaf(struct tree_bala
+ }
+
+ /* Append given directory entry to directory item */
+- bi.tb = tb;
+- bi.bi_bh = tb->L[0];
+- bi.bi_parent =
+- tb->FL[0];
+- bi.bi_position =
+- get_left_neighbor_position
+- (tb, 0);
++ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer
+ (&bi,
+ n + item_pos -
+@@ -523,13 +539,7 @@ static int balance_leaf(struct tree_bala
+ (tbS0,
+ item_pos)));
+ /* Append to body of item in L[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->L[0];
+- bi.bi_parent =
+- tb->FL[0];
+- bi.bi_position =
+- get_left_neighbor_position
+- (tb, 0);
++ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer
+ (&bi,
+ n + item_pos -
+@@ -680,11 +690,7 @@ static int balance_leaf(struct tree_bala
+ leaf_shift_left(tb, tb->lnum[0],
+ tb->lbytes);
+ /* Append to body of item in L[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->L[0];
+- bi.bi_parent = tb->FL[0];
+- bi.bi_position =
+- get_left_neighbor_position(tb, 0);
++ buffer_info_init_left(tb, &bi);
+ leaf_paste_in_buffer(&bi,
+ n + item_pos -
+ ret_val,
+@@ -776,11 +782,7 @@ static int balance_leaf(struct tree_bala
+ set_le_ih_k_offset(ih, offset);
+ put_ih_item_len(ih, tb->rbytes);
+ /* Insert part of the item into R[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->R[0];
+- bi.bi_parent = tb->FR[0];
+- bi.bi_position =
+- get_right_neighbor_position(tb, 0);
++ buffer_info_init_right(tb, &bi);
+ if ((old_len - tb->rbytes) > zeros_num) {
+ r_zeros_number = 0;
+ r_body =
+@@ -817,11 +819,7 @@ static int balance_leaf(struct tree_bala
+ tb->rnum[0] - 1,
+ tb->rbytes);
+ /* Insert new item into R[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->R[0];
+- bi.bi_parent = tb->FR[0];
+- bi.bi_position =
+- get_right_neighbor_position(tb, 0);
++ buffer_info_init_right(tb, &bi);
+ leaf_insert_into_buf(&bi,
+ item_pos - n +
+ tb->rnum[0] - 1,
+@@ -881,13 +879,7 @@ static int balance_leaf(struct tree_bala
+ pos_in_item -
+ entry_count +
+ tb->rbytes - 1;
+- bi.tb = tb;
+- bi.bi_bh = tb->R[0];
+- bi.bi_parent =
+- tb->FR[0];
+- bi.bi_position =
+- get_right_neighbor_position
+- (tb, 0);
++ buffer_info_init_right(tb, &bi);
+ leaf_paste_in_buffer
+ (&bi, 0,
+ paste_entry_position,
+@@ -1018,12 +1010,7 @@ static int balance_leaf(struct tree_bala
+ (tb, tb->CFR[0], 0);
+
+ /* Append part of body into R[0] */
+- bi.tb = tb;
+- bi.bi_bh = tb->R[0];
+- bi.bi_parent = tb->FR[0];
+- bi.bi_position =
+- get_right_neighbor_position
+- (tb, 0);
++ buffer_info_init_right(tb, &bi);
+ if (n_rem > zeros_num) {
+ r_zeros_number = 0;
+ r_body =
+@@ -1070,12 +1057,7 @@ static int balance_leaf(struct tree_bala
+ tb->rbytes);
+ /* append item in R[0] */
+ if (pos_in_item >= 0) {
+- bi.tb = tb;
+- bi.bi_bh = tb->R[0];
+- bi.bi_parent = tb->FR[0];
+- bi.bi_position =
+- get_right_neighbor_position
+- (tb, 0);
++ buffer_info_init_right(tb, &bi);
+ leaf_paste_in_buffer(&bi,
+ item_pos -
+ n +
+@@ -1231,10 +1213,7 @@ static int balance_leaf(struct tree_bala
+ put_ih_item_len(ih, sbytes[i]);
+
+ /* Insert part of the item into S_new[i] before 0-th item */
+- bi.tb = tb;
+- bi.bi_bh = S_new[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
++ buffer_info_init_bh(tb, &bi, S_new[i]);
+
+ if ((old_len - sbytes[i]) > zeros_num) {
+ r_zeros_number = 0;
+@@ -1266,10 +1245,7 @@ static int balance_leaf(struct tree_bala
+ S_new[i]);
+
+ /* Insert new item into S_new[i] */
+- bi.tb = tb;
+- bi.bi_bh = S_new[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
++ buffer_info_init_bh(tb, &bi, S_new[i]);
+ leaf_insert_into_buf(&bi,
+ item_pos - n +
+ snum[i] - 1, ih,
+@@ -1326,10 +1302,7 @@ static int balance_leaf(struct tree_bala
+ sbytes[i] - 1,
+ S_new[i]);
+ /* Paste given directory entry to directory item */
+- bi.tb = tb;
+- bi.bi_bh = S_new[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
++ buffer_info_init_bh(tb, &bi, S_new[i]);
+ leaf_paste_in_buffer
+ (&bi, 0,
+ pos_in_item -
+@@ -1399,11 +1372,7 @@ static int balance_leaf(struct tree_bala
+ if (n_rem < 0)
+ n_rem = 0;
+ /* Append part of body into S_new[0] */
+- bi.tb = tb;
+- bi.bi_bh = S_new[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
+-
++ buffer_info_init_bh(tb, &bi, S_new[i]);
+ if (n_rem > zeros_num) {
+ r_zeros_number = 0;
+ r_body =
+@@ -1490,10 +1459,7 @@ static int balance_leaf(struct tree_bala
+ leaf_mi);
+
+ /* paste into item */
+- bi.tb = tb;
+- bi.bi_bh = S_new[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
++ buffer_info_init_bh(tb, &bi, S_new[i]);
+ leaf_paste_in_buffer(&bi,
+ item_pos - n +
+ snum[i],
+@@ -1560,10 +1526,7 @@ static int balance_leaf(struct tree_bala
+
+ switch (flag) {
+ case M_INSERT: /* insert item into S[0] */
+- bi.tb = tb;
+- bi.bi_bh = tbS0;
+- bi.bi_parent = PATH_H_PPARENT(tb->tb_path, 0);
+- bi.bi_position = PATH_H_POSITION(tb->tb_path, 1);
++ buffer_info_init_tbS0(tb, &bi);
+ leaf_insert_into_buf(&bi, item_pos, ih, body,
+ zeros_num);
+
+@@ -1590,14 +1553,7 @@ static int balance_leaf(struct tree_bala
+ "PAP-12260: insert_size is 0 already");
+
+ /* prepare space */
+- bi.tb = tb;
+- bi.bi_bh = tbS0;
+- bi.bi_parent =
+- PATH_H_PPARENT(tb->tb_path,
+- 0);
+- bi.bi_position =
+- PATH_H_POSITION(tb->tb_path,
+- 1);
++ buffer_info_init_tbS0(tb, &bi);
+ leaf_paste_in_buffer(&bi,
+ item_pos,
+ pos_in_item,
+@@ -1645,14 +1601,7 @@ static int balance_leaf(struct tree_bala
+ RFALSE(tb->insert_size[0] <= 0,
+ "PAP-12275: insert size must not be %d",
+ tb->insert_size[0]);
+- bi.tb = tb;
+- bi.bi_bh = tbS0;
+- bi.bi_parent =
+- PATH_H_PPARENT(tb->tb_path,
+- 0);
+- bi.bi_position =
+- PATH_H_POSITION(tb->tb_path,
+- 1);
++ buffer_info_init_tbS0(tb, &bi);
+ leaf_paste_in_buffer(&bi,
+ item_pos,
+ pos_in_item,
+@@ -1725,7 +1674,6 @@ void make_empty_node(struct buffer_info
+ struct buffer_head *get_FEB(struct tree_balance *tb)
+ {
+ int i;
+- struct buffer_head *first_b;
+ struct buffer_info bi;
+
+ for (i = 0; i < MAX_FEB_SIZE; i++)
+@@ -1735,16 +1683,13 @@ struct buffer_head *get_FEB(struct tree_
+ if (i == MAX_FEB_SIZE)
+ reiserfs_panic(tb->tb_sb, "vs-12300", "FEB list is empty");
+
+- bi.tb = tb;
+- bi.bi_bh = first_b = tb->FEB[i];
+- bi.bi_parent = NULL;
+- bi.bi_position = 0;
++ buffer_info_init_bh(tb, &bi, tb->FEB[i]);
+ make_empty_node(&bi);
+- set_buffer_uptodate(first_b);
++ set_buffer_uptodate(tb->FEB[i]);
++ tb->used[i] = tb->FEB[i];
+ tb->FEB[i] = NULL;
+- tb->used[i] = first_b;
+
+- return (first_b);
++ return tb->used[i];
+ }
+
+ /* This is now used because reiserfs_free_block has to be able to
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: simplify xattr internal file lookups/opens
+
+ The xattr file open/lookup code is needlessly complex. We can use vfs-level
+ operations to perform the same work, and also simplify the locking
+ constraints. The locking advantages will be exploited in future patches.
+
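+ The shape of the change, schematically (condensed from the
+ open_xa_dir() hunk below):
+
+	/* before: open-coded lookup, mkdir through the inode op, and
+	 * manual reference juggling on every error exit */
+	xadir = lookup_one_len(namebuf, xaroot, strlen(namebuf));
+	...
+	err = xaroot->d_inode->i_op->mkdir(xaroot->d_inode, xadir, 0700);
+	...
+
+	/* after: one helper that looks up the name, creates it on
+	 * demand under a properly annotated i_mutex, and returns a
+	 * dentry or an ERR_PTR */
+	xadir = lookup_or_create_dir(xaroot, namebuf, flags);
+
+ The xattr_create/mkdir/unlink/rmdir helpers likewise concentrate the
+ locking assertions and DQUOT_INIT calls in one place each.
+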
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/xattr.c | 262 ++++++++++++++++++++++++++--------------------------
+ 1 file changed, 135 insertions(+), 127 deletions(-)
+
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -44,100 +44,123 @@
+ #include <net/checksum.h>
+ #include <linux/smp_lock.h>
+ #include <linux/stat.h>
++#include <linux/quotaops.h>
+
+-#define FL_READONLY 128
+-#define FL_DIR_SEM_HELD 256
+ #define PRIVROOT_NAME ".reiserfs_priv"
+ #define XAROOT_NAME "xattrs"
+
+-/* Returns the dentry referring to the root of the extended attribute
+- * directory tree. If it has already been retrieved, it is used. If it
+- * hasn't been created and the flags indicate creation is allowed, we
+- * attempt to create it. On error, we return a pointer-encoded error.
+- */
+-static struct dentry *get_xa_root(struct super_block *sb, int flags)
++/* Helpers for inode ops. We do this so that we don't have all the VFS
++ * overhead and also for proper i_mutex annotation.
++ * dir->i_mutex must be held for all of them. */
++static int xattr_create(struct inode *dir, struct dentry *dentry, int mode)
+ {
+- struct dentry *privroot = dget(REISERFS_SB(sb)->priv_root);
+- struct dentry *xaroot;
++ BUG_ON(!mutex_is_locked(&dir->i_mutex));
++ DQUOT_INIT(dir);
++ return dir->i_op->create(dir, dentry, mode, NULL);
++}
+
+- /* This needs to be created at mount-time */
+- if (!privroot)
+- return ERR_PTR(-ENODATA);
++static int xattr_mkdir(struct inode *dir, struct dentry *dentry, int mode)
++{
++ BUG_ON(!mutex_is_locked(&dir->i_mutex));
++ DQUOT_INIT(dir);
++ return dir->i_op->mkdir(dir, dentry, mode);
++}
+
+- mutex_lock_nested(&privroot->d_inode->i_mutex, I_MUTEX_XATTR);
+- if (REISERFS_SB(sb)->xattr_root) {
+- xaroot = dget(REISERFS_SB(sb)->xattr_root);
+- goto out;
+- }
++/* We use I_MUTEX_CHILD here to silence lockdep. It's safe because xattr
++ * mutation ops aren't called during rename or splice, which are the
++ * only other users of I_MUTEX_CHILD. It violates the ordering, but that's
++ * better than allocating another subclass just for this code. */
++static int xattr_unlink(struct inode *dir, struct dentry *dentry)
++{
++ int error;
++ BUG_ON(!mutex_is_locked(&dir->i_mutex));
++ DQUOT_INIT(dir);
+
+- xaroot = lookup_one_len(XAROOT_NAME, privroot, strlen(XAROOT_NAME));
+- if (IS_ERR(xaroot)) {
+- goto out;
+- } else if (!xaroot->d_inode) {
++ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
++ error = dir->i_op->unlink(dir, dentry);
++ mutex_unlock(&dentry->d_inode->i_mutex);
++
++ if (!error)
++ d_delete(dentry);
++ return error;
++}
++
++static int xattr_rmdir(struct inode *dir, struct dentry *dentry)
++{
++ int error;
++ BUG_ON(!mutex_is_locked(&dir->i_mutex));
++ DQUOT_INIT(dir);
++
++ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
++ dentry_unhash(dentry);
++ error = dir->i_op->rmdir(dir, dentry);
++ if (!error)
++ dentry->d_inode->i_flags |= S_DEAD;
++ mutex_unlock(&dentry->d_inode->i_mutex);
++ if (!error)
++ d_delete(dentry);
++ dput(dentry);
++
++ return error;
++}
++
++
++#define xattr_may_create(flags) (!flags || flags & XATTR_CREATE)
++
++/* Returns and possibly creates the xattr dir. */
++static struct dentry *lookup_or_create_dir(struct dentry *parent,
++ const char *name, int flags)
++{
++ struct dentry *dentry;
++ BUG_ON(!parent);
++
++ dentry = lookup_one_len(name, parent, strlen(name));
++ if (IS_ERR(dentry))
++ return dentry;
++ else if (!dentry->d_inode) {
+ int err = -ENODATA;
+- if (flags == 0 || flags & XATTR_CREATE)
+- err = privroot->d_inode->i_op->mkdir(privroot->d_inode,
+- xaroot, 0700);
++
++ if (xattr_may_create(flags)) {
++ mutex_lock_nested(&parent->d_inode->i_mutex,
++ I_MUTEX_XATTR);
++ err = xattr_mkdir(parent->d_inode, dentry, 0700);
++ mutex_unlock(&parent->d_inode->i_mutex);
++ }
++
+ if (err) {
+- dput(xaroot);
+- xaroot = ERR_PTR(err);
+- goto out;
++ dput(dentry);
++ dentry = ERR_PTR(err);
+ }
+ }
+- REISERFS_SB(sb)->xattr_root = dget(xaroot);
+
+- out:
+- mutex_unlock(&privroot->d_inode->i_mutex);
+- dput(privroot);
+- return xaroot;
++ return dentry;
++}
++
++static struct dentry *open_xa_root(struct super_block *sb, int flags)
++{
++ struct dentry *privroot = REISERFS_SB(sb)->priv_root;
++ if (!privroot)
++ return ERR_PTR(-ENODATA);
++ return lookup_or_create_dir(privroot, XAROOT_NAME, flags);
+ }
+
+-/* Opens the directory corresponding to the inode's extended attribute store.
+- * If flags allow, the tree to the directory may be created. If creation is
+- * prohibited, -ENODATA is returned. */
+ static struct dentry *open_xa_dir(const struct inode *inode, int flags)
+ {
+ struct dentry *xaroot, *xadir;
+ char namebuf[17];
+
+- xaroot = get_xa_root(inode->i_sb, flags);
++ xaroot = open_xa_root(inode->i_sb, flags);
+ if (IS_ERR(xaroot))
+ return xaroot;
+
+- /* ok, we have xaroot open */
+ snprintf(namebuf, sizeof(namebuf), "%X.%X",
+ le32_to_cpu(INODE_PKEY(inode)->k_objectid),
+ inode->i_generation);
+- xadir = lookup_one_len(namebuf, xaroot, strlen(namebuf));
+- if (IS_ERR(xadir)) {
+- dput(xaroot);
+- return xadir;
+- }
+-
+- if (!xadir->d_inode) {
+- int err;
+- if (flags == 0 || flags & XATTR_CREATE) {
+- /* Although there is nothing else trying to create this directory,
+- * another directory with the same hash may be created, so we need
+- * to protect against that */
+- err =
+- xaroot->d_inode->i_op->mkdir(xaroot->d_inode, xadir,
+- 0700);
+- if (err) {
+- dput(xaroot);
+- dput(xadir);
+- return ERR_PTR(err);
+- }
+- }
+- if (!xadir->d_inode) {
+- dput(xaroot);
+- dput(xadir);
+- return ERR_PTR(-ENODATA);
+- }
+- }
+
++ xadir = lookup_or_create_dir(xaroot, namebuf, flags);
+ dput(xaroot);
+ return xadir;
++
+ }
+
+ /*
+@@ -302,13 +325,11 @@ static
+ int xattr_readdir(struct inode *inode, filldir_t filler, void *buf)
+ {
+ int res = -ENOENT;
+- mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR);
+ if (!IS_DEADDIR(inode)) {
+ lock_kernel();
+ res = __xattr_readdir(inode, buf, filler);
+ unlock_kernel();
+ }
+- mutex_unlock(&inode->i_mutex);
+ return res;
+ }
+
+@@ -345,9 +366,7 @@ __reiserfs_xattr_del(struct dentry *xadi
+ return -EIO;
+ }
+
+- err = dir->i_op->unlink(dir, dentry);
+- if (!err)
+- d_delete(dentry);
++ err = xattr_unlink(dir, dentry);
+
+ out_file:
+ dput(dentry);
+@@ -381,7 +400,7 @@ int reiserfs_delete_xattrs(struct inode
+ return 0;
+
+ reiserfs_read_lock_xattrs(inode->i_sb);
+- dir = open_xa_dir(inode, FL_READONLY);
++ dir = open_xa_dir(inode, XATTR_REPLACE);
+ reiserfs_read_unlock_xattrs(inode->i_sb);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+@@ -391,25 +410,25 @@ int reiserfs_delete_xattrs(struct inode
+ return 0;
+ }
+
+- lock_kernel();
++ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_delete_xattrs_filler, dir);
+- if (err) {
+- unlock_kernel();
++ mutex_unlock(&dir->d_inode->i_mutex);
++ if (err)
+ goto out_dir;
+- }
+
+ /* Leftovers besides . and .. -- that's not good. */
+ if (dir->d_inode->i_nlink <= 2) {
+- root = get_xa_root(inode->i_sb, XATTR_REPLACE);
++ root = open_xa_root(inode->i_sb, XATTR_REPLACE);
+ reiserfs_write_lock_xattrs(inode->i_sb);
+- err = vfs_rmdir(root->d_inode, dir);
++ mutex_lock_nested(&root->d_inode->i_mutex, I_MUTEX_XATTR);
++ err = xattr_rmdir(root->d_inode, dir);
++ mutex_unlock(&root->d_inode->i_mutex);
+ reiserfs_write_unlock_xattrs(inode->i_sb);
+ dput(root);
+ } else {
+ reiserfs_warning(inode->i_sb, "jdm-20006",
+ "Couldn't remove all entries in directory");
+ }
+- unlock_kernel();
+
+ out_dir:
+ dput(dir);
+@@ -445,8 +464,11 @@ reiserfs_chown_xattrs_filler(void *buf,
+ return -ENODATA;
+ }
+
+- if (!S_ISDIR(xafile->d_inode->i_mode))
++ if (!S_ISDIR(xafile->d_inode->i_mode)) {
++ mutex_lock_nested(&xafile->d_inode->i_mutex, I_MUTEX_CHILD);
+ err = notify_change(xafile, attrs);
++ mutex_unlock(&xafile->d_inode->i_mutex);
++ }
+ dput(xafile);
+
+ return err;
+@@ -464,38 +486,31 @@ int reiserfs_chown_xattrs(struct inode *
+ return 0;
+
+ reiserfs_read_lock_xattrs(inode->i_sb);
+- dir = open_xa_dir(inode, FL_READONLY);
++ dir = open_xa_dir(inode, XATTR_REPLACE);
+ reiserfs_read_unlock_xattrs(inode->i_sb);
+ if (IS_ERR(dir)) {
+ if (PTR_ERR(dir) != -ENODATA)
+ err = PTR_ERR(dir);
+ goto out;
+- } else if (!dir->d_inode) {
+- dput(dir);
+- goto out;
+- }
+-
+- lock_kernel();
++ } else if (!dir->d_inode)
++ goto out_dir;
+
+ attrs->ia_valid &= (ATTR_UID | ATTR_GID | ATTR_CTIME);
+ buf.xadir = dir;
+ buf.attrs = attrs;
+ buf.inode = inode;
+
++ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_chown_xattrs_filler, &buf);
+- if (err) {
+- unlock_kernel();
+- goto out_dir;
+- }
+
+- err = notify_change(dir, attrs);
+- unlock_kernel();
++ if (!err)
++ err = notify_change(dir, attrs);
++ mutex_unlock(&dir->d_inode->i_mutex);
+
++ attrs->ia_valid = ia_valid;
+ out_dir:
+ dput(dir);
+-
+ out:
+- attrs->ia_valid = ia_valid;
+ return err;
+ }
+
+@@ -513,47 +528,35 @@ static struct dentry *get_xa_file_dentry
+ int err = 0;
+
+ xadir = open_xa_dir(inode, flags);
+- if (IS_ERR(xadir)) {
++ if (IS_ERR(xadir))
+ return ERR_CAST(xadir);
+- } else if (xadir && !xadir->d_inode) {
+- dput(xadir);
+- return ERR_PTR(-ENODATA);
+- }
+
+ xafile = lookup_one_len(name, xadir, strlen(name));
+ if (IS_ERR(xafile)) {
+- dput(xadir);
+- return ERR_CAST(xafile);
++ err = PTR_ERR(xafile);
++ goto out;
+ }
+
+- if (xafile->d_inode) { /* file exists */
+- if (flags & XATTR_CREATE) {
+- err = -EEXIST;
+- dput(xafile);
+- goto out;
+- }
+- } else if (flags & XATTR_REPLACE || flags & FL_READONLY) {
+- goto out;
+- } else {
+- /* inode->i_mutex is down, so nothing else can try to create
+- * the same xattr */
+- err = xadir->d_inode->i_op->create(xadir->d_inode, xafile,
+- 0700 | S_IFREG, NULL);
++ if (xafile->d_inode && (flags & XATTR_CREATE))
++ err = -EEXIST;
+
+- if (err) {
+- dput(xafile);
+- goto out;
++ if (!xafile->d_inode) {
++ err = -ENODATA;
++ if (xattr_may_create(flags)) {
++ mutex_lock_nested(&xadir->d_inode->i_mutex,
++ I_MUTEX_XATTR);
++ err = xattr_create(xadir->d_inode, xafile,
++ 0700|S_IFREG);
++ mutex_unlock(&xadir->d_inode->i_mutex);
+ }
+ }
+
++ if (err)
++ dput(xafile);
+ out:
+ dput(xadir);
+ if (err)
+- xafile = ERR_PTR(err);
+- else if (!xafile->d_inode) {
+- dput(xafile);
+- xafile = ERR_PTR(-ENODATA);
+- }
++ return ERR_PTR(err);
+ return xafile;
+ }
+
+@@ -633,6 +636,7 @@ reiserfs_xattr_set(struct inode *inode,
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = notify_change(dentry, &newattrs);
++ mutex_unlock(&dentry->d_inode->i_mutex);
+ if (err)
+ goto out_filp;
+
+@@ -692,7 +696,6 @@ reiserfs_xattr_set(struct inode *inode,
+ }
+
+ out_filp:
+- mutex_unlock(&dentry->d_inode->i_mutex);
+ dput(dentry);
+
+ out:
+@@ -722,7 +725,7 @@ reiserfs_xattr_get(const struct inode *i
+ if (get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- dentry = get_xa_file_dentry(inode, name, FL_READONLY);
++ dentry = get_xa_file_dentry(inode, name, XATTR_REPLACE);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+@@ -806,13 +809,15 @@ int reiserfs_xattr_del(struct inode *ino
+ struct dentry *dir;
+ int err;
+
+- dir = open_xa_dir(inode, FL_READONLY);
++ dir = open_xa_dir(inode, XATTR_REPLACE);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ goto out;
+ }
+
++ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = __reiserfs_xattr_del(dir, name, strlen(name));
++ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(dir);
+
+ if (!err) {
+@@ -826,6 +831,7 @@ int reiserfs_xattr_del(struct inode *ino
+
+ /* Actual operations that are exported to VFS-land */
+
++static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char *);
+ /*
+ * Inode operation getxattr()
+ * Preliminary locking: we down dentry->d_inode->i_mutex
+@@ -978,7 +984,7 @@ ssize_t reiserfs_listxattr(struct dentry
+
+ reiserfs_read_lock_xattr_i(dentry->d_inode);
+ reiserfs_read_lock_xattrs(dentry->d_sb);
+- dir = open_xa_dir(dentry->d_inode, FL_READONLY);
++ dir = open_xa_dir(dentry->d_inode, XATTR_REPLACE);
+ reiserfs_read_unlock_xattrs(dentry->d_sb);
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+@@ -994,7 +1000,9 @@ ssize_t reiserfs_listxattr(struct dentry
+
+ REISERFS_I(dentry->d_inode)->i_flags |= i_has_xattr_dir;
+
++ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+ err = xattr_readdir(dir->d_inode, reiserfs_listxattr_filler, &buf);
++ mutex_unlock(&dir->d_inode->i_mutex);
+ if (err)
+ goto out_dir;
+
+@@ -1146,7 +1154,7 @@ static int create_privroot(struct dentry
+ int err;
+ struct inode *inode = dentry->d_parent->d_inode;
+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_XATTR);
+- err = inode->i_op->mkdir(inode, dentry, 0700);
++ err = xattr_mkdir(inode, dentry, 0700);
+ mutex_unlock(&inode->i_mutex);
+ if (err) {
+ dput(dentry);
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: strip trailing whitespace
+
+ This patch strips trailing whitespace from the reiserfs code.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+ fs/reiserfs/README | 4 -
+ fs/reiserfs/do_balan.c | 14 ++--
+ fs/reiserfs/file.c | 8 +-
+ fs/reiserfs/fix_node.c | 38 ++++++------
+ fs/reiserfs/hashes.c | 2
+ fs/reiserfs/ibalance.c | 10 +--
+ fs/reiserfs/inode.c | 52 ++++++++---------
+ fs/reiserfs/ioctl.c | 2
+ fs/reiserfs/journal.c | 120 ++++++++++++++++++++---------------------
+ fs/reiserfs/lbalance.c | 18 +++---
+ fs/reiserfs/namei.c | 30 +++++-----
+ fs/reiserfs/objectid.c | 2
+ fs/reiserfs/prints.c | 26 ++++----
+ fs/reiserfs/procfs.c | 2
+ fs/reiserfs/resize.c | 6 +-
+ fs/reiserfs/stree.c | 8 +-
+ fs/reiserfs/super.c | 10 +--
+ fs/reiserfs/tail_conversion.c | 2
+ include/linux/reiserfs_fs_sb.h | 14 ++--
+ 19 files changed, 184 insertions(+), 184 deletions(-)
+
+--- a/fs/reiserfs/do_balan.c
++++ b/fs/reiserfs/do_balan.c
+@@ -76,21 +76,21 @@ inline void do_balance_mark_leaf_dirty(s
+ #define do_balance_mark_internal_dirty do_balance_mark_leaf_dirty
+ #define do_balance_mark_sb_dirty do_balance_mark_leaf_dirty
+
+-/* summary:
++/* summary:
+ if deleting something ( tb->insert_size[0] < 0 )
+ return(balance_leaf_when_delete()); (flag d handled here)
+ else
+ if lnum is larger than 0 we put items into the left node
+ if rnum is larger than 0 we put items into the right node
+ if snum1 is larger than 0 we put items into the new node s1
+- if snum2 is larger than 0 we put items into the new node s2
++ if snum2 is larger than 0 we put items into the new node s2
+ Note that all *num* count new items being created.
+
+ It would be easier to read balance_leaf() if each of these summary
+ lines was a separate procedure rather than being inlined. I think
+ that there are many passages here and in balance_leaf_when_delete() in
+ which two calls to one procedure can replace two passages, and it
+-might save cache space and improve software maintenance costs to do so.
++might save cache space and improve software maintenance costs to do so.
+
+ Vladimir made the perceptive comment that we should offload most of
+ the decision making in this function into fix_nodes/check_balance, and
+@@ -288,15 +288,15 @@ static int balance_leaf(struct tree_bala
+ )
+ {
+ struct buffer_head *tbS0 = PATH_PLAST_BUFFER(tb->tb_path);
+- int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
++ int item_pos = PATH_LAST_POSITION(tb->tb_path); /* index into the array of item headers in S[0]
+ of the affected item */
+ struct buffer_info bi;
+ struct buffer_head *S_new[2]; /* new nodes allocated to hold what could not fit into S */
+ int snum[2]; /* number of items that will be placed
+ into S_new (includes partially shifted
+ items) */
+- int sbytes[2]; /* if an item is partially shifted into S_new then
+- if it is a directory item
++ int sbytes[2]; /* if an item is partially shifted into S_new then
++ if it is a directory item
+ it is the number of entries from the item that are shifted into S_new
+ else
+ it is the number of bytes from the item that are shifted into S_new
+@@ -1983,7 +1983,7 @@ static inline void do_balance_starts(str
+ /* store_print_tb (tb); */
+
+ /* do not delete, just comment it out */
+-/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
++/* print_tb(flag, PATH_LAST_POSITION(tb->tb_path), tb->tb_path->pos_in_item, tb,
+ "check");*/
+ RFALSE(check_before_balancing(tb), "PAP-12340: locked buffers in TB");
+ #ifdef CONFIG_REISERFS_CHECK
+--- a/fs/reiserfs/file.c
++++ b/fs/reiserfs/file.c
+@@ -20,14 +20,14 @@
+ ** insertion/balancing, for files that are written in one write.
+ ** It avoids unnecessary tail packings (balances) for files that are written in
+ ** multiple writes and are small enough to have tails.
+-**
++**
+ ** file_release is called by the VFS layer when the file is closed. If
+ ** this is the last open file descriptor, and the file
+ ** small enough to have a tail, and the tail is currently in an
+ ** unformatted node, the tail is converted back into a direct item.
+-**
++**
+ ** We use reiserfs_truncate_file to pack the tail, since it already has
+-** all the conditions coded.
++** all the conditions coded.
+ */
+ static int reiserfs_file_release(struct inode *inode, struct file *filp)
+ {
+@@ -223,7 +223,7 @@ int reiserfs_commit_page(struct inode *i
+ }
+
+ /* Write @count bytes at position @ppos in a file indicated by @file
+- from the buffer @buf.
++ from the buffer @buf.
+
+ generic_file_write() is only appropriate for filesystems that are not seeking to optimize performance and want
+ something simple that works. It is not for serious use by general purpose filesystems, excepting the one that it was
+--- a/fs/reiserfs/fix_node.c
++++ b/fs/reiserfs/fix_node.c
+@@ -30,8 +30,8 @@
+ ** get_direct_parent
+ ** get_neighbors
+ ** fix_nodes
+- **
+- **
++ **
++ **
+ **/
+
+ #include <linux/time.h>
+@@ -377,9 +377,9 @@ static int get_num_ver(int mode, struct
+ int needed_nodes;
+ int start_item, /* position of item we start filling node from */
+ end_item, /* position of item we finish filling node by */
+- start_bytes, /* number of first bytes (entries for directory) of start_item-th item
++ start_bytes, /* number of first bytes (entries for directory) of start_item-th item
+ we do not include into node that is being filled */
+- end_bytes; /* number of last bytes (entries for directory) of end_item-th item
++ end_bytes; /* number of last bytes (entries for directory) of end_item-th item
+ we do node include into node that is being filled */
+ int split_item_positions[2]; /* these are positions in virtual item of
+ items, that are split between S[0] and
+@@ -569,7 +569,7 @@ extern struct tree_balance *cur_tb;
+
+ /* Set parameters for balancing.
+ * Performs write of results of analysis of balancing into structure tb,
+- * where it will later be used by the functions that actually do the balancing.
++ * where it will later be used by the functions that actually do the balancing.
+ * Parameters:
+ * tb tree_balance structure;
+ * h current level of the node;
+@@ -1204,7 +1204,7 @@ static inline int can_node_be_removed(in
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste;
+- * Returns: 1 - schedule occurred;
++ * Returns: 1 - schedule occurred;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+@@ -1239,7 +1239,7 @@ static int ip_check_balance(struct tree_
+ /* we perform 8 calls to get_num_ver(). For each call we calculate five parameters.
+ where 4th parameter is s1bytes and 5th - s2bytes
+ */
+- short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
++ short snum012[40] = { 0, }; /* s0num, s1num, s2num for 8 cases
+ 0,1 - do not shift and do not shift but bottle
+ 2 - shift only whole item to left
+ 3 - shift to left and bottle as much as possible
+@@ -1288,7 +1288,7 @@ static int ip_check_balance(struct tree_
+
+ create_virtual_node(tb, h);
+
+- /*
++ /*
+ determine maximal number of items we can shift to the left neighbor (in tb structure)
+ and the maximal number of bytes that can flow to the left neighbor
+ from the left most liquid item that cannot be shifted from S[0] entirely (returned value)
+@@ -1349,13 +1349,13 @@ static int ip_check_balance(struct tree_
+
+ {
+ int lpar, rpar, nset, lset, rset, lrset;
+- /*
++ /*
+ * regular overflowing of the node
+ */
+
+- /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
++ /* get_num_ver works in 2 modes (FLOW & NO_FLOW)
+ lpar, rpar - number of items we can shift to left/right neighbor (including splitting item)
+- nset, lset, rset, lrset - shows, whether flowing items give better packing
++ nset, lset, rset, lrset - shows, whether flowing items give better packing
+ */
+ #define FLOW 1
+ #define NO_FLOW 0 /* do not any splitting */
+@@ -1545,7 +1545,7 @@ static int ip_check_balance(struct tree_
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste;
+- * Returns: 1 - schedule occurred;
++ * Returns: 1 - schedule occurred;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+@@ -1728,7 +1728,7 @@ static int dc_check_balance_internal(str
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste;
+- * Returns: 1 - schedule occurred;
++ * Returns: 1 - schedule occurred;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+@@ -1822,7 +1822,7 @@ static int dc_check_balance_leaf(struct
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode d - delete, c - cut.
+- * Returns: 1 - schedule occurred;
++ * Returns: 1 - schedule occurred;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+@@ -1851,7 +1851,7 @@ static int dc_check_balance(struct tree_
+ * h current level of the node;
+ * inum item number in S[h];
+ * mode i - insert, p - paste, d - delete, c - cut.
+- * Returns: 1 - schedule occurred;
++ * Returns: 1 - schedule occurred;
+ * 0 - balancing for higher levels needed;
+ * -1 - no balancing for higher levels needed;
+ * -2 - no disk space.
+@@ -2296,15 +2296,15 @@ static int wait_tb_buffers_until_unlocke
+ * analyze what and where should be moved;
+ * get sufficient number of new nodes;
+ * Balancing will start only after all resources will be collected at a time.
+- *
++ *
+ * When ported to SMP kernels, only at the last moment after all needed nodes
+ * are collected in cache, will the resources be locked using the usual
+ * textbook ordered lock acquisition algorithms. Note that ensuring that
+ * this code neither write locks what it does not need to write lock nor locks out of order
+ * will be a pain in the butt that could have been avoided. Grumble grumble. -Hans
+- *
++ *
+ * fix is meant in the sense of render unchanging
+- *
++ *
+ * Latency might be improved by first gathering a list of what buffers are needed
+ * and then getting as many of them in parallel as possible? -Hans
+ *
+@@ -2316,7 +2316,7 @@ static int wait_tb_buffers_until_unlocke
+ * ins_ih & ins_sd are used when inserting
+ * Returns: 1 - schedule occurred while the function worked;
+ * 0 - schedule didn't occur while the function worked;
+- * -1 - if no_disk_space
++ * -1 - if no_disk_space
+ */
+
+ int fix_nodes(int n_op_mode, struct tree_balance *p_s_tb, struct item_head *p_s_ins_ih, // item head of item being inserted
+--- a/fs/reiserfs/hashes.c
++++ b/fs/reiserfs/hashes.c
+@@ -7,7 +7,7 @@
+ * (see Applied Cryptography, 2nd edition, p448).
+ *
+ * Jeremy Fitzhardinge <jeremy@zip.com.au> 1998
+- *
++ *
+ * Jeremy has agreed to the contents of reiserfs/README. -Hans
+ * Yura's function is added (04/07/2000)
+ */
+--- a/fs/reiserfs/ibalance.c
++++ b/fs/reiserfs/ibalance.c
+@@ -278,7 +278,7 @@ static void internal_delete_childs(struc
+
+ /* copy cpy_num node pointers and cpy_num - 1 items from buffer src to buffer dest
+ * last_first == FIRST_TO_LAST means, that we copy first items from src to tail of dest
+- * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
++ * last_first == LAST_TO_FIRST means, that we copy last items from src to head of dest
+ */
+ static void internal_copy_pointers_items(struct buffer_info *dest_bi,
+ struct buffer_head *src,
+@@ -385,7 +385,7 @@ static void internal_move_pointers_items
+ if (last_first == FIRST_TO_LAST) { /* shift_left occurs */
+ first_pointer = 0;
+ first_item = 0;
+- /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
++ /* delete cpy_num - del_par pointers and keys starting for pointers with first_pointer,
+ for key - with first_item */
+ internal_delete_pointers_items(src_bi, first_pointer,
+ first_item, cpy_num - del_par);
+@@ -453,7 +453,7 @@ static void internal_insert_key(struct b
+ }
+ }
+
+-/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
++/* Insert d_key'th (delimiting) key from buffer cfl to tail of dest.
+ * Copy pointer_amount node pointers and pointer_amount - 1 items from buffer src to buffer dest.
+ * Replace d_key'th key in buffer cfl.
+ * Delete pointer_amount items and node pointers from buffer src.
+@@ -518,7 +518,7 @@ static void internal_shift1_left(struct
+ /* internal_move_pointers_items (tb->L[h], tb->S[h], FIRST_TO_LAST, pointer_amount, 1); */
+ }
+
+-/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
++/* Insert d_key'th (delimiting) key from buffer cfr to head of dest.
+ * Copy n node pointers and n - 1 items from buffer src to buffer dest.
+ * Replace d_key'th key in buffer cfr.
+ * Delete n items and node pointers from buffer src.
+@@ -749,7 +749,7 @@ int balance_internal(struct tree_balance
+ this means that new pointers and items must be inserted AFTER *
+ child_pos
+ }
+- else
++ else
+ {
+ it is the position of the leftmost pointer that must be deleted (together with
+ its corresponding key to the left of the pointer)
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -52,7 +52,7 @@ void reiserfs_delete_inode(struct inode
+ /* Do quota update inside a transaction for journaled quotas. We must do that
+ * after delete_object so that quota updates go into the same transaction as
+ * stat data deletion */
+- if (!err)
++ if (!err)
+ DQUOT_FREE_INODE(inode);
+
+ if (journal_end(&th, inode->i_sb, jbegin_count))
+@@ -363,7 +363,7 @@ static int _get_block_create_0(struct in
+ }
+ /* make sure we don't read more bytes than actually exist in
+ ** the file. This can happen in odd cases where i_size isn't
+- ** correct, and when direct item padding results in a few
++ ** correct, and when direct item padding results in a few
+ ** extra bytes at the end of the direct item
+ */
+ if ((le_ih_k_offset(ih) + path.pos_in_item) > inode->i_size)
+@@ -438,15 +438,15 @@ static int reiserfs_bmap(struct inode *i
+ ** -ENOENT instead of a valid buffer. block_prepare_write expects to
+ ** be able to do i/o on the buffers returned, unless an error value
+ ** is also returned.
+-**
++**
+ ** So, this allows block_prepare_write to be used for reading a single block
+ ** in a page. Where it does not produce a valid page for holes, or past the
+ ** end of the file. This turns out to be exactly what we need for reading
+ ** tails for conversion.
+ **
+ ** The point of the wrapper is forcing a certain value for create, even
+-** though the VFS layer is calling this function with create==1. If you
+-** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
++** though the VFS layer is calling this function with create==1. If you
++** don't want to send create == GET_BLOCK_NO_HOLE to reiserfs_get_block,
+ ** don't use this function.
+ */
+ static int reiserfs_get_block_create_0(struct inode *inode, sector_t block,
+@@ -602,7 +602,7 @@ int reiserfs_get_block(struct inode *ino
+ int done;
+ int fs_gen;
+ struct reiserfs_transaction_handle *th = NULL;
+- /* space reserved in transaction batch:
++ /* space reserved in transaction batch:
+ . 3 balancings in direct->indirect conversion
+ . 1 block involved into reiserfs_update_sd()
+ XXX in practically impossible worst case direct2indirect()
+@@ -754,7 +754,7 @@ int reiserfs_get_block(struct inode *ino
+ reiserfs_write_unlock(inode->i_sb);
+
+ /* the item was found, so new blocks were not added to the file
+- ** there is no need to make sure the inode is updated with this
++ ** there is no need to make sure the inode is updated with this
+ ** transaction
+ */
+ return retval;
+@@ -986,7 +986,7 @@ int reiserfs_get_block(struct inode *ino
+
+ /* this loop could log more blocks than we had originally asked
+ ** for. So, we have to allow the transaction to end if it is
+- ** too big or too full. Update the inode so things are
++ ** too big or too full. Update the inode so things are
+ ** consistent if we crash before the function returns
+ **
+ ** release the path so that anybody waiting on the path before
+@@ -997,7 +997,7 @@ int reiserfs_get_block(struct inode *ino
+ if (retval)
+ goto failure;
+ }
+- /* inserting indirect pointers for a hole can take a
++ /* inserting indirect pointers for a hole can take a
+ ** long time. reschedule if needed
+ */
+ cond_resched();
+@@ -1444,7 +1444,7 @@ void reiserfs_read_locked_inode(struct i
+ update sd on unlink all that is required is to check for nlink
+ here. This bug was first found by Sizif when debugging
+ SquidNG/Butterfly, forgotten, and found again after Philippe
+- Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
++ Gramoulle <philippe.gramoulle@mmania.com> reproduced it.
+
+ More logical fix would require changes in fs/inode.c:iput() to
+ remove inode from hash-table _after_ fs cleaned disk stuff up and
+@@ -1628,7 +1628,7 @@ int reiserfs_write_inode(struct inode *i
+ if (inode->i_sb->s_flags & MS_RDONLY)
+ return -EROFS;
+ /* memory pressure can sometimes initiate write_inode calls with sync == 1,
+- ** these cases are just when the system needs ram, not when the
++ ** these cases are just when the system needs ram, not when the
+ ** inode needs to reach disk for safety, and they can safely be
+ ** ignored because the altered inode has already been logged.
+ */
+@@ -1745,7 +1745,7 @@ static int reiserfs_new_symlink(struct r
+ /* inserts the stat data into the tree, and then calls
+ reiserfs_new_directory (to insert ".", ".." item if new object is
+ directory) or reiserfs_new_symlink (to insert symlink body if new
+- object is symlink) or nothing (if new object is regular file)
++ object is symlink) or nothing (if new object is regular file)
+
+ NOTE! uid and gid must already be set in the inode. If we return
+ non-zero due to an error, we have to drop the quota previously allocated
+@@ -1753,7 +1753,7 @@ static int reiserfs_new_symlink(struct r
+ if we return non-zero, we also end the transaction. */
+ int reiserfs_new_inode(struct reiserfs_transaction_handle *th,
+ struct inode *dir, int mode, const char *symname,
+- /* 0 for regular, EMTRY_DIR_SIZE for dirs,
++ /* 0 for regular, EMTRY_DIR_SIZE for dirs,
+ strlen (symname) for symlinks) */
+ loff_t i_size, struct dentry *dentry,
+ struct inode *inode,
+@@ -1788,7 +1788,7 @@ int reiserfs_new_inode(struct reiserfs_t
+ goto out_bad_inode;
+ }
+ if (old_format_only(sb))
+- /* not a perfect generation count, as object ids can be reused, but
++ /* not a perfect generation count, as object ids can be reused, but
+ ** this is as good as reiserfs can do right now.
+ ** note that the private part of inode isn't filled in yet, we have
+ ** to use the directory.
+@@ -2086,7 +2086,7 @@ int reiserfs_truncate_file(struct inode
+
+ if (p_s_inode->i_size > 0) {
+ if ((error = grab_tail_page(p_s_inode, &page, &bh))) {
+- // -ENOENT means we truncated past the end of the file,
++ // -ENOENT means we truncated past the end of the file,
+ // and get_block_create_0 could not find a block to read in,
+ // which is ok.
+ if (error != -ENOENT)
+@@ -2098,11 +2098,11 @@ int reiserfs_truncate_file(struct inode
+ }
+ }
+
+- /* so, if page != NULL, we have a buffer head for the offset at
+- ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
+- ** then we have an unformatted node. Otherwise, we have a direct item,
+- ** and no zeroing is required on disk. We zero after the truncate,
+- ** because the truncate might pack the item anyway
++ /* so, if page != NULL, we have a buffer head for the offset at
++ ** the end of the file. if the bh is mapped, and bh->b_blocknr != 0,
++ ** then we have an unformatted node. Otherwise, we have a direct item,
++ ** and no zeroing is required on disk. We zero after the truncate,
++ ** because the truncate might pack the item anyway
+ ** (it will unmap bh if it packs).
+ */
+ /* it is enough to reserve space in transaction for 2 balancings:
+@@ -2311,8 +2311,8 @@ static int map_block_for_writepage(struc
+ return retval;
+ }
+
+-/*
+- * mason@suse.com: updated in 2.5.54 to follow the same general io
++/*
++ * mason@suse.com: updated in 2.5.54 to follow the same general io
+ * start/recovery path as __block_write_full_page, along with special
+ * code to handle reiserfs tails.
+ */
+@@ -2452,7 +2452,7 @@ static int reiserfs_write_full_page(stru
+ unlock_page(page);
+
+ /*
+- * since any buffer might be the only dirty buffer on the page,
++ * since any buffer might be the only dirty buffer on the page,
+ * the first submit_bh can bring the page out of writeback.
+ * be careful with the buffers.
+ */
+@@ -2471,8 +2471,8 @@ static int reiserfs_write_full_page(stru
+ if (nr == 0) {
+ /*
+ * if this page only had a direct item, it is very possible for
+- * no io to be required without there being an error. Or,
+- * someone else could have locked them and sent them down the
++ * no io to be required without there being an error. Or,
++ * someone else could have locked them and sent them down the
+ * pipe without locking the page
+ */
+ bh = head;
+@@ -2491,7 +2491,7 @@ static int reiserfs_write_full_page(stru
+
+ fail:
+ /* catches various errors, we need to make sure any valid dirty blocks
+- * get to the media. The page is currently locked and not marked for
++ * get to the media. The page is currently locked and not marked for
+ * writeback
+ */
+ ClearPageUptodate(page);
+--- a/fs/reiserfs/ioctl.c
++++ b/fs/reiserfs/ioctl.c
+@@ -189,7 +189,7 @@ int reiserfs_unpack(struct inode *inode,
+ }
+
+ /* we unpack by finding the page with the tail, and calling
+- ** reiserfs_prepare_write on that page. This will force a
++ ** reiserfs_prepare_write on that page. This will force a
+ ** reiserfs_get_block to unpack the tail for us.
+ */
+ index = inode->i_size >> PAGE_CACHE_SHIFT;
+--- a/fs/reiserfs/journal.c
++++ b/fs/reiserfs/journal.c
+@@ -1,36 +1,36 @@
+ /*
+ ** Write ahead logging implementation copyright Chris Mason 2000
+ **
+-** The background commits make this code very interelated, and
++** The background commits make this code very interelated, and
+ ** overly complex. I need to rethink things a bit....The major players:
+ **
+-** journal_begin -- call with the number of blocks you expect to log.
++** journal_begin -- call with the number of blocks you expect to log.
+ ** If the current transaction is too
+-** old, it will block until the current transaction is
++** old, it will block until the current transaction is
+ ** finished, and then start a new one.
+-** Usually, your transaction will get joined in with
++** Usually, your transaction will get joined in with
+ ** previous ones for speed.
+ **
+-** journal_join -- same as journal_begin, but won't block on the current
++** journal_join -- same as journal_begin, but won't block on the current
+ ** transaction regardless of age. Don't ever call
+-** this. Ever. There are only two places it should be
++** this. Ever. There are only two places it should be
+ ** called from, and they are both inside this file.
+ **
+-** journal_mark_dirty -- adds blocks into this transaction. clears any flags
++** journal_mark_dirty -- adds blocks into this transaction. clears any flags
+ ** that might make them get sent to disk
+-** and then marks them BH_JDirty. Puts the buffer head
+-** into the current transaction hash.
++** and then marks them BH_JDirty. Puts the buffer head
++** into the current transaction hash.
+ **
+ ** journal_end -- if the current transaction is batchable, it does nothing
+ ** otherwise, it could do an async/synchronous commit, or
+-** a full flush of all log and real blocks in the
++** a full flush of all log and real blocks in the
+ ** transaction.
+ **
+-** flush_old_commits -- if the current transaction is too old, it is ended and
+-** commit blocks are sent to disk. Forces commit blocks
+-** to disk for all backgrounded commits that have been
++** flush_old_commits -- if the current transaction is too old, it is ended and
++** commit blocks are sent to disk. Forces commit blocks
++** to disk for all backgrounded commits that have been
+ ** around too long.
+-** -- Note, if you call this as an immediate flush from
++** -- Note, if you call this as an immediate flush from
+ ** from within kupdate, it will ignore the immediate flag
+ */
+
+@@ -212,7 +212,7 @@ static void allocate_bitmap_nodes(struct
+ list_add(&bn->list, &journal->j_bitmap_nodes);
+ journal->j_free_bitmap_nodes++;
+ } else {
+- break; // this is ok, we'll try again when more are needed
++ break; /* this is ok, we'll try again when more are needed */
+ }
+ }
+ }
+@@ -283,7 +283,7 @@ static int free_bitmap_nodes(struct supe
+ }
+
+ /*
+-** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
++** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps.
+ ** jb_array is the array to be filled in.
+ */
+ int reiserfs_allocate_list_bitmaps(struct super_block *p_s_sb,
+@@ -315,7 +315,7 @@ int reiserfs_allocate_list_bitmaps(struc
+ }
+
+ /*
+-** find an available list bitmap. If you can't find one, flush a commit list
++** find an available list bitmap. If you can't find one, flush a commit list
+ ** and try again
+ */
+ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *p_s_sb,
+@@ -348,7 +348,7 @@ static struct reiserfs_list_bitmap *get_
+ return jb;
+ }
+
+-/*
++/*
+ ** allocates a new chunk of X nodes, and links them all together as a list.
+ ** Uses the cnode->next and cnode->prev pointers
+ ** returns NULL on failure
+@@ -376,7 +376,7 @@ static struct reiserfs_journal_cnode *al
+ }
+
+ /*
+-** pulls a cnode off the free list, or returns NULL on failure
++** pulls a cnode off the free list, or returns NULL on failure
+ */
+ static struct reiserfs_journal_cnode *get_cnode(struct super_block *p_s_sb)
+ {
+@@ -403,7 +403,7 @@ static struct reiserfs_journal_cnode *ge
+ }
+
+ /*
+-** returns a cnode to the free list
++** returns a cnode to the free list
+ */
+ static void free_cnode(struct super_block *p_s_sb,
+ struct reiserfs_journal_cnode *cn)
+@@ -1192,8 +1192,8 @@ static int flush_commit_list(struct supe
+ }
+
+ /*
+-** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
+-** returns NULL if it can't find anything
++** flush_journal_list frequently needs to find a newer transaction for a given block. This does that, or
++** returns NULL if it can't find anything
+ */
+ static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
+ reiserfs_journal_cnode
+@@ -1335,8 +1335,8 @@ static int update_journal_header_block(s
+ return _update_journal_header_block(p_s_sb, offset, trans_id);
+ }
+
+-/*
+-** flush any and all journal lists older than you are
++/*
++** flush any and all journal lists older than you are
+ ** can only be called from flush_journal_list
+ */
+ static int flush_older_journal_lists(struct super_block *p_s_sb,
+@@ -1382,8 +1382,8 @@ static void del_from_work_list(struct su
+ ** always set flushall to 1, unless you are calling from inside
+ ** flush_journal_list
+ **
+-** IMPORTANT. This can only be called while there are no journal writers,
+-** and the journal is locked. That means it can only be called from
++** IMPORTANT. This can only be called while there are no journal writers,
++** and the journal is locked. That means it can only be called from
+ ** do_journal_end, or by journal_release
+ */
+ static int flush_journal_list(struct super_block *s,
+@@ -1429,7 +1429,7 @@ static int flush_journal_list(struct sup
+ goto flush_older_and_return;
+ }
+
+- /* start by putting the commit list on disk. This will also flush
++ /* start by putting the commit list on disk. This will also flush
+ ** the commit lists of any olders transactions
+ */
+ flush_commit_list(s, jl, 1);
+@@ -1444,8 +1444,8 @@ static int flush_journal_list(struct sup
+ goto flush_older_and_return;
+ }
+
+- /* loop through each cnode, see if we need to write it,
+- ** or wait on a more recent transaction, or just ignore it
++ /* loop through each cnode, see if we need to write it,
++ ** or wait on a more recent transaction, or just ignore it
+ */
+ if (atomic_read(&(journal->j_wcount)) != 0) {
+ reiserfs_panic(s, "journal-844", "journal list is flushing, "
+@@ -1473,8 +1473,8 @@ static int flush_journal_list(struct sup
+ if (!pjl && cn->bh) {
+ saved_bh = cn->bh;
+
+- /* we do this to make sure nobody releases the buffer while
+- ** we are working with it
++ /* we do this to make sure nobody releases the buffer while
++ ** we are working with it
+ */
+ get_bh(saved_bh);
+
+@@ -1497,8 +1497,8 @@ static int flush_journal_list(struct sup
+ goto free_cnode;
+ }
+
+- /* bh == NULL when the block got to disk on its own, OR,
+- ** the block got freed in a future transaction
++ /* bh == NULL when the block got to disk on its own, OR,
++ ** the block got freed in a future transaction
+ */
+ if (saved_bh == NULL) {
+ goto free_cnode;
+@@ -1586,7 +1586,7 @@ static int flush_journal_list(struct sup
+ __func__);
+ flush_older_and_return:
+
+- /* before we can update the journal header block, we _must_ flush all
++ /* before we can update the journal header block, we _must_ flush all
+ ** real blocks from all older transactions to disk. This is because
+ ** once the header block is updated, this transaction will not be
+ ** replayed after a crash
+@@ -1596,7 +1596,7 @@ static int flush_journal_list(struct sup
+ }
+
+ err = journal->j_errno;
+- /* before we can remove everything from the hash tables for this
++ /* before we can remove everything from the hash tables for this
+ ** transaction, we must make sure it can never be replayed
+ **
+ ** since we are only called from do_journal_end, we know for sure there
+@@ -2016,9 +2016,9 @@ static int journal_compare_desc_commit(s
+ return 0;
+ }
+
+-/* returns 0 if it did not find a description block
++/* returns 0 if it did not find a description block
+ ** returns -1 if it found a corrupt commit block
+-** returns 1 if both desc and commit were valid
++** returns 1 if both desc and commit were valid
+ */
+ static int journal_transaction_is_valid(struct super_block *p_s_sb,
+ struct buffer_head *d_bh,
+@@ -2380,8 +2380,8 @@ static int journal_read(struct super_blo
+ bdevname(journal->j_dev_bd, b));
+ start = get_seconds();
+
+- /* step 1, read in the journal header block. Check the transaction it says
+- ** is the first unflushed, and if that transaction is not valid,
++ /* step 1, read in the journal header block. Check the transaction it says
++ ** is the first unflushed, and if that transaction is not valid,
+ ** replay is done
+ */
+ journal->j_header_bh = journal_bread(p_s_sb,
+@@ -2406,8 +2406,8 @@ static int journal_read(struct super_blo
+ le32_to_cpu(jh->j_last_flush_trans_id));
+ valid_journal_header = 1;
+
+- /* now, we try to read the first unflushed offset. If it is not valid,
+- ** there is nothing more we can do, and it makes no sense to read
++ /* now, we try to read the first unflushed offset. If it is not valid,
++ ** there is nothing more we can do, and it makes no sense to read
+ ** through the whole log.
+ */
+ d_bh =
+@@ -2916,7 +2916,7 @@ int journal_transaction_should_end(struc
+ return 0;
+ }
+
+-/* this must be called inside a transaction, and requires the
++/* this must be called inside a transaction, and requires the
+ ** kernel_lock to be held
+ */
+ void reiserfs_block_writes(struct reiserfs_transaction_handle *th)
+@@ -3037,7 +3037,7 @@ static int do_journal_begin_r(struct rei
+ now = get_seconds();
+
+ /* if there is no room in the journal OR
+- ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
++ ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
+ ** we don't sleep if there aren't other writers
+ */
+
+@@ -3237,7 +3237,7 @@ int journal_begin(struct reiserfs_transa
+ **
+ ** if it was dirty, cleans and files onto the clean list. I can't let it be dirty again until the
+ ** transaction is committed.
+-**
++**
+ ** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
+ */
+ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
+@@ -3287,7 +3287,7 @@ int journal_mark_dirty(struct reiserfs_t
+ atomic_read(&(journal->j_wcount)));
+ return 1;
+ }
+- /* this error means I've screwed up, and we've overflowed the transaction.
++ /* this error means I've screwed up, and we've overflowed the transaction.
+ ** Nothing can be done here, except make the FS readonly or panic.
+ */
+ if (journal->j_len >= journal->j_trans_max) {
+@@ -3377,7 +3377,7 @@ int journal_end(struct reiserfs_transact
+ }
+ }
+
+-/* removes from the current transaction, relsing and descrementing any counters.
++/* removes from the current transaction, relsing and descrementing any counters.
+ ** also files the removed buffer directly onto the clean list
+ **
+ ** called by journal_mark_freed when a block has been deleted
+@@ -3475,7 +3475,7 @@ static int can_dirty(struct reiserfs_jou
+ }
+
+ /* syncs the commit blocks, but does not force the real buffers to disk
+-** will wait until the current transaction is done/committed before returning
++** will wait until the current transaction is done/committed before returning
+ */
+ int journal_end_sync(struct reiserfs_transaction_handle *th,
+ struct super_block *p_s_sb, unsigned long nblocks)
+@@ -3557,13 +3557,13 @@ int reiserfs_flush_old_commits(struct su
+
+ /*
+ ** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
+-**
+-** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
++**
++** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
+ ** the writers are done. By the time it wakes up, the transaction it was called has already ended, so it just
+ ** flushes the commit list and returns 0.
+ **
+ ** Won't batch when flush or commit_now is set. Also won't batch when others are waiting on j_join_wait.
+-**
++**
+ ** Note, we can't allow the journal_end to proceed while there are still writers in the log.
+ */
+ static int check_journal_end(struct reiserfs_transaction_handle *th,
+@@ -3591,7 +3591,7 @@ static int check_journal_end(struct reis
+ atomic_dec(&(journal->j_wcount));
+ }
+
+- /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
++ /* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
+ ** will be dealt with by next transaction that actually writes something, but should be taken
+ ** care of in this trans
+ */
+@@ -3600,7 +3600,7 @@ static int check_journal_end(struct reis
+ /* if wcount > 0, and we are called to with flush or commit_now,
+ ** we wait on j_join_wait. We will wake up when the last writer has
+ ** finished the transaction, and started it on its way to the disk.
+- ** Then, we flush the commit or journal list, and just return 0
++ ** Then, we flush the commit or journal list, and just return 0
+ ** because the rest of journal end was already done for this transaction.
+ */
+ if (atomic_read(&(journal->j_wcount)) > 0) {
+@@ -3671,7 +3671,7 @@ static int check_journal_end(struct reis
+ /*
+ ** Does all the work that makes deleting blocks safe.
+ ** when deleting a block mark BH_JNew, just remove it from the current transaction, clean it's buffer_head and move on.
+-**
++**
+ ** otherwise:
+ ** set a bit for the block in the journal bitmap. That will prevent it from being allocated for unformatted nodes
+ ** before this transaction has finished.
+@@ -3875,7 +3875,7 @@ extern struct tree_balance *cur_tb;
+ ** be written to disk while we are altering it. So, we must:
+ ** clean it
+ ** wait on it.
+-**
++**
+ */
+ int reiserfs_prepare_for_journal(struct super_block *p_s_sb,
+ struct buffer_head *bh, int wait)
+@@ -3917,7 +3917,7 @@ static void flush_old_journal_lists(stru
+ }
+ }
+
+-/*
++/*
+ ** long and ugly. If flush, will not return until all commit
+ ** blocks and all real buffers in the trans are on disk.
+ ** If no_async, won't return until all commit blocks are on disk.
+@@ -3978,7 +3978,7 @@ static int do_journal_end(struct reiserf
+ wait_on_commit = 1;
+ }
+
+- /* check_journal_end locks the journal, and unlocks if it does not return 1
++ /* check_journal_end locks the journal, and unlocks if it does not return 1
+ ** it tells us if we should continue with the journal_end, or just return
+ */
+ if (!check_journal_end(th, p_s_sb, nblocks, flags)) {
+@@ -4075,7 +4075,7 @@ static int do_journal_end(struct reiserf
+ last_cn->next = jl_cn;
+ }
+ last_cn = jl_cn;
+- /* make sure the block we are trying to log is not a block
++ /* make sure the block we are trying to log is not a block
+ of journal or reserved area */
+
+ if (is_block_in_log_or_reserved_area
+@@ -4222,9 +4222,9 @@ static int do_journal_end(struct reiserf
+ } else if (!(jl->j_state & LIST_COMMIT_PENDING))
+ queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);
+
+- /* if the next transaction has any chance of wrapping, flush
+- ** transactions that might get overwritten. If any journal lists are very
+- ** old flush them as well.
++ /* if the next transaction has any chance of wrapping, flush
++ ** transactions that might get overwritten. If any journal lists are very
++ ** old flush them as well.
+ */
+ first_jl:
+ list_for_each_safe(entry, safe, &journal->j_journal_list) {
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -119,8 +119,8 @@ static void leaf_copy_dir_entries(struct
+ DEH_SIZE * copy_count + copy_records_len);
+ }
+
+-/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
+- part of it or nothing (see the return 0 below) from SOURCE to the end
++/* Copy the first (if last_first == FIRST_TO_LAST) or last (last_first == LAST_TO_FIRST) item or
++ part of it or nothing (see the return 0 below) from SOURCE to the end
+ (if last_first) or beginning (!last_first) of the DEST */
+ /* returns 1 if anything was copied, else 0 */
+ static int leaf_copy_boundary_item(struct buffer_info *dest_bi,
+@@ -396,7 +396,7 @@ static void leaf_item_bottle(struct buff
+ else {
+ struct item_head n_ih;
+
+- /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
++ /* copy part of the body of the item number 'item_num' of SOURCE to the end of the DEST
+ part defined by 'cpy_bytes'; create new item header; change old item_header (????);
+ n_ih = new item_header;
+ */
+@@ -426,7 +426,7 @@ static void leaf_item_bottle(struct buff
+ else {
+ struct item_head n_ih;
+
+- /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
++ /* copy part of the body of the item number 'item_num' of SOURCE to the begin of the DEST
+ part defined by 'cpy_bytes'; create new item header;
+ n_ih = new item_header;
+ */
+@@ -724,7 +724,7 @@ int leaf_shift_right(struct tree_balance
+ static void leaf_delete_items_entirely(struct buffer_info *bi,
+ int first, int del_num);
+ /* If del_bytes == -1, starting from position 'first' delete del_num items in whole in buffer CUR.
+- If not.
++ If not.
+ If last_first == 0. Starting from position 'first' delete del_num-1 items in whole. Delete part of body of
+ the first item. Part defined by del_bytes. Don't delete first item header
+ If last_first == 1. Starting from position 'first+1' delete del_num-1 items in whole. Delete part of body of
+@@ -783,7 +783,7 @@ void leaf_delete_items(struct buffer_inf
+ /* len = body len of item */
+ len = ih_item_len(ih);
+
+- /* delete the part of the last item of the bh
++ /* delete the part of the last item of the bh
+ do not delete item header
+ */
+ leaf_cut_from_buffer(cur_bi, B_NR_ITEMS(bh) - 1,
+@@ -865,7 +865,7 @@ void leaf_insert_into_buf(struct buffer_
+ }
+ }
+
+-/* paste paste_size bytes to affected_item_num-th item.
++/* paste paste_size bytes to affected_item_num-th item.
+ When item is a directory, this only prepare space for new entries */
+ void leaf_paste_in_buffer(struct buffer_info *bi, int affected_item_num,
+ int pos_in_item, int paste_size,
+@@ -1022,7 +1022,7 @@ static int leaf_cut_entries(struct buffe
+ /* when cut item is part of regular file
+ pos_in_item - first byte that must be cut
+ cut_size - number of bytes to be cut beginning from pos_in_item
+-
++
+ when cut item is part of directory
+ pos_in_item - number of first deleted entry
+ cut_size - count of deleted entries
+@@ -1275,7 +1275,7 @@ void leaf_paste_entries(struct buffer_in
+ /* change item key if necessary (when we paste before 0-th entry */
+ if (!before) {
+ set_le_ih_k_offset(ih, deh_offset(new_dehs));
+-/* memcpy (&ih->ih_key.k_offset,
++/* memcpy (&ih->ih_key.k_offset,
+ &new_dehs->deh_offset, SHORT_KEY_SIZE);*/
+ }
+ #ifdef CONFIG_REISERFS_CHECK
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -106,7 +106,7 @@ key of the first directory entry in it.
+ This function first calls search_by_key, then, if item whose first
+ entry matches is not found it looks for the entry inside directory
+ item found by search_by_key. Fills the path to the entry, and to the
+-entry position in the item
++entry position in the item
+
+ */
+
+@@ -371,7 +371,7 @@ static struct dentry *reiserfs_lookup(st
+ return d_splice_alias(inode, dentry);
+ }
+
+-/*
++/*
+ ** looks up the dentry of the parent directory for child.
+ ** taken from ext2_get_parent
+ */
+@@ -410,7 +410,7 @@ struct dentry *reiserfs_get_parent(struc
+ return parent;
+ }
+
+-/* add entry to the directory (entry can be hidden).
++/* add entry to the directory (entry can be hidden).
+
+ insert definition of when hidden directories are used here -Hans
+
+@@ -568,7 +568,7 @@ static int drop_new_inode(struct inode *
+ return 0;
+ }
+
+-/* utility function that does setup for reiserfs_new_inode.
++/* utility function that does setup for reiserfs_new_inode.
+ ** DQUOT_INIT needs lots of credits so it's better to have it
+ ** outside of a transaction, so we had to pull some bits of
+ ** reiserfs_new_inode out into this func.
+@@ -823,7 +823,7 @@ static inline int reiserfs_empty_dir(str
+ {
+ /* we can cheat because an old format dir cannot have
+ ** EMPTY_DIR_SIZE, and a new format dir cannot have
+- ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
++ ** EMPTY_DIR_SIZE_V1. So, if the inode is either size,
+ ** regardless of disk format version, the directory is empty.
+ */
+ if (inode->i_size != EMPTY_DIR_SIZE &&
+@@ -1163,7 +1163,7 @@ static int reiserfs_link(struct dentry *
+ return retval;
+ }
+
+-// de contains information pointing to an entry which
++/* de contains information pointing to an entry which */
+ static int de_still_valid(const char *name, int len,
+ struct reiserfs_dir_entry *de)
+ {
+@@ -1207,10 +1207,10 @@ static void set_ino_in_dir_entry(struct
+ de->de_deh[de->de_entry_num].deh_objectid = key->k_objectid;
+ }
+
+-/*
++/*
+ * process, that is going to call fix_nodes/do_balance must hold only
+ * one path. If it holds 2 or more, it can get into endless waiting in
+- * get_empty_nodes or its clones
++ * get_empty_nodes or its clones
+ */
+ static int reiserfs_rename(struct inode *old_dir, struct dentry *old_dentry,
+ struct inode *new_dir, struct dentry *new_dentry)
+@@ -1264,7 +1264,7 @@ static int reiserfs_rename(struct inode
+
+ old_inode_mode = old_inode->i_mode;
+ if (S_ISDIR(old_inode_mode)) {
+- // make sure, that directory being renamed has correct ".."
++ // make sure, that directory being renamed has correct ".."
+ // and that its new parent directory has not too many links
+ // already
+
+@@ -1275,8 +1275,8 @@ static int reiserfs_rename(struct inode
+ }
+ }
+
+- /* directory is renamed, its parent directory will be changed,
+- ** so find ".." entry
++ /* directory is renamed, its parent directory will be changed,
++ ** so find ".." entry
+ */
+ dot_dot_de.de_gen_number_bit_string = NULL;
+ retval =
+@@ -1386,9 +1386,9 @@ static int reiserfs_rename(struct inode
+ this stuff, yes? Then, having
+ gathered everything into RAM we
+ should lock the buffers, yes? -Hans */
+- /* probably. our rename needs to hold more
+- ** than one path at once. The seals would
+- ** have to be written to deal with multi-path
++ /* probably. our rename needs to hold more
++ ** than one path at once. The seals would
++ ** have to be written to deal with multi-path
+ ** issues -chris
+ */
+ /* sanity checking before doing the rename - avoid races many
+@@ -1466,7 +1466,7 @@ static int reiserfs_rename(struct inode
+ }
+
+ if (S_ISDIR(old_inode_mode)) {
+- // adjust ".." of renamed directory
++ /* adjust ".." of renamed directory */
+ set_ino_in_dir_entry(&dot_dot_de, INODE_PKEY(new_dir));
+ journal_mark_dirty(&th, new_dir->i_sb, dot_dot_de.de_bh);
+
+--- a/fs/reiserfs/objectid.c
++++ b/fs/reiserfs/objectid.c
+@@ -180,7 +180,7 @@ int reiserfs_convert_objectid_map_v1(str
+
+ if (cur_size > new_size) {
+ /* mark everyone used that was listed as free at the end of the objectid
+- ** map
++ ** map
+ */
+ objectid_map[new_size - 1] = objectid_map[cur_size - 1];
+ set_sb_oid_cursize(disk_sb, new_size);
+--- a/fs/reiserfs/prints.c
++++ b/fs/reiserfs/prints.c
+@@ -181,11 +181,11 @@ static char *is_there_reiserfs_struct(ch
+ appropriative printk. With this reiserfs_warning you can use format
+ specification for complex structures like you used to do with
+ printfs for integers, doubles and pointers. For instance, to print
+- out key structure you have to write just:
+- reiserfs_warning ("bad key %k", key);
+- instead of
+- printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
+- key->k_offset, key->k_uniqueness);
++ out key structure you have to write just:
++ reiserfs_warning ("bad key %k", key);
++ instead of
++ printk ("bad key %lu %lu %lu %lu", key->k_dir_id, key->k_objectid,
++ key->k_offset, key->k_uniqueness);
+ */
+
+ static void prepare_error_buf(const char *fmt, va_list args)
+@@ -247,11 +247,11 @@ static void prepare_error_buf(const char
+ }
+
+ /* in addition to usual conversion specifiers this accepts reiserfs
+- specific conversion specifiers:
+- %k to print little endian key,
+- %K to print cpu key,
++ specific conversion specifiers:
++ %k to print little endian key,
++ %K to print cpu key,
+ %h to print item_head,
+- %t to print directory entry
++ %t to print directory entry
+ %z to print block head (arg must be struct buffer_head *
+ %b to print buffer_head
+ */
+@@ -317,17 +317,17 @@ void reiserfs_debug(struct super_block *
+ maintainer-errorid. Don't bother with reusing errorids, there are
+ lots of numbers out there.
+
+- Example:
+-
++ Example:
++
+ reiserfs_panic(
+ p_sb, "reiser-29: reiserfs_new_blocknrs: "
+ "one of search_start or rn(%d) is equal to MAX_B_NUM,"
+- "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
++ "which means that we are optimizing location based on the bogus location of a temp buffer (%p).",
+ rn, bh
+ );
+
+ Regular panic()s sometimes clear the screen before the message can
+- be read, thus the need for the while loop.
++ be read, thus the need for the while loop.
+
+ Numbering scheme for panic used by Vladimir and Anatoly( Hans completely ignores this scheme, and considers it
+ pointless complexity):
+--- a/fs/reiserfs/procfs.c
++++ b/fs/reiserfs/procfs.c
+@@ -636,7 +636,7 @@ int reiserfs_global_version_in_proc(char
+ *
+ */
+
+-/*
++/*
+ * Make Linus happy.
+ * Local variables:
+ * c-indentation-style: "K&R"
+--- a/fs/reiserfs/README
++++ b/fs/reiserfs/README
+@@ -1,4 +1,4 @@
+-[LICENSING]
++[LICENSING]
+
+ ReiserFS is hereby licensed under the GNU General
+ Public License version 2.
+@@ -31,7 +31,7 @@ the GPL as not allowing those additional
+ it wrongly, and Richard Stallman agrees with me, when carefully read
+ you can see that those restrictions on additional terms do not apply
+ to the owner of the copyright, and my interpretation of this shall
+-govern for this license.
++govern for this license.
+
+ Finally, nothing in this license shall be interpreted to allow you to
+ fail to fairly credit me, or to remove my credits, without my
+--- a/fs/reiserfs/resize.c
++++ b/fs/reiserfs/resize.c
+@@ -1,8 +1,8 @@
+-/*
++/*
+ * Copyright 2000 by Hans Reiser, licensing governed by reiserfs/README
+ */
+
+-/*
++/*
+ * Written by Alexander Zarochentcev.
+ *
+ * The kernel part of the (on-line) reiserfs resizer.
+@@ -101,7 +101,7 @@ int reiserfs_resize(struct super_block *
+ memcpy(jbitmap[i].bitmaps, jb->bitmaps, copy_size);
+
+ /* just in case vfree schedules on us, copy the new
+- ** pointer into the journal struct before freeing the
++ ** pointer into the journal struct before freeing the
+ ** old one
+ */
+ node_tmp = jb->bitmaps;
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -77,7 +77,7 @@ inline void copy_item_head(struct item_h
+ /* k1 is pointer to on-disk structure which is stored in little-endian
+ form. k2 is pointer to cpu variable. For key of items of the same
+ object this returns 0.
+- Returns: -1 if key1 < key2
++ Returns: -1 if key1 < key2
+ 0 if key1 == key2
+ 1 if key1 > key2 */
+ inline int comp_short_keys(const struct reiserfs_key *le_key,
+@@ -890,7 +890,7 @@ static inline int prepare_for_direct_ite
+ }
+ // new file gets truncated
+ if (get_inode_item_key_version(inode) == KEY_FORMAT_3_6) {
+- //
++ //
+ round_len = ROUND_UP(new_file_length);
+ /* this was n_new_file_length < le_ih ... */
+ if (round_len < le_ih_k_offset(le_ih)) {
+@@ -1443,7 +1443,7 @@ static int maybe_indirect_to_direct(stru
+ if (atomic_read(&p_s_inode->i_count) > 1 ||
+ !tail_has_to_be_packed(p_s_inode) ||
+ !page || (REISERFS_I(p_s_inode)->i_flags & i_nopack_mask)) {
+- // leave tail in an unformatted node
++ /* leave tail in an unformatted node */
+ *p_c_mode = M_SKIP_BALANCING;
+ cut_bytes =
+ n_block_size - (n_new_file_size & (n_block_size - 1));
+@@ -1826,7 +1826,7 @@ int reiserfs_do_truncate(struct reiserfs
+ /* While there are bytes to truncate and previous file item is presented in the tree. */
+
+ /*
+- ** This loop could take a really long time, and could log
++ ** This loop could take a really long time, and could log
+ ** many more blocks than a transaction can hold. So, we do a polite
+ ** journal end here, and if the transaction needs ending, we make
+ ** sure the file is consistent before ending the current trans
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -754,7 +754,7 @@ static int reiserfs_getopt(struct super_
+ char **opt_arg, unsigned long *bit_flags)
+ {
+ char *p;
+- /* foo=bar,
++ /* foo=bar,
+ ^ ^ ^
+ | | +-- option_end
+ | +-- arg_start
+@@ -1346,7 +1346,7 @@ static int read_super_block(struct super
+ }
+ //
+ // ok, reiserfs signature (old or new) found in at the given offset
+- //
++ //
+ fs_blocksize = sb_blocksize(rs);
+ brelse(bh);
+ sb_set_blocksize(s, fs_blocksize);
+@@ -1532,8 +1532,8 @@ static int what_hash(struct super_block
+ code = find_hash_out(s);
+
+ if (code != UNSET_HASH && reiserfs_hash_detect(s)) {
+- /* detection has found the hash, and we must check against the
+- ** mount options
++ /* detection has found the hash, and we must check against the
++ ** mount options
+ */
+ if (reiserfs_rupasov_hash(s) && code != YURA_HASH) {
+ reiserfs_warning(s, "reiserfs-2507",
+@@ -1565,7 +1565,7 @@ static int what_hash(struct super_block
+ }
+ }
+
+- /* if we are mounted RW, and we have a new valid hash code, update
++ /* if we are mounted RW, and we have a new valid hash code, update
+ ** the super
+ */
+ if (code != UNSET_HASH &&
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -46,7 +46,7 @@ int direct2indirect(struct reiserfs_tran
+ /* Set the key to search for the place for new unfm pointer */
+ make_cpu_key(&end_key, inode, tail_offset, TYPE_INDIRECT, 4);
+
+- // FIXME: we could avoid this
++ /* FIXME: we could avoid this */
+ if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
+ reiserfs_error(sb, "PAP-14030",
+ "pasted or inserted byte exists in "
+--- a/include/linux/reiserfs_fs_sb.h
++++ b/include/linux/reiserfs_fs_sb.h
+@@ -14,7 +14,7 @@ typedef enum {
+ } reiserfs_super_block_flags;
+
+ /* struct reiserfs_super_block accessors/mutators
+- * since this is a disk structure, it will always be in
++ * since this is a disk structure, it will always be in
+ * little endian format. */
+ #define sb_block_count(sbp) (le32_to_cpu((sbp)->s_v1.s_block_count))
+ #define set_sb_block_count(sbp,v) ((sbp)->s_v1.s_block_count = cpu_to_le32(v))
+@@ -83,16 +83,16 @@ typedef enum {
+
+ /* LOGGING -- */
+
+-/* These all interelate for performance.
++/* These all interelate for performance.
+ **
+-** If the journal block count is smaller than n transactions, you lose speed.
++** If the journal block count is smaller than n transactions, you lose speed.
+ ** I don't know what n is yet, I'm guessing 8-16.
+ **
+ ** typical transaction size depends on the application, how often fsync is
+-** called, and how many metadata blocks you dirty in a 30 second period.
++** called, and how many metadata blocks you dirty in a 30 second period.
+ ** The more small files (<16k) you use, the larger your transactions will
+ ** be.
+-**
++**
+ ** If your journal fills faster than dirty buffers get flushed to disk, it must flush them before allowing the journal
+ ** to wrap, which slows things down. If you need high speed meta data updates, the journal should be big enough
+ ** to prevent wrapping before dirty meta blocks get to disk.
+@@ -241,7 +241,7 @@ struct reiserfs_journal {
+
+ struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS]; /* array of bitmaps to record the deleted blocks */
+ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE]; /* hash table for real buffer heads in current trans */
+- struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
++ struct reiserfs_journal_cnode *j_list_hash_table[JOURNAL_HASH_SIZE]; /* hash table for all the real buffer heads in all
+ the transactions */
+ struct list_head j_prealloc_list; /* list of inodes which have preallocated blocks */
+ int j_persistent_trans;
+@@ -425,7 +425,7 @@ enum reiserfs_mount_options {
+ partition will be dealt with in a
+ manner of 3.5.x */
+
+-/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
++/* -o hash={tea, rupasov, r5, detect} is meant for properly mounting
+ ** reiserfs disks from 3.5.19 or earlier. 99% of the time, this option
+ ** is not required. If the normal autodection code can't determine which
+ ** hash to use (because both hashes had the same value for a file)
--- /dev/null
+Subject: reiserfs: use generic xattr handlers
+From: Jeff Mahoney <jeffm@suse.com>
+
+ Christoph Hellwig had asked me quite some time ago to port the reiserfs
+ xattrs to the generic xattr interface.
+
+ This patch replaces the reiserfs-specific xattr handling code with the
+ generic struct xattr_handler.
+
+ However, since reiserfs doesn't split the prefix and name when accessing
+ xattrs, it can't leverage generic_{set,get,list,remove}xattr without
+ needlessly reconstructing the name on the back end.
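+
+ For reference, the generic interface is a table of per-prefix
+ operations; roughly (a trimmed sketch of the struct in this kernel,
+ not the exact declaration):
+
+	struct xattr_handler {
+		char *prefix;	/* e.g. "user.", "trusted." */
+		size_t (*list)(struct inode *inode, char *list,
+			       size_t list_size, const char *name,
+			       size_t name_len);
+		int (*get)(struct inode *inode, const char *name,
+			   void *buffer, size_t size);
+		int (*set)(struct inode *inode, const char *name,
+			   const void *buffer, size_t size, int flags);
+	};
+
+ A null-terminated array of these is hung off the superblock's s_xattr
+ field, and reiserfs matches the name prefix itself (see
+ find_xattr_handler_prefix() in the diff below) instead of going
+ through the generic_*xattr() entry points.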
+
+ Update 7/26/07: Added missing dput() to deletion path.
+ Update 8/30/07: Added missing mark_inode_dirty when i_mode is used to
+ represent an ACL and no previous ACL existed.
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/super.c | 7
+ fs/reiserfs/xattr.c | 467 ++++++++++++++++-------------------------
+ fs/reiserfs/xattr_acl.c | 79 ++----
+ fs/reiserfs/xattr_security.c | 26 --
+ fs/reiserfs/xattr_trusted.c | 45 ---
+ fs/reiserfs/xattr_user.c | 31 --
+ include/linux/reiserfs_acl.h | 16 -
+ include/linux/reiserfs_fs_sb.h | 3
+ include/linux/reiserfs_xattr.h | 25 --
+ 9 files changed, 258 insertions(+), 441 deletions(-)
+
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -2261,9 +2261,6 @@ static int __init init_reiserfs_fs(void)
+ return ret;
+ }
+
+- if ((ret = reiserfs_xattr_register_handlers()))
+- goto failed_reiserfs_xattr_register_handlers;
+-
+ reiserfs_proc_info_global_init();
+ reiserfs_proc_register_global("version",
+ reiserfs_global_version_in_proc);
+@@ -2274,9 +2271,6 @@ static int __init init_reiserfs_fs(void)
+ return 0;
+ }
+
+- reiserfs_xattr_unregister_handlers();
+-
+- failed_reiserfs_xattr_register_handlers:
+ reiserfs_proc_unregister_global("version");
+ reiserfs_proc_info_global_done();
+ destroy_inodecache();
+@@ -2286,7 +2280,6 @@ static int __init init_reiserfs_fs(void)
+
+ static void __exit exit_reiserfs_fs(void)
+ {
+- reiserfs_xattr_unregister_handlers();
+ reiserfs_proc_unregister_global("version");
+ reiserfs_proc_info_global_done();
+ unregister_filesystem(&reiserfs_fs_type);
+--- a/fs/reiserfs/xattr_acl.c
++++ b/fs/reiserfs/xattr_acl.c
+@@ -271,7 +271,7 @@ reiserfs_set_acl(struct inode *inode, in
+ char *name;
+ void *value = NULL;
+ struct posix_acl **p_acl;
+- size_t size;
++ size_t size = 0;
+ int error;
+ struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+
+@@ -308,16 +308,21 @@ reiserfs_set_acl(struct inode *inode, in
+ value = posix_acl_to_disk(acl, &size);
+ if (IS_ERR(value))
+ return (int)PTR_ERR(value);
+- error = reiserfs_xattr_set(inode, name, value, size, 0);
+- } else {
+- error = reiserfs_xattr_del(inode, name);
+- if (error == -ENODATA) {
+- /* This may seem odd here, but it means that the ACL was set
+- * with a value representable with mode bits. If there was
+- * an ACL before, reiserfs_xattr_del already dirtied the inode.
+- */
++ }
++
++ error = __reiserfs_xattr_set(inode, name, value, size, 0);
++
++ /*
++ * Ensure that the inode gets dirtied if we're only using
++ * the mode bits and an old ACL didn't exist. We don't need
++ * to check if the inode is hashed here since we won't get
++ * called by reiserfs_inherit_default_acl().
++ */
++ if (error == -ENODATA) {
++ error = 0;
++ if (type == ACL_TYPE_ACCESS) {
++ inode->i_ctime = CURRENT_TIME_SEC;
+ mark_inode_dirty(inode);
+- error = 0;
+ }
+ }
+
+@@ -474,33 +479,22 @@ posix_acl_access_set(struct inode *inode
+ return xattr_set_acl(inode, ACL_TYPE_ACCESS, value, size);
+ }
+
+-static int posix_acl_access_del(struct inode *inode, const char *name)
+-{
+- struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+- if (strlen(name) != sizeof(POSIX_ACL_XATTR_ACCESS) - 1)
+- return -EINVAL;
+- iset_acl(inode, &reiserfs_i->i_acl_access, ERR_PTR(-ENODATA));
+- return 0;
+-}
+-
+-static int
+-posix_acl_access_list(struct inode *inode, const char *name, int namelen,
+- char *out)
++static size_t posix_acl_access_list(struct inode *inode, char *list,
++ size_t list_size, const char *name,
++ size_t name_len)
+ {
+- int len = namelen;
++ const size_t size = sizeof(POSIX_ACL_XATTR_ACCESS);
+ if (!reiserfs_posixacl(inode->i_sb))
+ return 0;
+- if (out)
+- memcpy(out, name, len);
+-
+- return len;
++ if (list && size <= list_size)
++ memcpy(list, POSIX_ACL_XATTR_ACCESS, size);
++ return size;
+ }
+
+-struct reiserfs_xattr_handler posix_acl_access_handler = {
++struct xattr_handler reiserfs_posix_acl_access_handler = {
+ .prefix = POSIX_ACL_XATTR_ACCESS,
+ .get = posix_acl_access_get,
+ .set = posix_acl_access_set,
+- .del = posix_acl_access_del,
+ .list = posix_acl_access_list,
+ };
+
+@@ -522,32 +516,21 @@ posix_acl_default_set(struct inode *inod
+ return xattr_set_acl(inode, ACL_TYPE_DEFAULT, value, size);
+ }
+
+-static int posix_acl_default_del(struct inode *inode, const char *name)
+-{
+- struct reiserfs_inode_info *reiserfs_i = REISERFS_I(inode);
+- if (strlen(name) != sizeof(POSIX_ACL_XATTR_DEFAULT) - 1)
+- return -EINVAL;
+- iset_acl(inode, &reiserfs_i->i_acl_default, ERR_PTR(-ENODATA));
+- return 0;
+-}
+-
+-static int
+-posix_acl_default_list(struct inode *inode, const char *name, int namelen,
+- char *out)
++static size_t posix_acl_default_list(struct inode *inode, char *list,
++ size_t list_size, const char *name,
++ size_t name_len)
+ {
+- int len = namelen;
++ const size_t size = sizeof(POSIX_ACL_XATTR_DEFAULT);
+ if (!reiserfs_posixacl(inode->i_sb))
+ return 0;
+- if (out)
+- memcpy(out, name, len);
+-
+- return len;
++ if (list && size <= list_size)
++ memcpy(list, POSIX_ACL_XATTR_DEFAULT, size);
++ return size;
+ }
+
+-struct reiserfs_xattr_handler posix_acl_default_handler = {
++struct xattr_handler reiserfs_posix_acl_default_handler = {
+ .prefix = POSIX_ACL_XATTR_DEFAULT,
+ .get = posix_acl_default_get,
+ .set = posix_acl_default_set,
+- .del = posix_acl_default_del,
+ .list = posix_acl_default_list,
+ };
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -53,7 +53,6 @@
+ #define PRIVROOT_NAME ".reiserfs_priv"
+ #define XAROOT_NAME "xattrs"
+
+-static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char *);
+
+ /* Helpers for inode ops. We do this so that we don't have all the VFS
+ * overhead and also for proper i_mutex annotation.
+@@ -110,7 +109,6 @@ static int xattr_rmdir(struct inode *dir
+ return error;
+ }
+
+-
+ #define xattr_may_create(flags) (!flags || flags & XATTR_CREATE)
+
+ /* Returns and possibly creates the xattr dir. */
+@@ -339,14 +337,17 @@ int xattr_readdir(struct inode *inode, f
+ return res;
+ }
+
+-/* expects xadir->d_inode->i_mutex to be locked */
++/* The following are side effects of other operations that aren't explicitly
++ * modifying extended attributes. This includes operations such as permissions
++ * or ownership changes, object deletions, etc. */
++
+ static int
+-__reiserfs_xattr_del(struct dentry *xadir, const char *name, int namelen)
++reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
+ {
++ struct dentry *xadir = (struct dentry *)buf;
+ struct dentry *dentry;
+- struct inode *dir = xadir->d_inode;
+ int err = 0;
+- struct reiserfs_xattr_handler *xah;
+
+ dentry = lookup_one_len(name, xadir, namelen);
+ if (IS_ERR(dentry)) {
+@@ -361,28 +362,7 @@ __reiserfs_xattr_del(struct dentry *xadi
+ if (S_ISDIR(dentry->d_inode->i_mode))
+ goto out_file;
+
+- if (!IS_PRIVATE(dentry->d_inode)) {
+- reiserfs_error(dir->i_sb, "jdm-20003",
+- "OID %08x [%.*s/%.*s] doesn't have "
+- "priv flag set [parent is %sset].",
+- le32_to_cpu(INODE_PKEY(dentry->d_inode)->
+- k_objectid), xadir->d_name.len,
+- xadir->d_name.name, namelen, name,
+- IS_PRIVATE(xadir->d_inode) ? "" :
+- "not ");
+- dput(dentry);
+- return -EIO;
+- }
+-
+- /* Deletion pre-operation */
+- xah = find_xattr_handler_prefix(name);
+- if (xah && xah->del) {
+- err = xah->del(dentry->d_inode, name);
+- if (err)
+- goto out;
+- }
+-
+- err = xattr_unlink(dir, dentry);
++ err = xattr_unlink(xadir->d_inode, dentry);
+
+ out_file:
+ dput(dentry);
+@@ -391,20 +371,6 @@ out:
+ return err;
+ }
+
+-/* The following are side effects of other operations that aren't explicitly
+- * modifying extended attributes. This includes operations such as permissions
+- * or ownership changes, object deletions, etc. */
+-
+-static int
+-reiserfs_delete_xattrs_filler(void *buf, const char *name, int namelen,
+- loff_t offset, u64 ino, unsigned int d_type)
+-{
+- struct dentry *xadir = (struct dentry *)buf;
+-
+- return __reiserfs_xattr_del(xadir, name, namelen);
+-
+-}
+-
+ /* This is called w/ inode->i_mutex downed */
+ int reiserfs_delete_xattrs(struct inode *inode)
+ {
+@@ -541,14 +507,11 @@ out:
+ }
+
+ #ifdef CONFIG_REISERFS_FS_XATTR
+-static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char
+- *prefix);
+-
+ /* Returns a dentry corresponding to a specific extended attribute file
+ * for the inode. If flags allow, the file is created. Otherwise, a
+ * valid or negative dentry, or an error is returned. */
+-static struct dentry *get_xa_file_dentry(const struct inode *inode,
+- const char *name, int flags)
++static struct dentry *xattr_lookup(struct inode *inode, const char *name,
++ int flags)
+ {
+ struct dentry *xadir, *xafile;
+ int err = 0;
+@@ -623,6 +586,45 @@ int reiserfs_commit_write(struct file *f
+ int reiserfs_prepare_write(struct file *f, struct page *page,
+ unsigned from, unsigned to);
+
++static void update_ctime(struct inode *inode)
++{
++ struct timespec now = current_fs_time(inode->i_sb);
++ if (hlist_unhashed(&inode->i_hash) || !inode->i_nlink ||
++ timespec_equal(&inode->i_ctime, &now))
++ return;
++
++ inode->i_ctime = CURRENT_TIME_SEC;
++ mark_inode_dirty(inode);
++}
++
++static int lookup_and_delete_xattr(struct inode *inode, const char *name)
++{
++ int err = 0;
++ struct dentry *dentry, *xadir;
++
++ xadir = open_xa_dir(inode, XATTR_REPLACE);
++ if (IS_ERR(xadir))
++ return PTR_ERR(xadir);
++
++ dentry = lookup_one_len(name, xadir, strlen(name));
++ if (IS_ERR(dentry)) {
++ err = PTR_ERR(dentry);
++ goto out_dput;
++ }
++
++ if (dentry->d_inode) {
++ mutex_lock_nested(&xadir->d_inode->i_mutex, I_MUTEX_XATTR);
++ err = xattr_unlink(xadir->d_inode, dentry);
++ mutex_unlock(&xadir->d_inode->i_mutex);
++ update_ctime(inode);
++ }
++
++ dput(dentry);
++out_dput:
++ dput(xadir);
++ return err;
++}
++
+
+ /* Generic extended attribute operations that can be used by xa plugins */
+
+@@ -630,8 +632,8 @@ int reiserfs_prepare_write(struct file *
+ * inode->i_mutex: down
+ */
+ int
+-reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
+- size_t buffer_size, int flags)
++__reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
++ size_t buffer_size, int flags)
+ {
+ int err = 0;
+ struct dentry *dentry;
+@@ -639,37 +641,22 @@ reiserfs_xattr_set(struct inode *inode,
+ char *data;
+ size_t file_pos = 0;
+ size_t buffer_pos = 0;
+- struct iattr newattrs;
++ size_t new_size;
+ __u32 xahash = 0;
+
+ if (get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+ if (!buffer)
+- return reiserfs_xattr_del(inode, name);
++ return lookup_and_delete_xattr(inode, name);
+
+- dentry = get_xa_file_dentry(inode, name, flags);
+- if (IS_ERR(dentry)) {
+- err = PTR_ERR(dentry);
+- goto out;
+- }
++ dentry = xattr_lookup(inode, name, flags);
++ if (IS_ERR(dentry))
++ return PTR_ERR(dentry);
+
+ down_write(&REISERFS_I(inode)->i_xattr_sem);
+
+ xahash = xattr_hash(buffer, buffer_size);
+-
+- /* Resize it so we're ok to write there */
+- newattrs.ia_size = buffer_size;
+- newattrs.ia_ctime = current_fs_time(inode->i_sb);
+- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+- mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
+- down_write(&dentry->d_inode->i_alloc_sem);
+- err = reiserfs_setattr(dentry, &newattrs);
+- up_write(&dentry->d_inode->i_alloc_sem);
+- mutex_unlock(&dentry->d_inode->i_mutex);
+- if (err)
+- goto out_filp;
+-
+ while (buffer_pos < buffer_size || buffer_pos == 0) {
+ size_t chunk;
+ size_t skip = 0;
+@@ -682,7 +669,7 @@ reiserfs_xattr_set(struct inode *inode,
+ page = reiserfs_get_page(dentry->d_inode, file_pos);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+- goto out_filp;
++ goto out_unlock;
+ }
+
+ lock_page(page);
+@@ -716,20 +703,33 @@ reiserfs_xattr_set(struct inode *inode,
+ break;
+ }
+
+- /* We can't mark the inode dirty if it's not hashed. This is the case
+- * when we're inheriting the default ACL. If we dirty it, the inode
+- * gets marked dirty, but won't (ever) make it onto the dirty list until
+- * it's synced explicitly to clear I_DIRTY. This is bad. */
+- if (!hlist_unhashed(&inode->i_hash)) {
+- inode->i_ctime = CURRENT_TIME_SEC;
+- mark_inode_dirty(inode);
+- }
+-
+- out_filp:
++ new_size = buffer_size + sizeof(struct reiserfs_xattr_header);
++ if (!err && new_size < i_size_read(dentry->d_inode)) {
++ struct iattr newattrs = {
++ .ia_ctime = current_fs_time(inode->i_sb),
++ .ia_size = buffer_size,
++ .ia_valid = ATTR_SIZE | ATTR_CTIME,
++ };
++ mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_XATTR);
++ down_write(&dentry->d_inode->i_alloc_sem);
++ err = reiserfs_setattr(dentry, &newattrs);
++ up_write(&dentry->d_inode->i_alloc_sem);
++ mutex_unlock(&dentry->d_inode->i_mutex);
++ } else
++ update_ctime(inode);
++out_unlock:
+ up_write(&REISERFS_I(inode)->i_xattr_sem);
+ dput(dentry);
++ return err;
++}
+
+- out:
++int
++reiserfs_xattr_set(struct inode *inode, const char *name, const void *buffer,
++ size_t buffer_size, int flags)
++{
++ int err = __reiserfs_xattr_set(inode, name, buffer, buffer_size, flags);
++ if (err == -ENODATA)
++ err = 0;
+ return err;
+ }
+
+@@ -737,7 +737,7 @@ reiserfs_xattr_set(struct inode *inode,
+ * inode->i_mutex: down
+ */
+ int
+-reiserfs_xattr_get(const struct inode *inode, const char *name, void *buffer,
++reiserfs_xattr_get(struct inode *inode, const char *name, void *buffer,
+ size_t buffer_size)
+ {
+ ssize_t err = 0;
+@@ -756,7 +756,7 @@ reiserfs_xattr_get(const struct inode *i
+ if (get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- dentry = get_xa_file_dentry(inode, name, XATTR_REPLACE);
++ dentry = xattr_lookup(inode, name, XATTR_REPLACE);
+ if (IS_ERR(dentry)) {
+ err = PTR_ERR(dentry);
+ goto out;
+@@ -837,32 +837,53 @@ out:
+ return err;
+ }
+
+-int reiserfs_xattr_del(struct inode *inode, const char *name)
+-{
+- struct dentry *dir;
+- int err;
++/* Actual operations that are exported to VFS-land */
++struct xattr_handler *reiserfs_xattr_handlers[] = {
++ &reiserfs_xattr_user_handler,
++ &reiserfs_xattr_trusted_handler,
++#ifdef CONFIG_REISERFS_FS_SECURITY
++ &reiserfs_xattr_security_handler,
++#endif
++#ifdef CONFIG_REISERFS_FS_POSIX_ACL
++ &reiserfs_posix_acl_access_handler,
++ &reiserfs_posix_acl_default_handler,
++#endif
++ NULL
++};
+
+- dir = open_xa_dir(inode, XATTR_REPLACE);
+- if (IS_ERR(dir)) {
+- err = PTR_ERR(dir);
+- goto out;
+- }
++/*
++ * In order to implement different sets of xattr operations for each xattr
++ * prefix with the generic xattr API, a filesystem should create a
++ * null-terminated array of struct xattr_handler (one for each prefix) and
++ * hang a pointer to it off of the s_xattr field of the superblock.
++ *
++ * The generic_fooxattr() functions will use this list to dispatch xattr
++ * operations to the correct xattr_handler.
++ */
++#define for_each_xattr_handler(handlers, handler) \
++ for ((handler) = *(handlers)++; \
++ (handler) != NULL; \
++ (handler) = *(handlers)++)
+
+- mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = __reiserfs_xattr_del(dir, name, strlen(name));
+- mutex_unlock(&dir->d_inode->i_mutex);
+- dput(dir);
++/* This is the implementation for the xattr plugin infrastructure */
++static inline struct xattr_handler *
++find_xattr_handler_prefix(struct xattr_handler **handlers,
++ const char *name)
++{
++ struct xattr_handler *xah;
+
+- if (!err) {
+- inode->i_ctime = CURRENT_TIME_SEC;
+- mark_inode_dirty(inode);
++ if (!handlers)
++ return NULL;
++
++ for_each_xattr_handler(handlers, xah) {
++ if (strncmp(xah->prefix, name, strlen(xah->prefix)) == 0)
++ break;
+ }
+
+- out:
+- return err;
++ return xah;
+ }
+
+-/* Actual operations that are exported to VFS-land */
++
+ /*
+ * Inode operation getxattr()
+ */
+@@ -870,15 +891,15 @@ ssize_t
+ reiserfs_getxattr(struct dentry * dentry, const char *name, void *buffer,
+ size_t size)
+ {
+- struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix(name);
+- int err;
++ struct inode *inode = dentry->d_inode;
++ struct xattr_handler *handler;
+
+- if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+- get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
++ handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
++
++ if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- err = xah->get(dentry->d_inode, name, buffer, size);
+- return err;
++ return handler->get(inode, name, buffer, size);
+ }
+
+ /*
+@@ -890,15 +911,15 @@ int
+ reiserfs_setxattr(struct dentry *dentry, const char *name, const void *value,
+ size_t size, int flags)
+ {
+- struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix(name);
+- int err;
++ struct inode *inode = dentry->d_inode;
++ struct xattr_handler *handler;
+
+- if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+- get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
++ handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
++
++ if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- err = xah->set(dentry->d_inode, name, value, size, flags);
+- return err;
++ return handler->set(inode, name, value, size, flags);
+ }
+
+ /*
+@@ -908,71 +929,65 @@ reiserfs_setxattr(struct dentry *dentry,
+ */
+ int reiserfs_removexattr(struct dentry *dentry, const char *name)
+ {
+- int err;
+- struct reiserfs_xattr_handler *xah = find_xattr_handler_prefix(name);
++ struct inode *inode = dentry->d_inode;
++ struct xattr_handler *handler;
++ handler = find_xattr_handler_prefix(inode->i_sb->s_xattr, name);
+
+- if (!xah || !reiserfs_xattrs(dentry->d_sb) ||
+- get_inode_sd_version(dentry->d_inode) == STAT_DATA_V1)
++ if (!handler || get_inode_sd_version(inode) == STAT_DATA_V1)
+ return -EOPNOTSUPP;
+
+- err = reiserfs_xattr_del(dentry->d_inode, name);
+-
+- dentry->d_inode->i_ctime = CURRENT_TIME_SEC;
+- mark_inode_dirty(dentry->d_inode);
+-
+- return err;
++ return handler->set(inode, name, NULL, 0, XATTR_REPLACE);
+ }
+
+-/* This is what filldir will use:
+- * r_pos will always contain the amount of space required for the entire
+- * list. If r_pos becomes larger than r_size, we need more space and we
+- * return an error indicating this. If r_pos is less than r_size, then we've
+- * filled the buffer successfully and we return success */
+-struct reiserfs_listxattr_buf {
+- int r_pos;
+- int r_size;
+- char *r_buf;
+- struct inode *r_inode;
++struct listxattr_buf {
++ size_t size;
++ size_t pos;
++ char *buf;
++ struct inode *inode;
+ };
+
+-static int
+-reiserfs_listxattr_filler(void *buf, const char *name, int namelen,
+- loff_t offset, u64 ino, unsigned int d_type)
++static int listxattr_filler(void *buf, const char *name, int namelen,
++ loff_t offset, u64 ino, unsigned int d_type)
+ {
+- struct reiserfs_listxattr_buf *b = (struct reiserfs_listxattr_buf *)buf;
+- int len = 0;
+- if (name[0] != '.'
+- || (namelen != 1 && (name[1] != '.' || namelen != 2))) {
+- struct reiserfs_xattr_handler *xah =
+- find_xattr_handler_prefix(name);
+- if (!xah)
+- return 0; /* Unsupported xattr name, skip it */
+-
+- /* We call ->list() twice because the operation isn't required to just
+- * return the name back - we want to make sure we have enough space */
+- len += xah->list(b->r_inode, name, namelen, NULL);
+-
+- if (len) {
+- if (b->r_pos + len + 1 <= b->r_size) {
+- char *p = b->r_buf + b->r_pos;
+- p += xah->list(b->r_inode, name, namelen, p);
+- *p++ = '\0';
+- }
+- b->r_pos += len + 1;
++ struct listxattr_buf *b = (struct listxattr_buf *)buf;
++ size_t size;
++ if (name[0] != '.' ||
++ (namelen != 1 && (name[1] != '.' || namelen != 2))) {
++ struct xattr_handler *handler;
++ handler = find_xattr_handler_prefix(b->inode->i_sb->s_xattr,
++ name);
++ if (!handler) /* Unsupported xattr name */
++ return 0;
++ if (b->buf) {
++ size = handler->list(b->inode, b->buf + b->pos,
++ b->size, name, namelen);
++ if (size > b->size)
++ return -ERANGE;
++ } else {
++ size = handler->list(b->inode, NULL, 0, name, namelen);
+ }
+- }
+
++ b->pos += size;
++ }
+ return 0;
+ }
+
+ /*
+ * Inode operation listxattr()
++ *
++ * We totally ignore the generic listxattr here because it would be stupid
++ * not to. Since the xattrs are organized in a directory, we can just
++ * readdir to find them.
+ */
+ ssize_t reiserfs_listxattr(struct dentry * dentry, char *buffer, size_t size)
+ {
+ struct dentry *dir;
+ int err = 0;
+- struct reiserfs_listxattr_buf buf;
++ struct listxattr_buf buf = {
++ .inode = dentry->d_inode,
++ .buf = buffer,
++ .size = buffer ? size : 0,
++ };
+
+ if (!dentry->d_inode)
+ return -EINVAL;
+@@ -985,120 +1000,22 @@ ssize_t reiserfs_listxattr(struct dentry
+ if (IS_ERR(dir)) {
+ err = PTR_ERR(dir);
+ if (err == -ENODATA)
+- err = 0; /* Not an error if there aren't any xattrs */
++ err = 0; /* Not an error if there aren't any xattrs */
+ goto out;
+ }
+
+- buf.r_buf = buffer;
+- buf.r_size = buffer ? size : 0;
+- buf.r_pos = 0;
+- buf.r_inode = dentry->d_inode;
+-
+ mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_XATTR);
+- err = xattr_readdir(dir->d_inode, reiserfs_listxattr_filler, &buf);
++ err = xattr_readdir(dir->d_inode, listxattr_filler, &buf);
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+- if (!err) {
+- if (buf.r_pos > buf.r_size && buffer != NULL)
+- err = -ERANGE;
+- else
+- err = buf.r_pos;
+- }
++ if (!err)
++ err = buf.pos;
+
+ dput(dir);
+ out:
+ return err;
+ }
+
+-/* This is the implementation for the xattr plugin infrastructure */
+-static LIST_HEAD(xattr_handlers);
+-static DEFINE_RWLOCK(handler_lock);
+-
+-static struct reiserfs_xattr_handler *find_xattr_handler_prefix(const char
+- *prefix)
+-{
+- struct reiserfs_xattr_handler *xah = NULL;
+- struct list_head *p;
+-
+- read_lock(&handler_lock);
+- list_for_each(p, &xattr_handlers) {
+- xah = list_entry(p, struct reiserfs_xattr_handler, handlers);
+- if (strncmp(xah->prefix, prefix, strlen(xah->prefix)) == 0)
+- break;
+- xah = NULL;
+- }
+-
+- read_unlock(&handler_lock);
+- return xah;
+-}
+-
+-static void __unregister_handlers(void)
+-{
+- struct reiserfs_xattr_handler *xah;
+- struct list_head *p, *tmp;
+-
+- list_for_each_safe(p, tmp, &xattr_handlers) {
+- xah = list_entry(p, struct reiserfs_xattr_handler, handlers);
+- if (xah->exit)
+- xah->exit();
+-
+- list_del_init(p);
+- }
+- INIT_LIST_HEAD(&xattr_handlers);
+-}
+-
+-int __init reiserfs_xattr_register_handlers(void)
+-{
+- int err = 0;
+- struct reiserfs_xattr_handler *xah;
+- struct list_head *p;
+-
+- write_lock(&handler_lock);
+-
+- /* If we're already initialized, nothing to do */
+- if (!list_empty(&xattr_handlers)) {
+- write_unlock(&handler_lock);
+- return 0;
+- }
+-
+- /* Add the handlers */
+- list_add_tail(&user_handler.handlers, &xattr_handlers);
+- list_add_tail(&trusted_handler.handlers, &xattr_handlers);
+-#ifdef CONFIG_REISERFS_FS_SECURITY
+- list_add_tail(&security_handler.handlers, &xattr_handlers);
+-#endif
+-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+- list_add_tail(&posix_acl_access_handler.handlers, &xattr_handlers);
+- list_add_tail(&posix_acl_default_handler.handlers, &xattr_handlers);
+-#endif
+-
+- /* Run initializers, if available */
+- list_for_each(p, &xattr_handlers) {
+- xah = list_entry(p, struct reiserfs_xattr_handler, handlers);
+- if (xah->init) {
+- err = xah->init();
+- if (err) {
+- list_del_init(p);
+- break;
+- }
+- }
+- }
+-
+- /* Clean up other handlers, if any failed */
+- if (err)
+- __unregister_handlers();
+-
+- write_unlock(&handler_lock);
+- return err;
+-}
+-
+-void reiserfs_xattr_unregister_handlers(void)
+-{
+- write_lock(&handler_lock);
+- __unregister_handlers();
+- write_unlock(&handler_lock);
+-}
+-
+ static int reiserfs_check_acl(struct inode *inode, int mask)
+ {
+ struct posix_acl *acl;
+@@ -1157,20 +1074,16 @@ static int xattr_mount_check(struct supe
+ {
+ /* We need generation numbers to ensure that the oid mapping is correct
+ * v3.5 filesystems don't have them. */
+- if (!old_format_only(s)) {
+- set_bit(REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+- } else if (reiserfs_xattrs_optional(s)) {
+- /* Old format filesystem, but optional xattrs have been enabled
+- * at mount time. Error out. */
+- reiserfs_warning(s, "jdm-20005",
+- "xattrs/ACLs not supported on pre v3.6 "
+- "format filesystem. Failing mount.");
+- return -EOPNOTSUPP;
+- } else {
+- /* Old format filesystem, but no optional xattrs have
+- * been enabled. This means we silently disable xattrs
+- * on the filesystem. */
+- clear_bit(REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
++ if (old_format_only(s)) {
++ if (reiserfs_xattrs_optional(s)) {
++ /* Old format filesystem, but optional xattrs have
++ * been enabled. Error out. */
++ reiserfs_warning(s, "jdm-2005",
++ "xattrs/ACLs not supported "
++ "on pre-v3.6 format filesystems. "
++ "Failing mount.");
++ return -EOPNOTSUPP;
++ }
+ }
+
+ return 0;
+@@ -1251,9 +1164,11 @@ int reiserfs_xattr_init(struct super_blo
+ }
+
+ #ifdef CONFIG_REISERFS_FS_XATTR
++ if (!err)
++ s->s_xattr = reiserfs_xattr_handlers;
++
+ error:
+ if (err) {
+- clear_bit(REISERFS_XATTRS, &(REISERFS_SB(s)->s_mount_opt));
+ clear_bit(REISERFS_XATTRS_USER, &(REISERFS_SB(s)->s_mount_opt));
+ clear_bit(REISERFS_POSIXACL, &(REISERFS_SB(s)->s_mount_opt));
+ }
+--- a/fs/reiserfs/xattr_security.c
++++ b/fs/reiserfs/xattr_security.c
+@@ -31,35 +31,25 @@ security_set(struct inode *inode, const
+ return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ }
+
+-static int security_del(struct inode *inode, const char *name)
++static size_t security_list(struct inode *inode, char *list, size_t list_len,
++ const char *name, size_t namelen)
+ {
+- if (strlen(name) < sizeof(XATTR_SECURITY_PREFIX))
+- return -EINVAL;
+-
+- if (IS_PRIVATE(inode))
+- return -EPERM;
+-
+- return 0;
+-}
+-
+-static int
+-security_list(struct inode *inode, const char *name, int namelen, char *out)
+-{
+- int len = namelen;
++ const size_t len = namelen + 1;
+
+ if (IS_PRIVATE(inode))
+ return 0;
+
+- if (out)
+- memcpy(out, name, len);
++ if (list && len <= list_len) {
++ memcpy(list, name, namelen);
++ list[namelen] = '\0';
++ }
+
+ return len;
+ }
+
+-struct reiserfs_xattr_handler security_handler = {
++struct xattr_handler reiserfs_xattr_security_handler = {
+ .prefix = XATTR_SECURITY_PREFIX,
+ .get = security_get,
+ .set = security_set,
+- .del = security_del,
+ .list = security_list,
+ };
+--- a/fs/reiserfs/xattr_trusted.c
++++ b/fs/reiserfs/xattr_trusted.c
+@@ -13,10 +13,7 @@ trusted_get(struct inode *inode, const c
+ if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+ return -EINVAL;
+
+- if (!reiserfs_xattrs(inode->i_sb))
+- return -EOPNOTSUPP;
+-
+- if (!(capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode)))
++ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ return -EPERM;
+
+ return reiserfs_xattr_get(inode, name, buffer, size);
+@@ -29,50 +26,30 @@ trusted_set(struct inode *inode, const c
+ if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+ return -EINVAL;
+
+- if (!reiserfs_xattrs(inode->i_sb))
+- return -EOPNOTSUPP;
+-
+- if (!(capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode)))
++ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ return -EPERM;
+
+ return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ }
+
+-static int trusted_del(struct inode *inode, const char *name)
++static size_t trusted_list(struct inode *inode, char *list, size_t list_size,
++ const char *name, size_t name_len)
+ {
+- if (strlen(name) < sizeof(XATTR_TRUSTED_PREFIX))
+- return -EINVAL;
++ const size_t len = name_len + 1;
+
+- if (!reiserfs_xattrs(inode->i_sb))
+- return -EOPNOTSUPP;
+-
+- if (!(capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode)))
+- return -EPERM;
+-
+- return 0;
+-}
+-
+-static int
+-trusted_list(struct inode *inode, const char *name, int namelen, char *out)
+-{
+- int len = namelen;
+-
+- if (!reiserfs_xattrs(inode->i_sb))
++ if (!capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode))
+ return 0;
+
+- if (!(capable(CAP_SYS_ADMIN) || IS_PRIVATE(inode)))
+- return 0;
+-
+- if (out)
+- memcpy(out, name, len);
+-
++ if (list && len <= list_size) {
++ memcpy(list, name, name_len);
++ list[name_len] = '\0';
++ }
+ return len;
+ }
+
+-struct reiserfs_xattr_handler trusted_handler = {
++struct xattr_handler reiserfs_xattr_trusted_handler = {
+ .prefix = XATTR_TRUSTED_PREFIX,
+ .get = trusted_get,
+ .set = trusted_set,
+- .del = trusted_del,
+ .list = trusted_list,
+ };
+--- a/fs/reiserfs/xattr_user.c
++++ b/fs/reiserfs/xattr_user.c
+@@ -6,10 +6,6 @@
+ #include <linux/reiserfs_xattr.h>
+ #include <asm/uaccess.h>
+
+-#ifdef CONFIG_REISERFS_FS_POSIX_ACL
+-# include <linux/reiserfs_acl.h>
+-#endif
+-
+ static int
+ user_get(struct inode *inode, const char *name, void *buffer, size_t size)
+ {
+@@ -25,7 +21,6 @@ static int
+ user_set(struct inode *inode, const char *name, const void *buffer,
+ size_t size, int flags)
+ {
+-
+ if (strlen(name) < sizeof(XATTR_USER_PREFIX))
+ return -EINVAL;
+
+@@ -34,33 +29,23 @@ user_set(struct inode *inode, const char
+ return reiserfs_xattr_set(inode, name, buffer, size, flags);
+ }
+
+-static int user_del(struct inode *inode, const char *name)
++static size_t user_list(struct inode *inode, char *list, size_t list_size,
++ const char *name, size_t name_len)
+ {
+- if (strlen(name) < sizeof(XATTR_USER_PREFIX))
+- return -EINVAL;
+-
+- if (!reiserfs_xattrs_user(inode->i_sb))
+- return -EOPNOTSUPP;
+- return 0;
+-}
++ const size_t len = name_len + 1;
+
+-static int
+-user_list(struct inode *inode, const char *name, int namelen, char *out)
+-{
+- int len = namelen;
+ if (!reiserfs_xattrs_user(inode->i_sb))
+ return 0;
+-
+- if (out)
+- memcpy(out, name, len);
+-
++ if (list && len <= list_size) {
++ memcpy(list, name, name_len);
++ list[name_len] = '\0';
++ }
+ return len;
+ }
+
+-struct reiserfs_xattr_handler user_handler = {
++struct xattr_handler reiserfs_xattr_user_handler = {
+ .prefix = XATTR_USER_PREFIX,
+ .get = user_get,
+ .set = user_set,
+- .del = user_del,
+ .list = user_list,
+ };
+--- a/include/linux/reiserfs_acl.h
++++ b/include/linux/reiserfs_acl.h
+@@ -52,10 +52,8 @@ int reiserfs_acl_chmod(struct inode *ino
+ int reiserfs_inherit_default_acl(struct inode *dir, struct dentry *dentry,
+ struct inode *inode);
+ int reiserfs_cache_default_acl(struct inode *dir);
+-extern int reiserfs_xattr_posix_acl_init(void) __init;
+-extern int reiserfs_xattr_posix_acl_exit(void);
+-extern struct reiserfs_xattr_handler posix_acl_default_handler;
+-extern struct reiserfs_xattr_handler posix_acl_access_handler;
++extern struct xattr_handler reiserfs_posix_acl_default_handler;
++extern struct xattr_handler reiserfs_posix_acl_access_handler;
+
+ static inline void reiserfs_init_acl_access(struct inode *inode)
+ {
+@@ -75,16 +73,6 @@ static inline struct posix_acl *reiserfs
+ return NULL;
+ }
+
+-static inline int reiserfs_xattr_posix_acl_init(void)
+-{
+- return 0;
+-}
+-
+-static inline int reiserfs_xattr_posix_acl_exit(void)
+-{
+- return 0;
+-}
+-
+ static inline int reiserfs_acl_chmod(struct inode *inode)
+ {
+ return 0;
+--- a/include/linux/reiserfs_fs_sb.h
++++ b/include/linux/reiserfs_fs_sb.h
+@@ -450,7 +450,6 @@ enum reiserfs_mount_options {
+ REISERFS_NO_UNHASHED_RELOCATION,
+ REISERFS_HASHED_RELOCATION,
+ REISERFS_ATTRS,
+- REISERFS_XATTRS,
+ REISERFS_XATTRS_USER,
+ REISERFS_POSIXACL,
+ REISERFS_BARRIER_NONE,
+@@ -488,7 +487,7 @@ enum reiserfs_mount_options {
+ #define reiserfs_data_log(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_LOG))
+ #define reiserfs_data_ordered(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_ORDERED))
+ #define reiserfs_data_writeback(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_DATA_WRITEBACK))
+-#define reiserfs_xattrs(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS))
++#define reiserfs_xattrs(s) ((s)->s_xattr != NULL)
+ #define reiserfs_xattrs_user(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_XATTRS_USER))
+ #define reiserfs_posixacl(s) (REISERFS_SB(s)->s_mount_opt & (1 << REISERFS_POSIXACL))
+ #define reiserfs_xattrs_optional(s) (reiserfs_xattrs_user(s) || reiserfs_posixacl(s))
+--- a/include/linux/reiserfs_xattr.h
++++ b/include/linux/reiserfs_xattr.h
+@@ -29,20 +29,6 @@ struct iattr;
+ struct super_block;
+ struct nameidata;
+
+-struct reiserfs_xattr_handler {
+- char *prefix;
+- int (*init) (void);
+- void (*exit) (void);
+- int (*get) (struct inode * inode, const char *name, void *buffer,
+- size_t size);
+- int (*set) (struct inode * inode, const char *name, const void *buffer,
+- size_t size, int flags);
+- int (*del) (struct inode * inode, const char *name);
+- int (*list) (struct inode * inode, const char *name, int namelen,
+- char *out);
+- struct list_head handlers;
+-};
+-
+ int reiserfs_xattr_register_handlers(void) __init;
+ void reiserfs_xattr_unregister_handlers(void);
+ int reiserfs_xattr_init(struct super_block *sb, int mount_flags);
+@@ -59,13 +45,14 @@ ssize_t reiserfs_listxattr(struct dentry
+ int reiserfs_removexattr(struct dentry *dentry, const char *name);
+ int reiserfs_permission(struct inode *inode, int mask);
+
+-int reiserfs_xattr_del(struct inode *, const char *);
+-int reiserfs_xattr_get(const struct inode *, const char *, void *, size_t);
++int reiserfs_xattr_get(struct inode *, const char *, void *, size_t);
++int __reiserfs_xattr_set(struct inode *, const char *, const void *,
++ size_t, int);
+ int reiserfs_xattr_set(struct inode *, const char *, const void *, size_t, int);
+
+-extern struct reiserfs_xattr_handler user_handler;
+-extern struct reiserfs_xattr_handler trusted_handler;
+-extern struct reiserfs_xattr_handler security_handler;
++extern struct xattr_handler reiserfs_xattr_user_handler;
++extern struct xattr_handler reiserfs_xattr_trusted_handler;
++extern struct xattr_handler reiserfs_xattr_security_handler;
+
+ static inline void reiserfs_init_xattr_rwsem(struct inode *inode)
+ {
--- /dev/null
+From: Jeff Mahoney <jeffm@suse.com>
+Subject: reiserfs: use reiserfs_error()
+
+ This patch converts many paths that currently report errors via
+ reiserfs_warning() to use reiserfs_error() instead.
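+
+ The conversion itself is mechanical; for example, where a bad block
+ number was previously only reported:
+
+	reiserfs_warning(s, "vs-4010",
+			 "block number is out of range %lu (%u)",
+			 block, SB_BLOCK_COUNT(s));
+
+ the same call site becomes:
+
+	reiserfs_error(s, "vs-4010",
+		       "block number is out of range %lu (%u)",
+		       block, SB_BLOCK_COUNT(s));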
+
+Signed-off-by: Jeff Mahoney <jeffm@suse.com>
+
+---
+
+ fs/reiserfs/bitmap.c | 56 +++++++++++++++++++++---------------------
+ fs/reiserfs/inode.c | 45 +++++++++++++++------------------
+ fs/reiserfs/lbalance.c | 20 +++++++--------
+ fs/reiserfs/namei.c | 24 +++++++++---------
+ fs/reiserfs/objectid.c | 4 +--
+ fs/reiserfs/stree.c | 26 +++++++++----------
+ fs/reiserfs/super.c | 15 +++++------
+ fs/reiserfs/tail_conversion.c | 6 ++--
+ fs/reiserfs/xattr.c | 21 +++++++--------
+ 9 files changed, 107 insertions(+), 110 deletions(-)
+
+--- a/fs/reiserfs/bitmap.c
++++ b/fs/reiserfs/bitmap.c
+@@ -64,9 +64,9 @@ int is_reusable(struct super_block *s, b
+ unsigned int bmap_count = reiserfs_bmap_count(s);
+
+ if (block == 0 || block >= SB_BLOCK_COUNT(s)) {
+- reiserfs_warning(s, "vs-4010",
+- "block number is out of range %lu (%u)",
+- block, SB_BLOCK_COUNT(s));
++ reiserfs_error(s, "vs-4010",
++ "block number is out of range %lu (%u)",
++ block, SB_BLOCK_COUNT(s));
+ return 0;
+ }
+
+@@ -79,30 +79,30 @@ int is_reusable(struct super_block *s, b
+ b_blocknr_t bmap1 = REISERFS_SB(s)->s_sbh->b_blocknr + 1;
+ if (block >= bmap1 &&
+ block <= bmap1 + bmap_count) {
+- reiserfs_warning(s, "vs-4019", "bitmap block %lu(%u) "
+- "can't be freed or reused",
+- block, bmap_count);
++ reiserfs_error(s, "vs-4019", "bitmap block %lu(%u) "
++ "can't be freed or reused",
++ block, bmap_count);
+ return 0;
+ }
+ } else {
+ if (offset == 0) {
+- reiserfs_warning(s, "vs-4020", "bitmap block %lu(%u) "
+- "can't be freed or reused",
+- block, bmap_count);
++ reiserfs_error(s, "vs-4020", "bitmap block %lu(%u) "
++ "can't be freed or reused",
++ block, bmap_count);
+ return 0;
+ }
+ }
+
+ if (bmap >= bmap_count) {
+- reiserfs_warning(s, "vs-4030", "bitmap for requested block "
+- "is out of range: block=%lu, bitmap_nr=%u",
+- block, bmap);
++ reiserfs_error(s, "vs-4030", "bitmap for requested block "
++ "is out of range: block=%lu, bitmap_nr=%u",
++ block, bmap);
+ return 0;
+ }
+
+ if (bit_value == 0 && block == SB_ROOT_BLOCK(s)) {
+- reiserfs_warning(s, "vs-4050", "this is root block (%u), "
+- "it must be busy", SB_ROOT_BLOCK(s));
++ reiserfs_error(s, "vs-4050", "this is root block (%u), "
++ "it must be busy", SB_ROOT_BLOCK(s));
+ return 0;
+ }
+
+@@ -153,8 +153,8 @@ static int scan_bitmap_block(struct reis
+ /* - I mean `a window of zero bits' as in description of this function - Zam. */
+
+ if (!bi) {
+- reiserfs_warning(s, "jdm-4055", "NULL bitmap info pointer "
+- "for bitmap %d", bmap_n);
++ reiserfs_error(s, "jdm-4055", "NULL bitmap info pointer "
++ "for bitmap %d", bmap_n);
+ return 0;
+ }
+
+@@ -399,8 +399,8 @@ static void _reiserfs_free_block(struct
+ get_bit_address(s, block, &nr, &offset);
+
+ if (nr >= reiserfs_bmap_count(s)) {
+- reiserfs_warning(s, "vs-4075", "block %lu is out of range",
+- block);
++ reiserfs_error(s, "vs-4075", "block %lu is out of range",
++ block);
+ return;
+ }
+
+@@ -412,8 +412,8 @@ static void _reiserfs_free_block(struct
+
+ /* clear bit for the given block in bit map */
+ if (!reiserfs_test_and_clear_le_bit(offset, bmbh->b_data)) {
+- reiserfs_warning(s, "vs-4080",
+- "block %lu: bit already cleared", block);
++ reiserfs_error(s, "vs-4080",
++ "block %lu: bit already cleared", block);
+ }
+ apbi[nr].free_count++;
+ journal_mark_dirty(th, s, bmbh);
+@@ -440,7 +440,7 @@ void reiserfs_free_block(struct reiserfs
+ return;
+
+ if (block > sb_block_count(REISERFS_SB(s)->s_rs)) {
+- reiserfs_panic(th->t_super, "bitmap-4072",
++ reiserfs_error(th->t_super, "bitmap-4072",
+ "Trying to free block outside file system "
+ "boundaries (%lu > %lu)",
+ block, sb_block_count(REISERFS_SB(s)->s_rs));
+@@ -472,8 +472,8 @@ static void __discard_prealloc(struct re
+ BUG_ON(!th->t_trans_id);
+ #ifdef CONFIG_REISERFS_CHECK
+ if (ei->i_prealloc_count < 0)
+- reiserfs_warning(th->t_super, "zam-4001",
+- "inode has negative prealloc blocks count.");
++ reiserfs_error(th->t_super, "zam-4001",
++ "inode has negative prealloc blocks count.");
+ #endif
+ while (ei->i_prealloc_count > 0) {
+ reiserfs_free_prealloc_block(th, inode, ei->i_prealloc_block);
+@@ -509,9 +509,9 @@ void reiserfs_discard_all_prealloc(struc
+ i_prealloc_list);
+ #ifdef CONFIG_REISERFS_CHECK
+ if (!ei->i_prealloc_count) {
+- reiserfs_warning(th->t_super, "zam-4001",
+- "inode is in prealloc list but has "
+- "no preallocated blocks.");
++ reiserfs_error(th->t_super, "zam-4001",
++ "inode is in prealloc list but has "
++ "no preallocated blocks.");
+ }
+ #endif
+ __discard_prealloc(th, ei);
+@@ -1214,7 +1214,9 @@ void reiserfs_cache_bitmap_metadata(stru
+ unsigned long *cur = (unsigned long *)(bh->b_data + bh->b_size);
+
+ /* The first bit must ALWAYS be 1 */
+- BUG_ON(!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data));
++ if (!reiserfs_test_le_bit(0, (unsigned long *)bh->b_data))
++ reiserfs_error(sb, "reiserfs-2025", "bitmap block %lu is "
++ "corrupted: first bit must be 1", bh->b_blocknr);
+
+ info->free_count = 0;
+
+--- a/fs/reiserfs/inode.c
++++ b/fs/reiserfs/inode.c
+@@ -841,12 +841,12 @@ int reiserfs_get_block(struct inode *ino
+ tail_offset);
+ if (retval) {
+ if (retval != -ENOSPC)
+- reiserfs_warning(inode->i_sb,
+- "clm-6004",
+- "convert tail failed "
+- "inode %lu, error %d",
+- inode->i_ino,
+- retval);
++ reiserfs_error(inode->i_sb,
++ "clm-6004",
++ "convert tail failed "
++ "inode %lu, error %d",
++ inode->i_ino,
++ retval);
+ if (allocated_block_nr) {
+ /* the bitmap, the super, and the stat data == 3 */
+ if (!th)
+@@ -1332,10 +1332,9 @@ void reiserfs_update_sd_size(struct reis
+ /* look for the object's stat data */
+ retval = search_item(inode->i_sb, &key, &path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(inode->i_sb, "vs-13050",
+- "i/o failure occurred trying to "
+- "update %K stat data",
+- &key);
++ reiserfs_error(inode->i_sb, "vs-13050",
++ "i/o failure occurred trying to "
++ "update %K stat data", &key);
+ return;
+ }
+ if (retval == ITEM_NOT_FOUND) {
+@@ -1424,9 +1423,9 @@ void reiserfs_read_locked_inode(struct i
+ /* look for the object's stat data */
+ retval = search_item(inode->i_sb, &key, &path_to_sd);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(inode->i_sb, "vs-13070",
+- "i/o failure occurred trying to find "
+- "stat data of %K", &key);
++ reiserfs_error(inode->i_sb, "vs-13070",
++ "i/o failure occurred trying to find "
++ "stat data of %K", &key);
+ reiserfs_make_bad_inode(inode);
+ return;
+ }
+@@ -1687,8 +1686,8 @@ static int reiserfs_new_directory(struct
+ /* look for place in the tree for new item */
+ retval = search_item(sb, &key, path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(sb, "vs-13080",
+- "i/o failure occurred creating new directory");
++ reiserfs_error(sb, "vs-13080",
++ "i/o failure occurred creating new directory");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+@@ -1727,8 +1726,8 @@ static int reiserfs_new_symlink(struct r
+ /* look for place in the tree for new item */
+ retval = search_item(sb, &key, path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(sb, "vs-13080",
+- "i/o failure occurred creating new symlink");
++ reiserfs_error(sb, "vs-13080",
++ "i/o failure occurred creating new symlink");
+ return -EIO;
+ }
+ if (retval == ITEM_FOUND) {
+@@ -2048,10 +2047,8 @@ static int grab_tail_page(struct inode *
+ ** I've screwed up the code to find the buffer, or the code to
+ ** call prepare_write
+ */
+- reiserfs_warning(p_s_inode->i_sb, "clm-6000",
+- "error reading block %lu on dev %s",
+- bh->b_blocknr,
+- reiserfs_bdevname(p_s_inode->i_sb));
++ reiserfs_error(p_s_inode->i_sb, "clm-6000",
++ "error reading block %lu", bh->b_blocknr);
+ error = -EIO;
+ goto unlock;
+ }
+@@ -2093,9 +2090,9 @@ int reiserfs_truncate_file(struct inode
+ // and get_block_create_0 could not find a block to read in,
+ // which is ok.
+ if (error != -ENOENT)
+- reiserfs_warning(p_s_inode->i_sb, "clm-6001",
+- "grab_tail_page failed %d",
+- error);
++ reiserfs_error(p_s_inode->i_sb, "clm-6001",
++ "grab_tail_page failed %d",
++ error);
+ page = NULL;
+ bh = NULL;
+ }
+--- a/fs/reiserfs/lbalance.c
++++ b/fs/reiserfs/lbalance.c
+@@ -1291,17 +1291,17 @@ void leaf_paste_entries(struct buffer_in
+ prev = (i != 0) ? deh_location(&(deh[i - 1])) : 0;
+
+ if (prev && prev <= deh_location(&(deh[i])))
+- reiserfs_warning(NULL, "vs-10240",
+- "directory item (%h) "
+- "corrupted (prev %a, "
+- "cur(%d) %a)",
+- ih, deh + i - 1, i, deh + i);
++ reiserfs_error(sb_from_bi(bi), "vs-10240",
++ "directory item (%h) "
++ "corrupted (prev %a, "
++ "cur(%d) %a)",
++ ih, deh + i - 1, i, deh + i);
+ if (next && next >= deh_location(&(deh[i])))
+- reiserfs_warning(NULL, "vs-10250",
+- "directory item (%h) "
+- "corrupted (cur(%d) %a, "
+- "next %a)",
+- ih, i, deh + i, deh + i + 1);
++ reiserfs_error(sb_from_bi(bi), "vs-10250",
++ "directory item (%h) "
++ "corrupted (cur(%d) %a, "
++ "next %a)",
++ ih, i, deh + i, deh + i + 1);
+ }
+ }
+ #endif
+--- a/fs/reiserfs/namei.c
++++ b/fs/reiserfs/namei.c
+@@ -120,8 +120,8 @@ int search_by_entry_key(struct super_blo
+ switch (retval) {
+ case ITEM_NOT_FOUND:
+ if (!PATH_LAST_POSITION(path)) {
+- reiserfs_warning(sb, "vs-7000", "search_by_key "
+- "returned item position == 0");
++ reiserfs_error(sb, "vs-7000", "search_by_key "
++ "returned item position == 0");
+ pathrelse(path);
+ return IO_ERROR;
+ }
+@@ -135,7 +135,7 @@ int search_by_entry_key(struct super_blo
+
+ default:
+ pathrelse(path);
+- reiserfs_warning(sb, "vs-7002", "no path to here");
++ reiserfs_error(sb, "vs-7002", "no path to here");
+ return IO_ERROR;
+ }
+
+@@ -298,7 +298,7 @@ static int reiserfs_find_entry(struct in
+ search_by_entry_key(dir->i_sb, &key_to_search,
+ path_to_entry, de);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(dir->i_sb, "zam-7001", "io error");
++ reiserfs_error(dir->i_sb, "zam-7001", "io error");
+ return IO_ERROR;
+ }
+
+@@ -490,9 +490,9 @@ static int reiserfs_add_entry(struct rei
+ }
+
+ if (retval != NAME_FOUND) {
+- reiserfs_warning(dir->i_sb, "zam-7002",
+- "reiserfs_find_entry() returned "
+- "unexpected value (%d)", retval);
++ reiserfs_error(dir->i_sb, "zam-7002",
++ "reiserfs_find_entry() returned "
++ "unexpected value (%d)", retval);
+ }
+
+ return -EEXIST;
+@@ -902,9 +902,9 @@ static int reiserfs_rmdir(struct inode *
+ goto end_rmdir;
+
+ if (inode->i_nlink != 2 && inode->i_nlink != 1)
+- reiserfs_warning(inode->i_sb, "reiserfs-7040",
+- "empty directory has nlink != 2 (%d)",
+- inode->i_nlink);
++ reiserfs_error(inode->i_sb, "reiserfs-7040",
++ "empty directory has nlink != 2 (%d)",
++ inode->i_nlink);
+
+ clear_nlink(inode);
+ inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME_SEC;
+@@ -1495,8 +1495,8 @@ static int reiserfs_rename(struct inode
+ if (reiserfs_cut_from_item
+ (&th, &old_entry_path, &(old_de.de_entry_key), old_dir, NULL,
+ 0) < 0)
+- reiserfs_warning(old_dir->i_sb, "vs-7060",
+- "couldn't not cut old name. Fsck later?");
++ reiserfs_error(old_dir->i_sb, "vs-7060",
++ "couldn't not cut old name. Fsck later?");
+
+ old_dir->i_size -= DEH_SIZE + old_de.de_entrylen;
+
+--- a/fs/reiserfs/objectid.c
++++ b/fs/reiserfs/objectid.c
+@@ -159,8 +159,8 @@ void reiserfs_release_objectid(struct re
+ i += 2;
+ }
+
+- reiserfs_warning(s, "vs-15011", "tried to free free object id (%lu)",
+- (long unsigned)objectid_to_release);
++ reiserfs_error(s, "vs-15011", "tried to free free object id (%lu)",
++ (long unsigned)objectid_to_release);
+ }
+
+ int reiserfs_convert_objectid_map_v1(struct super_block *s)
+--- a/fs/reiserfs/stree.c
++++ b/fs/reiserfs/stree.c
+@@ -720,9 +720,9 @@ int search_by_key(struct super_block *p_
+ // make sure, that the node contents look like a node of
+ // certain level
+ if (!is_tree_node(p_s_bh, expected_level)) {
+- reiserfs_warning(p_s_sb, "vs-5150",
+- "invalid format found in block %ld. "
+- "Fsck?", p_s_bh->b_blocknr);
++ reiserfs_error(p_s_sb, "vs-5150",
++ "invalid format found in block %ld. "
++ "Fsck?", p_s_bh->b_blocknr);
+ pathrelse(p_s_search_path);
+ return IO_ERROR;
+ }
+@@ -1336,9 +1336,9 @@ void reiserfs_delete_solid_item(struct r
+ while (1) {
+ retval = search_item(th->t_super, &cpu_key, &path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(th->t_super, "vs-5350",
+- "i/o failure occurred trying "
+- "to delete %K", &cpu_key);
++ reiserfs_error(th->t_super, "vs-5350",
++ "i/o failure occurred trying "
++ "to delete %K", &cpu_key);
+ break;
+ }
+ if (retval != ITEM_FOUND) {
+@@ -1737,7 +1737,7 @@ static void truncate_directory(struct re
+ {
+ BUG_ON(!th->t_trans_id);
+ if (inode->i_nlink)
+- reiserfs_warning(inode->i_sb, "vs-5655", "link count != 0");
++ reiserfs_error(inode->i_sb, "vs-5655", "link count != 0");
+
+ set_le_key_k_offset(KEY_FORMAT_3_5, INODE_PKEY(inode), DOT_OFFSET);
+ set_le_key_k_type(KEY_FORMAT_3_5, INODE_PKEY(inode), TYPE_DIRENTRY);
+@@ -1790,16 +1790,16 @@ int reiserfs_do_truncate(struct reiserfs
+ search_for_position_by_key(p_s_inode->i_sb, &s_item_key,
+ &s_search_path);
+ if (retval == IO_ERROR) {
+- reiserfs_warning(p_s_inode->i_sb, "vs-5657",
+- "i/o failure occurred trying to truncate %K",
+- &s_item_key);
++ reiserfs_error(p_s_inode->i_sb, "vs-5657",
++ "i/o failure occurred trying to truncate %K",
++ &s_item_key);
+ err = -EIO;
+ goto out;
+ }
+ if (retval == POSITION_FOUND || retval == FILE_NOT_FOUND) {
+- reiserfs_warning(p_s_inode->i_sb, "PAP-5660",
+- "wrong result %d of search for %K", retval,
+- &s_item_key);
++ reiserfs_error(p_s_inode->i_sb, "PAP-5660",
++ "wrong result %d of search for %K", retval,
++ &s_item_key);
+
+ err = -EIO;
+ goto out;
+--- a/fs/reiserfs/super.c
++++ b/fs/reiserfs/super.c
+@@ -193,9 +193,8 @@ static int finish_unfinished(struct supe
+ while (!retval) {
+ retval = search_item(s, &max_cpu_key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+- reiserfs_warning(s, "vs-2140",
+- "search_by_key returned %d",
+- retval);
++ reiserfs_error(s, "vs-2140",
++ "search_by_key returned %d", retval);
+ break;
+ }
+
+@@ -376,9 +375,9 @@ void add_save_link(struct reiserfs_trans
+ retval = search_item(inode->i_sb, &key, &path);
+ if (retval != ITEM_NOT_FOUND) {
+ if (retval != -ENOSPC)
+- reiserfs_warning(inode->i_sb, "vs-2100",
+- "search_by_key (%K) returned %d", &key,
+- retval);
++ reiserfs_error(inode->i_sb, "vs-2100",
++ "search_by_key (%K) returned %d", &key,
++ retval);
+ pathrelse(&path);
+ return;
+ }
+@@ -391,8 +390,8 @@ void add_save_link(struct reiserfs_trans
+ reiserfs_insert_item(th, &path, &key, &ih, NULL, (char *)&link);
+ if (retval) {
+ if (retval != -ENOSPC)
+- reiserfs_warning(inode->i_sb, "vs-2120",
+- "insert_item returned %d", retval);
++ reiserfs_error(inode->i_sb, "vs-2120",
++ "insert_item returned %d", retval);
+ } else {
+ if (truncate)
+ REISERFS_I(inode)->i_flags |=
+--- a/fs/reiserfs/tail_conversion.c
++++ b/fs/reiserfs/tail_conversion.c
+@@ -48,9 +48,9 @@ int direct2indirect(struct reiserfs_tran
+
+ // FIXME: we could avoid this
+ if (search_for_position_by_key(sb, &end_key, path) == POSITION_FOUND) {
+- reiserfs_warning(sb, "PAP-14030",
+- "pasted or inserted byte exists in "
+- "the tree %K. Use fsck to repair.", &end_key);
++ reiserfs_error(sb, "PAP-14030",
++ "pasted or inserted byte exists in "
++ "the tree %K. Use fsck to repair.", &end_key);
+ pathrelse(path);
+ return -EIO;
+ }
+--- a/fs/reiserfs/xattr.c
++++ b/fs/reiserfs/xattr.c
+@@ -259,8 +259,8 @@ static int __xattr_readdir(struct inode
+ ih = de.de_ih;
+
+ if (!is_direntry_le_ih(ih)) {
+- reiserfs_warning(inode->i_sb, "jdm-20000",
+- "not direntry %h", ih);
++ reiserfs_error(inode->i_sb, "jdm-20000",
++ "not direntry %h", ih);
+ break;
+ }
+ copy_item_head(&tmp_ih, ih);
+@@ -653,15 +653,14 @@ __reiserfs_xattr_del(struct dentry *xadi
+ goto out_file;
+
+ if (!is_reiserfs_priv_object(dentry->d_inode)) {
+- reiserfs_warning(dir->i_sb, "jdm-20003",
+- "OID %08x [%.*s/%.*s] doesn't have "
+- "priv flag set [parent is %sset].",
+- le32_to_cpu(INODE_PKEY(dentry->d_inode)->
+- k_objectid), xadir->d_name.len,
+- xadir->d_name.name, namelen, name,
+- is_reiserfs_priv_object(xadir->
+- d_inode) ? "" :
+- "not ");
++ reiserfs_error(dir->i_sb, "jdm-20003",
++ "OID %08x [%.*s/%.*s] doesn't have "
++ "priv flag set [parent is %sset].",
++ le32_to_cpu(INODE_PKEY(dentry->d_inode)->
++ k_objectid), xadir->d_name.len,
++ xadir->d_name.name, namelen, name,
++ is_reiserfs_priv_object(xadir->d_inode) ? "" :
++ "not ");
+ dput(dentry);
+ return -EIO;
+ }
--- /dev/null
+From: Michael Holzheu <holzheu@de.ibm.com>
+Subject: [PATCH] Generate Kerntypes file
+Patch-mainline: never
+References: bnc #471422
+
+Since dwarfextract doesn't produce a correct DWARF Kerntypes file,
+we generate it with the compiler again.
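+
+For illustration only (not part of the patch): with the Makefile change
+below, kbuild treats the dummy file as a regular boot object, so the type
+information can also be rebuilt by hand, e.g. (assuming an s390 native or
+cross toolchain is set up):
+
+	make ARCH=s390 arch/s390/boot/kerntypes.o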
+
+
+Signed-off-by: Michael Holzheu <holzheu@de.ibm.com>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+---
+ arch/s390/boot/Makefile | 2
+ arch/s390/boot/kerntypes.c | 289 +++++++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 290 insertions(+), 1 deletion(-)
+
+--- a/arch/s390/boot/Makefile
++++ b/arch/s390/boot/Makefile
+@@ -8,7 +8,7 @@ COMPILE_VERSION := __linux_compile_versi
+
+ EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I.
+
+-targets := image
++targets := image kerntypes.o
+
+ $(obj)/image: vmlinux FORCE
+ $(call if_changed,objcopy)
+--- /dev/null
++++ b/arch/s390/boot/kerntypes.c
+@@ -0,0 +1,289 @@
++/*
++ * kerntypes.c
++ *
++ * Dummy module that includes headers for all kernel types of interest.
++ * The kernel type information is used by the lcrash utility when
++ * analyzing system crash dumps or the live system. Using the type
++ * information for the running system, rather than kernel header files,
++ * makes for a more flexible and robust analysis tool.
++ *
++ * This source code is released under the GNU GPL.
++ */
++
++/* generate version for this file */
++typedef char *COMPILE_VERSION;
++
++/* General linux types */
++
++#include <linux/autoconf.h>
++#include <linux/compile.h>
++#include <linux/utsname.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#ifdef CONFIG_SLUB
++ #include <linux/slub_def.h>
++#else
++ #include <linux/slab_def.h>
++#endif
++#include <linux/slab.h>
++#include <linux/bio.h>
++#include <linux/bitmap.h>
++#include <linux/bitops.h>
++#include <linux/bitrev.h>
++#include <linux/blkdev.h>
++#include <linux/blkpg.h>
++#include <linux/bootmem.h>
++#include <linux/buffer_head.h>
++#include <linux/cache.h>
++#include <linux/cdev.h>
++#include <linux/cpu.h>
++#include <linux/cpumask.h>
++#include <linux/cpuset.h>
++#include <linux/dcache.h>
++#include <linux/debugfs.h>
++#include <linux/elevator.h>
++#include <linux/fd.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/futex.h>
++#include <linux/genhd.h>
++#include <linux/highmem.h>
++#include <linux/if.h>
++#include <linux/if_addr.h>
++#include <linux/if_arp.h>
++#include <linux/if_bonding.h>
++#include <linux/if_ether.h>
++#include <linux/if_tr.h>
++#include <linux/if_tun.h>
++#include <linux/if_vlan.h>
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/in_route.h>
++#include <linux/inet.h>
++#include <linux/inet_diag.h>
++#include <linux/inetdevice.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/inotify.h>
++#include <linux/interrupt.h>
++#include <linux/ioctl.h>
++#include <linux/ip.h>
++#include <linux/ipsec.h>
++#include <linux/ipv6.h>
++#include <linux/ipv6_route.h>
++#include <linux/irq.h>
++#include <linux/irqflags.h>
++#include <linux/irqreturn.h>
++#include <linux/jbd2.h>
++#include <linux/jffs2.h>
++#include <linux/jhash.h>
++#include <linux/jiffies.h>
++#include <linux/kallsyms.h>
++#include <linux/kernel.h>
++#include <linux/kernel_stat.h>
++#include <linux/kexec.h>
++#include <linux/kobject.h>
++#include <linux/kthread.h>
++#include <linux/ktime.h>
++#include <linux/list.h>
++#include <linux/memory.h>
++#include <linux/miscdevice.h>
++#include <linux/mm.h>
++#include <linux/mm_inline.h>
++#include <linux/mm_types.h>
++#include <linux/mman.h>
++#include <linux/mmtimer.h>
++#include <linux/mmzone.h>
++#include <linux/mnt_namespace.h>
++#include <linux/module.h>
++#include <linux/moduleloader.h>
++#include <linux/moduleparam.h>
++#include <linux/mount.h>
++#include <linux/mpage.h>
++#include <linux/mqueue.h>
++#include <linux/mtio.h>
++#include <linux/mutex.h>
++#include <linux/namei.h>
++#include <linux/neighbour.h>
++#include <linux/net.h>
++#include <linux/netdevice.h>
++#include <linux/netfilter.h>
++#include <linux/netfilter_arp.h>
++#include <linux/netfilter_bridge.h>
++#include <linux/netfilter_decnet.h>
++#include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv6.h>
++#include <linux/netlink.h>
++#include <linux/netpoll.h>
++#include <linux/pagemap.h>
++#include <linux/param.h>
++#include <linux/percpu.h>
++#include <linux/percpu_counter.h>
++#include <linux/pfn.h>
++#include <linux/pid.h>
++#include <linux/pid_namespace.h>
++#include <linux/pm.h>
++#include <linux/poll.h>
++#include <linux/posix-timers.h>
++#include <linux/posix_acl.h>
++#include <linux/posix_acl_xattr.h>
++#include <linux/posix_types.h>
++#include <linux/preempt.h>
++#include <linux/prio_tree.h>
++#include <linux/proc_fs.h>
++#include <linux/profile.h>
++#include <linux/ptrace.h>
++#include <linux/radix-tree.h>
++#include <linux/ramfs.h>
++#include <linux/raw.h>
++#include <linux/rbtree.h>
++#include <linux/rcupdate.h>
++#include <linux/reboot.h>
++#include <linux/relay.h>
++#include <linux/resource.h>
++#include <linux/romfs_fs.h>
++#include <linux/root_dev.h>
++#include <linux/route.h>
++#include <linux/rwsem.h>
++#include <linux/sched.h>
++#include <linux/sem.h>
++#include <linux/seq_file.h>
++#include <linux/seqlock.h>
++#include <linux/shm.h>
++#include <linux/shmem_fs.h>
++#include <linux/signal.h>
++#include <linux/signalfd.h>
++#include <linux/skbuff.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/socket.h>
++#include <linux/sockios.h>
++#include <linux/spinlock.h>
++#include <linux/stat.h>
++#include <linux/statfs.h>
++#include <linux/stddef.h>
++#include <linux/swap.h>
++#include <linux/swapops.h>
++#include <linux/sys.h>
++#include <linux/syscalls.h>
++#include <linux/sysctl.h>
++#include <linux/sysdev.h>
++#include <linux/sysfs.h>
++#include <linux/sysrq.h>
++#include <linux/tc.h>
++#include <linux/tcp.h>
++#include <linux/thread_info.h>
++#include <linux/threads.h>
++#include <linux/tick.h>
++#include <linux/time.h>
++#include <linux/timer.h>
++#include <linux/timerfd.h>
++#include <linux/times.h>
++#include <linux/timex.h>
++#include <linux/topology.h>
++#include <linux/transport_class.h>
++#include <linux/tty.h>
++#include <linux/tty_driver.h>
++#include <linux/tty_flip.h>
++#include <linux/tty_ldisc.h>
++#include <linux/types.h>
++#include <linux/uaccess.h>
++#include <linux/unistd.h>
++#include <linux/utime.h>
++#include <linux/uts.h>
++#include <linux/utsname.h>
++#include <linux/utsrelease.h>
++#include <linux/version.h>
++#include <linux/vfs.h>
++#include <linux/vmalloc.h>
++#include <linux/vmstat.h>
++#include <linux/wait.h>
++#include <linux/watchdog.h>
++#include <linux/workqueue.h>
++#include <linux/zconf.h>
++#include <linux/zlib.h>
++
++/*
++ * s390 specific includes
++ */
++
++#include <asm/lowcore.h>
++#include <asm/debug.h>
++#include <asm/ccwdev.h>
++#include <asm/ccwgroup.h>
++#include <asm/qdio.h>
++#include <asm/zcrypt.h>
++#include <asm/etr.h>
++#include <asm/ipl.h>
++#include <asm/setup.h>
++#include <asm/schid.h>
++#include <asm/chsc.h>
++
++/* channel subsystem driver */
++#include "drivers/s390/cio/cio.h"
++#include "drivers/s390/cio/chsc.h"
++#include "drivers/s390/cio/css.h"
++#include "drivers/s390/cio/device.h"
++#include "drivers/s390/cio/chsc_sch.h"
++
++/* dasd device driver */
++#include "drivers/s390/block/dasd_int.h"
++#include "drivers/s390/block/dasd_diag.h"
++#include "drivers/s390/block/dasd_eckd.h"
++#include "drivers/s390/block/dasd_fba.h"
++
++/* networking drivers */
++#include "include/net/iucv/iucv.h"
++#include "drivers/s390/net/fsm.h"
++#include "drivers/s390/net/ctcm_main.h"
++#include "drivers/s390/net/ctcm_fsms.h"
++#include "drivers/s390/net/lcs.h"
++#include "drivers/s390/net/qeth_core.h"
++#include "drivers/s390/net/qeth_core_mpc.h"
++#include "drivers/s390/net/qeth_core_offl.h"
++#include "drivers/s390/net/qeth_l3.h"
++
++/* zfcp device driver */
++#include "drivers/s390/scsi/zfcp_def.h"
++#include "drivers/s390/scsi/zfcp_fsf.h"
++
++/* crypto device driver */
++#include "drivers/s390/crypto/ap_bus.h"
++#include "drivers/s390/crypto/zcrypt_api.h"
++#include "drivers/s390/crypto/zcrypt_cca_key.h"
++#include "drivers/s390/crypto/zcrypt_pcica.h"
++#include "drivers/s390/crypto/zcrypt_pcicc.h"
++#include "drivers/s390/crypto/zcrypt_pcixcc.h"
++#include "drivers/s390/crypto/zcrypt_cex2a.h"
++
++/* sclp device driver */
++#include "drivers/s390/char/sclp.h"
++#include "drivers/s390/char/sclp_rw.h"
++#include "drivers/s390/char/sclp_tty.h"
++
++/* vmur device driver */
++#include "drivers/s390/char/vmur.h"
++
++/* qdio device driver */
++#include "drivers/s390/cio/qdio.h"
++#include "drivers/s390/cio/qdio_thinint.c"
++#include "drivers/s390/cio/qdio_perf.h"
++
++/*
++ * include sched.c for types:
++ * - struct prio_array
++ * - struct runqueue
++ */
++#include "kernel/sched.c"
++/*
++ * include slab.c for struct kmem_cache
++ */
++#ifdef CONFIG_SLUB
++ #include "mm/slub.c"
++#else
++ #include "mm/slab.c"
++#endif
++
++/* include driver core private structures */
++#include "drivers/base/base.h"
--- /dev/null
+From: Bernhard Walle <bwalle@suse.de>
+Subject: [PATCH] Strip L2^B symbols
+Patch-mainline: never
+References: bnc #456682
+
+This patch strips all L2^B symbols, which occur only on s390, from System.map.
+We don't need these symbols as they are local labels. They confuse (older)
+versions of crash and just make System.map larger.
+
+The proper fix needs to be in binutils. However, since the binutils maintainer
+at SUSE is not cooperative, I worked around this in the kernel. The proper
+binutils patch is already mainline [1].
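+
+For illustration only (the address and label suffix are made up): after this
+change, assembler-generated local labels such as
+
+	000000000001a2b0 t L2^B0012
+
+no longer end up in System.map, while all regular symbols are kept.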
+
+
+Signed-off-by: Bernhard Walle <bwalle@suse.de>
+
+[1] http://article.gmane.org/gmane.comp.gnu.binutils.cvs/12731
+---
+ scripts/mksysmap | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/mksysmap
++++ b/scripts/mksysmap
+@@ -41,5 +41,5 @@
+ # so we just ignore them to let readprofile continue to work.
+ # (At least sparc64 has __crc_ in the middle).
+
+-$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)' > $2
++$NM -n $1 | grep -v '\( [aNUw] \)\|\(__crc_\)\|\( \$[adt]\)\|\(L2\ 2\)' > $2
+
--- /dev/null
+From: Petr Ostadal <postadal@novell.com>
+Subject: fix ti_usb_3410_5052 driver for device 04b3:4543
+References: bnc#395775
+
+Signed-off-by: Oliver Neukum <oneukum@suse.de>
+
+---
+ drivers/usb/serial/ti_usb_3410_5052.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/serial/ti_usb_3410_5052.c
++++ b/drivers/usb/serial/ti_usb_3410_5052.c
+@@ -463,9 +463,11 @@ static int ti_startup(struct usb_serial
+ goto free_tdev;
+ }
+
+- /* the second configuration must be set (in sysfs by hotplug script) */
++ /* the second configuration must be set */
++	printk(KERN_DEBUG "%s: bConfigurationValue: %x\n", __func__, dev->actconfig->desc.bConfigurationValue);
+ if (dev->actconfig->desc.bConfigurationValue == TI_BOOT_CONFIG) {
+- status = -ENODEV;
++ status = usb_driver_set_configuration(dev, TI_ACTIVE_CONFIG);
++ status = status ? status : -ENODEV;
+ goto free_tdev;
+ }
+
--- /dev/null
+From: Jan Blunck <jblunck@suse.de>
+Subject: Select FRAME_POINTER only on SYSPROF_TRACER
+Date: Wed Sep 24 10:32:16 CEST 2008
+
+The only tracer that requires frame pointers is the sysprof tracer. Since this
+tracer merely duplicates the functionality of oprofile, it isn't required at
+all, so select FRAME_POINTER only when SYSPROF_TRACER is enabled.
+
+Signed-off-by: Jan Blunck <jblunck@suse.de>
+---
+ kernel/trace/Kconfig | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+Index: b/kernel/trace/Kconfig
+===================================================================
+--- a/kernel/trace/Kconfig
++++ b/kernel/trace/Kconfig
+@@ -18,7 +18,6 @@ config TRACING
+ config FTRACE
+ bool "Kernel Function Tracer"
+ depends on HAVE_FTRACE
+- select FRAME_POINTER
+ select TRACING
+ select CONTEXT_SWITCH_TRACER
+ help
+@@ -79,6 +78,7 @@ config SYSPROF_TRACER
+ bool "Sysprof Tracer"
+ depends on X86
+ select TRACING
++ select FRAME_POINTER
+ help
+ This tracer provides the trace needed by the 'Sysprof' userspace
+ tool.
--- /dev/null
+Subject: [PATCH] fix syscall_get_nr.
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+syscall_get_nr() currently returns a valid result only if the call
+chain of the traced process includes do_syscall_trace_enter(). But
+collect_syscall() can be called for any sleeping task, so the result
+of syscall_get_nr() is in general completely bogus.
+
+To make syscall_get_nr() work for any sleeping task, the trap field
+in pt_regs is replaced with svcnr - the system call number the process
+is executing. If svcnr == 0 the process is not on a system call path.
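+
+A minimal sketch (not part of the patch) of how a caller such as
+collect_syscall() can now query any sleeping task:
+
+	struct pt_regs *regs = task_pt_regs(task);
+	long nr = syscall_get_nr(task, regs);
+
+	if (nr == -1) {
+		/* task is not on a system call path */
+	} else {
+		/* task is executing system call number 'nr' */
+	}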
+
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Acked-by: John Jolly <jjolly@novell.com>
+---
+
+ arch/s390/include/asm/ptrace.h | 2 +-
+ arch/s390/include/asm/syscall.h | 4 +---
+ arch/s390/kernel/asm-offsets.c | 2 +-
+ arch/s390/kernel/compat_signal.c | 2 +-
+ arch/s390/kernel/entry.S | 21 +++++++++++----------
+ arch/s390/kernel/entry64.S | 23 ++++++++++-------------
+ arch/s390/kernel/ptrace.c | 2 +-
+ arch/s390/kernel/signal.c | 6 +++---
+ 8 files changed, 29 insertions(+), 33 deletions(-)
+
+--- a/arch/s390/include/asm/ptrace.h
++++ b/arch/s390/include/asm/ptrace.h
+@@ -321,8 +321,8 @@ struct pt_regs
+ psw_t psw;
+ unsigned long gprs[NUM_GPRS];
+ unsigned long orig_gpr2;
++ unsigned short svcnr;
+ unsigned short ilc;
+- unsigned short trap;
+ };
+ #endif
+
+--- a/arch/s390/include/asm/syscall.h
++++ b/arch/s390/include/asm/syscall.h
+@@ -17,9 +17,7 @@
+ static inline long syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+ {
+- if (regs->trap != __LC_SVC_OLD_PSW)
+- return -1;
+- return regs->gprs[2];
++ return regs->svcnr ? regs->svcnr : -1;
+ }
+
+ static inline void syscall_rollback(struct task_struct *task,
+--- a/arch/s390/kernel/asm-offsets.c
++++ b/arch/s390/kernel/asm-offsets.c
+@@ -32,7 +32,7 @@ int main(void)
+ DEFINE(__PT_GPRS, offsetof(struct pt_regs, gprs));
+ DEFINE(__PT_ORIG_GPR2, offsetof(struct pt_regs, orig_gpr2));
+ DEFINE(__PT_ILC, offsetof(struct pt_regs, ilc));
+- DEFINE(__PT_TRAP, offsetof(struct pt_regs, trap));
++ DEFINE(__PT_SVCNR, offsetof(struct pt_regs, svcnr));
+ DEFINE(__PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+ DEFINE(__SF_BACKCHAIN, offsetof(struct stack_frame, back_chain));
+--- a/arch/s390/kernel/compat_signal.c
++++ b/arch/s390/kernel/compat_signal.c
+@@ -340,7 +340,7 @@ static int restore_sigregs32(struct pt_r
+ return err;
+
+ restore_fp_regs(¤t->thread.fp_regs);
+- regs->trap = -1; /* disable syscall checks */
++ regs->svcnr = 0; /* disable syscall checks */
+ return 0;
+ }
+
+--- a/arch/s390/kernel/entry64.S
++++ b/arch/s390/kernel/entry64.S
+@@ -46,7 +46,7 @@ SP_R14 = STACK_FRAME_OVERHEAD + __P
+ SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 120
+ SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+ SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
+-SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
++SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
+ SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+
+ STACK_SHIFT = PAGE_SHIFT + THREAD_ORDER
+@@ -168,11 +168,10 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_
+ .macro CREATE_STACK_FRAME psworg,savearea
+ aghi %r15,-SP_SIZE # make room for registers & psw
+ mvc SP_PSW(16,%r15),0(%r12) # move user PSW to stack
+- la %r12,\psworg
+ stg %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+- icm %r12,12,__LC_SVC_ILC
++ icm %r12,3,__LC_SVC_ILC
+ stmg %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+- st %r12,SP_ILC(%r15)
++ st %r12,SP_SVCNR(%r15)
+ mvc SP_R12(32,%r15),\savearea # move %r12-%r15 to stack
+ la %r12,0
+ stg %r12,__SF_BACKCHAIN(%r15)
+@@ -247,16 +246,17 @@ sysc_update:
+ #endif
+ sysc_do_svc:
+ lg %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+- slag %r7,%r7,2 # *4 and test for svc 0
++ ltgr %r7,%r7 # test for svc 0
+ jnz sysc_nr_ok
+ # svc 0: system call number in %r1
+ cl %r1,BASED(.Lnr_syscalls)
+ jnl sysc_nr_ok
+ lgfr %r7,%r1 # clear high word in r1
+- slag %r7,%r7,2 # svc 0: system call number in %r1
+ sysc_nr_ok:
+ mvc SP_ARGS(8,%r15),SP_R7(%r15)
+ sysc_do_restart:
++ sth %r7,SP_SVCNR(%r15)
++ sllg %r7,%r7,2 # svc number * 4
+ larl %r10,sys_call_table
+ #ifdef CONFIG_COMPAT
+ tm __TI_flags+5(%r9),(_TIF_31BIT>>16) # running in 31 bit mode ?
+@@ -360,7 +360,6 @@ sysc_notify_resume:
+ sysc_restart:
+ ni __TI_flags+7(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+ lg %r7,SP_R2(%r15) # load new svc number
+- slag %r7,%r7,2 # *4
+ mvc SP_R2(8,%r15),SP_ORIG_R2(%r15) # restore first argument
+ lmg %r2,%r6,SP_R2(%r15) # load svc arguments
+ j sysc_do_restart # restart svc
+@@ -369,9 +368,8 @@ sysc_restart:
+ # _TIF_SINGLE_STEP is set, call do_single_step
+ #
+ sysc_singlestep:
+- ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+- lhi %r0,__LC_PGM_OLD_PSW
+- sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
++ ni __TI_flags+7(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
++ xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ larl %r14,sysc_return # load adr. of system return
+ jg do_single_step # branch to do_sigtrap
+@@ -389,7 +387,7 @@ sysc_tracesys:
+ lghi %r0,NR_syscalls
+ clgr %r0,%r2
+ jnh sysc_tracenogo
+- slag %r7,%r2,2 # *4
++ sllg %r7,%r2,2 # svc number *4
+ lgf %r8,0(%r7,%r10)
+ sysc_tracego:
+ lmg %r3,%r6,SP_R3(%r15)
+@@ -564,8 +562,7 @@ pgm_svcper:
+ # per was called from kernel, must be kprobes
+ #
+ kernel_per:
+- lhi %r0,__LC_PGM_OLD_PSW
+- sth %r0,SP_TRAP(%r15) # set trap indication to pgm check
++ xc SP_SVCNR(2,%r15),SP_SVCNR(%r15) # clear svc number
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ larl %r14,sysc_restore # load adr. of system ret, no work
+ jg do_single_step # branch to do_single_step
+--- a/arch/s390/kernel/entry.S
++++ b/arch/s390/kernel/entry.S
+@@ -46,7 +46,7 @@ SP_R14 = STACK_FRAME_OVERHEAD + __P
+ SP_R15 = STACK_FRAME_OVERHEAD + __PT_GPRS + 60
+ SP_ORIG_R2 = STACK_FRAME_OVERHEAD + __PT_ORIG_GPR2
+ SP_ILC = STACK_FRAME_OVERHEAD + __PT_ILC
+-SP_TRAP = STACK_FRAME_OVERHEAD + __PT_TRAP
++SP_SVCNR = STACK_FRAME_OVERHEAD + __PT_SVCNR
+ SP_SIZE = STACK_FRAME_OVERHEAD + __PT_SIZE
+
+ _TIF_WORK_SVC = (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | _TIF_NEED_RESCHED | \
+@@ -180,11 +180,10 @@ STACK_SIZE = 1 << STACK_SHIFT
+ .macro CREATE_STACK_FRAME psworg,savearea
+ s %r15,BASED(.Lc_spsize) # make room for registers & psw
+ mvc SP_PSW(8,%r15),0(%r12) # move user PSW to stack
+- la %r12,\psworg
+ st %r2,SP_ORIG_R2(%r15) # store original content of gpr 2
+- icm %r12,12,__LC_SVC_ILC
++ icm %r12,3,__LC_SVC_ILC
+ stm %r0,%r11,SP_R0(%r15) # store gprs %r0-%r11 to kernel stack
+- st %r12,SP_ILC(%r15)
++ st %r12,SP_SVCNR(%r15)
+ mvc SP_R12(16,%r15),\savearea # move %r12-%r15 to stack
+ la %r12,0
+ st %r12,__SF_BACKCHAIN(%r15) # clear back chain
+@@ -261,16 +260,17 @@ sysc_update:
+ #endif
+ sysc_do_svc:
+ l %r9,__LC_THREAD_INFO # load pointer to thread_info struct
+- sla %r7,2 # *4 and test for svc 0
++ ltr %r7,%r7 # test for svc 0
+ bnz BASED(sysc_nr_ok) # svc number > 0
+ # svc 0: system call number in %r1
+ cl %r1,BASED(.Lnr_syscalls)
+ bnl BASED(sysc_nr_ok)
+ lr %r7,%r1 # copy svc number to %r7
+- sla %r7,2 # *4
+ sysc_nr_ok:
+ mvc SP_ARGS(4,%r15),SP_R7(%r15)
+ sysc_do_restart:
++ sth %r7,SP_SVCNR(%r15)
++ sll %r7,2 # svc number *4
+ l %r8,BASED(.Lsysc_table)
+ tm __TI_flags+3(%r9),(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT)
+ l %r8,0(%r7,%r8) # get system call addr.
+@@ -373,7 +373,6 @@ sysc_notify_resume:
+ sysc_restart:
+ ni __TI_flags+3(%r9),255-_TIF_RESTART_SVC # clear TIF_RESTART_SVC
+ l %r7,SP_R2(%r15) # load new svc number
+- sla %r7,2
+ mvc SP_R2(4,%r15),SP_ORIG_R2(%r15) # restore first argument
+ lm %r2,%r6,SP_R2(%r15) # load svc arguments
+ b BASED(sysc_do_restart) # restart svc
+@@ -383,7 +382,8 @@ sysc_restart:
+ #
+ sysc_singlestep:
+ ni __TI_flags+3(%r9),255-_TIF_SINGLE_STEP # clear TIF_SINGLE_STEP
+- mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
++ mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
++ mvi SP_SVCNR+1(%r15),0xff
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ l %r1,BASED(.Lhandle_per) # load adr. of per handler
+ la %r14,BASED(sysc_return) # load adr. of system return
+@@ -404,7 +404,7 @@ sysc_tracesys:
+ bnl BASED(sysc_tracenogo)
+ l %r8,BASED(.Lsysc_table)
+ lr %r7,%r2
+- sll %r7,2 # *4
++ sll %r7,2 # svc number *4
+ l %r8,0(%r7,%r8)
+ sysc_tracego:
+ lm %r3,%r6,SP_R3(%r15)
+@@ -583,7 +583,8 @@ pgm_svcper:
+ # per was called from kernel, must be kprobes
+ #
+ kernel_per:
+- mvi SP_TRAP+1(%r15),0x28 # set trap indication to pgm check
++ mvi SP_SVCNR(%r15),0xff # set trap indication to pgm check
++ mvi SP_SVCNR+1(%r15),0xff
+ la %r2,SP_PTREGS(%r15) # address of register-save area
+ l %r1,BASED(.Lhandle_per) # load adr. of per handler
+ la %r14,BASED(sysc_restore)# load adr. of system return
+--- a/arch/s390/kernel/ptrace.c
++++ b/arch/s390/kernel/ptrace.c
+@@ -671,7 +671,7 @@ asmlinkage long do_syscall_trace_enter(s
+ * debugger stored an invalid system call number. Skip
+ * the system call and the system call restart handling.
+ */
+- regs->trap = -1;
++ regs->svcnr = 0;
+ ret = -1;
+ }
+
+--- a/arch/s390/kernel/signal.c
++++ b/arch/s390/kernel/signal.c
+@@ -157,7 +157,7 @@ static int restore_sigregs(struct pt_reg
+ current->thread.fp_regs.fpc &= FPC_VALID_MASK;
+
+ restore_fp_regs(¤t->thread.fp_regs);
+- regs->trap = -1; /* disable syscall checks */
++ regs->svcnr = 0; /* disable syscall checks */
+ return 0;
+ }
+
+@@ -442,7 +442,7 @@ void do_signal(struct pt_regs *regs)
+ oldset = ¤t->blocked;
+
+ /* Are we from a system call? */
+- if (regs->trap == __LC_SVC_OLD_PSW) {
++ if (regs->svcnr) {
+ continue_addr = regs->psw.addr;
+ restart_addr = continue_addr - regs->ilc;
+ retval = regs->gprs[2];
+@@ -459,7 +459,7 @@ void do_signal(struct pt_regs *regs)
+ case -ERESTART_RESTARTBLOCK:
+ regs->gprs[2] = -EINTR;
+ }
+- regs->trap = -1; /* Don't deal with this again. */
++ regs->svcnr = 0; /* Don't deal with this again. */
+ }
+
+ /* Get signal to deliver. When running under ptrace, at this point
--- /dev/null
+Subject: xen3 arch-i386
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2008-11-25/arch/x86/kernel/asm-offsets_32.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/asm-offsets_32.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/kernel/asm-offsets_32.c 2008-11-25 12:35:53.000000000 +0100
+@@ -91,9 +91,14 @@ void foo(void)
+ OFFSET(pbe_orig_address, pbe, orig_address);
+ OFFSET(pbe_next, pbe, next);
+
++#ifndef CONFIG_X86_NO_TSS
+ /* Offset from the sysenter stack to tss.sp0 */
+- DEFINE(TSS_sysenter_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
++ DEFINE(SYSENTER_stack_sp0, offsetof(struct tss_struct, x86_tss.sp0) -
+ sizeof(struct tss_struct));
++#else
++ /* sysenter stack points directly to sp0 */
++ DEFINE(SYSENTER_stack_sp0, 0);
++#endif
+
+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
+Index: head-2008-11-25/arch/x86/kernel/entry_32.S
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/entry_32.S 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/kernel/entry_32.S 2008-11-25 12:35:53.000000000 +0100
+@@ -293,7 +293,7 @@ ENTRY(ia32_sysenter_target)
+ CFI_SIGNAL_FRAME
+ CFI_DEF_CFA esp, 0
+ CFI_REGISTER esp, ebp
+- movl TSS_sysenter_sp0(%esp),%esp
++ movl SYSENTER_stack_sp0(%esp),%esp
+ sysenter_past_esp:
+ /*
+ * Interrupts are disabled here, but we can't trace it until
+@@ -782,7 +782,7 @@ END(device_not_available)
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+- * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
++ * "SYSENTER_stack_sp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+@@ -794,7 +794,7 @@ END(device_not_available)
+ cmpw $__KERNEL_CS,4(%esp); \
+ jne ok; \
+ label: \
+- movl TSS_sysenter_sp0+offset(%esp),%esp; \
++ movl SYSENTER_stack_sp0+offset(%esp),%esp; \
+ CFI_DEF_CFA esp, 0; \
+ CFI_UNDEFINED eip; \
+ pushfl; \
+Index: head-2008-11-25/arch/x86/kernel/machine_kexec_32.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/machine_kexec_32.c 2008-11-17 13:38:03.000000000 +0100
++++ head-2008-11-25/arch/x86/kernel/machine_kexec_32.c 2008-11-25 12:35:53.000000000 +0100
+@@ -25,6 +25,10 @@
+ #include <asm/system.h>
+ #include <asm/cacheflush.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
+ #define PAGE_ALIGNED __attribute__ ((__aligned__(PAGE_SIZE)))
+ static u32 kexec_pgd[1024] PAGE_ALIGNED;
+ #ifdef CONFIG_X86_PAE
+@@ -34,6 +38,55 @@ static u32 kexec_pmd1[1024] PAGE_ALIGNED
+ static u32 kexec_pte0[1024] PAGE_ALIGNED;
+ static u32 kexec_pte1[1024] PAGE_ALIGNED;
+
++#ifdef CONFIG_XEN
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page);
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++#ifdef CONFIG_X86_PAE
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++#endif
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++
++}
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus)
++{
++ int k;
++
++ /* The per-cpu crash note resources belong to the hypervisor resource */
++ for (k = 0; k < nr_phys_cpus; k++)
++ request_resource(hypervisor, phys_cpus + k);
++
++ return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res) { ; }
++
++#endif /* CONFIG_XEN */
++
+ /*
+ * A architecture hook called to validate the
+ * proposed image and prepare the control pages
+@@ -64,6 +117,7 @@ void machine_kexec_cleanup(struct kimage
+ set_pages_nx(image->control_code_page, 1);
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+@@ -137,6 +191,7 @@ void machine_kexec(struct kimage *image)
+
+ __ftrace_enabled_restore(save_ftrace_enabled);
+ }
++#endif
+
+ void arch_crash_save_vmcoreinfo(void)
+ {
+Index: head-2008-11-25/arch/x86/kernel/vm86_32.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/vm86_32.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/kernel/vm86_32.c 2008-11-25 12:35:53.000000000 +0100
+@@ -124,7 +124,9 @@ static int copy_vm86_regs_from_user(stru
+
+ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ struct pt_regs *ret;
+ unsigned long tmp;
+
+@@ -147,12 +149,16 @@ struct pt_regs *save_v86_state(struct ke
+ do_exit(SIGSEGV);
+ }
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ current->thread.sp0 = current->thread.saved_sp0;
+ current->thread.sysenter_cs = __KERNEL_CS;
+ load_sp0(tss, ¤t->thread);
+ current->thread.saved_sp0 = 0;
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ ret = KVM86->regs32;
+
+@@ -279,7 +285,9 @@ out:
+
+ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ struct tss_struct *tss;
++#endif
+ /*
+ * make sure the vm86() system call doesn't try to do anything silly
+ */
+@@ -324,12 +332,16 @@ static void do_sys_vm86(struct kernel_vm
+ tsk->thread.saved_fs = info->regs32->fs;
+ savesegment(gs, tsk->thread.saved_gs);
+
++#ifndef CONFIG_X86_NO_TSS
+ tss = &per_cpu(init_tss, get_cpu());
++#endif
+ tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
+ if (cpu_has_sep)
+ tsk->thread.sysenter_cs = 0;
+ load_sp0(tss, &tsk->thread);
++#ifndef CONFIG_X86_NO_TSS
+ put_cpu();
++#endif
+
+ tsk->thread.screen_bitmap = info->screen_bitmap;
+ if (info->flags & VM86_SCREEN_BITMAP)
+Index: head-2008-11-25/arch/x86/power/cpu_32.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/power/cpu_32.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/power/cpu_32.c 2008-11-25 12:35:53.000000000 +0100
+@@ -65,6 +65,7 @@ static void do_fpu_end(void)
+
+ static void fix_processor_context(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = smp_processor_id();
+ struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+@@ -74,6 +75,7 @@ static void fix_processor_context(void)
+ * 386 hardware has concept of busy TSS or some
+ * similar stupidity.
+ */
++#endif
+
+ load_TR_desc(); /* This does ltr */
+ load_LDT(¤t->active_mm->context); /* This does lldt */
+Index: head-2008-11-25/arch/x86/vdso/vdso32-setup.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/vdso/vdso32-setup.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/vdso/vdso32-setup.c 2008-11-25 12:35:53.000000000 +0100
+@@ -26,6 +26,10 @@
+ #include <asm/vdso.h>
+ #include <asm/proto.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/callback.h>
++#endif
++
+ enum {
+ VDSO_DISABLED = 0,
+ VDSO_ENABLED = 1,
+@@ -225,6 +229,7 @@ static inline void map_compat_vdso(int m
+
+ void enable_sep_cpu(void)
+ {
++#ifndef CONFIG_XEN
+ int cpu = get_cpu();
+ struct tss_struct *tss = &per_cpu(init_tss, cpu);
+
+@@ -239,6 +244,35 @@ void enable_sep_cpu(void)
+ wrmsr(MSR_IA32_SYSENTER_ESP, tss->x86_tss.sp1, 0);
+ wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long) ia32_sysenter_target, 0);
+ put_cpu();
++#else
++ extern asmlinkage void ia32pv_sysenter_target(void);
++ static struct callback_register sysenter = {
++ .type = CALLBACKTYPE_sysenter,
++ .address = { __KERNEL_CS, (unsigned long)ia32pv_sysenter_target },
++ };
++
++ if (!boot_cpu_has(X86_FEATURE_SEP))
++ return;
++
++ get_cpu();
++
++ if (xen_feature(XENFEAT_supervisor_mode_kernel))
++ sysenter.address.eip = (unsigned long)ia32_sysenter_target;
++
++ switch (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter)) {
++ case 0:
++ break;
++#if CONFIG_XEN_COMPAT < 0x030200
++ case -ENOSYS:
++ sysenter.type = CALLBACKTYPE_sysenter_deprecated;
++ if (HYPERVISOR_callback_op(CALLBACKOP_register, &sysenter) == 0)
++ break;
++#endif
++ default:
++ clear_bit(X86_FEATURE_SEP, boot_cpu_data.x86_capability);
++ break;
++ }
++#endif
+ }
+
+ static struct vm_area_struct gate_vma;
--- /dev/null
+Subject: xen3 arch-x86
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+List of files that don't require modification anymore (and hence
+removed from this patch), for reference and in case upstream wants to
+take the forward porting patches:
+2.6.26/arch/x86/kernel/crash.c
+
+Index: head-2008-12-01/arch/x86/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -115,6 +115,10 @@ mcore-y := arch/x86/mach-default/
+ mflags-$(CONFIG_X86_VOYAGER) := -Iinclude/asm-x86/mach-voyager
+ mcore-$(CONFIG_X86_VOYAGER) := arch/x86/mach-voyager/
+
++# Xen subarch support
++mflags-$(CONFIG_X86_XEN) := -Iinclude/asm-x86/mach-xen
++mcore-$(CONFIG_X86_XEN) := arch/x86/mach-xen/
++
+ # generic subarchitecture
+ mflags-$(CONFIG_X86_GENERICARCH):= -Iinclude/asm-x86/mach-generic
+ fcore-$(CONFIG_X86_GENERICARCH) += arch/x86/mach-generic/
+@@ -183,9 +187,26 @@ drivers-$(CONFIG_KDB) += arch/x86/kdb/
+
+ boot := arch/x86/boot
+
+-PHONY += zImage bzImage compressed zlilo bzlilo \
++PHONY += zImage bzImage vmlinuz compressed zlilo bzlilo \
+ zdisk bzdisk fdimage fdimage144 fdimage288 isoimage install
+
++ifdef CONFIG_XEN
++CPPFLAGS := -D__XEN_INTERFACE_VERSION__=$(CONFIG_XEN_INTERFACE_VERSION) \
++ -Iinclude$(if $(KBUILD_SRC),2)/asm/mach-xen $(CPPFLAGS)
++
++ifdef CONFIG_X86_64
++LDFLAGS_vmlinux := -e startup_64
++endif
++
++# Default kernel to build
++all: vmlinuz
++
++# KBUILD_IMAGE specifies the target image being built
++KBUILD_IMAGE := $(boot)/vmlinuz
++
++vmlinuz: vmlinux
++ $(Q)$(MAKE) $(build)=$(boot) $(KBUILD_IMAGE)
++else
+ # Default kernel to build
+ all: bzImage
+
+@@ -208,6 +229,7 @@ zdisk bzdisk: vmlinux
+
+ fdimage fdimage144 fdimage288 isoimage: vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) $@
++endif
+
+ install:
+ $(Q)$(MAKE) $(build)=$(boot) BOOTIMAGE=$(KBUILD_IMAGE) install
+Index: head-2008-12-01/arch/x86/boot/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/boot/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/boot/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -25,7 +25,7 @@ SVGA_MODE := -DSVGA_MODE=NORMAL_VGA
+
+ #RAMDISK := -DRAMDISK=512
+
+-targets := vmlinux.bin setup.bin setup.elf zImage bzImage
++targets := vmlinux.bin setup.bin setup.elf zImage bzImage vmlinuz vmlinux-stripped
+ subdir- := compressed
+
+ setup-y += a20.o cmdline.o copy.o cpu.o cpucheck.o edd.o
+@@ -190,5 +190,13 @@ zlilo: $(BOOTIMAGE)
+ cp System.map $(INSTALL_PATH)/
+ if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
+
++$(obj)/vmlinuz: $(obj)/vmlinux-stripped FORCE
++ $(call if_changed,gzip)
++ @echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
++
++$(obj)/vmlinux-stripped: OBJCOPYFLAGS := -g --strip-unneeded
++$(obj)/vmlinux-stripped: vmlinux FORCE
++ $(call if_changed,objcopy)
++
+ install:
+ sh $(srctree)/$(src)/install.sh $(KERNELRELEASE) $(BOOTIMAGE) System.map "$(INSTALL_PATH)"
+Index: head-2008-12-01/arch/x86/kernel/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/Makefile 2008-12-01 11:14:33.000000000 +0100
+@@ -99,10 +99,13 @@ scx200-y += scx200_32.o
+
+ obj-$(CONFIG_OLPC) += olpc.o
+
++obj-$(CONFIG_X86_XEN) += fixup.o
++
+ ###
+ # 64 bit specific files
+ ifeq ($(CONFIG_X86_64),y)
+ obj-y += genapic_64.o genapic_flat_64.o genx2apic_uv_x.o tlb_uv.o
++ obj-$(CONFIG_X86_XEN_GENAPIC) += genapic_xen_64.o
+ obj-y += uv_sysfs.o
+ obj-y += genx2apic_cluster.o
+ obj-y += genx2apic_phys.o
+@@ -116,4 +119,10 @@ ifeq ($(CONFIG_X86_64),y)
+ obj-$(CONFIG_SWIOTLB) += pci-swiotlb_64.o
+
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconf-fam10h_64.o
++
++ time_64-$(CONFIG_XEN) += time_32.o
++ pci-dma_64-$(CONFIG_XEN) += pci-dma_32.o
+ endif
++
++disabled-obj-$(CONFIG_XEN) := i8259_$(BITS).o reboot.o smpboot_$(BITS).o
++%/head_$(BITS).o %/head_$(BITS).s: $(if $(CONFIG_XEN),EXTRA_AFLAGS,dummy) :=
+Index: head-2008-12-01/arch/x86/kernel/acpi/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/acpi/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/acpi/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -5,6 +5,9 @@ obj-$(CONFIG_ACPI_SLEEP) += sleep.o wake
+
+ ifneq ($(CONFIG_ACPI_PROCESSOR),)
+ obj-y += cstate.o processor.o
++ifneq ($(CONFIG_PROCESSOR_EXTERNAL_CONTROL),)
++obj-$(CONFIG_XEN) += processor_extcntl_xen.o
++endif
+ endif
+
+ $(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin
+@@ -12,3 +15,4 @@ $(obj)/wakeup_rm.o: $(obj)/realmode/w
+ $(obj)/realmode/wakeup.bin: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/realmode
+
++disabled-obj-$(CONFIG_XEN) := cstate.o wakeup_$(BITS).o
+Index: head-2008-12-01/arch/x86/kernel/acpi/boot.c
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/acpi/boot.c 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/acpi/boot.c 2008-12-01 11:11:08.000000000 +0100
+@@ -130,8 +130,10 @@ char *__init __acpi_map_table(unsigned l
+ if (!phys || !size)
+ return NULL;
+
++#ifndef CONFIG_XEN
+ if (phys+size <= (max_low_pfn_mapped << PAGE_SHIFT))
+ return __va(phys);
++#endif
+
+ offset = phys & (PAGE_SIZE - 1);
+ mapped_size = PAGE_SIZE - offset;
+Index: head-2008-12-01/arch/x86/kernel/acpi/processor.c
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/acpi/processor.c 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/acpi/processor.c 2008-12-01 11:11:08.000000000 +0100
+@@ -75,7 +75,18 @@ static void init_intel_pdc(struct acpi_p
+ /* Initialize _PDC data based on the CPU vendor */
+ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++ /*
++ * As a work-around, just use cpu0's cpuinfo for all processors.
++ * Further work is required to expose xen hypervisor interface of
++ * getting physical cpuinfo to dom0 kernel and then
++ * arch_acpi_processor_init_pdc can set _PDC parameters according
++ * to Xen's phys information.
++ */
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++#else
+ struct cpuinfo_x86 *c = &cpu_data(pr->id);
++#endif
+
+ pr->pdc = NULL;
+ if (c->x86_vendor == X86_VENDOR_INTEL)
+Index: head-2008-12-01/arch/x86/kernel/cpu/mtrr/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/cpu/mtrr/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/cpu/mtrr/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -1,3 +1,4 @@
+ obj-y := main.o if.o generic.o state.o
+ obj-$(CONFIG_X86_32) += amd.o cyrix.o centaur.o
+
++obj-$(CONFIG_XEN) := main.o if.o
+Index: head-2008-12-01/arch/x86/lib/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/lib/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/lib/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -25,3 +25,5 @@ else
+ lib-y += memmove_64.o memset_64.o
+ lib-y += copy_user_64.o rwlock_64.o copy_user_nocache_64.o
+ endif
++
++lib-$(CONFIG_XEN_SCRUB_PAGES) += scrub.o
+Index: head-2008-12-01/arch/x86/mm/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/mm/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/mm/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -21,4 +21,6 @@ obj-$(CONFIG_K8_NUMA) += k8topology_64.
+ endif
+ obj-$(CONFIG_ACPI_NUMA) += srat_$(BITS).o
+
++obj-$(CONFIG_XEN) += hypervisor.o
++
+ obj-$(CONFIG_MEMTEST) += memtest.o
+Index: head-2008-12-01/arch/x86/oprofile/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/oprofile/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/oprofile/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -6,7 +6,14 @@ DRIVER_OBJS = $(addprefix ../../../drive
+ oprofilefs.o oprofile_stats.o \
+ timer_int.o )
+
++ifdef CONFIG_XEN
++XENOPROF_COMMON_OBJS = $(addprefix ../../../drivers/xen/xenoprof/, \
++ xenoprofile.o)
++oprofile-y := $(DRIVER_OBJS) \
++ $(XENOPROF_COMMON_OBJS) xenoprof.o
++else
+ oprofile-y := $(DRIVER_OBJS) init.o backtrace.o
+ oprofile-$(CONFIG_X86_LOCAL_APIC) += nmi_int.o op_model_athlon.o \
+ op_model_ppro.o op_model_p4.o
+ oprofile-$(CONFIG_X86_IO_APIC) += nmi_timer_int.o
++endif
+Index: head-2008-12-01/arch/x86/pci/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/pci/Makefile 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/arch/x86/pci/Makefile 2008-12-01 11:11:08.000000000 +0100
+@@ -4,6 +4,9 @@ obj-$(CONFIG_PCI_BIOS) += pcbios.o
+ obj-$(CONFIG_PCI_MMCONFIG) += mmconfig_$(BITS).o direct.o mmconfig-shared.o
+ obj-$(CONFIG_PCI_DIRECT) += direct.o
+ obj-$(CONFIG_PCI_OLPC) += olpc.o
++# pcifront should be after mmconfig.o and direct.o as it should only
++# take over if direct access to the PCI bus is unavailable
++obj-$(CONFIG_XEN_PCIDEV_FRONTEND) += pcifront.o
+
+ obj-y += fixup.o
+ obj-$(CONFIG_ACPI) += acpi.o
+Index: head-2008-12-01/include/asm-x86/acpi.h
+===================================================================
+--- head-2008-12-01.orig/include/asm-x86/acpi.h 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/include/asm-x86/acpi.h 2008-12-01 11:11:08.000000000 +0100
+@@ -30,6 +30,10 @@
+ #include <asm/mmu.h>
+ #include <asm/mpspec.h>
+
++#ifdef CONFIG_XEN
++#include <xen/interface/platform.h>
++#endif
++
+ #define COMPILER_DEPENDENT_INT64 long long
+ #define COMPILER_DEPENDENT_UINT64 unsigned long long
+
+@@ -124,6 +128,27 @@ extern unsigned long acpi_wakeup_address
+ /* early initialization routine */
+ extern void acpi_reserve_bootmem(void);
+
++#ifdef CONFIG_XEN
++static inline int acpi_notify_hypervisor_state(u8 sleep_state,
++ u32 pm1a_cnt_val,
++ u32 pm1b_cnt_val)
++{
++ struct xen_platform_op op = {
++ .cmd = XENPF_enter_acpi_sleep,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u = {
++ .enter_acpi_sleep = {
++ .pm1a_cnt_val = pm1a_cnt_val,
++ .pm1b_cnt_val = pm1b_cnt_val,
++ .sleep_state = sleep_state,
++ },
++ },
++ };
++
++ return HYPERVISOR_platform_op(&op);
++}
++#endif /* CONFIG_XEN */
++
+ /*
+ * Check if the CPU can handle C2 and deeper
+ */
+@@ -156,7 +181,9 @@ static inline void disable_acpi(void) {
+
+ #endif /* !CONFIG_ACPI */
+
++#ifndef CONFIG_XEN
+ #define ARCH_HAS_POWER_INIT 1
++#endif
+
+ struct bootnode;
+
+Index: head-2008-12-01/include/asm-x86/apic.h
+===================================================================
+--- head-2008-12-01.orig/include/asm-x86/apic.h 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/include/asm-x86/apic.h 2008-12-01 11:11:08.000000000 +0100
+@@ -12,7 +12,9 @@
+ #include <asm/cpufeature.h>
+ #include <asm/msr.h>
+
++#ifndef CONFIG_XEN
+ #define ARCH_APICTIMER_STOPS_ON_C3 1
++#endif
+
+ /*
+ * Debugging macros
+Index: head-2008-12-01/include/asm-x86/kexec.h
+===================================================================
+--- head-2008-12-01.orig/include/asm-x86/kexec.h 2008-12-01 10:53:14.000000000 +0100
++++ head-2008-12-01/include/asm-x86/kexec.h 2008-12-01 11:11:08.000000000 +0100
+@@ -170,6 +170,19 @@ relocate_kernel(unsigned long indirectio
+ unsigned long start_address) ATTRIB_NORET;
+ #endif
+
++/* Under Xen we need to work with machine addresses. These macros give the
++ * machine address of a certain page to the generic kexec code instead of
++ * the pseudo physical address which would be given by the default macros.
++ */
++
++#ifdef CONFIG_XEN
++#define KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) pfn_to_mfn(page_to_pfn(page))
++#define kexec_pfn_to_page(pfn) pfn_to_page(mfn_to_pfn(pfn))
++#define kexec_virt_to_phys(addr) virt_to_machine(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(machine_to_phys(addr))
++#endif
++
+ #endif /* __ASSEMBLY__ */
+
+ #endif /* _KEXEC_H */
--- /dev/null
+Subject: xen3 arch-x86_64
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2008-11-25/arch/x86/kernel/asm-offsets_64.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/asm-offsets_64.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/kernel/asm-offsets_64.c 2008-11-25 12:35:54.000000000 +0100
+@@ -122,8 +122,10 @@ int main(void)
+ ENTRY(cr8);
+ BLANK();
+ #undef ENTRY
++#ifndef CONFIG_X86_NO_TSS
+ DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
+ BLANK();
++#endif
+ DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
+ BLANK();
+ DEFINE(__NR_syscall_max, sizeof(syscalls) - 1);
+Index: head-2008-11-25/arch/x86/kernel/machine_kexec_64.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/kernel/machine_kexec_64.c 2008-08-18 10:13:08.000000000 +0200
++++ head-2008-11-25/arch/x86/kernel/machine_kexec_64.c 2008-11-25 12:35:54.000000000 +0100
+@@ -27,6 +27,119 @@ static u64 kexec_pud1[512] PAGE_ALIGNED;
+ static u64 kexec_pmd1[512] PAGE_ALIGNED;
+ static u64 kexec_pte1[512] PAGE_ALIGNED;
+
++#ifdef CONFIG_XEN
++
++/* In the case of Xen, override hypervisor functions to be able to create
++ * a regular identity mapping page table...
++ */
++
++#include <xen/interface/kexec.h>
++#include <xen/interface/memory.h>
++
++#define x__pmd(x) ((pmd_t) { (x) } )
++#define x__pud(x) ((pud_t) { (x) } )
++#define x__pgd(x) ((pgd_t) { (x) } )
++
++#define x_pmd_val(x) ((x).pmd)
++#define x_pud_val(x) ((x).pud)
++#define x_pgd_val(x) ((x).pgd)
++
++static inline void x_set_pmd(pmd_t *dst, pmd_t val)
++{
++ x_pmd_val(*dst) = x_pmd_val(val);
++}
++
++static inline void x_set_pud(pud_t *dst, pud_t val)
++{
++ x_pud_val(*dst) = phys_to_machine(x_pud_val(val));
++}
++
++static inline void x_pud_clear (pud_t *pud)
++{
++ x_pud_val(*pud) = 0;
++}
++
++static inline void x_set_pgd(pgd_t *dst, pgd_t val)
++{
++ x_pgd_val(*dst) = phys_to_machine(x_pgd_val(val));
++}
++
++static inline void x_pgd_clear (pgd_t * pgd)
++{
++ x_pgd_val(*pgd) = 0;
++}
++
++#define X__PAGE_KERNEL_LARGE_EXEC \
++ _PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_PSE
++#define X_KERNPG_TABLE _PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY
++
++#define __ma(x) (pfn_to_mfn(__pa((x)) >> PAGE_SHIFT) << PAGE_SHIFT)
++
++#if PAGES_NR > KEXEC_XEN_NO_PAGES
++#error PAGES_NR is greater than KEXEC_XEN_NO_PAGES - Xen support will break
++#endif
++
++#if PA_CONTROL_PAGE != 0
++#error PA_CONTROL_PAGE is non zero - Xen support will break
++#endif
++
++void machine_kexec_setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ void *control_page;
++ void *table_page;
++
++ memset(xki->page_list, 0, sizeof(xki->page_list));
++
++ control_page = page_address(image->control_code_page) + PAGE_SIZE;
++ memcpy(control_page, relocate_kernel, PAGE_SIZE);
++
++ table_page = page_address(image->control_code_page);
++
++ xki->page_list[PA_CONTROL_PAGE] = __ma(control_page);
++ xki->page_list[PA_TABLE_PAGE] = __ma(table_page);
++
++ xki->page_list[PA_PGD] = __ma(kexec_pgd);
++ xki->page_list[PA_PUD_0] = __ma(kexec_pud0);
++ xki->page_list[PA_PUD_1] = __ma(kexec_pud1);
++ xki->page_list[PA_PMD_0] = __ma(kexec_pmd0);
++ xki->page_list[PA_PMD_1] = __ma(kexec_pmd1);
++ xki->page_list[PA_PTE_0] = __ma(kexec_pte0);
++ xki->page_list[PA_PTE_1] = __ma(kexec_pte1);
++}
++
++int __init machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus)
++{
++ int k;
++
++ /* The per-cpu crash note resources belong to the hypervisor resource */
++ for (k = 0; k < nr_phys_cpus; k++)
++ request_resource(hypervisor, phys_cpus + k);
++
++ return 0;
++}
++
++void machine_kexec_register_resources(struct resource *res) { ; }
++
++#else /* CONFIG_XEN */
++
++#define x__pmd(x) __pmd(x)
++#define x__pud(x) __pud(x)
++#define x__pgd(x) __pgd(x)
++
++#define x_set_pmd(x, y) set_pmd(x, y)
++#define x_set_pud(x, y) set_pud(x, y)
++#define x_set_pgd(x, y) set_pgd(x, y)
++
++#define x_pud_clear(x) pud_clear(x)
++#define x_pgd_clear(x) pgd_clear(x)
++
++#define X__PAGE_KERNEL_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
++#define X_KERNPG_TABLE _KERNPG_TABLE
++
++#endif /* CONFIG_XEN */
++
+ static void init_level2_page(pmd_t *level2p, unsigned long addr)
+ {
+ unsigned long end_addr;
+@@ -34,7 +147,7 @@ static void init_level2_page(pmd_t *leve
+ addr &= PAGE_MASK;
+ end_addr = addr + PUD_SIZE;
+ while (addr < end_addr) {
+- set_pmd(level2p++, __pmd(addr | __PAGE_KERNEL_LARGE_EXEC));
++ x_set_pmd(level2p++, x__pmd(addr | X__PAGE_KERNEL_LARGE_EXEC));
+ addr += PMD_SIZE;
+ }
+ }
+@@ -59,12 +172,12 @@ static int init_level3_page(struct kimag
+ }
+ level2p = (pmd_t *)page_address(page);
+ init_level2_page(level2p, addr);
+- set_pud(level3p++, __pud(__pa(level2p) | _KERNPG_TABLE));
++ x_set_pud(level3p++, x__pud(__pa(level2p) | X_KERNPG_TABLE));
+ addr += PUD_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pud_clear(level3p++);
++ x_pud_clear(level3p++);
+ addr += PUD_SIZE;
+ }
+ out:
+@@ -95,12 +208,12 @@ static int init_level4_page(struct kimag
+ if (result) {
+ goto out;
+ }
+- set_pgd(level4p++, __pgd(__pa(level3p) | _KERNPG_TABLE));
++ x_set_pgd(level4p++, x__pgd(__pa(level3p) | X_KERNPG_TABLE));
+ addr += PGDIR_SIZE;
+ }
+ /* clear the unused entries */
+ while (addr < end_addr) {
+- pgd_clear(level4p++);
++ x_pgd_clear(level4p++);
+ addr += PGDIR_SIZE;
+ }
+ out:
+@@ -111,8 +224,14 @@ out:
+ static int init_pgtable(struct kimage *image, unsigned long start_pgtable)
+ {
+ pgd_t *level4p;
++ unsigned long x_max_pfn = max_pfn;
++
++#ifdef CONFIG_XEN
++ x_max_pfn = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++#endif
++
+ level4p = (pgd_t *)__va(start_pgtable);
+- return init_level4_page(image, level4p, 0, max_pfn << PAGE_SHIFT);
++ return init_level4_page(image, level4p, 0, x_max_pfn << PAGE_SHIFT);
+ }
+
+ int machine_kexec_prepare(struct kimage *image)
+@@ -136,6 +255,7 @@ void machine_kexec_cleanup(struct kimage
+ return;
+ }
+
++#ifndef CONFIG_XEN
+ /*
+ * Do not allocate memory (or fail in any way) in machine_kexec().
+ * We are past the point of no return, committed to rebooting now.
+@@ -176,6 +296,7 @@ void machine_kexec(struct kimage *image)
+ relocate_kernel((unsigned long)image->head, (unsigned long)page_list,
+ image->start);
+ }
++#endif
+
+ void arch_crash_save_vmcoreinfo(void)
+ {
+Index: head-2008-11-25/arch/x86/power/cpu_64.c
+===================================================================
+--- head-2008-11-25.orig/arch/x86/power/cpu_64.c 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/power/cpu_64.c 2008-11-25 12:35:54.000000000 +0100
+@@ -135,6 +135,7 @@ void restore_processor_state(void)
+
+ static void fix_processor_context(void)
+ {
++#ifndef CONFIG_X86_NO_TSS
+ int cpu = smp_processor_id();
+ struct tss_struct *t = &per_cpu(init_tss, cpu);
+
+@@ -146,6 +147,7 @@ static void fix_processor_context(void)
+ set_tss_desc(cpu, t);
+
+ get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
++#endif
+
+ syscall_init(); /* This sets MSR_*STAR and related */
+ load_TR_desc(); /* This does ltr */
+Index: head-2008-11-25/arch/x86/vdso/Makefile
+===================================================================
+--- head-2008-11-25.orig/arch/x86/vdso/Makefile 2008-11-25 12:33:06.000000000 +0100
++++ head-2008-11-25/arch/x86/vdso/Makefile 2008-11-25 12:35:54.000000000 +0100
+@@ -65,6 +65,8 @@ obj-$(VDSO32-y) += vdso32-syms.lds
+ vdso32.so-$(VDSO32-y) += int80
+ vdso32.so-$(CONFIG_COMPAT) += syscall
+ vdso32.so-$(VDSO32-y) += sysenter
++xen-vdso32-$(subst 1,$(CONFIG_COMPAT),$(shell expr $(CONFIG_XEN_COMPAT)0 '<' 0x0302000)) += int80
++vdso32.so-$(CONFIG_XEN) += $(xen-vdso32-y)
+
+ vdso32-images = $(vdso32.so-y:%=vdso32-%.so)
+
--- /dev/null
+Subject: xen3 common
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+List of files that don't require modification anymore (and hence
+removed from this patch), for reference and in case upstream wants to
+take the forward porting patches:
+2.6.22/include/linux/sched.h
+2.6.22/kernel/softlockup.c
+2.6.22/kernel/timer.c
+2.6.25/mm/highmem.c
+
+--- sle11-2009-10-16.orig/drivers/Makefile 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/Makefile 2009-08-26 11:52:33.000000000 +0200
+@@ -37,6 +37,7 @@ obj-y += base/ block/ misc/ mfd/ net/
+ obj-$(CONFIG_NUBUS) += nubus/
+ obj-$(CONFIG_ATM) += atm/
+ obj-y += macintosh/
++obj-$(CONFIG_XEN) += xen/
+ obj-$(CONFIG_SCSI) += scsi/
+ obj-$(CONFIG_ATA) += ata/
+ obj-$(CONFIG_IDE) += ide/
+--- sle11-2009-10-16.orig/drivers/acpi/Makefile 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/Makefile 2009-08-26 11:52:33.000000000 +0200
+@@ -34,6 +34,9 @@ processor-objs += processor_core.o proce
+ ifdef CONFIG_CPU_FREQ
+ processor-objs += processor_perflib.o
+ endif
++ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++processor-objs += processor_perflib.o processor_extcntl.o
++endif
+
+ obj-y += sleep/
+ obj-y += bus.o glue.o
+--- sle11-2009-10-16.orig/drivers/acpi/hardware/hwsleep.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/hardware/hwsleep.c 2009-08-26 11:52:33.000000000 +0200
+@@ -241,7 +241,11 @@ acpi_status asmlinkage acpi_enter_sleep_
+ u32 PM1Bcontrol;
+ struct acpi_bit_register_info *sleep_type_reg_info;
+ struct acpi_bit_register_info *sleep_enable_reg_info;
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ u32 in_value;
++#else
++ int err;
++#endif
+ struct acpi_object_list arg_list;
+ union acpi_object arg;
+ acpi_status status;
+@@ -351,6 +355,7 @@ acpi_status asmlinkage acpi_enter_sleep_
+
+ ACPI_FLUSH_CPU_CACHE();
+
++#if !(defined(CONFIG_XEN) && defined(CONFIG_X86))
+ status = acpi_hw_register_write(ACPI_REGISTER_PM1A_CONTROL,
+ PM1Acontrol);
+ if (ACPI_FAILURE(status)) {
+@@ -397,6 +402,16 @@ acpi_status asmlinkage acpi_enter_sleep_
+ /* Spin until we wake */
+
+ } while (!in_value);
++#else
++ /* PV ACPI just need check hypercall return value */
++ err = acpi_notify_hypervisor_state(sleep_state,
++ PM1Acontrol, PM1Bcontrol);
++ if (err) {
++ ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
++ "Hypervisor failure [%d]\n", err));
++ return_ACPI_STATUS(AE_ERROR);
++ }
++#endif
+
+ return_ACPI_STATUS(AE_OK);
+ }
+--- sle11-2009-10-16.orig/drivers/acpi/processor_core.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/processor_core.c 2009-08-26 11:52:33.000000000 +0200
+@@ -620,7 +620,8 @@ static int acpi_processor_get_info(struc
+ */
+ if (pr->id == -1) {
+ if (ACPI_FAILURE
+- (acpi_processor_hotadd_init(pr->handle, &pr->id))) {
++ (acpi_processor_hotadd_init(pr->handle, &pr->id)) &&
++ !processor_cntl_external()) {
+ return -ENODEV;
+ }
+ }
+@@ -671,7 +672,11 @@ static int acpi_processor_get_info(struc
+ return 0;
+ }
+
++#ifndef CONFIG_XEN
+ static DEFINE_PER_CPU(void *, processor_device_array);
++#else
++static void *processor_device_array[NR_ACPI_CPUS];
++#endif
+
+ static int __cpuinit acpi_processor_start(struct acpi_device *device)
+ {
+@@ -680,30 +685,46 @@ static int __cpuinit acpi_processor_star
+ struct acpi_processor *pr;
+ struct sys_device *sysdev;
+
++ processor_extcntl_init();
++
+ pr = acpi_driver_data(device);
+
+ result = acpi_processor_get_info(device);
+- if (result) {
++ if (result ||
++ ((pr->id == -1) && !processor_cntl_external())) {
+ /* Processor is physically not present */
+ return 0;
+ }
+
+- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
++ BUG_ON(!processor_cntl_external() &&
++ ((pr->id >= nr_cpu_ids) || (pr->id < 0)));
+
+ /*
+ * Buggy BIOS check
+ * ACPI id of processors can be reported wrongly by the BIOS.
+ * Don't trust it blindly
+ */
++#ifndef CONFIG_XEN
+ if (per_cpu(processor_device_array, pr->id) != NULL &&
+ per_cpu(processor_device_array, pr->id) != device) {
++#else
++ BUG_ON(pr->acpi_id >= NR_ACPI_CPUS);
++ if (processor_device_array[pr->acpi_id] != NULL &&
++ processor_device_array[pr->acpi_id] != device) {
++#endif
+ printk(KERN_WARNING "BIOS reported wrong ACPI id "
+ "for the processor\n");
+ return -ENODEV;
+ }
++#ifndef CONFIG_XEN
+ per_cpu(processor_device_array, pr->id) = device;
+
+ per_cpu(processors, pr->id) = pr;
++#else
++ processor_device_array[pr->acpi_id] = device;
++ if (pr->id != -1)
++ per_cpu(processors, pr->id) = pr;
++#endif
+
+ result = acpi_processor_add_fs(device);
+ if (result)
+@@ -719,15 +740,28 @@ static int __cpuinit acpi_processor_star
+ /* _PDC call should be done before doing anything else (if reqd.). */
+ arch_acpi_processor_init_pdc(pr);
+ acpi_processor_set_pdc(pr);
+-#ifdef CONFIG_CPU_FREQ
++#if defined(CONFIG_CPU_FREQ) || defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
+ acpi_processor_ppc_has_changed(pr);
+ #endif
+- acpi_processor_get_throttling_info(pr);
+- acpi_processor_get_limit_info(pr);
++
++ /*
++ * pr->id may be -1 while processor_cntl_external is enabled.
++ * The throttling and thermal modules don't support this case.
++ * So far Tx only works when the dom0 vcpu count equals the pcpu
++ * count, since we hand control to dom0.
++ */
++ if (pr->id != -1) {
++ acpi_processor_get_throttling_info(pr);
++ acpi_processor_get_limit_info(pr);
++ }
+
+
+ acpi_processor_power_init(pr, device);
+
++ result = processor_extcntl_prepare(pr);
++ if (result)
++ goto end;
++
+ pr->cdev = thermal_cooling_device_register("Processor", device,
+ &processor_cooling_ops);
+ if (IS_ERR(pr->cdev)) {
+@@ -855,7 +889,7 @@ static int acpi_processor_remove(struct
+
+ pr = acpi_driver_data(device);
+
+- if (pr->id >= nr_cpu_ids) {
++ if (!processor_cntl_external() && pr->id >= nr_cpu_ids) {
+ kfree(pr);
+ return 0;
+ }
+@@ -881,8 +915,14 @@ static int acpi_processor_remove(struct
+ pr->cdev = NULL;
+ }
+
++#ifndef CONFIG_XEN
+ per_cpu(processors, pr->id) = NULL;
+ per_cpu(processor_device_array, pr->id) = NULL;
++#else
++ if (pr->id != -1)
++ per_cpu(processors, pr->id) = NULL;
++ processor_device_array[pr->acpi_id] = NULL;
++#endif
+ kfree(pr);
+
+ return 0;
+@@ -942,6 +982,10 @@ int acpi_processor_device_add(acpi_handl
+ if (!pr)
+ return -ENODEV;
+
++ if (processor_cntl_external())
++ processor_notify_external(pr,
++ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
++
+ if ((pr->id >= 0) && (pr->id < nr_cpu_ids)) {
+ kobject_uevent(&(*device)->dev.kobj, KOBJ_ONLINE);
+ }
+@@ -981,6 +1025,10 @@ static void __ref acpi_processor_hotplug
+ break;
+ }
+
++ if (processor_cntl_external())
++ processor_notify_external(pr,
++ PROCESSOR_HOTPLUG, HOTPLUG_TYPE_ADD);
++
+ if (pr->id >= 0 && (pr->id < nr_cpu_ids)) {
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
+ break;
+@@ -1012,6 +1060,11 @@ static void __ref acpi_processor_hotplug
+
+ if ((pr->id < nr_cpu_ids) && (cpu_present(pr->id)))
+ kobject_uevent(&device->dev.kobj, KOBJ_OFFLINE);
++
++ if (processor_cntl_external())
++ processor_notify_external(pr, PROCESSOR_HOTPLUG,
++ HOTPLUG_TYPE_REMOVE);
++
+ break;
+ default:
+ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
+@@ -1076,6 +1129,11 @@ static acpi_status acpi_processor_hotadd
+
+ static int acpi_processor_handle_eject(struct acpi_processor *pr)
+ {
++#ifdef CONFIG_XEN
++ if (pr->id == -1)
++ return 0;
++#endif
++
+ if (cpu_online(pr->id))
+ cpu_down(pr->id);
+
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ sle11-2009-10-16/drivers/acpi/processor_extcntl.c 2009-08-26 11:52:33.000000000 +0200
+@@ -0,0 +1,241 @@
++/*
++ * processor_extcntl.c - channel to external control logic
++ *
++ * Copyright (C) 2008, Intel corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
++
++#include <acpi/processor.h>
++
++#define ACPI_PROCESSOR_COMPONENT 0x01000000
++#define ACPI_PROCESSOR_CLASS "processor"
++#define ACPI_PROCESSOR_DRIVER_NAME "ACPI Processor Driver"
++#define _COMPONENT ACPI_PROCESSOR_COMPONENT
++ACPI_MODULE_NAME("acpi_processor")
++
++static int processor_extcntl_parse_csd(struct acpi_processor *pr);
++static int processor_extcntl_get_performance(struct acpi_processor *pr);
++/*
++ * External processor control logic may register with its own set of
++ * ops to get ACPI related notification. One example is like VMM.
++ */
++const struct processor_extcntl_ops *processor_extcntl_ops;
++EXPORT_SYMBOL(processor_extcntl_ops);
++
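++/*
++ * Tell the BIOS (via the FADT SMI command port) that the OS will take
++ * over performance-state control, analogous to what
++ * acpi_processor_notify_smm() does for the cpufreq case.
++ */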
++static int processor_notify_smm(void)
++{
++ acpi_status status;
++ static int is_done = 0;
++
++ /* we only need to successfully notify the BIOS once */
++ /* avoid double notification, which may lead to unexpected results */
++ if (is_done)
++ return 0;
++
++ /* Can't write pstate_cnt to smi_cmd if either value is zero */
++ if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,"No SMI port or pstate_cnt\n"));
++ return 0;
++ }
++
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++ "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
++ acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));
++
++ /* FADT v1 doesn't support pstate_cnt, but many BIOS vendors
++ * use it anyway, so we need to support it... */
++ if (acpi_fadt_is_v1) {
++ ACPI_DEBUG_PRINT((ACPI_DB_INFO,
++ "Using v1.0 FADT reserved value for pstate_cnt\n"));
++ }
++
++ status = acpi_os_write_port(acpi_fadt.smi_cmd,
++ (u32) acpi_fadt.pstate_cnt, 8);
++ if (ACPI_FAILURE(status))
++ return status;
++
++ is_done = 1;
++
++ return 0;
++}
++
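++/*
++ * Dispatch a processor event (PM init/change or hotplug) to the
++ * registered external control ops, e.g. the hypervisor channel.
++ */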
++int processor_notify_external(struct acpi_processor *pr, int event, int type)
++{
++ int ret = -EINVAL;
++
++ if (!processor_cntl_external())
++ return -EINVAL;
++
++ switch (event) {
++ case PROCESSOR_PM_INIT:
++ case PROCESSOR_PM_CHANGE:
++ if ((type >= PM_TYPE_MAX) ||
++ !processor_extcntl_ops->pm_ops[type])
++ break;
++
++ ret = processor_extcntl_ops->pm_ops[type](pr, event);
++ break;
++ case PROCESSOR_HOTPLUG:
++ if (processor_extcntl_ops->hotplug)
++ ret = processor_extcntl_ops->hotplug(pr, type);
++ break;
++ default:
++ printk(KERN_ERR "Unsupport processor events %d.\n", event);
++ break;
++ }
++
++ return ret;
++}
++
++/*
++ * External control logic can decide to grab all or part of the
++ * physical processor control bits. Take a VMM for example: physical
++ * processors are owned by the VMM, so existence information such as
++ * hotplug must always be forwarded to it. The same holds for the
++ * processor idle state, which is also necessarily controlled by the
++ * VMM. For other control bits like performance/throttle states, the
++ * VMM may choose whether to take control based on its own policy.
++ */
++void processor_extcntl_init(void)
++{
++ if (!processor_extcntl_ops)
++ arch_acpi_processor_init_extcntl(&processor_extcntl_ops);
++}
++
++/*
++ * This is called from ACPI processor init and holds the tricky
++ * housekeeping jobs needed to satisfy the external control model.
++ * For example, dependency parsing stubs for idle and performance
++ * states live here; that information may not be available when
++ * split off from dom0 control logic such as the cpufreq driver.
++ */
++int processor_extcntl_prepare(struct acpi_processor *pr)
++{
++ /* parse cstate dependency information */
++ if (processor_pm_external())
++ processor_extcntl_parse_csd(pr);
++
++ /* Initialize performance states */
++ if (processor_pmperf_external())
++ processor_extcntl_get_performance(pr);
++
++ return 0;
++}
++
++/*
++ * Currently no _CSD is implemented, which is why the existing ACPI
++ * code doesn't parse _CSD at all. But to keep the interface to the
++ * external control logic complete, we put a placeholder here for
++ * future compatibility.
++ */
++static int processor_extcntl_parse_csd(struct acpi_processor *pr)
++{
++ int i;
++
++ for (i = 0; i < pr->power.count; i++) {
++ if (!pr->power.states[i].valid)
++ continue;
++
++ /* No dependency by default */
++ pr->power.states[i].domain_info = NULL;
++ pr->power.states[i].csd_count = 0;
++ }
++
++ return 0;
++}
++
++/*
++ * The existing ACPI module parses performance states at some point,
++ * namely when the acpi-cpufreq driver is loaded, which is something
++ * we'd like to disable to avoid conflicts with the external control
++ * logic. So we have to collect the raw performance information here
++ * when the ACPI processor object is found and started.
++ */
++static int processor_extcntl_get_performance(struct acpi_processor *pr)
++{
++ int ret;
++ struct acpi_processor_performance *perf;
++ struct acpi_psd_package *pdomain;
++
++ if (pr->performance)
++ return -EBUSY;
++
++ perf = kzalloc(sizeof(struct acpi_processor_performance), GFP_KERNEL);
++ if (!perf)
++ return -ENOMEM;
++
++ pr->performance = perf;
++ /* Get basic performance state information */
++ ret = acpi_processor_get_performance_info(pr);
++ if (ret < 0)
++ goto err_out;
++
++ /*
++ * Here we need to retrieve the performance dependency information
++ * from the _PSD object. The existing interface is not used because
++ * it sticks to the Linux cpu id when constructing its bitmaps,
++ * whereas we want to decouple ACPI processor objects from the
++ * Linux cpu id logic. For example, even when Linux is configured
++ * as UP, we still want to hand all ACPI processor objects to the
++ * external logic. In that case it's preferable to use the ACPI ID
++ * instead.
++ */
++ pdomain = &pr->performance->domain_info;
++ pdomain->num_processors = 0;
++ ret = acpi_processor_get_psd(pr);
++ if (ret < 0) {
++ /*
++ * _PSD is optional - assume no coordination if absent (or
++ * broken), matching native kernels' behavior.
++ */
++ pdomain->num_entries = ACPI_PSD_REV0_ENTRIES;
++ pdomain->revision = ACPI_PSD_REV0_REVISION;
++ pdomain->domain = pr->acpi_id;
++ pdomain->coord_type = DOMAIN_COORD_TYPE_SW_ALL;
++ pdomain->num_processors = 1;
++ }
++
++ /* Some sanity check */
++ if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
++ (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) ||
++ ((pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL) &&
++ (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY) &&
++ (pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL))) {
++ ret = -EINVAL;
++ goto err_out;
++ }
++
++ /* Last step is to notify BIOS that external logic exists */
++ processor_notify_smm();
++
++ processor_notify_external(pr, PROCESSOR_PM_INIT, PM_TYPE_PERF);
++
++ return 0;
++err_out:
++ pr->performance = NULL;
++ kfree(perf);
++ return ret;
++}
+--- sle11-2009-10-16.orig/drivers/acpi/processor_idle.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/processor_idle.c 2009-08-26 11:52:33.000000000 +0200
+@@ -908,7 +908,8 @@ static int acpi_processor_get_power_info
+ */
+ cx.entry_method = ACPI_CSTATE_HALT;
+ snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
+- } else {
++ /* This doesn't apply to the external control case */
++ } else if (!processor_pm_external()) {
+ continue;
+ }
+ if (cx.type == ACPI_STATE_C1 &&
+@@ -947,6 +948,12 @@ static int acpi_processor_get_power_info
+
+ cx.power = obj->integer.value;
+
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++ /* cache control methods to notify external logic */
++ if (processor_pm_external())
++ memcpy(&cx.reg, reg, sizeof(*reg));
++#endif
++
+ current_count++;
+ memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
+
+@@ -1289,14 +1296,18 @@ int acpi_processor_cst_has_changed(struc
+ * been initialized.
+ */
+ if (pm_idle_save) {
+- pm_idle = pm_idle_save;
++ if (!processor_pm_external())
++ pm_idle = pm_idle_save;
+ /* Relies on interrupts forcing exit from idle. */
+ synchronize_sched();
+ }
+
+ pr->flags.power = 0;
+ result = acpi_processor_get_power_info(pr);
+- if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
++ if (processor_pm_external())
++ processor_notify_external(pr,
++ PROCESSOR_PM_CHANGE, PM_TYPE_IDLE);
++ else if ((pr->flags.power == 1) && (pr->flags.power_setup_done))
+ pm_idle = acpi_processor_idle;
+
+ return result;
+@@ -1821,7 +1832,7 @@ int __cpuinit acpi_processor_power_init(
+ printk(")\n");
+
+ #ifndef CONFIG_CPU_IDLE
+- if (pr->id == 0) {
++ if (!processor_pm_external() && (pr->id == 0)) {
+ pm_idle_save = pm_idle;
+ pm_idle = acpi_processor_idle;
+ }
+@@ -1835,6 +1846,11 @@ int __cpuinit acpi_processor_power_init(
+ acpi_driver_data(device));
+ if (!entry)
+ return -EIO;
++
++ if (processor_pm_external())
++ processor_notify_external(pr,
++ PROCESSOR_PM_INIT, PM_TYPE_IDLE);
++
+ return 0;
+ }
+
+--- sle11-2009-10-16.orig/drivers/acpi/processor_perflib.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/processor_perflib.c 2009-08-26 11:52:33.000000000 +0200
+@@ -80,6 +80,7 @@ MODULE_PARM_DESC(ignore_ppc, "If the fre
+
+ static int acpi_processor_ppc_status;
+
++#ifdef CONFIG_CPU_FREQ
+ static int acpi_processor_ppc_notifier(struct notifier_block *nb,
+ unsigned long event, void *data)
+ {
+@@ -122,6 +123,7 @@ static int acpi_processor_ppc_notifier(s
+ static struct notifier_block acpi_ppc_notifier_block = {
+ .notifier_call = acpi_processor_ppc_notifier,
+ };
++#endif /* CONFIG_CPU_FREQ */
+
+ static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
+ {
+@@ -166,9 +168,15 @@ int acpi_processor_ppc_has_changed(struc
+ if (ret < 0)
+ return (ret);
+ else
++#ifdef CONFIG_CPU_FREQ
+ return cpufreq_update_policy(pr->id);
++#elif defined(CONFIG_PROCESSOR_EXTERNAL_CONTROL)
++ return processor_notify_external(pr,
++ PROCESSOR_PM_CHANGE, PM_TYPE_PERF);
++#endif
+ }
+
++#ifdef CONFIG_CPU_FREQ
+ void acpi_processor_ppc_init(void)
+ {
+ if (!cpufreq_register_notifier
+@@ -187,6 +195,7 @@ void acpi_processor_ppc_exit(void)
+
+ acpi_processor_ppc_status &= ~PPC_REGISTERED;
+ }
++#endif /* CONFIG_CPU_FREQ */
+
+ static int acpi_processor_get_performance_control(struct acpi_processor *pr)
+ {
+@@ -328,7 +337,10 @@ static int acpi_processor_get_performanc
+ return result;
+ }
+
+-static int acpi_processor_get_performance_info(struct acpi_processor *pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_performance_info(struct acpi_processor *pr)
+ {
+ int result = 0;
+ acpi_status status = AE_OK;
+@@ -356,6 +368,7 @@ static int acpi_processor_get_performanc
+ return 0;
+ }
+
++#ifdef CONFIG_CPU_FREQ
+ int acpi_processor_notify_smm(struct module *calling_module)
+ {
+ acpi_status status;
+@@ -416,6 +429,7 @@ int acpi_processor_notify_smm(struct mod
+ }
+
+ EXPORT_SYMBOL(acpi_processor_notify_smm);
++#endif /* CONFIG_CPU_FREQ */
+
+ #ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
+ /* /proc/acpi/processor/../performance interface (DEPRECATED) */
+@@ -507,7 +521,10 @@ static void acpi_cpufreq_remove_file(str
+ }
+ #endif /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
+
+-static int acpi_processor_get_psd(struct acpi_processor *pr)
++#ifndef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++static
++#endif
++int acpi_processor_get_psd(struct acpi_processor *pr)
+ {
+ int result = 0;
+ acpi_status status = AE_OK;
+--- sle11-2009-10-16.orig/drivers/acpi/sleep/main.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/acpi/sleep/main.c 2009-08-26 11:52:33.000000000 +0200
+@@ -27,6 +27,7 @@ u8 sleep_states[ACPI_S_STATE_COUNT];
+ static int acpi_sleep_prepare(u32 acpi_state)
+ {
+ #ifdef CONFIG_ACPI_SLEEP
++#ifndef CONFIG_ACPI_PV_SLEEP
+ /* do we have a wakeup address for S2 and S3? */
+ if (acpi_state == ACPI_STATE_S3) {
+ if (!acpi_wakeup_address) {
+@@ -36,6 +37,7 @@ static int acpi_sleep_prepare(u32 acpi_s
+ (acpi_physical_address)acpi_wakeup_address);
+
+ }
++#endif
+ ACPI_FLUSH_CPU_CACHE();
+ acpi_enable_wakeup_device_prep(acpi_state);
+ #endif
+@@ -208,7 +210,14 @@ static int acpi_suspend_enter(suspend_st
+ break;
+
+ case ACPI_STATE_S3:
++#ifdef CONFIG_ACPI_PV_SLEEP
++ /* The hypervisor will save and restore CPU context,
++ * so we can skip the low-level housekeeping here.
++ */
++ acpi_enter_sleep_state(acpi_state);
++#else
+ do_suspend_lowlevel();
++#endif
+ break;
+ }
+
+--- sle11-2009-10-16.orig/drivers/char/agp/intel-agp.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/char/agp/intel-agp.c 2009-10-16 14:49:12.000000000 +0200
+@@ -259,6 +259,13 @@ static void *i8xx_alloc_pages(void)
+ if (page == NULL)
+ return NULL;
+
++#ifdef CONFIG_XEN
++ if (xen_create_contiguous_region((unsigned long)page_address(page), 2, 32)) {
++ __free_pages(page, 2);
++ return NULL;
++ }
++#endif
++
+ if (set_pages_uc(page, 4) < 0) {
+ set_pages_wb(page, 4);
+ __free_pages(page, 2);
+@@ -278,6 +285,9 @@ static void i8xx_destroy_pages(void *add
+
+ page = virt_to_page(addr);
+ set_pages_wb(page, 4);
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 2);
++#endif
+ put_page(page);
+ __free_pages(page, 2);
+ atomic_dec(&agp_bridge->current_memory_agp);
+--- sle11-2009-10-16.orig/drivers/char/mem.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/char/mem.c 2009-08-26 11:52:33.000000000 +0200
+@@ -110,6 +110,7 @@ void __attribute__((weak)) unxlate_dev_m
+ {
+ }
+
++#ifndef ARCH_HAS_DEV_MEM
+ /*
+ * This funcion reads the *physical* memory. The f_pos points directly to the
+ * memory location.
+@@ -254,6 +255,7 @@ static ssize_t write_mem(struct file * f
+ *ppos += written;
+ return written;
+ }
++#endif
+
+ int __attribute__((weak)) phys_mem_access_prot_allowed(struct file *file,
+ unsigned long pfn, unsigned long size, pgprot_t *vma_prot)
+@@ -372,6 +374,9 @@ static int mmap_mem(struct file * file,
+ static int mmap_kmem(struct file * file, struct vm_area_struct * vma)
+ {
+ unsigned long pfn;
++#ifdef CONFIG_XEN
++ unsigned long i, count;
++#endif
+
+ /* Turn a kernel-virtual address into a physical page frame */
+ pfn = __pa((u64)vma->vm_pgoff << PAGE_SHIFT) >> PAGE_SHIFT;
+@@ -386,6 +391,13 @@ static int mmap_kmem(struct file * file,
+ if (!pfn_valid(pfn))
+ return -EIO;
+
++#ifdef CONFIG_XEN
++ count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ for (i = 0; i < count; i++)
++ if ((pfn + i) != mfn_to_local_pfn(pfn_to_mfn(pfn + i)))
++ return -EIO;
++#endif
++
+ vma->vm_pgoff = pfn;
+ return mmap_mem(file, vma);
+ }
+@@ -905,6 +917,7 @@ static int open_port(struct inode * inod
+ #define open_kmem open_mem
+ #define open_oldmem open_mem
+
++#ifndef ARCH_HAS_DEV_MEM
+ static const struct file_operations mem_fops = {
+ .llseek = memory_lseek,
+ .read = read_mem,
+@@ -913,6 +926,9 @@ static const struct file_operations mem_
+ .open = open_mem,
+ .get_unmapped_area = get_unmapped_area_mem,
+ };
++#else
++extern const struct file_operations mem_fops;
++#endif
+
+ #ifdef CONFIG_DEVKMEM
+ static const struct file_operations kmem_fops = {
+--- sle11-2009-10-16.orig/drivers/char/tpm/Makefile 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/char/tpm/Makefile 2009-08-26 11:52:33.000000000 +0200
+@@ -9,3 +9,5 @@ obj-$(CONFIG_TCG_TIS) += tpm_tis.o
+ obj-$(CONFIG_TCG_NSC) += tpm_nsc.o
+ obj-$(CONFIG_TCG_ATMEL) += tpm_atmel.o
+ obj-$(CONFIG_TCG_INFINEON) += tpm_infineon.o
++obj-$(CONFIG_TCG_XEN) += tpm_xenu.o
++tpm_xenu-y = tpm_xen.o tpm_vtpm.o
+--- sle11-2009-10-16.orig/drivers/char/tpm/tpm.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/char/tpm/tpm.h 2009-08-26 11:52:33.000000000 +0200
+@@ -107,6 +107,9 @@ struct tpm_chip {
+ struct dentry **bios_dir;
+
+ struct list_head list;
++#ifdef CONFIG_XEN
++ void *priv;
++#endif
+ void (*release) (struct device *);
+ };
+
+@@ -124,6 +127,18 @@ static inline void tpm_write_index(int b
+ outb(value & 0xFF, base+1);
+ }
+
++#ifdef CONFIG_XEN
++static inline void *chip_get_private(const struct tpm_chip *chip)
++{
++ return chip->priv;
++}
++
++static inline void chip_set_private(struct tpm_chip *chip, void *priv)
++{
++ chip->priv = priv;
++}
++#endif
++
+ extern void tpm_get_timeouts(struct tpm_chip *);
+ extern void tpm_gen_interrupt(struct tpm_chip *);
+ extern void tpm_continue_selftest(struct tpm_chip *);
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ sle11-2009-10-16/drivers/char/tpm/tpm_vtpm.c 2009-08-26 11:52:33.000000000 +0200
+@@ -0,0 +1,542 @@
++/*
++ * Copyright (C) 2006 IBM Corporation
++ *
++ * Authors:
++ * Stefan Berger <stefanb@us.ibm.com>
++ *
++ * Generic device driver part for device drivers in a virtualized
++ * environment.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation, version 2 of the
++ * License.
++ *
++ */
++
++#include <asm/uaccess.h>
++#include <linux/list.h>
++#include <linux/device.h>
++#include <linux/interrupt.h>
++#include <linux/platform_device.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++/* read status bits */
++enum {
++ STATUS_BUSY = 0x01,
++ STATUS_DATA_AVAIL = 0x02,
++ STATUS_READY = 0x04
++};
++
++struct transmission {
++ struct list_head next;
++
++ unsigned char *request;
++ size_t request_len;
++ size_t request_buflen;
++
++ unsigned char *response;
++ size_t response_len;
++ size_t response_buflen;
++
++ unsigned int flags;
++};
++
++enum {
++ TRANSMISSION_FLAG_WAS_QUEUED = 0x1
++};
++
++
++enum {
++ DATAEX_FLAG_QUEUED_ONLY = 0x1
++};
++
++
++/* local variables */
++
++/* local function prototypes */
++static int _vtpm_send_queued(struct tpm_chip *chip);
++
++
++/* =============================================================
++ * Some utility functions
++ * =============================================================
++ */
++static void vtpm_state_init(struct vtpm_state *vtpms)
++{
++ vtpms->current_request = NULL;
++ spin_lock_init(&vtpms->req_list_lock);
++ init_waitqueue_head(&vtpms->req_wait_queue);
++ INIT_LIST_HEAD(&vtpms->queued_requests);
++
++ vtpms->current_response = NULL;
++ spin_lock_init(&vtpms->resp_list_lock);
++ init_waitqueue_head(&vtpms->resp_wait_queue);
++
++ vtpms->disconnect_time = jiffies;
++}
++
++
++static inline struct transmission *transmission_alloc(void)
++{
++ return kzalloc(sizeof(struct transmission), GFP_ATOMIC);
++}
++
++static unsigned char *
++transmission_set_req_buffer(struct transmission *t,
++ unsigned char *buffer, size_t len)
++{
++ if (t->request_buflen < len) {
++ kfree(t->request);
++ t->request = kmalloc(len, GFP_KERNEL);
++ if (!t->request) {
++ t->request_buflen = 0;
++ return NULL;
++ }
++ t->request_buflen = len;
++ }
++
++ memcpy(t->request, buffer, len);
++ t->request_len = len;
++
++ return t->request;
++}
++
++static unsigned char *
++transmission_set_res_buffer(struct transmission *t,
++ const unsigned char *buffer, size_t len)
++{
++ if (t->response_buflen < len) {
++ kfree(t->response);
++ t->response = kmalloc(len, GFP_ATOMIC);
++ if (!t->response) {
++ t->response_buflen = 0;
++ return NULL;
++ }
++ t->response_buflen = len;
++ }
++
++ memcpy(t->response, buffer, len);
++ t->response_len = len;
++
++ return t->response;
++}
++
++static inline void transmission_free(struct transmission *t)
++{
++ kfree(t->request);
++ kfree(t->response);
++ kfree(t);
++}
++
++/* =============================================================
++ * Interface with the lower layer driver
++ * =============================================================
++ */
++/*
++ * Lower layer uses this function to make a response available.
++ */
++int vtpm_vd_recv(const struct tpm_chip *chip,
++ const unsigned char *buffer, size_t count,
++ void *ptr)
++{
++ unsigned long flags;
++ int ret_size = 0;
++ struct transmission *t;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * The request list must contain exactly one request,
++ * and that element must be the one that was passed
++ * in from the front-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if (vtpms->current_request != ptr) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return 0;
++ }
++
++ if ((t = vtpms->current_request)) {
++ transmission_free(t);
++ vtpms->current_request = NULL;
++ }
++
++ t = transmission_alloc();
++ if (t) {
++ if (!transmission_set_res_buffer(t, buffer, count)) {
++ transmission_free(t);
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return -ENOMEM;
++ }
++ ret_size = count;
++ vtpms->current_response = t;
++ wake_up_interruptible(&vtpms->resp_wait_queue);
++ }
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++
++ return ret_size;
++}
++
++
++/*
++ * Lower layer indicates its status (connected/disconnected)
++ */
++void vtpm_vd_status(const struct tpm_chip *chip, u8 vd_status)
++{
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ vtpms->vd_status = vd_status;
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ vtpms->disconnect_time = jiffies;
++ }
++}
++
++/* =============================================================
++ * Interface with the generic TPM driver
++ * =============================================================
++ */
++static int vtpm_recv(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ /*
++ * Check if the previous operation only queued the command.
++ * In that case there won't be a response, so just return
++ * from here and reset the flag. In any other case a
++ * response should be received from the back-end.
++ */
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ if ((vtpms->flags & DATAEX_FLAG_QUEUED_ONLY) != 0) {
++ vtpms->flags &= ~DATAEX_FLAG_QUEUED_ONLY;
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ /*
++ * The first few commands (measurements) must be
++ * queued since it might not be possible to talk to the
++ * TPM yet.
++ * Return a response of up to 30 '0's.
++ */
++
++ count = min_t(size_t, count, 30);
++ memset(buf, 0x0, count);
++ return count;
++ }
++ /*
++ * Check whether something is in the response list and, if
++ * there's nothing in the list, wait for something to appear.
++ */
++
++ if (!vtpms->current_response) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on_timeout(&vtpms->resp_wait_queue,
++ 1000);
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ rc = min(count, t->response_len);
++ memcpy(buf, t->response, rc);
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static int vtpm_send(struct tpm_chip *chip, u8 *buf, size_t count)
++{
++ int rc = 0;
++ unsigned long flags;
++ struct transmission *t = transmission_alloc();
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ if (!t)
++ return -ENOMEM;
++ /*
++ * If there's a current request, it must be the
++ * previous request that has timed out.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if (vtpms->current_request != NULL) {
++ printk("WARNING: Sending although there is a request outstanding.\n"
++ " Previous request must have timed out.\n");
++ transmission_free(vtpms->current_request);
++ vtpms->current_request = NULL;
++ }
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ /*
++ * Queue the packet if the driver below is not
++ * ready yet, or if any packet is already in
++ * the queue.
++ * If the driver below is ready, dequeue all
++ * queued packets first before sending our
++ * current packet.
++ * For each dequeued packet, except for the
++ * last (= current) one, call the receive
++ * function to wait for the response to come
++ * back.
++ */
++ if ((vtpms->vd_status & TPM_VD_STATUS_CONNECTED) == 0) {
++ if (time_after(jiffies,
++ vtpms->disconnect_time + HZ * 10)) {
++ rc = -ENOENT;
++ } else {
++ goto queue_it;
++ }
++ } else {
++ /*
++ * Send all queued packets.
++ */
++ if (_vtpm_send_queued(chip) == 0) {
++
++ vtpms->current_request = t;
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ buf,
++ count,
++ t);
++ /*
++ * The generic TPM driver will call
++ * the function to receive the response.
++ */
++ if (rc < 0) {
++ vtpms->current_request = NULL;
++ goto queue_it;
++ }
++ } else {
++queue_it:
++ if (!transmission_set_req_buffer(t, buf, count)) {
++ transmission_free(t);
++ rc = -ENOMEM;
++ goto exit;
++ }
++ /*
++ * An error occurred. Don't even try
++ * to send the current request. Just
++ * queue it.
++ */
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ vtpms->flags |= DATAEX_FLAG_QUEUED_ONLY;
++ list_add_tail(&t->next, &vtpms->queued_requests);
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++ }
++ }
++
++exit:
++ return rc;
++}
++
++
++/*
++ * Send all queued requests.
++ */
++static int _vtpm_send_queued(struct tpm_chip *chip)
++{
++ int rc;
++ int error = 0;
++ long flags;
++ unsigned char buffer[1];
++ struct vtpm_state *vtpms;
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++
++ while (!list_empty(&vtpms->queued_requests)) {
++ /*
++ * Need to dequeue them.
++ * Read the result into a dummy buffer.
++ */
++ struct transmission *qt = (struct transmission *)
++ vtpms->queued_requests.next;
++ list_del(&qt->next);
++ vtpms->current_request = qt;
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ rc = vtpm_vd_send(vtpms->tpm_private,
++ qt->request,
++ qt->request_len,
++ qt);
++
++ if (rc < 0) {
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ if ((qt = vtpms->current_request) != NULL) {
++ /*
++ * requeue it at the beginning
++ * of the list
++ */
++ list_add(&qt->next,
++ &vtpms->queued_requests);
++ }
++ vtpms->current_request = NULL;
++ error = 1;
++ break;
++ }
++ /*
++ * After this point qt is not valid anymore!
++ * It is freed when the front-end delivers the
++ * data by calling the receive function.
++ */
++ /*
++ * Receive response into provided dummy buffer
++ */
++ rc = vtpm_recv(chip, buffer, sizeof(buffer));
++ spin_lock_irqsave(&vtpms->req_list_lock, flags);
++ }
++
++ spin_unlock_irqrestore(&vtpms->req_list_lock, flags);
++
++ return error;
++}
++
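++/*
++ * Cancel callback for the generic TPM layer: wait for any response
++ * still in flight for the current request and discard it.
++ */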
++static void vtpm_cancel(struct tpm_chip *chip)
++{
++ unsigned long flags;
++ struct vtpm_state *vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++
++ if (!vtpms->current_response && vtpms->current_request) {
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ interruptible_sleep_on(&vtpms->resp_wait_queue);
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ }
++
++ if (vtpms->current_response) {
++ struct transmission *t = vtpms->current_response;
++ vtpms->current_response = NULL;
++ transmission_free(t);
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++}
++
++static u8 vtpm_status(struct tpm_chip *chip)
++{
++ u8 rc = 0;
++ unsigned long flags;
++ struct vtpm_state *vtpms;
++
++ vtpms = (struct vtpm_state *)chip_get_private(chip);
++
++ spin_lock_irqsave(&vtpms->resp_list_lock, flags);
++ /*
++ * Data are available if:
++ * - there's a current response
++ * - the last packet was only queued (this is fake, but necessary to
++ * get the generic TPM layer to call the receive function).
++ */
++ if (vtpms->current_response ||
++ 0 != (vtpms->flags & DATAEX_FLAG_QUEUED_ONLY)) {
++ rc = STATUS_DATA_AVAIL;
++ } else if (!vtpms->current_response && !vtpms->current_request) {
++ rc = STATUS_READY;
++ }
++
++ spin_unlock_irqrestore(&vtpms->resp_list_lock, flags);
++ return rc;
++}
++
++static struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = tpm_open,
++ .read = tpm_read,
++ .write = tpm_write,
++ .release = tpm_release,
++};
++
++static DEVICE_ATTR(pubek, S_IRUGO, tpm_show_pubek, NULL);
++static DEVICE_ATTR(pcrs, S_IRUGO, tpm_show_pcrs, NULL);
++static DEVICE_ATTR(enabled, S_IRUGO, tpm_show_enabled, NULL);
++static DEVICE_ATTR(active, S_IRUGO, tpm_show_active, NULL);
++static DEVICE_ATTR(owned, S_IRUGO, tpm_show_owned, NULL);
++static DEVICE_ATTR(temp_deactivated, S_IRUGO, tpm_show_temp_deactivated,
++ NULL);
++static DEVICE_ATTR(caps, S_IRUGO, tpm_show_caps, NULL);
++static DEVICE_ATTR(cancel, S_IWUSR |S_IWGRP, NULL, tpm_store_cancel);
++
++static struct attribute *vtpm_attrs[] = {
++ &dev_attr_pubek.attr,
++ &dev_attr_pcrs.attr,
++ &dev_attr_enabled.attr,
++ &dev_attr_active.attr,
++ &dev_attr_owned.attr,
++ &dev_attr_temp_deactivated.attr,
++ &dev_attr_caps.attr,
++ &dev_attr_cancel.attr,
++ NULL,
++};
++
++static struct attribute_group vtpm_attr_grp = { .attrs = vtpm_attrs };
++
++#define TPM_LONG_TIMEOUT (10 * 60 * HZ)
++
++static struct tpm_vendor_specific tpm_vtpm = {
++ .recv = vtpm_recv,
++ .send = vtpm_send,
++ .cancel = vtpm_cancel,
++ .status = vtpm_status,
++ .req_complete_mask = STATUS_BUSY | STATUS_DATA_AVAIL,
++ .req_complete_val = STATUS_DATA_AVAIL,
++ .req_canceled = STATUS_READY,
++ .attr_group = &vtpm_attr_grp,
++ .miscdev = {
++ .fops = &vtpm_ops,
++ },
++ .duration = {
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ TPM_LONG_TIMEOUT,
++ },
++};
++
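++/*
++ * Allocate the vTPM state, register the chip with the generic TPM
++ * layer and link the two via the chip's private pointer.
++ */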
++struct tpm_chip *init_vtpm(struct device *dev,
++ struct tpm_private *tp)
++{
++ long rc;
++ struct tpm_chip *chip;
++ struct vtpm_state *vtpms;
++
++ vtpms = kzalloc(sizeof(struct vtpm_state), GFP_KERNEL);
++ if (!vtpms)
++ return ERR_PTR(-ENOMEM);
++
++ vtpm_state_init(vtpms);
++ vtpms->tpm_private = tp;
++
++ chip = tpm_register_hardware(dev, &tpm_vtpm);
++ if (!chip) {
++ rc = -ENODEV;
++ goto err_free_mem;
++ }
++
++ chip_set_private(chip, vtpms);
++
++ return chip;
++
++err_free_mem:
++ kfree(vtpms);
++
++ return ERR_PTR(rc);
++}
++
++void cleanup_vtpm(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = (struct vtpm_state*)chip_get_private(chip);
++ tpm_remove_hardware(dev);
++ kfree(vtpms);
++}
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ sle11-2009-10-16/drivers/char/tpm/tpm_vtpm.h 2009-08-26 11:52:33.000000000 +0200
+@@ -0,0 +1,55 @@
++#ifndef TPM_VTPM_H
++#define TPM_VTPM_H
++
++struct tpm_chip;
++struct tpm_private;
++
++struct vtpm_state {
++ struct transmission *current_request;
++ spinlock_t req_list_lock;
++ wait_queue_head_t req_wait_queue;
++
++ struct list_head queued_requests;
++
++ struct transmission *current_response;
++ spinlock_t resp_list_lock;
++ wait_queue_head_t resp_wait_queue; /* processes waiting for responses */
++
++ u8 vd_status;
++ u8 flags;
++
++ unsigned long disconnect_time;
++
++ /*
++ * The following is a private structure of the underlying
++ * driver. It is passed as a parameter to the send function.
++ */
++ struct tpm_private *tpm_private;
++};
++
++
++enum vdev_status {
++ TPM_VD_STATUS_DISCONNECTED = 0x0,
++ TPM_VD_STATUS_CONNECTED = 0x1
++};
++
++/* this function is called from tpm_vtpm.c */
++int vtpm_vd_send(struct tpm_private * tp,
++ const u8 * buf, size_t count, void *ptr);
++
++/* these functions are offered by tpm_vtpm.c */
++struct tpm_chip *init_vtpm(struct device *,
++ struct tpm_private *);
++void cleanup_vtpm(struct device *);
++int vtpm_vd_recv(const struct tpm_chip* chip,
++ const unsigned char *buffer, size_t count, void *ptr);
++void vtpm_vd_status(const struct tpm_chip *, u8 status);
++
++static inline struct tpm_private *tpm_private_from_dev(struct device *dev)
++{
++ struct tpm_chip *chip = dev_get_drvdata(dev);
++ struct vtpm_state *vtpms = chip_get_private(chip);
++ return vtpms->tpm_private;
++}
++
++#endif
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ sle11-2009-10-16/drivers/char/tpm/tpm_xen.c 2009-08-26 11:52:33.000000000 +0200
+@@ -0,0 +1,722 @@
++/*
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/errno.h>
++#include <linux/err.h>
++#include <linux/interrupt.h>
++#include <linux/mutex.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include "tpm.h"
++#include "tpm_vtpm.h"
++
++#undef DEBUG
++
++/* local structures */
++struct tpm_private {
++ struct tpm_chip *chip;
++
++ tpmif_tx_interface_t *tx;
++ atomic_t refcnt;
++ unsigned int irq;
++ u8 is_connected;
++ u8 is_suspended;
++
++ spinlock_t tx_lock;
++
++ struct tx_buffer *tx_buffers[TPMIF_TX_RING_SIZE];
++
++ atomic_t tx_busy;
++ void *tx_remember;
++
++ domid_t backend_id;
++ wait_queue_head_t wait_q;
++
++ struct xenbus_device *dev;
++ int ring_ref;
++};
++
++struct tx_buffer {
++ unsigned int size; /* available space in data */
++ unsigned int len; /* used space in data */
++ unsigned char *data; /* pointer to a page */
++};
++
++
++/* locally visible variables */
++static grant_ref_t gref_head;
++static struct tpm_private *my_priv;
++
++/* local function prototypes */
++static irqreturn_t tpmif_int(int irq,
++ void *tpm_priv,
++ struct pt_regs *ptregs);
++static void tpmif_rx_action(unsigned long unused);
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid);
++static DECLARE_TASKLET(tpmif_rx_tasklet, tpmif_rx_action, 0);
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp);
++static void tpmif_free_tx_buffers(struct tpm_private *tp);
++static void tpmif_set_connected_state(struct tpm_private *tp,
++ u8 newstate);
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int userbuffer,
++ void *remember);
++static void destroy_tpmring(struct tpm_private *tp);
++void __exit tpmif_exit(void);
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xen_tpm_fr (%s:%d) " fmt, __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_tpm_fr: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_tpm_fr: " fmt, ##args)
++
++#define GRANT_INVALID_REF 0
++
++
++static inline int
++tx_buffer_copy(struct tx_buffer *txb, const u8 *src, int len,
++ int isuserbuffer)
++{
++ int copied = len;
++
++ if (len > txb->size)
++ copied = txb->size;
++ if (isuserbuffer) {
++ if (copy_from_user(txb->data, src, copied))
++ return -EFAULT;
++ } else {
++ memcpy(txb->data, src, copied);
++ }
++ txb->len = len;
++ return copied;
++}
++
++static inline struct tx_buffer *tx_buffer_alloc(void)
++{
++ struct tx_buffer *txb;
++
++ txb = kzalloc(sizeof(struct tx_buffer), GFP_KERNEL);
++ if (!txb)
++ return NULL;
++
++ txb->len = 0;
++ txb->size = PAGE_SIZE;
++ txb->data = (unsigned char *)__get_free_page(GFP_KERNEL);
++ if (txb->data == NULL) {
++ kfree(txb);
++ txb = NULL;
++ }
++
++ return txb;
++}
++
++
++static inline void tx_buffer_free(struct tx_buffer *txb)
++{
++ if (txb) {
++ free_page((long)txb->data);
++ kfree(txb);
++ }
++}
++
++/**************************************************************
++ Utility function for the tpm_private structure
++**************************************************************/
++static void tpm_private_init(struct tpm_private *tp)
++{
++ spin_lock_init(&tp->tx_lock);
++ init_waitqueue_head(&tp->wait_q);
++ atomic_set(&tp->refcnt, 1);
++}
++
++static void tpm_private_put(void)
++{
++ if (!atomic_dec_and_test(&my_priv->refcnt))
++ return;
++
++ tpmif_free_tx_buffers(my_priv);
++ kfree(my_priv);
++ my_priv = NULL;
++}
++
++static struct tpm_private *tpm_private_get(void)
++{
++ int err;
++
++ if (my_priv) {
++ atomic_inc(&my_priv->refcnt);
++ return my_priv;
++ }
++
++ my_priv = kzalloc(sizeof(struct tpm_private), GFP_KERNEL);
++ if (!my_priv)
++ return NULL;
++
++ tpm_private_init(my_priv);
++ err = tpmif_allocate_tx_buffers(my_priv);
++ if (err < 0)
++ tpm_private_put();
++
++ return my_priv;
++}
++
++/**************************************************************
++
++ The interface to let the tpm plugin register its callback
++ function and send data to another partition using this module
++
++**************************************************************/
++
++static DEFINE_MUTEX(suspend_lock);
++/*
++ * Send data via this module by calling this function
++ */
++int vtpm_vd_send(struct tpm_private *tp,
++ const u8 * buf, size_t count, void *ptr)
++{
++ int sent;
++
++ mutex_lock(&suspend_lock);
++ sent = tpm_xmit(tp, buf, count, 0, ptr);
++ mutex_unlock(&suspend_lock);
++
++ return sent;
++}
++
++/**************************************************************
++ XENBUS support code
++**************************************************************/
++
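++/*
++ * Allocate the shared ring page, grant the backend access to it and
++ * bind the event channel towards the backend.
++ */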
++static int setup_tpmring(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ tpmif_tx_interface_t *sring;
++ int err;
++
++ tp->ring_ref = GRANT_INVALID_REF;
++
++ sring = (void *)__get_free_page(GFP_KERNEL);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ tp->tx = sring;
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(tp->tx));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ tp->tx = NULL;
++ xenbus_dev_fatal(dev, err, "allocating grant reference");
++ goto fail;
++ }
++ tp->ring_ref = err;
++
++ err = tpmif_connect(dev, tp, dev->otherend_id);
++ if (err)
++ goto fail;
++
++ return 0;
++fail:
++ destroy_tpmring(tp);
++ return err;
++}
++
++
++static void destroy_tpmring(struct tpm_private *tp)
++{
++ tpmif_set_connected_state(tp, 0);
++
++ if (tp->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(tp->ring_ref, (unsigned long)tp->tx);
++ tp->ring_ref = GRANT_INVALID_REF;
++ tp->tx = NULL;
++ }
++
++ if (tp->irq)
++ unbind_from_irqhandler(tp->irq, tp);
++
++ tp->irq = 0;
++}
++
++
++static int talk_to_backend(struct xenbus_device *dev,
++ struct tpm_private *tp)
++{
++ const char *message = NULL;
++ int err;
++ struct xenbus_transaction xbt;
++
++ err = setup_tpmring(dev, tp);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "setting up ring");
++ goto out;
++ }
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_tpmring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", tp->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(tp->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_tpmring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_error(dev, err, "%s", message);
++destroy_tpmring:
++ destroy_tpmring(tp);
++out:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ DPRINTK("\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ break;
++
++ case XenbusStateConnected:
++ tpmif_set_connected_state(tp, 1);
++ break;
++
++ case XenbusStateClosing:
++ tpmif_set_connected_state(tp, 0);
++ xenbus_frontend_closed(dev);
++ break;
++
++ case XenbusStateClosed:
++ tpmif_set_connected_state(tp, 0);
++ if (tp->is_suspended == 0)
++ device_unregister(&dev->dev);
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static int tpmfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ int handle;
++ struct tpm_private *tp = tpm_private_get();
++
++ if (!tp)
++ return -ENOMEM;
++
++ tp->chip = init_vtpm(&dev->dev, tp);
++ if (IS_ERR(tp->chip))
++ return PTR_ERR(tp->chip);
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "handle", "%i", &handle);
++ if (XENBUS_EXIST_ERR(err))
++ return err;
++
++ if (err < 0) {
++ xenbus_dev_fatal(dev,err,"reading virtual-device");
++ return err;
++ }
++
++ tp->dev = dev;
++
++ err = talk_to_backend(dev, tp);
++ if (err) {
++ tpm_private_put();
++ return err;
++ }
++
++ return 0;
++}
++
++
++static int tpmfront_remove(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ cleanup_vtpm(&dev->dev);
++ return 0;
++}
++
++static int tpmfront_suspend(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ u32 ctr;
++
++ /* Take the lock, preventing any application from sending. */
++ mutex_lock(&suspend_lock);
++ tp->is_suspended = 1;
++
++ for (ctr = 0; atomic_read(&tp->tx_busy); ctr++) {
++ if ((ctr % 10) == 0)
++ printk("TPM-FE [INFO]: Waiting for outstanding "
++ "request.\n");
++ /* Wait for a request to be responded to. */
++ interruptible_sleep_on_timeout(&tp->wait_q, 100);
++ }
++
++ return 0;
++}
++
++static int tpmfront_suspend_finish(struct tpm_private *tp)
++{
++ tp->is_suspended = 0;
++ /* Allow applications to send again. */
++ mutex_unlock(&suspend_lock);
++ return 0;
++}
++
++static int tpmfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ return tpmfront_suspend_finish(tp);
++}
++
++static int tpmfront_resume(struct xenbus_device *dev)
++{
++ struct tpm_private *tp = tpm_private_from_dev(&dev->dev);
++ destroy_tpmring(tp);
++ return talk_to_backend(dev, tp);
++}
++
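++/*
++ * Bind a listening event-channel port towards the backend domain and
++ * install the interrupt handler for it.
++ */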
++static int tpmif_connect(struct xenbus_device *dev,
++ struct tpm_private *tp,
++ domid_t domid)
++{
++ int err;
++
++ tp->backend_id = domid;
++
++ err = bind_listening_port_to_irqhandler(
++ domid, tpmif_int, SA_SAMPLE_RANDOM, "tpmif", tp);
++ if (err <= 0) {
++ WPRINTK("bind_listening_port_to_irqhandler failed "
++ "(err=%d)\n", err);
++ return err;
++ }
++ tp->irq = err;
++
++ return 0;
++}
++
++static struct xenbus_device_id tpmfront_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++static struct xenbus_driver tpmfront = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmfront_ids,
++ .probe = tpmfront_probe,
++ .remove = tpmfront_remove,
++ .resume = tpmfront_resume,
++ .otherend_changed = backend_changed,
++ .suspend = tpmfront_suspend,
++ .suspend_cancel = tpmfront_suspend_cancel,
++};
++
++static void __init init_tpm_xenbus(void)
++{
++ xenbus_register_frontend(&tpmfront);
++}
++
++static int tpmif_allocate_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++) {
++ tp->tx_buffers[i] = tx_buffer_alloc();
++ if (!tp->tx_buffers[i]) {
++ tpmif_free_tx_buffers(tp);
++ return -ENOMEM;
++ }
++ }
++ return 0;
++}
++
++static void tpmif_free_tx_buffers(struct tpm_private *tp)
++{
++ unsigned int i;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE; i++)
++ tx_buffer_free(tp->tx_buffers[i]);
++}
++
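++/*
++ * Tasklet: collect the response from the shared ring buffers, release
++ * the grant references and hand the reassembled data to the vTPM layer.
++ */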
++static void tpmif_rx_action(unsigned long priv)
++{
++ struct tpm_private *tp = (struct tpm_private *)priv;
++ int i = 0;
++ unsigned int received;
++ unsigned int offset = 0;
++ u8 *buffer;
++ tpmif_tx_request_t *tx = &tp->tx->ring[i].req;
++
++ atomic_set(&tp->tx_busy, 0);
++ wake_up_interruptible(&tp->wait_q);
++
++ received = tx->size;
++
++ buffer = kmalloc(received, GFP_ATOMIC);
++ if (!buffer)
++ return;
++
++ for (i = 0; i < TPMIF_TX_RING_SIZE && offset < received; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ tpmif_tx_request_t *tx;
++ unsigned int tocopy;
++
++ tx = &tp->tx->ring[i].req;
++ tocopy = tx->size;
++ if (tocopy > PAGE_SIZE)
++ tocopy = PAGE_SIZE;
++
++ memcpy(&buffer[offset], txb->data, tocopy);
++
++ gnttab_release_grant_reference(&gref_head, tx->ref);
++
++ offset += tocopy;
++ }
++
++ vtpm_vd_recv(tp->chip, buffer, received, tp->tx_remember);
++ kfree(buffer);
++}
++
++
++static irqreturn_t tpmif_int(int irq, void *tpm_priv, struct pt_regs *ptregs)
++{
++ struct tpm_private *tp = tpm_priv;
++ unsigned long flags;
++
++ spin_lock_irqsave(&tp->tx_lock, flags);
++ tpmif_rx_tasklet.data = (unsigned long)tp;
++ tasklet_schedule(&tpmif_rx_tasklet);
++ spin_unlock_irqrestore(&tp->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++
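++/*
++ * Copy the request into the pre-allocated tx buffers, grant the
++ * backend access to the underlying pages and notify it via the event
++ * channel. Returns the number of bytes queued for transmission.
++ */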
++static int tpm_xmit(struct tpm_private *tp,
++ const u8 * buf, size_t count, int isuserbuffer,
++ void *remember)
++{
++ tpmif_tx_request_t *tx;
++ TPMIF_RING_IDX i;
++ unsigned int offset = 0;
++
++ spin_lock_irq(&tp->tx_lock);
++
++ if (unlikely(atomic_read(&tp->tx_busy))) {
++ printk("tpm_xmit: There's an outstanding request/response "
++ "on the way!\n");
++ spin_unlock_irq(&tp->tx_lock);
++ return -EBUSY;
++ }
++
++ if (tp->is_connected != 1) {
++ spin_unlock_irq(&tp->tx_lock);
++ return -EIO;
++ }
++
++ for (i = 0; count > 0 && i < TPMIF_TX_RING_SIZE; i++) {
++ struct tx_buffer *txb = tp->tx_buffers[i];
++ int copied;
++
++ if (!txb) {
++ DPRINTK("txb (i=%d) is NULL. buffers initilized?\n"
++ "Not transmitting anything!\n", i);
++ spin_unlock_irq(&tp->tx_lock);
++ return -EFAULT;
++ }
++
++ copied = tx_buffer_copy(txb, &buf[offset], count,
++ isuserbuffer);
++ if (copied < 0) {
++ /* An error occurred */
++ spin_unlock_irq(&tp->tx_lock);
++ return copied;
++ }
++ count -= copied;
++ offset += copied;
++
++ tx = &tp->tx->ring[i].req;
++ tx->addr = virt_to_machine(txb->data);
++ tx->size = txb->len;
++ tx->unused = 0;
++
++ DPRINTK("First 4 characters sent by TPM-FE are "
++ "0x%02x 0x%02x 0x%02x 0x%02x\n",
++ txb->data[0],txb->data[1],txb->data[2],txb->data[3]);
++
++ /* Get the granttable reference for this page. */
++ tx->ref = gnttab_claim_grant_reference(&gref_head);
++ if (tx->ref == -ENOSPC) {
++ spin_unlock_irq(&tp->tx_lock);
++ DPRINTK("Grant table claim reference failed in "
++ "func:%s line:%d file:%s\n",
++ __FUNCTION__, __LINE__, __FILE__);
++ return -ENOSPC;
++ }
++ gnttab_grant_foreign_access_ref(tx->ref,
++ tp->backend_id,
++ virt_to_mfn(txb->data),
++ 0 /*RW*/);
++ wmb();
++ }
++
++ atomic_set(&tp->tx_busy, 1);
++ tp->tx_remember = remember;
++
++ mb();
++
++ notify_remote_via_irq(tp->irq);
++
++ spin_unlock_irq(&tp->tx_lock);
++ return offset;
++}
++
++
++static void tpmif_notify_upperlayer(struct tpm_private *tp)
++{
++ /* Notify upper layer about the state of the connection to the BE. */
++ vtpm_vd_status(tp->chip, (tp->is_connected
++ ? TPM_VD_STATUS_CONNECTED
++ : TPM_VD_STATUS_DISCONNECTED));
++}
++
++
++static void tpmif_set_connected_state(struct tpm_private *tp, u8 is_connected)
++{
++ /*
++ * Don't notify the upper layer if we are in suspend mode and
++ * should disconnect - the assumption is that we will resume.
++ * The mutex keeps apps from sending.
++ */
++ if (is_connected == 0 && tp->is_suspended == 1)
++ return;
++
++ /*
++ * Unlock the mutex if we are connected again
++ * after being suspended - now resuming.
++ * This also removes the suspend state.
++ */
++ if (is_connected == 1 && tp->is_suspended == 1)
++ tpmfront_suspend_finish(tp);
++
++ if (is_connected != tp->is_connected) {
++ tp->is_connected = is_connected;
++ tpmif_notify_upperlayer(tp);
++ }
++}
++
++
++
++/* =================================================================
++ * Initialization function.
++ * =================================================================
++ */
++
++
++static int __init tpmif_init(void)
++{
++ struct tpm_private *tp;
++
++ if (is_initial_xendomain())
++ return -EPERM;
++
++ tp = tpm_private_get();
++ if (!tp)
++ return -ENOMEM;
++
++ IPRINTK("Initialising the vTPM driver.\n");
++ if (gnttab_alloc_grant_references(TPMIF_TX_RING_SIZE,
++ &gref_head) < 0) {
++ tpm_private_put();
++ return -EFAULT;
++ }
++
++ init_tpm_xenbus();
++ return 0;
++}
++
++
++module_init(tpmif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+--- sle11-2009-10-16.orig/drivers/ide/ide-lib.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/ide/ide-lib.c 2009-08-26 11:52:33.000000000 +0200
+@@ -177,12 +177,12 @@ void ide_toggle_bounce(ide_drive_t *driv
+ {
+ u64 addr = BLK_BOUNCE_HIGH; /* dma64_addr_t */
+
+- if (!PCI_DMA_BUS_IS_PHYS) {
+- addr = BLK_BOUNCE_ANY;
+- } else if (on && drive->media == ide_disk) {
++ if (on && drive->media == ide_disk) {
+ struct device *dev = drive->hwif->dev;
+
+- if (dev && dev->dma_mask)
++ if (!PCI_DMA_BUS_IS_PHYS)
++ addr = BLK_BOUNCE_ANY;
++ else if (dev && dev->dma_mask)
+ addr = *dev->dma_mask;
+ }
+
+--- sle11-2009-10-16.orig/drivers/oprofile/buffer_sync.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/buffer_sync.c 2009-08-26 11:52:33.000000000 +0200
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * This is the core of the buffer management. Each
+ * CPU buffer is processed and entered into the
+ * global event buffer. Such processing is necessary
+@@ -40,6 +44,7 @@ static cpumask_t marked_cpus = CPU_MASK_
+ static DEFINE_SPINLOCK(task_mortuary);
+ static void process_task_mortuary(void);
+
++static int cpu_current_domain[NR_CPUS];
+
+ /* Take ownership of the task struct and place it on the
+ * list for processing. Only after two full buffer syncs
+@@ -148,6 +153,11 @@ static void end_sync(void)
+ int sync_start(void)
+ {
+ int err;
++ int i;
++
++ for (i = 0; i < NR_CPUS; i++) {
++ cpu_current_domain[i] = COORDINATOR_DOMAIN;
++ }
+
+ start_cpu_work();
+
+@@ -274,15 +284,31 @@ static void add_cpu_switch(int i)
+ last_cookie = INVALID_COOKIE;
+ }
+
+-static void add_kernel_ctx_switch(unsigned int in_kernel)
++static void add_cpu_mode_switch(unsigned int cpu_mode)
+ {
+ add_event_entry(ESCAPE_CODE);
+- if (in_kernel)
+- add_event_entry(KERNEL_ENTER_SWITCH_CODE);
+- else
+- add_event_entry(KERNEL_EXIT_SWITCH_CODE);
++ switch (cpu_mode) {
++ case CPU_MODE_USER:
++ add_event_entry(USER_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_KERNEL:
++ add_event_entry(KERNEL_ENTER_SWITCH_CODE);
++ break;
++ case CPU_MODE_XEN:
++ add_event_entry(XEN_ENTER_SWITCH_CODE);
++ break;
++ default:
++ break;
++ }
+ }
+-
++
++static void add_domain_switch(unsigned long domain_id)
++{
++ add_event_entry(ESCAPE_CODE);
++ add_event_entry(DOMAIN_SWITCH_CODE);
++ add_event_entry(domain_id);
++}
++
+ static void
+ add_user_ctx_switch(struct task_struct const * task, unsigned long cookie)
+ {
+@@ -347,9 +373,9 @@ static int add_us_sample(struct mm_struc
+ * for later lookup from userspace.
+ */
+ static int
+-add_sample(struct mm_struct * mm, struct op_sample * s, int in_kernel)
++add_sample(struct mm_struct * mm, struct op_sample * s, int cpu_mode)
+ {
+- if (in_kernel) {
++ if (cpu_mode >= CPU_MODE_KERNEL) {
+ add_sample_entry(s->eip, s->event);
+ return 1;
+ } else if (mm) {
+@@ -495,15 +521,21 @@ void sync_buffer(int cpu)
+ struct mm_struct *mm = NULL;
+ struct task_struct * new;
+ unsigned long cookie = 0;
+- int in_kernel = 1;
++ int cpu_mode = CPU_MODE_KERNEL;
+ unsigned int i;
+ sync_buffer_state state = sb_buffer_start;
+ unsigned long available;
++ int domain_switch = 0;
+
+ mutex_lock(&buffer_mutex);
+
+ add_cpu_switch(cpu);
+
++ /* We need to assign the first samples in this CPU buffer to the
++ same domain that we were processing at the last sync_buffer */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(cpu_current_domain[cpu]);
++ }
+ /* Remember, only we can modify tail_pos */
+
+ available = get_slots(cpu_buf);
+@@ -511,16 +543,18 @@ void sync_buffer(int cpu)
+ for (i = 0; i < available; ++i) {
+ struct op_sample * s = &cpu_buf->buffer[cpu_buf->tail_pos];
+
+- if (is_code(s->eip)) {
+- if (s->event <= CPU_IS_KERNEL) {
+- /* kernel/userspace switch */
+- in_kernel = s->event;
++ if (is_code(s->eip) && !domain_switch) {
++ if (s->event <= CPU_MODE_XEN) {
++ /* xen/kernel/userspace switch */
++ cpu_mode = s->event;
+ if (state == sb_buffer_start)
+ state = sb_sample_start;
+- add_kernel_ctx_switch(s->event);
++ add_cpu_mode_switch(s->event);
+ } else if (s->event == CPU_TRACE_BEGIN) {
+ state = sb_bt_start;
+ add_trace_begin();
++ } else if (s->event == CPU_DOMAIN_SWITCH) {
++ domain_switch = 1;
+ } else {
+ struct mm_struct * oldmm = mm;
+
+@@ -534,11 +568,21 @@ void sync_buffer(int cpu)
+ add_user_ctx_switch(new, cookie);
+ }
+ } else {
+- if (state >= sb_bt_start &&
+- !add_sample(mm, s, in_kernel)) {
+- if (state == sb_bt_start) {
+- state = sb_bt_ignore;
+- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ if (domain_switch) {
++ cpu_current_domain[cpu] = s->eip;
++ add_domain_switch(s->eip);
++ domain_switch = 0;
++ } else {
++ if (cpu_current_domain[cpu] !=
++ COORDINATOR_DOMAIN) {
++ add_sample_entry(s->eip, s->event);
++ }
++ else if (state >= sb_bt_start &&
++ !add_sample(mm, s, cpu_mode)) {
++ if (state == sb_bt_start) {
++ state = sb_bt_ignore;
++ atomic_inc(&oprofile_stats.bt_lost_no_mapping);
++ }
+ }
+ }
+ }
+@@ -547,6 +591,11 @@ void sync_buffer(int cpu)
+ }
+ release_mm(mm);
+
++ /* We reset domain to COORDINATOR at each CPU switch */
++ if (cpu_current_domain[cpu] != COORDINATOR_DOMAIN) {
++ add_domain_switch(COORDINATOR_DOMAIN);
++ }
++
+ mark_done(cpu);
+
+ mutex_unlock(&buffer_mutex);
+--- sle11-2009-10-16.orig/drivers/oprofile/cpu_buffer.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/cpu_buffer.c 2009-08-26 11:52:33.000000000 +0200
+@@ -6,6 +6,10 @@
+ *
+ * @author John Levon <levon@movementarian.org>
+ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
+ * Each CPU has a local buffer that stores PC value/event
+ * pairs. We also log context switches when we notice them.
+ * Eventually each CPU's buffer is processed into the global
+@@ -34,6 +38,8 @@ static void wq_sync_buffer(struct work_s
+ #define DEFAULT_TIMER_EXPIRE (HZ / 10)
+ static int work_enabled;
+
++static int32_t current_domain = COORDINATOR_DOMAIN;
++
+ void free_cpu_buffers(void)
+ {
+ int i;
+@@ -72,7 +78,7 @@ int alloc_cpu_buffers(void)
+ goto fail;
+
+ b->last_task = NULL;
+- b->last_is_kernel = -1;
++ b->last_cpu_mode = -1;
+ b->tracing = 0;
+ b->buffer_size = buffer_size;
+ b->tail_pos = 0;
+@@ -130,7 +136,7 @@ void cpu_buffer_reset(struct oprofile_cp
+ * collected will populate the buffer with proper
+ * values to initialize the buffer
+ */
+- cpu_buf->last_is_kernel = -1;
++ cpu_buf->last_cpu_mode = -1;
+ cpu_buf->last_task = NULL;
+ }
+
+@@ -180,13 +186,13 @@ add_code(struct oprofile_cpu_buffer * bu
+ * because of the head/tail separation of the writer and reader
+ * of the CPU buffer.
+ *
+- * is_kernel is needed because on some architectures you cannot
++ * cpu_mode is needed because on some architectures you cannot
+ * tell if you are in kernel or user space simply by looking at
+- * pc. We tag this in the buffer by generating kernel enter/exit
+- * events whenever is_kernel changes
++ * pc. We tag this in the buffer by generating kernel/user (and xen)
++ * enter events whenever cpu_mode changes
+ */
+ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc,
+- int is_kernel, unsigned long event)
++ int cpu_mode, unsigned long event)
+ {
+ struct task_struct * task;
+
+@@ -202,18 +208,18 @@ static int log_sample(struct oprofile_cp
+ return 0;
+ }
+
+- is_kernel = !!is_kernel;
+-
+ task = current;
+
+ /* notice a switch from user->kernel or vice versa */
+- if (cpu_buf->last_is_kernel != is_kernel) {
+- cpu_buf->last_is_kernel = is_kernel;
+- add_code(cpu_buf, is_kernel);
++ if (cpu_buf->last_cpu_mode != cpu_mode) {
++ cpu_buf->last_cpu_mode = cpu_mode;
++ add_code(cpu_buf, cpu_mode);
+ }
+-
++
+ /* notice a task switch */
+- if (cpu_buf->last_task != task) {
++ /* if not processing other domain samples */
++ if ((cpu_buf->last_task != task) &&
++ (current_domain == COORDINATOR_DOMAIN)) {
+ cpu_buf->last_task = task;
+ add_code(cpu_buf, (unsigned long)task);
+ }
+@@ -297,6 +303,25 @@ void oprofile_add_trace(unsigned long pc
+ add_sample(cpu_buf, pc, 0);
+ }
+
++int oprofile_add_domain_switch(int32_t domain_id)
++{
++ struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
++
++ /* should have space for switching into and out of domain
++ (2 slots each) plus one sample and one cpu mode switch */
++ if (((nr_available_slots(cpu_buf) < 6) &&
++ (domain_id != COORDINATOR_DOMAIN)) ||
++ (nr_available_slots(cpu_buf) < 2))
++ return 0;
++
++ add_code(cpu_buf, CPU_DOMAIN_SWITCH);
++ add_sample(cpu_buf, domain_id, 0);
++
++ current_domain = domain_id;
++
++ return 1;
++}
++
+ /*
+ * This serves to avoid cpu buffer overflow, and makes sure
+ * the task mortuary progresses
+--- sle11-2009-10-16.orig/drivers/oprofile/cpu_buffer.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/cpu_buffer.h 2009-08-26 11:52:33.000000000 +0200
+@@ -37,7 +37,7 @@ struct oprofile_cpu_buffer {
+ volatile unsigned long tail_pos;
+ unsigned long buffer_size;
+ struct task_struct * last_task;
+- int last_is_kernel;
++ int last_cpu_mode;
+ int tracing;
+ struct op_sample * buffer;
+ unsigned long sample_received;
+@@ -53,7 +53,10 @@ DECLARE_PER_CPU(struct oprofile_cpu_buff
+ void cpu_buffer_reset(struct oprofile_cpu_buffer * cpu_buf);
+
+ /* transient events for the CPU buffer -> event buffer */
+-#define CPU_IS_KERNEL 1
+-#define CPU_TRACE_BEGIN 2
++#define CPU_MODE_USER 0
++#define CPU_MODE_KERNEL 1
++#define CPU_MODE_XEN 2
++#define CPU_TRACE_BEGIN 3
++#define CPU_DOMAIN_SWITCH 4
+
+ #endif /* OPROFILE_CPU_BUFFER_H */
+--- sle11-2009-10-16.orig/drivers/oprofile/event_buffer.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/event_buffer.h 2009-08-26 11:52:33.000000000 +0200
+@@ -30,6 +30,9 @@ void wake_up_buffer_waiter(void);
+ #define INVALID_COOKIE ~0UL
+ #define NO_COOKIE 0UL
+
++/* Constant used to refer to the coordinator domain (Xen itself) */
++#define COORDINATOR_DOMAIN -1
++
+ extern const struct file_operations event_buffer_fops;
+
+ /* mutex between sync_cpu_buffers() and the
+--- sle11-2009-10-16.orig/drivers/oprofile/oprof.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/oprof.c 2009-08-26 11:52:33.000000000 +0200
+@@ -5,6 +5,10 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/kernel.h>
+@@ -33,6 +37,32 @@ static DEFINE_MUTEX(start_mutex);
+ */
+ static int timer = 0;
+
++int oprofile_set_active(int active_domains[], unsigned int adomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_active)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_active(active_domains, adomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains)
++{
++ int err;
++
++ if (!oprofile_ops.set_passive)
++ return -EINVAL;
++
++ mutex_lock(&start_mutex);
++ err = oprofile_ops.set_passive(passive_domains, pdomains);
++ mutex_unlock(&start_mutex);
++ return err;
++}
++
+ int oprofile_setup(void)
+ {
+ int err;
+--- sle11-2009-10-16.orig/drivers/oprofile/oprof.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/oprof.h 2009-08-26 11:52:33.000000000 +0200
+@@ -35,5 +35,8 @@ void oprofile_create_files(struct super_
+ void oprofile_timer_init(struct oprofile_operations * ops);
+
+ int oprofile_set_backtrace(unsigned long depth);
++
++int oprofile_set_active(int active_domains[], unsigned int adomains);
++int oprofile_set_passive(int passive_domains[], unsigned int pdomains);
+
+ #endif /* OPROF_H */
+--- sle11-2009-10-16.orig/drivers/oprofile/oprofile_files.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/drivers/oprofile/oprofile_files.c 2009-08-26 11:52:33.000000000 +0200
+@@ -5,15 +5,21 @@
+ * @remark Read the file COPYING
+ *
+ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
+ */
+
+ #include <linux/fs.h>
+ #include <linux/oprofile.h>
++#include <asm/uaccess.h>
++#include <linux/ctype.h>
+
+ #include "event_buffer.h"
+ #include "oprofile_stats.h"
+ #include "oprof.h"
+-
++
+ unsigned long fs_buffer_size = 131072;
+ unsigned long fs_cpu_buffer_size = 8192;
+ unsigned long fs_buffer_watershed = 32768; /* FIXME: tune */
+@@ -117,11 +123,202 @@ static ssize_t dump_write(struct file *
+ static const struct file_operations dump_fops = {
+ .write = dump_write,
+ };
+-
++
++#define TMPBUFSIZE 512
++
++static unsigned int adomains = 0;
++static int active_domains[MAX_OPROF_DOMAINS + 1];
++static DEFINE_MUTEX(adom_mutex);
++
++static ssize_t adomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&adom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ active_domains[i] = val;
++ if (active_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ adomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (adomains > MAX_OPROF_DOMAINS
++ || oprofile_set_active(active_domains, adomains)) {
++ adomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&adom_mutex);
++ return retval;
++}
++
++static ssize_t adomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&adom_mutex);
++
++ len = 0;
++ for (i = 0; i < adomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", active_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&adom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++
++static const struct file_operations active_domain_ops = {
++ .read = adomain_read,
++ .write = adomain_write,
++};
++
++static unsigned int pdomains = 0;
++static int passive_domains[MAX_OPROF_DOMAINS + 1];
++static DEFINE_MUTEX(pdom_mutex);
++
++static ssize_t pdomain_write(struct file * file, char const __user * buf,
++ size_t count, loff_t * offset)
++{
++ char *tmpbuf;
++ char *startp, *endp;
++ int i;
++ unsigned long val;
++ ssize_t retval = count;
++
++ if (*offset)
++ return -EINVAL;
++ if (count > TMPBUFSIZE - 1)
++ return -EINVAL;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ if (copy_from_user(tmpbuf, buf, count)) {
++ kfree(tmpbuf);
++ return -EFAULT;
++ }
++ tmpbuf[count] = 0;
++
++ mutex_lock(&pdom_mutex);
++
++ startp = tmpbuf;
++ /* Parse one more than MAX_OPROF_DOMAINS, for easy error checking */
++ for (i = 0; i <= MAX_OPROF_DOMAINS; i++) {
++ val = simple_strtoul(startp, &endp, 0);
++ if (endp == startp)
++ break;
++ while (ispunct(*endp) || isspace(*endp))
++ endp++;
++ passive_domains[i] = val;
++ if (passive_domains[i] != val)
++ /* Overflow, force error below */
++ i = MAX_OPROF_DOMAINS + 1;
++ startp = endp;
++ }
++ /* Force error on trailing junk */
++ pdomains = *startp ? MAX_OPROF_DOMAINS + 1 : i;
++
++ kfree(tmpbuf);
++
++ if (pdomains > MAX_OPROF_DOMAINS
++ || oprofile_set_passive(passive_domains, pdomains)) {
++ pdomains = 0;
++ retval = -EINVAL;
++ }
++
++ mutex_unlock(&pdom_mutex);
++ return retval;
++}
++
++static ssize_t pdomain_read(struct file * file, char __user * buf,
++ size_t count, loff_t * offset)
++{
++ char * tmpbuf;
++ size_t len;
++ int i;
++ ssize_t retval;
++
++ if (!(tmpbuf = kmalloc(TMPBUFSIZE, GFP_KERNEL)))
++ return -ENOMEM;
++
++ mutex_lock(&pdom_mutex);
++
++ len = 0;
++ for (i = 0; i < pdomains; i++)
++ len += snprintf(tmpbuf + len,
++ len < TMPBUFSIZE ? TMPBUFSIZE - len : 0,
++ "%u ", passive_domains[i]);
++ WARN_ON(len > TMPBUFSIZE);
++ if (len != 0 && len <= TMPBUFSIZE)
++ tmpbuf[len-1] = '\n';
++
++ mutex_unlock(&pdom_mutex);
++
++ retval = simple_read_from_buffer(buf, count, offset, tmpbuf, len);
++
++ kfree(tmpbuf);
++ return retval;
++}
++
++static const struct file_operations passive_domain_ops = {
++ .read = pdomain_read,
++ .write = pdomain_write,
++};
++
+ void oprofile_create_files(struct super_block * sb, struct dentry * root)
+ {
+ oprofilefs_create_file(sb, root, "enable", &enable_fops);
+ oprofilefs_create_file_perm(sb, root, "dump", &dump_fops, 0666);
++ oprofilefs_create_file(sb, root, "active_domains", &active_domain_ops);
++ oprofilefs_create_file(sb, root, "passive_domains", &passive_domain_ops);
+ oprofilefs_create_file(sb, root, "buffer", &event_buffer_fops);
+ oprofilefs_create_ulong(sb, root, "buffer_size", &fs_buffer_size);
+ oprofilefs_create_ulong(sb, root, "buffer_watershed", &fs_buffer_watershed);
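+
+Illustrative only, not part of the patch: a userspace sketch of how a
+profiling tool might program the two new oprofilefs control files. It
+assumes oprofilefs is mounted at /dev/oprofile; the helper name is
+hypothetical.
+
+#include <fcntl.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+
+/* Hypothetical helper: write a domain list such as "0 1" to one of the
+ * new files. adomain_write()/pdomain_write() accept IDs separated by
+ * whitespace or punctuation and reject trailing junk with -EINVAL. */
+static int set_domains(const char *file, const char *ids)
+{
+	char path[64];
+	int fd, rc;
+
+	snprintf(path, sizeof(path), "/dev/oprofile/%s", file);
+	fd = open(path, O_WRONLY);
+	if (fd < 0)
+		return -1;
+	rc = (write(fd, ids, strlen(ids)) == (ssize_t)strlen(ids)) ? 0 : -1;
+	close(fd);
+	return rc;
+}
+
+/* e.g. set_domains("active_domains", "0 1");
+ *      set_domains("passive_domains", "2");  */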
+--- sle11-2009-10-16.orig/fs/aio.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/fs/aio.c 2009-08-26 11:52:33.000000000 +0200
+@@ -36,6 +36,11 @@
+ #include <asm/uaccess.h>
+ #include <asm/mmu_context.h>
+
++#ifdef CONFIG_EPOLL
++#include <linux/poll.h>
++#include <linux/eventpoll.h>
++#endif
++
+ #if DEBUG > 1
+ #define dprintk printk
+ #else
+@@ -1026,6 +1031,11 @@ put_rq:
+ if (waitqueue_active(&ctx->wait))
+ wake_up(&ctx->wait);
+
++#ifdef CONFIG_EPOLL
++ if (ctx->file && waitqueue_active(&ctx->poll_wait))
++ wake_up(&ctx->poll_wait);
++#endif
++
+ spin_unlock_irqrestore(&ctx->ctx_lock, flags);
+ return ret;
+ }
+@@ -1033,6 +1043,8 @@ put_rq:
+ /* aio_read_evt
+ * Pull an event off of the ioctx's event ring. Returns the number of
+ * events fetched (0 or 1 ;-)
++ * If the ent parameter is NULL, just report (0 or 1) whether an event
++ * is available, without consuming it.
+ * FIXME: make this use cmpxchg.
+ * TODO: make the ringbuffer user mmap()able (requires FIXME).
+ */
+@@ -1055,13 +1067,18 @@ static int aio_read_evt(struct kioctx *i
+
+ head = ring->head % info->nr;
+ if (head != ring->tail) {
+- struct io_event *evp = aio_ring_event(info, head, KM_USER1);
+- *ent = *evp;
+- head = (head + 1) % info->nr;
+- smp_mb(); /* finish reading the event before updatng the head */
+- ring->head = head;
+- ret = 1;
+- put_aio_ring_event(evp, KM_USER1);
++ if (ent) { /* event requested */
++ struct io_event *evp =
++ aio_ring_event(info, head, KM_USER1);
++ *ent = *evp;
++ head = (head + 1) % info->nr;
++ /* finish reading the event before updating the head */
++ smp_mb();
++ ring->head = head;
++ ret = 1;
++ put_aio_ring_event(evp, KM_USER1);
++ } else /* only need to know availability */
++ ret = 1;
+ }
+ spin_unlock(&info->ring_lock);
+
+@@ -1251,6 +1268,13 @@ static void io_destroy(struct kioctx *io
+
+ aio_cancel_all(ioctx);
+ wait_for_all_aios(ioctx);
++#ifdef CONFIG_EPOLL
++ /* forget the poll file, but it's up to the user to close it */
++ if (ioctx->file) {
++ ioctx->file->private_data = NULL;
++ ioctx->file = NULL;
++ }
++#endif
+
+ /*
+ * Wake up any waiters. The setting of ctx->dead must be seen
+@@ -1261,6 +1285,67 @@ static void io_destroy(struct kioctx *io
+ put_ioctx(ioctx); /* once for the lookup */
+ }
+
++#ifdef CONFIG_EPOLL
++
++static int aio_queue_fd_close(struct inode *inode, struct file *file)
++{
++ struct kioctx *ioctx = file->private_data;
++ if (ioctx) {
++ file->private_data = NULL;
++ spin_lock_irq(&ioctx->ctx_lock);
++ ioctx->file = NULL;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++ return 0;
++}
++
++static unsigned int aio_queue_fd_poll(struct file *file, poll_table *wait)
++{
++	unsigned int pollflags = 0;
++ struct kioctx *ioctx = file->private_data;
++
++ if (ioctx) {
++
++ spin_lock_irq(&ioctx->ctx_lock);
++ /* Insert inside our poll wait queue */
++ poll_wait(file, &ioctx->poll_wait, wait);
++
++ /* Check our condition */
++ if (aio_read_evt(ioctx, NULL))
++ pollflags = POLLIN | POLLRDNORM;
++ spin_unlock_irq(&ioctx->ctx_lock);
++ }
++
++ return pollflags;
++}
++
++static const struct file_operations aioq_fops = {
++ .release = aio_queue_fd_close,
++ .poll = aio_queue_fd_poll
++};
++
++/* make_aio_fd:
++ * Create a file descriptor that can be used to poll the event queue.
++ * Piggybacks on the epoll code (ep_getfd) to create the descriptor.
++ */
++
++static int make_aio_fd(struct kioctx *ioctx)
++{
++ int error, fd;
++ struct inode *inode;
++ struct file *file;
++
++ error = ep_getfd(&fd, &inode, &file, NULL, &aioq_fops);
++ if (error)
++ return error;
++
++ /* associate the file with the IO context */
++ file->private_data = ioctx;
++ ioctx->file = file;
++ init_waitqueue_head(&ioctx->poll_wait);
++ return fd;
++}
++#endif
++
+ /* sys_io_setup:
+ * Create an aio_context capable of receiving at least nr_events.
+ * ctxp must not point to an aio_context that already exists, and
+@@ -1273,18 +1358,30 @@ static void io_destroy(struct kioctx *io
+ * resources are available. May fail with -EFAULT if an invalid
+ * pointer is passed for ctxp. Will fail with -ENOSYS if not
+ * implemented.
++ *
++ * To request a selectable fd, the user context has to be initialized
++ * to 1, instead of 0, and the return value is the fd.
++ * This keeps the system call compatible, since a non-zero value
++ * was not allowed so far.
+ */
+ SYSCALL_DEFINE2(io_setup, unsigned, nr_events, aio_context_t __user *, ctxp)
+ {
+ struct kioctx *ioctx = NULL;
+ unsigned long ctx;
+ long ret;
++ int make_fd = 0;
+
+ ret = get_user(ctx, ctxp);
+ if (unlikely(ret))
+ goto out;
+
+ ret = -EINVAL;
++#ifdef CONFIG_EPOLL
++ if (ctx == 1) {
++ make_fd = 1;
++ ctx = 0;
++ }
++#endif
+ if (unlikely(ctx || nr_events == 0)) {
+ pr_debug("EINVAL: io_setup: ctx %lu nr_events %u\n",
+ ctx, nr_events);
+@@ -1295,8 +1392,12 @@ SYSCALL_DEFINE2(io_setup, unsigned, nr_e
+ ret = PTR_ERR(ioctx);
+ if (!IS_ERR(ioctx)) {
+ ret = put_user(ioctx->user_id, ctxp);
+- if (!ret)
+- return 0;
++#ifdef CONFIG_EPOLL
++ if (make_fd && ret >= 0)
++ ret = make_aio_fd(ioctx);
++#endif
++ if (ret >= 0)
++ return ret;
+
+ get_ioctx(ioctx); /* io_destroy() expects us to hold a ref */
+ io_destroy(ioctx);
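+
+Illustrative only: a minimal userspace sketch of the selectable-fd
+convention introduced above (assumes a kernel built with CONFIG_EPOLL
+and this patch). Initializing the context value to 1 makes io_setup()
+return a pollable file descriptor instead of 0.
+
+#include <poll.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <linux/aio_abi.h>
+
+/* Returns the pollable fd (>= 0) or -1; *ctx receives the aio context
+ * id exactly as with a plain io_setup() call. */
+static long aio_setup_pollable(unsigned nr_events, aio_context_t *ctx)
+{
+	long fd;
+
+	*ctx = 1;	/* 1 (not 0) requests the selectable fd */
+	fd = syscall(SYS_io_setup, nr_events, ctx);
+	return fd < 0 ? -1 : fd;
+}
+
+/* Typical wait loop:
+ *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
+ *	poll(&pfd, 1, -1);	POLLIN => drain with io_getevents(*ctx, ...)
+ */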
+--- sle11-2009-10-16.orig/fs/compat_ioctl.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/fs/compat_ioctl.c 2009-08-26 11:52:33.000000000 +0200
+@@ -114,6 +114,13 @@
+ #include <asm/fbio.h>
+ #endif
+
++#ifdef CONFIG_XEN
++#include <xen/interface/xen.h>
++#include <xen/public/evtchn.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++#endif
++
+ static int do_ioctl32_pointer(unsigned int fd, unsigned int cmd,
+ unsigned long arg, struct file *f)
+ {
+@@ -2736,6 +2743,18 @@ IGNORE_IOCTL(FBIOGETCMAP32)
+ IGNORE_IOCTL(FBIOSCURSOR32)
+ IGNORE_IOCTL(FBIOGCURSOR32)
+ #endif
++
++#ifdef CONFIG_XEN
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAP_32, privcmd_ioctl_32)
++HANDLE_IOCTL(IOCTL_PRIVCMD_MMAPBATCH_32, privcmd_ioctl_32)
++COMPATIBLE_IOCTL(IOCTL_PRIVCMD_HYPERCALL)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_VIRQ)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_INTERDOMAIN)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_BIND_UNBOUND_PORT)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_UNBIND)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_NOTIFY)
++COMPATIBLE_IOCTL(IOCTL_EVTCHN_RESET)
++#endif
+ };
+
+ #define IOCTL_HASHSIZE 256
+--- sle11-2009-10-16.orig/include/acpi/processor.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/acpi/processor.h 2009-08-26 11:52:33.000000000 +0200
+@@ -17,6 +17,12 @@
+ #define ACPI_PROCESSOR_MAX_THROTTLE 250 /* 25% */
+ #define ACPI_PROCESSOR_MAX_DUTY_WIDTH 4
+
++#ifdef CONFIG_XEN
++#define NR_ACPI_CPUS (NR_CPUS < 256 ? 256 : NR_CPUS)
++#else
++#define NR_ACPI_CPUS NR_CPUS
++#endif /* CONFIG_XEN */
++
+ #define ACPI_PDC_REVISION_ID 0x1
+
+ #define ACPI_PSD_REV0_REVISION 0 /* Support for _PSD as in ACPI 3.0 */
+@@ -42,6 +48,17 @@
+
+ struct acpi_processor_cx;
+
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct acpi_csd_package {
++ acpi_integer num_entries;
++ acpi_integer revision;
++ acpi_integer domain;
++ acpi_integer coord_type;
++ acpi_integer num_processors;
++ acpi_integer index;
++} __attribute__ ((packed));
++#endif
++
+ struct acpi_power_register {
+ u8 descriptor;
+ u16 length;
+@@ -74,6 +91,12 @@ struct acpi_processor_cx {
+ u32 power;
+ u32 usage;
+ u64 time;
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++ /* Require raw information for external control logic */
++ struct acpi_power_register reg;
++ u32 csd_count;
++ struct acpi_csd_package *domain_info;
++#endif
+ struct acpi_processor_cx_policy promotion;
+ struct acpi_processor_cx_policy demotion;
+ char desc[ACPI_CX_DESC_LEN];
+@@ -304,6 +327,9 @@ static inline void acpi_processor_ppc_ex
+ {
+ return;
+ }
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++int acpi_processor_ppc_has_changed(struct acpi_processor *pr);
++#else
+ static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
+ {
+ static unsigned int printout = 1;
+@@ -316,6 +342,7 @@ static inline int acpi_processor_ppc_has
+ }
+ return 0;
+ }
++#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
+ #endif /* CONFIG_CPU_FREQ */
+
+ /* in processor_throttling.c */
+@@ -352,4 +379,120 @@ static inline void acpi_thermal_cpufreq_
+ }
+ #endif
+
++/*
++ * Following are interfaces geared to external processor PM control
++ * logic like a VMM
++ */
++/* Events notified to external control logic */
++#define PROCESSOR_PM_INIT 1
++#define PROCESSOR_PM_CHANGE 2
++#define PROCESSOR_HOTPLUG 3
++
++/* Objects for the PM events */
++#define PM_TYPE_IDLE 0
++#define PM_TYPE_PERF 1
++#define PM_TYPE_THR 2
++#define PM_TYPE_MAX 3
++
++/* Processor hotplug events */
++#define HOTPLUG_TYPE_ADD 0
++#define HOTPLUG_TYPE_REMOVE 1
++
++#ifdef CONFIG_PROCESSOR_EXTERNAL_CONTROL
++struct processor_extcntl_ops {
++ /* Transfer processor PM events to external control logic */
++ int (*pm_ops[PM_TYPE_MAX])(struct acpi_processor *pr, int event);
++ /* Notify physical processor status to external control logic */
++ int (*hotplug)(struct acpi_processor *pr, int type);
++};
++extern const struct processor_extcntl_ops *processor_extcntl_ops;
++
++static inline int processor_cntl_external(void)
++{
++ return (processor_extcntl_ops != NULL);
++}
++
++static inline int processor_pm_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_IDLE] != NULL);
++}
++
++static inline int processor_pmperf_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_PERF] != NULL);
++}
++
++static inline int processor_pmthr_external(void)
++{
++ return processor_cntl_external() &&
++ (processor_extcntl_ops->pm_ops[PM_TYPE_THR] != NULL);
++}
++
++extern int processor_notify_external(struct acpi_processor *pr,
++ int event, int type);
++extern void processor_extcntl_init(void);
++extern int processor_extcntl_prepare(struct acpi_processor *pr);
++extern int acpi_processor_get_performance_info(struct acpi_processor *pr);
++extern int acpi_processor_get_psd(struct acpi_processor *pr);
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **);
++#else
++static inline int processor_cntl_external(void) {return 0;}
++static inline int processor_pm_external(void) {return 0;}
++static inline int processor_pmperf_external(void) {return 0;}
++static inline int processor_pmthr_external(void) {return 0;}
++static inline int processor_notify_external(struct acpi_processor *pr,
++ int event, int type)
++{
++ return 0;
++}
++static inline void processor_extcntl_init(void) {}
++static inline int processor_extcntl_prepare(struct acpi_processor *pr)
++{
++ return 0;
++}
++#endif /* CONFIG_PROCESSOR_EXTERNAL_CONTROL */
++
++#ifdef CONFIG_XEN
++static inline void xen_convert_pct_reg(struct xen_pct_register *xpct,
++ struct acpi_pct_register *apct)
++{
++ xpct->descriptor = apct->descriptor;
++ xpct->length = apct->length;
++ xpct->space_id = apct->space_id;
++ xpct->bit_width = apct->bit_width;
++ xpct->bit_offset = apct->bit_offset;
++ xpct->reserved = apct->reserved;
++ xpct->address = apct->address;
++}
++
++static inline void xen_convert_pss_states(struct xen_processor_px *xpss,
++ struct acpi_processor_px *apss, int state_count)
++{
++ int i;
++ for (i = 0; i < state_count; i++) {
++ xpss->core_frequency = apss->core_frequency;
++ xpss->power = apss->power;
++ xpss->transition_latency = apss->transition_latency;
++ xpss->bus_master_latency = apss->bus_master_latency;
++ xpss->control = apss->control;
++ xpss->status = apss->status;
++ xpss++;
++ apss++;
++ }
++}
++
++static inline void xen_convert_psd_pack(struct xen_psd_package *xpsd,
++ struct acpi_psd_package *apsd)
++{
++ xpsd->num_entries = apsd->num_entries;
++ xpsd->revision = apsd->revision;
++ xpsd->domain = apsd->domain;
++ xpsd->coord_type = apsd->coord_type;
++ xpsd->num_processors = apsd->num_processors;
++}
++
++#endif /* CONFIG_XEN */
++
+ #endif
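+
+Illustrative only: the call pattern the predicates above are meant to
+support. The function below is hypothetical; the real call sites live
+in the processor idle/perf patches elsewhere in this series.
+
+/* Hypothetical caller: forward a PM event to the external (Xen)
+ * control logic instead of acting on it natively. */
+static int example_report_cst_change(struct acpi_processor *pr)
+{
+	if (!processor_pm_external())
+		return 0;	/* native idle handling stays in charge */
+	return processor_notify_external(pr, PROCESSOR_PM_CHANGE,
+					 PM_TYPE_IDLE);
+}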
+--- sle11-2009-10-16.orig/include/asm-generic/pci.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/asm-generic/pci.h 2009-08-26 11:52:33.000000000 +0200
+@@ -43,7 +43,9 @@ pcibios_select_root(struct pci_dev *pdev
+ return root;
+ }
+
++#ifndef pcibios_scan_all_fns
+ #define pcibios_scan_all_fns(a, b) 0
++#endif
+
+ #ifndef HAVE_ARCH_PCI_GET_LEGACY_IDE_IRQ
+ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+--- sle11-2009-10-16.orig/include/asm-generic/pgtable.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/asm-generic/pgtable.h 2009-08-26 11:52:33.000000000 +0200
+@@ -99,6 +99,10 @@ static inline void ptep_set_wrprotect(st
+ }
+ #endif
+
++#ifndef arch_change_pte_range
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) 0
++#endif
++
+ #ifndef __HAVE_ARCH_PTE_SAME
+ #define pte_same(A,B) (pte_val(A) == pte_val(B))
+ #endif
+--- sle11-2009-10-16.orig/include/linux/aio.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/aio.h 2009-08-26 11:52:33.000000000 +0200
+@@ -199,6 +199,11 @@ struct kioctx {
+ struct aio_ring_info ring_info;
+
+ struct delayed_work wq;
++#ifdef CONFIG_EPOLL
++ /* poll integration */
++ wait_queue_head_t poll_wait;
++ struct file *file;
++#endif
+ };
+
+ /* prototypes */
+--- sle11-2009-10-16.orig/include/linux/highmem.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/highmem.h 2009-08-26 11:52:33.000000000 +0200
+@@ -62,6 +62,7 @@ static inline void *kmap_atomic(struct p
+
+ #endif /* CONFIG_HIGHMEM */
+
++#ifndef __HAVE_ARCH_CLEAR_USER_HIGHPAGE
+ /* when CONFIG_HIGHMEM is not set these will be plain clear/copy_page */
+ static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
+ {
+@@ -69,6 +70,7 @@ static inline void clear_user_highpage(s
+ clear_user_page(addr, vaddr, page);
+ kunmap_atomic(addr, KM_USER0);
+ }
++#endif
+
+ #ifndef __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
+ /**
+@@ -115,12 +117,14 @@ alloc_zeroed_user_highpage_movable(struc
+ return __alloc_zeroed_user_highpage(__GFP_MOVABLE, vma, vaddr);
+ }
+
++#ifndef __HAVE_ARCH_CLEAR_HIGHPAGE
+ static inline void clear_highpage(struct page *page)
+ {
+ void *kaddr = kmap_atomic(page, KM_USER0);
+ clear_page(kaddr);
+ kunmap_atomic(kaddr, KM_USER0);
+ }
++#endif
+
+ static inline void zero_user_segments(struct page *page,
+ unsigned start1, unsigned end1,
+@@ -174,6 +178,8 @@ static inline void copy_user_highpage(st
+
+ #endif
+
++#ifndef __HAVE_ARCH_COPY_HIGHPAGE
++
+ static inline void copy_highpage(struct page *to, struct page *from)
+ {
+ char *vfrom, *vto;
+@@ -185,4 +191,6 @@ static inline void copy_highpage(struct
+ kunmap_atomic(vto, KM_USER1);
+ }
+
++#endif
++
+ #endif /* _LINUX_HIGHMEM_H */
+--- sle11-2009-10-16.orig/include/linux/interrupt.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/interrupt.h 2009-08-26 11:52:33.000000000 +0200
+@@ -218,6 +218,12 @@ static inline int disable_irq_wake(unsig
+ }
+ #endif /* CONFIG_GENERIC_HARDIRQS */
+
++#ifdef CONFIG_HAVE_IRQ_IGNORE_UNHANDLED
++int irq_ignore_unhandled(unsigned int irq);
++#else
++#define irq_ignore_unhandled(irq) 0
++#endif
++
+ #ifndef __ARCH_SET_SOFTIRQ_PENDING
+ #define set_softirq_pending(x) (local_softirq_pending() = (x))
+ #define or_softirq_pending(x) (local_softirq_pending() |= (x))
+--- sle11-2009-10-16.orig/include/linux/kexec.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/kexec.h 2009-08-26 11:52:33.000000000 +0200
+@@ -46,6 +46,13 @@
+ KEXEC_CORE_NOTE_NAME_BYTES + \
+ KEXEC_CORE_NOTE_DESC_BYTES )
+
++#ifndef KEXEC_ARCH_HAS_PAGE_MACROS
++#define kexec_page_to_pfn(page) page_to_pfn(page)
++#define kexec_pfn_to_page(pfn) pfn_to_page(pfn)
++#define kexec_virt_to_phys(addr) virt_to_phys(addr)
++#define kexec_phys_to_virt(addr) phys_to_virt(addr)
++#endif
++
+ /*
+ * This structure is used to hold the arguments that are used when loading
+ * kernel binaries.
+@@ -108,6 +115,12 @@ struct kimage {
+ extern void machine_kexec(struct kimage *image);
+ extern int machine_kexec_prepare(struct kimage *image);
+ extern void machine_kexec_cleanup(struct kimage *image);
++#ifdef CONFIG_XEN
++extern int xen_machine_kexec_load(struct kimage *image);
++extern void xen_machine_kexec_unload(struct kimage *image);
++extern void xen_machine_kexec_setup_resources(void);
++extern void xen_machine_kexec_register_resources(struct resource *res);
++#endif
+ extern asmlinkage long sys_kexec_load(unsigned long entry,
+ unsigned long nr_segments,
+ struct kexec_segment __user *segments,
+--- sle11-2009-10-16.orig/include/linux/mm.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/mm.h 2009-08-26 11:52:33.000000000 +0200
+@@ -114,6 +114,9 @@ extern unsigned int kobjsize(const void
+ #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
+ #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
+ #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
++#ifdef CONFIG_XEN
++#define VM_FOREIGN 0x40000000 /* Has pages belonging to another VM */
++#endif
+ #define VM_PAGE_MKWRITE2 0x80000000 /* Uses page_mkwrite2 rather than page_mkwrite */
+
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+@@ -199,6 +202,11 @@ struct vm_operations_struct {
+ */
+ int (*access)(struct vm_area_struct *vma, unsigned long addr,
+ void *buf, int len, int write);
++
++ /* Area-specific function for clearing the PTE at @ptep. Returns the
++ * original value of @ptep. */
++ pte_t (*zap_pte)(struct vm_area_struct *vma,
++ unsigned long addr, pte_t *ptep, int is_fullmm);
+ #ifdef CONFIG_NUMA
+ /*
+ * set_policy() op must add a reference to any non-NULL @new mempolicy
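+
+Illustrative only: the shape of a ->zap_pte implementation for the new
+hook (hypothetical; the in-tree users are the Xen grant-mapping
+drivers). The hook must return the original PTE value so that
+zap_pte_range() can still do its page accounting.
+
+static pte_t example_zap_pte(struct vm_area_struct *vma, unsigned long addr,
+			     pte_t *ptep, int is_fullmm)
+{
+	/* A trivial implementation mirrors the default behaviour; a real
+	 * driver would also tear down its foreign mapping here. */
+	return ptep_get_and_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
+}
+
+static struct vm_operations_struct example_vm_ops = {
+	.zap_pte = example_zap_pte,
+};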
+--- sle11-2009-10-16.orig/include/linux/oprofile.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/oprofile.h 2009-08-26 11:52:33.000000000 +0200
+@@ -16,6 +16,8 @@
+ #include <linux/types.h>
+ #include <linux/spinlock.h>
+ #include <asm/atomic.h>
++
++#include <xen/interface/xenoprof.h>
+
+ /* Each escaped entry is prefixed by ESCAPE_CODE
+ * then one of the following codes, then the
+@@ -28,14 +30,18 @@
+ #define CPU_SWITCH_CODE 2
+ #define COOKIE_SWITCH_CODE 3
+ #define KERNEL_ENTER_SWITCH_CODE 4
+-#define KERNEL_EXIT_SWITCH_CODE 5
++#define USER_ENTER_SWITCH_CODE 5
+ #define MODULE_LOADED_CODE 6
+ #define CTX_TGID_CODE 7
+ #define TRACE_BEGIN_CODE 8
+ #define TRACE_END_CODE 9
+ #define XEN_ENTER_SWITCH_CODE 10
++#ifndef CONFIG_XEN
+ #define SPU_PROFILING_CODE 11
+ #define SPU_CTX_SWITCH_CODE 12
++#else
++#define DOMAIN_SWITCH_CODE 11
++#endif
+
+ struct super_block;
+ struct dentry;
+@@ -47,6 +53,11 @@ struct oprofile_operations {
+ /* create any necessary configuration files in the oprofile fs.
+ * Optional. */
+ int (*create_files)(struct super_block * sb, struct dentry * root);
++ /* setup active domains with Xen */
++ int (*set_active)(int *active_domains, unsigned int adomains);
++ /* setup passive domains with Xen */
++ int (*set_passive)(int *passive_domains, unsigned int pdomains);
++
+ /* Do any necessary interrupt setup. Optional. */
+ int (*setup)(void);
+ /* Do any necessary interrupt shutdown. Optional. */
+@@ -106,6 +117,8 @@ void oprofile_add_pc(unsigned long pc, i
+ /* add a backtrace entry, to be called from the ->backtrace callback */
+ void oprofile_add_trace(unsigned long eip);
+
++/* add a domain switch entry */
++int oprofile_add_domain_switch(int32_t domain_id);
+
+ /**
+ * Create a file of the given name as a child of the given root, with
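+
+Illustrative only: the record layout emitted for the new code by
+add_domain_switch() in buffer_sync.c, as a hypothetical reader-side
+fragment (ESCAPE_CODE comes from event_buffer.h; a userspace consumer
+would mirror these constants):
+
+/* Stream layout: ESCAPE_CODE, DOMAIN_SWITCH_CODE, domain_id. Samples
+ * that follow belong to domain_id until the next switch record. */
+static int parse_domain_switch(const unsigned long *buf, int32_t *domain_id)
+{
+	if (buf[0] != ESCAPE_CODE || buf[1] != DOMAIN_SWITCH_CODE)
+		return 0;
+	*domain_id = (int32_t)buf[2];
+	return 1;
+}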
+--- sle11-2009-10-16.orig/include/linux/page-flags.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/page-flags.h 2009-08-26 11:52:33.000000000 +0200
+@@ -98,6 +98,9 @@ enum pageflags {
+ #ifdef CONFIG_IA64_UNCACHED_ALLOCATOR
+ PG_uncached, /* Page has been mapped as uncached */
+ #endif
++#ifdef CONFIG_XEN
++ PG_foreign, /* Page is owned by foreign allocator. */
++#endif
+ __NR_PAGEFLAGS,
+
+ /* Filesystems */
+@@ -271,6 +274,19 @@ static inline void SetPageUptodate(struc
+
+ CLEARPAGEFLAG(Uptodate, uptodate)
+
++#define PageForeign(page) test_bit(PG_foreign, &(page)->flags)
++#define SetPageForeign(_page, dtor) do { \
++ set_bit(PG_foreign, &(_page)->flags); \
++ BUG_ON((dtor) == (void (*)(struct page *))0); \
++ (_page)->index = (long)(dtor); \
++} while (0)
++#define ClearPageForeign(page) do { \
++ clear_bit(PG_foreign, &(page)->flags); \
++ (page)->index = 0; \
++} while (0)
++#define PageForeignDestructor(_page) \
++ ((void (*)(struct page *))(_page)->index)(_page)
++
+ extern void cancel_dirty_page(struct page *page, unsigned int account_size);
+
+ int test_clear_page_writeback(struct page *page);
+@@ -341,9 +357,18 @@ PAGEFLAG(MemError, memerror)
+ PAGEFLAG_FALSE(MemError)
+ #endif
+
++#if !defined(CONFIG_XEN)
++# define PAGE_FLAGS_XEN 0
++#elif defined(CONFIG_X86)
++# define PAGE_FLAGS_XEN ((1 << PG_pinned) | (1 << PG_foreign))
++#else
++# define PAGE_FLAGS_XEN (1 << PG_foreign)
++#endif
++
+ #define PAGE_FLAGS (1 << PG_lru | 1 << PG_private | 1 << PG_locked | \
+ 1 << PG_buddy | 1 << PG_writeback | 1 << PG_waiters | \
+- 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active)
++ 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
++ PAGE_FLAGS_XEN)
+
+ /*
+ * Flags checked in bad_page(). Pages on the free list should not have
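+
+Illustrative only: the intended PG_foreign lifecycle (hypothetical
+driver code). SetPageForeign() stashes the destructor in page->index,
+and the free paths patched in mm/page_alloc.c below invoke it via
+PageForeignDestructor() instead of freeing the page normally.
+
+/* Hypothetical backend-driver destructor for a granted page. */
+static void example_foreign_dtor(struct page *page)
+{
+	ClearPageForeign(page);
+	/* ...return the grant and then really free the page... */
+}
+
+static void example_make_foreign(struct page *page)
+{
+	SetPageForeign(page, example_foreign_dtor);
+}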
+--- sle11-2009-10-16.orig/include/linux/pci.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/pci.h 2009-08-26 11:52:33.000000000 +0200
+@@ -211,6 +211,9 @@ struct pci_dev {
+ * directly, use the values stored here. They might be different!
+ */
+ unsigned int irq;
++#ifdef CONFIG_XEN
++ unsigned int irq_old;
++#endif
+ struct resource resource[DEVICE_COUNT_RESOURCE]; /* I/O and memory regions + expansion ROMs */
+
+ /* These fields are used by common fixups */
+@@ -772,6 +775,11 @@ static inline int pci_msi_enabled(void)
+ {
+ return 0;
+ }
++
++#ifdef CONFIG_XEN
++#define register_msi_get_owner(func) 0
++#define unregister_msi_get_owner(func) 0
++#endif
+ #else
+ extern int pci_enable_msi(struct pci_dev *dev);
+ extern void pci_msi_shutdown(struct pci_dev *dev);
+@@ -784,6 +792,10 @@ extern void msi_remove_pci_irq_vectors(s
+ extern void pci_restore_msi_state(struct pci_dev *dev);
+ extern int pci_msi_enabled(void);
+
++#ifdef CONFIG_XEN
++extern int register_msi_get_owner(int (*func)(struct pci_dev *dev));
++extern int unregister_msi_get_owner(int (*func)(struct pci_dev *dev));
++#endif
+ #endif
+
+ #ifndef CONFIG_PCIEASPM
+--- sle11-2009-10-16.orig/include/linux/skbuff.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/skbuff.h 2009-08-26 11:52:33.000000000 +0200
+@@ -217,6 +217,8 @@ typedef unsigned char *sk_buff_data_t;
+ * @local_df: allow local fragmentation
+ * @cloned: Head may be cloned (check refcnt to be sure)
+ * @nohdr: Payload reference only, must not modify header
++ * @proto_data_valid: Protocol data validated since arriving at localhost
++ * @proto_csum_blank: Protocol csum must be added before leaving localhost
+ * @pkt_type: Packet class
+ * @fclone: skbuff clone status
+ * @ip_summed: Driver fed us an IP checksum
+@@ -323,7 +325,11 @@ struct sk_buff {
+ #ifdef CONFIG_NETVM
+ __u8 emergency:1;
+ #endif
+- /* 12-16 bit hole */
++#ifdef CONFIG_XEN
++ __u8 proto_data_valid:1,
++ proto_csum_blank:1;
++#endif
++ /* 14-16 bit hole */
+
+ #ifdef CONFIG_NET_DMA
+ dma_cookie_t dma_cookie;
+--- sle11-2009-10-16.orig/include/linux/vermagic.h 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/include/linux/vermagic.h 2009-08-26 11:52:33.000000000 +0200
+@@ -22,6 +22,11 @@
+ #else
+ #define MODULE_VERMAGIC_MODVERSIONS ""
+ #endif
++#ifdef CONFIG_XEN
++#define MODULE_VERMAGIC_XEN "Xen "
++#else
++#define MODULE_VERMAGIC_XEN
++#endif
+ #ifndef MODULE_ARCH_VERMAGIC
+ #define MODULE_ARCH_VERMAGIC ""
+ #endif
+@@ -30,5 +35,5 @@
+ UTS_RELEASE " " \
+ MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
+ MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
+- MODULE_ARCH_VERMAGIC
++ MODULE_VERMAGIC_XEN MODULE_ARCH_VERMAGIC
+
+--- sle11-2009-10-16.orig/kernel/irq/spurious.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/kernel/irq/spurious.c 2009-08-26 11:52:33.000000000 +0200
+@@ -193,7 +193,7 @@ void note_interrupt(unsigned int irq, st
+ */
+ if (time_after(jiffies, desc->last_unhandled + HZ/10))
+ desc->irqs_unhandled = 1;
+- else
++ else if (!irq_ignore_unhandled(irq))
+ desc->irqs_unhandled++;
+ desc->last_unhandled = jiffies;
+ if (unlikely(action_ret != IRQ_NONE))
+--- sle11-2009-10-16.orig/kernel/kexec.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/kernel/kexec.c 2009-08-26 11:52:33.000000000 +0200
+@@ -359,13 +359,26 @@ static int kimage_is_destination_range(s
+ return 0;
+ }
+
+-static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
++static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order, unsigned long limit)
+ {
+ struct page *pages;
+
+ pages = alloc_pages(gfp_mask, order);
+ if (pages) {
+ unsigned int count, i;
++#ifdef CONFIG_XEN
++ int address_bits;
++
++ if (limit == ~0UL)
++ address_bits = BITS_PER_LONG;
++ else
++ address_bits = long_log2(limit);
++
++ if (xen_limit_pages_to_max_mfn(pages, order, address_bits) < 0) {
++ __free_pages(pages, order);
++ return NULL;
++ }
++#endif
+ pages->mapping = NULL;
+ set_page_private(pages, order);
+ count = 1 << order;
+@@ -384,6 +397,9 @@ static void kimage_free_pages(struct pag
+ count = 1 << order;
+ for (i = 0; i < count; i++)
+ ClearPageReserved(page + i);
++#ifdef CONFIG_XEN
++ xen_destroy_contiguous_region((unsigned long)page_address(page), order);
++#endif
+ __free_pages(page, order);
+ }
+
+@@ -429,10 +445,10 @@ static struct page *kimage_alloc_normal_
+ do {
+ unsigned long pfn, epfn, addr, eaddr;
+
+- pages = kimage_alloc_pages(GFP_KERNEL, order);
++ pages = kimage_alloc_pages(GFP_KERNEL, order, KEXEC_CONTROL_MEMORY_LIMIT);
+ if (!pages)
+ break;
+- pfn = page_to_pfn(pages);
++ pfn = kexec_page_to_pfn(pages);
+ epfn = pfn + count;
+ addr = pfn << PAGE_SHIFT;
+ eaddr = epfn << PAGE_SHIFT;
+@@ -466,6 +482,7 @@ static struct page *kimage_alloc_normal_
+ return pages;
+ }
+
++#ifndef CONFIG_XEN
+ static struct page *kimage_alloc_crash_control_pages(struct kimage *image,
+ unsigned int order)
+ {
+@@ -519,7 +536,7 @@ static struct page *kimage_alloc_crash_c
+ }
+ /* If I don't overlap any segments I have found my hole! */
+ if (i == image->nr_segments) {
+- pages = pfn_to_page(hole_start >> PAGE_SHIFT);
++ pages = kexec_pfn_to_page(hole_start >> PAGE_SHIFT);
+ break;
+ }
+ }
+@@ -546,6 +563,13 @@ struct page *kimage_alloc_control_pages(
+
+ return pages;
+ }
++#else /* !CONFIG_XEN */
++struct page *kimage_alloc_control_pages(struct kimage *image,
++ unsigned int order)
++{
++ return kimage_alloc_normal_control_pages(image, order);
++}
++#endif
+
+ static int kimage_add_entry(struct kimage *image, kimage_entry_t entry)
+ {
+@@ -561,7 +585,7 @@ static int kimage_add_entry(struct kimag
+ return -ENOMEM;
+
+ ind_page = page_address(page);
+- *image->entry = virt_to_phys(ind_page) | IND_INDIRECTION;
++ *image->entry = kexec_virt_to_phys(ind_page) | IND_INDIRECTION;
+ image->entry = ind_page;
+ image->last_entry = ind_page +
+ ((PAGE_SIZE/sizeof(kimage_entry_t)) - 1);
+@@ -620,13 +644,13 @@ static void kimage_terminate(struct kima
+ #define for_each_kimage_entry(image, ptr, entry) \
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); \
+ ptr = (entry & IND_INDIRECTION)? \
+- phys_to_virt((entry & PAGE_MASK)): ptr +1)
++ kexec_phys_to_virt((entry & PAGE_MASK)): ptr +1)
+
+ static void kimage_free_entry(kimage_entry_t entry)
+ {
+ struct page *page;
+
+- page = pfn_to_page(entry >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(entry >> PAGE_SHIFT);
+ kimage_free_pages(page);
+ }
+
+@@ -638,6 +662,10 @@ static void kimage_free(struct kimage *i
+ if (!image)
+ return;
+
++#ifdef CONFIG_XEN
++ xen_machine_kexec_unload(image);
++#endif
++
+ kimage_free_extra_pages(image);
+ for_each_kimage_entry(image, ptr, entry) {
+ if (entry & IND_INDIRECTION) {
+@@ -713,7 +741,7 @@ static struct page *kimage_alloc_page(st
+ * have a match.
+ */
+ list_for_each_entry(page, &image->dest_pages, lru) {
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+ if (addr == destination) {
+ list_del(&page->lru);
+ return page;
+@@ -724,16 +752,16 @@ static struct page *kimage_alloc_page(st
+ kimage_entry_t *old;
+
+ /* Allocate a page, if we run out of memory give up */
+- page = kimage_alloc_pages(gfp_mask, 0);
++ page = kimage_alloc_pages(gfp_mask, 0, KEXEC_SOURCE_MEMORY_LIMIT);
+ if (!page)
+ return NULL;
+ /* If the page cannot be used file it away */
+- if (page_to_pfn(page) >
++ if (kexec_page_to_pfn(page) >
+ (KEXEC_SOURCE_MEMORY_LIMIT >> PAGE_SHIFT)) {
+ list_add(&page->lru, &image->unuseable_pages);
+ continue;
+ }
+- addr = page_to_pfn(page) << PAGE_SHIFT;
++ addr = kexec_page_to_pfn(page) << PAGE_SHIFT;
+
+ /* If it is the destination page we want use it */
+ if (addr == destination)
+@@ -756,7 +784,7 @@ static struct page *kimage_alloc_page(st
+ struct page *old_page;
+
+ old_addr = *old & PAGE_MASK;
+- old_page = pfn_to_page(old_addr >> PAGE_SHIFT);
++ old_page = kexec_pfn_to_page(old_addr >> PAGE_SHIFT);
+ copy_highpage(page, old_page);
+ *old = addr | (*old & ~PAGE_MASK);
+
+@@ -812,7 +840,7 @@ static int kimage_load_normal_segment(st
+ result = -ENOMEM;
+ goto out;
+ }
+- result = kimage_add_page(image, page_to_pfn(page)
++ result = kimage_add_page(image, kexec_page_to_pfn(page)
+ << PAGE_SHIFT);
+ if (result < 0)
+ goto out;
+@@ -844,6 +872,7 @@ out:
+ return result;
+ }
+
++#ifndef CONFIG_XEN
+ static int kimage_load_crash_segment(struct kimage *image,
+ struct kexec_segment *segment)
+ {
+@@ -866,7 +895,7 @@ static int kimage_load_crash_segment(str
+ char *ptr;
+ size_t uchunk, mchunk;
+
+- page = pfn_to_page(maddr >> PAGE_SHIFT);
++ page = kexec_pfn_to_page(maddr >> PAGE_SHIFT);
+ if (!page) {
+ result = -ENOMEM;
+ goto out;
+@@ -915,6 +944,13 @@ static int kimage_load_segment(struct ki
+
+ return result;
+ }
++#else /* CONFIG_XEN */
++static int kimage_load_segment(struct kimage *image,
++ struct kexec_segment *segment)
++{
++ return kimage_load_normal_segment(image, segment);
++}
++#endif
+
+ /*
+ * Exec Kernel system call: for obvious reasons only root may call it.
+@@ -1018,6 +1054,13 @@ SYSCALL_DEFINE4(kexec_load, unsigned lon
+ }
+ kimage_terminate(image);
+ }
++#ifdef CONFIG_XEN
++ if (image) {
++ result = xen_machine_kexec_load(image);
++ if (result)
++ goto out;
++ }
++#endif
+ /* Install the new kernel, and Uninstall the old */
+ image = xchg(dest_image, image);
+
+--- sle11-2009-10-16.orig/kernel/sysctl.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/kernel/sysctl.c 2009-08-26 11:52:33.000000000 +0200
+@@ -751,7 +751,7 @@ static struct ctl_table kern_table[] = {
+ .proc_handler = &proc_dointvec,
+ },
+ #endif
+-#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86)
++#if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) && !defined(CONFIG_ACPI_PV_SLEEP)
+ {
+ .procname = "acpi_video_flags",
+ .data = &acpi_realmode_flags,
+--- sle11-2009-10-16.orig/mm/memory.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/mm/memory.c 2009-08-26 11:52:33.000000000 +0200
+@@ -446,6 +446,12 @@ struct page *vm_normal_page(struct vm_ar
+ {
+ unsigned long pfn;
+
++#if defined(CONFIG_XEN) && defined(CONFIG_X86)
++ /* XEN: Covers user-space grant mappings (even of local pages). */
++ if (unlikely(vma->vm_flags & VM_FOREIGN))
++ return NULL;
++#endif
++
+ if (HAVE_PTE_SPECIAL) {
+ if (likely(!pte_special(pte))) {
+ VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+@@ -474,7 +480,14 @@ struct page *vm_normal_page(struct vm_ar
+ }
+ }
+
++#ifndef CONFIG_XEN
+ VM_BUG_ON(!pfn_valid(pfn));
++#else
++ if (unlikely(!pfn_valid(pfn))) {
++ VM_BUG_ON(!(vma->vm_flags & VM_RESERVED));
++ return NULL;
++ }
++#endif
+
+ /*
+ * NOTE! We still have PageReserved() pages in the page tables.
+@@ -745,8 +758,12 @@ static unsigned long zap_pte_range(struc
+ page->index > details->last_index))
+ continue;
+ }
+- ptent = ptep_get_and_clear_full(mm, addr, pte,
+- tlb->fullmm);
++ if (unlikely(vma->vm_ops && vma->vm_ops->zap_pte))
++ ptent = vma->vm_ops->zap_pte(vma, addr, pte,
++ tlb->fullmm);
++ else
++ ptent = ptep_get_and_clear_full(mm, addr, pte,
++ tlb->fullmm);
+ tlb_remove_tlb_entry(tlb, pte, addr);
+ if (unlikely(!page))
+ continue;
+@@ -996,6 +1013,7 @@ unsigned long zap_page_range(struct vm_a
+ tlb_finish_mmu(tlb, address, end);
+ return end;
+ }
++EXPORT_SYMBOL(zap_page_range);
+
+ /**
+ * zap_vma_ptes - remove ptes mapping the vma
+@@ -1193,6 +1211,26 @@ int get_user_pages(struct task_struct *t
+ continue;
+ }
+
++#ifdef CONFIG_XEN
++ if (vma && (vma->vm_flags & VM_FOREIGN)) {
++ struct page **map = vma->vm_private_data;
++ int offset = (start - vma->vm_start) >> PAGE_SHIFT;
++ if (map[offset] != NULL) {
++ if (pages) {
++ struct page *page = map[offset];
++
++ pages[i] = page;
++ get_page(page);
++ }
++ if (vmas)
++ vmas[i] = vma;
++ i++;
++ start += PAGE_SIZE;
++ len--;
++ continue;
++ }
++ }
++#endif
+ if (!vma || (vma->vm_flags & (VM_IO | VM_PFNMAP))
+ || !(vm_flags & vma->vm_flags))
+ return i ? : -EFAULT;
+--- sle11-2009-10-16.orig/mm/mprotect.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/mm/mprotect.c 2009-08-26 11:52:33.000000000 +0200
+@@ -92,6 +92,8 @@ static inline void change_pmd_range(stru
+ next = pmd_addr_end(addr, end);
+ if (pmd_none_or_clear_bad(pmd))
+ continue;
++ if (arch_change_pte_range(mm, pmd, addr, next, newprot))
++ continue;
+ change_pte_range(mm, pmd, addr, next, newprot, dirty_accountable);
+ } while (pmd++, addr = next, addr != end);
+ }
+--- sle11-2009-10-16.orig/mm/page_alloc.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/mm/page_alloc.c 2009-08-26 11:52:33.000000000 +0200
+@@ -533,6 +533,12 @@ static void __free_pages_ok(struct page
+ int i;
+ int reserved = 0;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ trace_page_free(page, order);
+
+ for (i = 0 ; i < (1 << order) ; ++i)
+@@ -998,6 +1004,12 @@ static void free_hot_cold_page(struct pa
+ struct per_cpu_pages *pcp;
+ unsigned long flags;
+
++#ifdef CONFIG_XEN
++ if (PageForeign(page)) {
++ PageForeignDestructor(page);
++ return;
++ }
++#endif
+ trace_page_free(page, 0);
+
+ if (PageAnon(page))
+--- sle11-2009-10-16.orig/net/core/dev.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/net/core/dev.c 2009-08-26 11:52:33.000000000 +0200
+@@ -131,6 +131,12 @@
+
+ #include "net-sysfs.h"
+
++#ifdef CONFIG_XEN
++#include <net/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#endif
++
+ /*
+ * The list of packet types we will receive (as opposed to discard)
+ * and the routines to invoke.
+@@ -1734,6 +1740,42 @@ static struct netdev_queue *dev_pick_tx(
+ return netdev_get_tx_queue(dev, queue_index);
+ }
+
++#ifdef CONFIG_XEN
++inline int skb_checksum_setup(struct sk_buff *skb)
++{
++ if (skb->proto_csum_blank) {
++ if (skb->protocol != htons(ETH_P_IP))
++ goto out;
++ skb->h.raw = (unsigned char *)skb->nh.iph + 4*skb->nh.iph->ihl;
++ if (skb->h.raw >= skb->tail)
++ goto out;
++ switch (skb->nh.iph->protocol) {
++ case IPPROTO_TCP:
++ skb->csum = offsetof(struct tcphdr, check);
++ break;
++ case IPPROTO_UDP:
++ skb->csum = offsetof(struct udphdr, check);
++ break;
++ default:
++ if (net_ratelimit())
++ printk(KERN_ERR "Attempting to checksum a non-"
++ "TCP/UDP packet, dropping a protocol"
++ " %d packet\n", skb->nh.iph->protocol);
++ goto out;
++ }
++ if ((skb->h.raw + skb->csum + 2) > skb->tail)
++ goto out;
++ skb->ip_summed = CHECKSUM_HW;
++ skb->proto_csum_blank = 0;
++ }
++ return 0;
++out:
++ return -EPROTO;
++}
++#else
++inline int skb_checksum_setup(struct sk_buff *skb) { return 0; }
++#endif
++
+ /**
+ * dev_queue_xmit - transmit a buffer
+ * @skb: buffer to transmit
+@@ -1766,6 +1808,12 @@ int dev_queue_xmit(struct sk_buff *skb)
+ struct Qdisc *q;
+ int rc = -ENOMEM;
+
++ /* If a checksum-deferred packet is forwarded to a device that needs a
++ * checksum, correct the pointers and force checksumming.
++ */
++ if (skb_checksum_setup(skb))
++ goto out_kfree_skb;
++
+ /* GSO will handle the following emulations directly. */
+ if (netif_needs_gso(dev, skb))
+ goto gso;
+@@ -2274,6 +2322,19 @@ int netif_receive_skb(struct sk_buff *sk
+ }
+ #endif
+
++#ifdef CONFIG_XEN
++ switch (skb->ip_summed) {
++ case CHECKSUM_UNNECESSARY:
++ skb->proto_data_valid = 1;
++ break;
++ case CHECKSUM_HW:
++ /* XXX Implement me. */
++ default:
++ skb->proto_data_valid = 0;
++ break;
++ }
++#endif
++
+ if (skb_emergency(skb))
+ goto skip_taps;
+
+@@ -4928,6 +4989,7 @@ EXPORT_SYMBOL(unregister_netdevice_notif
+ EXPORT_SYMBOL(net_enable_timestamp);
+ EXPORT_SYMBOL(net_disable_timestamp);
+ EXPORT_SYMBOL(dev_get_flags);
++EXPORT_SYMBOL(skb_checksum_setup);
+
+ #if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
+ EXPORT_SYMBOL(br_handle_frame_hook);
+--- sle11-2009-10-16.orig/net/core/skbuff.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/net/core/skbuff.c 2009-08-26 11:52:33.000000000 +0200
+@@ -555,6 +555,10 @@ static struct sk_buff *__skb_clone(struc
+ n->hdr_len = skb->nohdr ? skb_headroom(skb) : skb->hdr_len;
+ n->cloned = 1;
+ n->nohdr = 0;
++#ifdef CONFIG_XEN
++ C(proto_data_valid);
++ C(proto_csum_blank);
++#endif
+ n->destructor = NULL;
+ C(iif);
+ C(tail);
+--- sle11-2009-10-16.orig/net/ipv4/netfilter/nf_nat_proto_tcp.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/net/ipv4/netfilter/nf_nat_proto_tcp.c 2009-08-26 11:52:33.000000000 +0200
+@@ -75,6 +75,9 @@ tcp_manip_pkt(struct sk_buff *skb,
+ if (hdrsize < sizeof(*hdr))
+ return true;
+
++ if (skb_checksum_setup(skb))
++ return false;
++
+ inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
+ inet_proto_csum_replace2(&hdr->check, skb, oldport, newport, 0);
+ return true;
+--- sle11-2009-10-16.orig/net/ipv4/netfilter/nf_nat_proto_udp.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/net/ipv4/netfilter/nf_nat_proto_udp.c 2009-08-26 11:52:33.000000000 +0200
+@@ -60,6 +60,10 @@ udp_manip_pkt(struct sk_buff *skb,
+ newport = tuple->dst.u.udp.port;
+ portptr = &hdr->dest;
+ }
++
++ if (skb_checksum_setup(skb))
++ return false;
++
+ if (hdr->check || skb->ip_summed == CHECKSUM_PARTIAL) {
+ inet_proto_csum_replace4(&hdr->check, skb, oldip, newip, 1);
+ inet_proto_csum_replace2(&hdr->check, skb, *portptr, newport,
+--- sle11-2009-10-16.orig/net/ipv4/xfrm4_output.c 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/net/ipv4/xfrm4_output.c 2009-08-26 11:52:33.000000000 +0200
+@@ -81,7 +81,7 @@ static int xfrm4_output_finish(struct sk
+ #endif
+
+ skb->protocol = htons(ETH_P_IP);
+- return xfrm_output(skb);
++ return skb_checksum_setup(skb) ?: xfrm_output(skb);
+ }
+
+ int xfrm4_output(struct sk_buff *skb)
+--- sle11-2009-10-16.orig/scripts/Makefile.build 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/scripts/Makefile.build 2009-08-26 11:52:33.000000000 +0200
+@@ -73,6 +73,20 @@ ifndef obj
+ $(warning kbuild: Makefile.build is included improperly)
+ endif
+
++ifeq ($(CONFIG_XEN),y)
++$(objtree)/scripts/Makefile.xen: $(srctree)/scripts/Makefile.xen.awk $(srctree)/scripts/Makefile.build
++ @echo ' Updating $@'
++ $(if $(shell echo a | $(AWK) '{ print gensub(/a/, "AA", "g"); }'),\
++ ,$(error 'Your awk program does not define gensub. Use gawk or another awk with gensub'))
++ @$(AWK) -f $< $(filter-out $<,$^) >$@
++
++xen-src-single-used-m := $(patsubst $(srctree)/%,%,$(wildcard $(addprefix $(srctree)/,$(single-used-m:.o=-xen.c))))
++xen-single-used-m := $(xen-src-single-used-m:-xen.c=.o)
++single-used-m := $(filter-out $(xen-single-used-m),$(single-used-m))
++
++-include $(objtree)/scripts/Makefile.xen
++endif
++
+ # ===========================================================================
+
+ ifneq ($(strip $(lib-y) $(lib-m) $(lib-n) $(lib-)),)
+--- sle11-2009-10-16.orig/scripts/Makefile.lib 2009-10-16 14:48:16.000000000 +0200
++++ sle11-2009-10-16/scripts/Makefile.lib 2009-08-26 11:52:33.000000000 +0200
+@@ -17,6 +17,12 @@ obj-m := $(filter-out $(obj-y),$(obj-m))
+
+ lib-y := $(filter-out $(obj-y), $(sort $(lib-y) $(lib-m)))
+
++# Remove objects forcibly disabled
++
++obj-y := $(filter-out $(disabled-obj-y),$(obj-y))
++obj-m := $(filter-out $(disabled-obj-y),$(obj-m))
++lib-y := $(filter-out $(disabled-obj-y),$(lib-y))
++
+
+ # Handle objects in subdirs
+ # ---------------------------------------------------------------------------
--- /dev/null
+Subject: xen3 include-xen-interface
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2008-11-25/include/xen/interface/COPYING
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/COPYING 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,38 @@
++XEN NOTICE
++==========
++
++This copyright applies to all files within this subdirectory and its
++subdirectories:
++ include/public/*.h
++ include/public/hvm/*.h
++ include/public/io/*.h
++
++The intention is that these files can be freely copied into the source
++tree of an operating system when porting that OS to run on Xen. Doing
++so does *not* cause the OS to become subject to the terms of the GPL.
++
++All other files in the Xen source distribution are covered by version
++2 of the GNU General Public License except where explicitly stated
++otherwise within individual source files.
++
++ -- Keir Fraser (on behalf of the Xen team)
++
++=====================================================================
++
++Permission is hereby granted, free of charge, to any person obtaining a copy
++of this software and associated documentation files (the "Software"), to
++deal in the Software without restriction, including without limitation the
++rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++sell copies of the Software, and to permit persons to whom the Software is
++furnished to do so, subject to the following conditions:
++
++The above copyright notice and this permission notice shall be included in
++all copies or substantial portions of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+Index: head-2008-11-25/include/xen/interface/arch-x86/cpuid.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/cpuid.h 2008-01-21 11:15:27.000000000 +0100
+@@ -0,0 +1,68 @@
++/******************************************************************************
++ * arch-x86/cpuid.h
++ *
++ * CPUID interface to Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2007 Citrix Systems, Inc.
++ *
++ * Authors:
++ * Keir Fraser <keir.fraser@citrix.com>
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_CPUID_H__
++#define __XEN_PUBLIC_ARCH_X86_CPUID_H__
++
++/* Xen identification leaves start at 0x40000000. */
++#define XEN_CPUID_FIRST_LEAF 0x40000000
++#define XEN_CPUID_LEAF(i) (XEN_CPUID_FIRST_LEAF + (i))
++
++/*
++ * Leaf 1 (0x40000000)
++ * EAX: Largest Xen-information leaf. All leaves up to and including @EAX
++ * are supported by the Xen host.
++ * EBX-EDX: "XenVMMXenVMM" signature, allowing positive identification
++ * of a Xen host.
++ */
++#define XEN_CPUID_SIGNATURE_EBX 0x566e6558 /* "XenV" */
++#define XEN_CPUID_SIGNATURE_ECX 0x65584d4d /* "MMXe" */
++#define XEN_CPUID_SIGNATURE_EDX 0x4d4d566e /* "nVMM" */
++
++/*
++ * Leaf 2 (0x40000001)
++ * EAX[31:16]: Xen major version.
++ * EAX[15: 0]: Xen minor version.
++ * EBX-EDX: Reserved (currently all zeroes).
++ */
++
++/*
++ * Leaf 3 (0x40000002)
++ * EAX: Number of hypercall transfer pages. This register is always guaranteed
++ * to specify one hypercall page.
++ * EBX: Base address of Xen-specific MSRs.
++ * ECX: Features 1. Unused bits are set to zero.
++ * EDX: Features 2. Unused bits are set to zero.
++ */
++
++/* Does the host support MMU_PT_UPDATE_PRESERVE_AD for this guest? */
++#define _XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD 0
++#define XEN_CPUID_FEAT1_MMU_PT_UPDATE_PRESERVE_AD (1u<<0)
++
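++/*
++ * Editorial sketch, not part of the original interface: one way a guest
++ * might use the constants above to detect a Xen host. GCC inline asm is
++ * assumed; this is illustrative, not the kernel's actual detection code.
++ */
++#if 0 /* example only */
++static inline int running_on_xen(void)
++{
++ uint32_t eax, ebx, ecx, edx;
++
++ /* Leaf 0x40000000 returns the hypervisor signature in EBX:ECX:EDX. */
++ asm volatile("cpuid"
++ : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
++ : "0" (XEN_CPUID_FIRST_LEAF));
++
++ return ebx == XEN_CPUID_SIGNATURE_EBX &&
++ ecx == XEN_CPUID_SIGNATURE_ECX &&
++ edx == XEN_CPUID_SIGNATURE_EDX;
++}
++#endif /* example only */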
++#endif /* __XEN_PUBLIC_ARCH_X86_CPUID_H__ */
+Index: head-2008-11-25/include/xen/interface/arch-x86/hvm/save.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/hvm/save.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,440 @@
++/*
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ *
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_X86_H__
++#define __XEN_PUBLIC_HVM_SAVE_X86_H__
++
++/*
++ * Save/restore header: general info about the save file.
++ */
++
++#define HVM_FILE_MAGIC 0x54381286
++#define HVM_FILE_VERSION 0x00000001
++
++struct hvm_save_header {
++ uint32_t magic; /* Must be HVM_FILE_MAGIC */
++ uint32_t version; /* File format version */
++ uint64_t changeset; /* Version of Xen that saved this file */
++ uint32_t cpuid; /* CPUID[0x01][%eax] on the saving machine */
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(HEADER, 1, struct hvm_save_header);
++
++
++/*
++ * Processor
++ */
++
++struct hvm_hw_cpu {
++ uint8_t fpu_regs[512];
++
++ uint64_t rax;
++ uint64_t rbx;
++ uint64_t rcx;
++ uint64_t rdx;
++ uint64_t rbp;
++ uint64_t rsi;
++ uint64_t rdi;
++ uint64_t rsp;
++ uint64_t r8;
++ uint64_t r9;
++ uint64_t r10;
++ uint64_t r11;
++ uint64_t r12;
++ uint64_t r13;
++ uint64_t r14;
++ uint64_t r15;
++
++ uint64_t rip;
++ uint64_t rflags;
++
++ uint64_t cr0;
++ uint64_t cr2;
++ uint64_t cr3;
++ uint64_t cr4;
++
++ uint64_t dr0;
++ uint64_t dr1;
++ uint64_t dr2;
++ uint64_t dr3;
++ uint64_t dr6;
++ uint64_t dr7;
++
++ uint32_t cs_sel;
++ uint32_t ds_sel;
++ uint32_t es_sel;
++ uint32_t fs_sel;
++ uint32_t gs_sel;
++ uint32_t ss_sel;
++ uint32_t tr_sel;
++ uint32_t ldtr_sel;
++
++ uint32_t cs_limit;
++ uint32_t ds_limit;
++ uint32_t es_limit;
++ uint32_t fs_limit;
++ uint32_t gs_limit;
++ uint32_t ss_limit;
++ uint32_t tr_limit;
++ uint32_t ldtr_limit;
++ uint32_t idtr_limit;
++ uint32_t gdtr_limit;
++
++ uint64_t cs_base;
++ uint64_t ds_base;
++ uint64_t es_base;
++ uint64_t fs_base;
++ uint64_t gs_base;
++ uint64_t ss_base;
++ uint64_t tr_base;
++ uint64_t ldtr_base;
++ uint64_t idtr_base;
++ uint64_t gdtr_base;
++
++ uint32_t cs_arbytes;
++ uint32_t ds_arbytes;
++ uint32_t es_arbytes;
++ uint32_t fs_arbytes;
++ uint32_t gs_arbytes;
++ uint32_t ss_arbytes;
++ uint32_t tr_arbytes;
++ uint32_t ldtr_arbytes;
++
++ uint32_t sysenter_cs;
++ uint32_t padding0;
++
++ uint64_t sysenter_esp;
++ uint64_t sysenter_eip;
++
++ /* msr for em64t */
++ uint64_t shadow_gs;
++
++ /* msr content saved/restored. */
++ uint64_t msr_flags;
++ uint64_t msr_lstar;
++ uint64_t msr_star;
++ uint64_t msr_cstar;
++ uint64_t msr_syscall_mask;
++ uint64_t msr_efer;
++
++ /* guest's idea of what rdtsc() would return */
++ uint64_t tsc;
++
++ /* pending event, if any */
++ union {
++ uint32_t pending_event;
++ struct {
++ uint8_t pending_vector:8;
++ uint8_t pending_type:3;
++ uint8_t pending_error_valid:1;
++ uint32_t pending_reserved:19;
++ uint8_t pending_valid:1;
++ };
++ };
++ /* error code for pending event */
++ uint32_t error_code;
++};
++
++DECLARE_HVM_SAVE_TYPE(CPU, 2, struct hvm_hw_cpu);
++
++
++/*
++ * PIC
++ */
++
++struct hvm_hw_vpic {
++ /* IR line bitmasks. */
++ uint8_t irr;
++ uint8_t imr;
++ uint8_t isr;
++
++ /* Line IRx maps to IRQ irq_base+x */
++ uint8_t irq_base;
++
++ /*
++ * Where are we in ICW2-4 initialisation (0 means no init in progress)?
++ * Bits 0-1 (=x): Next write at A=1 sets ICW(x+1).
++ * Bit 2: ICW1.IC4 (1 == ICW4 included in init sequence)
++ * Bit 3: ICW1.SNGL (0 == ICW3 included in init sequence)
++ */
++ uint8_t init_state:4;
++
++ /* IR line with highest priority. */
++ uint8_t priority_add:4;
++
++ /* Reads from A=0 obtain ISR or IRR? */
++ uint8_t readsel_isr:1;
++
++ /* Reads perform a polling read? */
++ uint8_t poll:1;
++
++ /* Automatically clear IRQs from the ISR during INTA? */
++ uint8_t auto_eoi:1;
++
++ /* Automatically rotate IRQ priorities during AEOI? */
++ uint8_t rotate_on_auto_eoi:1;
++
++ /* Exclude slave inputs when considering in-service IRQs? */
++ uint8_t special_fully_nested_mode:1;
++
++ /* Special mask mode excludes masked IRs from AEOI and priority checks. */
++ uint8_t special_mask_mode:1;
++
++ /* Is this a master PIC or slave PIC? (NB. This is not programmable.) */
++ uint8_t is_master:1;
++
++ /* Edge/trigger selection. */
++ uint8_t elcr;
++
++ /* Virtual INT output. */
++ uint8_t int_output;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIC, 3, struct hvm_hw_vpic);
++
++
++/*
++ * IO-APIC
++ */
++
++#ifdef __ia64__
++#define VIOAPIC_IS_IOSAPIC 1
++#define VIOAPIC_NUM_PINS 24
++#else
++#define VIOAPIC_NUM_PINS 48 /* 16 ISA IRQs, 32 non-legacy PCI IRQs. */
++#endif
++
++struct hvm_hw_vioapic {
++ uint64_t base_address;
++ uint32_t ioregsel;
++ uint32_t id;
++ union vioapic_redir_entry
++ {
++ uint64_t bits;
++ struct {
++ uint8_t vector;
++ uint8_t delivery_mode:3;
++ uint8_t dest_mode:1;
++ uint8_t delivery_status:1;
++ uint8_t polarity:1;
++ uint8_t remote_irr:1;
++ uint8_t trig_mode:1;
++ uint8_t mask:1;
++ uint8_t reserve:7;
++#if !VIOAPIC_IS_IOSAPIC
++ uint8_t reserved[4];
++ uint8_t dest_id;
++#else
++ uint8_t reserved[3];
++ uint16_t dest_id;
++#endif
++ } fields;
++ } redirtbl[VIOAPIC_NUM_PINS];
++};
++
++DECLARE_HVM_SAVE_TYPE(IOAPIC, 4, struct hvm_hw_vioapic);
++
++
++/*
++ * LAPIC
++ */
++
++struct hvm_hw_lapic {
++ uint64_t apic_base_msr;
++ uint32_t disabled; /* VLAPIC_xx_DISABLED */
++ uint32_t timer_divisor;
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC, 5, struct hvm_hw_lapic);
++
++struct hvm_hw_lapic_regs {
++ uint8_t data[1024];
++};
++
++DECLARE_HVM_SAVE_TYPE(LAPIC_REGS, 6, struct hvm_hw_lapic_regs);
++
++
++/*
++ * IRQs
++ */
++
++struct hvm_hw_pci_irqs {
++ /*
++ * Virtual interrupt wires for a single PCI bus.
++ * Indexed by: device*4 + INTx#.
++ */
++ union {
++ DECLARE_BITMAP(i, 32*4);
++ uint64_t pad[2];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(PCI_IRQ, 7, struct hvm_hw_pci_irqs);
++
++struct hvm_hw_isa_irqs {
++ /*
++ * Virtual interrupt wires for ISA devices.
++ * Indexed by ISA IRQ (assumes no ISA-device IRQ sharing).
++ */
++ union {
++ DECLARE_BITMAP(i, 16);
++ uint64_t pad[1];
++ };
++};
++
++DECLARE_HVM_SAVE_TYPE(ISA_IRQ, 8, struct hvm_hw_isa_irqs);
++
++struct hvm_hw_pci_link {
++ /*
++ * PCI-ISA interrupt router.
++ * Each PCI <device:INTx#> is 'wire-ORed' into one of four links using
++ * the traditional 'barber's pole' mapping ((device + INTx#) & 3).
++ * The router provides a programmable mapping from each link to a GSI.
++ */
++ uint8_t route[4];
++ uint8_t pad0[4];
++};
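++/*
++ * Editorial worked example (not part of the original interface): with
++ * the rule ((device + INTx#) & 3), device 3 INTA# (INTx# 0) and device
++ * 2 INTB# (INTx# 1) both map to link 3, whose GSI is route[3].
++ */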
++
++DECLARE_HVM_SAVE_TYPE(PCI_LINK, 9, struct hvm_hw_pci_link);
++
++/*
++ * PIT
++ */
++
++struct hvm_hw_pit {
++ struct hvm_hw_pit_channel {
++ uint32_t count; /* can be 65536 */
++ uint16_t latched_count;
++ uint8_t count_latched;
++ uint8_t status_latched;
++ uint8_t status;
++ uint8_t read_state;
++ uint8_t write_state;
++ uint8_t write_latch;
++ uint8_t rw_mode;
++ uint8_t mode;
++ uint8_t bcd; /* not supported */
++ uint8_t gate; /* timer start */
++ } channels[3]; /* 3 x 16 bytes */
++ uint32_t speaker_data_on;
++ uint32_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(PIT, 10, struct hvm_hw_pit);
++
++
++/*
++ * RTC
++ */
++
++#define RTC_CMOS_SIZE 14
++struct hvm_hw_rtc {
++ /* CMOS bytes */
++ uint8_t cmos_data[RTC_CMOS_SIZE];
++ /* Index register for 2-part operations */
++ uint8_t cmos_index;
++ uint8_t pad0;
++};
++
++DECLARE_HVM_SAVE_TYPE(RTC, 11, struct hvm_hw_rtc);
++
++
++/*
++ * HPET
++ */
++
++#define HPET_TIMER_NUM 3 /* 3 timers supported now */
++struct hvm_hw_hpet {
++ /* Memory-mapped, software visible registers */
++ uint64_t capability; /* capabilities */
++ uint64_t res0; /* reserved */
++ uint64_t config; /* configuration */
++ uint64_t res1; /* reserved */
++ uint64_t isr; /* interrupt status reg */
++ uint64_t res2[25]; /* reserved */
++ uint64_t mc64; /* main counter */
++ uint64_t res3; /* reserved */
++ struct { /* timers */
++ uint64_t config; /* configuration/cap */
++ uint64_t cmp; /* comparator */
++ uint64_t fsb; /* FSB route, not supported now */
++ uint64_t res4; /* reserved */
++ } timers[HPET_TIMER_NUM];
++ uint64_t res5[4*(24-HPET_TIMER_NUM)]; /* reserved, up to 0x3ff */
++
++ /* Hidden register state */
++ uint64_t period[HPET_TIMER_NUM]; /* Last value written to comparator */
++};
++
++DECLARE_HVM_SAVE_TYPE(HPET, 12, struct hvm_hw_hpet);
++
++
++/*
++ * PM timer
++ */
++
++struct hvm_hw_pmtimer {
++ uint32_t tmr_val; /* PM_TMR_BLK.TMR_VAL: 32bit free-running counter */
++ uint16_t pm1a_sts; /* PM1a_EVT_BLK.PM1a_STS: status register */
++ uint16_t pm1a_en; /* PM1a_EVT_BLK.PM1a_EN: enable register */
++};
++
++DECLARE_HVM_SAVE_TYPE(PMTIMER, 13, struct hvm_hw_pmtimer);
++
++/*
++ * MTRR MSRs
++ */
++
++struct hvm_hw_mtrr {
++#define MTRR_VCNT 8
++#define NUM_FIXED_MSR 11
++ uint64_t msr_pat_cr;
++ /* MTRR physbase & physmask MSR pair */
++ uint64_t msr_mtrr_var[MTRR_VCNT*2];
++ uint64_t msr_mtrr_fixed[NUM_FIXED_MSR];
++ uint64_t msr_mtrr_cap;
++ uint64_t msr_mtrr_def_type;
++};
++
++DECLARE_HVM_SAVE_TYPE(MTRR, 14, struct hvm_hw_mtrr);
++
++/*
++ * Viridian hypervisor context.
++ */
++
++struct hvm_viridian_context {
++ uint64_t hypercall_gpa;
++ uint64_t guest_os_id;
++};
++
++DECLARE_HVM_SAVE_TYPE(VIRIDIAN, 15, struct hvm_viridian_context);
++
++/*
++ * Largest type-code in use
++ */
++#define HVM_SAVE_CODE_MAX 15
++
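++/*
++ * Editorial sketch, not part of the original interface: the minimal
++ * sanity check a restore path would apply before trusting a save image.
++ */
++#if 0 /* example only */
++static inline int hvm_save_header_ok(const struct hvm_save_header *h)
++{
++ if (h->magic != HVM_FILE_MAGIC) /* not an HVM save image */
++ return 0;
++ if (h->version != HVM_FILE_VERSION) /* unknown file layout */
++ return 0;
++ return 1;
++}
++#endif /* example only */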
++#endif /* __XEN_PUBLIC_HVM_SAVE_X86_H__ */
+Index: head-2008-11-25/include/xen/interface/arch-x86/xen-mca.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/xen-mca.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,279 @@
++/******************************************************************************
++ * arch-x86/mca.h
++ *
++ * Contributed by Advanced Micro Devices, Inc.
++ * Author: Christoph Egger <Christoph.Egger@amd.com>
++ *
++ * Guest OS machine check interface to x86 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++/* Full MCA functionality has the following Usecases from the guest side:
++ *
++ * Must have's:
++ * 1. Dom0 and DomU register machine check trap callback handlers
++ * (already done via "set_trap_table" hypercall)
++ * 2. Dom0 registers machine check event callback handler
++ * (doable via EVTCHNOP_bind_virq)
++ * 3. Dom0 and DomU fetches machine check data
++ * 4. Dom0 wants Xen to notify a DomU
++ * 5. Dom0 gets DomU ID from physical address
++ * 6. Dom0 wants Xen to kill DomU (already done for "xm destroy")
++ *
++ * Nice to have's:
++ * 7. Dom0 wants Xen to deactivate a physical CPU
++ * This is better done as a separate task (physical CPU hotplugging),
++ * and the hypercall(s) should be sysctls
++ * 8. Page migration proposed from Xen NUMA work, where Dom0 can tell Xen to
++ * move a DomU (or Dom0 itself) away from a malicious page
++ * producing correctable errors.
++ * 9. Offlining physical page:
++ * Xen frees and never re-uses a certain physical page.
++ * 10. Test facility: Allow Dom0 to write values into machine check MSRs
++ * and tell Xen to trigger a machine check
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_MCA_H__
++#define __XEN_PUBLIC_ARCH_X86_MCA_H__
++
++/* Hypercall */
++#define __HYPERVISOR_mca __HYPERVISOR_arch_0
++
++#define XEN_MCA_INTERFACE_VERSION 0x03000001
++
++/* IN: Dom0 calls hypercall from MC event handler. */
++#define XEN_MC_CORRECTABLE 0x0
++/* IN: Dom0/DomU calls hypercall from MC trap handler. */
++#define XEN_MC_TRAP 0x1
++/* XEN_MC_CORRECTABLE and XEN_MC_TRAP are mutually exclusive. */
++
++/* OUT: All is ok */
++#define XEN_MC_OK 0x0
++/* OUT: Domain could not fetch data. */
++#define XEN_MC_FETCHFAILED 0x1
++/* OUT: There was no machine check data to fetch. */
++#define XEN_MC_NODATA 0x2
++/* OUT: Between notification time and this hypercall another
++ * (most likely) correctable error happened. The fetched data
++ * does not match the original machine check data. */
++#define XEN_MC_NOMATCH 0x4
++
++/* OUT: DomU did not register MC NMI handler. Try something else. */
++#define XEN_MC_CANNOTHANDLE 0x8
++/* OUT: Notifying DomU failed. Retry later or try something else. */
++#define XEN_MC_NOTDELIVERED 0x10
++/* Note, XEN_MC_CANNOTHANDLE and XEN_MC_NOTDELIVERED are mutually exclusive. */
++
++
++#ifndef __ASSEMBLY__
++
++#define VIRQ_MCA VIRQ_ARCH_0 /* G. (DOM0) Machine Check Architecture */
++
++/*
++ * Machine Check Architecture:
++ * structs are read-only and used to report all kinds of
++ * correctable and uncorrectable errors detected by the HW.
++ * Dom0 and DomU: register a handler to get notified.
++ * Dom0 only: Correctable errors are reported via VIRQ_MCA
++ * Dom0 and DomU: Uncorrectable errors are reported via NMI handlers
++ */
++#define MC_TYPE_GLOBAL 0
++#define MC_TYPE_BANK 1
++#define MC_TYPE_EXTENDED 2
++
++struct mcinfo_common {
++ uint16_t type; /* structure type */
++ uint16_t size; /* size of this struct in bytes */
++};
++
++
++#define MC_FLAG_CORRECTABLE (1 << 0)
++#define MC_FLAG_UNCORRECTABLE (1 << 1)
++
++/* contains global x86 mc information */
++struct mcinfo_global {
++ struct mcinfo_common common;
++
++ /* domain running at the time of the error (most likely the impacted one) */
++ uint16_t mc_domid;
++ uint32_t mc_socketid; /* physical socket of the physical core */
++ uint16_t mc_coreid; /* physical impacted core */
++ uint16_t mc_core_threadid; /* core thread of physical core */
++ uint16_t mc_vcpuid; /* virtual cpu scheduled for mc_domid */
++ uint64_t mc_gstatus; /* global status */
++ uint32_t mc_flags;
++};
++
++/* contains bank local x86 mc information */
++struct mcinfo_bank {
++ struct mcinfo_common common;
++
++ uint16_t mc_bank; /* bank nr */
++ uint16_t mc_domid; /* Usecase 5: domain referenced by mc_addr, Dom0
++ * only and only if mc_addr is valid. Never valid on DomU. */
++ uint64_t mc_status; /* bank status */
++ uint64_t mc_addr; /* bank address, only valid
++ * if addr bit is set in mc_status */
++ uint64_t mc_misc;
++};
++
++
++struct mcinfo_msr {
++ uint64_t reg; /* MSR */
++ uint64_t value; /* MSR value */
++};
++
++/* contains mc information from other
++ * or additional mc MSRs */
++struct mcinfo_extended {
++ struct mcinfo_common common;
++
++ /* You can fill up to five registers.
++ * If you need more, then use this structure
++ * multiple times. */
++
++ uint32_t mc_msrs; /* Number of MSRs with valid values. */
++ struct mcinfo_msr mc_msr[5];
++};
++
++#define MCINFO_HYPERCALLSIZE 1024
++#define MCINFO_MAXSIZE 768
++
++struct mc_info {
++ /* Number of mcinfo_* entries in mi_data */
++ uint32_t mi_nentries;
++
++ uint8_t mi_data[MCINFO_MAXSIZE - sizeof(uint32_t)];
++};
++typedef struct mc_info mc_info_t;
++
++
++
++/*
++ * OS's should use these instead of writing their own lookup function
++ * each with its own bugs and drawbacks.
++ * We use macros instead of static inline functions to allow guests
++ * to include this header in assembly files (*.S).
++ */
++/* Prototype:
++ * uint32_t x86_mcinfo_nentries(struct mc_info *mi);
++ */
++#define x86_mcinfo_nentries(_mi) \
++ (_mi)->mi_nentries
++/* Prototype:
++ * struct mcinfo_common *x86_mcinfo_first(struct mc_info *mi);
++ */
++#define x86_mcinfo_first(_mi) \
++ (struct mcinfo_common *)((_mi)->mi_data)
++/* Prototype:
++ * struct mcinfo_common *x86_mcinfo_next(struct mcinfo_common *mic);
++ */
++#define x86_mcinfo_next(_mic) \
++ (struct mcinfo_common *)((uint8_t *)(_mic) + (_mic)->size)
++
++/* Prototype:
++ * void x86_mcinfo_lookup(void *ret, struct mc_info *mi, uint16_t type);
++ */
++#define x86_mcinfo_lookup(_ret, _mi, _type) \
++ do { \
++ uint32_t found, i; \
++ struct mcinfo_common *_mic; \
++ \
++ found = 0; \
++ (_ret) = NULL; \
++ if (_mi == NULL) break; \
++ _mic = x86_mcinfo_first(_mi); \
++ for (i = 0; i < x86_mcinfo_nentries(_mi); i++) { \
++ if (_mic->type == (_type)) { \
++ found = 1; \
++ break; \
++ } \
++ _mic = x86_mcinfo_next(_mic); \
++ } \
++ (_ret) = found ? _mic : NULL; \
++ } while (0)
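++
++/*
++ * Editorial sketch, not part of the original interface: typical use of
++ * the lookup macro above to pull the global record out of an mc_info.
++ */
++#if 0 /* example only */
++static inline struct mcinfo_global *mc_find_global(struct mc_info *mi)
++{
++ struct mcinfo_common *mic;
++
++ /* Walks mi->mi_nentries records, matching on mcinfo_common.type. */
++ x86_mcinfo_lookup(mic, mi, MC_TYPE_GLOBAL);
++ return (struct mcinfo_global *)mic; /* NULL if no such record */
++}
++#endif /* example only */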
++
++
++/* Usecase 1
++ * Register machine check trap callback handler
++ * (already done via "set_trap_table" hypercall)
++ */
++
++/* Usecase 2
++ * Dom0 registers machine check event callback handler
++ * done by EVTCHNOP_bind_virq
++ */
++
++/* Usecase 3
++ * Fetch machine check data from hypervisor.
++ * Note: this hypercall is special, because both Dom0 and DomU must use it.
++ */
++#define XEN_MC_fetch 1
++struct xen_mc_fetch {
++ /* IN/OUT variables. */
++ uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_FETCHFAILED, XEN_MC_NODATA, XEN_MC_NOMATCH */
++
++ /* OUT variables. */
++ uint32_t fetch_idx; /* only useful for Dom0 for the notify hypercall */
++ struct mc_info mc_info;
++};
++typedef struct xen_mc_fetch xen_mc_fetch_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_fetch_t);
++
++
++/* Usecase 4
++ * This tells the hypervisor to notify a DomU about the machine check error
++ */
++#define XEN_MC_notifydomain 2
++struct xen_mc_notifydomain {
++ /* IN variables. */
++ uint16_t mc_domid; /* The unprivileged domain to notify. */
++ uint16_t mc_vcpuid; /* The vcpu in mc_domid to notify.
++ * Usually echo'd value from the fetch hypercall. */
++ uint32_t fetch_idx; /* echo'd value from the fetch hypercall. */
++
++ /* IN/OUT variables. */
++ uint32_t flags;
++
++/* IN: XEN_MC_CORRECTABLE, XEN_MC_TRAP */
++/* OUT: XEN_MC_OK, XEN_MC_CANNOTHANDLE, XEN_MC_NOTDELIVERED, XEN_MC_NOMATCH */
++};
++typedef struct xen_mc_notifydomain xen_mc_notifydomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_notifydomain_t);
++
++
++struct xen_mc {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_MCA_INTERFACE_VERSION */
++ union {
++ struct xen_mc_fetch mc_fetch;
++ struct xen_mc_notifydomain mc_notifydomain;
++ uint8_t pad[MCINFO_HYPERCALLSIZE];
++ } u;
++};
++typedef struct xen_mc xen_mc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_mc_t);
++
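++/*
++ * Editorial sketch, not part of the original interface: filling in the
++ * multiplexed xen_mc structure for a fetch. The HYPERVISOR_mca()
++ * wrapper name is hypothetical; only the layout above is authoritative.
++ */
++#if 0 /* example only */
++static inline int mc_fetch_correctable(struct mc_info *dst)
++{
++ struct xen_mc mc = {
++ .cmd = XEN_MC_fetch,
++ .interface_version = XEN_MCA_INTERFACE_VERSION,
++ };
++ int rc;
++
++ mc.u.mc_fetch.flags = XEN_MC_CORRECTABLE; /* IN: event-handler path */
++ rc = HYPERVISOR_mca(&mc); /* hypothetical wrapper */
++ if (rc == 0 &&
++ !(mc.u.mc_fetch.flags & (XEN_MC_FETCHFAILED | XEN_MC_NODATA)))
++ *dst = mc.u.mc_fetch.mc_info; /* copy out the records */
++ return rc;
++}
++#endif /* example only */
++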
++#endif /* __ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_MCA_H__ */
+Index: head-2008-11-25/include/xen/interface/arch-x86/xen-x86_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/xen-x86_32.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,180 @@
++/******************************************************************************
++ * xen-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2007, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__
++
++/*
++ * Hypercall interface:
++ * Input: %ebx, %ecx, %edx, %esi, %edi (arguments 1-5)
++ * Output: %eax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: Argument registers (e.g., 2-arg hypercall clobbers %ebx,%ecx)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; int $0x82
++ */
++#define TRAP_INSTR "int $0x82"
++#endif
++
++/*
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++#define FLAT_RING1_CS 0xe019 /* GDT index 259 */
++#define FLAT_RING1_DS 0xe021 /* GDT index 260 */
++#define FLAT_RING1_SS 0xe021 /* GDT index 260 */
++#define FLAT_RING3_CS 0xe02b /* GDT index 261 */
++#define FLAT_RING3_DS 0xe033 /* GDT index 262 */
++#define FLAT_RING3_SS 0xe033 /* GDT index 262 */
++
++#define FLAT_KERNEL_CS FLAT_RING1_CS
++#define FLAT_KERNEL_DS FLAT_RING1_DS
++#define FLAT_KERNEL_SS FLAT_RING1_SS
++#define FLAT_USER_CS FLAT_RING3_CS
++#define FLAT_USER_DS FLAT_RING3_DS
++#define FLAT_USER_SS FLAT_RING3_SS
++
++#define __HYPERVISOR_VIRT_START_PAE 0xF5800000
++#define __MACH2PHYS_VIRT_START_PAE 0xF5800000
++#define __MACH2PHYS_VIRT_END_PAE 0xF6800000
++#define HYPERVISOR_VIRT_START_PAE \
++ mk_unsigned_long(__HYPERVISOR_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_START_PAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_START_PAE)
++#define MACH2PHYS_VIRT_END_PAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_END_PAE)
++
++/* Non-PAE bounds are obsolete. */
++#define __HYPERVISOR_VIRT_START_NONPAE 0xFC000000
++#define __MACH2PHYS_VIRT_START_NONPAE 0xFC000000
++#define __MACH2PHYS_VIRT_END_NONPAE 0xFC400000
++#define HYPERVISOR_VIRT_START_NONPAE \
++ mk_unsigned_long(__HYPERVISOR_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_START_NONPAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_START_NONPAE)
++#define MACH2PHYS_VIRT_END_NONPAE \
++ mk_unsigned_long(__MACH2PHYS_VIRT_END_NONPAE)
++
++#define __HYPERVISOR_VIRT_START __HYPERVISOR_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_START __MACH2PHYS_VIRT_START_PAE
++#define __MACH2PHYS_VIRT_END __MACH2PHYS_VIRT_END_PAE
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>2)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)MACH2PHYS_VIRT_START)
++#endif
++
++/* 32-/64-bit invariability for control interfaces (domctl/sysctl). */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++#undef ___DEFINE_XEN_GUEST_HANDLE
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } \
++ __guest_handle_ ## name; \
++ typedef struct { union { type *p; uint64_aligned_t q; }; } \
++ __guest_handle_64_ ## name
++#undef set_xen_guest_handle
++#define set_xen_guest_handle(hnd, val) \
++ do { if ( sizeof(hnd) == 8 ) *(uint64_t *)&(hnd) = 0; \
++ (hnd).p = val; \
++ } while ( 0 )
++#define uint64_aligned_t uint64_t __attribute__((aligned(8)))
++#define __XEN_GUEST_HANDLE_64(name) __guest_handle_64_ ## name
++#define XEN_GUEST_HANDLE_64(name) __XEN_GUEST_HANDLE_64(name)
++#endif
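++
++/*
++ * Editorial note (not part of the original interface): the sizeof()
++ * test in set_xen_guest_handle() zeroes the full 8-byte slot before a
++ * 32-bit pointer store when the handle is the 64-bit variant, so no
++ * stale upper bits reach the hypervisor.
++ */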
++
++#ifndef __ASSEMBLY__
++
++struct cpu_user_regs {
++ uint32_t ebx;
++ uint32_t ecx;
++ uint32_t edx;
++ uint32_t esi;
++ uint32_t edi;
++ uint32_t ebp;
++ uint32_t eax;
++ uint16_t error_code; /* private */
++ uint16_t entry_vector; /* private */
++ uint32_t eip;
++ uint16_t cs;
++ uint8_t saved_upcall_mask;
++ uint8_t _pad0;
++ uint32_t eflags; /* eflags.IF == !saved_upcall_mask */
++ uint32_t esp;
++ uint16_t ss, _pad1;
++ uint16_t es, _pad2;
++ uint16_t ds, _pad3;
++ uint16_t fs, _pad4;
++ uint16_t gs, _pad5;
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++/*
++ * Page-directory addresses above 4GB do not fit into architectural %cr3.
++ * When accessing %cr3, or equivalent field in vcpu_guest_context, guests
++ * must use the following accessor macros to pack/unpack valid MFNs.
++ */
++#define xen_pfn_to_cr3(pfn) (((unsigned)(pfn) << 12) | ((unsigned)(pfn) >> 20))
++#define xen_cr3_to_pfn(cr3) (((unsigned)(cr3) >> 12) | ((unsigned)(cr3) << 20))
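++
++/*
++ * Editorial worked example (not part of the original interface): the
++ * macros above rotate the 32-bit value by 12 bits so a PAE directory
++ * MFN wider than 20 bits still fits in %cr3. For pfn = 0x00123456:
++ * xen_pfn_to_cr3(0x00123456) = 0x23456000 | 0x00000001 = 0x23456001
++ * xen_cr3_to_pfn(0x23456001) = 0x00023456 | 0x00100000 = 0x00123456
++ */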
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad[5]; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++struct xen_callback {
++ unsigned long cs;
++ unsigned long eip;
++};
++typedef struct xen_callback xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_32_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/arch-x86/xen-x86_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/xen-x86_64.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,212 @@
++/******************************************************************************
++ * xen-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__
++
++/*
++ * Hypercall interface:
++ * Input: %rdi, %rsi, %rdx, %r10, %r8 (arguments 1-5)
++ * Output: %rax
++ * Access is via hypercall page (set up by guest loader or via a Xen MSR):
++ * call hypercall_page + hypercall-number * 32
++ * Clobbered: argument registers (e.g., 2-arg hypercall clobbers %rdi,%rsi)
++ */
++
++#if __XEN_INTERFACE_VERSION__ < 0x00030203
++/*
++ * Legacy hypercall interface:
++ * As above, except the entry sequence to the hypervisor is:
++ * mov $hypercall-number*32,%eax ; syscall
++ * Clobbered: %rcx, %r11, argument registers (as above)
++ */
++#define TRAP_INSTR "syscall"
++#endif
++
++/*
++ * 64-bit segment selectors
++ * These flat segments are in the Xen-private section of every GDT. Since these
++ * are also present in the initial GDT, many OSes will be able to avoid
++ * installing their own GDT.
++ */
++
++#define FLAT_RING3_CS32 0xe023 /* GDT index 260 */
++#define FLAT_RING3_CS64 0xe033 /* GDT index 261 */
++#define FLAT_RING3_DS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_DS64 0x0000 /* NULL selector */
++#define FLAT_RING3_SS32 0xe02b /* GDT index 262 */
++#define FLAT_RING3_SS64 0xe02b /* GDT index 262 */
++
++#define FLAT_KERNEL_DS64 FLAT_RING3_DS64
++#define FLAT_KERNEL_DS32 FLAT_RING3_DS32
++#define FLAT_KERNEL_DS FLAT_KERNEL_DS64
++#define FLAT_KERNEL_CS64 FLAT_RING3_CS64
++#define FLAT_KERNEL_CS32 FLAT_RING3_CS32
++#define FLAT_KERNEL_CS FLAT_KERNEL_CS64
++#define FLAT_KERNEL_SS64 FLAT_RING3_SS64
++#define FLAT_KERNEL_SS32 FLAT_RING3_SS32
++#define FLAT_KERNEL_SS FLAT_KERNEL_SS64
++
++#define FLAT_USER_DS64 FLAT_RING3_DS64
++#define FLAT_USER_DS32 FLAT_RING3_DS32
++#define FLAT_USER_DS FLAT_USER_DS64
++#define FLAT_USER_CS64 FLAT_RING3_CS64
++#define FLAT_USER_CS32 FLAT_RING3_CS32
++#define FLAT_USER_CS FLAT_USER_CS64
++#define FLAT_USER_SS64 FLAT_RING3_SS64
++#define FLAT_USER_SS32 FLAT_RING3_SS32
++#define FLAT_USER_SS FLAT_USER_SS64
++
++#define __HYPERVISOR_VIRT_START 0xFFFF800000000000
++#define __HYPERVISOR_VIRT_END 0xFFFF880000000000
++#define __MACH2PHYS_VIRT_START 0xFFFF800000000000
++#define __MACH2PHYS_VIRT_END 0xFFFF804000000000
++
++#ifndef HYPERVISOR_VIRT_START
++#define HYPERVISOR_VIRT_START mk_unsigned_long(__HYPERVISOR_VIRT_START)
++#define HYPERVISOR_VIRT_END mk_unsigned_long(__HYPERVISOR_VIRT_END)
++#endif
++
++#define MACH2PHYS_VIRT_START mk_unsigned_long(__MACH2PHYS_VIRT_START)
++#define MACH2PHYS_VIRT_END mk_unsigned_long(__MACH2PHYS_VIRT_END)
++#define MACH2PHYS_NR_ENTRIES ((MACH2PHYS_VIRT_END-MACH2PHYS_VIRT_START)>>3)
++#ifndef machine_to_phys_mapping
++#define machine_to_phys_mapping ((unsigned long *)HYPERVISOR_VIRT_START)
++#endif
++
++/*
++ * int HYPERVISOR_set_segment_base(unsigned int which, unsigned long base)
++ * @which == SEGBASE_* ; @base == 64-bit base address
++ * Returns 0 on success.
++ */
++#define SEGBASE_FS 0
++#define SEGBASE_GS_USER 1
++#define SEGBASE_GS_KERNEL 2
++#define SEGBASE_GS_USER_SEL 3 /* Set user %gs specified in base[15:0] */
++
++/*
++ * int HYPERVISOR_iret(void)
++ * All arguments are on the kernel stack, in the following format.
++ * Never returns if successful. Current kernel context is lost.
++ * The saved CS is mapped as follows:
++ * RING0 -> RING3 kernel mode.
++ * RING1 -> RING3 kernel mode.
++ * RING2 -> RING3 kernel mode.
++ * RING3 -> RING3 user mode.
++ * However, RING0 indicates that the guest kernel should return to itself
++ * directly with
++ * orb $3,1*8(%rsp)
++ * iretq
++ * If flags contains VGCF_in_syscall:
++ * Restore RAX, RIP, RFLAGS, RSP.
++ * Discard R11, RCX, CS, SS.
++ * Otherwise:
++ * Restore RAX, R11, RCX, CS:RIP, RFLAGS, SS:RSP.
++ * All other registers are saved on hypercall entry and restored to user.
++ */
++/* Guest exited in SYSCALL context? Return to guest with SYSRET? */
++#define _VGCF_in_syscall 8
++#define VGCF_in_syscall (1<<_VGCF_in_syscall)
++#define VGCF_IN_SYSCALL VGCF_in_syscall
++
++#ifndef __ASSEMBLY__
++
++struct iret_context {
++ /* Top of stack (%rsp at point of hypercall). */
++ uint64_t rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ /* Bottom of iret stack frame. */
++};
++
++#if defined(__GNUC__) && !defined(__STRICT_ANSI__)
++/* Anonymous union includes both 32- and 64-bit names (e.g., eax/rax). */
++#define __DECL_REG(name) union { \
++ uint64_t r ## name, e ## name; \
++ uint32_t _e ## name; \
++}
++#else
++/* Non-gcc sources must always use the proper 64-bit name (e.g., rax). */
++#define __DECL_REG(name) uint64_t r ## name
++#endif
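++
++/*
++ * Editorial note (not part of the original interface): under gcc,
++ * __DECL_REG(ax) expands to
++ *
++ * union { uint64_t rax, eax; uint32_t _eax; };
++ *
++ * so 'rax' and 'eax' both name the full 64-bit slot, while '_eax'
++ * reads just the low 32 bits on little-endian x86.
++ */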
++
++struct cpu_user_regs {
++ uint64_t r15;
++ uint64_t r14;
++ uint64_t r13;
++ uint64_t r12;
++ __DECL_REG(bp);
++ __DECL_REG(bx);
++ uint64_t r11;
++ uint64_t r10;
++ uint64_t r9;
++ uint64_t r8;
++ __DECL_REG(ax);
++ __DECL_REG(cx);
++ __DECL_REG(dx);
++ __DECL_REG(si);
++ __DECL_REG(di);
++ uint32_t error_code; /* private */
++ uint32_t entry_vector; /* private */
++ __DECL_REG(ip);
++ uint16_t cs, _pad0[1];
++ uint8_t saved_upcall_mask;
++ uint8_t _pad1[3];
++ __DECL_REG(flags); /* rflags.IF == !saved_upcall_mask */
++ __DECL_REG(sp);
++ uint16_t ss, _pad2[3];
++ uint16_t es, _pad3[3];
++ uint16_t ds, _pad4[3];
++ uint16_t fs, _pad5[3]; /* Non-zero => takes precedence over fs_base. */
++ uint16_t gs, _pad6[3]; /* Non-zero => takes precedence over gs_base_usr. */
++};
++typedef struct cpu_user_regs cpu_user_regs_t;
++DEFINE_XEN_GUEST_HANDLE(cpu_user_regs_t);
++
++#undef __DECL_REG
++
++#define xen_pfn_to_cr3(pfn) ((unsigned long)(pfn) << 12)
++#define xen_cr3_to_pfn(cr3) ((unsigned long)(cr3) >> 12)
++
++struct arch_vcpu_info {
++ unsigned long cr2;
++ unsigned long pad; /* sizeof(vcpu_info_t) == 64 */
++};
++typedef struct arch_vcpu_info arch_vcpu_info_t;
++
++typedef unsigned long xen_callback_t;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_X86_64_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/arch-x86/xen.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86/xen.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,204 @@
++/******************************************************************************
++ * arch-x86/xen.h
++ *
++ * Guest OS interface to x86 Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "../xen.h"
++
++#ifndef __XEN_PUBLIC_ARCH_X86_XEN_H__
++#define __XEN_PUBLIC_ARCH_X86_XEN_H__
++
++/* Structural guest handles introduced in 0x00030201. */
++#if __XEN_INTERFACE_VERSION__ >= 0x00030201
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef struct { type *p; } __guest_handle_ ## name
++#else
++#define ___DEFINE_XEN_GUEST_HANDLE(name, type) \
++ typedef type * __guest_handle_ ## name
++#endif
++
++#define __DEFINE_XEN_GUEST_HANDLE(name, type) \
++ ___DEFINE_XEN_GUEST_HANDLE(name, type); \
++ ___DEFINE_XEN_GUEST_HANDLE(const_##name, const type)
++#define DEFINE_XEN_GUEST_HANDLE(name) __DEFINE_XEN_GUEST_HANDLE(name, name)
++#define __XEN_GUEST_HANDLE(name) __guest_handle_ ## name
++#define XEN_GUEST_HANDLE(name) __XEN_GUEST_HANDLE(name)
++#define set_xen_guest_handle(hnd, val) do { (hnd).p = val; } while (0)
++#ifdef __XEN_TOOLS__
++#define get_xen_guest_handle(val, hnd) do { val = (hnd).p; } while (0)
++#endif
++
++#if defined(__i386__)
++#include "xen-x86_32.h"
++#elif defined(__x86_64__)
++#include "xen-x86_64.h"
++#endif
++
++#ifndef __ASSEMBLY__
++typedef unsigned long xen_pfn_t;
++#define PRI_xen_pfn "lx"
++#endif
++
++/*
++ * SEGMENT DESCRIPTOR TABLES
++ */
++/*
++ * A number of GDT entries are reserved by Xen. These are not situated at the
++ * start of the GDT because some stupid OSes export hard-coded selector values
++ * in their ABI. These hard-coded values are always near the start of the GDT,
++ * so Xen places itself out of the way, at the far end of the GDT.
++ */
++#define FIRST_RESERVED_GDT_PAGE 14
++#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
++#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
++
++/* Maximum number of virtual CPUs in multi-processor guests. */
++#define MAX_VIRT_CPUS 32
++
++
++/* Machine check support */
++#include "xen-mca.h"
++
++#ifndef __ASSEMBLY__
++
++typedef unsigned long xen_ulong_t;
++
++/*
++ * Send an array of these to HYPERVISOR_set_trap_table().
++ * The privilege level specifies which modes may enter a trap via a software
++ * interrupt. On x86/64, since rings 1 and 2 are unavailable, we allocate
++ * privilege levels as follows:
++ * Level == 0: No one may enter
++ * Level == 1: Kernel may enter
++ * Level == 2: Kernel may enter
++ * Level == 3: Everyone may enter
++ */
++#define TI_GET_DPL(_ti) ((_ti)->flags & 3)
++#define TI_GET_IF(_ti) ((_ti)->flags & 4)
++#define TI_SET_DPL(_ti,_dpl) ((_ti)->flags |= (_dpl))
++#define TI_SET_IF(_ti,_if) ((_ti)->flags |= ((!!(_if))<<2))
++struct trap_info {
++ uint8_t vector; /* exception vector */
++ uint8_t flags; /* 0-3: privilege level; 4: clear event enable? */
++ uint16_t cs; /* code selector */
++ unsigned long address; /* code offset */
++};
++typedef struct trap_info trap_info_t;
++DEFINE_XEN_GUEST_HANDLE(trap_info_t);
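++
++/*
++ * Editorial sketch, not part of the original interface: a trap table
++ * entry for an int80-style vector callable from user mode. The vector
++ * and the zero entry point are placeholders.
++ */
++#if 0 /* example only */
++static const struct trap_info example_trap = {
++ .vector = 0x80, /* software interrupt vector */
++ .flags = 3, /* DPL 3 => TI_GET_DPL() == 3, user-callable */
++ .cs = FLAT_KERNEL_CS, /* guest kernel code selector */
++ .address = 0, /* handler entry point goes here */
++};
++#endif /* example only */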
++
++typedef uint64_t tsc_timestamp_t; /* RDTSC timestamp */
++
++/*
++ * The following is all CPU context. Note that the fpu_ctxt block is filled
++ * in by FXSAVE if the CPU has feature FXSR; otherwise FSAVE is used.
++ */
++struct vcpu_guest_context {
++ /* FPU registers come first so they can be aligned for FXSAVE/FXRSTOR. */
++ struct { char x[512]; } fpu_ctxt; /* User-level FPU registers */
++#define VGCF_I387_VALID (1<<0)
++#define VGCF_IN_KERNEL (1<<2)
++#define _VGCF_i387_valid 0
++#define VGCF_i387_valid (1<<_VGCF_i387_valid)
++#define _VGCF_in_kernel 2
++#define VGCF_in_kernel (1<<_VGCF_in_kernel)
++#define _VGCF_failsafe_disables_events 3
++#define VGCF_failsafe_disables_events (1<<_VGCF_failsafe_disables_events)
++#define _VGCF_syscall_disables_events 4
++#define VGCF_syscall_disables_events (1<<_VGCF_syscall_disables_events)
++#define _VGCF_online 5
++#define VGCF_online (1<<_VGCF_online)
++ unsigned long flags; /* VGCF_* flags */
++ struct cpu_user_regs user_regs; /* User-level CPU registers */
++ struct trap_info trap_ctxt[256]; /* Virtual IDT */
++ unsigned long ldt_base, ldt_ents; /* LDT (linear address, # ents) */
++ unsigned long gdt_frames[16], gdt_ents; /* GDT (machine frames, # ents) */
++ unsigned long kernel_ss, kernel_sp; /* Virtual TSS (only SS1/SP1) */
++ /* NB. User pagetable on x86/64 is placed in ctrlreg[1]. */
++ unsigned long ctrlreg[8]; /* CR0-CR7 (control registers) */
++ unsigned long debugreg[8]; /* DB0-DB7 (debug registers) */
++#ifdef __i386__
++ unsigned long event_callback_cs; /* CS:EIP of event callback */
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_cs; /* CS:EIP of failsafe callback */
++ unsigned long failsafe_callback_eip;
++#else
++ unsigned long event_callback_eip;
++ unsigned long failsafe_callback_eip;
++#ifdef __XEN__
++ union {
++ unsigned long syscall_callback_eip;
++ struct {
++ unsigned int event_callback_cs; /* compat CS of event cb */
++ unsigned int failsafe_callback_cs; /* compat CS of failsafe cb */
++ };
++ };
++#else
++ unsigned long syscall_callback_eip;
++#endif
++#endif
++ unsigned long vm_assist; /* VMASST_TYPE_* bitmap */
++#ifdef __x86_64__
++ /* Segment base addresses. */
++ uint64_t fs_base;
++ uint64_t gs_base_kernel;
++ uint64_t gs_base_user;
++#endif
++};
++typedef struct vcpu_guest_context vcpu_guest_context_t;
++DEFINE_XEN_GUEST_HANDLE(vcpu_guest_context_t);
++
++struct arch_shared_info {
++ unsigned long max_pfn; /* max pfn that appears in table */
++ /* Frame containing list of mfns containing list of mfns containing p2m. */
++ xen_pfn_t pfn_to_mfn_frame_list_list;
++ unsigned long nmi_reason;
++ uint64_t pad[32];
++};
++typedef struct arch_shared_info arch_shared_info_t;
++
++#endif /* !__ASSEMBLY__ */
++
++/*
++ * Prefix forces emulation of some non-trapping instructions.
++ * Currently only CPUID.
++ */
++#ifdef __ASSEMBLY__
++#define XEN_EMULATE_PREFIX .byte 0x0f,0x0b,0x78,0x65,0x6e ;
++#define XEN_CPUID XEN_EMULATE_PREFIX cpuid
++#else
++#define XEN_EMULATE_PREFIX ".byte 0x0f,0x0b,0x78,0x65,0x6e ; "
++#define XEN_CPUID XEN_EMULATE_PREFIX "cpuid"
++#endif
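++
++/*
++ * Editorial sketch (not part of the original interface): CPUID does not
++ * trap to the hypervisor, so a PV guest issues it through the forced-
++ * emulation prefix above, e.g. with GCC inline asm:
++ *
++ * asm volatile(XEN_CPUID
++ * : "=a" (eax), "=b" (ebx), "=c" (ecx), "=d" (edx)
++ * : "0" (leaf));
++ *
++ * letting Xen intercept the instruction and fill in its 0x4000000x
++ * leaves itself.
++ */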
++
++#endif /* __XEN_PUBLIC_ARCH_X86_XEN_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/arch-x86_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86_32.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_32.h
++ *
++ * Guest OS interface to x86 32-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+Index: head-2008-11-25/include/xen/interface/arch-x86_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/arch-x86_64.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,27 @@
++/******************************************************************************
++ * arch-x86_64.h
++ *
++ * Guest OS interface to x86 64-bit Xen.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2004-2006, K A Fraser
++ */
++
++#include "arch-x86/xen.h"
+Index: head-2008-11-25/include/xen/interface/dom0_ops.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/dom0_ops.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,120 @@
++/******************************************************************************
++ * dom0_ops.h
++ *
++ * Process command requests from domain-0 guest OS.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_DOM0_OPS_H__
++#define __XEN_PUBLIC_DOM0_OPS_H__
++
++#include "xen.h"
++#include "platform.h"
++
++#if __XEN_INTERFACE_VERSION__ >= 0x00030204
++#error "dom0_ops.h is a compatibility interface only"
++#endif
++
++#define DOM0_INTERFACE_VERSION XENPF_INTERFACE_VERSION
++
++#define DOM0_SETTIME XENPF_settime
++#define dom0_settime xenpf_settime
++#define dom0_settime_t xenpf_settime_t
++
++#define DOM0_ADD_MEMTYPE XENPF_add_memtype
++#define dom0_add_memtype xenpf_add_memtype
++#define dom0_add_memtype_t xenpf_add_memtype_t
++
++#define DOM0_DEL_MEMTYPE XENPF_del_memtype
++#define dom0_del_memtype xenpf_del_memtype
++#define dom0_del_memtype_t xenpf_del_memtype_t
++
++#define DOM0_READ_MEMTYPE XENPF_read_memtype
++#define dom0_read_memtype xenpf_read_memtype
++#define dom0_read_memtype_t xenpf_read_memtype_t
++
++#define DOM0_MICROCODE XENPF_microcode_update
++#define dom0_microcode xenpf_microcode_update
++#define dom0_microcode_t xenpf_microcode_update_t
++
++#define DOM0_PLATFORM_QUIRK XENPF_platform_quirk
++#define dom0_platform_quirk xenpf_platform_quirk
++#define dom0_platform_quirk_t xenpf_platform_quirk_t
++
++typedef uint64_t cpumap_t;
++
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_MSR 15
++struct dom0_msr {
++ /* IN variables. */
++ uint32_t write;
++ cpumap_t cpu_mask;
++ uint32_t msr;
++ uint32_t in1;
++ uint32_t in2;
++ /* OUT variables. */
++ uint32_t out1;
++ uint32_t out2;
++};
++typedef struct dom0_msr dom0_msr_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_msr_t);
++
++/* Unsupported legacy operation -- defined for API compatibility. */
++#define DOM0_PHYSICAL_MEMORY_MAP 40
++struct dom0_memory_map_entry {
++ uint64_t start, end;
++ uint32_t flags; /* reserved */
++ uint8_t is_ram;
++};
++typedef struct dom0_memory_map_entry dom0_memory_map_entry_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_memory_map_entry_t);
++
++struct dom0_op {
++ uint32_t cmd;
++ uint32_t interface_version; /* DOM0_INTERFACE_VERSION */
++ union {
++ struct dom0_msr msr;
++ struct dom0_settime settime;
++ struct dom0_add_memtype add_memtype;
++ struct dom0_del_memtype del_memtype;
++ struct dom0_read_memtype read_memtype;
++ struct dom0_microcode microcode;
++ struct dom0_platform_quirk platform_quirk;
++ struct dom0_memory_map_entry physical_memory_map;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct dom0_op dom0_op_t;
++DEFINE_XEN_GUEST_HANDLE(dom0_op_t);
++
++#endif /* __XEN_PUBLIC_DOM0_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/domctl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/domctl.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,680 @@
++/******************************************************************************
++ * domctl.h
++ *
++ * Domain management operations. For use by node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2003, B Dragovic
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_DOMCTL_H__
++#define __XEN_PUBLIC_DOMCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "domctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++
++#define XEN_DOMCTL_INTERFACE_VERSION 0x00000005
++
++struct xenctl_cpumap {
++ XEN_GUEST_HANDLE_64(uint8) bitmap;
++ uint32_t nr_cpus;
++};
++
++/*
++ * NB. xen_domctl.domain is an IN/OUT parameter for this operation.
++ * If it is specified as zero, an id is auto-allocated and returned.
++ */
++#define XEN_DOMCTL_createdomain 1
++struct xen_domctl_createdomain {
++ /* IN parameters */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++ /* Is this an HVM guest (as opposed to a PV guest)? */
++#define _XEN_DOMCTL_CDF_hvm_guest 0
++#define XEN_DOMCTL_CDF_hvm_guest (1U<<_XEN_DOMCTL_CDF_hvm_guest)
++ /* Use hardware-assisted paging if available? */
++#define _XEN_DOMCTL_CDF_hap 1
++#define XEN_DOMCTL_CDF_hap (1U<<_XEN_DOMCTL_CDF_hap)
++ uint32_t flags;
++};
++typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
++
++#define XEN_DOMCTL_destroydomain 2
++#define XEN_DOMCTL_pausedomain 3
++#define XEN_DOMCTL_unpausedomain 4
++#define XEN_DOMCTL_resumedomain 27
++
++#define XEN_DOMCTL_getdomaininfo 5
++struct xen_domctl_getdomaininfo {
++ /* OUT variables. */
++ domid_t domain; /* Also echoed in domctl.domain */
++ /* Domain is scheduled to die. */
++#define _XEN_DOMINF_dying 0
++#define XEN_DOMINF_dying (1U<<_XEN_DOMINF_dying)
++ /* Domain is an HVM guest (as opposed to a PV guest). */
++#define _XEN_DOMINF_hvm_guest 1
++#define XEN_DOMINF_hvm_guest (1U<<_XEN_DOMINF_hvm_guest)
++ /* The guest OS has shut down. */
++#define _XEN_DOMINF_shutdown 2
++#define XEN_DOMINF_shutdown (1U<<_XEN_DOMINF_shutdown)
++ /* Currently paused by control software. */
++#define _XEN_DOMINF_paused 3
++#define XEN_DOMINF_paused (1U<<_XEN_DOMINF_paused)
++ /* Currently blocked pending an event. */
++#define _XEN_DOMINF_blocked 4
++#define XEN_DOMINF_blocked (1U<<_XEN_DOMINF_blocked)
++ /* Domain is currently running. */
++#define _XEN_DOMINF_running 5
++#define XEN_DOMINF_running (1U<<_XEN_DOMINF_running)
++ /* Being debugged. */
++#define _XEN_DOMINF_debugged 6
++#define XEN_DOMINF_debugged (1U<<_XEN_DOMINF_debugged)
++ /* CPU to which this domain is bound. */
++#define XEN_DOMINF_cpumask 255
++#define XEN_DOMINF_cpushift 8
++ /* XEN_DOMINF_shutdown guest-supplied code. */
++#define XEN_DOMINF_shutdownmask 255
++#define XEN_DOMINF_shutdownshift 16
++ uint32_t flags; /* XEN_DOMINF_* */
++ uint64_aligned_t tot_pages;
++ uint64_aligned_t max_pages;
++ uint64_aligned_t shared_info_frame; /* GMFN of shared_info struct */
++ uint64_aligned_t cpu_time;
++ uint32_t nr_online_vcpus; /* Number of VCPUs currently online. */
++ uint32_t max_vcpu_id; /* Maximum VCPUID in use by this domain. */
++ uint32_t ssidref;
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_getdomaininfo xen_domctl_getdomaininfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getdomaininfo_t);
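++
++/*
++ * The CPU binding and the guest shutdown code are packed into 'flags';
++ * a caller would unpack them roughly as follows (sketch only):
++ *
++ *     uint32_t cpu  = (info.flags >> XEN_DOMINF_cpushift)
++ *                     & XEN_DOMINF_cpumask;
++ *     uint32_t code = (info.flags >> XEN_DOMINF_shutdownshift)
++ *                     & XEN_DOMINF_shutdownmask;
++ *     int dying     = !!(info.flags & XEN_DOMINF_dying);
++ */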
++
++
++#define XEN_DOMCTL_getmemlist 6
++struct xen_domctl_getmemlist {
++ /* IN variables. */
++ /* Max entries to write to output buffer. */
++ uint64_aligned_t max_pfns;
++ /* Start index in guest's page list. */
++ uint64_aligned_t start_pfn;
++ XEN_GUEST_HANDLE_64(uint64) buffer;
++ /* OUT variables. */
++ uint64_aligned_t num_pfns;
++};
++typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo 7
++
++#define XEN_DOMCTL_PFINFO_LTAB_SHIFT 28
++#define XEN_DOMCTL_PFINFO_NOTAB (0x0U<<28)
++#define XEN_DOMCTL_PFINFO_L1TAB (0x1U<<28)
++#define XEN_DOMCTL_PFINFO_L2TAB (0x2U<<28)
++#define XEN_DOMCTL_PFINFO_L3TAB (0x3U<<28)
++#define XEN_DOMCTL_PFINFO_L4TAB (0x4U<<28)
++#define XEN_DOMCTL_PFINFO_LTABTYPE_MASK (0x7U<<28)
++#define XEN_DOMCTL_PFINFO_LPINTAB (0x1U<<31)
++#define XEN_DOMCTL_PFINFO_XTAB (0xfU<<28) /* invalid page */
++#define XEN_DOMCTL_PFINFO_LTAB_MASK (0xfU<<28)
++
++struct xen_domctl_getpageframeinfo {
++ /* IN variables. */
++ uint64_aligned_t gmfn; /* GMFN to query */
++ /* OUT variables. */
++ /* Is the page PINNED to a type? */
++ uint32_t type; /* see above type defs */
++};
++typedef struct xen_domctl_getpageframeinfo xen_domctl_getpageframeinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo_t);
++
++
++#define XEN_DOMCTL_getpageframeinfo2 8
++struct xen_domctl_getpageframeinfo2 {
++ /* IN variables. */
++ uint64_aligned_t num;
++ /* IN/OUT variables. */
++ XEN_GUEST_HANDLE_64(uint32) array;
++};
++typedef struct xen_domctl_getpageframeinfo2 xen_domctl_getpageframeinfo2_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getpageframeinfo2_t);
++
++
++/*
++ * Control shadow pagetables operation
++ */
++#define XEN_DOMCTL_shadow_op 10
++
++/* Disable shadow mode. */
++#define XEN_DOMCTL_SHADOW_OP_OFF 0
++
++/* Enable shadow mode (mode contains ORed XEN_DOMCTL_SHADOW_ENABLE_* flags). */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE 32
++
++/* Log-dirty bitmap operations. */
++ /* Return the bitmap and clean internal copy for next round. */
++#define XEN_DOMCTL_SHADOW_OP_CLEAN 11
++ /* Return the bitmap but do not modify internal copy. */
++#define XEN_DOMCTL_SHADOW_OP_PEEK 12
++
++/* Memory allocation accessors. */
++#define XEN_DOMCTL_SHADOW_OP_GET_ALLOCATION 30
++#define XEN_DOMCTL_SHADOW_OP_SET_ALLOCATION 31
++
++/* Legacy enable operations. */
++ /* Equiv. to ENABLE with no mode flags. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TEST 1
++ /* Equiv. to ENABLE with mode flag ENABLE_LOG_DIRTY. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_LOGDIRTY 2
++ /* Equiv. to ENABLE with mode flags ENABLE_REFCOUNT and ENABLE_TRANSLATE. */
++#define XEN_DOMCTL_SHADOW_OP_ENABLE_TRANSLATE 3
++
++/* Mode flags for XEN_DOMCTL_SHADOW_OP_ENABLE. */
++ /*
++ * Shadow pagetables are refcounted: guest does not use explicit mmu
++ * operations nor write-protect its pagetables.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT (1 << 1)
++ /*
++ * Log pages in a bitmap as they are dirtied.
++ * Used for live relocation to determine which pages must be re-sent.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY (1 << 2)
++ /*
++ * Automatically translate GPFNs into MFNs.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_TRANSLATE (1 << 3)
++ /*
++ * Xen does not steal virtual address space from the guest.
++ * Requires HVM support.
++ */
++#define XEN_DOMCTL_SHADOW_ENABLE_EXTERNAL (1 << 4)
++
++struct xen_domctl_shadow_op_stats {
++ uint32_t fault_count;
++ uint32_t dirty_count;
++};
++typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
++
++struct xen_domctl_shadow_op {
++ /* IN variables. */
++ uint32_t op; /* XEN_DOMCTL_SHADOW_OP_* */
++
++ /* OP_ENABLE */
++ uint32_t mode; /* XEN_DOMCTL_SHADOW_ENABLE_* */
++
++ /* OP_GET_ALLOCATION / OP_SET_ALLOCATION */
++ uint32_t mb; /* Shadow memory allocation in MB */
++
++ /* OP_PEEK / OP_CLEAN */
++ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++ uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
++ struct xen_domctl_shadow_op_stats stats;
++};
++typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
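++
++/*
++ * Hedged sketch of one log-dirty round as live migration would use it:
++ * enable dirty logging once, then repeatedly fetch-and-clear the
++ * bitmap.  "issue_domctl" is hypothetical and "dc" is a struct
++ * xen_domctl; set_xen_guest_handle() is the usual handle initialiser.
++ *
++ *     dc.cmd = XEN_DOMCTL_shadow_op;
++ *     dc.u.shadow_op.op   = XEN_DOMCTL_SHADOW_OP_ENABLE;
++ *     dc.u.shadow_op.mode = XEN_DOMCTL_SHADOW_ENABLE_REFCOUNT |
++ *                           XEN_DOMCTL_SHADOW_ENABLE_LOG_DIRTY;
++ *     issue_domctl(&dc);
++ *     ...
++ *     dc.u.shadow_op.op    = XEN_DOMCTL_SHADOW_OP_CLEAN;
++ *     dc.u.shadow_op.pages = nr_guest_pages;   // updated on return
++ *     set_xen_guest_handle(dc.u.shadow_op.dirty_bitmap, bitmap);
++ *     issue_domctl(&dc);   // bitmap now holds pages dirtied this round
++ */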
++
++
++#define XEN_DOMCTL_max_mem 11
++struct xen_domctl_max_mem {
++ /* IN variables. */
++ uint64_aligned_t max_memkb;
++};
++typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
++
++
++#define XEN_DOMCTL_setvcpucontext 12
++#define XEN_DOMCTL_getvcpucontext 13
++struct xen_domctl_vcpucontext {
++ uint32_t vcpu; /* IN */
++ XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
++
++
++#define XEN_DOMCTL_getvcpuinfo 14
++struct xen_domctl_getvcpuinfo {
++ /* IN variables. */
++ uint32_t vcpu;
++ /* OUT variables. */
++ uint8_t online; /* currently online (not hotplugged)? */
++ uint8_t blocked; /* blocked waiting for an event? */
++ uint8_t running; /* currently scheduled on its CPU? */
++ uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
++ uint32_t cpu; /* current mapping */
++};
++typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
++
++
++/* Get/set which physical cpus a vcpu can execute on. */
++#define XEN_DOMCTL_setvcpuaffinity 9
++#define XEN_DOMCTL_getvcpuaffinity 25
++struct xen_domctl_vcpuaffinity {
++ uint32_t vcpu; /* IN */
++ struct xenctl_cpumap cpumap; /* IN/OUT */
++};
++typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
++
++
++#define XEN_DOMCTL_max_vcpus 15
++struct xen_domctl_max_vcpus {
++ uint32_t max; /* maximum number of vcpus */
++};
++typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
++
++
++#define XEN_DOMCTL_scheduler_op 16
++/* Scheduler types. */
++#define XEN_SCHEDULER_SEDF 4
++#define XEN_SCHEDULER_CREDIT 5
++/* Set or get info? */
++#define XEN_DOMCTL_SCHEDOP_putinfo 0
++#define XEN_DOMCTL_SCHEDOP_getinfo 1
++struct xen_domctl_scheduler_op {
++ uint32_t sched_id; /* XEN_SCHEDULER_* */
++ uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
++ union {
++ struct xen_domctl_sched_sedf {
++ uint64_aligned_t period;
++ uint64_aligned_t slice;
++ uint64_aligned_t latency;
++ uint32_t extratime;
++ uint32_t weight;
++ } sedf;
++ struct xen_domctl_sched_credit {
++ uint16_t weight;
++ uint16_t cap;
++ } credit;
++ } u;
++};
++typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
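++
++/*
++ * Example (illustrative): capping a domain at half of one physical CPU
++ * under the credit scheduler, assuming cap is expressed as a percentage
++ * of one CPU and 256 is the default weight.
++ *
++ *     dc.cmd = XEN_DOMCTL_scheduler_op;
++ *     dc.u.scheduler_op.sched_id        = XEN_SCHEDULER_CREDIT;
++ *     dc.u.scheduler_op.cmd             = XEN_DOMCTL_SCHEDOP_putinfo;
++ *     dc.u.scheduler_op.u.credit.weight = 256;
++ *     dc.u.scheduler_op.u.credit.cap    = 50;    // 50% of one CPU
++ */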
++
++
++#define XEN_DOMCTL_setdomainhandle 17
++struct xen_domctl_setdomainhandle {
++ xen_domain_handle_t handle;
++};
++typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
++
++
++#define XEN_DOMCTL_setdebugging 18
++struct xen_domctl_setdebugging {
++ uint8_t enable;
++};
++typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
++
++
++#define XEN_DOMCTL_irq_permission 19
++struct xen_domctl_irq_permission {
++ uint8_t pirq;
++ uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
++};
++typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
++
++
++#define XEN_DOMCTL_iomem_permission 20
++struct xen_domctl_iomem_permission {
++ uint64_aligned_t first_mfn;/* first page (physical page number) in range */
++ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
++ uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
++};
++typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
++
++
++#define XEN_DOMCTL_ioport_permission 21
++struct xen_domctl_ioport_permission {
++ uint32_t first_port; /* first port in range */
++ uint32_t nr_ports; /* size of port range */
++ uint8_t allow_access; /* allow or deny access to range? */
++};
++typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
++
++
++#define XEN_DOMCTL_hypercall_init 22
++struct xen_domctl_hypercall_init {
++ uint64_aligned_t gmfn; /* GMFN to be initialised */
++};
++typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
++
++
++#define XEN_DOMCTL_arch_setup 23
++#define _XEN_DOMAINSETUP_hvm_guest 0
++#define XEN_DOMAINSETUP_hvm_guest (1UL<<_XEN_DOMAINSETUP_hvm_guest)
++#define _XEN_DOMAINSETUP_query 1 /* Get parameters (for save) */
++#define XEN_DOMAINSETUP_query (1UL<<_XEN_DOMAINSETUP_query)
++#define _XEN_DOMAINSETUP_sioemu_guest 2
++#define XEN_DOMAINSETUP_sioemu_guest (1UL<<_XEN_DOMAINSETUP_sioemu_guest)
++typedef struct xen_domctl_arch_setup {
++ uint64_aligned_t flags; /* XEN_DOMAINSETUP_* */
++#ifdef __ia64__
++ uint64_aligned_t bp; /* mpaddr of boot param area */
++ uint64_aligned_t maxmem; /* Highest memory address for MDT. */
++ uint64_aligned_t xsi_va; /* Xen shared_info area virtual address. */
++ uint32_t hypercall_imm; /* Break imm for Xen hypercalls. */
++ int8_t vhpt_size_log2; /* Log2 of VHPT size. */
++#endif
++} xen_domctl_arch_setup_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_arch_setup_t);
++
++
++#define XEN_DOMCTL_settimeoffset 24
++struct xen_domctl_settimeoffset {
++ int32_t time_offset_seconds; /* applied to domain wallclock time */
++};
++typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
++
++
++#define XEN_DOMCTL_gethvmcontext 33
++#define XEN_DOMCTL_sethvmcontext 34
++typedef struct xen_domctl_hvmcontext {
++ uint32_t size; /* IN/OUT: size of buffer / bytes filled */
++ XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
++ * gethvmcontext with NULL
++ * buffer to get size req'd */
++} xen_domctl_hvmcontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
++
++
++#define XEN_DOMCTL_set_address_size 35
++#define XEN_DOMCTL_get_address_size 36
++typedef struct xen_domctl_address_size {
++ uint32_t size;
++} xen_domctl_address_size_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
++
++
++#define XEN_DOMCTL_real_mode_area 26
++struct xen_domctl_real_mode_area {
++ uint32_t log; /* log2 of Real Mode Area size */
++};
++typedef struct xen_domctl_real_mode_area xen_domctl_real_mode_area_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_real_mode_area_t);
++
++
++#define XEN_DOMCTL_sendtrigger 28
++#define XEN_DOMCTL_SENDTRIGGER_NMI 0
++#define XEN_DOMCTL_SENDTRIGGER_RESET 1
++#define XEN_DOMCTL_SENDTRIGGER_INIT 2
++struct xen_domctl_sendtrigger {
++ uint32_t trigger; /* IN */
++ uint32_t vcpu; /* IN */
++};
++typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
++
++
++/* Assign PCI device to HVM guest. Sets up IOMMU structures. */
++#define XEN_DOMCTL_assign_device 37
++#define XEN_DOMCTL_test_assign_device 45
++#define XEN_DOMCTL_deassign_device 47
++struct xen_domctl_assign_device {
++ uint32_t machine_bdf; /* machine PCI ID of assigned device */
++};
++typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
++
++/* Retrieve information on the sibling devices of machine_bdf. */
++#define XEN_DOMCTL_get_device_group 50
++struct xen_domctl_get_device_group {
++ uint32_t machine_bdf; /* IN */
++ uint32_t max_sdevs; /* IN */
++ uint32_t num_sdevs; /* OUT */
++ XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
++};
++typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
++
++/* Pass-through interrupts: bind real irq -> hvm devfn. */
++#define XEN_DOMCTL_bind_pt_irq 38
++#define XEN_DOMCTL_unbind_pt_irq 48
++typedef enum pt_irq_type_e {
++ PT_IRQ_TYPE_PCI,
++ PT_IRQ_TYPE_ISA,
++ PT_IRQ_TYPE_MSI,
++} pt_irq_type_t;
++struct xen_domctl_bind_pt_irq {
++ uint32_t machine_irq;
++ pt_irq_type_t irq_type;
++ uint32_t hvm_domid;
++
++ union {
++ struct {
++ uint8_t isa_irq;
++ } isa;
++ struct {
++ uint8_t bus;
++ uint8_t device;
++ uint8_t intx;
++ } pci;
++ struct {
++ uint8_t gvec;
++ uint32_t gflags;
++ } msi;
++ } u;
++};
++typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
++
++
++/* Bind machine I/O address range -> HVM address range. */
++#define XEN_DOMCTL_memory_mapping 39
++#define DPCI_ADD_MAPPING 1
++#define DPCI_REMOVE_MAPPING 0
++struct xen_domctl_memory_mapping {
++ uint64_aligned_t first_gfn; /* first page (hvm guest phys page) in range */
++ uint64_aligned_t first_mfn; /* first page (machine page) in range */
++ uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
++ uint32_t add_mapping; /* add or remove mapping */
++ uint32_t padding; /* padding for 64-bit aligned structure */
++};
++typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
++
++
++/* Bind machine I/O port range -> HVM I/O port range. */
++#define XEN_DOMCTL_ioport_mapping 40
++struct xen_domctl_ioport_mapping {
++ uint32_t first_gport; /* first guest IO port */
++ uint32_t first_mport; /* first machine IO port */
++ uint32_t nr_ports; /* size of port range */
++ uint32_t add_mapping; /* add or remove mapping */
++};
++typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
++
++
++/*
++ * Pin caching type of RAM space for x86 HVM domU.
++ */
++#define XEN_DOMCTL_pin_mem_cacheattr 41
++/* Caching types: these happen to be the same as x86 MTRR/PAT type codes. */
++#define XEN_DOMCTL_MEM_CACHEATTR_UC 0
++#define XEN_DOMCTL_MEM_CACHEATTR_WC 1
++#define XEN_DOMCTL_MEM_CACHEATTR_WT 4
++#define XEN_DOMCTL_MEM_CACHEATTR_WP 5
++#define XEN_DOMCTL_MEM_CACHEATTR_WB 6
++#define XEN_DOMCTL_MEM_CACHEATTR_UCM 7
++struct xen_domctl_pin_mem_cacheattr {
++ uint64_aligned_t start, end;
++ unsigned int type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
++};
++typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
++
++
++#define XEN_DOMCTL_set_ext_vcpucontext 42
++#define XEN_DOMCTL_get_ext_vcpucontext 43
++struct xen_domctl_ext_vcpucontext {
++ /* IN: VCPU that this call applies to. */
++ uint32_t vcpu;
++ /*
++ * SET: Size of struct (IN)
++ * GET: Size of struct (OUT)
++ */
++ uint32_t size;
++#if defined(__i386__) || defined(__x86_64__)
++ /* SYSCALL from 32-bit mode and SYSENTER callback information. */
++ /* NB. SYSCALL from 64-bit mode is contained in vcpu_guest_context_t */
++ uint64_aligned_t syscall32_callback_eip;
++ uint64_aligned_t sysenter_callback_eip;
++ uint16_t syscall32_callback_cs;
++ uint16_t sysenter_callback_cs;
++ uint8_t syscall32_disables_events;
++ uint8_t sysenter_disables_events;
++#endif
++};
++typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
++
++/*
++ * Set optimization features for a domain
++ */
++#define XEN_DOMCTL_set_opt_feature 44
++struct xen_domctl_set_opt_feature {
++#if defined(__ia64__)
++ struct xen_ia64_opt_feature optf;
++#else
++ /* Make struct non-empty: do not depend on this field name! */
++ uint64_t dummy;
++#endif
++};
++typedef struct xen_domctl_set_opt_feature xen_domctl_set_opt_feature_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_opt_feature_t);
++
++/*
++ * Set the target domain for a domain
++ */
++#define XEN_DOMCTL_set_target 46
++struct xen_domctl_set_target {
++ domid_t target;
++};
++typedef struct xen_domctl_set_target xen_domctl_set_target_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
++
++#if defined(__i386__) || defined(__x86_64__)
++# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
++# define XEN_DOMCTL_set_cpuid 49
++struct xen_domctl_cpuid {
++ unsigned int input[2];
++ unsigned int eax;
++ unsigned int ebx;
++ unsigned int ecx;
++ unsigned int edx;
++};
++typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
++#endif
++
++#define XEN_DOMCTL_subscribe 29
++struct xen_domctl_subscribe {
++ uint32_t port; /* IN */
++};
++typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
++
++/*
++ * Define the maximum machine address size which should be allocated
++ * to a guest.
++ */
++#define XEN_DOMCTL_set_machine_address_size 51
++#define XEN_DOMCTL_get_machine_address_size 52
++
++/*
++ * Do not inject spurious page faults into this domain.
++ */
++#define XEN_DOMCTL_suppress_spurious_page_faults 53
++
++struct xen_domctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_DOMCTL_INTERFACE_VERSION */
++ domid_t domain;
++ union {
++ struct xen_domctl_createdomain createdomain;
++ struct xen_domctl_getdomaininfo getdomaininfo;
++ struct xen_domctl_getmemlist getmemlist;
++ struct xen_domctl_getpageframeinfo getpageframeinfo;
++ struct xen_domctl_getpageframeinfo2 getpageframeinfo2;
++ struct xen_domctl_vcpuaffinity vcpuaffinity;
++ struct xen_domctl_shadow_op shadow_op;
++ struct xen_domctl_max_mem max_mem;
++ struct xen_domctl_vcpucontext vcpucontext;
++ struct xen_domctl_getvcpuinfo getvcpuinfo;
++ struct xen_domctl_max_vcpus max_vcpus;
++ struct xen_domctl_scheduler_op scheduler_op;
++ struct xen_domctl_setdomainhandle setdomainhandle;
++ struct xen_domctl_setdebugging setdebugging;
++ struct xen_domctl_irq_permission irq_permission;
++ struct xen_domctl_iomem_permission iomem_permission;
++ struct xen_domctl_ioport_permission ioport_permission;
++ struct xen_domctl_hypercall_init hypercall_init;
++ struct xen_domctl_arch_setup arch_setup;
++ struct xen_domctl_settimeoffset settimeoffset;
++ struct xen_domctl_real_mode_area real_mode_area;
++ struct xen_domctl_hvmcontext hvmcontext;
++ struct xen_domctl_address_size address_size;
++ struct xen_domctl_sendtrigger sendtrigger;
++ struct xen_domctl_get_device_group get_device_group;
++ struct xen_domctl_assign_device assign_device;
++ struct xen_domctl_bind_pt_irq bind_pt_irq;
++ struct xen_domctl_memory_mapping memory_mapping;
++ struct xen_domctl_ioport_mapping ioport_mapping;
++ struct xen_domctl_pin_mem_cacheattr pin_mem_cacheattr;
++ struct xen_domctl_ext_vcpucontext ext_vcpucontext;
++ struct xen_domctl_set_opt_feature set_opt_feature;
++ struct xen_domctl_set_target set_target;
++ struct xen_domctl_subscribe subscribe;
++#if defined(__i386__) || defined(__x86_64__)
++ struct xen_domctl_cpuid cpuid;
++#endif
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_domctl xen_domctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_domctl_t);
++
++#endif /* __XEN_PUBLIC_DOMCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/hvm/e820.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/e820.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,34 @@
++
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_E820_H__
++#define __XEN_PUBLIC_HVM_E820_H__
++
++/* E820 location in HVM virtual address space. */
++#define HVM_E820_PAGE 0x00090000
++#define HVM_E820_NR_OFFSET 0x000001E8
++#define HVM_E820_OFFSET 0x000002D0
++
++#define HVM_BELOW_4G_RAM_END 0xF0000000
++#define HVM_BELOW_4G_MMIO_START HVM_BELOW_4G_RAM_END
++#define HVM_BELOW_4G_MMIO_LENGTH ((1ULL << 32) - HVM_BELOW_4G_MMIO_START)
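++
++/* With the values above, the MMIO hole spans 0xF0000000-0xFFFFFFFF,
++ * i.e. HVM_BELOW_4G_MMIO_LENGTH works out to 0x10000000 (256MB). */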
++
++#endif /* __XEN_PUBLIC_HVM_E820_H__ */
+Index: head-2008-11-25/include/xen/interface/hvm/hvm_info_table.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/hvm_info_table.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,41 @@
++/******************************************************************************
++ * hvm/hvm_info_table.h
++ *
++ * HVM parameter and information table, written into guest memory map.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++#define __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__
++
++#define HVM_INFO_PFN 0x09F
++#define HVM_INFO_OFFSET 0x800
++#define HVM_INFO_PADDR ((HVM_INFO_PFN << 12) + HVM_INFO_OFFSET)
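++/* For reference: (0x09F << 12) + 0x800 == 0x9F800, i.e. the table sits
++ * at guest physical address 0x9F800, just below the legacy VGA hole. */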
++
++struct hvm_info_table {
++ char signature[8]; /* "HVM INFO" */
++ uint32_t length;
++ uint8_t checksum;
++ uint8_t acpi_enabled;
++ uint8_t apic_mode;
++ uint32_t nr_vcpus;
++};
++
++#endif /* __XEN_PUBLIC_HVM_HVM_INFO_TABLE_H__ */
+Index: head-2008-11-25/include/xen/interface/hvm/hvm_op.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/hvm_op.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,131 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_HVM_OP_H__
++#define __XEN_PUBLIC_HVM_HVM_OP_H__
++
++/* Get/set subcommands: extra argument == pointer to xen_hvm_param struct. */
++#define HVMOP_set_param 0
++#define HVMOP_get_param 1
++struct xen_hvm_param {
++ domid_t domid; /* IN */
++ uint32_t index; /* IN */
++ uint64_t value; /* IN/OUT */
++};
++typedef struct xen_hvm_param xen_hvm_param_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_param_t);
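++
++/*
++ * Sketch of a get_param round trip ("do_hvm_op" is a hypothetical
++ * hypercall helper; HVM_PARAM_STORE_PFN comes from params.h):
++ *
++ *     struct xen_hvm_param p = { .domid = dom,
++ *                                .index = HVM_PARAM_STORE_PFN };
++ *     do_hvm_op(HVMOP_get_param, &p);
++ *     // p.value now holds the xenstore ring pfn
++ */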
++
++/* Set the logical level of one of a domain's PCI INTx wires. */
++#define HVMOP_set_pci_intx_level 2
++struct xen_hvm_set_pci_intx_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI INTx identification in PCI topology (domain:bus:device:intx). */
++ uint8_t domain, bus, device, intx;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_pci_intx_level xen_hvm_set_pci_intx_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_intx_level_t);
++
++/* Set the logical level of one of a domain's ISA IRQ wires. */
++#define HVMOP_set_isa_irq_level 3
++struct xen_hvm_set_isa_irq_level {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* ISA device identification, by ISA IRQ (0-15). */
++ uint8_t isa_irq;
++ /* Assertion level (0 = unasserted, 1 = asserted). */
++ uint8_t level;
++};
++typedef struct xen_hvm_set_isa_irq_level xen_hvm_set_isa_irq_level_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_isa_irq_level_t);
++
++#define HVMOP_set_pci_link_route 4
++struct xen_hvm_set_pci_link_route {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* PCI link identifier (0-3). */
++ uint8_t link;
++ /* ISA IRQ (1-15), or 0 (disable link). */
++ uint8_t isa_irq;
++};
++typedef struct xen_hvm_set_pci_link_route xen_hvm_set_pci_link_route_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_pci_link_route_t);
++
++/* Flushes all VCPU TLBs: @arg must be NULL. */
++#define HVMOP_flush_tlbs 5
++
++/* The following tools-only interfaces may change in the future. */
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++
++/* Track dirty VRAM. */
++#define HVMOP_track_dirty_vram 6
++struct xen_hvm_track_dirty_vram {
++ /* Domain to be tracked. */
++ domid_t domid;
++ /* First pfn to track. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages to track. */
++ uint64_aligned_t nr;
++ /* OUT variable. */
++ /* Dirty bitmap buffer. */
++ XEN_GUEST_HANDLE_64(uint8) dirty_bitmap;
++};
++typedef struct xen_hvm_track_dirty_vram xen_hvm_track_dirty_vram_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_track_dirty_vram_t);
++
++/* Notify that some pages got modified by the Device Model. */
++#define HVMOP_modified_memory 7
++struct xen_hvm_modified_memory {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* First pfn. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages. */
++ uint64_aligned_t nr;
++};
++typedef struct xen_hvm_modified_memory xen_hvm_modified_memory_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_modified_memory_t);
++
++#define HVMOP_set_mem_type 8
++typedef enum {
++ HVMMEM_ram_rw, /* Normal read/write guest RAM */
++ HVMMEM_ram_ro, /* Read-only; writes are discarded */
++ HVMMEM_mmio_dm, /* Reads and writes go to the device model */
++} hvmmem_type_t;
++/* Notify that a region of memory is to be treated in a specific way. */
++struct xen_hvm_set_mem_type {
++ /* Domain to be updated. */
++ domid_t domid;
++ /* Memory type */
++ hvmmem_type_t hvmmem_type;
++ /* First pfn. */
++ uint64_aligned_t first_pfn;
++ /* Number of pages. */
++ uint64_aligned_t nr;
++};
++typedef struct xen_hvm_set_mem_type xen_hvm_set_mem_type_t;
++DEFINE_XEN_GUEST_HANDLE(xen_hvm_set_mem_type_t);
++
++
++#endif /* defined(__XEN__) || defined(__XEN_TOOLS__) */
++
++#endif /* __XEN_PUBLIC_HVM_HVM_OP_H__ */
+Index: head-2008-11-25/include/xen/interface/hvm/ioreq.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/ioreq.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,127 @@
++/*
++ * ioreq.h: I/O request definitions for device models
++ * Copyright (c) 2004, Intel Corporation.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef _IOREQ_H_
++#define _IOREQ_H_
++
++#define IOREQ_READ 1
++#define IOREQ_WRITE 0
++
++#define STATE_IOREQ_NONE 0
++#define STATE_IOREQ_READY 1
++#define STATE_IOREQ_INPROCESS 2
++#define STATE_IORESP_READY 3
++
++#define IOREQ_TYPE_PIO 0 /* pio */
++#define IOREQ_TYPE_COPY 1 /* mmio ops */
++#define IOREQ_TYPE_TIMEOFFSET 7
++#define IOREQ_TYPE_INVALIDATE 8 /* mapcache */
++
++/*
++ * The VMExit dispatcher should cooperate with the instruction decoder
++ * to prepare this structure, and notify the service OS and device
++ * model (DM) by sending a virq.
++ */
++struct ioreq {
++ uint64_t addr; /* physical address */
++ uint64_t size; /* size in bytes */
++ uint64_t count; /* for rep prefixes */
++ uint64_t data; /* data (or paddr of data) */
++ uint8_t state:4;
++ uint8_t data_is_ptr:1; /* if 1, data above is the guest paddr
++ * of the real data to use. */
++ uint8_t dir:1; /* 1=read, 0=write */
++ uint8_t df:1;
++ uint8_t pad:1;
++ uint8_t type; /* I/O type */
++ uint8_t _pad0[6];
++ uint64_t io_count; /* Number of I/O operations completed on this vcpu */
++};
++typedef struct ioreq ioreq_t;
++
++struct vcpu_iodata {
++ struct ioreq vp_ioreq;
++ /* Event channel port, used for notifications to/from the device model. */
++ uint32_t vp_eport;
++ uint32_t _pad0;
++};
++typedef struct vcpu_iodata vcpu_iodata_t;
++
++struct shared_iopage {
++ struct vcpu_iodata vcpu_iodata[1];
++};
++typedef struct shared_iopage shared_iopage_t;
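++
++/*
++ * How a device model might service one request (illustrative only;
++ * "emulate_pio_read" is hypothetical, and real consumers also use the
++ * STATE_IOREQ_INPROCESS step and memory barriers):
++ *
++ *     ioreq_t *req = &iopage->vcpu_iodata[vcpu].vp_ioreq;
++ *     if (req->state == STATE_IOREQ_READY &&
++ *         req->type  == IOREQ_TYPE_PIO && req->dir == IOREQ_READ)
++ *         req->data = emulate_pio_read(req->addr, req->size);
++ *     req->state = STATE_IORESP_READY;   // hand the response back
++ */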
++
++struct buf_ioreq {
++ uint8_t type; /* I/O type */
++ uint8_t pad:1;
++ uint8_t dir:1; /* 1=read, 0=write */
++ uint8_t size:2; /* 0=>1, 1=>2, 2=>4, 3=>8. If 8, use two buf_ioreqs */
++ uint32_t addr:20;/* physical address */
++ uint32_t data; /* data */
++};
++typedef struct buf_ioreq buf_ioreq_t;
++
++#define IOREQ_BUFFER_SLOT_NUM 511 /* 8 bytes each, plus 2 4-byte indexes */
++struct buffered_iopage {
++ unsigned int read_pointer;
++ unsigned int write_pointer;
++ buf_ioreq_t buf_ioreq[IOREQ_BUFFER_SLOT_NUM];
++}; /* NB. Size of this structure must be no greater than one page. */
++typedef struct buffered_iopage buffered_iopage_t;
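++
++/*
++ * The pointers are free-running; a consumer indexes the slot array
++ * modulo IOREQ_BUFFER_SLOT_NUM (sketch; "handle_buf_ioreq" is
++ * hypothetical and a read barrier may be needed per iteration):
++ *
++ *     while (pg->read_pointer != pg->write_pointer) {
++ *         buf_ioreq_t *b =
++ *             &pg->buf_ioreq[pg->read_pointer % IOREQ_BUFFER_SLOT_NUM];
++ *         handle_buf_ioreq(b);
++ *         pg->read_pointer++;
++ *     }
++ */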
++
++#if defined(__ia64__)
++struct pio_buffer {
++ uint32_t page_offset;
++ uint32_t pointer;
++ uint32_t data_end;
++ uint32_t buf_size;
++ void *opaque;
++};
++
++#define PIO_BUFFER_IDE_PRIMARY 0 /* I/O port = 0x1F0 */
++#define PIO_BUFFER_IDE_SECONDARY 1 /* I/O port = 0x170 */
++#define PIO_BUFFER_ENTRY_NUM 2
++struct buffered_piopage {
++ struct pio_buffer pio[PIO_BUFFER_ENTRY_NUM];
++ uint8_t buffer[1];
++};
++#endif /* defined(__ia64__) */
++
++#define ACPI_PM1A_EVT_BLK_ADDRESS 0x0000000000001f40
++#define ACPI_PM1A_CNT_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x04)
++#define ACPI_PM_TMR_BLK_ADDRESS (ACPI_PM1A_EVT_BLK_ADDRESS + 0x08)
++#define ACPI_GPE0_BLK_ADDRESS (ACPI_PM_TMR_BLK_ADDRESS + 0x20)
++#define ACPI_GPE0_BLK_LEN 0x08
++
++#endif /* _IOREQ_H_ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/hvm/params.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/params.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,105 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_PARAMS_H__
++#define __XEN_PUBLIC_HVM_PARAMS_H__
++
++#include "hvm_op.h"
++
++/*
++ * Parameter space for HVMOP_{set,get}_param.
++ */
++
++/*
++ * How should CPU0 event-channel notifications be delivered?
++ * val[63:56] == 0: val[55:0] is a delivery GSI (Global System Interrupt).
++ * val[63:56] == 1: val[55:0] is a delivery PCI INTx line, as follows:
++ * Domain = val[47:32], Bus = val[31:16],
++ * DevFn = val[15: 8], IntX = val[ 1: 0]
++ * If val == 0 then CPU0 event-channel notifications are not delivered.
++ */
++#define HVM_PARAM_CALLBACK_IRQ 0
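++
++/* Worked example of the encoding above (illustrative): to deliver via
++ * PCI INTA of device 00:03.0 in PCI domain 0, devfn is (3 << 3) | 0 =
++ * 0x18 and val = (1ULL << 56) | (0x18ULL << 8) | 0. */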
++
++/*
++ * These are not used by Xen. They are here for convenience of HVM-guest
++ * xenbus implementations.
++ */
++#define HVM_PARAM_STORE_PFN 1
++#define HVM_PARAM_STORE_EVTCHN 2
++
++#define HVM_PARAM_PAE_ENABLED 4
++
++#define HVM_PARAM_IOREQ_PFN 5
++
++#define HVM_PARAM_BUFIOREQ_PFN 6
++
++#ifdef __ia64__
++
++#define HVM_PARAM_NVRAM_FD 7
++#define HVM_PARAM_VHPT_SIZE 8
++#define HVM_PARAM_BUFPIOREQ_PFN 9
++
++#elif defined(__i386__) || defined(__x86_64__)
++
++/* Expose Viridian interfaces to this HVM guest? */
++#define HVM_PARAM_VIRIDIAN 9
++
++#endif
++
++/*
++ * Set mode for virtual timers (currently x86 only):
++ * delay_for_missed_ticks (default):
++ * Do not advance a vcpu's time beyond the correct delivery time for
++ * interrupts that have been missed due to preemption. Deliver missed
++ * interrupts when the vcpu is rescheduled and advance the vcpu's virtual
++ * time stepwise for each one.
++ * no_delay_for_missed_ticks:
++ * As above, missed interrupts are delivered, but guest time always tracks
++ * wallclock (i.e., real) time while doing so.
++ * no_missed_ticks_pending:
++ * No missed interrupts are held pending. Instead, to ensure ticks are
++ * delivered at some non-zero rate, if we detect missed ticks then the
++ * internal tick alarm is not disabled if the VCPU is preempted during the
++ * next tick period.
++ * one_missed_tick_pending:
++ * Missed interrupts are collapsed together and delivered as one 'late tick'.
++ * Guest time always tracks wallclock (i.e., real) time.
++ */
++#define HVM_PARAM_TIMER_MODE 10
++#define HVMPTM_delay_for_missed_ticks 0
++#define HVMPTM_no_delay_for_missed_ticks 1
++#define HVMPTM_no_missed_ticks_pending 2
++#define HVMPTM_one_missed_tick_pending 3
++
++/* Boolean: Enable virtual HPET (high-precision event timer)? (x86-only) */
++#define HVM_PARAM_HPET_ENABLED 11
++
++/* Identity-map page directory used by Intel EPT when CR0.PG=0. */
++#define HVM_PARAM_IDENT_PT 12
++
++/* Device Model domain, defaults to 0. */
++#define HVM_PARAM_DM_DOMAIN 13
++
++/* ACPI S state: currently support S0 and S3 on x86. */
++#define HVM_PARAM_ACPI_S_STATE 14
++
++#define HVM_NR_PARAMS 15
++
++#endif /* __XEN_PUBLIC_HVM_PARAMS_H__ */
+Index: head-2008-11-25/include/xen/interface/hvm/save.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/hvm/save.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,88 @@
++/*
++ * hvm/save.h
++ *
++ * Structure definitions for HVM state that is held by Xen and must
++ * be saved along with the domain's memory and device-model state.
++ *
++ * Copyright (c) 2007 XenSource Ltd.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_PUBLIC_HVM_SAVE_H__
++#define __XEN_PUBLIC_HVM_SAVE_H__
++
++/*
++ * Structures in this header *must* have the same layout in 32bit
++ * and 64bit environments: this means that all fields must be explicitly
++ * sized types and aligned to their sizes, and the structs must be
++ * a multiple of eight bytes long.
++ *
++ * Only the state necessary for saving and restoring (i.e. fields
++ * that are analogous to actual hardware state) should go in this file.
++ * Internal mechanisms should be kept in Xen-private headers.
++ */
++
++#if !defined(__GNUC__) || defined(__STRICT_ANSI__)
++#error "Anonymous structs/unions are a GNU extension."
++#endif
++
++/*
++ * Each entry is preceded by a descriptor giving its type and length
++ */
++struct hvm_save_descriptor {
++ uint16_t typecode; /* Used to demux the various types below */
++ uint16_t instance; /* Further demux within a type */
++ uint32_t length; /* In bytes, *not* including this descriptor */
++};
++
++
++/*
++ * Each entry has a datatype associated with it: for example, the CPU state
++ * is saved as a HVM_SAVE_TYPE(CPU), which has HVM_SAVE_LENGTH(CPU),
++ * and is identified by a descriptor with typecode HVM_SAVE_CODE(CPU).
++ * DECLARE_HVM_SAVE_TYPE binds these things together with some type-system
++ * ugliness.
++ */
++
++#define DECLARE_HVM_SAVE_TYPE(_x, _code, _type) \
++ struct __HVM_SAVE_TYPE_##_x { _type t; char c[_code]; }
++
++#define HVM_SAVE_TYPE(_x) typeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->t)
++#define HVM_SAVE_LENGTH(_x) (sizeof (HVM_SAVE_TYPE(_x)))
++#define HVM_SAVE_CODE(_x) (sizeof (((struct __HVM_SAVE_TYPE_##_x *)(0))->c))
++
++
++/*
++ * The series of save records is terminated by a zero-type, zero-length
++ * descriptor.
++ */
++
++struct hvm_save_end {};
++DECLARE_HVM_SAVE_TYPE(END, 0, struct hvm_save_end);
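++
++/*
++ * Illustrative expansion of the macros for the END record declared
++ * above: HVM_SAVE_TYPE(END) is struct hvm_save_end, HVM_SAVE_LENGTH(END)
++ * is its size (0 here), and HVM_SAVE_CODE(END) recovers the typecode 0
++ * as the sizeof of the zero-length array c[0].
++ */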
++
++#if defined(__i386__) || defined(__x86_64__)
++#include "../arch-x86/hvm/save.h"
++#elif defined(__ia64__)
++#include "../arch-ia64/hvm/save.h"
++#else
++#error "unsupported architecture"
++#endif
++
++#endif /* __XEN_PUBLIC_HVM_SAVE_H__ */
+Index: head-2008-11-25/include/xen/interface/io/fsif.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/io/fsif.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,191 @@
++/******************************************************************************
++ * fsif.h
++ *
++ * Interface to FS level split device drivers.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2007, Grzegorz Milos, <gm281@cam.ac.uk>.
++ */
++
++#ifndef __XEN_PUBLIC_IO_FSIF_H__
++#define __XEN_PUBLIC_IO_FSIF_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++#define REQ_FILE_OPEN 1
++#define REQ_FILE_CLOSE 2
++#define REQ_FILE_READ 3
++#define REQ_FILE_WRITE 4
++#define REQ_STAT 5
++#define REQ_FILE_TRUNCATE 6
++#define REQ_REMOVE 7
++#define REQ_RENAME 8
++#define REQ_CREATE 9
++#define REQ_DIR_LIST 10
++#define REQ_CHMOD 11
++#define REQ_FS_SPACE 12
++#define REQ_FILE_SYNC 13
++
++struct fsif_open_request {
++ grant_ref_t gref;
++};
++
++struct fsif_close_request {
++ uint32_t fd;
++};
++
++struct fsif_read_request {
++ uint32_t fd;
++ int32_t pad;
++ uint64_t len;
++ uint64_t offset;
++ grant_ref_t grefs[1]; /* Variable length */
++};
++
++struct fsif_write_request {
++ uint32_t fd;
++ int32_t pad;
++ uint64_t len;
++ uint64_t offset;
++ grant_ref_t grefs[1]; /* Variable length */
++};
++
++struct fsif_stat_request {
++ uint32_t fd;
++};
++
++/* This structure is a copy of selected fields from the stat structure,
++ * returned via the ring. */
++struct fsif_stat_response {
++ int32_t stat_mode;
++ uint32_t stat_uid;
++ uint32_t stat_gid;
++ int32_t stat_ret;
++ int64_t stat_size;
++ int64_t stat_atime;
++ int64_t stat_mtime;
++ int64_t stat_ctime;
++};
++
++struct fsif_truncate_request {
++ uint32_t fd;
++ int32_t pad;
++ int64_t length;
++};
++
++struct fsif_remove_request {
++ grant_ref_t gref;
++};
++
++struct fsif_rename_request {
++ uint16_t old_name_offset;
++ uint16_t new_name_offset;
++ grant_ref_t gref;
++};
++
++struct fsif_create_request {
++ int8_t directory;
++ int8_t pad;
++ int16_t pad2;
++ int32_t mode;
++ grant_ref_t gref;
++};
++
++struct fsif_list_request {
++ uint32_t offset;
++ grant_ref_t gref;
++};
++
++#define NR_FILES_SHIFT 0
++#define NR_FILES_SIZE 16 /* 16 bits for the number of files mask */
++#define NR_FILES_MASK (((1ULL << NR_FILES_SIZE) - 1) << NR_FILES_SHIFT)
++#define ERROR_SIZE 32 /* 32 bits for the error mask */
++#define ERROR_SHIFT (NR_FILES_SIZE + NR_FILES_SHIFT)
++#define ERROR_MASK (((1ULL << ERROR_SIZE) - 1) << ERROR_SHIFT)
++#define HAS_MORE_SHIFT (ERROR_SHIFT + ERROR_SIZE)
++#define HAS_MORE_FLAG (1ULL << HAS_MORE_SHIFT)
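++
++/*
++ * The masks above pack a directory-listing result into a single 64-bit
++ * ret_val; a caller would unpack it roughly like so (sketch):
++ *
++ *     uint64_t rv       = rsp->u.ret_val;
++ *     uint32_t nr_files = (rv & NR_FILES_MASK) >> NR_FILES_SHIFT;
++ *     int32_t  error    = (rv & ERROR_MASK)    >> ERROR_SHIFT;
++ *     int      has_more = !!(rv & HAS_MORE_FLAG);
++ */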
++
++struct fsif_chmod_request {
++ uint32_t fd;
++ int32_t mode;
++};
++
++struct fsif_space_request {
++ grant_ref_t gref;
++};
++
++struct fsif_sync_request {
++ uint32_t fd;
++};
++
++
++/* FS operation request */
++struct fsif_request {
++ uint8_t type; /* Type of the request */
++ uint8_t pad;
++ uint16_t id; /* Request ID, copied to the response */
++ uint32_t pad2;
++ union {
++ struct fsif_open_request fopen;
++ struct fsif_close_request fclose;
++ struct fsif_read_request fread;
++ struct fsif_write_request fwrite;
++ struct fsif_stat_request fstat;
++ struct fsif_truncate_request ftruncate;
++ struct fsif_remove_request fremove;
++ struct fsif_rename_request frename;
++ struct fsif_create_request fcreate;
++ struct fsif_list_request flist;
++ struct fsif_chmod_request fchmod;
++ struct fsif_space_request fspace;
++ struct fsif_sync_request fsync;
++ } u;
++};
++typedef struct fsif_request fsif_request_t;
++
++/* FS operation response */
++struct fsif_response {
++ uint16_t id;
++ uint16_t pad1;
++ uint32_t pad2;
++ union {
++ uint64_t ret_val;
++ struct fsif_stat_response fstat;
++ };
++};
++
++typedef struct fsif_response fsif_response_t;
++
++#define FSIF_RING_ENTRY_SIZE 64
++
++#define FSIF_NR_READ_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_read_request)) / \
++ sizeof(grant_ref_t) + 1)
++#define FSIF_NR_WRITE_GNTS ((FSIF_RING_ENTRY_SIZE - sizeof(struct fsif_write_request)) / \
++ sizeof(grant_ref_t) + 1)
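++
++/* i.e. each 64-byte ring slot holds the fixed part of a read/write
++ * request plus as many extra grant refs as fit in the remainder; the
++ * "+ 1" accounts for the grefs[1] element already inside the struct. */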
++
++DEFINE_RING_TYPES(fsif, struct fsif_request, struct fsif_response);
++
++#define STATE_INITIALISED "init"
++#define STATE_READY "ready"
++
++
++
++#endif
+Index: head-2008-11-25/include/xen/interface/io/pciif.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/io/pciif.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,101 @@
++/*
++ * PCI Backend/Frontend Common Data Structures & Macros
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCI_COMMON_H__
++#define __XEN_PCI_COMMON_H__
++
++/* Be sure to bump this number if you change this file */
++#define XEN_PCI_MAGIC "7"
++
++/* xen_pci_sharedinfo flags */
++#define _XEN_PCIF_active (0)
++#define XEN_PCIF_active (1<<_XEN_PCIF_active)
++
++/* xen_pci_op commands */
++#define XEN_PCI_OP_conf_read (0)
++#define XEN_PCI_OP_conf_write (1)
++#define XEN_PCI_OP_enable_msi (2)
++#define XEN_PCI_OP_disable_msi (3)
++#define XEN_PCI_OP_enable_msix (4)
++#define XEN_PCI_OP_disable_msix (5)
++
++/* xen_pci_op error numbers */
++#define XEN_PCI_ERR_success (0)
++#define XEN_PCI_ERR_dev_not_found (-1)
++#define XEN_PCI_ERR_invalid_offset (-2)
++#define XEN_PCI_ERR_access_denied (-3)
++#define XEN_PCI_ERR_not_implemented (-4)
++/* XEN_PCI_ERR_op_failed - backend failed to complete the operation */
++#define XEN_PCI_ERR_op_failed (-5)
++
++/*
++ * This should be (PAGE_SIZE - sizeof(struct xen_pci_op)) / sizeof(struct msix_entry)
++ * and should not exceed 128.
++ */
++#define SH_INFO_MAX_VEC 128
++
++struct xen_msix_entry {
++ uint16_t vector;
++ uint16_t entry;
++};
++struct xen_pci_op {
++ /* IN: what action to perform: XEN_PCI_OP_* */
++ uint32_t cmd;
++
++ /* OUT: will contain an error number (if any) from errno.h */
++ int32_t err;
++
++ /* IN: which device to touch */
++ uint32_t domain; /* PCI Domain/Segment */
++ uint32_t bus;
++ uint32_t devfn;
++
++ /* IN: which configuration registers to touch */
++ int32_t offset;
++ int32_t size;
++
++ /* IN/OUT: Contains the result after a READ or the value to WRITE */
++ uint32_t value;
++ /* IN: Contains extra info for this operation */
++ uint32_t info;
++ /* IN: parameters for MSI-X */
++ struct xen_msix_entry msix_entries[SH_INFO_MAX_VEC];
++};
++
++struct xen_pci_sharedinfo {
++ /* flags - XEN_PCIF_* */
++ uint32_t flags;
++ struct xen_pci_op op;
++};
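++
++/*
++ * Illustrative fill-in for a config-space read through the shared page
++ * (the event-channel signalling between frontend and backend is out of
++ * scope here):
++ *
++ *     struct xen_pci_op *op = &sharedinfo->op;
++ *     op->cmd    = XEN_PCI_OP_conf_read;
++ *     op->domain = 0;  op->bus = 0;
++ *     op->devfn  = (3 << 3) | 0;      // slot 3, function 0
++ *     op->offset = 0;  op->size = 4;  // 32-bit vendor/device ID
++ *     // on completion the backend fills op->err and op->value
++ */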
++
++#endif /* __XEN_PCI_COMMON_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/io/tpmif.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/io/tpmif.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,77 @@
++/******************************************************************************
++ * tpmif.h
++ *
++ * TPM I/O interface for Xen guest OSes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from tools/libxc/xen/io/netif.h
++ *
++ * Copyright (c) 2003-2004, Keir Fraser
++ */
++
++#ifndef __XEN_PUBLIC_IO_TPMIF_H__
++#define __XEN_PUBLIC_IO_TPMIF_H__
++
++#include "../grant_table.h"
++
++struct tpmif_tx_request {
++ unsigned long addr; /* Machine address of packet. */
++ grant_ref_t ref; /* grant table access reference */
++ uint16_t unused;
++ uint16_t size; /* Packet size in bytes. */
++};
++typedef struct tpmif_tx_request tpmif_tx_request_t;
++
++/*
++ * The TPMIF_TX_RING_SIZE defines the number of pages the
++ * front-end and backend can exchange (= size of array).
++ */
++typedef uint32_t TPMIF_RING_IDX;
++
++#define TPMIF_TX_RING_SIZE 1
++
++/* This structure must fit in a memory page. */
++
++struct tpmif_ring {
++ struct tpmif_tx_request req;
++};
++typedef struct tpmif_ring tpmif_ring_t;
++
++struct tpmif_tx_interface {
++ struct tpmif_ring ring[TPMIF_TX_RING_SIZE];
++};
++typedef struct tpmif_tx_interface tpmif_tx_interface_t;
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/io/vscsiif.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/io/vscsiif.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,105 @@
++/******************************************************************************
++ * vscsiif.h
++ *
++ * Based on the blkif.h code.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright(c) FUJITSU Limited 2008.
++ */
++
++#ifndef __XEN__PUBLIC_IO_SCSI_H__
++#define __XEN__PUBLIC_IO_SCSI_H__
++
++#include "ring.h"
++#include "../grant_table.h"
++
++/* command between backend and frontend */
++#define VSCSIIF_ACT_SCSI_CDB 1 /* SCSI CDB command */
++#define VSCSIIF_ACT_SCSI_ABORT 2 /* SCSI Device (LUN) Abort */
++#define VSCSIIF_ACT_SCSI_RESET 3 /* SCSI Device (LUN) Reset */
++
++
++#define VSCSIIF_BACK_MAX_PENDING_REQS 128
++
++/*
++ * Maximum scatter/gather segments per request.
++ *
++ * Balancing the goal of fitting at least 16 "vscsiif_request"
++ * structures on one page (4096 bytes) against the number of
++ * scatter-gather segments needed, 26 was chosen as the magic number.
++ */
++#define VSCSIIF_SG_TABLESIZE 26
++
++/*
++ * Based on Linux kernel 2.6.18.
++ */
++#define VSCSIIF_MAX_COMMAND_SIZE 16
++#define VSCSIIF_SENSE_BUFFERSIZE 96
++
++
++struct vscsiif_request {
++ uint16_t rqid; /* private guest value, echoed in resp */
++ uint8_t act; /* command between backend and frontend */
++ uint8_t cmd_len;
++
++ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++ uint16_t timeout_per_command; /* The backend issues the command
++ with twice this timeout value. */
++ uint16_t channel, id, lun;
++ uint16_t padding;
++ uint8_t sc_data_direction; /* for DMA_TO_DEVICE(1)
++ DMA_FROM_DEVICE(2)
++ DMA_NONE(3) requests */
++ uint8_t nr_segments; /* Number of pieces of scatter-gather */
++
++ struct scsiif_request_segment {
++ grant_ref_t gref;
++ uint16_t offset;
++ uint16_t length;
++ } seg[VSCSIIF_SG_TABLESIZE];
++ uint32_t reserved[3];
++};
++typedef struct vscsiif_request vscsiif_request_t;
++
++struct vscsiif_response {
++ uint16_t rqid;
++ uint8_t padding;
++ uint8_t sense_len;
++ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
++ int32_t rslt;
++ uint32_t residual_len; /* request bufflen minus the bytes
++ actually transferred by the physical device */
++ uint32_t reserved[36];
++};
++typedef struct vscsiif_response vscsiif_response_t;
++
++DEFINE_RING_TYPES(vscsiif, struct vscsiif_request, struct vscsiif_response);
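++
++/*
++ * DEFINE_RING_TYPES() generates struct vscsiif_sring (the shared page
++ * layout) plus the vscsiif_front_ring/vscsiif_back_ring accessors. A
++ * sketch of frontend-side setup, using the helper macros from ring.h
++ * on a zeroed shared page ("page" is an assumed pointer to it):
++ *
++ *     struct vscsiif_sring *sring = (struct vscsiif_sring *)page;
++ *     struct vscsiif_front_ring ring;
++ *     SHARED_RING_INIT(sring);
++ *     FRONT_RING_INIT(&ring, sring, PAGE_SIZE);
++ */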
++
++
++#endif /*__XEN__PUBLIC_IO_SCSI_H__*/
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/kexec.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/kexec.h 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,168 @@
++/******************************************************************************
++ * kexec.h - Public portion
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Xen port written by:
++ * - Simon 'Horms' Horman <horms@verge.net.au>
++ * - Magnus Damm <magnus@valinux.co.jp>
++ */
++
++#ifndef _XEN_PUBLIC_KEXEC_H
++#define _XEN_PUBLIC_KEXEC_H
++
++
++/* This file describes the Kexec / Kdump hypercall interface for Xen.
++ *
++ * Kexec under vanilla Linux allows a user to reboot the physical machine
++ * into a new user-specified kernel. The Xen port extends this idea
++ * to allow rebooting of the machine from dom0. When kexec for dom0
++ * is used to reboot, both the hypervisor and the domains get replaced
++ * with some other kernel. It is possible to kexec between vanilla
++ * Linux and Xen and back again. Xen to Xen works well too.
++ *
++ * The hypercall interface for kexec can be divided into three main
++ * types of hypercall operations:
++ *
++ * 1) Range information:
++ * This is used by the dom0 kernel to ask the hypervisor about various
++ * address information. This information is needed to allow kexec-tools
++ * to fill in the ELF headers for /proc/vmcore properly.
++ *
++ * 2) Load and unload of images:
++ * There are no big surprises here; the kexec binary from kexec-tools
++ * runs in userspace in dom0. The tool loads/unloads data such as the
++ * new kernel, initramfs and hypervisor into the dom0 kernel. Once
++ * loaded, the dom0 kernel performs a load hypercall operation, and
++ * before releasing all page references the dom0 kernel calls unload.
++ *
++ * 3) Kexec operation:
++ * This is used to start a previously loaded kernel.
++ */
++
++#include "xen.h"
++
++#if defined(__i386__) || defined(__x86_64__)
++#define KEXEC_XEN_NO_PAGES 17
++#endif
++
++/*
++ * Prototype for this hypercall is:
++ * int kexec_op(int cmd, void *args)
++ * @cmd == KEXEC_CMD_...
++ * KEXEC operation to perform
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++/*
++ * Kexec supports two types of operation:
++ * - kexec into a regular kernel, very similar to a standard reboot
++ * - KEXEC_TYPE_DEFAULT is used to specify this type
++ * - kexec into a special "crash kernel", aka kexec-on-panic
++ * - KEXEC_TYPE_CRASH is used to specify this type
++ * - parts of our system may be broken at kexec-on-panic time
++ * - the code should be kept as simple and self-contained as possible
++ */
++
++#define KEXEC_TYPE_DEFAULT 0
++#define KEXEC_TYPE_CRASH 1
++
++
++/* The kexec implementation for Xen allows the user to load two
++ * types of kernels, KEXEC_TYPE_DEFAULT and KEXEC_TYPE_CRASH.
++ * All data needed for a kexec reboot is kept in one xen_kexec_image_t
++ * per "instance". The data mainly consists of machine address lists to pages
++ * together with destination addresses. The data in xen_kexec_image_t
++ * is passed to the "code page" which is one page of code that performs
++ * the final relocations before jumping to the new kernel.
++ */
++
++typedef struct xen_kexec_image {
++#if defined(__i386__) || defined(__x86_64__)
++ unsigned long page_list[KEXEC_XEN_NO_PAGES];
++#endif
++#if defined(__ia64__)
++ unsigned long reboot_code_buffer;
++#endif
++ unsigned long indirection_page;
++ unsigned long start_address;
++} xen_kexec_image_t;
++
++/*
++ * Perform kexec having previously loaded a kexec or kdump kernel
++ * as appropriate.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ */
++#define KEXEC_CMD_kexec 0
++typedef struct xen_kexec_exec {
++ int type;
++} xen_kexec_exec_t;
++
++/*
++ * Load/Unload kernel image for kexec or kdump.
++ * type == KEXEC_TYPE_DEFAULT or KEXEC_TYPE_CRASH [in]
++ * image == relocation information for kexec (ignored for unload) [in]
++ */
++#define KEXEC_CMD_kexec_load 1
++#define KEXEC_CMD_kexec_unload 2
++typedef struct xen_kexec_load {
++ int type;
++ xen_kexec_image_t image;
++} xen_kexec_load_t;
++
++#define KEXEC_RANGE_MA_CRASH 0 /* machine address and size of crash area */
++#define KEXEC_RANGE_MA_XEN 1 /* machine address and size of Xen itself */
++#define KEXEC_RANGE_MA_CPU 2 /* machine address and size of a CPU note */
++#define KEXEC_RANGE_MA_XENHEAP 3 /* machine address and size of xenheap
++ * Note that although this is adjacent
++ * to Xen it exists in a separate EFI
++ * region on ia64, and thus needs to be
++ * inserted into iomem_machine separately */
++#define KEXEC_RANGE_MA_BOOT_PARAM 4 /* machine address and size of
++ * the ia64_boot_param */
++#define KEXEC_RANGE_MA_EFI_MEMMAP 5 /* machine address and size
++ * of the EFI Memory Map */
++#define KEXEC_RANGE_MA_VMCOREINFO 6 /* machine address and size of vmcoreinfo */
++
++/*
++ * Find the address and size of certain memory areas
++ * range == KEXEC_RANGE_... [in]
++ * nr == physical CPU number (starting from 0) if KEXEC_RANGE_MA_CPU [in]
++ * size == number of bytes reserved in window [out]
++ * start == address of the first byte in the window [out]
++ */
++#define KEXEC_CMD_kexec_get_range 3
++typedef struct xen_kexec_range {
++ int range;
++ int nr;
++ unsigned long size;
++ unsigned long start;
++} xen_kexec_range_t;
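++
++/*
++ * Illustrative sketch (HYPERVISOR_kexec_op() stands in for the kernel's
++ * hypercall wrapper and is not defined in this header): querying the
++ * crash area might look like
++ *
++ *     xen_kexec_range_t range = { .range = KEXEC_RANGE_MA_CRASH };
++ *     if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range) == 0)
++ *         ;  // range.start and range.size now describe the crash area
++ */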
++
++#endif /* _XEN_PUBLIC_KEXEC_H */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/nmi.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/nmi.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,78 @@
++/******************************************************************************
++ * nmi.h
++ *
++ * NMI callback registration and reason codes.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2005, Keir Fraser <keir@xensource.com>
++ */
++
++#ifndef __XEN_PUBLIC_NMI_H__
++#define __XEN_PUBLIC_NMI_H__
++
++/*
++ * NMI reason codes:
++ * Currently these are x86-specific, stored in arch_shared_info.nmi_reason.
++ */
++ /* I/O-check error reported via ISA port 0x61, bit 6. */
++#define _XEN_NMIREASON_io_error 0
++#define XEN_NMIREASON_io_error (1UL << _XEN_NMIREASON_io_error)
++ /* Parity error reported via ISA port 0x61, bit 7. */
++#define _XEN_NMIREASON_parity_error 1
++#define XEN_NMIREASON_parity_error (1UL << _XEN_NMIREASON_parity_error)
++ /* Unknown hardware-generated NMI. */
++#define _XEN_NMIREASON_unknown 2
++#define XEN_NMIREASON_unknown (1UL << _XEN_NMIREASON_unknown)
++
++/*
++ * long nmi_op(unsigned int cmd, void *arg)
++ * NB. All ops return zero on success, else a negative error code.
++ */
++
++/*
++ * Register NMI callback for this (calling) VCPU. Currently this only makes
++ * sense for domain 0, vcpu 0. All other callers receive EINVAL.
++ * arg == pointer to xennmi_callback structure.
++ */
++#define XENNMI_register_callback 0
++struct xennmi_callback {
++ unsigned long handler_address;
++ unsigned long pad;
++};
++typedef struct xennmi_callback xennmi_callback_t;
++DEFINE_XEN_GUEST_HANDLE(xennmi_callback_t);
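++
++/*
++ * Illustrative sketch (HYPERVISOR_nmi_op() and nmi_entry are assumed
++ * names, not defined here): dom0 vcpu0 registers a handler with
++ *
++ *     struct xennmi_callback cb = {
++ *         .handler_address = (unsigned long)nmi_entry,
++ *     };
++ *     HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ */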
++
++/*
++ * Deregister NMI callback for this (calling) VCPU.
++ * arg == NULL.
++ */
++#define XENNMI_unregister_callback 1
++
++#endif /* __XEN_PUBLIC_NMI_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/platform.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/platform.h 2008-09-25 13:55:33.000000000 +0200
+@@ -0,0 +1,346 @@
++/******************************************************************************
++ * platform.h
++ *
++ * Hardware platform operations. Intended for use by domain-0 kernel.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_PLATFORM_H__
++#define __XEN_PUBLIC_PLATFORM_H__
++
++#include "xen.h"
++
++#define XENPF_INTERFACE_VERSION 0x03000001
++
++/*
++ * Set clock such that it would read <secs,nsecs> after 00:00:00 UTC,
++ * 1 January, 1970 if the current system time was <system_time>.
++ */
++#define XENPF_settime 17
++struct xenpf_settime {
++ /* IN variables. */
++ uint32_t secs;
++ uint32_t nsecs;
++ uint64_t system_time;
++};
++typedef struct xenpf_settime xenpf_settime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_settime_t);
++
++/*
++ * Request memory range (@mfn, @mfn+@nr_mfns-1) to have type @type.
++ * On x86, @type is an architecture-defined MTRR memory type.
++ * On success, returns the MTRR that was used (@reg) and a handle that can
++ * be passed to XENPF_del_memtype to accurately tear down the new setting.
++ * (x86-specific).
++ */
++#define XENPF_add_memtype 31
++struct xenpf_add_memtype {
++ /* IN variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++ /* OUT variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_add_memtype xenpf_add_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_add_memtype_t);
++
++/*
++ * Tear down an existing memory-range type. If @handle is remembered then it
++ * should be passed in to accurately tear down the correct setting (in case
++ * of overlapping memory regions with differing types). If it is not known
++ * then @handle should be set to zero. In all cases @reg must be set.
++ * (x86-specific).
++ */
++#define XENPF_del_memtype 32
++struct xenpf_del_memtype {
++ /* IN variables. */
++ uint32_t handle;
++ uint32_t reg;
++};
++typedef struct xenpf_del_memtype xenpf_del_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_del_memtype_t);
++
++/* Read current type of an MTRR (x86-specific). */
++#define XENPF_read_memtype 33
++struct xenpf_read_memtype {
++ /* IN variables. */
++ uint32_t reg;
++ /* OUT variables. */
++ xen_pfn_t mfn;
++ uint64_t nr_mfns;
++ uint32_t type;
++};
++typedef struct xenpf_read_memtype xenpf_read_memtype_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_read_memtype_t);
++
++#define XENPF_microcode_update 35
++struct xenpf_microcode_update {
++ /* IN variables. */
++ XEN_GUEST_HANDLE(const_void) data;/* Pointer to microcode data */
++ uint32_t length; /* Length of microcode data. */
++};
++typedef struct xenpf_microcode_update xenpf_microcode_update_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_microcode_update_t);
++
++#define XENPF_platform_quirk 39
++#define QUIRK_NOIRQBALANCING 1 /* Do not restrict IO-APIC RTE targets */
++#define QUIRK_IOAPIC_BAD_REGSEL 2 /* IO-APIC REGSEL forgets its value */
++#define QUIRK_IOAPIC_GOOD_REGSEL 3 /* IO-APIC REGSEL behaves properly */
++struct xenpf_platform_quirk {
++ /* IN variables. */
++ uint32_t quirk_id;
++};
++typedef struct xenpf_platform_quirk xenpf_platform_quirk_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_platform_quirk_t);
++
++#define XENPF_firmware_info 50
++#define XEN_FW_DISK_INFO 1 /* from int 13 AH=08/41/48 */
++#define XEN_FW_DISK_MBR_SIGNATURE 2 /* from MBR offset 0x1b8 */
++#define XEN_FW_VBEDDC_INFO 3 /* from int 10 AX=4f15 */
++struct xenpf_firmware_info {
++ /* IN variables. */
++ uint32_t type;
++ uint32_t index;
++ /* OUT variables. */
++ union {
++ struct {
++ /* Int13, Fn48: Check Extensions Present. */
++ uint8_t device; /* %dl: bios device number */
++ uint8_t version; /* %ah: major version */
++ uint16_t interface_support; /* %cx: support bitmap */
++ /* Int13, Fn08: Legacy Get Device Parameters. */
++ uint16_t legacy_max_cylinder; /* %cl[7:6]:%ch: max cyl # */
++ uint8_t legacy_max_head; /* %dh: max head # */
++ uint8_t legacy_sectors_per_track; /* %cl[5:0]: max sector # */
++ /* Int13, Fn41: Get Device Parameters (as filled into %ds:%esi). */
++ /* NB. First uint16_t of buffer must be set to buffer size. */
++ XEN_GUEST_HANDLE(void) edd_params;
++ } disk_info; /* XEN_FW_DISK_INFO */
++ struct {
++ uint8_t device; /* bios device number */
++ uint32_t mbr_signature; /* offset 0x1b8 in mbr */
++ } disk_mbr_signature; /* XEN_FW_DISK_MBR_SIGNATURE */
++ struct {
++ /* Int10, AX=4F15: Get EDID info. */
++ uint8_t capabilities;
++ uint8_t edid_transfer_time;
++ /* must refer to a 128-byte buffer */
++ XEN_GUEST_HANDLE(uint8) edid;
++ } vbeddc_info; /* XEN_FW_VBEDDC_INFO */
++ } u;
++};
++typedef struct xenpf_firmware_info xenpf_firmware_info_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_firmware_info_t);
++
++#define XENPF_enter_acpi_sleep 51
++struct xenpf_enter_acpi_sleep {
++ /* IN variables */
++ uint16_t pm1a_cnt_val; /* PM1a control value. */
++ uint16_t pm1b_cnt_val; /* PM1b control value. */
++ uint32_t sleep_state; /* Which state to enter (Sn). */
++ uint32_t flags; /* Must be zero. */
++};
++typedef struct xenpf_enter_acpi_sleep xenpf_enter_acpi_sleep_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_enter_acpi_sleep_t);
++
++#define XENPF_change_freq 52
++struct xenpf_change_freq {
++ /* IN variables */
++ uint32_t flags; /* Must be zero. */
++ uint32_t cpu; /* Physical cpu. */
++ uint64_t freq; /* New frequency (Hz). */
++};
++typedef struct xenpf_change_freq xenpf_change_freq_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_change_freq_t);
++
++/*
++ * Get idle times (nanoseconds since boot) for physical CPUs specified in the
++ * @cpumap_bitmap with range [0..@cpumap_nr_cpus-1]. The @idletime array is
++ * indexed by CPU number; only entries with the corresponding @cpumap_bitmap
++ * bit set are written to. On return, @cpumap_bitmap is modified so that any
++ * non-existent CPUs are cleared. Such CPUs have their @idletime array entry
++ * cleared.
++ */
++#define XENPF_getidletime 53
++struct xenpf_getidletime {
++ /* IN/OUT variables */
++ /* IN: CPUs to interrogate; OUT: subset of IN which are present */
++ XEN_GUEST_HANDLE(uint8) cpumap_bitmap;
++ /* IN variables */
++ /* Size of cpumap bitmap. */
++ uint32_t cpumap_nr_cpus;
++ /* Must be indexable for every cpu in cpumap_bitmap. */
++ XEN_GUEST_HANDLE(uint64) idletime;
++ /* OUT variables */
++ /* System time when the idletime snapshots were taken. */
++ uint64_t now;
++};
++typedef struct xenpf_getidletime xenpf_getidletime_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_getidletime_t);
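++
++/*
++ * Worked example (illustrative): to sample CPUs 0..7, point
++ * cpumap_bitmap at a single byte holding 0xff, set cpumap_nr_cpus = 8,
++ * and point idletime at an array of eight uint64_t. On return, bits
++ * for absent CPUs are cleared and their idletime entries are zeroed.
++ */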
++
++#define XENPF_set_processor_pminfo 54
++
++/* ability bits */
++#define XEN_PROCESSOR_PM_CX 1
++#define XEN_PROCESSOR_PM_PX 2
++#define XEN_PROCESSOR_PM_TX 4
++
++/* cmd type */
++#define XEN_PM_CX 0
++#define XEN_PM_PX 1
++#define XEN_PM_TX 2
++
++/* Px sub info type */
++#define XEN_PX_PCT 1
++#define XEN_PX_PSS 2
++#define XEN_PX_PPC 4
++#define XEN_PX_PSD 8
++
++struct xen_power_register {
++ uint32_t space_id;
++ uint32_t bit_width;
++ uint32_t bit_offset;
++ uint32_t access_size;
++ uint64_t address;
++};
++
++struct xen_processor_csd {
++ uint32_t domain; /* domain number of one dependent group */
++ uint32_t coord_type; /* coordination type */
++ uint32_t num; /* number of processors in same domain */
++};
++typedef struct xen_processor_csd xen_processor_csd_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_csd_t);
++
++struct xen_processor_cx {
++ struct xen_power_register reg; /* GAS for Cx trigger register */
++ uint8_t type; /* cstate value, c0: 0, c1: 1, ... */
++ uint32_t latency; /* worst latency (ms) to enter/exit this cstate */
++ uint32_t power; /* average power consumption(mW) */
++ uint32_t dpcnt; /* number of dependency entries */
++ XEN_GUEST_HANDLE(xen_processor_csd_t) dp; /* NULL if no dependency */
++};
++typedef struct xen_processor_cx xen_processor_cx_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_cx_t);
++
++struct xen_processor_flags {
++ uint32_t bm_control:1;
++ uint32_t bm_check:1;
++ uint32_t has_cst:1;
++ uint32_t power_setup_done:1;
++ uint32_t bm_rld_set:1;
++};
++
++struct xen_processor_power {
++ uint32_t count; /* number of C state entries in array below */
++ struct xen_processor_flags flags; /* global flags of this processor */
++ XEN_GUEST_HANDLE(xen_processor_cx_t) states; /* supported c states */
++};
++
++struct xen_pct_register {
++ uint8_t descriptor;
++ uint16_t length;
++ uint8_t space_id;
++ uint8_t bit_width;
++ uint8_t bit_offset;
++ uint8_t reserved;
++ uint64_t address;
++};
++
++struct xen_processor_px {
++ uint64_t core_frequency; /* megahertz */
++ uint64_t power; /* milliWatts */
++ uint64_t transition_latency; /* microseconds */
++ uint64_t bus_master_latency; /* microseconds */
++ uint64_t control; /* control value */
++ uint64_t status; /* success indicator */
++};
++typedef struct xen_processor_px xen_processor_px_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_px_t);
++
++struct xen_psd_package {
++ uint64_t num_entries;
++ uint64_t revision;
++ uint64_t domain;
++ uint64_t coord_type;
++ uint64_t num_processors;
++};
++
++struct xen_processor_performance {
++ uint32_t flags; /* flag for Px sub info type */
++ uint32_t platform_limit; /* Platform limitation on freq usage */
++ struct xen_pct_register control_register;
++ struct xen_pct_register status_register;
++ uint32_t state_count; /* total available performance states */
++ XEN_GUEST_HANDLE(xen_processor_px_t) states;
++ struct xen_psd_package domain_info;
++ uint32_t shared_type; /* coordination type of this processor */
++};
++typedef struct xen_processor_performance xen_processor_performance_t;
++DEFINE_XEN_GUEST_HANDLE(xen_processor_performance_t);
++
++struct xenpf_set_processor_pminfo {
++ /* IN variables */
++ uint32_t id; /* ACPI CPU ID */
++ uint32_t type; /* {XEN_PM_CX, XEN_PM_PX} */
++ union {
++ struct xen_processor_power power;/* Cx: _CST/_CSD */
++ struct xen_processor_performance perf; /* Px: _PPC/_PCT/_PSS/_PSD */
++ };
++};
++typedef struct xenpf_set_processor_pminfo xenpf_set_processor_pminfo_t;
++DEFINE_XEN_GUEST_HANDLE(xenpf_set_processor_pminfo_t);
++
++struct xen_platform_op {
++ uint32_t cmd;
++ uint32_t interface_version; /* XENPF_INTERFACE_VERSION */
++ union {
++ struct xenpf_settime settime;
++ struct xenpf_add_memtype add_memtype;
++ struct xenpf_del_memtype del_memtype;
++ struct xenpf_read_memtype read_memtype;
++ struct xenpf_microcode_update microcode;
++ struct xenpf_platform_quirk platform_quirk;
++ struct xenpf_firmware_info firmware_info;
++ struct xenpf_enter_acpi_sleep enter_acpi_sleep;
++ struct xenpf_change_freq change_freq;
++ struct xenpf_getidletime getidletime;
++ struct xenpf_set_processor_pminfo set_pminfo;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_platform_op xen_platform_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_platform_op_t);
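++
++/*
++ * Every platform operation goes through this envelope: fill in cmd and
++ * interface_version, then the matching union member. A sketch, assuming
++ * a HYPERVISOR_platform_op()-style wrapper in the kernel:
++ *
++ *     struct xen_platform_op op = {
++ *         .cmd = XENPF_settime,
++ *         .interface_version = XENPF_INTERFACE_VERSION,
++ *     };
++ *     op.u.settime.secs = secs;         // caller-supplied wall clock
++ *     op.u.settime.nsecs = nsecs;
++ *     op.u.settime.system_time = system_time;
++ *     HYPERVISOR_platform_op(&op);
++ */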
++
++#endif /* __XEN_PUBLIC_PLATFORM_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/sysctl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/sysctl.h 2008-09-25 13:55:33.000000000 +0200
+@@ -0,0 +1,308 @@
++/******************************************************************************
++ * sysctl.h
++ *
++ * System management operations. For use by node control stack.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2002-2006, K Fraser
++ */
++
++#ifndef __XEN_PUBLIC_SYSCTL_H__
++#define __XEN_PUBLIC_SYSCTL_H__
++
++#if !defined(__XEN__) && !defined(__XEN_TOOLS__)
++#error "sysctl operations are intended for use by node control tools only"
++#endif
++
++#include "xen.h"
++#include "domctl.h"
++
++#define XEN_SYSCTL_INTERFACE_VERSION 0x00000006
++
++/*
++ * Read console content from Xen buffer ring.
++ */
++#define XEN_SYSCTL_readconsole 1
++struct xen_sysctl_readconsole {
++ /* IN: Non-zero -> clear after reading. */
++ uint8_t clear;
++ /* IN: Non-zero -> start index specified by @index field. */
++ uint8_t incremental;
++ uint8_t pad0, pad1;
++ /*
++ * IN: Start index for consuming from ring buffer (if @incremental);
++ * OUT: End index after consuming from ring buffer.
++ */
++ uint32_t index;
++ /* IN: Virtual address to write console data. */
++ XEN_GUEST_HANDLE_64(char) buffer;
++ /* IN: Size of buffer; OUT: Bytes written to buffer. */
++ uint32_t count;
++};
++typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
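++
++/*
++ * Sketch of incremental use (illustrative only): issue the sysctl
++ * repeatedly with clear = 0 and incremental = 1, feeding the returned
++ * index back in; each call writes up to count bytes of new console
++ * output into buffer and updates index and count accordingly.
++ */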
++
++/* Get trace buffers machine base address */
++#define XEN_SYSCTL_tbuf_op 2
++struct xen_sysctl_tbuf_op {
++ /* IN variables */
++#define XEN_SYSCTL_TBUFOP_get_info 0
++#define XEN_SYSCTL_TBUFOP_set_cpu_mask 1
++#define XEN_SYSCTL_TBUFOP_set_evt_mask 2
++#define XEN_SYSCTL_TBUFOP_set_size 3
++#define XEN_SYSCTL_TBUFOP_enable 4
++#define XEN_SYSCTL_TBUFOP_disable 5
++ uint32_t cmd;
++ /* IN/OUT variables */
++ struct xenctl_cpumap cpu_mask;
++ uint32_t evt_mask;
++ /* OUT variables */
++ uint64_aligned_t buffer_mfn;
++ uint32_t size;
++};
++typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
++
++/*
++ * Get physical information about the host machine
++ */
++#define XEN_SYSCTL_physinfo 3
++ /* (x86) The platform supports HVM guests. */
++#define _XEN_SYSCTL_PHYSCAP_hvm 0
++#define XEN_SYSCTL_PHYSCAP_hvm (1u<<_XEN_SYSCTL_PHYSCAP_hvm)
++ /* (x86) The platform supports HVM-guest direct access to I/O devices. */
++#define _XEN_SYSCTL_PHYSCAP_hvm_directio 1
++#define XEN_SYSCTL_PHYSCAP_hvm_directio (1u<<_XEN_SYSCTL_PHYSCAP_hvm_directio)
++struct xen_sysctl_physinfo {
++ uint32_t threads_per_core;
++ uint32_t cores_per_socket;
++ uint32_t nr_cpus;
++ uint32_t nr_nodes;
++ uint32_t cpu_khz;
++ uint64_aligned_t total_pages;
++ uint64_aligned_t free_pages;
++ uint64_aligned_t scrub_pages;
++ uint32_t hw_cap[8];
++
++ /*
++ * IN: maximum addressable entry in the caller-provided cpu_to_node array.
++ * OUT: largest cpu identifier in the system.
++ * If OUT is greater than IN then the cpu_to_node array is truncated!
++ */
++ uint32_t max_cpu_id;
++ /*
++ * If not NULL, this array is filled with the node identifier for each cpu.
++ * If a cpu has no node information (e.g., cpu not present) then the
++ * sentinel value ~0u is written.
++ * The size of this array is specified by the caller in @max_cpu_id.
++ * If the actual @max_cpu_id is smaller than the array then the trailing
++ * elements of the array will not be written by the sysctl.
++ */
++ XEN_GUEST_HANDLE_64(uint32) cpu_to_node;
++
++ /* XEN_SYSCTL_PHYSCAP_??? */
++ uint32_t capabilities;
++};
++typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
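++
++/*
++ * Worked example (illustrative): a caller that allocates a 64-entry
++ * cpu_to_node array passes max_cpu_id = 63. If max_cpu_id comes back
++ * as 127, CPUs 64..127 were not reported and the call can be repeated
++ * with a larger array.
++ */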
++
++/*
++ * Get the ID of the current scheduler.
++ */
++#define XEN_SYSCTL_sched_id 4
++struct xen_sysctl_sched_id {
++ /* OUT variable */
++ uint32_t sched_id;
++};
++typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
++
++/* Interface for controlling Xen software performance counters. */
++#define XEN_SYSCTL_perfc_op 5
++/* Sub-operations: */
++#define XEN_SYSCTL_PERFCOP_reset 1 /* Reset all counters to zero. */
++#define XEN_SYSCTL_PERFCOP_query 2 /* Get perfctr information. */
++struct xen_sysctl_perfc_desc {
++ char name[80]; /* name of perf counter */
++ uint32_t nr_vals; /* number of values for this counter */
++};
++typedef struct xen_sysctl_perfc_desc xen_sysctl_perfc_desc_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_desc_t);
++typedef uint32_t xen_sysctl_perfc_val_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_val_t);
++
++struct xen_sysctl_perfc_op {
++ /* IN variables. */
++ uint32_t cmd; /* XEN_SYSCTL_PERFCOP_??? */
++ /* OUT variables. */
++ uint32_t nr_counters; /* number of counter descriptions */
++ uint32_t nr_vals; /* number of values */
++ /* counter information (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_desc_t) desc;
++ /* counter values (or NULL) */
++ XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
++};
++typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
++
++#define XEN_SYSCTL_getdomaininfolist 6
++struct xen_sysctl_getdomaininfolist {
++ /* IN variables. */
++ domid_t first_domain;
++ uint32_t max_domains;
++ XEN_GUEST_HANDLE_64(xen_domctl_getdomaininfo_t) buffer;
++ /* OUT variables. */
++ uint32_t num_domains;
++};
++typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
++
++/* Inject debug keys into Xen. */
++#define XEN_SYSCTL_debug_keys 7
++struct xen_sysctl_debug_keys {
++ /* IN variables. */
++ XEN_GUEST_HANDLE_64(char) keys;
++ uint32_t nr_keys;
++};
++typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
++
++/* Get physical CPU information. */
++#define XEN_SYSCTL_getcpuinfo 8
++struct xen_sysctl_cpuinfo {
++ uint64_aligned_t idletime;
++};
++typedef struct xen_sysctl_cpuinfo xen_sysctl_cpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpuinfo_t);
++struct xen_sysctl_getcpuinfo {
++ /* IN variables. */
++ uint32_t max_cpus;
++ XEN_GUEST_HANDLE_64(xen_sysctl_cpuinfo_t) info;
++ /* OUT variables. */
++ uint32_t nr_cpus;
++};
++typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
++
++#define XEN_SYSCTL_availheap 9
++struct xen_sysctl_availheap {
++ /* IN variables. */
++ uint32_t min_bitwidth; /* Smallest address width (zero if don't care). */
++ uint32_t max_bitwidth; /* Largest address width (zero if don't care). */
++ int32_t node; /* NUMA node of interest (-1 for all nodes). */
++ /* OUT variables. */
++ uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
++};
++typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
++
++#define XEN_SYSCTL_get_pmstat 10
++struct pm_px_val {
++ uint64_aligned_t freq; /* Px core frequency */
++ uint64_aligned_t residency; /* Px residency time */
++ uint64_aligned_t count; /* Px transition count */
++};
++typedef struct pm_px_val pm_px_val_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_val_t);
++
++struct pm_px_stat {
++ uint8_t total; /* total Px states */
++ uint8_t usable; /* usable Px states */
++ uint8_t last; /* last Px state */
++ uint8_t cur; /* current Px state */
++ XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
++ XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
++};
++typedef struct pm_px_stat pm_px_stat_t;
++DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
++
++struct pm_cx_stat {
++ uint32_t nr; /* entry nr in triggers & residencies, including C0 */
++ uint32_t last; /* last Cx state */
++ uint64_aligned_t idle_time; /* idle time from boot */
++ XEN_GUEST_HANDLE_64(uint64) triggers; /* Cx trigger counts */
++ XEN_GUEST_HANDLE_64(uint64) residencies; /* Cx residencies */
++};
++
++struct xen_sysctl_get_pmstat {
++#define PMSTAT_CATEGORY_MASK 0xf0
++#define PMSTAT_PX 0x10
++#define PMSTAT_CX 0x20
++#define PMSTAT_get_max_px (PMSTAT_PX | 0x1)
++#define PMSTAT_get_pxstat (PMSTAT_PX | 0x2)
++#define PMSTAT_reset_pxstat (PMSTAT_PX | 0x3)
++#define PMSTAT_get_max_cx (PMSTAT_CX | 0x1)
++#define PMSTAT_get_cxstat (PMSTAT_CX | 0x2)
++#define PMSTAT_reset_cxstat (PMSTAT_CX | 0x3)
++ uint32_t type;
++ uint32_t cpuid;
++ union {
++ struct pm_px_stat getpx;
++ struct pm_cx_stat getcx;
++ /* other structs for Tx, etc. */
++ } u;
++};
++typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
++
++#define XEN_SYSCTL_cpu_hotplug 11
++struct xen_sysctl_cpu_hotplug {
++ /* IN variables */
++ uint32_t cpu; /* Physical cpu. */
++#define XEN_SYSCTL_CPU_HOTPLUG_ONLINE 0
++#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
++ uint32_t op; /* hotplug opcode */
++};
++typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
++
++
++struct xen_sysctl {
++ uint32_t cmd;
++ uint32_t interface_version; /* XEN_SYSCTL_INTERFACE_VERSION */
++ union {
++ struct xen_sysctl_readconsole readconsole;
++ struct xen_sysctl_tbuf_op tbuf_op;
++ struct xen_sysctl_physinfo physinfo;
++ struct xen_sysctl_sched_id sched_id;
++ struct xen_sysctl_perfc_op perfc_op;
++ struct xen_sysctl_getdomaininfolist getdomaininfolist;
++ struct xen_sysctl_debug_keys debug_keys;
++ struct xen_sysctl_getcpuinfo getcpuinfo;
++ struct xen_sysctl_availheap availheap;
++ struct xen_sysctl_get_pmstat get_pmstat;
++ struct xen_sysctl_cpu_hotplug cpu_hotplug;
++ uint8_t pad[128];
++ } u;
++};
++typedef struct xen_sysctl xen_sysctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_sysctl_t);
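++
++/*
++ * A sysctl is issued by filling in the envelope and the matching union
++ * member. A sketch (HYPERVISOR_sysctl() is an assumed wrapper name;
++ * control tools normally reach this hypercall via the privcmd driver):
++ *
++ *     struct xen_sysctl op = {
++ *         .cmd = XEN_SYSCTL_sched_id,
++ *         .interface_version = XEN_SYSCTL_INTERFACE_VERSION,
++ *     };
++ *     if (HYPERVISOR_sysctl(&op) == 0)
++ *         ;  // op.u.sched_id.sched_id holds the scheduler ID
++ */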
++
++#endif /* __XEN_PUBLIC_SYSCTL_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/trace.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/trace.h 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,205 @@
++/******************************************************************************
++ * include/public/trace.h
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Mark Williamson, (C) 2004 Intel Research Cambridge
++ * Copyright (C) 2005 Bin Ren
++ */
++
++#ifndef __XEN_PUBLIC_TRACE_H__
++#define __XEN_PUBLIC_TRACE_H__
++
++#define TRACE_EXTRA_MAX 7
++#define TRACE_EXTRA_SHIFT 28
++
++/* Trace classes */
++#define TRC_CLS_SHIFT 16
++#define TRC_GEN 0x0001f000 /* General trace */
++#define TRC_SCHED 0x0002f000 /* Xen Scheduler trace */
++#define TRC_DOM0OP 0x0004f000 /* Xen DOM0 operation trace */
++#define TRC_HVM 0x0008f000 /* Xen HVM trace */
++#define TRC_MEM 0x0010f000 /* Xen memory trace */
++#define TRC_PV 0x0020f000 /* Xen PV traces */
++#define TRC_SHADOW 0x0040f000 /* Xen shadow tracing */
++#define TRC_PM 0x0080f000 /* Xen power management trace */
++#define TRC_ALL 0x0ffff000
++#define TRC_HD_TO_EVENT(x) ((x)&0x0fffffff)
++#define TRC_HD_CYCLE_FLAG (1UL<<31)
++#define TRC_HD_INCLUDES_CYCLE_COUNT(x) ( !!( (x) & TRC_HD_CYCLE_FLAG ) )
++#define TRC_HD_EXTRA(x) (((x)>>TRACE_EXTRA_SHIFT)&TRACE_EXTRA_MAX)
++
++/* Trace subclasses */
++#define TRC_SUBCLS_SHIFT 12
++
++/* trace subclasses for SVM */
++#define TRC_HVM_ENTRYEXIT 0x00081000 /* VMENTRY and #VMEXIT */
++#define TRC_HVM_HANDLER 0x00082000 /* various HVM handlers */
++
++#define TRC_SCHED_MIN 0x00021000 /* Just runstate changes */
++#define TRC_SCHED_VERBOSE 0x00028000 /* More inclusive scheduling */
++
++/* Trace events per class */
++#define TRC_LOST_RECORDS (TRC_GEN + 1)
++#define TRC_TRACE_WRAP_BUFFER (TRC_GEN + 2)
++#define TRC_TRACE_CPU_CHANGE (TRC_GEN + 3)
++
++#define TRC_SCHED_RUNSTATE_CHANGE (TRC_SCHED_MIN + 1)
++#define TRC_SCHED_DOM_ADD (TRC_SCHED_VERBOSE + 1)
++#define TRC_SCHED_DOM_REM (TRC_SCHED_VERBOSE + 2)
++#define TRC_SCHED_SLEEP (TRC_SCHED_VERBOSE + 3)
++#define TRC_SCHED_WAKE (TRC_SCHED_VERBOSE + 4)
++#define TRC_SCHED_YIELD (TRC_SCHED_VERBOSE + 5)
++#define TRC_SCHED_BLOCK (TRC_SCHED_VERBOSE + 6)
++#define TRC_SCHED_SHUTDOWN (TRC_SCHED_VERBOSE + 7)
++#define TRC_SCHED_CTL (TRC_SCHED_VERBOSE + 8)
++#define TRC_SCHED_ADJDOM (TRC_SCHED_VERBOSE + 9)
++#define TRC_SCHED_SWITCH (TRC_SCHED_VERBOSE + 10)
++#define TRC_SCHED_S_TIMER_FN (TRC_SCHED_VERBOSE + 11)
++#define TRC_SCHED_T_TIMER_FN (TRC_SCHED_VERBOSE + 12)
++#define TRC_SCHED_DOM_TIMER_FN (TRC_SCHED_VERBOSE + 13)
++#define TRC_SCHED_SWITCH_INFPREV (TRC_SCHED_VERBOSE + 14)
++#define TRC_SCHED_SWITCH_INFNEXT (TRC_SCHED_VERBOSE + 15)
++
++#define TRC_MEM_PAGE_GRANT_MAP (TRC_MEM + 1)
++#define TRC_MEM_PAGE_GRANT_UNMAP (TRC_MEM + 2)
++#define TRC_MEM_PAGE_GRANT_TRANSFER (TRC_MEM + 3)
++
++#define TRC_PV_HYPERCALL (TRC_PV + 1)
++#define TRC_PV_TRAP (TRC_PV + 3)
++#define TRC_PV_PAGE_FAULT (TRC_PV + 4)
++#define TRC_PV_FORCED_INVALID_OP (TRC_PV + 5)
++#define TRC_PV_EMULATE_PRIVOP (TRC_PV + 6)
++#define TRC_PV_EMULATE_4GB (TRC_PV + 7)
++#define TRC_PV_MATH_STATE_RESTORE (TRC_PV + 8)
++#define TRC_PV_PAGING_FIXUP (TRC_PV + 9)
++#define TRC_PV_GDT_LDT_MAPPING_FAULT (TRC_PV + 10)
++#define TRC_PV_PTWR_EMULATION (TRC_PV + 11)
++#define TRC_PV_PTWR_EMULATION_PAE (TRC_PV + 12)
++ /* Indicates that addresses in trace record are 64 bits */
++#define TRC_64_FLAG (0x100)
++
++#define TRC_SHADOW_NOT_SHADOW (TRC_SHADOW + 1)
++#define TRC_SHADOW_FAST_PROPAGATE (TRC_SHADOW + 2)
++#define TRC_SHADOW_FAST_MMIO (TRC_SHADOW + 3)
++#define TRC_SHADOW_FALSE_FAST_PATH (TRC_SHADOW + 4)
++#define TRC_SHADOW_MMIO (TRC_SHADOW + 5)
++#define TRC_SHADOW_FIXUP (TRC_SHADOW + 6)
++#define TRC_SHADOW_DOMF_DYING (TRC_SHADOW + 7)
++#define TRC_SHADOW_EMULATE (TRC_SHADOW + 8)
++#define TRC_SHADOW_EMULATE_UNSHADOW_USER (TRC_SHADOW + 9)
++#define TRC_SHADOW_EMULATE_UNSHADOW_EVTINJ (TRC_SHADOW + 10)
++#define TRC_SHADOW_EMULATE_UNSHADOW_UNHANDLED (TRC_SHADOW + 11)
++#define TRC_SHADOW_WRMAP_BF (TRC_SHADOW + 12)
++#define TRC_SHADOW_PREALLOC_UNPIN (TRC_SHADOW + 13)
++#define TRC_SHADOW_RESYNC_FULL (TRC_SHADOW + 14)
++#define TRC_SHADOW_RESYNC_ONLY (TRC_SHADOW + 15)
++
++/* trace events per subclass */
++#define TRC_HVM_VMENTRY (TRC_HVM_ENTRYEXIT + 0x01)
++#define TRC_HVM_VMEXIT (TRC_HVM_ENTRYEXIT + 0x02)
++#define TRC_HVM_VMEXIT64 (TRC_HVM_ENTRYEXIT + TRC_64_FLAG + 0x02)
++#define TRC_HVM_PF_XEN (TRC_HVM_HANDLER + 0x01)
++#define TRC_HVM_PF_XEN64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x01)
++#define TRC_HVM_PF_INJECT (TRC_HVM_HANDLER + 0x02)
++#define TRC_HVM_PF_INJECT64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x02)
++#define TRC_HVM_INJ_EXC (TRC_HVM_HANDLER + 0x03)
++#define TRC_HVM_INJ_VIRQ (TRC_HVM_HANDLER + 0x04)
++#define TRC_HVM_REINJ_VIRQ (TRC_HVM_HANDLER + 0x05)
++#define TRC_HVM_IO_READ (TRC_HVM_HANDLER + 0x06)
++#define TRC_HVM_IO_WRITE (TRC_HVM_HANDLER + 0x07)
++#define TRC_HVM_CR_READ (TRC_HVM_HANDLER + 0x08)
++#define TRC_HVM_CR_READ64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x08)
++#define TRC_HVM_CR_WRITE (TRC_HVM_HANDLER + 0x09)
++#define TRC_HVM_CR_WRITE64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x09)
++#define TRC_HVM_DR_READ (TRC_HVM_HANDLER + 0x0A)
++#define TRC_HVM_DR_WRITE (TRC_HVM_HANDLER + 0x0B)
++#define TRC_HVM_MSR_READ (TRC_HVM_HANDLER + 0x0C)
++#define TRC_HVM_MSR_WRITE (TRC_HVM_HANDLER + 0x0D)
++#define TRC_HVM_CPUID (TRC_HVM_HANDLER + 0x0E)
++#define TRC_HVM_INTR (TRC_HVM_HANDLER + 0x0F)
++#define TRC_HVM_NMI (TRC_HVM_HANDLER + 0x10)
++#define TRC_HVM_SMI (TRC_HVM_HANDLER + 0x11)
++#define TRC_HVM_VMMCALL (TRC_HVM_HANDLER + 0x12)
++#define TRC_HVM_HLT (TRC_HVM_HANDLER + 0x13)
++#define TRC_HVM_INVLPG (TRC_HVM_HANDLER + 0x14)
++#define TRC_HVM_INVLPG64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x14)
++#define TRC_HVM_MCE (TRC_HVM_HANDLER + 0x15)
++#define TRC_HVM_IO_ASSIST (TRC_HVM_HANDLER + 0x16)
++#define TRC_HVM_IO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x16)
++#define TRC_HVM_MMIO_ASSIST (TRC_HVM_HANDLER + 0x17)
++#define TRC_HVM_MMIO_ASSIST64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x17)
++#define TRC_HVM_CLTS (TRC_HVM_HANDLER + 0x18)
++#define TRC_HVM_LMSW (TRC_HVM_HANDLER + 0x19)
++#define TRC_HVM_LMSW64 (TRC_HVM_HANDLER + TRC_64_FLAG + 0x19)
++
++/* trace subclasses for power management */
++#define TRC_PM_FREQ 0x00801000 /* xen cpu freq events */
++#define TRC_PM_IDLE 0x00802000 /* xen cpu idle events */
++
++/* trace events per subclass */
++#define TRC_PM_FREQ_CHANGE (TRC_PM_FREQ + 0x01)
++#define TRC_PM_IDLE_ENTRY (TRC_PM_IDLE + 0x01)
++#define TRC_PM_IDLE_EXIT (TRC_PM_IDLE + 0x02)
++
++/* This structure represents a single trace buffer record. */
++struct t_rec {
++ uint32_t event:28;
++ uint32_t extra_u32:3; /* # entries in trailing extra_u32[] array */
++ uint32_t cycles_included:1; /* u.cycles or u.no_cycles? */
++ union {
++ struct {
++ uint32_t cycles_lo, cycles_hi; /* cycle counter timestamp */
++ uint32_t extra_u32[7]; /* event data items */
++ } cycles;
++ struct {
++ uint32_t extra_u32[7]; /* event data items */
++ } nocycles;
++ } u;
++};
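++
++/*
++ * A record is thus variable-sized: a 4-byte header, an optional 8-byte
++ * cycle stamp, then extra_u32 data words. For example, a record with
++ * cycles_included = 1 and extra_u32 = 2 occupies 4 + 8 + 2*4 = 20 bytes.
++ */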
++
++/*
++ * This structure contains the metadata for a single trace buffer. The
++ * cons and prod fields index into an array of struct t_rec's.
++ */
++struct t_buf {
++ /* Assume the data buffer size is X. X is generally not a power of 2.
++ * CONS and PROD are incremented modulo (2*X):
++ * 0 <= cons < 2*X
++ * 0 <= prod < 2*X
++ * This is done because addition modulo X breaks at 2^32 when X is not a
++ * power of 2:
++ * (((2^32 - 1) % X) + 1) % X != (2^32) % X
++ */
++ uint32_t cons; /* Offset of next item to be consumed by control tools. */
++ uint32_t prod; /* Offset of next item to be produced by Xen. */
++ /* Records follow immediately after the meta-data header. */
++};
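++
++/*
++ * Concretely, the buffer offset of the next item is (cons % X); keeping
++ * the counters in [0, 2*X) also lets an empty buffer (prod == cons) be
++ * distinguished from a full one (prod == cons + X, taken modulo 2*X).
++ */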
++
++#endif /* __XEN_PUBLIC_TRACE_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/xen-compat.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/xen-compat.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,44 @@
++/******************************************************************************
++ * xen-compat.h
++ *
++ * Guest OS interface to Xen. Compatibility layer.
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (c) 2006, Christian Limpach
++ */
++
++#ifndef __XEN_PUBLIC_XEN_COMPAT_H__
++#define __XEN_PUBLIC_XEN_COMPAT_H__
++
++#define __XEN_LATEST_INTERFACE_VERSION__ 0x00030209
++
++#if defined(__XEN__) || defined(__XEN_TOOLS__)
++/* Xen is built with matching headers and implements the latest interface. */
++#define __XEN_INTERFACE_VERSION__ __XEN_LATEST_INTERFACE_VERSION__
++#elif !defined(__XEN_INTERFACE_VERSION__)
++/* Guests which do not specify a version get the legacy interface. */
++#define __XEN_INTERFACE_VERSION__ 0x00000000
++#endif
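++
++/*
++ * A guest can pin itself to a particular interface by defining the
++ * version before any Xen header is included, e.g. on the compiler
++ * command line (the value here is illustrative):
++ *
++ *     -D__XEN_INTERFACE_VERSION__=0x00030205
++ */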
++
++#if __XEN_INTERFACE_VERSION__ > __XEN_LATEST_INTERFACE_VERSION__
++#error "These header files do not support the requested interface version."
++#endif
++
++#endif /* __XEN_PUBLIC_XEN_COMPAT_H__ */
+Index: head-2008-11-25/include/xen/interface/xenoprof.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/xenoprof.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,138 @@
++/******************************************************************************
++ * xenoprof.h
++ *
++ * Interface for enabling system-wide profiling based on hardware
++ * performance counters
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ * Written by Aravind Menon & Jose Renato Santos
++ */
++
++#ifndef __XEN_PUBLIC_XENOPROF_H__
++#define __XEN_PUBLIC_XENOPROF_H__
++
++#include "xen.h"
++
++/*
++ * Commands to HYPERVISOR_xenoprof_op().
++ */
++#define XENOPROF_init 0
++#define XENOPROF_reset_active_list 1
++#define XENOPROF_reset_passive_list 2
++#define XENOPROF_set_active 3
++#define XENOPROF_set_passive 4
++#define XENOPROF_reserve_counters 5
++#define XENOPROF_counter 6
++#define XENOPROF_setup_events 7
++#define XENOPROF_enable_virq 8
++#define XENOPROF_start 9
++#define XENOPROF_stop 10
++#define XENOPROF_disable_virq 11
++#define XENOPROF_release_counters 12
++#define XENOPROF_shutdown 13
++#define XENOPROF_get_buffer 14
++#define XENOPROF_set_backtrace 15
++#define XENOPROF_last_op 15
++
++#define MAX_OPROF_EVENTS 32
++#define MAX_OPROF_DOMAINS 25
++#define XENOPROF_CPU_TYPE_SIZE 64
++
++/* Xenoprof performance events (not Xen events) */
++struct event_log {
++ uint64_t eip;
++ uint8_t mode;
++ uint8_t event;
++};
++
++/* PC value that indicates a special code */
++#define XENOPROF_ESCAPE_CODE ~0UL
++/* Transient events for the xenoprof->oprofile cpu buf */
++#define XENOPROF_TRACE_BEGIN 1
++
++/* Xenoprof buffer shared between Xen and domain - 1 per VCPU */
++struct xenoprof_buf {
++ uint32_t event_head;
++ uint32_t event_tail;
++ uint32_t event_size;
++ uint32_t vcpu_id;
++ uint64_t xen_samples;
++ uint64_t kernel_samples;
++ uint64_t user_samples;
++ uint64_t lost_samples;
++ struct event_log event_log[1];
++};
++#ifndef __XEN__
++typedef struct xenoprof_buf xenoprof_buf_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_buf_t);
++#endif
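++
++/*
++ * Sketch of how a domain drains its per-VCPU buffer (illustrative;
++ * memory barriers around the head/tail accesses are omitted):
++ *
++ *     while (buf->event_tail != buf->event_head) {
++ *         struct event_log *ev = &buf->event_log[buf->event_tail];
++ *         ... consume ev->eip, ev->mode and ev->event ...
++ *         buf->event_tail = (buf->event_tail + 1) % buf->event_size;
++ *     }
++ */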
++
++struct xenoprof_init {
++ int32_t num_events;
++ int32_t is_primary;
++ char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++};
++typedef struct xenoprof_init xenoprof_init_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_init_t);
++
++struct xenoprof_get_buffer {
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++};
++typedef struct xenoprof_get_buffer xenoprof_get_buffer_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_get_buffer_t);
++
++struct xenoprof_counter {
++ uint32_t ind;
++ uint64_t count;
++ uint32_t enabled;
++ uint32_t event;
++ uint32_t hypervisor;
++ uint32_t kernel;
++ uint32_t user;
++ uint64_t unit_mask;
++};
++typedef struct xenoprof_counter xenoprof_counter_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_counter_t);
++
++typedef struct xenoprof_passive {
++ uint16_t domain_id;
++ int32_t max_samples;
++ int32_t nbuf;
++ int32_t bufsize;
++ uint64_t buf_gmaddr;
++} xenoprof_passive_t;
++DEFINE_XEN_GUEST_HANDLE(xenoprof_passive_t);
++
++
++#endif /* __XEN_PUBLIC_XENOPROF_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/xsm/acm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/xsm/acm.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,235 @@
++/*
++ * acm.h: Xen access control module interface definitions
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005, International Business Machines Corporation.
++ */
++
++#ifndef _XEN_PUBLIC_ACM_H
++#define _XEN_PUBLIC_ACM_H
++
++#include "../xen.h"
++
++/* If ACM_DEBUG is defined, all hooks should
++ * print a short trace message (comment it out
++ * when not in testing mode)
++ */
++/* #define ACM_DEBUG */
++
++#ifdef ACM_DEBUG
++# define printkd(fmt, args...) printk(fmt,## args)
++#else
++# define printkd(fmt, args...)
++#endif
++
++/* default ssid reference value if not supplied */
++#define ACM_DEFAULT_SSID 0x0
++#define ACM_DEFAULT_LOCAL_SSID 0x0
++
++/* Internal ACM ERROR types */
++#define ACM_OK 0
++#define ACM_UNDEF -1
++#define ACM_INIT_SSID_ERROR -2
++#define ACM_INIT_SOID_ERROR -3
++#define ACM_ERROR -4
++
++/* External ACCESS DECISIONS */
++#define ACM_ACCESS_PERMITTED 0
++#define ACM_ACCESS_DENIED -111
++#define ACM_NULL_POINTER_ERROR -200
++
++/*
++ Error codes reported when trying to test for a new policy.
++ These error codes are reported in an array of tuples where
++ each error code is followed by a parameter describing the error
++ more closely, such as a domain id.
++*/
++#define ACM_EVTCHN_SHARING_VIOLATION 0x100
++#define ACM_GNTTAB_SHARING_VIOLATION 0x101
++#define ACM_DOMAIN_LOOKUP 0x102
++#define ACM_CHWALL_CONFLICT 0x103
++#define ACM_SSIDREF_IN_USE 0x104
++
++
++/* primary policy in lower 4 bits */
++#define ACM_NULL_POLICY 0
++#define ACM_CHINESE_WALL_POLICY 1
++#define ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY 2
++#define ACM_POLICY_UNDEFINED 15
++
++/* combinations have the secondary policy component in the higher 4 bits */
++#define ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY \
++ ((ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY << 4) | ACM_CHINESE_WALL_POLICY)
++
++/* policy: */
++#define ACM_POLICY_NAME(X) \
++ ((X) == (ACM_NULL_POLICY)) ? "NULL" : \
++ ((X) == (ACM_CHINESE_WALL_POLICY)) ? "CHINESE WALL" : \
++ ((X) == (ACM_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "SIMPLE TYPE ENFORCEMENT" : \
++ ((X) == (ACM_CHINESE_WALL_AND_SIMPLE_TYPE_ENFORCEMENT_POLICY)) ? "CHINESE WALL AND SIMPLE TYPE ENFORCEMENT" : \
++ "UNDEFINED"
++
++/* the following policy versions must be increased
++ * whenever the interpretation of the related
++ * policy's data structure changes
++ */
++#define ACM_POLICY_VERSION 4
++#define ACM_CHWALL_VERSION 1
++#define ACM_STE_VERSION 1
++
++/* defines a ssid reference used by xen */
++typedef uint32_t ssidref_t;
++
++/* hooks that are known to domains */
++#define ACMHOOK_none 0
++#define ACMHOOK_sharing 1
++#define ACMHOOK_authorization 2
++#define ACMHOOK_conflictset 3
++
++/* -------security policy relevant type definitions-------- */
++
++/* type identifier; compares to "equal" or "not equal" */
++typedef uint16_t domaintype_t;
++
++/* CHINESE WALL POLICY DATA STRUCTURES
++ *
++ * current accumulated conflict type set:
++ * When a domain is started and has a type that is in
++ * a conflict set, the conflicting types are incremented in
++ * the aggregate set. When a domain is destroyed, the
++ * conflicting types to its type are decremented.
++ * If a domain has multiple types, this procedure works over
++ * all those types.
++ *
++ * conflict_aggregate_set[i] holds the number of
++ * running domains that have a conflict with type i.
++ *
++ * running_types[i] holds the number of running domains
++ * that include type i in their ssidref-referenced type set
++ *
++ * conflict_sets[i][j] is "0" if type j has no conflict
++ * with type i and is "1" otherwise.
++ */
++/* high-16 = version, low-16 = check magic */
++#define ACM_MAGIC 0x0001debc
++
++/* size of the SHA1 hash identifying the XML policy from which the
++ binary policy was created */
++#define ACM_SHA1_HASH_SIZE 20
++
++/* Each offset is in bytes from the start
++ * of the struct it is part of. */
++
++/* V3 of the policy buffer added a version structure */
++struct acm_policy_version
++{
++ uint32_t major;
++ uint32_t minor;
++};
++
++
++/* each buffer consists of all policy information for
++ * the respective policy given in the policy code
++ *
++ * acm_policy_buffer, acm_chwall_policy_buffer,
++ * and acm_ste_policy_buffer need to stay 32-bit aligned
++ * because we create binary policies also with external
++ * tools that assume packed representations (e.g. the java tool)
++ */
++struct acm_policy_buffer {
++ uint32_t magic;
++ uint32_t policy_version; /* ACM_POLICY_VERSION */
++ uint32_t len;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_buffer_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_buffer_offset;
++ struct acm_policy_version xml_pol_version; /* added in V3 */
++ uint8_t xml_policy_hash[ACM_SHA1_HASH_SIZE]; /* added in V4 */
++};
++
++
++struct acm_policy_reference_buffer {
++ uint32_t len;
++};
++
++struct acm_chwall_policy_buffer {
++ uint32_t policy_version; /* ACM_CHWALL_VERSION */
++ uint32_t policy_code;
++ uint32_t chwall_max_types;
++ uint32_t chwall_max_ssidrefs;
++ uint32_t chwall_max_conflictsets;
++ uint32_t chwall_ssid_offset;
++ uint32_t chwall_conflict_sets_offset;
++ uint32_t chwall_running_types_offset;
++ uint32_t chwall_conflict_aggregate_offset;
++};
++
++struct acm_ste_policy_buffer {
++ uint32_t policy_version; /* ACM_STE_VERSION */
++ uint32_t policy_code;
++ uint32_t ste_max_types;
++ uint32_t ste_max_ssidrefs;
++ uint32_t ste_ssid_offset;
++};
++
++struct acm_stats_buffer {
++ uint32_t magic;
++ uint32_t len;
++ uint32_t primary_policy_code;
++ uint32_t primary_stats_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_stats_offset;
++};
++
++struct acm_ste_stats_buffer {
++ uint32_t ec_eval_count;
++ uint32_t gt_eval_count;
++ uint32_t ec_denied_count;
++ uint32_t gt_denied_count;
++ uint32_t ec_cachehit_count;
++ uint32_t gt_cachehit_count;
++};
++
++struct acm_ssid_buffer {
++ uint32_t len;
++ ssidref_t ssidref;
++ uint32_t policy_reference_offset;
++ uint32_t primary_policy_code;
++ uint32_t primary_max_types;
++ uint32_t primary_types_offset;
++ uint32_t secondary_policy_code;
++ uint32_t secondary_max_types;
++ uint32_t secondary_types_offset;
++};
++
++#endif
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/xsm/acm_ops.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/xsm/acm_ops.h 2007-10-22 13:39:15.000000000 +0200
+@@ -0,0 +1,159 @@
++/*
++ * acm_ops.h: Xen access control module hypervisor commands
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ *
++ * Reiner Sailer <sailer@watson.ibm.com>
++ * Copyright (c) 2005,2006 International Business Machines Corporation.
++ */
++
++#ifndef __XEN_PUBLIC_ACM_OPS_H__
++#define __XEN_PUBLIC_ACM_OPS_H__
++
++#include "../xen.h"
++#include "acm.h"
++
++/*
++ * Make sure you increment the interface version whenever you modify this file!
++ * This ensures that old versions of the acm tools stop working in a
++ * well-defined way (rather than crashing the machine, for instance).
++ */
++#define ACM_INTERFACE_VERSION 0xAAAA000A
++
++/************************************************************************/
++
++/*
++ * Prototype for this hypercall is:
++ * int acm_op(int cmd, void *args)
++ * @cmd == ACMOP_??? (access control module operation).
++ * @args == Operation-specific extra arguments (NULL if none).
++ */
++
++
++#define ACMOP_setpolicy 1
++struct acm_setpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pushcache;
++ uint32_t pushcache_size;
++};
++
++
++#define ACMOP_getpolicy 2
++struct acm_getpolicy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_dumpstats 3
++struct acm_dumpstats {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) pullcache;
++ uint32_t pullcache_size;
++};
++
++
++#define ACMOP_getssid 4
++#define ACM_GETBY_ssidref 1
++#define ACM_GETBY_domainid 2
++struct acm_getssid {
++ /* IN */
++ uint32_t get_ssid_by; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id;
++ XEN_GUEST_HANDLE_64(void) ssidbuf;
++ uint32_t ssidbuf_size;
++};
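As a usage sketch of the hypercall prototype above (HYPERVISOR_acm_op is an assumed guest-side wrapper name; the real entry point may differ):

    int get_domain_ssid(domaintype_t domid, void *buf, uint32_t size)
    {
        struct acm_getssid op = {
            .get_ssid_by  = ACM_GETBY_domainid,
            .ssidbuf_size = size,
        };

        op.id.domainid = domid;
        set_xen_guest_handle(op.ssidbuf, buf);
        return HYPERVISOR_acm_op(ACMOP_getssid, &op);
    }

On success the caller's buffer holds a struct acm_ssid_buffer as defined in acm.h above.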
++
++#define ACMOP_getdecision 5
++struct acm_getdecision {
++ /* IN */
++ uint32_t get_decision_by1; /* ACM_GETBY_* */
++ uint32_t get_decision_by2; /* ACM_GETBY_* */
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id1;
++ union {
++ domaintype_t domainid;
++ ssidref_t ssidref;
++ } id2;
++ uint32_t hook;
++ /* OUT */
++ uint32_t acm_decision;
++};
++
++
++#define ACMOP_chgpolicy 6
++struct acm_change_policy {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) policy_pushcache;
++ uint32_t policy_pushcache_size;
++ XEN_GUEST_HANDLE_64(void) del_array;
++ uint32_t delarray_size;
++ XEN_GUEST_HANDLE_64(void) chg_array;
++ uint32_t chgarray_size;
++ /* OUT */
++ /* array with error code */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++#define ACMOP_relabeldoms 7
++struct acm_relabel_doms {
++ /* IN */
++ XEN_GUEST_HANDLE_64(void) relabel_map;
++ uint32_t relabel_map_size;
++ /* OUT */
++ XEN_GUEST_HANDLE_64(void) err_array;
++ uint32_t errarray_size;
++};
++
++/* future interface to Xen */
++struct xen_acmctl {
++ uint32_t cmd;
++ uint32_t interface_version;
++ union {
++ struct acm_setpolicy setpolicy;
++ struct acm_getpolicy getpolicy;
++ struct acm_dumpstats dumpstats;
++ struct acm_getssid getssid;
++ struct acm_getdecision getdecision;
++ struct acm_change_policy change_policy;
++ struct acm_relabel_doms relabel_doms;
++ } u;
++};
++
++typedef struct xen_acmctl xen_acmctl_t;
++DEFINE_XEN_GUEST_HANDLE(xen_acmctl_t);
++
++#endif /* __XEN_PUBLIC_ACM_OPS_H__ */
++
++/*
++ * Local variables:
++ * mode: C
++ * c-set-style: "BSD"
++ * c-basic-offset: 4
++ * tab-width: 4
++ * indent-tabs-mode: nil
++ * End:
++ */
+Index: head-2008-11-25/include/xen/interface/xsm/flask_op.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/interface/xsm/flask_op.h 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,45 @@
++/*
++ * This file contains the flask_op hypercall commands and definitions.
++ *
++ * Author: George Coker, <gscoker@alpha.ncsc.mil>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2,
++ * as published by the Free Software Foundation.
++ */
++
++#ifndef __FLASK_OP_H__
++#define __FLASK_OP_H__
++
++#define FLASK_LOAD 1
++#define FLASK_GETENFORCE 2
++#define FLASK_SETENFORCE 3
++#define FLASK_CONTEXT_TO_SID 4
++#define FLASK_SID_TO_CONTEXT 5
++#define FLASK_ACCESS 6
++#define FLASK_CREATE 7
++#define FLASK_RELABEL 8
++#define FLASK_USER 9
++#define FLASK_POLICYVERS 10
++#define FLASK_GETBOOL 11
++#define FLASK_SETBOOL 12
++#define FLASK_COMMITBOOLS 13
++#define FLASK_MLS 14
++#define FLASK_DISABLE 15
++#define FLASK_GETAVC_THRESHOLD 16
++#define FLASK_SETAVC_THRESHOLD 17
++#define FLASK_AVC_HASHSTATS 18
++#define FLASK_AVC_CACHESTATS 19
++#define FLASK_MEMBER 20
++
++#define FLASK_LAST FLASK_MEMBER
++
++typedef struct flask_op {
++ uint32_t cmd;
++ uint32_t size;
++ char *buf;
++} flask_op_t;
++
++DEFINE_XEN_GUEST_HANDLE(flask_op_t);
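A hedged usage sketch (HYPERVISOR_xsm_op is an assumed wrapper for the flask_op hypercall, and the return convention is an assumption to verify against the hypervisor side):

    int flask_getenforce(void)
    {
        flask_op_t op = {
            .cmd  = FLASK_GETENFORCE,
            .size = 0,
            .buf  = NULL,
        };

        /* assumed: >= 0 is the enforcing mode, < 0 an error */
        return HYPERVISOR_xsm_op(&op);
    }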
++
++#endif
--- /dev/null
+Subject: xen3 xen-arch
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+List of files having Xen derivatives (perhaps created during the merging
+of newer kernel versions), for xen-port-patches.py to pick up (i.e. this
+must be retained here until the XenSource tree has these in the right
+places):
++++ linux/arch/x86/kernel/acpi/sleep-xen.c
++++ linux/arch/x86/kernel/cpu/common_64-xen.c
++++ linux/arch/x86/kernel/e820-xen.c
++++ linux/arch/x86/kernel/head-xen.c
++++ linux/arch/x86/kernel/head32-xen.c
++++ linux/arch/x86/kernel/ioport-xen.c
++++ linux/arch/x86/kernel/ipi-xen.c
++++ linux/arch/x86/kernel/ldt-xen.c
++++ linux/arch/x86/kernel/mpparse-xen.c
++++ linux/arch/x86/kernel/pci-nommu-xen.c
++++ linux/arch/x86/kernel/process-xen.c
++++ linux/arch/x86/kernel/setup-xen.c
++++ linux/arch/x86/kernel/setup_percpu-xen.c
++++ linux/arch/x86/kernel/smp-xen.c
++++ linux/arch/x86/mm/fault-xen.c
++++ linux/arch/x86/mm/ioremap-xen.c
++++ linux/arch/x86/mm/pageattr-xen.c
++++ linux/arch/x86/mm/pat-xen.c
++++ linux/arch/x86/mm/pgtable-xen.c
++++ linux/arch/x86/vdso/vdso32-setup-xen.c
++++ linux/drivers/char/mem-xen.c
++++ linux/include/asm-x86/mach-xen/asm/desc.h
++++ linux/include/asm-x86/mach-xen/asm/dma-mapping.h
++++ linux/include/asm-x86/mach-xen/asm/fixmap.h
++++ linux/include/asm-x86/mach-xen/asm/io.h
++++ linux/include/asm-x86/mach-xen/asm/irq_vectors.h
++++ linux/include/asm-x86/mach-xen/asm/irqflags.h
++++ linux/include/asm-x86/mach-xen/asm/mmu_context.h
++++ linux/include/asm-x86/mach-xen/asm/page.h
++++ linux/include/asm-x86/mach-xen/asm/pci.h
++++ linux/include/asm-x86/mach-xen/asm/pgalloc.h
++++ linux/include/asm-x86/mach-xen/asm/pgtable.h
++++ linux/include/asm-x86/mach-xen/asm/processor.h
++++ linux/include/asm-x86/mach-xen/asm/segment.h
++++ linux/include/asm-x86/mach-xen/asm/smp.h
++++ linux/include/asm-x86/mach-xen/asm/spinlock.h
++++ linux/include/asm-x86/mach-xen/asm/swiotlb.h
++++ linux/include/asm-x86/mach-xen/asm/system.h
++++ linux/include/asm-x86/mach-xen/asm/tlbflush.h
++++ linux/include/asm-x86/mach-xen/asm/xor.h
+
+List of files folded into their native counterparts (and hence removed
+from this patch for xen-port-patches.py to not needlessly pick them up;
+for reference, prefixed with the version the removal occurred):
+2.6.18/include/asm-x86/mach-xen/asm/pgtable-2level.h
+2.6.18/include/asm-x86/mach-xen/asm/pgtable-2level-defs.h
+2.6.19/include/asm-x86/mach-xen/asm/ptrace.h
+2.6.23/arch/x86/kernel/vsyscall-note_32-xen.S
+2.6.23/include/asm-x86/mach-xen/asm/ptrace_64.h
+2.6.24/arch/x86/kernel/early_printk_32-xen.c
+2.6.24/include/asm-x86/mach-xen/asm/arch_hooks_64.h
+2.6.24/include/asm-x86/mach-xen/asm/bootsetup_64.h
+2.6.24/include/asm-x86/mach-xen/asm/mmu_32.h
+2.6.24/include/asm-x86/mach-xen/asm/mmu_64.h
+2.6.24/include/asm-x86/mach-xen/asm/nmi_64.h
+2.6.24/include/asm-x86/mach-xen/asm/setup.h
+2.6.24/include/asm-x86/mach-xen/asm/time_64.h (added in 2.6.20)
+2.6.24/include/asm-x86/mach-xen/mach_timer.h
+2.6.25/arch/x86/ia32/syscall32-xen.c
+2.6.25/arch/x86/ia32/syscall32_syscall-xen.S
+2.6.25/arch/x86/ia32/vsyscall-int80.S
+2.6.25/arch/x86/kernel/acpi/boot-xen.c
+2.6.25/include/asm-x86/mach-xen/asm/msr.h
+2.6.25/include/asm-x86/mach-xen/asm/page_32.h
+2.6.25/include/asm-x86/mach-xen/asm/spinlock_32.h
+2.6.25/include/asm-x86/mach-xen/asm/timer.h (added in 2.6.24)
+2.6.25/include/asm-x86/mach-xen/asm/timer_64.h
+2.6.25/include/asm-x86/mach-xen/mach_time.h
+2.6.26/arch/x86/kernel/pci-dma_32-xen.c
+2.6.26/arch/x86/kernel/pci-swiotlb_64-xen.c
+2.6.26/include/asm-x86/mach-xen/asm/dma-mapping_32.h
+2.6.26/include/asm-x86/mach-xen/asm/dma-mapping_64.h
+2.6.26/include/asm-x86/mach-xen/asm/nmi.h (added in 2.6.24)
+2.6.26/include/asm-x86/mach-xen/asm/scatterlist.h (added in 2.6.24)
+2.6.26/include/asm-x86/mach-xen/asm/scatterlist_32.h
+2.6.26/include/xen/xencomm.h
+2.6.27/arch/x86/kernel/e820_32-xen.c
+2.6.27/include/asm-x86/mach-xen/asm/e820.h (added in 2.6.24)
+2.6.27/include/asm-x86/mach-xen/asm/e820_64.h
+2.6.27/include/asm-x86/mach-xen/asm/hw_irq.h (added in 2.6.24)
+2.6.27/include/asm-x86/mach-xen/asm/hw_irq_32.h
+2.6.27/include/asm-x86/mach-xen/asm/hw_irq_64.h
+2.6.27/include/asm-x86/mach-xen/asm/io_32.h
+2.6.27/include/asm-x86/mach-xen/asm/io_64.h
+2.6.27/include/asm-x86/mach-xen/asm/irq.h (added in 2.6.24)
+2.6.27/include/asm-x86/mach-xen/asm/irq_64.h
+2.6.27.8/include/asm-x86/mach-xen/asm/pci_64.h
+
+Index: head-2008-11-25/arch/x86/kernel/acpi/processor_extcntl_xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/acpi/processor_extcntl_xen.c 2008-10-01 15:43:24.000000000 +0200
+@@ -0,0 +1,209 @@
++/*
++ * processor_extcntl_xen.c - interface to notify Xen
++ *
++ * Copyright (C) 2008, Intel Corporation
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or (at
++ * your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful, but
++ * WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
++ * General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License along
++ * with this program; if not, write to the Free Software Foundation, Inc.,
++ * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/acpi.h>
++#include <linux/pm.h>
++#include <linux/cpu.h>
++
++#include <linux/cpufreq.h>
++#include <acpi/processor.h>
++#include <asm/hypercall.h>
++
++static int xen_cx_notifier(struct acpi_processor *pr, int action)
++{
++ int ret, count = 0, i;
++ xen_platform_op_t op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u.set_pminfo.id = pr->acpi_id,
++ .u.set_pminfo.type = XEN_PM_CX,
++ };
++ struct xen_processor_cx *data, *buf;
++ struct acpi_processor_cx *cx;
++
++ if (action == PROCESSOR_PM_CHANGE)
++ return -EINVAL;
++
++ /* Convert to the Xen-defined structure and issue the hypercall */
++ buf = kzalloc(pr->power.count * sizeof(struct xen_processor_cx),
++ GFP_KERNEL);
++ if (!buf)
++ return -ENOMEM;
++
++ data = buf;
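++ /* ACPI C-state tables are 1-based: states[1..count] hold C1..Cn;
++ * states[0] is unused since C0 is simply "running". */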
++ for (i = 1; i <= pr->power.count; i++) {
++ cx = &pr->power.states[i];
++ /* Skip invalid cstate entry */
++ if (!cx->valid)
++ continue;
++
++ data->type = cx->type;
++ data->latency = cx->latency;
++ data->power = cx->power;
++ data->reg.space_id = cx->reg.space_id;
++ data->reg.bit_width = cx->reg.bit_width;
++ data->reg.bit_offset = cx->reg.bit_offset;
++ data->reg.access_size = cx->reg.reserved;
++ data->reg.address = cx->reg.address;
++
++ /* Get dependency relationships */
++ if (cx->csd_count) {
++ printk("Wow! _CSD is found. Not support for now!\n");
++ kfree(buf);
++ return -EINVAL;
++ } else {
++ data->dpcnt = 0;
++ set_xen_guest_handle(data->dp, NULL);
++ }
++
++ data++;
++ count++;
++ }
++
++ if (!count) {
++ printk("No available Cx info for cpu %d\n", pr->acpi_id);
++ kfree(buf);
++ return -EINVAL;
++ }
++
++ op.u.set_pminfo.power.count = count;
++ op.u.set_pminfo.power.flags.bm_control = pr->flags.bm_control;
++ op.u.set_pminfo.power.flags.bm_check = pr->flags.bm_check;
++ op.u.set_pminfo.power.flags.has_cst = pr->flags.has_cst;
++ op.u.set_pminfo.power.flags.power_setup_done = pr->flags.power_setup_done;
++
++ set_xen_guest_handle(op.u.set_pminfo.power.states, buf);
++ ret = HYPERVISOR_platform_op(&op);
++ kfree(buf);
++ return ret;
++}
++
++static int xen_px_notifier(struct acpi_processor *pr, int action)
++{
++ int ret = -EINVAL;
++ xen_platform_op_t op = {
++ .cmd = XENPF_set_processor_pminfo,
++ .interface_version = XENPF_INTERFACE_VERSION,
++ .u.set_pminfo.id = pr->acpi_id,
++ .u.set_pminfo.type = XEN_PM_PX,
++ };
++ struct xen_processor_performance *perf;
++ struct xen_processor_px *states = NULL;
++ struct acpi_processor_performance *px;
++ struct acpi_psd_package *pdomain;
++
++ if (!pr)
++ return -EINVAL;
++
++ perf = &op.u.set_pminfo.perf;
++ px = pr->performance;
++
++ switch(action) {
++ case PROCESSOR_PM_CHANGE:
++ /* _PPC (platform performance limit) changed at run time */
++ perf->flags = XEN_PX_PPC;
++ perf->platform_limit = pr->performance_platform_limit;
++
++ ret = HYPERVISOR_platform_op(&op);
++ break;
++
++ case PROCESSOR_PM_INIT:
++ /* px normal init */
++ perf->flags = XEN_PX_PPC |
++ XEN_PX_PCT |
++ XEN_PX_PSS |
++ XEN_PX_PSD;
++
++ /* ppc */
++ perf->platform_limit = pr->performance_platform_limit;
++
++ /* pct */
++ xen_convert_pct_reg(&perf->control_register, &px->control_register);
++ xen_convert_pct_reg(&perf->status_register, &px->status_register);
++
++ /* pss */
++ perf->state_count = px->state_count;
++ states = kzalloc(px->state_count*sizeof(xen_processor_px_t),GFP_KERNEL);
++ if (!states)
++ return -ENOMEM;
++ xen_convert_pss_states(states, px->states, px->state_count);
++ set_xen_guest_handle(perf->states, states);
++
++ /* psd */
++ pdomain = &px->domain_info;
++ xen_convert_psd_pack(&perf->domain_info, pdomain);
++ if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_ALL;
++ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_ANY;
++ else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
++ perf->shared_type = CPUFREQ_SHARED_TYPE_HW;
++ else {
++ ret = -ENODEV;
++ kfree(states);
++ break;
++ }
++
++ ret = HYPERVISOR_platform_op(&op);
++ kfree(states);
++ break;
++
++ default:
++ break;
++ }
++
++ return ret;
++}
++
++static int xen_tx_notifier(struct acpi_processor *pr, int action)
++{
++ return -EINVAL;
++}
++static int xen_hotplug_notifier(struct acpi_processor *pr, int event)
++{
++ return -EINVAL;
++}
++
++static struct processor_extcntl_ops xen_extcntl_ops = {
++ .hotplug = xen_hotplug_notifier,
++};
++
++void arch_acpi_processor_init_extcntl(const struct processor_extcntl_ops **ops)
++{
++ unsigned int pmbits = (xen_start_info->flags & SIF_PM_MASK) >> 8;
++
++ if (!pmbits)
++ return;
++ if (pmbits & XEN_PROCESSOR_PM_CX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_IDLE] = xen_cx_notifier;
++ if (pmbits & XEN_PROCESSOR_PM_PX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_PERF] = xen_px_notifier;
++ if (pmbits & XEN_PROCESSOR_PM_TX)
++ xen_extcntl_ops.pm_ops[PM_TYPE_THR] = xen_tx_notifier;
++
++ *ops = &xen_extcntl_ops;
++}
++EXPORT_SYMBOL(arch_acpi_processor_init_extcntl);
+Index: head-2008-11-25/arch/x86/kernel/acpi/sleep_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/acpi/sleep_32-xen.c 2008-04-15 09:29:41.000000000 +0200
+@@ -0,0 +1,113 @@
++/*
++ * sleep.c - x86-specific ACPI sleep support.
++ *
++ * Copyright (C) 2001-2003 Patrick Mochel
++ * Copyright (C) 2001-2003 Pavel Machek <pavel@suse.cz>
++ */
++
++#include <linux/acpi.h>
++#include <linux/bootmem.h>
++#include <linux/dmi.h>
++#include <linux/cpumask.h>
++
++#include <asm/smp.h>
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if (!acpi_wakeup_address)
++ return 1;
++ memcpy((void *)acpi_wakeup_address, &wakeup_start,
++ &wakeup_end - &wakeup_start);
++ acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++ return 0;
++}
++
++/*
++ * acpi_restore_state - undo effects of acpi_save_state_mem
++ */
++void acpi_restore_state_mem(void)
++{
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page from the first 1MB of memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16MB pages, but not
++ * <1MB pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE) {
++ printk(KERN_ERR
++ "ACPI: Wakeup code way too big, S3 disabled.\n");
++ return;
++ }
++
++ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++ if (!acpi_wakeup_address)
++ printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++ while ((str != NULL) && (*str != '\0')) {
++ if (strncmp(str, "s3_bios", 7) == 0)
++ acpi_video_flags = 1;
++ if (strncmp(str, "s3_mode", 7) == 0)
++ acpi_video_flags |= 2;
++ str = strchr(str, ',');
++ if (str != NULL)
++ str += strspn(str, ", \t");
++ }
++ return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++
++static __init int reset_videomode_after_s3(struct dmi_system_id *d)
++{
++ acpi_video_flags |= 2;
++ return 0;
++}
++
++static __initdata struct dmi_system_id acpisleep_dmi_table[] = {
++ { /* Reset video mode after returning from ACPI S3 sleep */
++ .callback = reset_videomode_after_s3,
++ .ident = "Toshiba Satellite 4030cdt",
++ .matches = {
++ DMI_MATCH(DMI_PRODUCT_NAME, "S4030CDT/4.3"),
++ },
++ },
++ {}
++};
++
++static int __init acpisleep_dmi_init(void)
++{
++ dmi_check_system(acpisleep_dmi_table);
++ return 0;
++}
++
++core_initcall(acpisleep_dmi_init);
++#endif /* CONFIG_ACPI_PV_SLEEP */
+Index: head-2008-11-25/arch/x86/kernel/apic_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/apic_32-xen.c 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,155 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/i8253.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++#include <mach_apicdef.h>
++#include <mach_ipi.h>
++
++#include "io_ports.h"
++
++#ifndef CONFIG_XEN
++/*
++ * cpu_mask that denotes the CPUs that needs timer interrupt coming in as
++ * IPIs in place of local APIC timers
++ */
++static cpumask_t timer_bcast_ipi;
++#endif
++
++/*
++ * Knob to control our willingness to enable the local APIC.
++ */
++int enable_local_apic __initdata = 0; /* -1=force-disable, +1=force-enable */
++
++/*
++ * Debug level
++ */
++int apic_verbosity;
++
++#ifndef CONFIG_XEN
++static int modern_apic(void)
++{
++ unsigned int lvr, version;
++ /* AMD systems use old APIC versions, so check the CPU */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 0xf)
++ return 1;
++ lvr = apic_read(APIC_LVR);
++ version = GET_APIC_VERSION(lvr);
++ return version >= 0x14;
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * 'What should we do if we get a hw irq event on an illegal vector?'
++ * Each architecture has to answer this for itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But only ack when the APIC is enabled -AK
++ */
++ if (cpu_has_apic)
++ ack_APIC_irq();
++}
++
++int get_physical_broadcast(void)
++{
++ return 0xff;
++}
++
++#ifndef CONFIG_XEN
++#ifndef CONFIG_SMP
++static void up_apic_timer_interrupt_call(struct pt_regs *regs)
++{
++ int cpu = smp_processor_id();
++
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ per_cpu(irq_stat, cpu).apic_timer_irqs++;
++
++ smp_local_timer_interrupt(regs);
++}
++#endif
++
++void smp_send_timer_broadcast_ipi(struct pt_regs *regs)
++{
++ cpumask_t mask;
++
++ cpus_and(mask, cpu_online_map, timer_bcast_ipi);
++ if (!cpus_empty(mask)) {
++#ifdef CONFIG_SMP
++ send_IPI_mask(mask, LOCAL_TIMER_VECTOR);
++#else
++ /*
++ * We can directly call the apic timer interrupt handler
++ * in UP case. Minus all irq related functions
++ */
++ up_apic_timer_interrupt_call(regs);
++#endif
++ }
++}
++#endif
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 0;
++}
+Index: head-2008-11-25/arch/x86/kernel/cpu/common-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/cpu/common-xen.c 2007-12-10 08:47:31.000000000 +0100
+@@ -0,0 +1,743 @@
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/delay.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <linux/bootmem.h>
++#include <asm/semaphore.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/msr.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++#include <asm/mtrr.h>
++#include <asm/mce.h>
++#ifdef CONFIG_X86_LOCAL_APIC
++#include <asm/mpspec.h>
++#include <asm/apic.h>
++#include <mach_apic.h>
++#else
++#ifdef CONFIG_XEN
++#define phys_pkg_id(a,b) a
++#endif
++#endif
++#include <asm/hypervisor.h>
++
++#include "cpu.h"
++
++DEFINE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++EXPORT_PER_CPU_SYMBOL(cpu_gdt_descr);
++
++#ifndef CONFIG_XEN
++DEFINE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++EXPORT_PER_CPU_SYMBOL(cpu_16bit_stack);
++#endif
++
++static int cachesize_override __cpuinitdata = -1;
++static int disable_x86_fxsr __cpuinitdata;
++static int disable_x86_serial_nr __cpuinitdata = 1;
++static int disable_x86_sep __cpuinitdata;
++
++struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
++
++extern int disable_pse;
++
++static void default_init(struct cpuinfo_x86 * c)
++{
++ /* Not much we can do here... */
++ /* Check if at least it has cpuid */
++ if (c->cpuid_level == -1) {
++ /* No cpuid. It must be an ancient CPU */
++ if (c->x86 == 4)
++ strcpy(c->x86_model_id, "486");
++ else if (c->x86 == 3)
++ strcpy(c->x86_model_id, "386");
++ }
++}
++
++static struct cpu_dev default_cpu = {
++ .c_init = default_init,
++ .c_vendor = "Unknown",
++};
++static struct cpu_dev * this_cpu = &default_cpu;
++
++static int __init cachesize_setup(char *str)
++{
++ get_option (&str, &cachesize_override);
++ return 1;
++}
++__setup("cachesize=", cachesize_setup);
++
++int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++ char *p, *q;
++
++ if (cpuid_eax(0x80000000) < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++
++ /* Intel chips right-justify this string for some dumb reason;
++ undo that brain damage */
++ p = q = &c->x86_model_id[0];
++ while ( *p == ' ' )
++ p++;
++ if ( p != q ) {
++ while ( *p )
++ *q++ = *p++;
++ while ( q <= &c->x86_model_id[48] )
++ *q++ = '\0'; /* Zero-pad the rest */
++ }
++
++ return 1;
++}
++
++
++void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, ecx, edx, l2size;
++
++ n = cpuid_eax(0x80000000);
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &dummy, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++ c->x86_cache_size=(ecx>>24)+(edx>>24);
++ }
++
++ if (n < 0x80000006) /* Some chips just have a large L1. */
++ return;
++
++ ecx = cpuid_ecx(0x80000006);
++ l2size = ecx >> 16;
++
++ /* do processor-specific cache resizing */
++ if (this_cpu->c_size_cache)
++ l2size = this_cpu->c_size_cache(c,l2size);
++
++ /* Allow user to override all this if necessary. */
++ if (cachesize_override != -1)
++ l2size = cachesize_override;
++
++ if ( l2size == 0 )
++ return; /* Again, no L2 cache is possible */
++
++ c->x86_cache_size = l2size;
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ l2size, ecx & 0xFF);
++}
++
++/* Naming convention should be: <Name> [(<Codename>)] */
++/* This table is only used if init_<vendor>() below doesn't set the model name; */
++/* in particular, if CPUID levels 0x80000002..4 are supported, this isn't used */
++
++/* Look up CPU names by table lookup. */
++static char __cpuinit *table_lookup_model(struct cpuinfo_x86 *c)
++{
++ struct cpu_model_info *info;
++
++ if ( c->x86_model >= 16 )
++ return NULL; /* Range check */
++
++ if (!this_cpu)
++ return NULL;
++
++ info = this_cpu->c_models;
++
++ while (info && info->family) {
++ if (info->family == c->x86)
++ return info->model_names[c->x86_model];
++ info++;
++ }
++ return NULL; /* Not found */
++}
++
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c, int early)
++{
++ char *v = c->x86_vendor_id;
++ int i;
++ static int printed;
++
++ for (i = 0; i < X86_VENDOR_NUM; i++) {
++ if (cpu_devs[i]) {
++ if (!strcmp(v,cpu_devs[i]->c_ident[0]) ||
++ (cpu_devs[i]->c_ident[1] &&
++ !strcmp(v,cpu_devs[i]->c_ident[1]))) {
++ c->x86_vendor = i;
++ if (!early)
++ this_cpu = cpu_devs[i];
++ return;
++ }
++ }
++ }
++ if (!printed) {
++ printed++;
++ printk(KERN_ERR "CPU: Vendor unknown, using generic init.\n");
++ printk(KERN_ERR "CPU: Your system may be unstable.\n");
++ }
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ this_cpu = &default_cpu;
++}
++
++
++static int __init x86_fxsr_setup(char * s)
++{
++ disable_x86_fxsr = 1;
++ return 1;
++}
++__setup("nofxsr", x86_fxsr_setup);
++
++
++static int __init x86_sep_setup(char * s)
++{
++ disable_x86_sep = 1;
++ return 1;
++}
++__setup("nosep", x86_sep_setup);
++
++
++/* Standard macro to see if a specific flag is changeable */
++static inline int flag_is_changeable_p(u32 flag)
++{
++ u32 f1, f2;
++
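++ /* Toggle the flag in a copy of EFLAGS, load that, then read EFLAGS
++ * back: the bit only sticks if the CPU honours it. Toggling
++ * EFLAGS.ID this way is the architectural "is CPUID present" probe. */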
++ asm("pushfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "movl %0,%1\n\t"
++ "xorl %2,%0\n\t"
++ "pushl %0\n\t"
++ "popfl\n\t"
++ "pushfl\n\t"
++ "popl %0\n\t"
++ "popfl\n\t"
++ : "=&r" (f1), "=&r" (f2)
++ : "ir" (flag));
++
++ return ((f1^f2) & flag) != 0;
++}
++
++
++/* Probe for the CPUID instruction */
++static int __cpuinit have_cpuid_p(void)
++{
++ return flag_is_changeable_p(X86_EFLAGS_ID);
++}
++
++/* Do minimum CPU detection early.
++ Fields really needed: vendor, cpuid_level, family, model, mask, cache alignment.
++ The others are not touched to avoid unwanted side effects.
++
++ WARNING: this function is only called on the BP. Don't add code here
++ that is supposed to run on all CPUs. */
++static void __init early_cpu_detect(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ c->x86_cache_alignment = 32;
++
++ if (!have_cpuid_p())
++ return;
++
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 1);
++
++ c->x86 = 4;
++ if (c->cpuid_level >= 0x00000001) {
++ u32 junk, tfms, cap0, misc;
++ cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++ if (cap0 & (1<<19))
++ c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
++ }
++}
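Worked example of the decode above: tfms = 0x000006f6 yields family (0x6f6 >> 8) & 15 = 6, base model (0x6f6 >> 4) & 15 = 15, extended model ((0x6f6 >> 16) & 0xF) << 4 = 0, and stepping 0x6f6 & 15 = 6, i.e. family 6, model 15: an Intel Core 2. The extended-family addend ((tfms >> 20) & 0xff) only applies to family 0xf parts such as the Pentium 4 and AMD K8.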
++
++void __cpuinit generic_identify(struct cpuinfo_x86 * c)
++{
++ u32 tfms, xlvl;
++ int ebx;
++
++ if (have_cpuid_p()) {
++ /* Get vendor name */
++ cpuid(0x00000000, &c->cpuid_level,
++ (int *)&c->x86_vendor_id[0],
++ (int *)&c->x86_vendor_id[8],
++ (int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c, 0);
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if ( c->cpuid_level >= 0x00000001 ) {
++ u32 capability, excap;
++ cpuid(0x00000001, &tfms, &ebx, &excap, &capability);
++ c->x86_capability[0] = capability;
++ c->x86_capability[4] = excap;
++ c->x86 = (tfms >> 8) & 15;
++ c->x86_model = (tfms >> 4) & 15;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ c->x86_mask = tfms & 15;
++#ifdef CONFIG_X86_HT
++ c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
++#else
++ c->apicid = (ebx >> 24) & 0xFF;
++#endif
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ if ( (xlvl & 0xffff0000) == 0x80000000 ) {
++ if ( xlvl >= 0x80000001 ) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if ( xlvl >= 0x80000004 )
++ get_model_name(c); /* Default name */
++ }
++ }
++
++ early_intel_workaround(c);
++
++#ifdef CONFIG_X86_HT
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
++
++static void __cpuinit squash_the_stupid_serial_number(struct cpuinfo_x86 *c)
++{
++ if (cpu_has(c, X86_FEATURE_PN) && disable_x86_serial_nr ) {
++ /* Disable processor serial number */
++ unsigned long lo,hi;
++ rdmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ lo |= 0x200000;
++ wrmsr(MSR_IA32_BBL_CR_CTL,lo,hi);
++ printk(KERN_NOTICE "CPU serial number disabled.\n");
++ clear_bit(X86_FEATURE_PN, c->x86_capability);
++
++ /* Disabling the serial number may affect the cpuid level */
++ c->cpuid_level = cpuid_eax(0);
++ }
++}
++
++static int __init x86_serial_nr_setup(char *s)
++{
++ disable_x86_serial_nr = 0;
++ return 1;
++}
++__setup("serialnumber", x86_serial_nr_setup);
++
++
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->cpuid_level = -1; /* CPUID not detected */
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_max_cores = 1;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ if (!have_cpuid_p()) {
++ /* First of all, decide if this is a 486 or higher */
++ /* It's a 486 if we can modify the AC flag */
++ if ( flag_is_changeable_p(X86_EFLAGS_AC) )
++ c->x86 = 4;
++ else
++ c->x86 = 3;
++ }
++
++ generic_identify(c);
++
++ printk(KERN_DEBUG "CPU: After generic identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ if (this_cpu->c_identify) {
++ this_cpu->c_identify(c);
++
++ printk(KERN_DEBUG "CPU: After vendor identify, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++ }
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ if (this_cpu->c_init)
++ this_cpu->c_init(c);
++
++ /* Disable the PN if appropriate */
++ squash_the_stupid_serial_number(c);
++
++ /*
++ * The vendor-specific functions might have changed features. Now
++ * we do "generic changes."
++ */
++
++ /* TSC disabled? */
++ if ( tsc_disable )
++ clear_bit(X86_FEATURE_TSC, c->x86_capability);
++
++ /* FXSR disabled? */
++ if (disable_x86_fxsr) {
++ clear_bit(X86_FEATURE_FXSR, c->x86_capability);
++ clear_bit(X86_FEATURE_XMM, c->x86_capability);
++ }
++
++ /* SEP disabled? */
++ if (disable_x86_sep)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++
++ if (disable_pse)
++ clear_bit(X86_FEATURE_PSE, c->x86_capability);
++
++ /* If the model name is still unset, do table lookup. */
++ if ( !c->x86_model_id[0] ) {
++ char *p;
++ p = table_lookup_model(c);
++ if ( p )
++ strcpy(c->x86_model_id, p);
++ else
++ /* Last resort... */
++ sprintf(c->x86_model_id, "%02x/%02x",
++ c->x86, c->x86_model);
++ }
++
++ /* Now the feature flags better reflect actual CPU features! */
++
++ printk(KERN_DEBUG "CPU: After all inits, caps:");
++ for (i = 0; i < NCAPINTS; i++)
++ printk(" %08lx", c->x86_capability[i]);
++ printk("\n");
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if ( c != &boot_cpu_data ) {
++ /* AND the already accumulated flags with these */
++ for ( i = 0 ; i < NCAPINTS ; i++ )
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++ /* Init Machine Check Exception if available. */
++ mcheck_init(c);
++
++ if (c == &boot_cpu_data)
++ sysenter_setup();
++ enable_sep_cpu();
++
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++}
++
++#ifdef CONFIG_X86_HT
++void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++ if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ return;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++ } else if (smp_num_siblings > 1 ) {
++
++ if (smp_num_siblings > NR_CPUS) {
++ printk(KERN_WARNING "CPU: Unsupported number of "
++ "siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
++
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
++ c->phys_proc_id);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++ index_msb = get_count_order(smp_num_siblings) ;
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
++ ((1 << core_bits) - 1);
++
++ if (c->x86_max_cores > 1)
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n",
++ c->cpu_core_id);
++ }
++}
++#endif
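Worked example of the arithmetic above: on a 2-core/2-thread package, CPUID.1 EBX[23:16] reports 4 siblings, so index_msb = get_count_order(4) = 2 and phys_proc_id = apicid >> 2. After dividing by x86_max_cores = 2 there are 2 threads per core, so index_msb = 1, core_bits = 1, and cpu_core_id = (apicid >> 1) & 1; the APIC ID is simply sliced into thread, core, and package fields from the low bits up.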
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ char *vendor = NULL;
++
++ if (c->x86_vendor < X86_VENDOR_NUM)
++ vendor = this_cpu->c_vendor;
++ else if (c->cpuid_level >= 0)
++ vendor = c->x86_vendor_id;
++
++ if (vendor && strncmp(c->x86_model_id, vendor, strlen(vendor)))
++ printk("%s ", vendor);
++
++ if (!c->x86_model_id[0])
++ printk("%d86", c->x86);
++ else
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++/* This is hacky. :)
++ * We're emulating future behavior.
++ * In the future, the cpu-specific init functions will be called implicitly
++ * via the magic of initcalls.
++ * They will insert themselves into the cpu_devs structure.
++ * Then, when cpu_init() is called, we can just iterate over that array.
++ */
++
++extern int intel_cpu_init(void);
++extern int cyrix_init_cpu(void);
++extern int nsc_init_cpu(void);
++extern int amd_init_cpu(void);
++extern int centaur_init_cpu(void);
++extern int transmeta_init_cpu(void);
++extern int rise_init_cpu(void);
++extern int nexgen_init_cpu(void);
++extern int umc_init_cpu(void);
++
++void __init early_cpu_init(void)
++{
++ intel_cpu_init();
++ cyrix_init_cpu();
++ nsc_init_cpu();
++ amd_init_cpu();
++ centaur_init_cpu();
++ transmeta_init_cpu();
++ rise_init_cpu();
++ nexgen_init_cpu();
++ umc_init_cpu();
++ early_cpu_detect();
++
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ /* pse is not compatible with on-the-fly unmapping,
++ * disable it even if the cpus claim to support it.
++ */
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++#endif
++}
++
++static void __cpuinit cpu_gdt_init(const struct Xgt_desc_struct *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_lowmem_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) / 8))
++ BUG();
++}
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ */
++void __cpuinit cpu_init(void)
++{
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct * t = &per_cpu(init_tss, cpu);
++#endif
++ struct thread_struct *thread = &current->thread;
++ struct desc_struct *gdt;
++ struct Xgt_desc_struct *cpu_gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++
++ if (cpu_test_and_set(cpu, cpu_initialized)) {
++ printk(KERN_WARNING "CPU#%d already initialized!\n", cpu);
++ for (;;) local_irq_enable();
++ }
++ printk(KERN_INFO "Initializing CPU#%d\n", cpu);
++
++ if (cpu_has_vme || cpu_has_de)
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++ if (tsc_disable && cpu_has_tsc) {
++ printk(KERN_NOTICE "Disabling TSC...\n");
++ /**** FIX-HPA: DOES THIS REALLY BELONG HERE? ****/
++ clear_bit(X86_FEATURE_TSC, boot_cpu_data.x86_capability);
++ set_in_cr4(X86_CR4_TSD);
++ }
++
++#ifndef CONFIG_XEN
++ /* The CPU hotplug case */
++ if (cpu_gdt_descr->address) {
++ gdt = (struct desc_struct *)cpu_gdt_descr->address;
++ memset(gdt, 0, PAGE_SIZE);
++ goto old_gdt;
++ }
++ /*
++ * This is a horrible hack to allocate the GDT. The problem
++ * is that cpu_init() is called really early for the boot CPU
++ * (and hence needs bootmem) but much later for the secondary
++ * CPUs, when bootmem will have gone away
++ */
++ if (NODE_DATA(0)->bdata->node_bootmem_map) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++ } else {
++ gdt = (struct desc_struct *)get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n", cpu);
++ for (;;)
++ local_irq_enable();
++ }
++ }
++old_gdt:
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ /* Set up GDT entry for 16bit stack */
++ *(__u64 *)(&gdt[GDT_ENTRY_ESPFIX_SS]) |=
++ ((((__u64)stk16_off) << 16) & 0x000000ffffff0000ULL) |
++ ((((__u64)stk16_off) << 32) & 0xff00000000000000ULL) |
++ (CPU_16BIT_STACK_SIZE - 1);
++
++ cpu_gdt_descr->size = GDT_SIZE - 1;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++#else
++ if (cpu == 0 && cpu_gdt_descr->address == 0) {
++ gdt = (struct desc_struct *)alloc_bootmem_pages(PAGE_SIZE);
++ /* alloc_bootmem_pages panics on failure, so no check */
++ memset(gdt, 0, PAGE_SIZE);
++
++ memcpy(gdt, cpu_gdt_table, GDT_SIZE);
++
++ cpu_gdt_descr->size = GDT_SIZE;
++ cpu_gdt_descr->address = (unsigned long)gdt;
++ }
++#endif
++
++ cpu_gdt_init(cpu_gdt_descr);
++
++ /*
++ * Set up and load the per-CPU TSS and LDT
++ */
++ atomic_inc(&init_mm.mm_count);
++ current->active_mm = &init_mm;
++ if (current->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, current);
++
++ load_esp0(t, thread);
++
++ load_LDT(&init_mm.context);
++
++#ifdef CONFIG_DOUBLEFAULT
++ /* Set up doublefault TSS pointer in the GDT */
++ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
++#endif
++
++ /* Clear %fs and %gs. */
++ asm volatile ("xorl %eax, %eax; movl %eax, %fs; movl %eax, %gs");
++
++ /* Clear all 6 debug registers: */
++ set_debugreg(0, 0);
++ set_debugreg(0, 1);
++ set_debugreg(0, 2);
++ set_debugreg(0, 3);
++ set_debugreg(0, 6);
++ set_debugreg(0, 7);
++
++ /*
++ * Force FPU initialization:
++ */
++ current_thread_info()->status = 0;
++ clear_used_math();
++ mxcsr_feature_mask_init();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++void __cpuinit cpu_uninit(void)
++{
++ int cpu = raw_smp_processor_id();
++ cpu_clear(cpu, cpu_initialized);
++
++ /* lazy TLB state */
++ per_cpu(cpu_tlbstate, cpu).state = 0;
++ per_cpu(cpu_tlbstate, cpu).active_mm = &init_mm;
++}
++#endif
+Index: head-2008-11-25/arch/x86/kernel/cpu/mtrr/main-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/cpu/mtrr/main-xen.c 2008-01-28 12:24:18.000000000 +0100
+@@ -0,0 +1,198 @@
++#include <linux/init.h>
++#include <linux/proc_fs.h>
++#include <linux/ctype.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <asm/uaccess.h>
++#include <linux/mutex.h>
++
++#include <asm/mtrr.h>
++#include "mtrr.h"
++
++static DEFINE_MUTEX(mtrr_mutex);
++
++void generic_get_mtrr(unsigned int reg, unsigned long *base,
++ unsigned int *size, mtrr_type * type)
++{
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = reg;
++ if (unlikely(HYPERVISOR_platform_op(&op)))
++ memset(&op.u.read_memtype, 0, sizeof(op.u.read_memtype));
++
++ *size = op.u.read_memtype.nr_mfns;
++ *base = op.u.read_memtype.mfn;
++ *type = op.u.read_memtype.type;
++}
++
++struct mtrr_ops generic_mtrr_ops = {
++ .use_intel_if = 1,
++ .get = generic_get_mtrr,
++};
++
++struct mtrr_ops *mtrr_if = &generic_mtrr_ops;
++unsigned int num_var_ranges;
++unsigned int *usage_table;
++
++static void __init set_num_var_ranges(void)
++{
++ struct xen_platform_op op;
++
++ for (num_var_ranges = 0; ; num_var_ranges++) {
++ op.cmd = XENPF_read_memtype;
++ op.u.read_memtype.reg = num_var_ranges;
++ if (HYPERVISOR_platform_op(&op) != 0)
++ break;
++ }
++}
++
++static void __init init_table(void)
++{
++ int i, max;
++
++ max = num_var_ranges;
++ if ((usage_table = kmalloc(max * sizeof *usage_table, GFP_KERNEL))
++ == NULL) {
++ printk(KERN_ERR "mtrr: could not allocate\n");
++ return;
++ }
++ for (i = 0; i < max; i++)
++ usage_table[i] = 0;
++}
++
++int mtrr_add_page(unsigned long base, unsigned long size,
++ unsigned int type, char increment)
++{
++ int error;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ op.cmd = XENPF_add_memtype;
++ op.u.add_memtype.mfn = base;
++ op.u.add_memtype.nr_mfns = size;
++ op.u.add_memtype.type = type;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ mutex_unlock(&mtrr_mutex);
++ BUG_ON(error > 0);
++ return error;
++ }
++
++ if (increment)
++ ++usage_table[op.u.add_memtype.reg];
++
++ mutex_unlock(&mtrr_mutex);
++
++ return op.u.add_memtype.reg;
++}
++
++static int mtrr_check(unsigned long base, unsigned long size)
++{
++ if ((base & (PAGE_SIZE - 1)) || (size & (PAGE_SIZE - 1))) {
++ printk(KERN_WARNING
++ "mtrr: size and base must be multiples of 4 kiB\n");
++ printk(KERN_DEBUG
++ "mtrr: size: 0x%lx base: 0x%lx\n", size, base);
++ dump_stack();
++ return -1;
++ }
++ return 0;
++}
++
++int
++mtrr_add(unsigned long base, unsigned long size, unsigned int type,
++ char increment)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
++ increment);
++}
++
++int mtrr_del_page(int reg, unsigned long base, unsigned long size)
++{
++ unsigned i;
++ mtrr_type ltype;
++ unsigned long lbase;
++ unsigned int lsize;
++ int error = -EINVAL;
++ struct xen_platform_op op;
++
++ mutex_lock(&mtrr_mutex);
++
++ if (reg < 0) {
++ /* Search for existing MTRR */
++ for (i = 0; i < num_var_ranges; ++i) {
++ mtrr_if->get(i, &lbase, &lsize, &ltype);
++ if (lbase == base && lsize == size) {
++ reg = i;
++ break;
++ }
++ }
++ if (reg < 0) {
++ printk(KERN_DEBUG "mtrr: no MTRR for %lx000,%lx000 found\n", base,
++ size);
++ goto out;
++ }
++ }
++ if (usage_table[reg] < 1) {
++ printk(KERN_WARNING "mtrr: reg: %d has count=0\n", reg);
++ goto out;
++ }
++ if (--usage_table[reg] < 1) {
++ op.cmd = XENPF_del_memtype;
++ op.u.del_memtype.handle = 0;
++ op.u.del_memtype.reg = reg;
++ error = HYPERVISOR_platform_op(&op);
++ if (error) {
++ BUG_ON(error > 0);
++ goto out;
++ }
++ }
++ error = reg;
++ out:
++ mutex_unlock(&mtrr_mutex);
++ return error;
++}
++
++int
++mtrr_del(int reg, unsigned long base, unsigned long size)
++{
++ if (mtrr_check(base, size))
++ return -EINVAL;
++ return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
++}
++
++EXPORT_SYMBOL(mtrr_add);
++EXPORT_SYMBOL(mtrr_del);
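Typical driver usage of this interface, with a hypothetical frame buffer address (sketch only):

    /* map a 4 MiB frame buffer at 0xf8000000 write-combining */
    int reg = mtrr_add(0xf8000000UL, 0x400000, MTRR_TYPE_WRCOMB, 1);

    if (reg < 0)
        printk(KERN_WARNING "no WC MTRR, falling back to UC\n");
    /* ... use the region ... */
    if (reg >= 0)
        mtrr_del(reg, 0xf8000000UL, 0x400000);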
++
++void __init mtrr_bp_init(void)
++{
++}
++
++void mtrr_ap_init(void)
++{
++}
++
++static int __init mtrr_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ if (!is_initial_xendomain())
++ return -ENODEV;
++
++ if ((!cpu_has(c, X86_FEATURE_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_K6_MTRR)) &&
++ (!cpu_has(c, X86_FEATURE_CYRIX_ARR)) &&
++ (!cpu_has(c, X86_FEATURE_CENTAUR_MCR)))
++ return -ENODEV;
++
++ set_num_var_ranges();
++ init_table();
++
++ return 0;
++}
++
++subsys_initcall(mtrr_init);
+Index: head-2008-11-25/arch/x86/kernel/entry_32-xen.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/entry_32-xen.S 2007-12-10 08:47:31.000000000 +0100
+@@ -0,0 +1,1238 @@
++/*
++ * linux/arch/i386/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ * This also contains the timer-interrupt handler, as well as all interrupts
++ * and faults that can result in a task-switch.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after a timer-interrupt and after each system call.
++ *
++ * I changed all the .align's to 4 (16 byte alignment), as that's faster
++ * on a 486.
++ *
++ * Stack layout in 'ret_from_system_call':
++ * ptrace needs to have all regs on the stack.
++ * if the order here is changed, it needs to be
++ * updated in fork.c:copy_process, signal.c:do_signal,
++ * ptrace.c and ptrace.h
++ *
++ * 0(%esp) - %ebx
++ * 4(%esp) - %ecx
++ * 8(%esp) - %edx
++ * C(%esp) - %esi
++ * 10(%esp) - %edi
++ * 14(%esp) - %ebp
++ * 18(%esp) - %eax
++ * 1C(%esp) - %ds
++ * 20(%esp) - %es
++ * 24(%esp) - orig_eax
++ * 28(%esp) - %eip
++ * 2C(%esp) - %cs
++ * 30(%esp) - %eflags
++ * 34(%esp) - %oldesp
++ * 38(%esp) - %oldss
++ *
++ * "current" is in register %ebx during any slow entries.
++ */
++
++#include <linux/linkage.h>
++#include <asm/thread_info.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/page.h>
++#include <asm/desc.h>
++#include <asm/dwarf2.h>
++#include "irq_vectors.h"
++#include <xen/interface/xen.h>
++
++#define nr_syscalls ((syscall_table_size)/4)
++
++EBX = 0x00
++ECX = 0x04
++EDX = 0x08
++ESI = 0x0C
++EDI = 0x10
++EBP = 0x14
++EAX = 0x18
++DS = 0x1C
++ES = 0x20
++ORIG_EAX = 0x24
++EIP = 0x28
++CS = 0x2C
++EFLAGS = 0x30
++OLDESP = 0x34
++OLDSS = 0x38
++
++CF_MASK = 0x00000001
++TF_MASK = 0x00000100
++IF_MASK = 0x00000200
++DF_MASK = 0x00000400
++NT_MASK = 0x00004000
++VM_MASK = 0x00020000
++/* Pseudo-eflags. */
++NMI_MASK = 0x80000000
++
++#ifndef CONFIG_XEN
++#define DISABLE_INTERRUPTS cli
++#define ENABLE_INTERRUPTS sti
++#else
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++#define GET_VCPU_INFO movl TI_cpu(%ebp),%esi ; \
++ shl $sizeof_vcpu_shift,%esi ; \
++ addl HYPERVISOR_shared_info,%esi
++#else
++#define GET_VCPU_INFO movl HYPERVISOR_shared_info,%esi
++#endif
++
++#define __DISABLE_INTERRUPTS movb $1,evtchn_upcall_mask(%esi)
++#define __ENABLE_INTERRUPTS movb $0,evtchn_upcall_mask(%esi)
++#define DISABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __DISABLE_INTERRUPTS
++#define ENABLE_INTERRUPTS GET_VCPU_INFO ; \
++ __ENABLE_INTERRUPTS
++#define __TEST_PENDING testb $0xFF,evtchn_upcall_pending(%esi)
++#endif
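In C, the Xen variants above amount to flipping the per-vCPU mask and then checking for events that arrived while masked; a sketch (force_evtchn_callback() is the conventional helper for re-entering the event path, assumed available here):

    static inline void xen_irq_disable(vcpu_info_t *v)
    {
        v->evtchn_upcall_mask = 1;        /* __DISABLE_INTERRUPTS */
        barrier();
    }

    static inline void xen_irq_enable(vcpu_info_t *v)
    {
        v->evtchn_upcall_mask = 0;        /* __ENABLE_INTERRUPTS */
        barrier();                        /* unmask before testing */
        if (v->evtchn_upcall_pending)     /* __TEST_PENDING */
            force_evtchn_callback();      /* deliver anything we missed */
    }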
++
++#ifdef CONFIG_PREEMPT
++#define preempt_stop cli; TRACE_IRQS_OFF
++#else
++#define preempt_stop
++#define resume_kernel restore_nocheck
++#endif
++
++.macro TRACE_IRQS_IRET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off?
++ jz 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++#ifdef CONFIG_VM86
++#define resume_userspace_sig check_userspace
++#else
++#define resume_userspace_sig resume_userspace
++#endif
++
++#define SAVE_ALL \
++ cld; \
++ pushl %es; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET es, 0;*/\
++ pushl %ds; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ /*CFI_REL_OFFSET ds, 0;*/\
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET eax, 0;\
++ pushl %ebp; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebp, 0;\
++ pushl %edi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edi, 0;\
++ pushl %esi; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET esi, 0;\
++ pushl %edx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET edx, 0;\
++ pushl %ecx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ecx, 0;\
++ pushl %ebx; \
++ CFI_ADJUST_CFA_OFFSET 4;\
++ CFI_REL_OFFSET ebx, 0;\
++ movl $(__USER_DS), %edx; \
++ movl %edx, %ds; \
++ movl %edx, %es;
++
++#define RESTORE_INT_REGS \
++ popl %ebx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebx;\
++ popl %ecx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ecx;\
++ popl %edx; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edx;\
++ popl %esi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE esi;\
++ popl %edi; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE edi;\
++ popl %ebp; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE ebp;\
++ popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ CFI_RESTORE eax
++
++#define RESTORE_REGS \
++ RESTORE_INT_REGS; \
++1: popl %ds; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE ds;*/\
++2: popl %es; \
++ CFI_ADJUST_CFA_OFFSET -4;\
++ /*CFI_RESTORE es;*/\
++.section .fixup,"ax"; \
++3: movl $0,(%esp); \
++ jmp 1b; \
++4: movl $0,(%esp); \
++ jmp 2b; \
++.previous; \
++.section __ex_table,"a";\
++ .align 4; \
++ .long 1b,3b; \
++ .long 2b,4b; \
++.previous
++
++#define RING0_INT_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 3*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_EC_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, 4*4;\
++ /*CFI_OFFSET cs, -2*4;*/\
++ CFI_OFFSET eip, -3*4
++
++#define RING0_PTREGS_FRAME \
++ CFI_STARTPROC simple;\
++ CFI_DEF_CFA esp, OLDESP-EBX;\
++ /*CFI_OFFSET cs, CS-OLDESP;*/\
++ CFI_OFFSET eip, EIP-OLDESP;\
++ /*CFI_OFFSET es, ES-OLDESP;*/\
++ /*CFI_OFFSET ds, DS-OLDESP;*/\
++ CFI_OFFSET eax, EAX-OLDESP;\
++ CFI_OFFSET ebp, EBP-OLDESP;\
++ CFI_OFFSET edi, EDI-OLDESP;\
++ CFI_OFFSET esi, ESI-OLDESP;\
++ CFI_OFFSET edx, EDX-OLDESP;\
++ CFI_OFFSET ecx, ECX-OLDESP;\
++ CFI_OFFSET ebx, EBX-OLDESP
++
++ENTRY(ret_from_fork)
++ CFI_STARTPROC
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ call schedule_tail
++ GET_THREAD_INFO(%ebp)
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ pushl $0x0202 # Reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET 4
++ popfl
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp syscall_exit
++ CFI_ENDPROC
++
++/*
++ * Return to user mode is not as complex as all this looks,
++ * but we want the default path for a system call return to
++ * go as quickly as possible which is why some of this is
++ * less clear than it otherwise should be.
++ */
++
++ # userspace resumption stub bypassing syscall exit tracing
++ ALIGN
++ RING0_PTREGS_FRAME
++ret_from_exception:
++ preempt_stop
++ret_from_intr:
++ GET_THREAD_INFO(%ebp)
++check_userspace:
++ movl EFLAGS(%esp), %eax # mix EFLAGS and CS
++ movb CS(%esp), %al
++ testl $(VM_MASK | 2), %eax
++ jz resume_kernel
++ENTRY(resume_userspace)
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
++ # int/exception return?
++ jne work_pending
++ jmp restore_all
++
++#ifdef CONFIG_PREEMPT
++ENTRY(resume_kernel)
++ cli
++ cmpl $0,TI_preempt_count(%ebp) # non-zero preempt_count ?
++ jnz restore_nocheck
++need_resched:
++ movl TI_flags(%ebp), %ecx # need_resched set ?
++ testb $_TIF_NEED_RESCHED, %cl
++ jz restore_all
++ testl $IF_MASK,EFLAGS(%esp) # interrupts off (exception path) ?
++ jz restore_all
++ call preempt_schedule_irq
++ jmp need_resched
++#endif
++ CFI_ENDPROC
++
++/* SYSENTER_RETURN points to after the "sysenter" instruction in
++ the vsyscall page. See vsyscall-sysenter.S, which defines the symbol. */
++
++ # sysenter call handler stub
++ENTRY(sysenter_entry)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA esp, 0
++ CFI_REGISTER esp, ebp
++ movl SYSENTER_stack_esp0(%esp),%esp
++sysenter_past_esp:
++ /*
++ * No need to follow this irqs on/off section: the syscall
++ * disabled irqs and here we enable them straight after entry:
++ */
++ sti
++ pushl $(__USER_DS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ss, 0*/
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esp, 0
++ pushfl
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $(__USER_CS)
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET cs, 0*/
++ /*
++ * Push current_thread_info()->sysenter_return to the stack.
++ * A tiny bit of offset fixup is necessary: 4*4 accounts for the 4
++ * words pushed above, and +8 for copy_thread's esp0 sitting 8 bytes
++ * below the top of the stack; -THREAD_SIZE then reaches down to the
++ * thread_info at the base of the stack.
++ */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eip, 0
++
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
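
The cmpl/jae pair above is the whole security check: a 4-byte read at %ebp is
allowed only if every byte of it lies below the kernel boundary. A minimal C
sketch of the same predicate, assuming the conventional 3G/1G split
(PAGE_OFFSET is an assumed value here):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xc0000000UL       /* assumed 3G/1G split */

    /* mirrors "cmpl $__PAGE_OFFSET-3,%ebp; jae syscall_fault" */
    static bool user_long_ok(uintptr_t ptr)
    {
        /* all four bytes ptr..ptr+3 must lie below PAGE_OFFSET */
        return ptr < PAGE_OFFSET - 3;
    }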
++
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp)
++ DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx
++ jne syscall_exit_work
++/* if something modifies registers it must also disable sysexit */
++ movl EIP(%esp), %edx
++ movl OLDESP(%esp), %ecx
++ xorl %ebp,%ebp
++#ifdef CONFIG_XEN
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++sysexit_scrit: /**** START OF SYSEXIT CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ movl ESI(%esp), %esi
++ sysexit
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++sysexit_ecrit: /**** END OF SYSEXIT CRITICAL REGION ****/
++ push %esp
++ call evtchn_do_upcall
++ add $4,%esp
++ jmp ret_from_intr
++#else
++ TRACE_IRQS_ON
++ sti
++ sysexit
++#endif /* !CONFIG_XEN */
++ CFI_ENDPROC
++
++ # pv sysenter call handler stub
++ENTRY(sysenter_entry_pv)
++ RING0_INT_FRAME
++ movl $__USER_DS,16(%esp)
++ movl %ebp,12(%esp)
++ movl $__USER_CS,4(%esp)
++ addl $4,%esp
++ /* +5*4 is SS:ESP,EFLAGS,CS:EIP. +8 is esp0 setting. */
++ pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
++/*
++ * Load the potential sixth argument from user stack.
++ * Careful about security.
++ */
++ cmpl $__PAGE_OFFSET-3,%ebp
++ jae syscall_fault
++1: movl (%ebp),%ebp
++.section __ex_table,"a"
++ .align 4
++ .long 1b,syscall_fault
++.previous
++ /* fall through */
++ CFI_ENDPROC
++ENDPROC(sysenter_entry_pv)
++
++ # system call handler stub
++ENTRY(system_call)
++ RING0_INT_FRAME # can't unwind into user space anyway
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ testl $TF_MASK,EFLAGS(%esp)
++ jz no_singlestep
++ orl $_TIF_SINGLESTEP,TI_flags(%ebp)
++no_singlestep:
++ # system call tracing in operation / emulation
++ /* Note, _TIF_SECCOMP is bit number 8, and so it needs testw and not testb */
++ testw $(_TIF_SYSCALL_EMU|_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
++ jnz syscall_trace_entry
++ cmpl $(nr_syscalls), %eax
++ jae syscall_badsys
++syscall_call:
++ call *sys_call_table(,%eax,4)
++ movl %eax,EAX(%esp) # store the return value
++syscall_exit:
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ testw $_TIF_ALLWORK_MASK, %cx # current->work
++ jne syscall_exit_work
++
++restore_all:
++#ifndef CONFIG_XEN
++ movl EFLAGS(%esp), %eax # mix EFLAGS, SS and CS
++ # Warning: OLDSS(%esp) contains the wrong/random values if we
++ # are returning to the kernel.
++ # See comments in process.c:copy_thread() for details.
++ movb OLDSS(%esp), %ah
++ movb CS(%esp), %al
++ andl $(VM_MASK | (4 << 8) | 3), %eax
++ cmpl $((4 << 8) | 3), %eax
++ CFI_REMEMBER_STATE
++ je ldt_ss # returning to user-space with LDT SS
++restore_nocheck:
++#else
++restore_nocheck:
++ movl EFLAGS(%esp), %eax
++ testl $(VM_MASK|NMI_MASK), %eax
++ CFI_REMEMBER_STATE
++ jnz hypervisor_iret
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ GET_VCPU_INFO
++ andb evtchn_upcall_mask(%esi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ CFI_REMEMBER_STATE
++ jnz restore_all_enable_events # != 0 => enable event delivery
++#endif
++ TRACE_IRQS_IRET
++restore_nocheck_notrace:
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section .fixup,"ax"
++iret_exc:
++#ifndef CONFIG_XEN
++ TRACE_IRQS_ON
++ sti
++#endif
++ pushl $0 # no error code
++ pushl $do_iret_error
++ jmp error_code
++.previous
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++
++ CFI_RESTORE_STATE
++#ifndef CONFIG_XEN
++ldt_ss:
++ larl OLDSS(%esp), %eax
++ jnz restore_nocheck
++ testl $0x00400000, %eax # returning to 32bit stack?
++ jnz restore_nocheck # all right, normal return
++ /* If returning to userspace with 16bit stack,
++ * try to fix the higher word of ESP, as the CPU
++ * won't restore it.
++ * This is an "official" bug of all the x86-compatible
++ * CPUs, which we can try to work around to make
++ * dosemu and wine happy. */
++ subl $8, %esp # reserve space for switch16 pointer
++ CFI_ADJUST_CFA_OFFSET 8
++ cli
++ TRACE_IRQS_OFF
++ movl %esp, %eax
++ /* Set up the 16bit stack frame with switch32 pointer on top,
++ * and a switch16 pointer on top of the current frame. */
++ call setup_x86_bogus_stack
++ CFI_ADJUST_CFA_OFFSET -8 # frame has moved
++ TRACE_IRQS_IRET
++ RESTORE_REGS
++ lss 20+4(%esp), %esp # switch to 16bit stack
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ ALIGN
++restore_all_enable_events:
++ TRACE_IRQS_ON
++ __ENABLE_INTERRUPTS
++scrit: /**** START OF CRITICAL REGION ****/
++ __TEST_PENDING
++ jnz 14f # process more events if necessary...
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++1: iret
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++14: __DISABLE_INTERRUPTS
++ TRACE_IRQS_OFF
++ jmp 11f
++ecrit: /**** END OF CRITICAL REGION ****/
++
++ CFI_RESTORE_STATE
++hypervisor_iret:
++ andl $~NMI_MASK, EFLAGS(%esp)
++ RESTORE_REGS
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++#endif
++ CFI_ENDPROC
++
++ # perform work that needs to be done immediately before resumption
++ ALIGN
++ RING0_PTREGS_FRAME # can't unwind into user space anyway
++work_pending:
++ testb $_TIF_NEED_RESCHED, %cl
++ jz work_notifysig
++work_resched:
++ call schedule
++ DISABLE_INTERRUPTS # make sure we don't miss an interrupt
++ # setting need_resched or sigpending
++ # between sampling and the iret
++ TRACE_IRQS_OFF
++ movl TI_flags(%ebp), %ecx
++ andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
++ # than syscall tracing?
++ jz restore_all
++ testb $_TIF_NEED_RESCHED, %cl
++ jnz work_resched
++
++work_notifysig: # deal with pending signals and
++ # notify-resume requests
++ testl $VM_MASK, EFLAGS(%esp)
++ movl %esp, %eax
++ jne work_notifysig_v86 # returning to kernel-space or
++ # vm86-space
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++
++ ALIGN
++work_notifysig_v86:
++#ifdef CONFIG_VM86
++ pushl %ecx # save ti_flags for do_notify_resume
++ CFI_ADJUST_CFA_OFFSET 4
++ call save_v86_state # %eax contains pt_regs pointer
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ movl %eax, %esp
++ xorl %edx, %edx
++ call do_notify_resume
++ jmp resume_userspace_sig
++#endif
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_trace_entry:
++ movl $-ENOSYS,EAX(%esp)
++ movl %esp, %eax
++ xorl %edx,%edx
++ call do_syscall_trace
++ cmpl $0, %eax
++ jne resume_userspace # ret != 0 -> running under PTRACE_SYSEMU,
++ # so must skip actual syscall
++ movl ORIG_EAX(%esp), %eax
++ cmpl $(nr_syscalls), %eax
++ jnae syscall_call
++ jmp syscall_exit
++
++ # perform syscall exit tracing
++ ALIGN
++syscall_exit_work:
++ testb $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP), %cl
++ jz work_pending
++ TRACE_IRQS_ON
++ ENABLE_INTERRUPTS # could let do_syscall_trace() call
++ # schedule() instead
++ movl %esp, %eax
++ movl $1, %edx
++ call do_syscall_trace
++ jmp resume_userspace
++ CFI_ENDPROC
++
++ RING0_INT_FRAME # can't unwind into user space anyway
++syscall_fault:
++ pushl %eax # save orig_eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ GET_THREAD_INFO(%ebp)
++ movl $-EFAULT,EAX(%esp)
++ jmp resume_userspace
++
++syscall_badsys:
++ movl $-ENOSYS,EAX(%esp)
++ jmp resume_userspace
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++#define FIXUP_ESPFIX_STACK \
++ movl %esp, %eax; \
++ /* switch to 32bit stack using the pointer on top of 16bit stack */ \
++ lss %ss:CPU_16BIT_STACK_SIZE-8, %esp; \
++ /* copy data from 16bit stack to 32bit stack */ \
++ call fixup_x86_bogus_stack; \
++ /* put ESP to the proper location */ \
++ movl %eax, %esp;
++#define UNWIND_ESPFIX_STACK \
++ pushl %eax; \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ movl %ss, %eax; \
++ /* see if on 16bit stack */ \
++ cmpw $__ESPFIX_SS, %ax; \
++ je 28f; \
++27: popl %eax; \
++ CFI_ADJUST_CFA_OFFSET -4; \
++.section .fixup,"ax"; \
++28: movl $__KERNEL_DS, %eax; \
++ movl %eax, %ds; \
++ movl %eax, %es; \
++ /* switch to 32bit stack */ \
++ FIXUP_ESPFIX_STACK; \
++ jmp 27b; \
++.previous
++
++/*
++ * Build the entry stubs and pointer table with
++ * some assembler magic.
++ */
++.data
++ENTRY(interrupt)
++.text
++
++vector=0
++ENTRY(irq_entries_start)
++ RING0_INT_FRAME
++.rept NR_IRQS
++ ALIGN
++ .if vector
++ CFI_ADJUST_CFA_OFFSET -4
++ .endif
++1: pushl $~(vector)
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp common_interrupt
++.data
++ .long 1b
++.text
++vector=vector+1
++.endr
++
++/*
++ * the CPU automatically disables interrupts when executing an IRQ vector,
++ * so IRQ-flags tracing has to follow that:
++ */
++ ALIGN
++common_interrupt:
++ SAVE_ALL
++ TRACE_IRQS_OFF
++ movl %esp,%eax
++ call do_IRQ
++ jmp ret_from_intr
++ CFI_ENDPROC
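
The .rept block above stamps out one tiny stub per vector and, via the
.data/.text flips, a parallel table of their addresses. A rough C analogue of
the resulting structure (names and the four-vector size are invented for the
sketch):

    #include <stdio.h>

    #define NR_IRQS 4                      /* four vectors for brevity */

    /* the common handler receives ~vector, exactly as the stubs push it */
    static void common_interrupt(int orig_eax)
    {
        printf("handling vector %d\n", ~orig_eax);
    }

    /* one generated stub per vector: "pushl $~(vector); jmp common" */
    #define STUB(n) static void irq_stub_##n(void) { common_interrupt(~(n)); }
    STUB(0) STUB(1) STUB(2) STUB(3)

    /* the parallel pointer table built by the .data/.long/.text lines */
    static void (*interrupt[NR_IRQS])(void) = {
        irq_stub_0, irq_stub_1, irq_stub_2, irq_stub_3,
    };

    int main(void)
    {
        interrupt[2]();                    /* prints "handling vector 2" */
        return 0;
    }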
++
++#define BUILD_INTERRUPT(name, nr) \
++ENTRY(name) \
++ RING0_INT_FRAME; \
++ pushl $~(nr); \
++ CFI_ADJUST_CFA_OFFSET 4; \
++ SAVE_ALL; \
++ TRACE_IRQS_OFF \
++ movl %esp,%eax; \
++ call smp_/**/name; \
++ jmp ret_from_intr; \
++ CFI_ENDPROC
++
++/* The include is where all of the SMP etc. interrupts come from */
++#include "entry_arch.h"
++#else
++#define UNWIND_ESPFIX_STACK
++#endif
++
++ENTRY(divide_error)
++ RING0_INT_FRAME
++ pushl $0 # no error code
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_divide_error
++ CFI_ADJUST_CFA_OFFSET 4
++ ALIGN
++error_code:
++ pushl %ds
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET ds, 0*/
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET eax, 0
++ xorl %eax, %eax
++ pushl %ebp
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebp, 0
++ pushl %edi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edi, 0
++ pushl %esi
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET esi, 0
++ pushl %edx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET edx, 0
++ decl %eax # eax = -1
++ pushl %ecx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ecx, 0
++ pushl %ebx
++ CFI_ADJUST_CFA_OFFSET 4
++ CFI_REL_OFFSET ebx, 0
++ cld
++ pushl %es
++ CFI_ADJUST_CFA_OFFSET 4
++ /*CFI_REL_OFFSET es, 0*/
++ UNWIND_ESPFIX_STACK
++ popl %ecx
++ CFI_ADJUST_CFA_OFFSET -4
++ /*CFI_REGISTER es, ecx*/
++ movl ES(%esp), %edi # get the function address
++ movl ORIG_EAX(%esp), %edx # get the error code
++ movl %eax, ORIG_EAX(%esp)
++ movl %ecx, ES(%esp)
++ /*CFI_REL_OFFSET es, ES*/
++ movl $(__USER_DS), %ecx
++ movl %ecx, %ds
++ movl %ecx, %es
++ movl %esp,%eax # pt_regs pointer
++ call *%edi
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifdef CONFIG_XEN
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++#
++# The sysexit critical region is slightly different. sysexit
++# atomically removes the entire stack frame. If we interrupt in the
++# critical region we know that the entire frame is present and correct
++# so we can simply throw away the new one.
++ENTRY(hypervisor_callback)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ movl EIP(%esp),%eax
++ cmpl $scrit,%eax
++ jb 11f
++ cmpl $ecrit,%eax
++ jb critical_region_fixup
++ cmpl $sysexit_scrit,%eax
++ jb 11f
++ cmpl $sysexit_ecrit,%eax
++ ja 11f
++ addl $OLDESP,%esp # Remove eflags...ebx from stack frame.
++11: push %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ call evtchn_do_upcall
++ add $4,%esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_intr
++ CFI_ENDPROC
++
++# [How we do the fixup]. We want to merge the current stack frame with the
++# just-interrupted frame. How we do this depends on where in the critical
++# region the interrupted handler was executing, and so how many saved
++# registers are in each frame. We do this quickly using the lookup table
++# 'critical_fixup_table'. For each byte offset in the critical region, it
++# provides the number of bytes which have already been popped from the
++# interrupted stack frame.
++critical_region_fixup:
++ movzbl critical_fixup_table-scrit(%eax),%ecx # %ecx = num bytes already popped
++ cmpb $0xff,%cl # 0xff => vcpu_info critical region
++ jne 15f
++ xorl %ecx,%ecx
++15: leal (%esp,%ecx),%esi # %esi points at end of src region
++ leal OLDESP(%esp),%edi # %edi points at end of dst region
++ shrl $2,%ecx # convert byte count to word count
++ je 17f # skip loop if nothing to copy
++16: subl $4,%esi # pre-decrementing copy loop
++ subl $4,%edi
++ movl (%esi),%eax
++ movl %eax,(%edi)
++ loop 16b
++17: movl %edi,%esp # final %edi is top of merged stack
++ jmp 11b
++
++.section .rodata,"a"
++critical_fixup_table:
++ .byte 0xff,0xff,0xff # testb $0xff,(%esi) = __TEST_PENDING
++ .byte 0xff,0xff # jnz 14f
++ .byte 0x00 # pop %ebx
++ .byte 0x04 # pop %ecx
++ .byte 0x08 # pop %edx
++ .byte 0x0c # pop %esi
++ .byte 0x10 # pop %edi
++ .byte 0x14 # pop %ebp
++ .byte 0x18 # pop %eax
++ .byte 0x1c # pop %ds
++ .byte 0x20 # pop %es
++ .byte 0x24,0x24,0x24 # add $4,%esp
++ .byte 0x28 # iret
++ .byte 0xff,0xff,0xff,0xff # movb $1,1(%esi)
++ .byte 0x00,0x00 # jmp 11b
++.previous
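
Concretely, the fixup picks the per-offset byte count from the table and
slides the freshly saved registers up over the interrupted frame. A C model
of that merge, with FRAME_BYTES standing in for OLDESP (illustrative only,
value assumed):

    #include <stdint.h>
    #include <string.h>

    #define FRAME_BYTES 0x34   /* stands in for OLDESP: size of the
                                  eflags..ebx save area (assumed) */

    /*
     * esp points at the new, fully saved frame; the interrupted frame
     * sits FRAME_BYTES above it with bytes_popped of its registers
     * already consumed.  Slide the fresh copies of those registers up
     * so the two frames merge, and return the merged stack pointer
     * (the final %edi in the assembly).
     */
    static uint8_t *merge_frames(uint8_t *esp, size_t bytes_popped)
    {
        uint8_t *dst = esp + FRAME_BYTES - bytes_popped;

        memmove(dst, esp, bytes_popped);   /* the pre-decrementing loop */
        return dst;                        /* becomes the new %esp */
    }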
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we fix up by reattempting the load, and zeroing the segment
++# register if the load fails.
++# Category 2 we fix up by jumping to do_iret_error. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by maintaining a status value in EAX.
++ENTRY(failsafe_callback)
++ pushl %eax
++ movl $1,%eax
++1: mov 4(%esp),%ds
++2: mov 8(%esp),%es
++3: mov 12(%esp),%fs
++4: mov 16(%esp),%gs
++ testl %eax,%eax
++ popl %eax
++ jz 5f
++ addl $16,%esp # EAX != 0 => Category 2 (Bad IRET)
++ jmp iret_exc
++5: addl $16,%esp # EAX == 0 => Category 1 (Bad segment)
++ RING0_INT_FRAME
++ pushl $0
++ SAVE_ALL
++ jmp ret_from_exception
++.section .fixup,"ax"; \
++6: xorl %eax,%eax; \
++ movl %eax,4(%esp); \
++ jmp 1b; \
++7: xorl %eax,%eax; \
++ movl %eax,8(%esp); \
++ jmp 2b; \
++8: xorl %eax,%eax; \
++ movl %eax,12(%esp); \
++ jmp 3b; \
++9: xorl %eax,%eax; \
++ movl %eax,16(%esp); \
++ jmp 4b; \
++.previous; \
++.section __ex_table,"a"; \
++ .align 4; \
++ .long 1b,6b; \
++ .long 2b,7b; \
++ .long 3b,8b; \
++ .long 4b,9b; \
++.previous
++#endif
++ CFI_ENDPROC
++
++ENTRY(coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(simd_coprocessor_error)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_simd_coprocessor_error
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(device_not_available)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++#ifndef CONFIG_XEN
++ movl %cr0, %eax
++ testl $0x4, %eax # EM (math emulation bit)
++ je device_available_emulate
++ pushl $0 # temporary storage for ORIG_EIP
++ CFI_ADJUST_CFA_OFFSET 4
++ call math_emulate
++ addl $4, %esp
++ CFI_ADJUST_CFA_OFFSET -4
++ jmp ret_from_exception
++device_available_emulate:
++#endif
++ preempt_stop
++ call math_state_restore
++ jmp ret_from_exception
++ CFI_ENDPROC
++
++#ifndef CONFIG_XEN
++/*
++ * Debug traps and NMI can happen at the one SYSENTER instruction
++ * that sets up the real kernel stack. Check here, since we can't
++ * allow the wrong stack to be used.
++ *
++ * "SYSENTER_stack_esp0+12" is because the NMI/debug handler will have
++ * already pushed 3 words if it hits on the sysenter instruction:
++ * eflags, cs and eip.
++ *
++ * We just load the right stack, and push the three (known) values
++ * by hand onto the new stack - while updating the return eip past
++ * the instruction that would have done it for sysenter.
++ */
++#define FIX_STACK(offset, ok, label) \
++ cmpw $__KERNEL_CS,4(%esp); \
++ jne ok; \
++label: \
++ movl SYSENTER_stack_esp0+offset(%esp),%esp; \
++ pushfl; \
++ pushl $__KERNEL_CS; \
++ pushl $sysenter_past_esp
++#endif /* CONFIG_XEN */
++
++KPROBE_ENTRY(debug)
++ RING0_INT_FRAME
++#ifndef CONFIG_XEN
++ cmpl $sysenter_entry,(%esp)
++ jne debug_stack_correct
++ FIX_STACK(12, debug_stack_correct, debug_esp_fix_insn)
++debug_stack_correct:
++#endif /* !CONFIG_XEN */
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # error code 0
++ movl %esp,%eax # pt_regs pointer
++ call do_debug
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++#ifndef CONFIG_XEN
++/*
++ * NMI is doubly nasty. It can happen _while_ we're handling
++ * a debug fault, and the debug fault hasn't yet been able to
++ * clear up the stack. So we first check whether we got an
++ * NMI on the sysenter entry path, but after that we need to
++ * check whether we got an NMI on the debug path where the debug
++ * fault happened on the sysenter path.
++ */
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %ss, %eax
++ cmpw $__ESPFIX_SS, %ax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ je nmi_16bit_stack
++ cmpl $sysenter_entry,(%esp)
++ je nmi_stack_fixup
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ movl %esp,%eax
++ /* Do not access memory above the end of our stack page,
++ * it might not exist.
++ */
++ andl $(THREAD_SIZE-1),%eax
++ cmpl $(THREAD_SIZE-20),%eax
++ popl %eax
++ CFI_ADJUST_CFA_OFFSET -4
++ jae nmi_stack_correct
++ cmpl $sysenter_entry,12(%esp)
++ je nmi_debug_stack_check
++nmi_stack_correct:
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ jmp restore_nocheck_notrace
++ CFI_ENDPROC
++
++nmi_stack_fixup:
++ FIX_STACK(12,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++nmi_debug_stack_check:
++ cmpw $__KERNEL_CS,16(%esp)
++ jne nmi_stack_correct
++ cmpl $debug,(%esp)
++ jb nmi_stack_correct
++ cmpl $debug_esp_fix_insn,(%esp)
++ ja nmi_stack_correct
++ FIX_STACK(24,nmi_stack_correct, 1)
++ jmp nmi_stack_correct
++
++nmi_16bit_stack:
++ RING0_INT_FRAME
++ /* create the pointer to lss back */
++ pushl %ss
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl %esp
++ CFI_ADJUST_CFA_OFFSET 4
++ movzwl %sp, %esp
++ addw $4, (%esp)
++ /* copy the iret frame of 12 bytes */
++ .rept 3
++ pushl 16(%esp)
++ CFI_ADJUST_CFA_OFFSET 4
++ .endr
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ FIXUP_ESPFIX_STACK # %eax == %esp
++ CFI_ADJUST_CFA_OFFSET -20 # the frame has now moved
++ xorl %edx,%edx # zero error code
++ call do_nmi
++ RESTORE_REGS
++ lss 12+4(%esp), %esp # back to 16bit stack
++1: iret
++ CFI_ENDPROC
++.section __ex_table,"a"
++ .align 4
++ .long 1b,iret_exc
++.previous
++#else
++ENTRY(nmi)
++ RING0_INT_FRAME
++ pushl %eax
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_nmi
++ orl $NMI_MASK, EFLAGS(%esp)
++ jmp restore_all
++ CFI_ENDPROC
++#endif
++
++KPROBE_ENTRY(int3)
++ RING0_INT_FRAME
++ pushl $-1 # mark this as an int
++ CFI_ADJUST_CFA_OFFSET 4
++ SAVE_ALL
++ xorl %edx,%edx # zero error code
++ movl %esp,%eax # pt_regs pointer
++ call do_int3
++ jmp ret_from_exception
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(overflow)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_overflow
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(bounds)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_bounds
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_op)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_invalid_op
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(coprocessor_segment_overrun)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_coprocessor_segment_overrun
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(invalid_TSS)
++ RING0_EC_FRAME
++ pushl $do_invalid_TSS
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(segment_not_present)
++ RING0_EC_FRAME
++ pushl $do_segment_not_present
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++ENTRY(stack_segment)
++ RING0_EC_FRAME
++ pushl $do_stack_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(general_protection)
++ RING0_EC_FRAME
++ pushl $do_general_protection
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++ENTRY(alignment_check)
++ RING0_EC_FRAME
++ pushl $do_alignment_check
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++KPROBE_ENTRY(page_fault)
++ RING0_EC_FRAME
++ pushl $do_page_fault
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++ .previous .text
++
++#ifdef CONFIG_X86_MCE
++ENTRY(machine_check)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl machine_check_vector
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif
++
++#ifndef CONFIG_XEN
++ENTRY(spurious_interrupt_bug)
++ RING0_INT_FRAME
++ pushl $0
++ CFI_ADJUST_CFA_OFFSET 4
++ pushl $do_spurious_interrupt_bug
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
++ movl 4(%esp), %edx
++ movl (%esp), %ecx
++ leal 4(%esp), %eax
++ movl %ebx, EBX(%edx)
++ xorl %ebx, %ebx
++ movl %ebx, ECX(%edx)
++ movl %ebx, EDX(%edx)
++ movl %esi, ESI(%edx)
++ movl %edi, EDI(%edx)
++ movl %ebp, EBP(%edx)
++ movl %ebx, EAX(%edx)
++ movl $__USER_DS, DS(%edx)
++ movl $__USER_DS, ES(%edx)
++ movl %ebx, ORIG_EAX(%edx)
++ movl %ecx, EIP(%edx)
++ movl 12(%esp), %ecx
++ movl $__KERNEL_CS, CS(%edx)
++ movl %ebx, EFLAGS(%edx)
++ movl %eax, OLDESP(%edx)
++ movl 8(%esp), %eax
++ movl %ecx, 8(%esp)
++ movl EBX(%edx), %ebx
++ movl $__KERNEL_DS, OLDSS(%edx)
++ jmpl *%eax
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
++
++ENTRY(fixup_4gb_segment)
++ RING0_EC_FRAME
++ pushl $do_fixup_4gb_segment
++ CFI_ADJUST_CFA_OFFSET 4
++ jmp error_code
++ CFI_ENDPROC
++
++.section .rodata,"a"
++#include "syscall_table.S"
++
++syscall_table_size=(.-sys_call_table)
+Index: head-2008-11-25/arch/x86/kernel/fixup.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/fixup.c 2008-01-28 12:24:18.000000000 +0100
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * fixup.c
++ *
++ * Binary-rewriting of certain IA32 instructions, on notification by Xen.
++ * Used to avoid repeated slow emulation of common instructions used by the
++ * user-space TLS (Thread-Local Storage) libraries.
++ *
++ * **** NOTE ****
++ * Issues with the binary rewriting have caused it to be removed. Instead
++ * we rely on Xen's emulator to boot the kernel, and then print a banner
++ * message recommending that the user disable /lib/tls.
++ *
++ * Copyright (c) 2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/delay.h>
++#include <linux/version.h>
++
++#define DP(_f, _args...) printk(KERN_ALERT " " _f "\n" , ## _args )
++
++fastcall void do_fixup_4gb_segment(struct pt_regs *regs, long error_code)
++{
++ static unsigned long printed = 0;
++ char info[100];
++ int i;
++
++ /* Ignore statically-linked init. */
++ if (current->tgid == 1)
++ return;
++
++ VOID(HYPERVISOR_vm_assist(VMASST_CMD_disable,
++ VMASST_TYPE_4gb_segments_notify));
++
++ if (test_and_set_bit(0, &printed))
++ return;
++
++ sprintf(info, "%s (pid=%d)", current->comm, current->tgid);
++
++ DP("");
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("** WARNING: Currently emulating unsupported memory accesses **");
++ DP("** in /lib/tls glibc libraries. The emulation is **");
++ DP("** slow. To ensure full performance you should **");
++ DP("** install a 'xen-friendly' (nosegneg) version of **");
++ DP("** the library, or disable tls support by executing **");
++ DP("** the following as root: **");
++ DP("** mv /lib/tls /lib/tls.disabled **");
++ DP("** Offending process: %-38.38s **", info);
++ DP("***************************************************************");
++ DP("***************************************************************");
++ DP("");
++
++ for (i = 5; i > 0; i--) {
++ touch_softlockup_watchdog();
++ printk("Pausing... %d", i);
++ mdelay(1000);
++ printk("\b\b\b\b\b\b\b\b\b\b\b\b");
++ }
++
++ printk("Continuing...\n\n");
++}
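
The test_and_set_bit(0, &printed) guard above is the usual print-once idiom;
a self-contained C11 analogue:

    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_flag printed = ATOMIC_FLAG_INIT;

    static void warn_once(const char *msg)
    {
        /* only the first caller to set the flag prints */
        if (!atomic_flag_test_and_set(&printed))
            fprintf(stderr, "%s\n", msg);
    }

    int main(void)
    {
        warn_once("slow TLS emulation in use");
        warn_once("slow TLS emulation in use");   /* silent */
        return 0;
    }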
++
++static int __init fixup_init(void)
++{
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments_notify));
++ return 0;
++}
++__initcall(fixup_init);
+Index: head-2008-11-25/arch/x86/kernel/head_32-xen.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/head_32-xen.S 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,207 @@
++
++
++.text
++#include <linux/elfnote.h>
++#include <linux/threads.h>
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/cache.h>
++#include <asm/thread_info.h>
++#include <asm/asm-offsets.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/elfnote.h>
++
++/*
++ * References to members of the new_cpu_data structure.
++ */
++
++#define X86 new_cpu_data+CPUINFO_x86
++#define X86_VENDOR new_cpu_data+CPUINFO_x86_vendor
++#define X86_MODEL new_cpu_data+CPUINFO_x86_model
++#define X86_MASK new_cpu_data+CPUINFO_x86_mask
++#define X86_HARD_MATH new_cpu_data+CPUINFO_hard_math
++#define X86_CPUID new_cpu_data+CPUINFO_cpuid_level
++#define X86_CAPABILITY new_cpu_data+CPUINFO_x86_capability
++#define X86_VENDOR_ID new_cpu_data+CPUINFO_x86_vendor_id
++
++#define VIRT_ENTRY_OFFSET 0x0
++.org VIRT_ENTRY_OFFSET
++ENTRY(startup_32)
++ movl %esi,xen_start_info
++ cld
++
++ /* Set up the stack pointer */
++ movl $(init_thread_union+THREAD_SIZE),%esp
++
++ /* get vendor info */
++ xorl %eax,%eax # call CPUID with 0 -> return vendor ID
++ XEN_CPUID
++ movl %eax,X86_CPUID # save CPUID level
++ movl %ebx,X86_VENDOR_ID # lo 4 chars
++ movl %edx,X86_VENDOR_ID+4 # next 4 chars
++ movl %ecx,X86_VENDOR_ID+8 # last 4 chars
++
++ movl $1,%eax # Use the CPUID instruction to get CPU type
++ XEN_CPUID
++ movb %al,%cl # save reg for future use
++ andb $0x0f,%ah # mask processor family
++ movb %ah,X86
++ andb $0xf0,%al # mask model
++ shrb $4,%al
++ movb %al,X86_MODEL
++ andb $0x0f,%cl # mask the stepping ("mask") revision
++ movb %cl,X86_MASK
++ movl %edx,X86_CAPABILITY
++
++ movb $1,X86_HARD_MATH
++
++ xorl %eax,%eax # Clear FS/GS and LDT
++ movl %eax,%fs
++ movl %eax,%gs
++ cld # gcc2 wants the direction flag cleared at all times
++
++ pushl %eax # fake return address
++ jmp start_kernel
++
++#define HYPERCALL_PAGE_OFFSET 0x1000
++.org HYPERCALL_PAGE_OFFSET
++ENTRY(hypercall_page)
++ CFI_STARTPROC
++.skip 0x1000
++ CFI_ENDPROC
++
++/*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
++ * BSS section
++ */
++.section ".bss.page_aligned","w"
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++/*
++ * This starts the data section.
++ */
++.data
++
++/*
++ * The Global Descriptor Table contains 32 quadwords, per-CPU.
++ */
++ .align L1_CACHE_BYTES
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0000000000000000 /* 0x0b reserved */
++ .quad 0x0000000000000000 /* 0x13 reserved */
++ .quad 0x0000000000000000 /* 0x1b reserved */
++ .quad 0x0000000000000000 /* 0x20 unused */
++ .quad 0x0000000000000000 /* 0x28 unused */
++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
++ .quad 0x0000000000000000 /* 0x4b reserved */
++ .quad 0x0000000000000000 /* 0x53 reserved */
++ .quad 0x0000000000000000 /* 0x5b reserved */
++
++ .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++ .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++
++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
++
++ /*
++ * Segments used for calling PnP BIOS have byte granularity.
++ * The code and data segments have fixed 64k limits,
++ * the transfer segment sizes are set at run time.
++ */
++ .quad 0x0000000000000000 /* 0x90 32-bit code */
++ .quad 0x0000000000000000 /* 0x98 16-bit code */
++ .quad 0x0000000000000000 /* 0xa0 16-bit data */
++ .quad 0x0000000000000000 /* 0xa8 16-bit data */
++ .quad 0x0000000000000000 /* 0xb0 16-bit data */
++
++ /*
++ * The APM segments have byte granularity and their bases
++ * are set at run time. All have 64k limits.
++ */
++ .quad 0x0000000000000000 /* 0xb8 APM CS code */
++ .quad 0x0000000000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0000000000000000 /* 0xc8 APM DS data */
++
++ .quad 0x0000000000000000 /* 0xd0 - ESPFIX 16-bit SS */
++ .quad 0x0000000000000000 /* 0xd8 - unused */
++ .quad 0x0000000000000000 /* 0xe0 - unused */
++ .quad 0x0000000000000000 /* 0xe8 - unused */
++ .quad 0x0000000000000000 /* 0xf0 - unused */
++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
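
Each .quad above packs base, limit, access byte and flags into one 64-bit
descriptor. A small standalone decoder, shown on the kernel 4GB code segment:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t d = 0x00cf9a000000ffffULL;        /* kernel 4GB code */
        uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
        uint32_t limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
        unsigned access = (unsigned)((d >> 40) & 0xff); /* P/DPL/type = 0x9a */
        unsigned flags  = (unsigned)((d >> 52) & 0xf);  /* G=1, D=1 -> 0xc */

        /* prints base=0 limit=0xfffff (pages) access=0x9a flags=0xc */
        printf("base=%#lx limit=%#lx access=%#x flags=%#x\n",
               (unsigned long)base, (unsigned long)limit, access, flags);
        return 0;
    }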
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoa value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoa (((\value)>>4)&0x0fffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
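
The utoa macro is a compile-time hex-to-ASCII conversion, emitting the most
significant nibble first and suppressing leading zeros. The same recursion in
C (illustrative):

    #include <stdio.h>

    static void utoa_hex(unsigned long value)
    {
        if (value >= 0x10)
            utoa_hex(value >> 4);              /* high digits first */
        putchar("0123456789ABCDEF"[value & 0xf]);
    }

    int main(void)
    {
        utoa_hex(0xc0000000UL);                /* prints C0000000 */
        putchar('\n');
        return 0;
    }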
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoa __PAGE_OFFSET
++ .ascii ",VIRT_ENTRY=0x"
++ utoa (__PAGE_OFFSET + __PHYSICAL_START + VIRT_ENTRY_OFFSET)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoa ((__PHYSICAL_START+HYPERCALL_PAGE_OFFSET)>>PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|pae_pgdir_above_4gb"
++ .ascii "|supervisor_mode_kernel"
++#ifdef CONFIG_X86_PAE
++ .ascii ",PAE=yes[extended-cr3]"
++#else
++ .ascii ",PAE=no"
++#endif
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .long, __PAGE_OFFSET)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, __PAGE_OFFSET)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .long, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .long, startup_32)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .long, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_HV_START_LOW, .long, HYPERVISOR_VIRT_START)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++#ifdef CONFIG_X86_PAE
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "yes")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz, "no")
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .long, _PAGE_PRESENT,_PAGE_PRESENT)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+Index: head-2008-11-25/arch/x86/kernel/init_task-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/init_task-xen.c 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,51 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/init.h>
++#include <linux/init_task.h>
++#include <linux/fs.h>
++#include <linux/mqueue.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/desc.h>
++
++static struct fs_struct init_fs = INIT_FS;
++static struct files_struct init_files = INIT_FILES;
++static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
++static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
++
++#define swapper_pg_dir ((pgd_t *)NULL)
++struct mm_struct init_mm = INIT_MM(init_mm);
++#undef swapper_pg_dir
++
++EXPORT_SYMBOL(init_mm);
++
++/*
++ * Initial thread structure.
++ *
++ * We need to make sure that this is THREAD_SIZE aligned due to the
++ * way process stacks are handled. This is done by having a special
++ * "init_task" linker map entry..
++ */
++union thread_union init_thread_union
++ __attribute__((__section__(".data.init_task"))) =
++ { INIT_THREAD_INFO(init_task) };
++
++/*
++ * Initial task structure.
++ *
++ * All other task structs will be allocated on slabs in fork.c
++ */
++struct task_struct init_task = INIT_TASK(init_task);
++
++EXPORT_SYMBOL(init_task);
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * per-CPU TSS segments. Threads are completely 'soft' on Linux,
++ * no more per-task TSS's.
++ */
++DEFINE_PER_CPU(struct tss_struct, init_tss) ____cacheline_internodealigned_in_smp = INIT_TSS;
++#endif
++
+Index: head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/io_apic_32-xen.c 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,2776 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/compiler.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/timer.h>
++#include <asm/i8259.h>
++#include <asm/nmi.h>
++
++#include <mach_apic.h>
++
++#include "io_ports.h"
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++#include <xen/evtchn.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#endif /* CONFIG_XEN */
++
++int (*ioapic_renumber_irq)(int ioapic, int irq);
++atomic_t irq_mis_count;
++
++/* Where, if anywhere, the i8259 is connected in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++int timer_over_8254 __initdata = 1;
++
++/*
++ * Is the SiS APIC rmw bug present ?
++ * -1 = don't know, 0 = no, 1 = yes
++ */
++int sis_apic_bug = -1;
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++int disable_timer_pin_1 __initdata;
++
++/*
++ * A rough estimate of how many shared IRQs there are; it can
++ * be changed at any time.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ int apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: whoops");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
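
For clarity, here is a standalone model of the list-threaded-through-an-array
idiom that add_pin_to_irq() maintains; the sizes and the demo main() are
invented for the example:

    #include <stdio.h>

    #define NR_IRQS      8
    #define PIN_MAP_SIZE (NR_IRQS * 2)

    static struct { int apic, pin, next; } irq_2_pin[PIN_MAP_SIZE];
    static int first_free_entry = NR_IRQS;

    static void add_pin_to_irq(int irq, int apic, int pin)
    {
        int i = irq;

        while (irq_2_pin[i].next)           /* walk to the chain's tail */
            i = irq_2_pin[i].next;
        if (irq_2_pin[i].pin != -1) {       /* tail in use: extend chain */
            irq_2_pin[i].next = first_free_entry;
            i = first_free_entry++;
        }
        irq_2_pin[i].apic = apic;
        irq_2_pin[i].pin = pin;
    }

    int main(void)
    {
        for (int i = 0; i < PIN_MAP_SIZE; i++)
            irq_2_pin[i].pin = -1;          /* mark all slots empty */

        add_pin_to_irq(3, 0, 11);           /* common 1:1 case: O(1) */
        add_pin_to_irq(3, 1, 5);            /* shared IRQ: chains a slot */

        for (int i = 3; ; i = irq_2_pin[i].next) {
            printf("irq 3 -> apic %d pin %d\n",
                   irq_2_pin[i].apic, irq_2_pin[i].pin);
            if (!irq_2_pin[i].next)
                break;
        }
        return 0;
    }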
++
++#ifdef CONFIG_XEN
++#define clear_IO_APIC() ((void)0)
++#else
++/*
++ * Reroute an IRQ to a different pin.
++ */
++static void __init replace_pin_at_irq(unsigned int irq,
++ int oldapic, int oldpin,
++ int newapic, int newpin)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ while (1) {
++ if (entry->apic == oldapic && entry->pin == oldpin) {
++ entry->apic = newapic;
++ entry->pin = newpin;
++ }
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++static void __modify_IO_APIC_irq (unsigned int irq, unsigned long enable, unsigned long disable)
++{
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int pin, reg;
++
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ reg = io_apic_read(entry->apic, 0x10 + pin*2);
++ reg &= ~disable;
++ reg |= enable;
++ io_apic_modify(entry->apic, 0x10 + pin*2, reg);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++}
++
++/* mask = 1 */
++static void __mask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0);
++}
++
++/* mask = 0 */
++static void __unmask_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0, 0x00010000);
++}
++
++/* mask = 1, trigger = 0 */
++static void __mask_and_edge_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00010000, 0x00008000);
++}
++
++/* mask = 0, trigger = 1 */
++static void __unmask_and_level_IO_APIC_irq (unsigned int irq)
++{
++ __modify_IO_APIC_irq(irq, 0x00008000, 0x00010000);
++}
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t cpumask)
++{
++ unsigned long flags;
++ int pin;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++ unsigned int apicid_value;
++ cpumask_t tmp;
++
++ cpus_and(tmp, cpumask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(cpumask, tmp, CPU_MASK_ALL);
++
++ apicid_value = cpu_mask_to_apicid(cpumask);
++ /* Prepare to do the io_apic_write */
++ apicid_value = apicid_value << 24;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (;;) {
++ pin = entry->pin;
++ if (pin == -1)
++ break;
++ io_apic_write(entry->apic, 0x10 + 1 + pin*2, apicid_value);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ set_irq_info(irq, cpumask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++#if defined(CONFIG_IRQBALANCE)
++# include <asm/processor.h> /* kernel_thread() */
++# include <linux/kernel_stat.h> /* kstat */
++# include <linux/slab.h> /* kmalloc() */
++# include <linux/timer.h> /* time_after() */
++
++#ifdef CONFIG_BALANCED_IRQ_DEBUG
++# define TDprintk(x...) do { printk("<%ld:%s:%d>: ", jiffies, __FILE__, __LINE__); printk(x); } while (0)
++# define Dprintk(x...) do { TDprintk(x); } while (0)
++# else
++# define TDprintk(x...)
++# define Dprintk(x...)
++# endif
++
++#define IRQBALANCE_CHECK_ARCH -999
++#define MAX_BALANCED_IRQ_INTERVAL (5*HZ)
++#define MIN_BALANCED_IRQ_INTERVAL (HZ/2)
++#define BALANCED_IRQ_MORE_DELTA (HZ/10)
++#define BALANCED_IRQ_LESS_DELTA (HZ)
++
++static int irqbalance_disabled __read_mostly = IRQBALANCE_CHECK_ARCH;
++static int physical_balance __read_mostly;
++static long balanced_irq_interval __read_mostly = MAX_BALANCED_IRQ_INTERVAL;
++
++static struct irq_cpu_info {
++ unsigned long * last_irq;
++ unsigned long * irq_delta;
++ unsigned long irq;
++} irq_cpu_data[NR_CPUS];
++
++#define CPU_IRQ(cpu) (irq_cpu_data[cpu].irq)
++#define LAST_CPU_IRQ(cpu,irq) (irq_cpu_data[cpu].last_irq[irq])
++#define IRQ_DELTA(cpu,irq) (irq_cpu_data[cpu].irq_delta[irq])
++
++#define IDLE_ENOUGH(cpu,now) \
++ (idle_cpu(cpu) && ((now) - per_cpu(irq_stat, (cpu)).idle_timestamp > 1))
++
++#define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
++
++#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
++
++static cpumask_t balance_irq_affinity[NR_IRQS] = {
++ [0 ... NR_IRQS-1] = CPU_MASK_ALL
++};
++
++void set_balance_irq_affinity(unsigned int irq, cpumask_t mask)
++{
++ balance_irq_affinity[irq] = mask;
++}
++
++static unsigned long move(int curr_cpu, cpumask_t allowed_mask,
++ unsigned long now, int direction)
++{
++ int search_idle = 1;
++ int cpu = curr_cpu;
++
++ goto inside;
++
++ do {
++ if (unlikely(cpu == curr_cpu))
++ search_idle = 0;
++inside:
++ if (direction == 1) {
++ cpu++;
++ if (cpu >= NR_CPUS)
++ cpu = 0;
++ } else {
++ cpu--;
++ if (cpu == -1)
++ cpu = NR_CPUS-1;
++ }
++ } while (!cpu_online(cpu) || !IRQ_ALLOWED(cpu,allowed_mask) ||
++ (search_idle && !IDLE_ENOUGH(cpu,now)));
++
++ return cpu;
++}
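
The goto-into-the-loop construct in move() can be hard to read; a goto-free
sketch of the same search, modeling only the forward direction, with
hypothetical helper callbacks standing in for cpu_online()/IRQ_ALLOWED()/
IDLE_ENOUGH():

    /* first lap insists on an idle CPU; the second accepts any usable one */
    static int pick_cpu(int curr_cpu, int nr_cpus,
                        int (*usable)(int cpu), int (*idle)(int cpu))
    {
        for (int lap = 0; lap < 2; lap++)
            for (int step = 1; step <= nr_cpus; step++) {
                int cpu = (curr_cpu + step) % nr_cpus;
                if (usable(cpu) && (lap == 1 || idle(cpu)))
                    return cpu;
            }
        return curr_cpu;                   /* nothing better was found */
    }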
++
++static inline void balance_irq(int cpu, int irq)
++{
++ unsigned long now = jiffies;
++ cpumask_t allowed_mask;
++ unsigned int new_cpu;
++
++ if (irqbalance_disabled)
++ return;
++
++ cpus_and(allowed_mask, cpu_online_map, balance_irq_affinity[irq]);
++ new_cpu = move(cpu, allowed_mask, now, 1);
++ if (cpu != new_cpu) {
++ set_pending_irq(irq, cpumask_of_cpu(new_cpu));
++ }
++}
++
++static inline void rotate_irqs_among_cpus(unsigned long useful_load_threshold)
++{
++ int i, j;
++ Dprintk("Rotating IRQs among CPUs.\n");
++ for_each_online_cpu(i) {
++ for (j = 0; j < NR_IRQS; j++) {
++ if (!irq_desc[j].action)
++ continue;
++ /* Is it a significant load ? */
++ if (IRQ_DELTA(CPU_TO_PACKAGEINDEX(i),j) <
++ useful_load_threshold)
++ continue;
++ balance_irq(i, j);
++ }
++ }
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++}
++
++static void do_irq_balance(void)
++{
++ int i, j;
++ unsigned long max_cpu_irq = 0, min_cpu_irq = (~0);
++ unsigned long move_this_load = 0;
++ int max_loaded = 0, min_loaded = 0;
++ int load;
++ unsigned long useful_load_threshold = balanced_irq_interval + 10;
++ int selected_irq;
++ int tmp_loaded, first_attempt = 1;
++ unsigned long tmp_cpu_irq;
++ unsigned long imbalance = 0;
++ cpumask_t allowed_mask, target_cpu_mask, tmp;
++
++ for_each_possible_cpu(i) {
++ int package_index;
++ CPU_IRQ(i) = 0;
++ if (!cpu_online(i))
++ continue;
++ package_index = CPU_TO_PACKAGEINDEX(i);
++ for (j = 0; j < NR_IRQS; j++) {
++ unsigned long value_now, delta;
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if ( package_index == i )
++ IRQ_DELTA(package_index,j) = 0;
++ /* Determine the total count per processor per IRQ */
++ value_now = (unsigned long) kstat_cpu(i).irqs[j];
++
++ /* Determine the activity per processor per IRQ */
++ delta = value_now - LAST_CPU_IRQ(i,j);
++
++ /* Update last_cpu_irq[][] for the next time */
++ LAST_CPU_IRQ(i,j) = value_now;
++
++ /* Ignore IRQs whose rate is less than the clock */
++ if (delta < useful_load_threshold)
++ continue;
++ /* update the load for the processor or package total */
++ IRQ_DELTA(package_index,j) += delta;
++
++ /* Keep track of the higher numbered sibling as well */
++ if (i != package_index)
++ CPU_IRQ(i) += delta;
++ /*
++ * We have sibling A and sibling B in the package
++ *
++ * cpu_irq[A] = load for cpu A + load for cpu B
++ * cpu_irq[B] = load for cpu B
++ */
++ CPU_IRQ(package_index) += delta;
++ }
++ }
++ /* Find the least loaded processor package */
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (min_cpu_irq > CPU_IRQ(i)) {
++ min_cpu_irq = CPU_IRQ(i);
++ min_loaded = i;
++ }
++ }
++ max_cpu_irq = ULONG_MAX;
++
++tryanothercpu:
++ /* Look for heaviest loaded processor.
++ * We may come back to get the next heaviest loaded processor.
++ * Skip processors with trivial loads.
++ */
++ tmp_cpu_irq = 0;
++ tmp_loaded = -1;
++ for_each_online_cpu(i) {
++ if (i != CPU_TO_PACKAGEINDEX(i))
++ continue;
++ if (max_cpu_irq <= CPU_IRQ(i))
++ continue;
++ if (tmp_cpu_irq < CPU_IRQ(i)) {
++ tmp_cpu_irq = CPU_IRQ(i);
++ tmp_loaded = i;
++ }
++ }
++
++ if (tmp_loaded == -1) {
++ /* With a small number of heavy interrupt sources, some of the
++ * cpus can end up loaded too much. We use Ingo's original
++ * approach to rotate them around.
++ */
++ if (!first_attempt && imbalance >= useful_load_threshold) {
++ rotate_irqs_among_cpus(useful_load_threshold);
++ return;
++ }
++ goto not_worth_the_effort;
++ }
++
++ first_attempt = 0; /* heaviest search */
++ max_cpu_irq = tmp_cpu_irq; /* load */
++ max_loaded = tmp_loaded; /* processor */
++ imbalance = (max_cpu_irq - min_cpu_irq) / 2;
++
++ Dprintk("max_loaded cpu = %d\n", max_loaded);
++ Dprintk("min_loaded cpu = %d\n", min_loaded);
++ Dprintk("max_cpu_irq load = %ld\n", max_cpu_irq);
++ Dprintk("min_cpu_irq load = %ld\n", min_cpu_irq);
++ Dprintk("load imbalance = %lu\n", imbalance);
++
++ /* if the imbalance is less than about an eighth of the max load
++ * we are into diminishing returns - quit
++ */
++ if (imbalance < (max_cpu_irq >> 3)) {
++ Dprintk("Imbalance too trivial\n");
++ goto not_worth_the_effort;
++ }
++
++tryanotherirq:
++ /* if we select an IRQ to move that can't go where we want, then
++ * see if there is another one to try.
++ */
++ move_this_load = 0;
++ selected_irq = -1;
++ for (j = 0; j < NR_IRQS; j++) {
++ /* Is this an active IRQ? */
++ if (!irq_desc[j].action)
++ continue;
++ if (imbalance <= IRQ_DELTA(max_loaded,j))
++ continue;
++ /* Try to find the IRQ that is closest to the imbalance
++ * without going over.
++ */
++ if (move_this_load < IRQ_DELTA(max_loaded,j)) {
++ move_this_load = IRQ_DELTA(max_loaded,j);
++ selected_irq = j;
++ }
++ }
++ if (selected_irq == -1) {
++ goto tryanothercpu;
++ }
++
++ imbalance = move_this_load;
++
++ /* For the physical_balance case, we accumulated both load
++ * values in the one of the siblings cpu_irq[],
++ * to use the same code for physical and logical processors
++ * as much as possible.
++ *
++ * NOTE: the cpu_irq[] array holds the sum of the load for
++ * sibling A and sibling B in the slot for the lowest numbered
++ * sibling (A), _AND_ the load for sibling B in the slot for
++ * the higher numbered sibling.
++ *
++ * We seek the least loaded sibling by making the comparison
++ * (A+B)/2 vs B
++ */
++ load = CPU_IRQ(min_loaded) >> 1;
++ for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
++ if (load > CPU_IRQ(j)) {
++ /* This won't change cpu_sibling_map[min_loaded] */
++ load = CPU_IRQ(j);
++ min_loaded = j;
++ }
++ }
++
++ cpus_and(allowed_mask,
++ cpu_online_map,
++ balance_irq_affinity[selected_irq]);
++ target_cpu_mask = cpumask_of_cpu(min_loaded);
++ cpus_and(tmp, target_cpu_mask, allowed_mask);
++
++ if (!cpus_empty(tmp)) {
++
++ Dprintk("irq = %d moved to cpu = %d\n",
++ selected_irq, min_loaded);
++ /* mark for change destination */
++ set_pending_irq(selected_irq, cpumask_of_cpu(min_loaded));
++
++ /* Since we made a change, come back sooner to
++ * check for more variation.
++ */
++ balanced_irq_interval = max((long)MIN_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval - BALANCED_IRQ_LESS_DELTA);
++ return;
++ }
++ goto tryanotherirq;
++
++not_worth_the_effort:
++ /*
++ * if we did not find an IRQ to move, then adjust the time interval
++ * upward
++ */
++ balanced_irq_interval = min((long)MAX_BALANCED_IRQ_INTERVAL,
++ balanced_irq_interval + BALANCED_IRQ_MORE_DELTA);
++ Dprintk("IRQ worth rotating not found\n");
++ return;
++}
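
A quick numeric check of the (A+B)/2-versus-B comparison used above, with
made-up loads:

    #include <stdio.h>

    int main(void)
    {
        /* slot A holds load(A)+load(B); slot B holds load(B) alone */
        unsigned long cpu_irq_a = 700, cpu_irq_b = 300;   /* invented */
        unsigned long load = cpu_irq_a >> 1;              /* (A+B)/2 = 350 */

        /* 350 > 300, so sibling B carries less than half the pair's load */
        printf("least loaded sibling: %c\n", load > cpu_irq_b ? 'B' : 'A');
        return 0;
    }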
++
++static int balanced_irq(void *unused)
++{
++ int i;
++ unsigned long prev_balance_time = jiffies;
++ long time_remaining = balanced_irq_interval;
++
++ daemonize("kirqd");
++
++ /* push everything to CPU 0 to give us a starting point. */
++ for (i = 0 ; i < NR_IRQS ; i++) {
++ irq_desc[i].pending_mask = cpumask_of_cpu(0);
++ set_pending_irq(i, cpumask_of_cpu(0));
++ }
++
++ for ( ; ; ) {
++ time_remaining = schedule_timeout_interruptible(time_remaining);
++ try_to_freeze();
++ if (time_after(jiffies,
++ prev_balance_time+balanced_irq_interval)) {
++ preempt_disable();
++ do_irq_balance();
++ prev_balance_time = jiffies;
++ time_remaining = balanced_irq_interval;
++ preempt_enable();
++ }
++ }
++ return 0;
++}
++
++static int __init balanced_irq_init(void)
++{
++ int i;
++ struct cpuinfo_x86 *c;
++ cpumask_t tmp;
++
++ cpus_shift_right(tmp, cpu_online_map, 2);
++ c = &boot_cpu_data;
++ /* When not overridden on the command line, ask the subarchitecture. */
++ if (irqbalance_disabled == IRQBALANCE_CHECK_ARCH)
++ irqbalance_disabled = NO_BALANCE_IRQ;
++ if (irqbalance_disabled)
++ return 0;
++
++ /* disable irqbalance completely if there is only one processor online */
++ if (num_online_cpus() < 2) {
++ irqbalance_disabled = 1;
++ return 0;
++ }
++ /*
++ * Enable physical balance only if more than 1 physical processor
++ * is present
++ */
++ if (smp_num_siblings > 1 && !cpus_empty(tmp))
++ physical_balance = 1;
++
++ for_each_online_cpu(i) {
++ irq_cpu_data[i].irq_delta = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ irq_cpu_data[i].last_irq = kmalloc(sizeof(unsigned long) * NR_IRQS, GFP_KERNEL);
++ if (irq_cpu_data[i].irq_delta == NULL || irq_cpu_data[i].last_irq == NULL) {
++ printk(KERN_ERR "balanced_irq_init: out of memory");
++ goto failed;
++ }
++ memset(irq_cpu_data[i].irq_delta,0,sizeof(unsigned long) * NR_IRQS);
++ memset(irq_cpu_data[i].last_irq,0,sizeof(unsigned long) * NR_IRQS);
++ }
++
++ printk(KERN_INFO "Starting balanced_irq\n");
++ if (kernel_thread(balanced_irq, NULL, CLONE_KERNEL) >= 0)
++ return 0;
++ else
++ printk(KERN_ERR "balanced_irq_init: failed to spawn balanced_irq");
++failed:
++ for_each_possible_cpu(i) {
++ kfree(irq_cpu_data[i].irq_delta);
++ irq_cpu_data[i].irq_delta = NULL;
++ kfree(irq_cpu_data[i].last_irq);
++ irq_cpu_data[i].last_irq = NULL;
++ }
++ return 0;
++}
++
++int __init irqbalance_disable(char *str)
++{
++ irqbalance_disabled = 1;
++ return 1;
++}
++
++__setup("noirqbalance", irqbalance_disable);
++
++late_initcall(balanced_irq_init);
++#endif /* CONFIG_IRQBALANCE */
++#endif /* CONFIG_SMP */
++#endif
++
++#ifndef CONFIG_SMP
++void fastcall send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ unsigned int cfg;
++
++ /*
++ * Wait for idle.
++ */
++ apic_wait_icr_idle();
++ cfg = APIC_DM_FIXED | APIC_DEST_SELF | vector | APIC_DEST_LOGICAL;
++ /*
++ * Send the IPI. The write to APIC_ICR fires this off.
++ */
++ apic_write_around(APIC_ICR, cfg);
++#endif
++}
++#endif /* !CONFIG_SMP */
++
++
++/*
++ * Support for broken MP BIOSes: enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++
++static int __init ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++__setup("noapic", ioapic_setup);
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_NEC98
++ ) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, "
++ "slot:%d, pin:%d.\n", bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ printk(KERN_WARNING "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ return best_guess;
++}
++EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
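++
++/*
++ * Decoding sketch (illustrative; not part of the original code): for PCI
++ * sources the MP table packs the device number and interrupt pin into
++ * mpc_srcbusirq, which the loop above unpacks as
++ *
++ *	slot = (mpc_srcbusirq >> 2) & 0x1f;	(device number)
++ *	pin  =  mpc_srcbusirq & 3;		(0=INTA# ... 3=INTD#)
++ *
++ * so a value of 0x0d decodes to slot 3, pin INTB#.
++ */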
++
++/*
++ * This function is currently only a helper for the i386 SMP boot process,
++ * where we need to reprogram the ioredtbls to cover the CPUs which have
++ * come online, so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++#ifndef CONFIG_XEN
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif /* !CONFIG_XEN */
++#endif
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
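++
++/*
++ * Worked example (illustrative; not part of the original code): the ELCR
++ * holds one bit per ISA IRQ, split across I/O ports 0x4d0 (IRQ 0-7) and
++ * 0x4d1 (IRQ 8-15), with 1 meaning level triggered. For IRQ 10 the code
++ * above computes port = 0x4d0 + (10 >> 3) = 0x4d1 and tests bit 10 & 7 = 2.
++ */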
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++/* NEC98 interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_NEC98_trigger(idx) (0)
++#define default_NEC98_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ polarity = default_NEC98_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ case MP_BUS_NEC98: /* NEC 98 pin */
++ {
++ trigger = default_NEC98_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
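++
++/*
++ * Encoding summary (illustrative; not part of the original code): the two
++ * functions above decode the low nibble of mpc_irqflag:
++ *
++ *	polarity = mpc_irqflag & 3;		(1 = high, 3 = low active)
++ *	trigger  = (mpc_irqflag >> 2) & 3;	(1 = edge, 3 = level)
++ *
++ * with 0 meaning "conforms to the bus" and 2 reserved in both fields.
++ */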
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ case MP_BUS_NEC98:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++
++ /*
++ * For MPS mode, so far only needed by ES7000 platform
++ */
++ if (ioapic_renumber_irq)
++ irq = ioapic_renumber_irq(apic, irq);
++
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ return irq;
++}
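++
++/*
++ * Worked example (illustrative; not part of the original code): with two
++ * 24-pin IO-APICs, a PCI source on apic 1, pin 5 yields irq = 24 + 5 = 29,
++ * because PCI pins are numbered consecutively across all IO-APICs.
++ */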
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly; /* = { FIRST_DEVICE_VECTOR , 0 }; */
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
++ return -EINVAL;
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
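++
++/*
++ * Design note (illustrative; not part of the original code): unlike native
++ * Linux, which picks a free vector itself, this Xen variant delegates the
++ * choice to the hypervisor via PHYSDEVOP_alloc_irq_vector and merely caches
++ * the result in vector_irq[] and IO_APIC_VECTOR(irq) for later lookups.
++ */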
++
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
++#endif
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest =
++ cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG
++ " IO-APIC (apicid-pin) %d-%d",
++ mp_ioapics[apic].mpc_apicid,
++ pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d",
++ mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ /*
++ * skip adding the timer int on secondary nodes, which causes
++ * a small but painful rift in the time-space continuum
++ */
++ if (multi_timer_check(apic, irq))
++ continue;
++ else
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/*!apic &&*/ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE, " not connected.\n");
++}
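++
++/*
++ * Register-protocol sketch (illustrative; not part of the original code):
++ * redirection entry N occupies two 32-bit registers, 0x10+2*N (low half:
++ * vector, trigger, mask) and 0x11+2*N (high half: destination). The code
++ * above writes the high half first so the destination is already valid
++ * before the low-half write can unmask the pin:
++ *
++ *	io_apic_write(apic, 0x11 + 2*pin, hi);	(destination)
++ *	io_apic_write(apic, 0x10 + 2*pin, lo);	(vector, trigger, mask)
++ */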
++
++/*
++ * Set up the 8259A-master output pin:
++ */
++#ifndef CONFIG_XEN
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++static inline void UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __init print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ union IO_APIC_reg_03 reg_03;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ if (reg_01.bits.version >= 0x20)
++ reg_03.raw = io_apic_read(apic, 3);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ printk(KERN_DEBUG "....... : Delivery Type: %X\n", reg_00.bits.delivery_type);
++ printk(KERN_DEBUG "....... : LTS : %X\n", reg_00.bits.LTS);
++ if (reg_00.bits.ID >= get_physical_broadcast())
++ UNEXPECTED_IO_APIC();
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", reg_01.raw);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x1? don't have reg_02,
++ * but the value of reg_02 is read as the previous read register
++ * value, so ignore it if reg_02 == reg_01.
++ */
++ if (reg_01.bits.version >= 0x10 && reg_02.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ /*
++ * Some Intel chipsets with IO APIC VERSION of 0x2? don't have reg_02
++ * or reg_03, but the value of reg_0[23] is read as the previous read
++ * register value, so ignore it if reg_03 == reg_0[12].
++ */
++ if (reg_01.bits.version >= 0x20 && reg_03.raw != reg_02.raw &&
++ reg_03.raw != reg_01.raw) {
++ printk(KERN_DEBUG ".... register #03: %08X\n", reg_03.raw);
++ printk(KERN_DEBUG "....... : Boot DT : %X\n", reg_03.bits.boot_DT);
++ if (reg_03.bits.__reserved_1)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
++static void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void /*__init*/ print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++ }
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ if (APIC_INTEGRATED(ver)) { /* !82489DX */
++ if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */
++ apic_write(APIC_ESR, 0);
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void /*__init*/ print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++ int i8259_apic, i8259_pin;
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++ /* Look to see if the MP table has reported the ExtINT */
++ /* If we could not find the appropriate pin by looking at the ioapic,
++ * the i8259 is probably not connected to the ioapic, but give the
++ * mptable a chance anyway.
++ */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is set up in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#if !defined(CONFIG_XEN) && !defined(CONFIG_X86_NUMAQ)
++static void __init setup_ioapic_ids_from_mpc(void)
++{
++ union IO_APIC_reg_00 reg_00;
++ physid_mask_t phys_id_present_map;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Don't check I/O APIC IDs for xAPIC systems. They have
++ * no meaning without the serial APIC bus.
++ */
++ if (!(boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ || APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ return;
++ /*
++ * This is broken; anything with a real cpu count has to
++ * circumvent this idiocy regardless.
++ */
++ phys_id_present_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++ if (mp_ioapics[apic].mpc_apicid >= get_physical_broadcast()) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID is %d in the MPC table!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ reg_00.bits.ID);
++ mp_ioapics[apic].mpc_apicid = reg_00.bits.ID;
++ }
++
++ /*
++ * Sanity check, is the ID really free? Every APIC in a
++ * system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(phys_id_present_map,
++ mp_ioapics[apic].mpc_apicid)) {
++ printk(KERN_ERR "BIOS bug, IO-APIC#%d ID %d is already used!...\n",
++ apic, mp_ioapics[apic].mpc_apicid);
++ for (i = 0; i < get_physical_broadcast(); i++)
++ if (!physid_isset(i, phys_id_present_map))
++ break;
++ if (i >= get_physical_broadcast())
++ panic("Max APIC ID exceeded!\n");
++ printk(KERN_ERR "... fixing up to %d. (tell your hw vendor)\n",
++ i);
++ physid_set(i, phys_id_present_map);
++ mp_ioapics[apic].mpc_apicid = i;
++ } else {
++ physid_mask_t tmp;
++ tmp = apicid_to_cpu_present(mp_ioapics[apic].mpc_apicid);
++ apic_printk(APIC_VERBOSE, "Setting %d in the "
++ "phys_id_present_map\n",
++ mp_ioapics[apic].mpc_apicid);
++ physids_or(phys_id_present_map, phys_id_present_map, tmp);
++ }
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE, " ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after the first one or two
++ * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++ if (jiffies - t1 > 4)
++ return 1;
++
++ return 0;
++}
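++
++/*
++ * Worked example (illustrative; not part of the original code): with
++ * HZ=100, mdelay((10 * 1000) / HZ) busy-waits 100ms, i.e. ten nominal
++ * ticks; the jiffies - t1 > 4 test then tolerates a few lost or cached
++ * ticks while still distinguishing a dead timer line from a working one.
++ */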
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way because then we do not have to be aware
++ * of 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ unsigned long v;
++ int i;
++
++ move_irq(irq);
++/*
++ * It appears there is an erratum which affects at least version 0x11
++ * of I/O APIC (that's the 82093AA and cores integrated into various
++ * chipsets). Under certain conditions a level-triggered interrupt is
++ * erroneously delivered as an edge-triggered one but the respective IRR
++ * bit gets set nevertheless. As a result the I/O unit expects an EOI
++ * message but it will never arrive and further interrupts are blocked
++ * from the source. The exact reason is so far unknown, but the
++ * phenomenon was observed when two consecutive interrupt requests
++ * from a given source get delivered to the same CPU and the source is
++ * temporarily disabled in between.
++ *
++ * A workaround is to simulate an EOI message manually. We achieve it
++ * by setting the trigger mode to edge and then to level when the edge
++ * trigger mode gets detected in the TMR of a local APIC for a
++ * level-triggered interrupt. We mask the source for the time of the
++ * operation to prevent an edge-triggered interrupt escaping meanwhile.
++ * The idea is from Manfred Spraul. --macro
++ */
++ i = IO_APIC_VECTOR(irq);
++
++ v = apic_read(APIC_TMR + ((i & ~0x1f) >> 1));
++
++ ack_APIC_irq();
++
++ if (!(v & (1 << (i & 0x1f)))) {
++ atomic_inc(&irq_mis_count);
++ spin_lock(&ioapic_lock);
++ __mask_and_edge_IO_APIC_irq(irq);
++ __unmask_and_level_IO_APIC_irq(irq);
++ spin_unlock(&ioapic_lock);
++ }
++}
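++
++/*
++ * Worked example (illustrative; not part of the original code): the TMR is
++ * 256 bits held in eight registers spaced 0x10 apart, so for vector 0x31
++ * the read above becomes apic_read(APIC_TMR + ((0x31 & ~0x1f) >> 1)), i.e.
++ * apic_read(APIC_TMR + 0x10), and the bit of interest is 0x31 & 0x1f = 17.
++ */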
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif
++#endif
++
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
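++
++/*
++ * Worked example (illustrative; not part of the original code): with the
++ * priority level being vector >> 4, vectors 0x31 and 0x39 share level 3
++ * while 0x41 lands on level 4; gate 0x80 (level 8) must stay clear for
++ * the int 0x80 system-call entry.
++ */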
++
++#ifndef CONFIG_XEN
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write_around(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask on all local APICs LVT0 as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ apic_printk(APIC_VERBOSE, KERN_INFO "activating NMI Watchdog ...");
++
++ on_each_cpu(enable_NMI_through_LVT0, NULL, 1, 1);
++
++ apic_printk(APIC_VERBOSE, " done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ timer_ack = 1;
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ printk(KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (timer_irq_works()) {
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ printk(KERN_ERR "..MP-BIOS bug: 8254 timer not connected to "
++ "IO-APIC\n");
++ }
++
++ printk(KERN_INFO "...trying to set up timer (IRQ0) through the 8259A ... ");
++ if (pin2 != -1) {
++ printk("\n..... (found pin %d) ...", pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ printk("works.\n");
++ if (pin1 != -1)
++ replace_pin_at_irq(0, apic1, pin1, apic2, pin2);
++ else
++ add_pin_to_irq(0, apic2, pin2);
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ printk(" failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ printk(KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write_around(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ apic_write_around(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ printk(" failed.\n");
++
++ printk(KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ timer_ack = 0;
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write_around(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ printk(" works.\n");
++ return;
++ }
++ printk(" failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Boot with apic=debug and send a "
++ "report. Then try booting with the 'noapic' option");
++}
++#else
++int timer_uses_ioapic_pin_0 = 0;
++#define check_timer() ((void)0)
++#endif
++
++/*
++ *
++ * IRQ's that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1 << PIC_CASCADE_IR)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ printk("ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up IO-APIC IRQ routing.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++
++/*
++ * Called after all the initialization is done. If we didn't find any
++ * APIC bugs then we can allow the modify fast path
++ */
++
++static int __init io_apic_bug_finalize(void)
++{
++ if (sis_apic_bug == -1)
++ sis_apic_bug = 0;
++ if (is_initial_xendomain()) {
++ struct xen_platform_op op = { .cmd = XENPF_platform_quirk };
++ op.u.platform_quirk.quirk_id = sis_apic_bug ?
++ QUIRK_IOAPIC_BAD_REGSEL : QUIRK_IOAPIC_GOOD_REGSEL;
++ VOID(HYPERVISOR_platform_op(&op));
++ }
++ return 0;
++}
++
++late_initcall(io_apic_bug_finalize);
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++#ifndef CONFIG_XEN
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++#endif
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++int __init io_apic_get_unique_id (int ioapic, int apic_id)
++{
++#ifndef CONFIG_XEN
++ union IO_APIC_reg_00 reg_00;
++ static physid_mask_t apic_id_map = PHYSID_MASK_NONE;
++ physid_mask_t tmp;
++ unsigned long flags;
++ int i = 0;
++
++ /*
++ * The P4 platform supports up to 256 APIC IDs on two separate APIC
++ * buses (one for LAPICs, one for IOAPICs), whereas its predecessors
++ * supported only up to 16 on one shared APIC bus.
++ *
++ * TBD: Expand LAPIC/IOAPIC support on P4-class systems to take full
++ * advantage of new APIC bus architecture.
++ */
++
++ if (physids_empty(apic_id_map))
++ apic_id_map = ioapic_phys_id_map(phys_cpu_present_map);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ if (apic_id >= get_physical_broadcast()) {
++ printk(KERN_WARNING "IOAPIC[%d]: Invalid apic_id %d, trying "
++ "%d\n", ioapic, apic_id, reg_00.bits.ID);
++ apic_id = reg_00.bits.ID;
++ }
++
++ /*
++ * Every APIC in a system must have a unique ID or we get lots of nice
++ * 'stuck on smp_invalidate_needed IPI wait' messages.
++ */
++ if (check_apicid_used(apic_id_map, apic_id)) {
++
++ for (i = 0; i < get_physical_broadcast(); i++) {
++ if (!check_apicid_used(apic_id_map, i))
++ break;
++ }
++
++ if (i == get_physical_broadcast())
++ panic("Max apic_id exceeded!\n");
++
++ printk(KERN_WARNING "IOAPIC[%d]: apic_id %d already used, "
++ "trying %d\n", ioapic, apic_id, i);
++
++ apic_id = i;
++ }
++
++ tmp = apicid_to_cpu_present(apic_id);
++ physids_or(apic_id_map, apic_id_map, tmp);
++
++ if (reg_00.bits.ID != apic_id) {
++ reg_00.bits.ID = apic_id;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0, reg_00.raw);
++ reg_00.raw = io_apic_read(ioapic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /* Sanity check */
++ if (reg_00.bits.ID != apic_id) {
++ printk("IOAPIC[%d]: Unable to change apic_id!\n", ioapic);
++ return -1;
++ }
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO
++ "IOAPIC[%d]: Assigned apic_id %d\n", ioapic, apic_id);
++#endif /* !CONFIG_XEN */
++
++ return apic_id;
++}
++
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ printk(KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1;
++
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_DEBUG, KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry "
++ "(%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
+Index: head-2008-11-25/arch/x86/kernel/ioport_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/ioport_32-xen.c 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,123 @@
++/*
++ * linux/arch/i386/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ unsigned long mask;
++ unsigned long *bitmap_base = bitmap + (base / BITS_PER_LONG);
++ unsigned int low_index = base & (BITS_PER_LONG-1);
++ int length = low_index + extent;
++
++ if (low_index != 0) {
++ mask = (~0UL << low_index);
++ if (length < BITS_PER_LONG)
++ mask &= ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ length -= BITS_PER_LONG;
++ }
++
++ mask = (new_value ? ~0UL : 0UL);
++ while (length >= BITS_PER_LONG) {
++ *bitmap_base++ = mask;
++ length -= BITS_PER_LONG;
++ }
++
++ if (length > 0) {
++ mask = ~(~0UL << length);
++ if (new_value)
++ *bitmap_base++ |= mask;
++ else
++ *bitmap_base++ &= ~mask;
++ }
++}
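++
++/*
++ * Worked example: with BITS_PER_LONG == 32, set_bitmap(bm, 30, 4, 1)
++ * sets bits 30..33 - the head pass ORs (~0UL << 30) into bm[0], the
++ * full-word loop is skipped, and the tail pass ORs ~(~0UL << 2)
++ * into bm[1].
++ */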
++
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++ struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * which is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++ set_thread_flag(TIF_IO_BITMAP);
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ }
++
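++ /*
++ * Note the inversion (!turn_on): as in the hardware TSS I/O bitmap,
++ * a set bit means "port denied", so granting access clears bits -
++ * which is also why the bitmap starts out as all 0xff above.
++ */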
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
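++
++/*
++ * Userspace sketch (hypothetical; requires CAP_SYS_RAWIO):
++ *
++ *	#include <sys/io.h>
++ *
++ *	if (ioperm(0x378, 3, 1) == 0) {
++ *		outb(0x00, 0x378);
++ *		ioperm(0x378, 3, 0);
++ *	}
++ */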
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ * Here we just change the eflags value on the stack: we allow
++ * only the super-user to do it. This depends on the stack-layout
++ * on system-call entry - see also fork() and the signal handling
++ * code.
++ */
++
++asmlinkage long sys_iopl(unsigned long unused)
++{
++ volatile struct pt_regs * regs = (struct pt_regs *) &unused;
++ unsigned int level = regs->ebx;
++ struct thread_struct *t = &current->thread;
++ unsigned int old = (t->iopl >> 12) & 3;
++
++ if (level > 3)
++ return -EINVAL;
++ /* Trying to gain more privileges? */
++ if (level > old) {
++ if (!capable(CAP_SYS_RAWIO))
++ return -EPERM;
++ }
++ t->iopl = level << 12;
++ set_iopl_mask(t->iopl);
++ return 0;
++}
+Index: head-2008-11-25/arch/x86/kernel/irq_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/irq_32-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,324 @@
++/*
++ * linux/arch/i386/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86-specific interrupt
++ * entry, irq-stacks and irq statistics code. All the remaining
++ * irq logic is done by the generic kernel/irq/ code and
++ * by the x86-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <asm/uaccess.h>
++#include <linux/module.h>
++#include <linux/seq_file.h>
++#include <linux/interrupt.h>
++#include <linux/kernel_stat.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/delay.h>
++
++DEFINE_PER_CPU(irq_cpustat_t, irq_stat) ____cacheline_internodealigned_in_smp;
++EXPORT_PER_CPU_SYMBOL(irq_stat);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * each architecture has to answer this themselves.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
++
++#ifdef CONFIG_4KSTACKS
++/*
++ * per-CPU IRQ handling contexts (thread information and stack)
++ */
++union irq_ctx {
++ struct thread_info tinfo;
++ u32 stack[THREAD_SIZE/sizeof(u32)];
++};
++
++static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
++static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;
++#endif
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++fastcall unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ int irq = ~regs->orig_eax;
++#ifdef CONFIG_4KSTACKS
++ union irq_ctx *curctx, *irqctx;
++ u32 *isp;
++#endif
++
++ if (unlikely((unsigned)irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ /*irq_enter();*/
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ /* Debugging check for stack overflow: is there less than 1KB free? */
++ {
++ long esp;
++
++ __asm__ __volatile__("andl %%esp,%0" :
++ "=r" (esp) : "0" (THREAD_SIZE - 1));
++ if (unlikely(esp < (sizeof(struct thread_info) + STACK_WARN))) {
++ printk("do_IRQ: stack overflow: %ld\n",
++ esp - sizeof(struct thread_info));
++ dump_stack();
++ }
++ }
++#endif
++
++#ifdef CONFIG_4KSTACKS
++
++ curctx = (union irq_ctx *) current_thread_info();
++ irqctx = hardirq_ctx[smp_processor_id()];
++
++ /*
++ * this is where we switch to the IRQ stack. However, if we are
++ * already using the IRQ stack (because we interrupted a hardirq
++ * handler) we can't do that and just have to keep using the
++ * current stack (which is the irq stack already after all)
++ */
++ if (curctx != irqctx) {
++ int arg1, arg2, ebx;
++
++ /* build the stack frame on the IRQ stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++ irqctx->tinfo.task = curctx->tinfo.task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /*
++ * Copy the softirq bits in preempt_count so that the
++ * softirq checks work in the hardirq context.
++ */
++ irqctx->tinfo.preempt_count =
++ (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
++ (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
++
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_IRQ \n"
++ " movl %%ebx,%%esp \n"
++ : "=a" (arg1), "=d" (arg2), "=b" (ebx)
++ : "0" (irq), "1" (regs), "2" (isp)
++ : "memory", "cc", "ecx"
++ );
++ } else
++#endif
++ __do_IRQ(irq, regs);
++
++ /*irq_exit();*/
++
++ return 1;
++}
++
++#ifdef CONFIG_4KSTACKS
++
++/*
++ * These should really be __section__(".bss.page_aligned") as well, but
++ * gcc's 3.0 and earlier don't handle that correctly.
++ */
++static char softirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++static char hardirq_stack[NR_CPUS * THREAD_SIZE]
++ __attribute__((__aligned__(THREAD_SIZE)));
++
++/*
++ * allocate per-cpu stacks for hardirq and for softirq processing
++ */
++void irq_ctx_init(int cpu)
++{
++ union irq_ctx *irqctx;
++
++ if (hardirq_ctx[cpu])
++ return;
++
++ irqctx = (union irq_ctx*) &hardirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ hardirq_ctx[cpu] = irqctx;
++
++ irqctx = (union irq_ctx*) &softirq_stack[cpu*THREAD_SIZE];
++ irqctx->tinfo.task = NULL;
++ irqctx->tinfo.exec_domain = NULL;
++ irqctx->tinfo.cpu = cpu;
++ irqctx->tinfo.preempt_count = 0;
++ irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
++
++ softirq_ctx[cpu] = irqctx;
++
++ printk("CPU %u irqstacks, hard=%p soft=%p\n",
++ cpu,hardirq_ctx[cpu],softirq_ctx[cpu]);
++}
++
++void irq_ctx_exit(int cpu)
++{
++ hardirq_ctx[cpu] = NULL;
++}
++
++extern asmlinkage void __do_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ unsigned long flags;
++ struct thread_info *curctx;
++ union irq_ctx *irqctx;
++ u32 *isp;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++
++ if (local_softirq_pending()) {
++ curctx = current_thread_info();
++ irqctx = softirq_ctx[smp_processor_id()];
++ irqctx->tinfo.task = curctx->task;
++ irqctx->tinfo.previous_esp = current_stack_pointer;
++
++ /* build the stack frame on the softirq stack */
++ isp = (u32*) ((char*)irqctx + sizeof(*irqctx));
++
++ asm volatile(
++ " xchgl %%ebx,%%esp \n"
++ " call __do_softirq \n"
++ " movl %%ebx,%%esp \n"
++ : "=b"(isp)
++ : "0"(isp)
++ : "memory", "cc", "edx", "ecx", "eax"
++ );
++ /*
++ * Shouldn't happen, we returned above if in_interrupt():
++ */
++ WARN_ON_ONCE(softirq_count());
++ }
++
++ local_irq_restore(flags);
++}
++
++EXPORT_SYMBOL(do_softirq);
++#endif
++
++/*
++ * Interrupt statistics:
++ */
++
++atomic_t irq_err_count;
++
++/*
++ * /proc/interrupts printing:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++ seq_printf(p, " %s", action->name);
++
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", nmi_count(j));
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ",
++ per_cpu(irq_stat,j).apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#if defined(CONFIG_X86_IO_APIC)
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++#if 0
++ barrier();
++ /* Ingo Molnar says: "after the IO-APIC masks have been redirected
++ [note the nop - the interrupt-enable boundary on x86 is two
++ instructions from sti] - to flush out pending hardirqs and
++ IPIs. After this point nothing is supposed to reach this CPU." */
++ __asm__ __volatile__("sti; nop; cli");
++ barrier();
++#else
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++#endif
++}
++#endif
++
+Index: head-2008-11-25/arch/x86/kernel/ldt_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/ldt_32-xen.c 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,270 @@
++/*
++ * linux/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/mmu_context.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++ load_LDT(&current->active_mm->context);
++}
++#endif
++
++static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ int oldsize;
++
++ if (mincount <= pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ pc->ldt = newldt;
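++ /*
++ * Publish the new table pointer before the new size, so a
++ * concurrent load_LDT() never sees a size that extends past
++ * the table it is reading.
++ */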
++ wmb();
++ pc->size = mincount;
++ wmb();
++
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here, that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ init_MUTEX(&mm->context.sem);
++ mm->context.size = 0;
++ mm->context.has_foreign_mappings = 0;
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ return retval;
++}
++
++/*
++ * No need to lock the MM as we are the last user
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ void *address;
++
++ err = 0;
++ address = &default_ldt[0];
++ size = 5*sizeof(struct desc_struct);
++ if (size > bytecount)
++ size = bytecount;
++
++ err = size;
++ if (copy_to_user(ptr, address, size))
++ err = -EFAULT;
++
++ return err;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct mm_struct * mm = current->mm;
++ __u32 entry_1, entry_2;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, sizeof(ldt_info)))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= mm->context.size) {
++ error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = write_ldt_entry(mm->context.ldt, ldt_info.entry_number,
++ entry_1, entry_2);
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
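++
++/*
++ * Userspace sketch (hypothetical values): install a 32-bit data
++ * segment as LDT entry 0 via func 1 (write_ldt), then reference it
++ * with selector (0 << 3) | 4 | 3 (table bit set for LDT, RPL 3):
++ *
++ *	struct user_desc d = {
++ *		.entry_number   = 0,
++ *		.base_addr      = 0,
++ *		.limit          = 0xfffff,
++ *		.seg_32bit      = 1,
++ *		.limit_in_pages = 1,
++ *	};
++ *	syscall(SYS_modify_ldt, 1, &d, sizeof(d));
++ */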
+Index: head-2008-11-25/arch/x86/kernel/microcode-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/microcode-xen.c 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,144 @@
++/*
++ * Intel CPU Microcode Update Driver for Linux
++ *
++ * Copyright (C) 2000-2004 Tigran Aivazian
++ *
++ * This driver allows upgrading microcode on Intel processors
++ * belonging to the IA-32 family - PentiumPro, Pentium II,
++ * Pentium III, Xeon, Pentium 4, etc.
++ *
++ * Reference: Section 8.10 of Volume III, Intel Pentium 4 Manual,
++ * Order Number 245472 or free download from:
++ *
++ * http://developer.intel.com/design/pentium4/manuals/245472.htm
++ *
++ * For more information, go to http://www.urbanmyth.org/microcode
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License
++ * as published by the Free Software Foundation; either version
++ * 2 of the License, or (at your option) any later version.
++ */
++
++//#define DEBUG /* pr_debug */
++#include <linux/capability.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/sched.h>
++#include <linux/cpumask.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/miscdevice.h>
++#include <linux/spinlock.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <linux/syscalls.h>
++
++#include <asm/msr.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++
++MODULE_DESCRIPTION("Intel CPU (IA-32) Microcode Update Driver");
++MODULE_AUTHOR("Tigran Aivazian <tigran@veritas.com>");
++MODULE_LICENSE("GPL");
++
++static int verbose;
++module_param(verbose, int, 0644);
++
++#define MICROCODE_VERSION "1.14a-xen"
++
++#define DEFAULT_UCODE_DATASIZE (2000) /* 2000 bytes */
++#define MC_HEADER_SIZE (sizeof (microcode_header_t)) /* 48 bytes */
++#define DEFAULT_UCODE_TOTALSIZE (DEFAULT_UCODE_DATASIZE + MC_HEADER_SIZE) /* 2048 bytes */
++
++/* no concurrent ->write()s are allowed on /dev/cpu/microcode */
++static DEFINE_MUTEX(microcode_mutex);
++
++static int microcode_open (struct inode *unused1, struct file *unused2)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++
++static int do_microcode_update (const void __user *ubuf, size_t len)
++{
++ int err;
++ void *kbuf;
++
++ kbuf = vmalloc(len);
++ if (!kbuf)
++ return -ENOMEM;
++
++ if (copy_from_user(kbuf, ubuf, len) == 0) {
++ struct xen_platform_op op;
++
++ op.cmd = XENPF_microcode_update;
++ set_xen_guest_handle(op.u.microcode.data, kbuf);
++ op.u.microcode.length = len;
++ err = HYPERVISOR_platform_op(&op);
++ } else
++ err = -EFAULT;
++
++ vfree(kbuf);
++
++ return err;
++}
++
++static ssize_t microcode_write (struct file *file, const char __user *buf, size_t len, loff_t *ppos)
++{
++ ssize_t ret;
++
++ if (len < MC_HEADER_SIZE) {
++ printk(KERN_ERR "microcode: not enough data\n");
++ return -EINVAL;
++ }
++
++ mutex_lock(&microcode_mutex);
++
++ ret = do_microcode_update(buf, len);
++ if (!ret)
++ ret = (ssize_t)len;
++
++ mutex_unlock(&microcode_mutex);
++
++ return ret;
++}
++
++static struct file_operations microcode_fops = {
++ .owner = THIS_MODULE,
++ .write = microcode_write,
++ .open = microcode_open,
++};
++
++static struct miscdevice microcode_dev = {
++ .minor = MICROCODE_MINOR,
++ .name = "microcode",
++ .fops = &microcode_fops,
++};
++
++static int __init microcode_init (void)
++{
++ int error;
++
++ error = misc_register(&microcode_dev);
++ if (error) {
++ printk(KERN_ERR
++ "microcode: can't misc_register on minor=%d\n",
++ MICROCODE_MINOR);
++ return error;
++ }
++
++ printk(KERN_INFO
++ "IA-32 Microcode Update Driver: v" MICROCODE_VERSION " <tigran@veritas.com>\n");
++ return 0;
++}
++
++static void __exit microcode_exit (void)
++{
++ misc_deregister(&microcode_dev);
++}
++
++module_init(microcode_init)
++module_exit(microcode_exit)
++MODULE_ALIAS_MISCDEV(MICROCODE_MINOR);
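++
++/*
++ * Usage sketch: writing a complete update image to the misc device
++ * (conventionally /dev/cpu/microcode) in one write() forwards it to
++ * the hypervisor as a single XENPF_microcode_update platform op:
++ *
++ *	dd if=microcode.dat of=/dev/cpu/microcode bs=1M
++ */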
+Index: head-2008-11-25/arch/x86/kernel/mpparse_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/mpparse_32-xen.c 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,1185 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/acpi.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/bitops.h>
++
++#include <asm/smp.h>
++#include <asm/acpi.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/io_apic.h>
++
++#include <mach_apic.h>
++#include <mach_mpparse.h>
++#include <bios_ebda.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++int apic_version [MAX_APICS];
++int mp_bus_id_to_type [MAX_MP_BUSSES];
++int mp_bus_id_to_node [MAX_MP_BUSSES];
++int mp_bus_id_to_local [MAX_MP_BUSSES];
++int quad_local_to_mp_bus_id [NR_CPUS/4][4];
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++static int mp_current_pci_id;
++
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++
++int pic_mode;
++unsigned long mp_lapic_addr;
++
++unsigned int def_to_bigsmp = 0;
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_physical_apicid = -1U;
++/* Internal processor count */
++static unsigned int __devinitdata num_processors;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map;
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
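++
++/*
++ * A block passes validation when the byte sum over its full length,
++ * including the stored checksum byte, is 0 mod 256 - i.e. when
++ * mpf_checksum() returns 0.
++ */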
++
++/*
++ * Have to match translation table entries to main table entries by counter
++ * hence the mpc_record variable .... can't see a less disgusting way of
++ * doing this ....
++ */
++
++static int mpc_record;
++static struct mpc_config_translation *translation_table[MAX_MPC_ENTRY] __initdata;
++
++#ifndef CONFIG_XEN
++static void __devinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int ver, apicid;
++ physid_mask_t phys_cpu;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED))
++ return;
++
++ apicid = mpc_apic_id(m, translation_table[mpc_record]);
++
++ if (m->mpc_featureflag&(1<<0))
++ Dprintk(" Floating point unit present.\n");
++ if (m->mpc_featureflag&(1<<7))
++ Dprintk(" Machine Exception supported.\n");
++ if (m->mpc_featureflag&(1<<8))
++ Dprintk(" 64 bit compare & exchange supported.\n");
++ if (m->mpc_featureflag&(1<<9))
++ Dprintk(" Internal APIC present.\n");
++ if (m->mpc_featureflag&(1<<11))
++ Dprintk(" SEP present.\n");
++ if (m->mpc_featureflag&(1<<12))
++ Dprintk(" MTRR present.\n");
++ if (m->mpc_featureflag&(1<<13))
++ Dprintk(" PGE present.\n");
++ if (m->mpc_featureflag&(1<<14))
++ Dprintk(" MCA present.\n");
++ if (m->mpc_featureflag&(1<<15))
++ Dprintk(" CMOV present.\n");
++ if (m->mpc_featureflag&(1<<16))
++ Dprintk(" PAT present.\n");
++ if (m->mpc_featureflag&(1<<17))
++ Dprintk(" PSE present.\n");
++ if (m->mpc_featureflag&(1<<18))
++ Dprintk(" PSN present.\n");
++ if (m->mpc_featureflag&(1<<19))
++ Dprintk(" Cache Line Flush Instruction present.\n");
++ /* 20 Reserved */
++ if (m->mpc_featureflag&(1<<21))
++ Dprintk(" Debug Trace and EMON Store present.\n");
++ if (m->mpc_featureflag&(1<<22))
++ Dprintk(" ACPI Thermal Throttle Registers present.\n");
++ if (m->mpc_featureflag&(1<<23))
++ Dprintk(" MMX present.\n");
++ if (m->mpc_featureflag&(1<<24))
++ Dprintk(" FXSR present.\n");
++ if (m->mpc_featureflag&(1<<25))
++ Dprintk(" XMM present.\n");
++ if (m->mpc_featureflag&(1<<26))
++ Dprintk(" Willamette New Instructions present.\n");
++ if (m->mpc_featureflag&(1<<27))
++ Dprintk(" Self Snoop present.\n");
++ if (m->mpc_featureflag&(1<<28))
++ Dprintk(" HT present.\n");
++ if (m->mpc_featureflag&(1<<29))
++ Dprintk(" Thermal Monitor present.\n");
++ /* 30, 31 Reserved */
++
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_physical_apicid = m->mpc_apicid;
++ }
++
++ ver = m->mpc_apicver;
++
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_WARNING "BIOS bug, APIC version is 0 for CPU#%d! "
++ "fixing up to 0x10. (tell your hw vendor)\n",
++ m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++
++ phys_cpu = apicid_to_cpu_present(apicid);
++ physids_or(phys_cpu_present_map, phys_cpu_present_map, phys_cpu);
++
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ if (num_processors >= maxcpus) {
++ printk(KERN_WARNING "WARNING: maxcpus limit of %i reached."
++ " Processor ignored.\n", maxcpus);
++ return;
++ }
++
++ cpu_set(num_processors, cpu_possible_map);
++ num_processors++;
++
++ /*
++ * Would be preferable to switch to bigsmp when CONFIG_HOTPLUG_CPU=y
++ * but we need to work other dependencies like SMP_SUSPEND etc
++ * before this can be done without some confusion.
++ * if (CPU_HOTPLUG_ENABLED || num_processors > 8)
++ * - Ashok Raj <ashok.raj@intel.com>
++ */
++ if (num_processors > 8) {
++ switch (boot_cpu_data.x86_vendor) {
++ case X86_VENDOR_INTEL:
++ if (!APIC_XAPIC(ver)) {
++ def_to_bigsmp = 0;
++ break;
++ }
++ /* If P4 and above fall through */
++ case X86_VENDOR_AMD:
++ def_to_bigsmp = 1;
++ }
++ }
++ bios_cpu_apicid[num_processors - 1] = m->mpc_apicid;
++}
++#else
++void __init MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++
++ mpc_oem_bus_info(m, str, translation_table[mpc_record]);
++
++ if (m->mpc_busid >= MAX_MP_BUSSES) {
++ printk(KERN_WARNING "MP table busid value (%d) for bustype %s "
++ " is too large, max. supported is %d\n",
++ m->mpc_busid, str, MAX_MP_BUSSES - 1);
++ return;
++ }
++
++ if (strncmp(str, BUSTYPE_ISA, sizeof(BUSTYPE_ISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, BUSTYPE_EISA, sizeof(BUSTYPE_EISA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, BUSTYPE_PCI, sizeof(BUSTYPE_PCI)-1) == 0) {
++ mpc_oem_pci_bus(m, translation_table[mpc_record]);
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, BUSTYPE_MCA, sizeof(BUSTYPE_MCA)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else if (strncmp(str, BUSTYPE_NEC98, sizeof(BUSTYPE_NEC98)-1) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_NEC98;
++ } else {
++ printk(KERN_WARNING "Unknown bustype %s - ignoring\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk(KERN_INFO "I/O APIC #%d Version %d at 0x%lX.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_CRIT "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!.\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++ * will show us if this assumption is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++#ifdef CONFIG_X86_NUMAQ
++static void __init MP_translation_info (struct mpc_config_translation *m)
++{
++ printk(KERN_INFO "Translation: record %d, type %d, quad %d, global %d, local %d\n", mpc_record, m->trans_type, m->trans_quad, m->trans_global, m->trans_local);
++
++ if (mpc_record >= MAX_MPC_ENTRY)
++ printk(KERN_ERR "MAX_MPC_ENTRY exceeded!\n");
++ else
++ translation_table[mpc_record] = m; /* stash this for later */
++ if (m->trans_quad < MAX_NUMNODES && !node_online(m->trans_quad))
++ node_set_online(m->trans_quad);
++}
++
++/*
++ * Read/parse the MPC oem tables
++ */
++
++static void __init smp_read_mpc_oem(struct mp_config_oemtable *oemtable,
++ unsigned short oemsize)
++{
++ int count = sizeof (*oemtable); /* the header size */
++ unsigned char *oemptr = ((unsigned char *)oemtable)+count;
++
++ mpc_record = 0;
++ printk(KERN_INFO "Found an OEM MPC table at %8p - parsing it ... \n", oemtable);
++ if (memcmp(oemtable->oem_signature,MPC_OEM_SIGNATURE,4))
++ {
++ printk(KERN_WARNING "SMP mpc oemtable: bad signature [%c%c%c%c]!\n",
++ oemtable->oem_signature[0],
++ oemtable->oem_signature[1],
++ oemtable->oem_signature[2],
++ oemtable->oem_signature[3]);
++ return;
++ }
++ if (mpf_checksum((unsigned char *)oemtable,oemtable->oem_length))
++ {
++ printk(KERN_WARNING "SMP oem mptable: checksum error!\n");
++ return;
++ }
++ while (count < oemtable->oem_length) {
++ switch (*oemptr) {
++ case MP_TRANSLATION:
++ {
++ struct mpc_config_translation *m=
++ (struct mpc_config_translation *)oemptr;
++ MP_translation_info(m);
++ oemptr += sizeof(*m);
++ count += sizeof(*m);
++ ++mpc_record;
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "Unrecognised OEM table entry type! - %d\n", (int) *oemptr);
++ return;
++ }
++ }
++ }
++}
++
++static inline void mps_oem_check(struct mp_config_table *mpc, char *oem,
++ char *productid)
++{
++ if (strncmp(oem, "IBM NUMA", 8))
++ printk("Warning! May not be a NUMA-Q system!\n");
++ if (mpc->mpc_oemptr)
++ smp_read_mpc_oem((struct mp_config_oemtable *) mpc->mpc_oemptr,
++ mpc->mpc_oemsize);
++}
++#endif /* CONFIG_X86_NUMAQ */
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ char oem[10];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk(KERN_ERR "SMP mptable: bad signature [0x%x]!\n",
++ *(u32 *)mpc->mpc_signature);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk(KERN_ERR "SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(oem,mpc->mpc_oem,8);
++ oem[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",oem);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ mps_oem_check(mpc, oem, str);
++
++ printk("APIC at: 0x%lX\n",mpc->mpc_lapic);
++
++ /*
++ * Save the local APIC address (it might be non-default) -- but only
++ * if we're not using ACPI.
++ */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ mpc_record = 0;
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ /* ACPI may have already provided this data */
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ default:
++ {
++ count = mpc->mpc_length;
++ break;
++ }
++ }
++ ++mpc_record;
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
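++/*
++ * The ELCR (edge/level control register) of the two cascaded 8259As
++ * lives at I/O ports 0x4d0 (IRQ 0-7) and 0x4d1 (IRQ 8-15); a set bit
++ * marks the corresponding IRQ as level-triggered.
++ */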
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * A default type of 5 means an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_WARNING "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
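++ /* 13 == (trigger 3 "level" << 2) | (polarity 1 "active high") */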
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk("???\n");
++ printk(KERN_ERR "Unknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk(KERN_INFO "Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ unsigned long *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ printk("Error: MPF size\n");
++
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++#ifndef CONFIG_XEN
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ virt_to_phys(mpf));
++ reserve_bootmem(virt_to_phys(mpf), PAGE_SIZE);
++ if (mpf->mpf_physptr) {
++ /*
++ * We cannot access the MPC table to compute
++ * its size yet, as only a few megabytes from
++ * the bottom are mapped at this point.
++ * The PC-9800 places its MPC table at the very
++ * end of physical memory, so blindly reserving
++ * PAGE_SIZE from mpf->mpf_physptr could trigger
++ * a BUG() in reserve_bootmem.
++ */
++ unsigned long size = PAGE_SIZE;
++ unsigned long end = max_low_pfn * PAGE_SIZE;
++ if (mpf->mpf_physptr + size > end)
++ size = end - mpf->mpf_physptr;
++ reserve_bootmem(mpf->mpf_physptr, size);
++ }
++#else
++ printk(KERN_INFO "found SMP MP-table at %08lx\n",
++ ((unsigned long)bp - (unsigned long)isa_bus_to_virt(base)) + base);
++#endif
++
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_smp_config (void)
++{
++#ifndef CONFIG_XEN
++ unsigned int address;
++#endif
++
++ /*
++ * FIXME: Linux assumes you have 640K of base ram..
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E, calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ *
++ * MP1.4 SPEC states to only scan first 1K of 4K EBDA.
++ */
++
++#ifndef CONFIG_XEN
++ address = get_bios_ebda();
++ if (address)
++ smp_scan_config(address, 0x400);
++#endif
++}
++
++int es7000_plat;
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_physical_apicid == -1U)
++ boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __devinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (MAX_APICS - id <= 0) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_base;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_base)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++ int tmpid;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
++ && !APIC_XAPIC(apic_version[boot_cpu_physical_apicid]))
++ tmpid = io_apic_get_unique_id(idx, id);
++ else
++ tmpid = id;
++ if (tmpid == -1) {
++ nr_ioapics--;
++ return;
++ }
++ mp_ioapics[idx].mpc_apicid = tmpid;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic GSI lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI GSIs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_base = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk("IOAPIC[%d]: apic_id %d, version %d, address 0x%lx, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_base,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Older generations of ES7000 have no legacy identity mappings
++ */
++ if (es7000_plat == 1)
++ return;
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++ * Use the default configuration for IRQs 0-15, unless
++ * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi (u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++ * Mapping between Global System Interrupts, which
++ * represent all possible interrupts, and IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_base;
++
++ if (ioapic_renumber_irq)
++ gsi = ioapic_renumber_irq(ioapic, gsi);
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++ * avoid a problem where the 8254 timer (IRQ0) is setup
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
++
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
+Index: head-2008-11-25/arch/x86/kernel/pci-dma-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/pci-dma-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,409 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * On i386 there is no hardware dynamic DMA address translation,
++ * so consistent alloc/free are merely page allocation/freeing.
++ * The rest of the dynamic DMA mapping interface is implemented
++ * in asm/pci.h.
++ */
++
++#include <linux/types.h>
++#include <linux/mm.h>
++#include <linux/string.h>
++#include <linux/pci.h>
++#include <linux/module.h>
++#include <linux/version.h>
++#include <asm/io.h>
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++#include <asm/swiotlb.h>
++#include <asm/tlbflush.h>
++#include <asm-i386/mach-xen/asm/swiotlb.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++#include <asm/bug.h>
++
++#ifdef __x86_64__
++#include <asm/proto.h>
++
++int iommu_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_merge);
++
++dma_addr_t bad_dma_address __read_mostly;
++EXPORT_SYMBOL(bad_dma_address);
++
++/* This tells the BIO block layer to assume merging. Default to off
++ because we cannot guarantee merging later. */
++int iommu_bio_merge __read_mostly = 0;
++EXPORT_SYMBOL(iommu_bio_merge);
++
++int force_iommu __read_mostly= 0;
++
++__init int iommu_setup(char *p)
++{
++ return 1;
++}
++
++void __init pci_iommu_alloc(void)
++{
++#ifdef CONFIG_SWIOTLB
++ pci_swiotlb_init();
++#endif
++}
++
++static int __init pci_iommu_init(void)
++{
++ no_iommu_init();
++ return 0;
++}
++
++/* Must execute after PCI subsystem */
++fs_initcall(pci_iommu_init);
++#endif
++
++struct dma_coherent_mem {
++ void *virt_base;
++ u32 device_base;
++ int size;
++ int flags;
++ unsigned long *bitmap;
++};
++
++#define IOMMU_BUG_ON(test) \
++do { \
++ if (unlikely(test)) { \
++ printk(KERN_ALERT "Fatal DMA error! " \
++ "Please use 'swiotlb=force'\n"); \
++ BUG(); \
++ } \
++} while (0)
++
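++/*
++ * Under Xen, consecutive pseudo-physical pages are not necessarily
++ * backed by consecutive machine frames, so a buffer that crosses a
++ * page boundary is only DMA-safe when the underlying MFNs happen to
++ * be contiguous - which is what the helper below verifies.
++ */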
++static int check_pages_physically_contiguous(unsigned long pfn,
++ unsigned int offset,
++ size_t length)
++{
++ unsigned long next_mfn;
++ int i;
++ int nr_pages;
++
++ next_mfn = pfn_to_mfn(pfn);
++ nr_pages = (offset + length + PAGE_SIZE-1) >> PAGE_SHIFT;
++
++ for (i = 1; i < nr_pages; i++) {
++ if (pfn_to_mfn(++pfn) != ++next_mfn)
++ return 0;
++ }
++ return 1;
++}
++
++int range_straddles_page_boundary(paddr_t p, size_t size)
++{
++ unsigned long pfn = p >> PAGE_SHIFT;
++ unsigned int offset = p & ~PAGE_MASK;
++
++ return ((offset + size > PAGE_SIZE) &&
++ !check_pages_physically_contiguous(pfn, offset, size));
++}
++
++int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i, rc;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(nents == 0 || sg[0].length == 0);
++
++ if (swiotlb) {
++ rc = swiotlb_map_sg(hwdev, sg, nents, direction);
++ } else {
++ for (i = 0; i < nents; i++) {
++ BUG_ON(!sg[i].page);
++ sg[i].dma_address =
++ gnttab_dma_map_page(sg[i].page) + sg[i].offset;
++ sg[i].dma_length = sg[i].length;
++ IOMMU_BUG_ON(address_needs_mapping(
++ hwdev, sg[i].dma_address));
++ IOMMU_BUG_ON(range_straddles_page_boundary(
++ page_to_pseudophys(sg[i].page) + sg[i].offset,
++ sg[i].length));
++ }
++ rc = nents;
++ }
++
++ flush_write_buffers();
++ return rc;
++}
++EXPORT_SYMBOL(dma_map_sg);
++
++void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ enum dma_data_direction direction)
++{
++ int i;
++
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_sg(hwdev, sg, nents, direction);
++ else {
++ for (i = 0; i < nents; i++)
++ gnttab_dma_unmap_page(sg[i].dma_address);
++ }
++}
++EXPORT_SYMBOL(dma_unmap_sg);
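++
++/*
++ * Illustrative usage sketch, not part of the original patch: the
++ * map/use/unmap cycle a driver would follow with the scatterlist
++ * helpers above. The function name is made up for illustration.
++ */
++static int example_sg_io(struct device *dev, struct scatterlist *sg,
++ int nents)
++{
++ int i, mapped = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
++
++ if (!mapped)
++ return -ENOMEM; /* the swiotlb path may fail */
++ for (i = 0; i < mapped; i++) {
++ /* program the device with sg[i].dma_address and
++ * sg[i].dma_length here */
++ }
++ dma_unmap_sg(dev, sg, mapped, DMA_TO_DEVICE);
++ return 0;
++}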
++
++#ifdef CONFIG_HIGHMEM
++dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction)
++{
++ dma_addr_t dma_addr;
++
++ BUG_ON(direction == DMA_NONE);
++
++ if (swiotlb) {
++ dma_addr = swiotlb_map_page(
++ dev, page, offset, size, direction);
++ } else {
++ dma_addr = gnttab_dma_map_page(page) + offset;
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma_addr));
++ }
++
++ return dma_addr;
++}
++EXPORT_SYMBOL(dma_map_page);
++
++void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (swiotlb)
++ swiotlb_unmap_page(dev, dma_address, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
++}
++EXPORT_SYMBOL(dma_unmap_page);
++#endif /* CONFIG_HIGHMEM */
++
++int
++dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (swiotlb)
++ return swiotlb_dma_mapping_error(dma_addr);
++ return 0;
++}
++EXPORT_SYMBOL(dma_mapping_error);
++
++int
++dma_supported(struct device *dev, u64 mask)
++{
++ if (swiotlb)
++ return swiotlb_dma_supported(dev, mask);
++ /*
++ * By default we'll BUG when an infeasible DMA is requested, and
++ * request swiotlb=force (see IOMMU_BUG_ON).
++ */
++ return 1;
++}
++EXPORT_SYMBOL(dma_supported);
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp)
++{
++ void *ret;
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ unsigned int order = get_order(size);
++ unsigned long vstart;
++ u64 mask;
++
++ /* ignore region specifiers */
++ gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);
++
++ if (mem) {
++ int page = bitmap_find_free_region(mem->bitmap, mem->size,
++ order);
++ if (page >= 0) {
++ *dma_handle = mem->device_base + (page << PAGE_SHIFT);
++ ret = mem->virt_base + (page << PAGE_SHIFT);
++ memset(ret, 0, size);
++ return ret;
++ }
++ if (mem->flags & DMA_MEMORY_EXCLUSIVE)
++ return NULL;
++ }
++
++ if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
++ gfp |= GFP_DMA;
++
++ vstart = __get_free_pages(gfp, order);
++ ret = (void *)vstart;
++
++ if (dev != NULL && dev->coherent_dma_mask)
++ mask = dev->coherent_dma_mask;
++ else
++ mask = 0xffffffff;
++
++ if (ret != NULL) {
++ if (xen_create_contiguous_region(vstart, order,
++ fls64(mask)) != 0) {
++ free_pages(vstart, order);
++ return NULL;
++ }
++ memset(ret, 0, size);
++ *dma_handle = virt_to_bus(ret);
++ }
++ return ret;
++}
++EXPORT_SYMBOL(dma_alloc_coherent);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle)
++{
++ struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
++ int order = get_order(size);
++
++ if (mem && vaddr >= mem->virt_base && vaddr < (mem->virt_base + (mem->size << PAGE_SHIFT))) {
++ int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;
++
++ bitmap_release_region(mem->bitmap, page, order);
++ } else {
++ xen_destroy_contiguous_region((unsigned long)vaddr, order);
++ free_pages((unsigned long)vaddr, order);
++ }
++}
++EXPORT_SYMBOL(dma_free_coherent);
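++
++/*
++ * Illustrative sketch, not part of the original patch: allocating and
++ * freeing a machine-contiguous DMA buffer with the helpers above. The
++ * size is an arbitrary example value.
++ */
++static void example_coherent_roundtrip(struct device *dev)
++{
++ dma_addr_t bus;
++ /* xen_create_contiguous_region() inside dma_alloc_coherent()
++ * exchanges the backing frames so the buffer is contiguous in
++ * machine memory and within the coherent DMA mask. */
++ void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &bus, GFP_KERNEL);
++
++ if (ring)
++ dma_free_coherent(dev, PAGE_SIZE, ring, bus);
++}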
++
++#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags)
++{
++ void __iomem *mem_base;
++ int pages = size >> PAGE_SHIFT;
++ int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long);
++
++ if ((flags & (DMA_MEMORY_MAP | DMA_MEMORY_IO)) == 0)
++ goto out;
++ if (!size)
++ goto out;
++ if (dev->dma_mem)
++ goto out;
++
++ /* FIXME: this routine just ignores DMA_MEMORY_INCLUDES_CHILDREN */
++
++ mem_base = ioremap(bus_addr, size);
++ if (!mem_base)
++ goto out;
++
++ dev->dma_mem = kmalloc(sizeof(struct dma_coherent_mem), GFP_KERNEL);
++ if (!dev->dma_mem)
++ goto out;
++ memset(dev->dma_mem, 0, sizeof(struct dma_coherent_mem));
++ dev->dma_mem->bitmap = kmalloc(bitmap_size, GFP_KERNEL);
++ if (!dev->dma_mem->bitmap)
++ goto free1_out;
++ memset(dev->dma_mem->bitmap, 0, bitmap_size);
++
++ dev->dma_mem->virt_base = mem_base;
++ dev->dma_mem->device_base = device_addr;
++ dev->dma_mem->size = pages;
++ dev->dma_mem->flags = flags;
++
++ if (flags & DMA_MEMORY_MAP)
++ return DMA_MEMORY_MAP;
++
++ return DMA_MEMORY_IO;
++
++ free1_out:
++ kfree(dev->dma_mem->bitmap);
++ out:
++ return 0;
++}
++EXPORT_SYMBOL(dma_declare_coherent_memory);
++
++void dma_release_declared_memory(struct device *dev)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++
++ if(!mem)
++ return;
++ dev->dma_mem = NULL;
++ iounmap(mem->virt_base);
++ kfree(mem->bitmap);
++ kfree(mem);
++}
++EXPORT_SYMBOL(dma_release_declared_memory);
++
++void *dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size)
++{
++ struct dma_coherent_mem *mem = dev->dma_mem;
++ int pages = (size + (device_addr & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ int pos, err;
++
++ if (!mem)
++ return ERR_PTR(-EINVAL);
++
++ pos = (device_addr - mem->device_base) >> PAGE_SHIFT;
++ err = bitmap_allocate_region(mem->bitmap, pos, get_order(pages));
++ if (err != 0)
++ return ERR_PTR(err);
++ return mem->virt_base + (pos << PAGE_SHIFT);
++}
++EXPORT_SYMBOL(dma_mark_declared_memory_occupied);
++#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
++
++dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_addr_t dma;
++
++ if (direction == DMA_NONE)
++ BUG();
++ WARN_ON(size == 0);
++
++ if (swiotlb) {
++ dma = swiotlb_map_single(dev, ptr, size, direction);
++ } else {
++ dma = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
++ IOMMU_BUG_ON(range_straddles_page_boundary(__pa(ptr), size));
++ IOMMU_BUG_ON(address_needs_mapping(dev, dma));
++ }
++
++ flush_write_buffers();
++ return dma;
++}
++EXPORT_SYMBOL(dma_map_single);
++
++void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction)
++{
++ if (direction == DMA_NONE)
++ BUG();
++ if (swiotlb)
++ swiotlb_unmap_single(dev, dma_addr, size, direction);
++ else
++ gnttab_dma_unmap_page(dma_addr);
++}
++EXPORT_SYMBOL(dma_unmap_single);
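++
++/*
++ * Illustrative usage sketch, not part of the original patch: a
++ * single-buffer map/unmap round trip with the helpers above.
++ */
++static void example_single_io(struct device *dev, void *buf, size_t len)
++{
++ dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
++
++ if (!dma_mapping_error(bus)) {
++ /* hand "bus" to the device and wait for completion */
++ dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
++ }
++}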
++
++void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_cpu(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_cpu);
++
++void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_single_for_device(dev, dma_handle, size, direction);
++}
++EXPORT_SYMBOL(dma_sync_single_for_device);
+Index: head-2008-11-25/arch/x86/kernel/process_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/process_32-xen.c 2008-07-21 11:00:32.000000000 +0200
+@@ -0,0 +1,877 @@
++/*
++ * linux/arch/i386/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/utsname.h>
++#include <linux/delay.h>
++#include <linux/reboot.h>
++#include <linux/init.h>
++#include <linux/mc146818rtc.h>
++#include <linux/module.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/random.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/ldt.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/desc.h>
++#include <asm/vm86.h>
++#ifdef CONFIG_MATH_EMULATION
++#include <asm/math_emu.h>
++#endif
++
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++
++#include <linux/err.h>
++
++#include <asm/tlbflush.h>
++#include <asm/cpu.h>
++
++asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
++
++static int hlt_counter;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Return saved PC of a blocked thread.
++ */
++unsigned long thread_saved_pc(struct task_struct *tsk)
++{
++ return ((unsigned long *)tsk->thread.esp)[3];
++}
++
++/*
++ * Powermanagement idle function, if any..
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++void disable_hlt(void)
++{
++ hlt_counter++;
++}
++
++EXPORT_SYMBOL(disable_hlt);
++
++void enable_hlt(void)
++{
++ hlt_counter--;
++}
++
++EXPORT_SYMBOL(enable_hlt);
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->work.need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
++ asm volatile(
++ "2:"
++ "testl %0, %1;"
++ "rep; nop;"
++ "je 2b;"
++ : : "i"(_TIF_NEED_RESCHED), "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++#ifdef CONFIG_APM_MODULE
++EXPORT_SYMBOL(default_idle);
++#endif
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern cpumask_t cpu_initialized;
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle(void)
++{
++ int cpu = smp_processor_id();
++
++ current_thread_info()->status |= TS_POLLING;
++
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ rmb();
++ idle = xen_idle; /* no alternatives */
++
++ if (cpu_is_offline(cpu))
++ play_dead();
++
++ __get_cpu_var(irq_stat).idle_timestamp = jiffies;
++ idle();
++ }
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
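++
++/*
++ * Illustrative note, not part of the original patch: cpu_idle_wait()
++ * works as a handshake. It sets cpu_idle_state for every online CPU,
++ * and each CPU clears its own flag on the next pass through
++ * cpu_idle() above; the 1-second polling loop completes once every
++ * CPU has re-entered the idle loop.
++ */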
++
++void __devinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++void show_regs(struct pt_regs * regs)
++{
++ unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
++
++ printk("\n");
++ printk("Pid: %d, comm: %20s\n", current->pid, current->comm);
++ printk("EIP: %04x:[<%08lx>] CPU: %d\n",0xffff & regs->xcs,regs->eip, smp_processor_id());
++ print_symbol("EIP is at %s\n", regs->eip);
++
++ if (user_mode_vm(regs))
++ printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
++ printk(" EFLAGS: %08lx %s (%s %.*s)\n",
++ regs->eflags, print_tainted(), system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
++ regs->eax,regs->ebx,regs->ecx,regs->edx);
++ printk("ESI: %08lx EDI: %08lx EBP: %08lx",
++ regs->esi, regs->edi, regs->ebp);
++ printk(" DS: %04x ES: %04x\n",
++ 0xffff & regs->xds,0xffff & regs->xes);
++
++ cr0 = read_cr0();
++ cr2 = read_cr2();
++ cr3 = read_cr3();
++ cr4 = read_cr4_safe();
++ printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4);
++ show_trace(NULL, regs, &regs->esp);
++}
++
++/*
++ * This gets run with %ebx containing the
++ * function to call, and %edx containing
++ * the "args".
++ */
++extern void kernel_thread_helper(void);
++__asm__(".section .text\n"
++ ".align 4\n"
++ "kernel_thread_helper:\n\t"
++ "movl %edx,%eax\n\t"
++ "pushl %edx\n\t"
++ "call *%ebx\n\t"
++ "pushl %eax\n\t"
++ "call do_exit\n"
++ ".previous");
++
++/*
++ * Create a kernel thread
++ */
++int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++{
++ struct pt_regs regs;
++
++ memset(&regs, 0, sizeof(regs));
++
++ regs.ebx = (unsigned long) fn;
++ regs.edx = (unsigned long) arg;
++
++ regs.xds = __USER_DS;
++ regs.xes = __USER_DS;
++ regs.orig_eax = -1;
++ regs.eip = (unsigned long) kernel_thread_helper;
++ regs.xcs = GET_KERNEL_CS();
++ regs.eflags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;
++
++ /* Ok, create the new process.. */
++ return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
++}
++EXPORT_SYMBOL(kernel_thread);
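++
++/*
++ * Illustrative usage sketch, not part of the original patch: spawning
++ * a kernel thread via the helper above. Both names are made up for
++ * illustration.
++ */
++static int example_thread_fn(void *unused)
++{
++ /* entered through kernel_thread_helper with %ebx = fn,
++ * %edx = arg, as set up in kernel_thread() */
++ return 0;
++}
++
++static void example_spawn_thread(void)
++{
++ kernel_thread(example_thread_fn, NULL, CLONE_FS | CLONE_FILES);
++}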
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ /* The process may have allocated an io port bitmap... nuke it. */
++ if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
++ struct task_struct *tsk = current;
++ struct thread_struct *t = &tsk->thread;
++ struct physdev_set_iobitmap set_iobitmap;
++ memset(&set_iobitmap, 0, sizeof(set_iobitmap));
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ clear_thread_flag(TIF_IO_BITMAP);
++ }
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++
++ memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ clear_tsk_thread_flag(tsk, TIF_DEBUG);
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ BUG_ON(dead_task->mm);
++ release_vm86_irqs(dead_task);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ struct pt_regs * childregs;
++ struct task_struct *tsk;
++ int err;
++
++ childregs = task_pt_regs(p);
++ *childregs = *regs;
++ childregs->eax = 0;
++ childregs->esp = esp;
++
++ p->thread.esp = (unsigned long) childregs;
++ p->thread.esp0 = (unsigned long) (childregs+1);
++
++ p->thread.eip = (unsigned long) ret_from_fork;
++
++ savesegment(fs,p->thread.fs);
++ savesegment(gs,p->thread.gs);
++
++ tsk = current;
++ if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ set_tsk_thread_flag(p, TIF_IO_BITMAP);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++ struct desc_struct *desc;
++ struct user_desc info;
++ int idx;
++
++ err = -EFAULT;
++ if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
++ goto out;
++ err = -EINVAL;
++ if (LDT_empty(&info))
++ goto out;
++
++ idx = info.entry_number;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ goto out;
++
++ desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++ out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++/*
++ * fill in the user structure for a core dump..
++ */
++void dump_thread(struct pt_regs * regs, struct user * dump)
++{
++ int i;
++
++/* changed the size calculations - should hopefully work better. lbt */
++ dump->magic = CMAGIC;
++ dump->start_code = 0;
++ dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
++ dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
++ dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
++ dump->u_dsize -= dump->u_tsize;
++ dump->u_ssize = 0;
++ for (i = 0; i < 8; i++)
++ dump->u_debugreg[i] = current->thread.debugreg[i];
++
++ if (dump->start_stack < TASK_SIZE)
++ dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;
++
++ dump->regs.ebx = regs->ebx;
++ dump->regs.ecx = regs->ecx;
++ dump->regs.edx = regs->edx;
++ dump->regs.esi = regs->esi;
++ dump->regs.edi = regs->edi;
++ dump->regs.ebp = regs->ebp;
++ dump->regs.eax = regs->eax;
++ dump->regs.ds = regs->xds;
++ dump->regs.es = regs->xes;
++ savesegment(fs,dump->regs.fs);
++ savesegment(gs,dump->regs.gs);
++ dump->regs.orig_eax = regs->orig_eax;
++ dump->regs.eip = regs->eip;
++ dump->regs.cs = regs->xcs;
++ dump->regs.eflags = regs->eflags;
++ dump->regs.esp = regs->esp;
++ dump->regs.ss = regs->xss;
++
++ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
++}
++EXPORT_SYMBOL(dump_thread);
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs ptregs = *task_pt_regs(tsk);
++ ptregs.xcs &= 0xffff;
++ ptregs.xds &= 0xffff;
++ ptregs.xes &= 0xffff;
++ ptregs.xss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++static noinline void __switch_to_xtra(struct task_struct *next_p)
++{
++ struct thread_struct *next;
++
++ next = &next_p->thread;
++
++ if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
++ set_debugreg(next->debugreg[0], 0);
++ set_debugreg(next->debugreg[1], 1);
++ set_debugreg(next->debugreg[2], 2);
++ set_debugreg(next->debugreg[3], 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg[6], 6);
++ set_debugreg(next->debugreg[7], 7);
++ }
++}
++
++/*
++ * This function selects if the context switch from prev to next
++ * has to tweak the TSC disable bit in the cr4.
++ */
++static inline void disable_tsc(struct task_struct *prev_p,
++ struct task_struct *next_p)
++{
++ struct thread_info *prev, *next;
++
++ /*
++ * gcc should eliminate the ->thread_info dereference if
++ * has_secure_computing returns 0 at compile time (SECCOMP=n).
++ */
++ prev = task_thread_info(prev_p);
++ next = task_thread_info(next_p);
++
++ if (has_secure_computing(prev) || has_secure_computing(next)) {
++ /* slow path here */
++ if (has_secure_computing(prev) &&
++ !has_secure_computing(next)) {
++ write_cr4(read_cr4() & ~X86_CR4_TSD);
++ } else if (!has_secure_computing(prev) &&
++ has_secure_computing(next))
++ write_cr4(read_cr4() | X86_CR4_TSD);
++ }
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * We fsave/fwait so that an exception goes off at the right time
++ * (as a call from the fsave or fwait in effect) rather than to
++ * the wrong process. Lazy FP saving no longer makes any sense
++ * with modern CPU's, and this simplifies a lot of things (SMP
++ * and UP become the same).
++ *
++ * NOTE! We used to use the x86 hardware context switching. The
++ * reason for not using it any more becomes apparent when you
++ * try to recover gracefully from saved state that is no longer
++ * valid (stale segment register values in particular). With the
++ * hardware task-switch, there is no way to fix up bad state in
++ * a reasonable manner.
++ *
++ * The fact that Intel documents the hardware task-switching to
++ * be slow is a fairly red herring - this code is not noticeably
++ * faster. However, there _is_ some room for improvement here,
++ * so the performance issues may eventually be a valid point.
++ * More important, however, is the fact that this allows us much
++ * more flexibility.
++ *
++ * The return value (in %eax) will be the "prev" task after
++ * the task-switch, and shows up in ret_from_fork in entry.S,
++ * for example.
++ */
++struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++#if CONFIG_XEN_COMPAT > 0x030002
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++#else
++ struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
++ multicall_entry_t _mcl[8], *mcl = _mcl;
++
++ /* XEN NOTE: FS/GS saved in switch_mm(), not here. */
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++#if 0 /* lazy fpu sanity check */
++ else BUG_ON(!(read_cr0() & 8));
++#endif
++
++ /*
++ * Reload esp0.
++ * This is load_esp0(tss, next) with a multicall.
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->esp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i].a != prev->tls_array[i].a || \
++ next->tls_array[i].b != prev->tls_array[i].b)) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ *(u64 *)&mcl->args[0] = virt_to_machine( \
++ &get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]);\
++ *(u64 *)&mcl->args[2] = *(u64 *)&next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : (next->iopl >> 12) & 3;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iopl;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iobitmap;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++ BUG();
++
++ /*
++ * Restore %fs and %gs if needed.
++ *
++ * Glibc normally makes %fs be zero, and %gs is one of
++ * the TLS segments.
++ */
++ if (unlikely(next->fs))
++ loadsegment(fs, next->fs);
++
++ if (next->gs)
++ loadsegment(gs, next->gs);
++
++ /*
++ * Now maybe handle debug registers
++ */
++ if (unlikely(task_thread_info(next_p)->flags & _TIF_WORK_CTXSW))
++ __switch_to_xtra(next_p);
++
++ disable_tsc(prev_p, next_p);
++
++ return prev_p;
++}
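++
++/*
++ * Illustrative sketch, not part of the original patch: the multicall
++ * batching pattern __switch_to() uses above, reduced to its core.
++ * Rather than trapping to Xen once per piece of state, entries are
++ * queued in an array and submitted with a single hypercall.
++ */
++static void example_batched_stack_switch(unsigned long new_esp0)
++{
++ multicall_entry_t mcl[1], *m = mcl;
++
++ m->op = __HYPERVISOR_stack_switch;
++ m->args[0] = __KERNEL_DS;
++ m->args[1] = new_esp0;
++ m++;
++
++ if (unlikely(HYPERVISOR_multicall_check(mcl, m - mcl, NULL)))
++ BUG();
++}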
++
++asmlinkage int sys_fork(struct pt_regs regs)
++{
++ return do_fork(SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++asmlinkage int sys_clone(struct pt_regs regs)
++{
++ unsigned long clone_flags;
++ unsigned long newsp;
++ int __user *parent_tidptr, *child_tidptr;
++
++ clone_flags = regs.ebx;
++ newsp = regs.ecx;
++ parent_tidptr = (int __user *)regs.edx;
++ child_tidptr = (int __user *)regs.edi;
++ if (!newsp)
++ newsp = regs.esp;
++ return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage int sys_vfork(struct pt_regs regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.esp, &regs, 0, NULL, NULL);
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage int sys_execve(struct pt_regs regs)
++{
++ int error;
++ char * filename;
++
++ filename = getname((char __user *) regs.ebx);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ goto out;
++ error = do_execve(filename,
++ (char __user * __user *) regs.ecx,
++ (char __user * __user *) regs.edx,
++ &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ /* Make sure we don't return using sysenter.. */
++ set_thread_flag(TIF_IRET);
++ }
++ putname(filename);
++out:
++ return error;
++}
++
++#define top_esp (THREAD_SIZE - sizeof(unsigned long))
++#define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long))
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long ebp, esp, eip;
++ unsigned long stack_page;
++ int count = 0;
++ if (!p || p == current || p->state == TASK_RUNNING)
++ return 0;
++ stack_page = (unsigned long)task_stack_page(p);
++ esp = p->thread.esp;
++ if (!stack_page || esp < stack_page || esp > top_esp+stack_page)
++ return 0;
++ /* include/asm-i386/system.h:switch_to() pushes ebp last. */
++ ebp = *(unsigned long *) esp;
++ do {
++ if (ebp < stack_page || ebp > top_ebp+stack_page)
++ return 0;
++ eip = *(unsigned long *) (ebp+4);
++ if (!in_sched_functions(eip))
++ return eip;
++ ebp = *(unsigned long *) ebp;
++ } while (count++ < 16);
++ return 0;
++}
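++
++/*
++ * Illustrative note, not part of the original patch: get_wchan()
++ * relies on the i386 frame layout
++ * [ebp] saved ebp of the caller
++ * [ebp+4] return address (eip)
++ * and follows the ebp chain until it finds a return address outside
++ * the scheduler, bounding the walk to 16 frames.
++ */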
++
++/*
++ * sys_alloc_thread_area: get a yet unused TLS descriptor index.
++ */
++static int get_free_idx(void)
++{
++ struct thread_struct *t = &current->thread;
++ int idx;
++
++ for (idx = 0; idx < GDT_ENTRY_TLS_ENTRIES; idx++)
++ if (desc_empty(t->tls_array + idx))
++ return idx + GDT_ENTRY_TLS_MIN;
++ return -ESRCH;
++}
++
++/*
++ * Set a given TLS descriptor:
++ */
++asmlinkage int sys_set_thread_area(struct user_desc __user *u_info)
++{
++ struct thread_struct *t = &current->thread;
++ struct user_desc info;
++ struct desc_struct *desc;
++ int cpu, idx;
++
++ if (copy_from_user(&info, u_info, sizeof(info)))
++ return -EFAULT;
++ idx = info.entry_number;
++
++ /*
++ * index -1 means the kernel should try to find and
++ * allocate an empty descriptor:
++ */
++ if (idx == -1) {
++ idx = get_free_idx();
++ if (idx < 0)
++ return idx;
++ if (put_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ }
++
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ desc = t->tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ /*
++ * We must not get preempted while modifying the TLS.
++ */
++ cpu = get_cpu();
++
++ if (LDT_empty(&info)) {
++ desc->a = 0;
++ desc->b = 0;
++ } else {
++ desc->a = LDT_entry_a(&info);
++ desc->b = LDT_entry_b(&info);
++ }
++ load_TLS(t, cpu);
++
++ put_cpu();
++
++ return 0;
++}
++
++/*
++ * Get the current Thread-Local Storage area:
++ */
++
++#define GET_BASE(desc) ( \
++ (((desc)->a >> 16) & 0x0000ffff) | \
++ (((desc)->b << 16) & 0x00ff0000) | \
++ ( (desc)->b & 0xff000000) )
++
++#define GET_LIMIT(desc) ( \
++ ((desc)->a & 0x0ffff) | \
++ ((desc)->b & 0xf0000) )
++
++#define GET_32BIT(desc) (((desc)->b >> 22) & 1)
++#define GET_CONTENTS(desc) (((desc)->b >> 10) & 3)
++#define GET_WRITABLE(desc) (((desc)->b >> 9) & 1)
++#define GET_LIMIT_PAGES(desc) (((desc)->b >> 23) & 1)
++#define GET_PRESENT(desc) (((desc)->b >> 15) & 1)
++#define GET_USEABLE(desc) (((desc)->b >> 20) & 1)
++
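++/*
++ * Illustrative worked example, not part of the original patch: for a
++ * flat 4GB writable data segment with desc->a = 0x0000ffff and
++ * desc->b = 0x00cff200, the macros above yield GET_BASE() == 0,
++ * GET_LIMIT() == 0xfffff, GET_LIMIT_PAGES() == 1, GET_32BIT() == 1,
++ * GET_WRITABLE() == 1 and GET_PRESENT() == 1.
++ */
++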
++asmlinkage int sys_get_thread_area(struct user_desc __user *u_info)
++{
++ struct user_desc info;
++ struct desc_struct *desc;
++ int idx;
++
++ if (get_user(idx, &u_info->entry_number))
++ return -EFAULT;
++ if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
++ return -EINVAL;
++
++ memset(&info, 0, sizeof(info));
++
++ desc = current->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
++
++ info.entry_number = idx;
++ info.base_addr = GET_BASE(desc);
++ info.limit = GET_LIMIT(desc);
++ info.seg_32bit = GET_32BIT(desc);
++ info.contents = GET_CONTENTS(desc);
++ info.read_exec_only = !GET_WRITABLE(desc);
++ info.limit_in_pages = GET_LIMIT_PAGES(desc);
++ info.seg_not_present = !GET_PRESENT(desc);
++ info.useable = GET_USEABLE(desc);
++
++ if (copy_to_user(u_info, &info, sizeof(info)))
++ return -EFAULT;
++ return 0;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+Index: head-2008-11-25/arch/x86/kernel/quirks-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/quirks-xen.c 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,47 @@
++/*
++ * This file contains work-arounds for x86 and x86_64 platform bugs.
++ */
++#include <linux/pci.h>
++#include <linux/irq.h>
++
++#if defined(CONFIG_X86_IO_APIC) && (defined(CONFIG_SMP) || defined(CONFIG_XEN)) && defined(CONFIG_PCI)
++
++static void __devinit quirk_intel_irqbalance(struct pci_dev *dev)
++{
++ u8 config, rev;
++ u32 word;
++
++ /* BIOS may enable hardware IRQ balancing for
++ * E7520/E7320/E7525(revision ID 0x9 and below)
++ * based platforms.
++ * Disable SW irqbalance/affinity on those platforms.
++ */
++ pci_read_config_byte(dev, PCI_CLASS_REVISION, &rev);
++ if (rev > 0x9)
++ return;
++
++ printk(KERN_INFO "Intel E7520/7320/7525 detected.");
++
++ /* enable access to config space*/
++ pci_read_config_byte(dev, 0xf4, &config);
++ pci_write_config_byte(dev, 0xf4, config|0x2);
++
++ /* read xTPR register */
++ raw_pci_ops->read(0, 0, 0x40, 0x4c, 2, &word);
++
++ if (!(word & (1 << 13))) {
++ struct xen_platform_op op;
++ printk(KERN_INFO "Disabling irq balancing and affinity\n");
++ op.cmd = XENPF_platform_quirk;
++ op.u.platform_quirk.quirk_id = QUIRK_NOIRQBALANCING;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ }
++
++ /* put back the original value for config space*/
++ if (!(config & 0x2))
++ pci_write_config_byte(dev, 0xf4, config);
++}
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7320_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7525_MCH, quirk_intel_irqbalance);
++DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_E7520_MCH, quirk_intel_irqbalance);
++#endif
+Index: head-2008-11-25/arch/x86/kernel/setup_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/setup_32-xen.c 2008-04-22 15:41:51.000000000 +0200
+@@ -0,0 +1,1919 @@
++/*
++ * linux/arch/i386/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ *
++ * Memory region support
++ * David Parsons <orc@pell.chi.il.us>, July-August 1999
++ *
++ * Added E820 sanitization routine (removes overlapping memory regions);
++ * Brian Moyle <bmoyle@mvista.com>, February 2001
++ *
++ * Moved CPU detection code to cpu/${cpu}.c
++ * Patrick Mochel <mochel@osdl.org>, March 2002
++ *
++ * Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ *
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mmzone.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/acpi.h>
++#include <linux/apm_bios.h>
++#include <linux/initrd.h>
++#include <linux/bootmem.h>
++#include <linux/seq_file.h>
++#include <linux/platform_device.h>
++#include <linux/console.h>
++#include <linux/mca.h>
++#include <linux/root_dev.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/efi.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <linux/nodemask.h>
++#include <linux/kernel.h>
++#include <linux/percpu.h>
++#include <linux/notifier.h>
++#include <linux/kexec.h>
++#include <linux/crash_dump.h>
++#include <linux/dmi.h>
++#include <linux/pfn.h>
++
++#include <video/edid.h>
++
++#include <asm/apic.h>
++#include <asm/e820.h>
++#include <asm/mpspec.h>
++#include <asm/setup.h>
++#include <asm/arch_hooks.h>
++#include <asm/sections.h>
++#include <asm/io_apic.h>
++#include <asm/ist.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/memory.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#include <setup_arch.h>
++#include <bios_ebda.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++/* Forward Declaration. */
++void __init find_max_pfn(void);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++int disable_pse __devinitdata = 0;
++
++/*
++ * Machine setup..
++ */
++
++#ifdef CONFIG_EFI
++int efi_enabled = 0;
++EXPORT_SYMBOL(efi_enabled);
++#endif
++
++/* cpu data as detected by the assembly code in head.S */
++struct cpuinfo_x86 new_cpu_data __initdata = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++/* common cpu data for all cpus */
++struct cpuinfo_x86 boot_cpu_data __read_mostly = { 0, 0, 0, 0, -1, 1, 0, 0, -1 };
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++#ifdef CONFIG_ACPI
++ int acpi_disabled = 0;
++#else
++ int acpi_disabled = 1;
++#endif
++EXPORT_SYMBOL(acpi_disabled);
++
++#ifdef CONFIG_ACPI
++int __initdata acpi_force = 0;
++extern acpi_interrupt_flags acpi_sci_flags;
++#endif
++
++/* for MCA, but anyone else can use it if they want */
++unsigned int machine_id;
++#ifdef CONFIG_MCA
++EXPORT_SYMBOL(machine_id);
++#endif
++unsigned int machine_submodel_id;
++unsigned int BIOS_revision;
++unsigned int mca_pentium_flag;
++
++/* For PCI or other memory-mapped resources */
++unsigned long pci_mem_start = 0x10000000;
++#ifdef CONFIG_PCI
++EXPORT_SYMBOL(pci_mem_start);
++#endif
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++/* user-defined highmem size */
++static unsigned int highmem_pages = -1;
++
++/*
++ * Setup options
++ */
++struct drive_info_struct { char dummy[32]; } drive_info;
++#if defined(CONFIG_BLK_DEV_IDE) || defined(CONFIG_BLK_DEV_HD) || \
++ defined(CONFIG_BLK_DEV_IDE_MODULE) || defined(CONFIG_BLK_DEV_HD_MODULE)
++EXPORT_SYMBOL(drive_info);
++#endif
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct apm_info apm_info;
++EXPORT_SYMBOL(apm_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++#ifndef CONFIG_XEN
++#define copy_edid() (edid_info = EDID_INFO)
++#endif
++struct ist_info ist_info;
++#if defined(CONFIG_X86_SPEEDSTEP_SMI) || \
++ defined(CONFIG_X86_SPEEDSTEP_SMI_MODULE)
++EXPORT_SYMBOL(ist_info);
++#endif
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern void early_cpu_init(void);
++extern void generic_apic_probe(char *);
++extern int root_mountflags;
++
++unsigned long saved_videomode;
++
++#define RAMDISK_IMAGE_START_MASK 0x07FF
++#define RAMDISK_PROMPT_FLAG 0x8000
++#define RAMDISK_LOAD_FLAG 0x4000
++
++static char command_line[COMMAND_LINE_SIZE];
++
++unsigned char __initdata boot_params[PARAM_SIZE];
++
++static struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource adapter_rom_resources[] = { {
++ .name = "Adapter ROM",
++ .start = 0xc8000,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++}, {
++ .name = "Adapter ROM",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++} };
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_MEM
++};
++
++static struct resource standard_io_resources[] = { {
++ .name = "dma1",
++ .start = 0x0000,
++ .end = 0x001f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic1",
++ .start = 0x0020,
++ .end = 0x0021,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer0",
++ .start = 0x0040,
++ .end = 0x0043,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "timer1",
++ .start = 0x0050,
++ .end = 0x0053,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "keyboard",
++ .start = 0x0060,
++ .end = 0x006f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma page reg",
++ .start = 0x0080,
++ .end = 0x008f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "pic2",
++ .start = 0x00a0,
++ .end = 0x00a1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "dma2",
++ .start = 0x00c0,
++ .end = 0x00df,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++}, {
++ .name = "fpu",
++ .start = 0x00f0,
++ .end = 0x00ff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO
++} };
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
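++
++/*
++ * Illustrative sketch, not part of the original patch: the
++ * signature-then-checksum test probe_roms() below applies to each
++ * candidate ROM window, collapsed into one predicate. The function
++ * name is made up for illustration.
++ */
++static int __init example_rom_valid(unsigned char *rom)
++{
++ /* length byte is in units of 512 bytes */
++ unsigned long length = rom[2] * 512;
++
++ return romsignature(rom) && length && romchecksum(rom, length);
++}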
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/*
++ * Point at the empty zero page to start with. We map the real shared_info
++ * page as soon as fixmap is up and running.
++ */
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[16];
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++
++void __init add_memory_region(unsigned long long start,
++ unsigned long long size, int type)
++{
++ int x;
++
++ if (!efi_enabled) {
++ x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++ }
++} /* add_memory_region */
++
++static void __init limit_regions(unsigned long long size)
++{
++ unsigned long long current_addr = 0;
++ int i;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map, i = 0; p < memmap.map_end;
++ p += memmap.desc_size, i++) {
++ md = p;
++ current_addr = md->phys_addr + (md->num_pages << 12);
++ if (md->type == EFI_CONVENTIONAL_MEMORY) {
++ if (current_addr >= size) {
++ md->num_pages -=
++ (((current_addr-size) + PAGE_SIZE-1) >> PAGE_SHIFT);
++ memmap.nr_map = i + 1;
++ return;
++ }
++ }
++ }
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ current_addr = e820.map[i].addr + e820.map[i].size;
++ if (current_addr < size)
++ continue;
++
++ if (e820.map[i].type != E820_RAM)
++ continue;
++
++ if (e820.map[i].addr >= size) {
++ /*
++ * This region starts past the end of the
++ * requested size, skip it completely.
++ */
++ e820.nr_map = i;
++ } else {
++ e820.nr_map = i + 1;
++ e820.map[i].size -= current_addr - size;
++ }
++ return;
++ }
++#ifdef CONFIG_XEN
++ if (i==e820.nr_map && current_addr < size) {
++ /*
++ * The e820 map finished before our requested size so
++ * extend the final entry to the requested address.
++ */
++ --i;
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size -= current_addr - size;
++ else
++ add_memory_region(current_addr, size - current_addr, E820_RAM);
++ }
++#endif
++}
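++
++/*
++ * Illustrative worked example, not part of the original patch: with
++ * e820 RAM entries [0, 640K) and [1M, 256M), limit_regions(128M)
++ * keeps the first entry intact and shrinks the second to [1M, 128M),
++ * discarding everything above the requested address.
++ */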
++
++#define E820_DEBUG 1
++
++static void __init print_memory_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ e820.map[i].addr,
++ e820.map[i].addr + e820.map[i].size);
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %lu\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++};
++static struct change_member change_point_list[2*E820MAX] __initdata;
++static struct change_member *change_point[2*E820MAX] __initdata;
++static struct e820entry *overlap_list[E820MAX] __initdata;
++static struct e820entry new_bios[E820MAX] __initdata;
++
++int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx; /* true number of change-points */
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++ /* loop through change-points, determining affect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
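++
++/*
++ * Illustrative worked example, not part of the original patch: given
++ * overlapping BIOS entries
++ * [0x00000, 0xa0000) type 1 (RAM)
++ * [0x90000, 0xa0000) type 2 (reserved)
++ * the pass above emits the non-overlapping map
++ * [0x00000, 0x90000) type 1
++ * [0x90000, 0xa0000) type 2
++ * because the larger type value wins wherever entries overlap.
++ */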
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it..
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and most every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long long start = biosmap->addr;
++ unsigned long long size = biosmap->size;
++ unsigned long long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ } else
++ machine_e820 = e820;
++#endif
++
++ return 0;
++}
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++static void __init parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = saved_command_line;
++ int len = 0, max_cmdline;
++ int userdef = 0;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ /* Save unparsed command line copy for /proc/cmdline */
++ saved_command_line[max_cmdline-1] = '\0';
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++ /*
++ * "mem=nopentium" disables the 4MB page tables.
++ * "mem=XXX[kKmM]" defines a memory region from HIGH_MEM
++ * to <mem>, overriding the bios size.
++ * "memmap=XXX[KkmM]@XXX[KkmM]" defines a memory region from
++ * <start> to <start>+<mem>, overriding the bios size.
++ *
++ * HPA tells me bootloaders need to parse mem=, so no new
++ * option should be mem= [also see Documentation/i386/boot.txt]
++ */
++ if (!memcmp(from, "mem=", 4)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+4, "nopentium", 9)) {
++ from += 9+4;
++ clear_bit(X86_FEATURE_PSE, boot_cpu_data.x86_capability);
++ disable_pse = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long mem_size;
++
++ mem_size = memparse(from+4, &from);
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++
++ else if (!memcmp(from, "memmap=", 7)) {
++ if (to != command_line)
++ to--;
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ find_max_pfn();
++ saved_max_pfn = max_pfn;
++#endif
++ from += 8+7;
++ e820.nr_map = 0;
++ userdef = 1;
++ } else {
++ /* If the user specifies memory size, we
++ * limit the BIOS-provided memory map to
++ * that size. exactmap can be used to specify
++ * the exact map. mem=number can be used to
++ * trim the existing memory map.
++ */
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(from+7, &from);
++ if (*from == '@') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*from == '#') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*from == '$') {
++ start_at = memparse(from+1, &from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ limit_regions(mem_size);
++ userdef=1;
++ }
++ }
++ }
++
++ else if (!memcmp(from, "noexec=", 7))
++ noexec_setup(from + 7);
++
++#ifdef CONFIG_X86_MPPARSE
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter */
++ else if (!memcmp(from, "acpi=off", 8)) {
++ disable_acpi();
++ }
++
++ /* acpi=force to over-ride black-list */
++ else if (!memcmp(from, "acpi=force", 10)) {
++ acpi_force = 1;
++ acpi_ht = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (!memcmp(from, "acpi=strict", 11)) {
++ acpi_strict = 1;
++ }
++
++ /* Limit ACPI just to boot-time to enable HT */
++ else if (!memcmp(from, "acpi=ht", 7)) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++
++ /* "pci=noacpi" disable ACPI IRQ routing and PCI scan */
++ else if (!memcmp(from, "pci=noacpi", 10)) {
++ acpi_disable_pci();
++ }
++ /* "acpi=noirq" disables ACPI interrupt routing */
++ else if (!memcmp(from, "acpi=noirq", 10)) {
++ acpi_noirq_set();
++ }
++
++ else if (!memcmp(from, "acpi_sci=edge", 13))
++ acpi_sci_flags.trigger = 1;
++
++ else if (!memcmp(from, "acpi_sci=level", 14))
++ acpi_sci_flags.trigger = 3;
++
++ else if (!memcmp(from, "acpi_sci=high", 13))
++ acpi_sci_flags.polarity = 1;
++
++ else if (!memcmp(from, "acpi_sci=low", 12))
++ acpi_sci_flags.polarity = 3;
++
++#ifdef CONFIG_X86_IO_APIC
++ else if (!memcmp(from, "acpi_skip_timer_override", 24))
++ acpi_skip_timer_override = 1;
++
++ if (!memcmp(from, "disable_timer_pin_1", 19))
++ disable_timer_pin_1 = 1;
++ if (!memcmp(from, "enable_timer_pin_1", 18))
++ disable_timer_pin_1 = -1;
++
++ /* disable IO-APIC */
++ else if (!memcmp(from, "noapic", 6))
++ disable_ioapic_setup();
++#endif /* CONFIG_X86_IO_APIC */
++#endif /* CONFIG_ACPI */
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /* enable local APIC */
++ else if (!memcmp(from, "lapic", 5))
++ lapic_enable();
++
++ /* disable local APIC */
++ else if (!memcmp(from, "nolapic", 6))
++ lapic_disable();
++#endif /* CONFIG_X86_LOCAL_APIC */
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++ * that Linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel.
++ */
++ else if (!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++ /*
++ * highmem=size forces highmem to be exactly 'size' bytes.
++ * This works even on boxes that have no highmem otherwise.
++ * This also works to reduce highmem size on bigger boxes.
++ */
++ else if (!memcmp(from, "highmem=", 8))
++ highmem_pages = memparse(from+8, &from) >> PAGE_SHIFT;
++
++ /*
++ * vmalloc=size forces the vmalloc area to be exactly 'size'
++ * bytes. This can be used to increase (or decrease) the
++ * vmalloc area - the default is 128m.
++ */
++ else if (!memcmp(from, "vmalloc=", 8))
++ __VMALLOC_RESERVE = memparse(from+8, &from);
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ print_memory_map("user");
++ }
++}
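++
++/*
++ * Worked example (a sketch of the parsing above): booting with
++ * "mem=512M" calls limit_regions(512 * 1024 * 1024) and sets userdef,
++ * so the trimmed map is printed as a "user-defined physical RAM map";
++ * "memmap=64M@16M" instead calls add_memory_region(16M, 64M, E820_RAM)
++ * (note userdef is only set by the plain-limit and exactmap branches).
++ * Recognized options are skipped rather than copied to *cmdline_p.
++ */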
++
++/*
++ * Callback for efi_memory_walk.
++ */
++static int __init
++efi_find_max_pfn(unsigned long start, unsigned long end, void *arg)
++{
++ unsigned long *max_pfn = arg, pfn;
++
++ if (start < end) {
++ pfn = PFN_UP(end - 1);
++ if (pfn > *max_pfn)
++ *max_pfn = pfn;
++ }
++ return 0;
++}
++
++static int __init
++efi_memory_present_wrapper(unsigned long start, unsigned long end, void *arg)
++{
++ memory_present(0, start, end);
++ return 0;
++}
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int
++e820_any_mapped(u64 start, u64 end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ const struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted
++ * and non-overlapping, which is the case.
++ */
++int __init
++e820_all_mapped(unsigned long s, unsigned long e, unsigned type)
++{
++ u64 start = s;
++ u64 end = e;
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; ++i) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++ if (type && ei->type != type)
++ continue;
++ /* does this region (partially) overlap the query range? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ /* if the region covers the start of <start,end>, advance
++ * start past it: the range is known mapped up to that point
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full
++ * coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
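++
++/*
++ * Example (sketch): with a sorted map { [0, 640K) RAM, [1M, 256M) RAM },
++ * e820_all_mapped(1M, 128M, E820_RAM) advances start from 1M to 256M on
++ * the second entry and returns 1, while a query over (512K, 2M) returns
++ * 0 because the [640K, 1M) hole is never covered by any entry.
++ */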
++
++/*
++ * Find the highest page frame number we have available
++ */
++void __init find_max_pfn(void)
++{
++ int i;
++
++ max_pfn = 0;
++ if (efi_enabled) {
++ efi_memmap_walk(efi_find_max_pfn, &max_pfn);
++ efi_memmap_walk(efi_memory_present_wrapper, NULL);
++ return;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long start, end;
++ /* RAM? */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ start = PFN_UP(e820.map[i].addr);
++ end = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++ if (start >= end)
++ continue;
++ if (end > max_pfn)
++ max_pfn = end;
++ memory_present(0, start, end);
++ }
++}
++
++/*
++ * Determine low and high memory ranges:
++ */
++unsigned long __init find_max_low_pfn(void)
++{
++ unsigned long max_low_pfn;
++
++ max_low_pfn = max_pfn;
++ if (max_low_pfn > MAXMEM_PFN) {
++ if (highmem_pages == -1)
++ highmem_pages = max_pfn - MAXMEM_PFN;
++ if (highmem_pages + MAXMEM_PFN < max_pfn)
++ max_pfn = MAXMEM_PFN + highmem_pages;
++ if (highmem_pages + MAXMEM_PFN > max_pfn) {
++ printk("only %luMB highmem pages available, ignoring highmem size of %uMB.\n", pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn = MAXMEM_PFN;
++#ifndef CONFIG_HIGHMEM
++ /* Maximum memory usable is what is directly addressable */
++ printk(KERN_WARNING "Warning only %ldMB will be used.\n",
++ MAXMEM>>20);
++ if (max_pfn > MAX_NONPAE_PFN)
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ else
++ printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n");
++ max_pfn = MAXMEM_PFN;
++#else /* !CONFIG_HIGHMEM */
++#ifndef CONFIG_X86_PAE
++ if (max_pfn > MAX_NONPAE_PFN) {
++ max_pfn = MAX_NONPAE_PFN;
++ printk(KERN_WARNING "Warning only 4GB will be used.\n");
++ printk(KERN_WARNING "Use a PAE enabled kernel.\n");
++ }
++#endif /* !CONFIG_X86_PAE */
++#endif /* !CONFIG_HIGHMEM */
++ } else {
++ if (highmem_pages == -1)
++ highmem_pages = 0;
++#ifdef CONFIG_HIGHMEM
++ if (highmem_pages >= max_pfn) {
++ printk(KERN_ERR "highmem size specified (%uMB) is bigger than pages available (%luMB)!.\n", pages_to_mb(highmem_pages), pages_to_mb(max_pfn));
++ highmem_pages = 0;
++ }
++ if (highmem_pages) {
++ if (max_low_pfn-highmem_pages < 64*1024*1024/PAGE_SIZE){
++ printk(KERN_ERR "highmem size %uMB results in smaller than 64MB lowmem, ignoring it.\n", pages_to_mb(highmem_pages));
++ highmem_pages = 0;
++ }
++ max_low_pfn -= highmem_pages;
++ }
++#else
++ if (highmem_pages)
++ printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n");
++#endif
++ }
++ return max_low_pfn;
++}
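++
++/*
++ * Example (sketch, config-dependent): on a 4GB i386 box where
++ * MAXMEM_PFN maps 896MB, max_pfn = 0x100000 exceeds MAXMEM_PFN =
++ * 0x38000, so highmem_pages defaults to the ~3.1GB remainder and
++ * max_low_pfn is capped at 0x38000; lowmem below 896MB stays directly
++ * mapped and the rest becomes HIGHMEM.
++ */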
++
++/*
++ * Free all available memory for boot time allocation. Used
++ * as a callback function by efi_memory_walk()
++ */
++
++static int __init
++free_available_memory(unsigned long start, unsigned long end, void *arg)
++{
++ /* check max_low_pfn */
++ if (start >= (max_low_pfn << PAGE_SHIFT))
++ return 0;
++ if (end >= (max_low_pfn << PAGE_SHIFT))
++ end = max_low_pfn << PAGE_SHIFT;
++ if (start < end)
++ free_bootmem(start, end - start);
++
++ return 0;
++}
++/*
++ * Register fully available low RAM pages with the bootmem allocator.
++ */
++static void __init register_bootmem_low_pages(unsigned long max_low_pfn)
++{
++ int i;
++
++ if (efi_enabled) {
++ efi_memmap_walk(free_available_memory, NULL);
++ return;
++ }
++ for (i = 0; i < e820.nr_map; i++) {
++ unsigned long curr_pfn, last_pfn, size;
++ /*
++ * Reserve usable low memory
++ */
++ if (e820.map[i].type != E820_RAM)
++ continue;
++ /*
++ * We are rounding up the start address of usable memory:
++ */
++ curr_pfn = PFN_UP(e820.map[i].addr);
++ if (curr_pfn >= max_low_pfn)
++ continue;
++ /*
++ * ... and at the end of the usable range downwards:
++ */
++ last_pfn = PFN_DOWN(e820.map[i].addr + e820.map[i].size);
++
++#ifdef CONFIG_XEN
++ /*
++ * Truncate to the number of actual pages currently
++ * present.
++ */
++ if (last_pfn > xen_start_info->nr_pages)
++ last_pfn = xen_start_info->nr_pages;
++#endif
++
++ if (last_pfn > max_low_pfn)
++ last_pfn = max_low_pfn;
++
++ /*
++ * .. finally, did all the rounding and playing
++ * around just make the area go away?
++ */
++ if (last_pfn <= curr_pfn)
++ continue;
++
++ size = last_pfn - curr_pfn;
++ free_bootmem(PFN_PHYS(curr_pfn), PFN_PHYS(size));
++ }
++}
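++
++/*
++ * Rounding example (sketch): the classic lowmem entry [0, 0x9fc00)
++ * gives curr_pfn = PFN_UP(0) = 0 and last_pfn = PFN_DOWN(0x9fc00) =
++ * 0x9f, so pages 0..0x9e are freed and the partially covered page at
++ * 0x9f is dropped; only whole pages reach free_bootmem().
++ */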
++
++#ifndef CONFIG_XEN
++/*
++ * workaround for Dell systems that neglect to reserve EBDA
++ */
++static void __init reserve_ebda_region(void)
++{
++ unsigned int addr;
++ addr = get_bios_ebda();
++ if (addr)
++ reserve_bootmem(addr, PAGE_SIZE);
++}
++#endif
++
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++void __init setup_bootmem_allocator(void);
++static unsigned long __init setup_memory(void)
++{
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ min_low_pfn = PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames;
++
++ find_max_pfn();
++
++ max_low_pfn = find_max_low_pfn();
++
++#ifdef CONFIG_HIGHMEM
++ highstart_pfn = highend_pfn = max_pfn;
++ if (max_pfn > max_low_pfn) {
++ highstart_pfn = max_low_pfn;
++ }
++ printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
++ pages_to_mb(highend_pfn - highstart_pfn));
++#endif
++ printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
++ pages_to_mb(max_low_pfn));
++
++ setup_bootmem_allocator();
++
++ return max_low_pfn;
++}
++
++void __init zone_sizes_init(void)
++{
++ unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
++ unsigned int max_dma, low;
++
++ max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;
++ low = max_low_pfn;
++
++ if (low < max_dma)
++ zones_size[ZONE_DMA] = low;
++ else {
++ zones_size[ZONE_DMA] = max_dma;
++ zones_size[ZONE_NORMAL] = low - max_dma;
++#ifdef CONFIG_HIGHMEM
++ zones_size[ZONE_HIGHMEM] = highend_pfn - low;
++#endif
++ }
++ free_area_init(zones_size);
++}
++#else
++extern unsigned long __init setup_memory(void);
++extern void zone_sizes_init(void);
++#endif /* !CONFIG_NEED_MULTIPLE_NODES */
++
++void __init setup_bootmem_allocator(void)
++{
++ unsigned long bootmap_size;
++ /*
++ * Initialize the boot-time allocator (with low memory only):
++ */
++ bootmap_size = init_bootmem(min_low_pfn, max_low_pfn);
++
++ register_bootmem_low_pages(max_low_pfn);
++
++ /*
++ * Reserve the bootmem bitmap itself as well. We do this in two
++ * steps (first step was init_bootmem()) because this catches
++ * the (very unlikely) case of us accidentally initializing the
++ * bootmem allocator with an invalid RAM area.
++ */
++ reserve_bootmem(__PHYSICAL_START, (PFN_PHYS(min_low_pfn) +
++ bootmap_size + PAGE_SIZE-1) - (__PHYSICAL_START));
++
++#ifndef CONFIG_XEN
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem(0, PAGE_SIZE);
++
++ /* reserve EBDA region, it's a 4K region */
++ reserve_ebda_region();
++
++ /* could be an AMD 768MPX chipset. Reserve a page before VGA to prevent
++ PCI prefetch into it (errata #56). Usually the page is reserved anyway,
++ unless you have no PS/2 mouse plugged in. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 == 6)
++ reserve_bootmem(0xa0000 - 4096, 4096);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem(PAGE_SIZE, PAGE_SIZE);
++#endif
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#endif /* !CONFIG_XEN */
++
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (max_low_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ }
++ else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ INITRD_START + INITRD_SIZE,
++ max_low_pfn << PAGE_SHIFT);
++ initrd_start = 0;
++ }
++ }
++#endif
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end)
++ reserve_bootmem(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++#endif
++#endif
++}
++
++/*
++ * The node 0 pgdat is initialized before all of these because
++ * it's needed for bootmem. node>0 pgdats have their virtual
++ * space allocated before the pagetables are in place to access
++ * them, so they can't be cleared then.
++ *
++ * This should all compile down to nothing when NUMA is off.
++ */
++void __init remapped_pgdat_init(void)
++{
++ int nid;
++
++ for_each_online_node(nid) {
++ if (nid != 0)
++ memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
++ }
++}
++
++/*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++static void __init
++legacy_init_iomem_resources(struct e820entry *e820, int nr_map,
++ struct resource *code_resource,
++ struct resource *data_resource)
++{
++ int i;
++
++ probe_roms();
++
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++#ifndef CONFIG_RESOURCES_64BIT
++ if (e820[i].addr + e820[i].size > 0x100000000ULL)
++ continue;
++#endif
++ res = kzalloc(sizeof(struct resource), GFP_ATOMIC);
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ if (request_resource(&iomem_resource, res)) {
++ kfree(res);
++ continue;
++ }
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, code_resource);
++ request_resource(res, data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Locate an unused range of the physical address space below 4G which
++ * can be used for PCI mappings.
++ */
++static void __init
++e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long long last;
++ int i;
++
++ /*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space.
++ */
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk("Allocating PCI resources starting at %08lx (gap: %08lx:%08lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
++
++/*
++ * Request address space for all standard resources
++ *
++ * This is called just before pcibios_init(), which is also a
++ * subsys_initcall, but is linked in later (in arch/i386/pci/common.c).
++ */
++static int __init request_standard_resources(void)
++{
++ int i;
++
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return 0;
++
++ printk("Setting up standard PCI resources\n");
++#ifdef CONFIG_XEN
++ legacy_init_iomem_resources(machine_e820.map, machine_e820.nr_map,
++ &code_resource, &data_resource);
++#else
++ if (efi_enabled)
++ efi_initialize_iomem_resources(&code_resource, &data_resource);
++ else
++ legacy_init_iomem_resources(e820.map, e820.nr_map,
++ &code_resource, &data_resource);
++#endif
++
++ /* EFI systems may still have VGA */
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ return 0;
++}
++
++subsys_initcall(request_standard_resources);
++
++static void __init register_memory(void)
++{
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++ else
++#endif
++ e820_setup_gap(e820.map, e820.nr_map);
++}
++
++#ifdef CONFIG_MCA
++static void set_mca_bus(int x)
++{
++ MCA_bus = x;
++}
++#else
++static void set_mca_bus(int x) { }
++#endif
++
++/*
++ * Determine if we were loaded by an EFI loader. If so, then we have also been
++ * passed the efi memmap, systab, etc., so we should use these data structures
++ * for initialization. Note, the efi init code path is determined by the
++ * global efi_enabled. This allows the same kernel image to be used on existing
++ * systems (with a traditional BIOS) as well as on EFI systems.
++ */
++void __init setup_arch(char **cmdline_p)
++{
++ int i, j, k, fpp;
++ struct physdev_set_iopl set_iopl;
++ unsigned long max_low_pfn;
++ unsigned long p2m_pages;
++
++ /* Force a quick death if the kernel panics (not domain 0). */
++ extern int panic_timeout;
++ if (!panic_timeout && !is_initial_xendomain())
++ panic_timeout = 1;
++
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_4gb_segments));
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables));
++
++ memcpy(&boot_cpu_data, &new_cpu_data, sizeof(new_cpu_data));
++ pre_setup_arch_hook();
++ early_cpu_init();
++#ifdef CONFIG_SMP
++ prefill_possible_map();
++#endif
++
++ /*
++ * FIXME: This isn't an official loader_type right
++ * now but does currently work with elilo.
++ * If we were configured as an EFI kernel, check to make
++ * sure that we were loaded correctly from elilo and that
++ * the system table is valid. If not, then initialize normally.
++ */
++#ifdef CONFIG_EFI
++ if ((LOADER_TYPE == 0x50) && EFI_SYSTAB)
++ efi_enabled = 1;
++#endif
++
++ /* This must be initialized to UNNAMED_MAJOR for ipconfig to work
++ properly. Setting ROOT_DEV to default to /dev/ram0 breaks initrd.
++ */
++ ROOT_DEV = MKDEV(UNNAMED_MAJOR,0);
++ drive_info = DRIVE_INFO;
++ screen_info = SCREEN_INFO;
++ copy_edid();
++ apm_info.bios = APM_BIOS_INFO;
++ ist_info = IST_INFO;
++ saved_videomode = VIDEO_MODE;
++ if( SYS_DESC_TABLE.length != 0 ) {
++ set_mca_bus(SYS_DESC_TABLE.table[3] & 0x2);
++ machine_id = SYS_DESC_TABLE.table[0];
++ machine_submodel_id = SYS_DESC_TABLE.table[1];
++ BIOS_revision = SYS_DESC_TABLE.table[2];
++ }
++ bootloader_type = LOADER_TYPE;
++
++ if (is_initial_xendomain()) {
++ const struct dom0_vga_console_info *info =
++ (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++
++ dom0_init_screen_info(info,
++ xen_start_info->console.dom0.info_size);
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++
++ ARCH_SETUP
++ if (efi_enabled)
++ efi_init();
++ else {
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ print_memory_map(machine_specific_memory_setup());
++ }
++
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) _text;
++ init_mm.end_code = (unsigned long) _etext;
++ init_mm.end_data = (unsigned long) _edata;
++ init_mm.brk = (PFN_UP(__pa(xen_start_info->pt_base)) +
++ xen_start_info->nr_pt_frames) << PAGE_SHIFT;
++
++ code_resource.start = virt_to_phys(_text);
++ code_resource.end = virt_to_phys(_etext)-1;
++ data_resource.start = virt_to_phys(_etext);
++ data_resource.end = virt_to_phys(_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++#ifdef CONFIG_EARLY_PRINTK
++ {
++ char *s = strstr(*cmdline_p, "earlyprintk=");
++ if (s) {
++ setup_early_printk(strchr(s, '=') + 1);
++ printk("early console enabled\n");
++ }
++ }
++#endif
++
++ max_low_pfn = setup_memory();
++
++ /*
++ * NOTE: before this point _nobody_ is allowed to allocate
++ * any memory using the bootmem allocator. Although the
++ * allocator is now initialised, only the first 8MB of the kernel
++ * virtual address space has been mapped. All allocations made before
++ * paging_init() completes must use the alloc_bootmem_low_pages()
++ * variant (which allocates DMA'able memory), and care must be taken
++ * not to exceed the 8MB limit.
++ */
++
++#ifdef CONFIG_SMP
++ smp_alloc_memory(); /* AP processor realmode stacks in low memory*/
++#endif
++ paging_init();
++ remapped_pgdat_init();
++ sparse_init();
++ zone_sizes_init();
++
++#ifdef CONFIG_X86_FIND_SMP_CONFIG
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++
++ p2m_pages = max_pfn;
++ if (xen_start_info->nr_pages > max_pfn) {
++ /*
++ * the max_pfn was shrunk (probably by mem= or highmem=
++ * kernel parameter); shrink reservation with the HV
++ */
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ unsigned int difference;
++ int ret;
++
++ difference = xen_start_info->nr_pages - max_pfn;
++
++ set_xen_guest_handle(reservation.extent_start,
++ ((unsigned long *)xen_start_info->mfn_list) + max_pfn);
++ reservation.nr_extents = difference;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON (ret != difference);
++ }
++ else if (max_pfn > xen_start_info->nr_pages)
++ p2m_pages = xen_start_info->nr_pages;
++
++ /* Make sure we have a correctly sized P->M table. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping = alloc_bootmem_low_pages(
++ max_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ max_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ p2m_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++ * Initialise the top-level list of frames whose entries name the
++ * frames that make up the p2m table. Used by save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_low_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
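++ /*
++ * Layout sketch: with 4K pages and 4-byte longs, fpp = 1024, so each
++ * frame-list page holds 1024 MFN entries and the frame-list-list page
++ * indexes up to 16 such pages, covering 16 * 1024 * 1024 PFNs (64GB
++ * of guest memory); the BUG_ON below trips if max_pfn exceeds that.
++ */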
++ for (i=0, j=0, k=-1; i< max_pfn; i+=fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ BUG_ON(k>=16);
++ pfn_to_mfn_frame_list[k] =
++ alloc_bootmem_low_pages(PAGE_SIZE);
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j=0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
++
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++
++ /*
++ * NOTE: at this point the bootmem allocator is fully available.
++ */
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_X86_GENERICARCH
++ generic_apic_probe(*cmdline_p);
++#endif
++ if (efi_enabled)
++ efi_map_memmap();
++
++ set_iopl.iopl = 1;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++#ifdef CONFIG_ACPI
++ if (!is_initial_xendomain()) {
++ printk(KERN_INFO "ACPI in unprivileged domain disabled\n");
++ acpi_disabled = 1;
++ acpi_ht = 0;
++ }
++
++ /*
++ * Parse the ACPI tables for possible boot-time SMP configuration.
++ */
++ acpi_boot_table_init();
++#endif
++
++#ifdef CONFIG_X86_IO_APIC
++ check_acpi_pci(); /* Checks more than just ACPI actually */
++#endif
++
++#ifdef CONFIG_ACPI
++ acpi_boot_init();
++
++#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
++ if (def_to_bigsmp)
++ printk(KERN_WARNING "More than 8 CPUs detected and "
++ "CONFIG_X86_PC cannot handle it.\nUse "
++ "CONFIG_X86_GENERICARCH or CONFIG_X86_BIGSMP.\n");
++#endif
++#endif
++#ifdef CONFIG_X86_LOCAL_APIC
++ if (smp_found_config)
++ get_smp_config();
++#endif
++
++ register_memory();
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ if (!efi_enabled ||
++ (efi_mem_type(0xa0000) != EFI_CONVENTIONAL_MEMORY))
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ tsc_init();
++}
++
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++
++/*
++ * Local Variables:
++ * mode:c
++ * c-file-style:"k&r"
++ * c-basic-offset:8
++ * End:
++ */
+Index: head-2008-11-25/arch/x86/kernel/smp_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/smp_32-xen.c 2007-12-10 08:47:31.000000000 +0100
+@@ -0,0 +1,605 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/cache.h>
++#include <linux/interrupt.h>
++#include <linux/cpu.h>
++#include <linux/module.h>
++
++#include <asm/mtrr.h>
++#include <asm/tlbflush.h>
++#if 0
++#include <mach_apic.h>
++#endif
++#include <xen/evtchn.h>
++
++/*
++ * Some notes on x86 processor bugs affecting SMP operation:
++ *
++ * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
++ * The Linux implications for SMP are handled as follows:
++ *
++ * Pentium III / [Xeon]
++ * None of the E1AP-E3AP errata are visible to the user.
++ *
++ * E1AP. see PII A1AP
++ * E2AP. see PII A2AP
++ * E3AP. see PII A3AP
++ *
++ * Pentium II / [Xeon]
++ * None of the A1AP-A3AP errata are visible to the user.
++ *
++ * A1AP. see PPro 1AP
++ * A2AP. see PPro 2AP
++ * A3AP. see PPro 7AP
++ *
++ * Pentium Pro
++ * None of 1AP-9AP errata are visible to the normal user,
++ * except occasional delivery of 'spurious interrupt' as trap #15.
++ * This is very rare and a non-problem.
++ *
++ * 1AP. Linux maps APIC as non-cacheable
++ * 2AP. worked around in hardware
++ * 3AP. fixed in C0 and above steppings microcode update.
++ * Linux does not use excessive STARTUP_IPIs.
++ * 4AP. worked around in hardware
++ * 5AP. symmetric IO mode (normal Linux operation) not affected.
++ * 'noapic' mode has vector 0xf filled out properly.
++ * 6AP. 'noapic' mode might be affected - fixed in later steppings
++ * 7AP. We do not assume writes to the LVT deasserting IRQs
++ * 8AP. We do not enable low power mode (deep sleep) during MP bootup
++ * 9AP. We do not use mixed mode
++ *
++ * Pentium
++ * There is a marginal case where REP MOVS on 100MHz SMP
++ * machines with B stepping processors can fail. XXX should provide
++ * an L1cache=Writethrough or L1cache=off option.
++ *
++ * B stepping CPUs may hang. There are hardware workarounds
++ * for this. We warn about it in case your board doesn't have the
++ * workarounds. Basically that's so I can tell anyone with a B stepping
++ * CPU and SMP problems "tough".
++ *
++ * Specific items [From Pentium Processor Specification Update]
++ *
++ * 1AP. Linux doesn't use remote read
++ * 2AP. Linux doesn't trust APIC errors
++ * 3AP. We work around this
++ * 4AP. Linux never generated 3 interrupts of the same priority
++ * to cause a lost local interrupt.
++ * 5AP. Remote read is never used
++ * 6AP. not affected - worked around in hardware
++ * 7AP. not affected - worked around in hardware
++ * 8AP. worked around in hardware - we get explicit CS errors if not
++ * 9AP. only 'noapic' mode affected. Might generate spurious
++ * interrupts, we log only the first one and count the
++ * rest silently.
++ * 10AP. not affected - worked around in hardware
++ * 11AP. Linux reads the APIC between writes to avoid this, as per
++ * the documentation. Make sure you preserve this as it affects
++ * the C stepping chips too.
++ * 12AP. not affected - worked around in hardware
++ * 13AP. not affected - worked around in hardware
++ * 14AP. we always deassert INIT during bootup
++ * 15AP. not affected - worked around in hardware
++ * 16AP. not affected - worked around in hardware
++ * 17AP. not affected - worked around in hardware
++ * 18AP. not affected - worked around in hardware
++ * 19AP. not affected - worked around in BIOS
++ *
++ * If this sounds worrying, believe me these bugs are either ___RARE___
++ * or are signal timing bugs worked around in hardware, and there's
++ * next to nothing of note with C stepping upwards.
++ */
++
++DEFINE_PER_CPU(struct tlb_state, cpu_tlbstate) ____cacheline_aligned = { &init_mm, 0, };
++
++/*
++ * the following functions deal with sending IPIs between CPUs.
++ *
++ * We use 'broadcast', CPU->CPU IPIs and self-IPIs too.
++ */
++
++static inline int __prepare_ICR (unsigned int shortcut, int vector)
++{
++ unsigned int icr = shortcut | APIC_DEST_LOGICAL;
++
++ switch (vector) {
++ default:
++ icr |= APIC_DM_FIXED | vector;
++ break;
++ case NMI_VECTOR:
++ icr |= APIC_DM_NMI;
++ break;
++ }
++ return icr;
++}
++
++static inline int __prepare_ICR2 (unsigned int mask)
++{
++ return SET_APIC_DEST_FIELD(mask);
++}
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
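++
++/*
++ * Under Xen an "IPI" is just an event-channel notification: each
++ * (cpu, vector) pair has been bound to a Xen event channel (the irq
++ * recorded in ipi_to_irq by the event-channel code), and
++ * notify_remote_via_irq() asks the hypervisor to fire it. No APIC ICR
++ * programming happens in this file's Xen paths.
++ */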
++
++void __send_IPI_shortcut(unsigned int shortcut, int vector)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++void fastcall send_IPI_self(int vector)
++{
++ __send_IPI_shortcut(APIC_DEST_SELF, vector);
++}
++
++/*
++ * This is only used on smaller machines.
++ */
++void send_IPI_mask_bitmask(cpumask_t mask, int vector)
++{
++ unsigned long flags;
++ unsigned int cpu;
++
++ local_irq_save(flags);
++ WARN_ON(cpus_addr(mask)[0] & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, mask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++
++ local_irq_restore(flags);
++}
++
++void send_IPI_mask_sequence(cpumask_t mask, int vector)
++{
++ send_IPI_mask_bitmask(mask, vector);
++}
++
++#include <mach_ipi.h> /* must come after the send_IPI functions above for inlining */
++
++#if 0 /* XEN */
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ */
++
++static cpumask_t flush_cpumask;
++static struct mm_struct * flush_mm;
++static unsigned long flush_va;
++static DEFINE_SPINLOCK(tlbstate_lock);
++#define FLUSH_ALL 0xffffffff
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ *
++ * We need to reload %cr3 since the page tables may be going
++ * away from under us..
++ */
++static inline void leave_mm (unsigned long cpu)
++{
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, per_cpu(cpu_tlbstate, cpu).active_mm->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu_tlbstate to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu_tlbstate[].active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a5) change cr3.
++ * 1b) thread switch without mm change
++ * cpu_tlbstate[].active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu_tlbstate to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, ie current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, ie. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu_tlbstate is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ */
++
++irqreturn_t smp_invalidate_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ unsigned long cpu;
++
++ cpu = get_cpu();
++
++ if (!cpu_isset(cpu, flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++ * its staying as a return
++ *
++ * BUG();
++ */
++
++ if (flush_mm == per_cpu(cpu_tlbstate, cpu).active_mm) {
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK) {
++ if (flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(flush_va);
++ } else
++ leave_mm(cpu);
++ }
++ smp_mb__before_clear_bit();
++ cpu_clear(cpu, flush_cpumask);
++ smp_mb__after_clear_bit();
++out:
++ put_cpu_no_resched();
++
++ return IRQ_HANDLED;
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ /*
++ * A couple of (to be removed) sanity checks:
++ *
++ * - current CPU must not be in mask
++ * - mask must exist :)
++ */
++ BUG_ON(cpus_empty(cpumask));
++ BUG_ON(cpu_isset(smp_processor_id(), cpumask));
++ BUG_ON(!mm);
++
++ /* If a CPU which we ran on has gone down, OK. */
++ cpus_and(cpumask, cpumask, cpu_online_map);
++ if (cpus_empty(cpumask))
++ return;
++
++ /*
++ * I'm not happy about this global shared spinlock in the
++ * MM hot path, but we'll see how contended it is.
++ * Temporarily this turns IRQs off, so that lockups are
++ * detected by the NMI watchdog.
++ */
++ spin_lock(&tlbstate_lock);
++
++ flush_mm = mm;
++ flush_va = va;
++#if NR_CPUS <= BITS_PER_LONG
++ atomic_set_mask(cpumask, &flush_cpumask);
++#else
++ {
++ int k;
++ unsigned long *flush_mask = (unsigned long *)&flush_cpumask;
++ unsigned long *cpu_mask = (unsigned long *)&cpumask;
++ for (k = 0; k < BITS_TO_LONGS(NR_CPUS); ++k)
++ atomic_set_mask(cpu_mask[k], &flush_mask[k]);
++ }
++#endif
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR);
++
++ while (!cpus_empty(flush_cpumask))
++ /* nothing. lockup detection does not belong here */
++ mb();
++
++ flush_mm = NULL;
++ flush_va = 0;
++ spin_unlock(&tlbstate_lock);
++}
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if(current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++
++#endif /* XEN */
++
++/*
++ * this function sends a 'reschedule' IPI to another CPU.
++ * it goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule ...
++ */
++void smp_send_reschedule(int cpu)
++{
++ WARN_ON(cpu_is_offline(cpu));
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++static struct call_data_struct *call_data;
++
++/**
++ * smp_call_function(): Run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute <<func>> or are or have executed.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ struct call_data_struct data;
++ int cpus;
++
++ /* Holding any lock stops cpus from going down. */
++ spin_lock(&call_lock);
++ cpus = num_online_cpus() - 1;
++ if (!cpus) {
++ spin_unlock(&call_lock);
++ return 0;
++ }
++
++ /* Can deadlock when called with interrupts disabled */
++ WARN_ON(irqs_disabled());
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ mb();
++
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (wait)
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++ spin_unlock(&call_lock);
++
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
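++
++/*
++ * Usage sketch (hypothetical caller): from process context with
++ * interrupts enabled, where drain_local() is fast and non-blocking:
++ *
++ * smp_call_function(drain_local, NULL, 1, 1);
++ *
++ * With wait == 1 the on-stack 'data' above is safe: no remote CPU can
++ * still reference it once 'finished' reaches 'cpus'.
++ */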
++
++static void stop_this_cpu (void * dummy)
++{
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_disable();
++ disable_all_local_evtchn();
++ if (cpu_data[smp_processor_id()].hlt_works_ok)
++ for(;;) halt();
++ for (;;);
++}
++
++/*
++ * this function calls the 'stop' function on all other CPUs in the system.
++ */
++
++void smp_send_stop(void)
++{
++ smp_call_function(stop_this_cpu, NULL, 1, 0);
++
++ local_irq_disable();
++ disable_all_local_evtchn();
++ local_irq_enable();
++}
++
++/*
++ * Reschedule call back. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++irqreturn_t smp_reschedule_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++
++ return IRQ_HANDLED;
++}
++
++#include <linux/kallsyms.h>
++irqreturn_t smp_call_function_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++
++ return IRQ_HANDLED;
++}
++
+Index: head-2008-11-25/arch/x86/kernel/time_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/time_32-xen.c 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,1209 @@
++/*
++ * linux/arch/i386/kernel/time.c
++ *
++ * Copyright (C) 1991, 1992, 1995 Linus Torvalds
++ *
++ * This file contains the PC-specific time handling details:
++ * reading the RTC at bootup, etc..
++ * 1994-07-02 Alan Modra
++ * fixed set_rtc_mmss, fixed time.year for >= 2000, new mktime
++ * 1995-03-26 Markus Kuhn
++ * fixed 500 ms bug at call to set_rtc_mmss, fixed DS12887
++ * precision CMOS clock update
++ * 1996-05-03 Ingo Molnar
++ * fixed time warps in do_[slow|fast]_gettimeoffset()
++ * 1997-09-10 Updated NTP code according to technical memorandum Jan '96
++ * "A Kernel Model for Precision Timekeeping" by Dave Mills
++ * 1998-09-05 (Various)
++ * More robust do_fast_gettimeoffset() algorithm implemented
++ * (works with APM, Cyrix 6x86MX and Centaur C6),
++ * monotonic gettimeofday() with fast_get_timeoffset(),
++ * drift-proof precision TSC calibration on boot
++ * (C. Scott Ananian <cananian@alumni.princeton.edu>, Andrew D.
++ * Balsa <andrebalsa@altern.org>, Philip Gladstone <philip@raptor.com>;
++ * ported from 2.0.35 Jumbo-9 by Michael Krause <m.krause@tu-harburg.de>).
++ * 1998-12-16 Andrea Arcangeli
++ * Fixed Jumbo-9 code in 2.1.131: do_gettimeofday was missing 1 jiffy
++ * because was not accounting lost_ticks.
++ * 1998-12-24 Copyright (C) 1998 Andrea Arcangeli
++ * Fixed a xtime SMP race (we need the xtime_lock rw spinlock to
++ * serialize accesses to xtime/lost_ticks).
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/param.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/time.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/smp.h>
++#include <linux/module.h>
++#include <linux/sysdev.h>
++#include <linux/bcd.h>
++#include <linux/efi.h>
++#include <linux/mca.h>
++#include <linux/sysctl.h>
++#include <linux/percpu.h>
++#include <linux/kernel_stat.h>
++#include <linux/posix-timers.h>
++#include <linux/cpufreq.h>
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/irq.h>
++#include <asm/msr.h>
++#include <asm/delay.h>
++#include <asm/mpspec.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/timer.h>
++#include <asm/sections.h>
++
++#include "mach_time.h"
++
++#include <linux/timex.h>
++
++#include <asm/hpet.h>
++
++#include <asm/arch_hooks.h>
++
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++
++#if defined (__i386__)
++#include <asm/i8259.h>
++#endif
++
++int pit_latch_buggy; /* extern */
++
++#if defined(__x86_64__)
++unsigned long vxtime_hz = PIT_TICK_RATE;
++struct vxtime_data __vxtime __section_vxtime; /* for vsyscalls */
++volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
++unsigned long __wall_jiffies __section_wall_jiffies = INITIAL_JIFFIES;
++struct timespec __xtime __section_xtime;
++struct timezone __sys_tz __section_sys_tz;
++#endif
++
++unsigned int cpu_khz; /* Detected as we calibrate the TSC */
++EXPORT_SYMBOL(cpu_khz);
++
++extern unsigned long wall_jiffies;
++
++DEFINE_SPINLOCK(rtc_lock);
++EXPORT_SYMBOL(rtc_lock);
++
++extern struct init_timer_opts timer_tsc_init;
++extern struct timer_opts timer_tsc;
++#define timer_none timer_tsc
++
++/* These are periodically updated in shared_info, and then copied here. */
++struct shadow_time_info {
++ u64 tsc_timestamp; /* TSC at last update of time vals. */
++ u64 system_timestamp; /* Time, in nanosecs, since boot. */
++ u32 tsc_to_nsec_mul;
++ u32 tsc_to_usec_mul;
++ int tsc_shift;
++ u32 version;
++};
++static DEFINE_PER_CPU(struct shadow_time_info, shadow_time);
++static struct timespec shadow_tv;
++static u32 shadow_tv_version;
++
++static struct timeval monotonic_tv;
++static spinlock_t monotonic_lock = SPIN_LOCK_UNLOCKED;
++
++/* Keep track of last time we did processing/updating of jiffies and xtime. */
++static u64 processed_system_time; /* System time (ns) at last processing. */
++static DEFINE_PER_CPU(u64, processed_system_time);
++
++/* How much CPU time was spent blocked and how much was 'stolen'? */
++static DEFINE_PER_CPU(u64, processed_stolen_time);
++static DEFINE_PER_CPU(u64, processed_blocked_time);
++
++/* Current runstate of each CPU (updated automatically by the hypervisor). */
++static DEFINE_PER_CPU(struct vcpu_runstate_info, runstate);
++
++/* Must be signed, as it's compared with s64 quantities which can be -ve. */
++#define NS_PER_TICK (1000000000LL/HZ)
++
++static void __clock_was_set(void *unused)
++{
++ clock_was_set();
++}
++static DECLARE_WORK(clock_was_set_work, __clock_was_set, NULL);
++
++/*
++ * GCC 4.3 can turn loops over an induction variable into division. We do
++ * not support arbitrary 64-bit division, and so must break the induction.
++ */
++#define clobber_induction_variable(v) asm ( "" : "+r" (v) )
++
++static inline void __normalize_time(time_t *sec, s64 *nsec)
++{
++ while (*nsec >= NSEC_PER_SEC) {
++ clobber_induction_variable(*nsec);
++ (*nsec) -= NSEC_PER_SEC;
++ (*sec)++;
++ }
++ while (*nsec < 0) {
++ clobber_induction_variable(*nsec);
++ (*nsec) += NSEC_PER_SEC;
++ (*sec)--;
++ }
++}
++
++/* Does this guest OS track Xen time, or set its wall clock independently? */
++static int independent_wallclock = 0;
++static int __init __independent_wallclock(char *str)
++{
++ independent_wallclock = 1;
++ return 1;
++}
++__setup("independent_wallclock", __independent_wallclock);
++
++/* Permitted clock jitter, in nsecs, beyond which a warning will be printed. */
++static unsigned long permitted_clock_jitter = 10000000UL; /* 10ms */
++static int __init __permitted_clock_jitter(char *str)
++{
++ permitted_clock_jitter = simple_strtoul(str, NULL, 0);
++ return 1;
++}
++__setup("permitted_clock_jitter=", __permitted_clock_jitter);
++
++#if 0
++static void delay_tsc(unsigned long loops)
++{
++ unsigned long bclock, now;
++
++ rdtscl(bclock);
++ do {
++ rep_nop();
++ rdtscl(now);
++ } while ((now - bclock) < loops);
++}
++
++struct timer_opts timer_tsc = {
++ .name = "tsc",
++ .delay = delay_tsc,
++};
++#endif
++
++/*
++ * Scale a 64-bit delta by shifting it, then multiplying by a 32-bit fraction,
++ * yielding a 64-bit result.
++ */
++static inline u64 scale_delta(u64 delta, u32 mul_frac, int shift)
++{
++ u64 product;
++#ifdef __i386__
++ u32 tmp1, tmp2;
++#endif
++
++ if (shift < 0)
++ delta >>= -shift;
++ else
++ delta <<= shift;
++
++#ifdef __i386__
++ __asm__ (
++ "mul %5 ; "
++ "mov %4,%%eax ; "
++ "mov %%edx,%4 ; "
++ "mul %5 ; "
++ "xor %5,%5 ; "
++ "add %4,%%eax ; "
++ "adc %5,%%edx ; "
++ : "=A" (product), "=r" (tmp1), "=r" (tmp2)
++ : "a" ((u32)delta), "1" ((u32)(delta >> 32)), "2" (mul_frac) );
++#else
++ __asm__ (
++ "mul %%rdx ; shrd $32,%%rdx,%%rax"
++ : "=a" (product) : "0" (delta), "d" ((u64)mul_frac) );
++#endif
++
++ return product;
++}
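++
++/*
++ * Numeric sketch: scale_delta() computes (delta << shift) * mul_frac
++ * / 2^32, i.e. mul_frac is a 0.32 fixed-point fraction. For a 2GHz
++ * TSC, Xen supplies roughly shift = 0 and mul_frac = 2^31, so a delta
++ * of 2,000,000 cycles scales to ~1,000,000 ns.
++ */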
++
++#if 0 /* defined (__i386__) */
++int read_current_timer(unsigned long *timer_val)
++{
++ rdtscl(*timer_val);
++ return 0;
++}
++#endif
++
++void init_cpu_khz(void)
++{
++ u64 __cpu_khz = 1000000ULL << 32;
++ struct vcpu_time_info *info = &vcpu_info(0)->time;
++ do_div(__cpu_khz, info->tsc_to_system_mul);
++ if (info->tsc_shift < 0)
++ cpu_khz = __cpu_khz << -info->tsc_shift;
++ else
++ cpu_khz = __cpu_khz >> info->tsc_shift;
++}
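++
++/*
++ * Derivation sketch: since ns = tsc * tsc_to_system_mul / 2^32
++ * (shift-adjusted), tsc_khz = 10^6 * 2^32 / tsc_to_system_mul, so
++ * dividing 10^6 << 32 by the multiplier yields kHz before the shift
++ * correction applied above.
++ */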
++
++static u64 get_nsec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_nsec_mul, shadow->tsc_shift);
++}
++
++static unsigned long get_usec_offset(struct shadow_time_info *shadow)
++{
++ u64 now, delta;
++ rdtscll(now);
++ delta = now - shadow->tsc_timestamp;
++ return scale_delta(delta, shadow->tsc_to_usec_mul, shadow->tsc_shift);
++}
++
++static void __update_wallclock(time_t sec, long nsec)
++{
++ long wtm_nsec, xtime_nsec;
++ time_t wtm_sec, xtime_sec;
++ u64 tmp, wc_nsec;
++
++ /* Adjust wall-clock time base based on wall_jiffies ticks. */
++ wc_nsec = processed_system_time;
++ wc_nsec += sec * (u64)NSEC_PER_SEC;
++ wc_nsec += nsec;
++ wc_nsec -= (jiffies - wall_jiffies) * (u64)NS_PER_TICK;
++
++ /* Split wallclock base into seconds and nanoseconds. */
++ tmp = wc_nsec;
++ xtime_nsec = do_div(tmp, 1000000000);
++ xtime_sec = (time_t)tmp;
++
++ wtm_sec = wall_to_monotonic.tv_sec + (xtime.tv_sec - xtime_sec);
++ wtm_nsec = wall_to_monotonic.tv_nsec + (xtime.tv_nsec - xtime_nsec);
++
++ set_normalized_timespec(&xtime, xtime_sec, xtime_nsec);
++ set_normalized_timespec(&wall_to_monotonic, wtm_sec, wtm_nsec);
++
++ ntp_clear();
++}
++
++static void update_wallclock(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ do {
++ shadow_tv_version = s->wc_version;
++ rmb();
++ shadow_tv.tv_sec = s->wc_sec;
++ shadow_tv.tv_nsec = s->wc_nsec;
++ rmb();
++ } while ((s->wc_version & 1) | (shadow_tv_version ^ s->wc_version));
++
++ if (!independent_wallclock)
++ __update_wallclock(shadow_tv.tv_sec, shadow_tv.tv_nsec);
++}
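++
++/*
++ * The wc_version handshake is a lock-free seqlock-style read: Xen
++ * leaves wc_version odd while it updates the fields and bumps it to
++ * even when done, so the loop above retries while the version is odd
++ * ((version & 1) != 0) or changed mid-copy (shadow_tv_version !=
++ * s->wc_version). get_time_values_from_xen() below uses the same
++ * protocol for the per-VCPU time fields.
++ */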
++
++/*
++ * Reads a consistent set of time-base values from Xen, into a shadow data
++ * area.
++ */
++static void get_time_values_from_xen(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++ unsigned long flags;
++ u32 pre_version, post_version;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ local_irq_save(flags);
++
++ do {
++ pre_version = dst->version = src->version;
++ rmb();
++ dst->tsc_timestamp = src->tsc_timestamp;
++ dst->system_timestamp = src->system_time;
++ dst->tsc_to_nsec_mul = src->tsc_to_system_mul;
++ dst->tsc_shift = src->tsc_shift;
++ rmb();
++ post_version = src->version;
++ } while ((pre_version & 1) | (pre_version ^ post_version));
++
++ dst->tsc_to_usec_mul = dst->tsc_to_nsec_mul / 1000;
++
++ local_irq_restore(flags);
++}
++
++static inline int time_values_up_to_date(unsigned int cpu)
++{
++ struct vcpu_time_info *src;
++ struct shadow_time_info *dst;
++
++ src = &vcpu_info(cpu)->time;
++ dst = &per_cpu(shadow_time, cpu);
++
++ rmb();
++ return (dst->version == src->version);
++}
++
++/*
++ * This is a special lock that is owned by the CPU and holds the index
++ * register we are working with. It is required for NMI access to the
++ * CMOS/RTC registers. See include/asm-i386/mc146818rtc.h for details.
++ */
++volatile unsigned long cmos_lock = 0;
++EXPORT_SYMBOL(cmos_lock);
++
++/* Routines for accessing the CMOS RAM/RTC. */
++unsigned char rtc_cmos_read(unsigned char addr)
++{
++ unsigned char val;
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ val = inb_p(RTC_PORT(1));
++ lock_cmos_suffix(addr);
++ return val;
++}
++EXPORT_SYMBOL(rtc_cmos_read);
++
++void rtc_cmos_write(unsigned char val, unsigned char addr)
++{
++ lock_cmos_prefix(addr);
++ outb_p(addr, RTC_PORT(0));
++ outb_p(val, RTC_PORT(1));
++ lock_cmos_suffix(addr);
++}
++EXPORT_SYMBOL(rtc_cmos_write);
++
++/*
++ * This version of gettimeofday has microsecond resolution
++ * and better than microsecond precision on fast x86 machines with TSC.
++ */
++void do_gettimeofday(struct timeval *tv)
++{
++ unsigned long seq;
++ unsigned long usec, sec;
++ unsigned long flags;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ u32 local_time_version;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ do {
++ unsigned long lost;
++
++ local_time_version = shadow->version;
++ seq = read_seqbegin(&xtime_lock);
++
++ usec = get_usec_offset(shadow);
++ lost = jiffies - wall_jiffies;
++
++ if (unlikely(lost))
++ usec += lost * (USEC_PER_SEC / HZ);
++
++ sec = xtime.tv_sec;
++ usec += (xtime.tv_nsec / NSEC_PER_USEC);
++
++ nsec = shadow->system_timestamp - processed_system_time;
++ __normalize_time(&sec, &nsec);
++ usec += (long)nsec / NSEC_PER_USEC;
++
++ if (unlikely(!time_values_up_to_date(cpu))) {
++ /*
++ * We may have blocked for a long time,
++ * rendering our calculations invalid
++ * (e.g. the time delta may have
++ * overflowed). Detect that and recalculate
++ * with fresh values.
++ */
++ get_time_values_from_xen(cpu);
++ continue;
++ }
++ } while (read_seqretry(&xtime_lock, seq) ||
++ (local_time_version != shadow->version));
++
++ put_cpu();
++
++ while (usec >= USEC_PER_SEC) {
++ usec -= USEC_PER_SEC;
++ sec++;
++ }
++
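++ /*
++ * Never hand out a time earlier than one we previously returned:
++ * remember the latest result in monotonic_tv and clamp to it, so
++ * gettimeofday() cannot step backwards across CPUs.
++ */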
++ spin_lock_irqsave(&monotonic_lock, flags);
++ if ((sec > monotonic_tv.tv_sec) ||
++ ((sec == monotonic_tv.tv_sec) && (usec > monotonic_tv.tv_usec)))
++ {
++ monotonic_tv.tv_sec = sec;
++ monotonic_tv.tv_usec = usec;
++ } else {
++ sec = monotonic_tv.tv_sec;
++ usec = monotonic_tv.tv_usec;
++ }
++ spin_unlock_irqrestore(&monotonic_lock, flags);
++
++ tv->tv_sec = sec;
++ tv->tv_usec = usec;
++}
++
++EXPORT_SYMBOL(do_gettimeofday);
++
++int do_settimeofday(struct timespec *tv)
++{
++ time_t sec;
++ s64 nsec;
++ unsigned int cpu;
++ struct shadow_time_info *shadow;
++ struct xen_platform_op op;
++
++ if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
++ return -EINVAL;
++
++ cpu = get_cpu();
++ shadow = &per_cpu(shadow_time, cpu);
++
++ write_seqlock_irq(&xtime_lock);
++
++ /*
++ * Guard against having been blocked for so long that our time delta
++ * overflows. If that happens the shadow time values are stale, so
++ * retry with fresh ones.
++ */
++ for (;;) {
++ nsec = tv->tv_nsec - get_nsec_offset(shadow);
++ if (time_values_up_to_date(cpu))
++ break;
++ get_time_values_from_xen(cpu);
++ }
++ sec = tv->tv_sec;
++ __normalize_time(&sec, &nsec);
++
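++ /*
++ * In the initial domain (unless the wallclock is independent) the
++ * new time is handed to Xen via XENPF_settime so every domain that
++ * tracks the shared wallclock picks it up; an independent wallclock
++ * is updated locally only.
++ */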
++ if (is_initial_xendomain() && !independent_wallclock) {
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = shadow->system_timestamp;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++ update_wallclock();
++ } else if (independent_wallclock) {
++ nsec -= shadow->system_timestamp;
++ __normalize_time(&sec, &nsec);
++ __update_wallclock(sec, nsec);
++ }
++
++ /* Reset monotonic gettimeofday() timeval. */
++ spin_lock(&monotonic_lock);
++ monotonic_tv.tv_sec = 0;
++ monotonic_tv.tv_usec = 0;
++ spin_unlock(&monotonic_lock);
++
++ write_sequnlock_irq(&xtime_lock);
++
++ put_cpu();
++
++ clock_was_set();
++ return 0;
++}
++
++EXPORT_SYMBOL(do_settimeofday);
++
++static void sync_xen_wallclock(unsigned long dummy);
++static DEFINE_TIMER(sync_xen_wallclock_timer, sync_xen_wallclock, 0, 0);
++static void sync_xen_wallclock(unsigned long dummy)
++{
++ time_t sec;
++ s64 nsec;
++ struct xen_platform_op op;
++
++ if (!ntp_synced() || independent_wallclock || !is_initial_xendomain())
++ return;
++
++ write_seqlock_irq(&xtime_lock);
++
++ sec = xtime.tv_sec;
++ nsec = xtime.tv_nsec + ((jiffies - wall_jiffies) * (u64)NS_PER_TICK);
++ __normalize_time(&sec, &nsec);
++
++ op.cmd = XENPF_settime;
++ op.u.settime.secs = sec;
++ op.u.settime.nsecs = nsec;
++ op.u.settime.system_time = processed_system_time;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ update_wallclock();
++
++ write_sequnlock_irq(&xtime_lock);
++
++ /* Once per minute. */
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 60*HZ);
++}
++
++static int set_rtc_mmss(unsigned long nowtime)
++{
++ int retval;
++ unsigned long flags;
++
++ if (independent_wallclock || !is_initial_xendomain())
++ return 0;
++
++ /* gets recalled with irq locally disabled */
++ /* XXX - does irqsave resolve this? -johnstul */
++ spin_lock_irqsave(&rtc_lock, flags);
++ if (efi_enabled)
++ retval = efi_set_rtc_mmss(nowtime);
++ else
++ retval = mach_set_rtc_mmss(nowtime);
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++
++/* monotonic_clock(): returns # of nanoseconds passed since time_init()
++ * Note: This function is required to return accurate
++ * time even in the absence of multiple timer ticks.
++ */
++unsigned long long monotonic_clock(void)
++{
++ unsigned int cpu = get_cpu();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ u64 time;
++ u32 local_time_version;
++
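++ /*
++ * Lock-free snapshot: shadow->version changes whenever
++ * get_time_values_from_xen() refreshes the shadow area, so retry
++ * until a read completes under a single version.
++ */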
++ do {
++ local_time_version = shadow->version;
++ barrier();
++ time = shadow->system_timestamp + get_nsec_offset(shadow);
++ if (!time_values_up_to_date(cpu))
++ get_time_values_from_xen(cpu);
++ barrier();
++ } while (local_time_version != shadow->version);
++
++ put_cpu();
++
++ return time;
++}
++EXPORT_SYMBOL(monotonic_clock);
++
++#ifdef __x86_64__
++unsigned long long sched_clock(void)
++{
++ return monotonic_clock();
++}
++#endif
++
++#if defined(CONFIG_SMP) && defined(CONFIG_FRAME_POINTER)
++unsigned long profile_pc(struct pt_regs *regs)
++{
++ unsigned long pc = instruction_pointer(regs);
++
++#ifdef __x86_64__
++ /*
++ * Assume the lock function has either no stack frame or only a
++ * single word. This checks if the address on the stack looks like
++ * a kernel text address. There is a small window for false hits,
++ * but in that case the tick is just accounted to the spinlock
++ * function. Better would be to write these functions in assembler
++ * again and check exactly.
++ */
++ if (!user_mode_vm(regs) && in_lock_functions(pc)) {
++ char *v = *(char **)regs->rsp;
++ if ((v >= _stext && v <= _etext) ||
++ (v >= _sinittext && v <= _einittext) ||
++ (v >= (char *)MODULES_VADDR && v <= (char *)MODULES_END))
++ return (unsigned long)v;
++ return ((unsigned long *)regs->rsp)[1];
++ }
++#else
++ if (!user_mode_vm(regs) && in_lock_functions(pc))
++ return *(unsigned long *)(regs->ebp + 4);
++#endif
++
++ return pc;
++}
++EXPORT_SYMBOL(profile_pc);
++#endif
++
++/*
++ * This is the same as the above, except we _also_ save the current
++ * Time Stamp Counter value at the time of the timer interrupt, so that
++ * we later on can estimate the time of day more exactly.
++ */
++irqreturn_t timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
++{
++ s64 delta, delta_cpu, stolen, blocked;
++ u64 sched_time;
++ unsigned int i, cpu = smp_processor_id();
++ struct shadow_time_info *shadow = &per_cpu(shadow_time, cpu);
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++
++ /*
++ * Here we are in the timer irq handler. We just have irqs locally
++ * disabled but we don't know if the timer_bh is running on the other
++ * CPU. We need to avoid an SMP race with it. NOTE: we don't need
++ * the irq version of write_lock because as just said we have irq
++ * locally disabled. -arca
++ */
++ write_seqlock(&xtime_lock);
++
++ do {
++ get_time_values_from_xen(cpu);
++
++ /* Obtain a consistent snapshot of elapsed wallclock cycles. */
++ delta = delta_cpu =
++ shadow->system_timestamp + get_nsec_offset(shadow);
++ delta -= processed_system_time;
++ delta_cpu -= per_cpu(processed_system_time, cpu);
++
++ /*
++ * Obtain a consistent snapshot of stolen/blocked cycles. We
++ * can use state_entry_time to detect if we get preempted here.
++ */
++ do {
++ sched_time = runstate->state_entry_time;
++ barrier();
++ stolen = runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline] -
++ per_cpu(processed_stolen_time, cpu);
++ blocked = runstate->time[RUNSTATE_blocked] -
++ per_cpu(processed_blocked_time, cpu);
++ barrier();
++ } while (sched_time != runstate->state_entry_time);
++ } while (!time_values_up_to_date(cpu));
++
++ if ((unlikely(delta < -(s64)permitted_clock_jitter) ||
++ unlikely(delta_cpu < -(s64)permitted_clock_jitter))
++ && printk_ratelimit()) {
++ printk("Timer ISR/%u: Time went backwards: "
++ "delta=%lld delta_cpu=%lld shadow=%lld "
++ "off=%lld processed=%lld cpu_processed=%lld\n",
++ cpu, delta, delta_cpu, shadow->system_timestamp,
++ (s64)get_nsec_offset(shadow),
++ processed_system_time,
++ per_cpu(processed_system_time, cpu));
++ for (i = 0; i < num_online_cpus(); i++)
++ printk(" %d: %lld\n", i,
++ per_cpu(processed_system_time, i));
++ }
++
++ /* System-wide jiffy work. */
++ while (delta >= NS_PER_TICK) {
++ delta -= NS_PER_TICK;
++ processed_system_time += NS_PER_TICK;
++ do_timer(regs);
++ }
++
++ if (shadow_tv_version != HYPERVISOR_shared_info->wc_version) {
++ update_wallclock();
++ if (keventd_up())
++ schedule_work(&clock_was_set_work);
++ }
++
++ write_sequnlock(&xtime_lock);
++
++ /*
++ * Account stolen ticks.
++ * HACK: Passing NULL to account_steal_time()
++ * ensures that the ticks are accounted as stolen.
++ */
++ if ((stolen > 0) && (delta_cpu > 0)) {
++ delta_cpu -= stolen;
++ if (unlikely(delta_cpu < 0))
++ stolen += delta_cpu; /* clamp local-time progress */
++ do_div(stolen, NS_PER_TICK);
++ per_cpu(processed_stolen_time, cpu) += stolen * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += stolen * NS_PER_TICK;
++ account_steal_time(NULL, (cputime_t)stolen);
++ }
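++
++ /*
++ * NB. do_div() truncates to whole ticks; the sub-tick remainder is
++ * not folded into processed_stolen_time, so it is picked up again
++ * on a later interrupt. The same holds for the blocked and
++ * user/system accounting below.
++ */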
++
++ /*
++ * Account blocked ticks.
++ * HACK: Passing idle_task to account_steal_time()
++ * ensures that the ticks are accounted as idle/wait.
++ */
++ if ((blocked > 0) && (delta_cpu > 0)) {
++ delta_cpu -= blocked;
++ if (unlikely(delta_cpu < 0))
++ blocked += delta_cpu; /* clamp local-time progress */
++ do_div(blocked, NS_PER_TICK);
++ per_cpu(processed_blocked_time, cpu) += blocked * NS_PER_TICK;
++ per_cpu(processed_system_time, cpu) += blocked * NS_PER_TICK;
++ account_steal_time(idle_task(cpu), (cputime_t)blocked);
++ }
++
++ /* Account user/system ticks. */
++ if (delta_cpu > 0) {
++ do_div(delta_cpu, NS_PER_TICK);
++ per_cpu(processed_system_time, cpu) += delta_cpu * NS_PER_TICK;
++ if (user_mode_vm(regs))
++ account_user_time(current, (cputime_t)delta_cpu);
++ else
++ account_system_time(current, HARDIRQ_OFFSET,
++ (cputime_t)delta_cpu);
++ }
++
++ /* Offlined for more than a few seconds? Avoid lockup warnings. */
++ if (stolen > 5*HZ)
++ touch_softlockup_watchdog();
++
++ /* Local timer processing (see update_process_times()). */
++ run_local_timers();
++ if (rcu_pending(cpu))
++ rcu_check_callbacks(cpu, user_mode_vm(regs));
++ scheduler_tick();
++ run_posix_cpu_timers(current);
++ profile_tick(CPU_PROFILING, regs);
++
++ return IRQ_HANDLED;
++}
++
++static void init_missing_ticks_accounting(unsigned int cpu)
++{
++ struct vcpu_register_runstate_memory_area area;
++ struct vcpu_runstate_info *runstate = &per_cpu(runstate, cpu);
++ int rc;
++
++ memset(runstate, 0, sizeof(*runstate));
++
++ area.addr.v = runstate;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area);
++ WARN_ON(rc && rc != -ENOSYS);
++
++ per_cpu(processed_blocked_time, cpu) =
++ runstate->time[RUNSTATE_blocked];
++ per_cpu(processed_stolen_time, cpu) =
++ runstate->time[RUNSTATE_runnable] +
++ runstate->time[RUNSTATE_offline];
++}
++
++/* not static: needed by APM */
++unsigned long get_cmos_time(void)
++{
++ unsigned long retval;
++ unsigned long flags;
++
++ spin_lock_irqsave(&rtc_lock, flags);
++
++ if (efi_enabled)
++ retval = efi_get_time();
++ else
++ retval = mach_get_cmos_time();
++
++ spin_unlock_irqrestore(&rtc_lock, flags);
++
++ return retval;
++}
++EXPORT_SYMBOL(get_cmos_time);
++
++static void sync_cmos_clock(unsigned long dummy);
++
++static DEFINE_TIMER(sync_cmos_timer, sync_cmos_clock, 0, 0);
++
++static void sync_cmos_clock(unsigned long dummy)
++{
++ struct timeval now, next;
++ int fail = 1;
++
++ /*
++ * If we have an externally synchronized Linux clock, then update
++ * CMOS clock accordingly every ~11 minutes. Set_rtc_mmss() has to be
++ * called as close as possible to 500 ms before the new second starts.
++ * This code is run on a timer. If the clock is set, that timer
++ * may not expire at the correct time. Thus, we adjust...
++ */
++ if (!ntp_synced())
++ /*
++ * Not synced, exit, do not restart a timer (if one is
++ * running, let it run out).
++ */
++ return;
++
++ do_gettimeofday(&now);
++ if (now.tv_usec >= USEC_AFTER - ((unsigned) TICK_SIZE) / 2 &&
++ now.tv_usec <= USEC_BEFORE + ((unsigned) TICK_SIZE) / 2)
++ fail = set_rtc_mmss(now.tv_sec);
++
++ next.tv_usec = USEC_AFTER - now.tv_usec;
++ if (next.tv_usec <= 0)
++ next.tv_usec += USEC_PER_SEC;
++
++ if (!fail)
++ next.tv_sec = 659;
++ else
++ next.tv_sec = 0;
++
++ if (next.tv_usec >= USEC_PER_SEC) {
++ next.tv_sec++;
++ next.tv_usec -= USEC_PER_SEC;
++ }
++ mod_timer(&sync_cmos_timer, jiffies + timeval_to_jiffies(&next));
++}
++
++void notify_arch_cmos_timer(void)
++{
++ mod_timer(&sync_cmos_timer, jiffies + 1);
++ mod_timer(&sync_xen_wallclock_timer, jiffies + 1);
++}
++
++static int timer_resume(struct sys_device *dev)
++{
++ extern void time_resume(void);
++ time_resume();
++ return 0;
++}
++
++static struct sysdev_class timer_sysclass = {
++ .resume = timer_resume,
++ set_kset_name("timer"),
++};
++
++
++/* XXX this driverfs stuff should probably go elsewhere later -john */
++static struct sys_device device_timer = {
++ .id = 0,
++ .cls = &timer_sysclass,
++};
++
++static int time_init_device(void)
++{
++ int error = sysdev_class_register(&timer_sysclass);
++ if (!error)
++ error = sysdev_register(&device_timer);
++ return error;
++}
++
++device_initcall(time_init_device);
++
++#ifdef CONFIG_HPET_TIMER
++extern void (*late_time_init)(void);
++/* Duplicate of time_init() below, with hpet_enable part added */
++static void __init hpet_time_init(void)
++{
++ xtime.tv_sec = get_cmos_time();
++ xtime.tv_nsec = (INITIAL_JIFFIES % HZ) * (NSEC_PER_SEC / HZ);
++ set_normalized_timespec(&wall_to_monotonic,
++ -xtime.tv_sec, -xtime.tv_nsec);
++
++ if ((hpet_enable() >= 0) && hpet_use_timer) {
++ printk("Using HPET for base-timer\n");
++ }
++
++ time_init_hook();
++}
++#endif
++
++/* Dynamically-mapped IRQ. */
++DEFINE_PER_CPU(int, timer_irq);
++
++extern void (*late_time_init)(void);
++static void setup_cpu0_timer_irq(void)
++{
++ per_cpu(timer_irq, 0) =
++ bind_virq_to_irqhandler(
++ VIRQ_TIMER,
++ 0,
++ timer_interrupt,
++ SA_INTERRUPT,
++ "timer0",
++ NULL);
++ BUG_ON(per_cpu(timer_irq, 0) < 0);
++}
++
++static struct vcpu_set_periodic_timer xen_set_periodic_tick = {
++ .period_ns = NS_PER_TICK
++};
++
++void __init time_init(void)
++{
++#ifdef CONFIG_HPET_TIMER
++ if (is_hpet_capable()) {
++ /*
++ * HPET initialization needs to do memory-mapped io. So, let
++ * us do a late initialization after mem_init().
++ */
++ late_time_init = hpet_time_init;
++ return;
++ }
++#endif
++
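++ /*
++ * Ask Xen for a periodic VCPU timer. Hypervisors that predate
++ * VCPUOP_set_periodic_timer return -ENOSYS, which is tolerated only
++ * when compatibility with them is configured.
++ */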
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, 0,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++
++ get_time_values_from_xen(0);
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++ per_cpu(processed_system_time, 0) = processed_system_time;
++ init_missing_ticks_accounting(0);
++
++ update_wallclock();
++
++ init_cpu_khz();
++ printk(KERN_INFO "Xen reported: %u.%03u MHz processor.\n",
++ cpu_khz / 1000, cpu_khz % 1000);
++
++#if defined(__x86_64__)
++ vxtime.mode = VXTIME_TSC;
++ vxtime.quot = (1000000L << 32) / vxtime_hz;
++ vxtime.tsc_quot = (1000L << 32) / cpu_khz;
++ sync_core();
++ rdtscll(vxtime.last_tsc);
++#endif
++
++ /* Cannot request_irq() until kmem is initialised. */
++ late_time_init = setup_cpu0_timer_irq;
++}
++
++/* Convert jiffies to system time. */
++u64 jiffies_to_st(unsigned long j)
++{
++ unsigned long seq;
++ long delta;
++ u64 st;
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ delta = j - jiffies;
++ if (delta < 1) {
++ /* Triggers in some wrap-around cases, but that's okay:
++ * we just end up with a shorter timeout. */
++ st = processed_system_time + NS_PER_TICK;
++ } else if (((unsigned long)delta >> (BITS_PER_LONG-3)) != 0) {
++ /* Very long timeout means there is no pending timer.
++ * We indicate this to Xen by passing zero timeout. */
++ st = 0;
++ } else {
++ st = processed_system_time + delta * (u64)NS_PER_TICK;
++ }
++ } while (read_seqretry(&xtime_lock, seq));
++
++ return st;
++}
++EXPORT_SYMBOL(jiffies_to_st);
++
++/*
++ * stop_hz_timer / start_hz_timer - enter/exit 'tickless mode' on an idle cpu
++ * These functions are based on implementations from arch/s390/kernel/time.c
++ */
++static void stop_hz_timer(void)
++{
++ struct vcpu_set_singleshot_timer singleshot;
++ unsigned int cpu = smp_processor_id();
++ unsigned long j;
++ int rc;
++
++ cpu_set(cpu, nohz_cpu_mask);
++
++ /*
++ * See matching smp_mb in rcu_start_batch in rcupdate.c. These mbs
++ * ensure that if __rcu_pending (nested in rcu_needs_cpu) fetches a
++ * value of rcp->cur that matches rdp->quiescbatch and allows us to
++ * stop the hz timer then the cpumasks created for subsequent values
++ * of cur in rcu_start_batch are guaranteed to pick up the updated
++ * nohz_cpu_mask and so will not depend on this cpu.
++ */
++
++ smp_mb();
++
++ /* Leave ourselves in tick mode if rcu or softirq or timer pending. */
++ if (rcu_needs_cpu(cpu) || local_softirq_pending() ||
++ (j = next_timer_interrupt(), time_before_eq(j, jiffies))) {
++ cpu_clear(cpu, nohz_cpu_mask);
++ j = jiffies + 1;
++ }
++
++ singleshot.timeout_abs_ns = jiffies_to_st(j) + NS_PER_TICK/2;
++ singleshot.flags = 0;
++ rc = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &singleshot);
++#if CONFIG_XEN_COMPAT <= 0x030004
++ if (rc) {
++ BUG_ON(rc != -ENOSYS);
++ rc = HYPERVISOR_set_timer_op(singleshot.timeout_abs_ns);
++ }
++#endif
++ BUG_ON(rc);
++}
++
++static void start_hz_timer(void)
++{
++ cpu_clear(smp_processor_id(), nohz_cpu_mask);
++}
++
++void raw_safe_halt(void)
++{
++ stop_hz_timer();
++ /* Blocking includes an implicit local_irq_enable(). */
++ HYPERVISOR_block();
++ start_hz_timer();
++}
++EXPORT_SYMBOL(raw_safe_halt);
++
++void halt(void)
++{
++ if (irqs_disabled())
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++}
++EXPORT_SYMBOL(halt);
++
++/* No locking required. Interrupts are disabled on all CPUs. */
++void time_resume(void)
++{
++ unsigned int cpu;
++
++ init_cpu_khz();
++
++ for_each_online_cpu(cpu) {
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++ get_time_values_from_xen(cpu);
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ }
++
++ processed_system_time = per_cpu(shadow_time, 0).system_timestamp;
++
++ update_wallclock();
++}
++
++#ifdef CONFIG_SMP
++static char timer_name[NR_CPUS][15];
++
++int __cpuinit local_setup_timer(unsigned int cpu)
++{
++ int seq, irq;
++
++ BUG_ON(cpu == 0);
++
++ switch (HYPERVISOR_vcpu_op(VCPUOP_set_periodic_timer, cpu,
++ &xen_set_periodic_tick)) {
++ case 0:
++#if CONFIG_XEN_COMPAT <= 0x030004
++ case -ENOSYS:
++#endif
++ break;
++ default:
++ BUG();
++ }
++
++ do {
++ seq = read_seqbegin(&xtime_lock);
++ /* Use cpu0 timestamp: cpu's shadow is not initialised yet. */
++ per_cpu(processed_system_time, cpu) =
++ per_cpu(shadow_time, 0).system_timestamp;
++ init_missing_ticks_accounting(cpu);
++ } while (read_seqretry(&xtime_lock, seq));
++
++ sprintf(timer_name[cpu], "timer%u", cpu);
++ irq = bind_virq_to_irqhandler(VIRQ_TIMER,
++ cpu,
++ timer_interrupt,
++ SA_INTERRUPT,
++ timer_name[cpu],
++ NULL);
++ if (irq < 0)
++ return irq;
++ per_cpu(timer_irq, cpu) = irq;
++
++ return 0;
++}
++
++void __cpuexit local_teardown_timer(unsigned int cpu)
++{
++ BUG_ON(cpu == 0);
++ unbind_from_irqhandler(per_cpu(timer_irq, cpu), NULL);
++}
++#endif
++
++#ifdef CONFIG_CPU_FREQ
++static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
++ void *data)
++{
++ struct cpufreq_freqs *freq = data;
++ struct xen_platform_op op;
++
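++ /*
++ * CPUs with a constant TSC keep time independently of frequency
++ * changes; for all others, report the new frequency so Xen can
++ * rescale its TSC-to-nanosecond multipliers.
++ */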
++ if (cpu_has(&cpu_data[freq->cpu], X86_FEATURE_CONSTANT_TSC))
++ return 0;
++
++ if (val == CPUFREQ_PRECHANGE)
++ return 0;
++
++ op.cmd = XENPF_change_freq;
++ op.u.change_freq.flags = 0;
++ op.u.change_freq.cpu = freq->cpu;
++ op.u.change_freq.freq = (u64)freq->new * 1000;
++ WARN_ON(HYPERVISOR_platform_op(&op));
++
++ return 0;
++}
++
++static struct notifier_block time_cpufreq_notifier_block = {
++ .notifier_call = time_cpufreq_notifier
++};
++
++static int __init cpufreq_time_setup(void)
++{
++ if (cpufreq_register_notifier(&time_cpufreq_notifier_block,
++ CPUFREQ_TRANSITION_NOTIFIER)) {
++ printk(KERN_ERR "failed to set up cpufreq notifier\n");
++ return -ENODEV;
++ }
++ return 0;
++}
++
++core_initcall(cpufreq_time_setup);
++#endif
++
++/*
++ * /proc/sys/xen: This really belongs in another file. It can stay here for
++ * now however.
++ */
++static ctl_table xen_subtable[] = {
++ {
++ .ctl_name = 1,
++ .procname = "independent_wallclock",
++ .data = &independent_wallclock,
++ .maxlen = sizeof(independent_wallclock),
++ .mode = 0644,
++ .proc_handler = proc_dointvec
++ },
++ {
++ .ctl_name = 2,
++ .procname = "permitted_clock_jitter",
++ .data = &permitted_clock_jitter,
++ .maxlen = sizeof(permitted_clock_jitter),
++ .mode = 0644,
++ .proc_handler = proc_doulongvec_minmax
++ },
++ { 0 }
++};
++static ctl_table xen_table[] = {
++ {
++ .ctl_name = 123,
++ .procname = "xen",
++ .mode = 0555,
++ .child = xen_subtable},
++ { 0 }
++};
++static int __init xen_sysctl_init(void)
++{
++ (void)register_sysctl_table(xen_table, 0);
++ return 0;
++}
++__initcall(xen_sysctl_init);
+Index: head-2008-11-25/arch/x86/kernel/traps_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/traps_32-xen.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,1190 @@
++/*
++ * linux/arch/i386/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'asm.s'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/highmem.h>
++#include <linux/kallsyms.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#ifdef CONFIG_EISA
++#include <linux/ioport.h>
++#include <linux/eisa.h>
++#endif
++
++#ifdef CONFIG_MCA
++#include <linux/mca.h>
++#endif
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/nmi.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/arch_hooks.h>
++#include <asm/kdebug.h>
++
++#include <linux/module.h>
++
++#include "mach_traps.h"
++
++asmlinkage int system_call(void);
++
++struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++ { 0, 0 }, { 0, 0 } };
++
++/* Do we ignore FPU interrupts ? */
++char ignore_fpu_irq = 0;
++
++#ifndef CONFIG_X86_NO_IDT
++/*
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++#endif
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void alignment_check(void);
++#ifndef CONFIG_XEN
++asmlinkage void spurious_interrupt_bug(void);
++#else
++asmlinkage void fixup_4gb_segment(void);
++#endif
++asmlinkage void machine_check(void);
++
++static int kstack_depth_to_print = 24;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++ATOMIC_NOTIFIER_HEAD(i386die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&i386die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline int valid_stack_ptr(struct thread_info *tinfo, void *p)
++{
++ return p > (void *)tinfo &&
++ p < (void *)tinfo + THREAD_SIZE - 3;
++}
++
++/*
++ * Print one address/symbol entry per line.
++ */
++static inline void print_addr_and_symbol(unsigned long addr, char *log_lvl)
++{
++ printk(" [<%08lx>] ", addr);
++
++ print_symbol("%s\n", addr);
++}
++
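++/*
++ * Walk one kernel stack. With CONFIG_FRAME_POINTER the ebp chain is
++ * followed, giving an exact backtrace; otherwise every word on the
++ * stack that looks like a kernel text address is printed, which is
++ * inexact but needs no frame pointers.
++ */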
++static inline unsigned long print_context_stack(struct thread_info *tinfo,
++ unsigned long *stack, unsigned long ebp,
++ char *log_lvl)
++{
++ unsigned long addr;
++
++#ifdef CONFIG_FRAME_POINTER
++ while (valid_stack_ptr(tinfo, (void *)ebp)) {
++ addr = *(unsigned long *)(ebp + 4);
++ print_addr_and_symbol(addr, log_lvl);
++ /*
++ * break out of recursive entries (such as
++ * end_of_stack_stop_unwind_function):
++ */
++ if (ebp == *(unsigned long *)ebp)
++ break;
++ ebp = *(unsigned long *)ebp;
++ }
++#else
++ while (valid_stack_ptr(tinfo, stack)) {
++ addr = *stack++;
++ if (__kernel_text_address(addr))
++ print_addr_and_symbol(addr, log_lvl);
++ }
++#endif
++ return ebp;
++}
++
++static asmlinkage int
++show_trace_unwind(struct unwind_frame_info *info, void *log_lvl)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ print_addr_and_symbol(UNW_PC(info), log_lvl);
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++static void show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *stack, char *log_lvl)
++{
++ unsigned long ebp;
++
++ if (!task)
++ task = current;
++
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, task, regs) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ } else if (task == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, log_lvl);
++ else {
++ if (unwind_init_blocked(&info, task) == 0)
++ unw_ret = show_trace_unwind(&info, log_lvl);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if (UNW_SP(&info) >= PAGE_OFFSET) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (void *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ if (task == current) {
++ /* Grab ebp right from our regs */
++ asm ("movl %%ebp, %0" : "=r" (ebp) : );
++ } else {
++ /* ebp is the last reg pushed by switch_to */
++ ebp = *(unsigned long *) task->thread.esp;
++ }
++
++ while (1) {
++ struct thread_info *context;
++ context = (struct thread_info *)
++ ((unsigned long)stack & (~(THREAD_SIZE - 1)));
++ ebp = print_context_stack(context, stack, ebp, log_lvl);
++ stack = (unsigned long*)context->previous_esp;
++ if (!stack)
++ break;
++ printk("%s =======================\n", log_lvl);
++ }
++}
++
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long * stack)
++{
++ show_trace_log_lvl(task, regs, stack, "");
++}
++
++static void show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
++ unsigned long *esp, char *log_lvl)
++{
++ unsigned long *stack;
++ int i;
++
++ if (esp == NULL) {
++ if (task)
++ esp = (unsigned long*)task->thread.esp;
++ else
++ esp = (unsigned long *)&esp;
++ }
++
++ stack = esp;
++ for(i = 0; i < kstack_depth_to_print; i++) {
++ if (kstack_end(stack))
++ break;
++ if (i && ((i % 8) == 0))
++ printk("\n%s ", log_lvl);
++ printk("%08lx ", *stack++);
++ }
++ printk("\n%sCall Trace:\n", log_lvl);
++ show_trace_log_lvl(task, regs, esp, log_lvl);
++}
++
++void show_stack(struct task_struct *task, unsigned long *esp)
++{
++ printk(" ");
++ show_stack_log_lvl(task, NULL, esp, "");
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long stack;
++
++ show_trace(current, NULL, &stack);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = 1;
++ unsigned long esp;
++ unsigned short ss;
++
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode_vm(regs)) {
++ in_kernel = 0;
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ print_modules();
++ printk(KERN_EMERG "CPU: %d\nEIP: %04x:[<%08lx>] %s VLI\n"
++ "EFLAGS: %08lx (%s %.*s) \n",
++ smp_processor_id(), 0xffff & regs->xcs, regs->eip,
++ print_tainted(), regs->eflags, system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ print_symbol(KERN_EMERG "EIP is at %s\n", regs->eip);
++ printk(KERN_EMERG "eax: %08lx ebx: %08lx ecx: %08lx edx: %08lx\n",
++ regs->eax, regs->ebx, regs->ecx, regs->edx);
++ printk(KERN_EMERG "esi: %08lx edi: %08lx ebp: %08lx esp: %08lx\n",
++ regs->esi, regs->edi, regs->ebp, esp);
++ printk(KERN_EMERG "ds: %04x es: %04x ss: %04x\n",
++ regs->xds & 0xffff, regs->xes & 0xffff, ss);
++ printk(KERN_EMERG "Process %.*s (pid: %d, ti=%p task=%p task.ti=%p)",
++ TASK_COMM_LEN, current->comm, current->pid,
++ current_thread_info(), current, current->thread_info);
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++ u8 __user *eip;
++
++ printk("\n" KERN_EMERG "Stack: ");
++ show_stack_log_lvl(NULL, regs, (unsigned long *)esp, KERN_EMERG);
++
++ printk(KERN_EMERG "Code: ");
++
++ eip = (u8 __user *)regs->eip - 43;
++ for (i = 0; i < 64; i++, eip++) {
++ unsigned char c;
++
++ if (eip < (u8 __user *)PAGE_OFFSET || __get_user(c, eip)) {
++ printk(" Bad EIP value.");
++ break;
++ }
++ if (eip == (u8 __user *)regs->eip)
++ printk("<%02x> ", c);
++ else
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++static void handle_BUG(struct pt_regs *regs)
++{
++ unsigned long eip = regs->eip;
++ unsigned short ud2;
++
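++ /*
++ * BUG() is compiled to a ud2 instruction (opcode 0x0f 0x0b, read
++ * here as the little-endian u16 0x0b0f). With CONFIG_DEBUG_BUGVERBOSE
++ * the line number and file name pointer follow the instruction and
++ * are decoded below.
++ */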
++ if (eip < PAGE_OFFSET)
++ return;
++ if (__get_user(ud2, (unsigned short __user *)eip))
++ return;
++ if (ud2 != 0x0b0f)
++ return;
++
++ printk(KERN_EMERG "------------[ cut here ]------------\n");
++
++#ifdef CONFIG_DEBUG_BUGVERBOSE
++ do {
++ unsigned short line;
++ char *file;
++ char c;
++
++ if (__get_user(line, (unsigned short __user *)(eip + 2)))
++ break;
++ if (__get_user(file, (char * __user *)(eip + 4)) ||
++ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++ file = "<bad filename>";
++
++ printk(KERN_EMERG "kernel BUG at %s:%d!\n", file, line);
++ return;
++ } while (0);
++#endif
++ printk(KERN_EMERG "Kernel BUG at [verbose debug info unavailable]\n");
++}
++
++/*
++ * This is gone through when something in the kernel has done something
++ * bad and is about to be terminated.
++ */
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ static struct {
++ spinlock_t lock;
++ u32 lock_owner;
++ int lock_owner_depth;
++ } die = {
++ .lock = SPIN_LOCK_UNLOCKED,
++ .lock_owner = -1,
++ .lock_owner_depth = 0
++ };
++ static int die_counter;
++ unsigned long flags;
++
++ oops_enter();
++
++ if (die.lock_owner != raw_smp_processor_id()) {
++ console_verbose();
++ spin_lock_irqsave(&die.lock, flags);
++ die.lock_owner = smp_processor_id();
++ die.lock_owner_depth = 0;
++ bust_spinlocks(1);
++ }
++ else
++ local_save_flags(flags);
++
++ if (++die.lock_owner_depth < 3) {
++ int nl = 0;
++ unsigned long esp;
++ unsigned short ss;
++
++ handle_BUG(regs);
++ printk(KERN_EMERG "%s: %04lx [#%d]\n", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk(KERN_EMERG "PREEMPT ");
++ nl = 1;
++#endif
++#ifdef CONFIG_SMP
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("SMP ");
++ nl = 1;
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ if (!nl)
++ printk(KERN_EMERG);
++ printk("DEBUG_PAGEALLOC");
++ nl = 1;
++#endif
++ if (nl)
++ printk("\n");
++ if (notify_die(DIE_OOPS, str, regs, err,
++ current->thread.trap_no, SIGSEGV) !=
++ NOTIFY_STOP) {
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ esp = (unsigned long) (&regs->esp);
++ savesegment(ss, ss);
++ if (user_mode(regs)) {
++ esp = regs->esp;
++ ss = regs->xss & 0xffff;
++ }
++ printk(KERN_EMERG "EIP: [<%08lx>] ", regs->eip);
++ print_symbol("%s", regs->eip);
++ printk(" SS:ESP %04x:%08lx\n", ss, esp);
++ }
++ else
++ regs = NULL;
++ } else
++ printk(KERN_EMERG "Recursive die() failure, output suppressed\n");
++
++ bust_spinlocks(0);
++ die.lock_owner = -1;
++ spin_unlock_irqrestore(&die.lock, flags);
++
++ if (!regs)
++ return;
++
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++
++ if (in_interrupt())
++ panic("Fatal exception in interrupt");
++
++ if (panic_on_oops)
++ panic("Fatal exception");
++
++ oops_exit();
++ do_exit(SIGSEGV);
++}
++
++static inline void die_if_kernel(const char * str, struct pt_regs * regs, long err)
++{
++ if (!user_mode_vm(regs))
++ die(str, regs, err);
++}
++
++static void __kprobes do_trap(int trapnr, int signr, char *str, int vm86,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (regs->eflags & VM_MASK) {
++ if (vm86)
++ goto vm86_trap;
++ goto trap_signal;
++ }
++
++ if (!user_mode(regs))
++ goto kernel_trap;
++
++ trap_signal: {
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++ kernel_trap: {
++ if (!fixup_exception(regs))
++ die(str, regs, error_code);
++ return;
++ }
++
++ vm86_trap: {
++ int ret = handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, trapnr);
++ if (ret) goto trap_signal;
++ return;
++ }
++}
++
++#define DO_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 0, regs, error_code, &info); \
++}
++
++#define DO_VM86_ERROR(trapnr, signr, str, name) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, NULL); \
++}
++
++#define DO_VM86_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++fastcall void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ do_trap(trapnr, signr, str, 1, regs, error_code, &info); \
++}
++
++DO_VM86_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->eip)
++#ifndef CONFIG_KPROBES
++DO_VM86_ERROR( 3, SIGTRAP, "int3", int3)
++#endif
++DO_VM86_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_VM86_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->eip)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR(12, SIGBUS, "stack segment", stack_segment)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR_INFO(32, SIGSEGV, "iret exception", iret_error, ILL_BADSTK, 0)
++
++fastcall void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ current->thread.error_code = error_code;
++ current->thread.trap_no = 13;
++
++ if (regs->eflags & VM_MASK)
++ goto gp_in_vm86;
++
++ if (!user_mode(regs))
++ goto gp_in_kernel;
++
++ force_sig(SIGSEGV, current);
++ return;
++
++gp_in_vm86:
++ local_irq_enable();
++ handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
++ return;
++
++gp_in_kernel:
++ if (!fixup_exception(regs)) {
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "Uhhuh. NMI received. Dazed and confused, but trying "
++ "to continue\n");
++ printk(KERN_EMERG "You probably have a hardware problem with your RAM "
++ "chips\n");
++
++ /* Clear and disable the memory parity error line. */
++ clear_mem_error(reason);
++}
++
++static void io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++ /* Re-enable the IOCK line, wait for a few seconds */
++ clear_io_check_error(reason);
++}
++
++static void unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++#ifdef CONFIG_MCA
++ /* Might actually be able to figure out what the guilty party
++ * is. */
++ if( MCA_bus ) {
++ mca_handle_nmi();
++ return;
++ }
++#endif
++ printk("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n",
++ reason, smp_processor_id());
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++static DEFINE_SPINLOCK(nmi_print_lock);
++
++void die_nmi (struct pt_regs *regs, const char *msg)
++{
++ if (notify_die(DIE_NMIWATCHDOG, msg, regs, 0, 2, SIGINT) ==
++ NOTIFY_STOP)
++ return;
++
++ spin_lock(&nmi_print_lock);
++ /*
++ * We are in trouble anyway, lets at least try
++ * to get a message out.
++ */
++ bust_spinlocks(1);
++ printk(KERN_EMERG "%s", msg);
++ printk(" on CPU%d, eip %08lx, registers:\n",
++ smp_processor_id(), regs->eip);
++ show_registers(regs);
++ printk(KERN_EMERG "console shuts up ...\n");
++ console_silent();
++ spin_unlock(&nmi_print_lock);
++ bust_spinlocks(0);
++
++ /* If we are in kernel we are probably nested up pretty bad
++ * and might as well get out now while we still can.
++ */
++ if (!user_mode_vm(regs)) {
++ current->thread.trap_no = 2;
++ crash_kexec(regs);
++ }
++
++ do_exit(SIGSEGV);
++}
++
++static void default_do_nmi(struct pt_regs * regs)
++{
++ unsigned char reason = 0;
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!smp_processor_id())
++ reason = get_nmi_reason();
++
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog) {
++ nmi_watchdog_tick(regs);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++ /*
++ * Reassert NMI in case it became active meanwhile
++ * as it's edge-triggered.
++ */
++ reassert_nmi();
++}
++
++static int dummy_nmi_callback(struct pt_regs * regs, int cpu)
++{
++ return 0;
++}
++
++static nmi_callback_t nmi_callback = dummy_nmi_callback;
++
++fastcall void do_nmi(struct pt_regs * regs, long error_code)
++{
++ int cpu;
++
++ nmi_enter();
++
++ cpu = smp_processor_id();
++
++ ++nmi_count(cpu);
++
++ if (!rcu_dereference(nmi_callback)(regs, cpu))
++ default_do_nmi(regs);
++
++ nmi_exit();
++}
++
++void set_nmi_callback(nmi_callback_t callback)
++{
++ vmalloc_sync_all();
++ rcu_assign_pointer(nmi_callback, callback);
++}
++EXPORT_SYMBOL_GPL(set_nmi_callback);
++
++void unset_nmi_callback(void)
++{
++ nmi_callback = dummy_nmi_callback;
++}
++EXPORT_SYMBOL_GPL(unset_nmi_callback);
++
++#ifdef CONFIG_KPROBES
++fastcall void __kprobes do_int3(struct pt_regs *regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP)
++ == NOTIFY_STOP)
++ return;
++ /* This is an interrupt gate, because kprobes wants interrupts
++ disabled. Normal trap handlers don't. */
++ restore_interrupts(regs);
++ do_trap(3, SIGTRAP, "int3", 1, regs, error_code, NULL);
++}
++#endif
++
++/*
++ * Our handling of the processor debug registers is non-trivial.
++ * We do not clear them on entry and exit from the kernel. Therefore
++ * it is possible to get a watchpoint trap here from inside the kernel.
++ * However, the code in ./ptrace.c has ensured that the user can
++ * only set watchpoints on userspace addresses. Therefore the in-kernel
++ * watchpoint trap can only occur in code which is reading/writing
++ * from user space. Such code must not hold kernel locks (since it
++ * can equally take a page fault), therefore it is safe to call
++ * force_sig_info even though that claims and releases locks.
++ *
++ * Code in ./signal.c ensures that the debug control register
++ * is restored before we deliver any signal, and therefore that
++ * user code runs with the correct debug control register even though
++ * we clear it here.
++ *
++ * Being careful here means that we don't have to be as careful in a
++ * lot of more complicated places (task switching can be a bit lazy
++ * about restoring all the debug state, and ptrace doesn't have to
++ * find every occurrence of the TF bit that could be saved away even
++ * by user code)
++ */
++fastcall void __kprobes do_debug(struct pt_regs * regs, long error_code)
++{
++ unsigned int condition;
++ struct task_struct *tsk = current;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++ /* It's safe to allow irq's after DR6 has been saved */
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg[7])
++ goto clear_dr7;
++ }
++
++ if (regs->eflags & VM_MASK)
++ goto debug_vm86;
++
++ /* Save debug status register where ptrace can see it */
++ tsk->thread.debugreg[6] = condition;
++
++ /*
++ * Single-stepping through TF: make sure we ignore any events in
++ * kernel space (but re-enable TF when returning to user mode).
++ */
++ if (condition & DR_STEP) {
++ /*
++ * We already checked v86 mode above, so we can
++ * check for kernel mode by just checking the CPL
++ * of CS.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ }
++
++ /* Ok, finally something we can handle */
++ send_sigtrap(tsk, regs, error_code);
++
++ /* Disable additional traps. They'll be re-enabled when
++ * the signal is delivered.
++ */
++clear_dr7:
++ set_debugreg(0, 7);
++ return;
++
++debug_vm86:
++ handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code, 1);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ return;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++void math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000: /* No unmasked exception */
++ return;
++ default: /* Multiple exceptions */
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_coprocessor_error(struct pt_regs * regs, long error_code)
++{
++ ignore_fpu_irq = 1;
++ math_error((void __user *)regs->eip);
++}
++
++static void simd_math_error(void __user *eip)
++{
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = eip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++fastcall void do_simd_coprocessor_error(struct pt_regs * regs,
++ long error_code)
++{
++ if (cpu_has_xmm) {
++ /* Handle SIMD FPU exceptions on PIII+ processors. */
++ ignore_fpu_irq = 1;
++ simd_math_error((void __user *)regs->eip);
++ } else {
++ /*
++ * Handle the strange cache-flush-from-user-space exception
++ * in all other cases. This is undocumented behaviour.
++ */
++ if (regs->eflags & VM_MASK) {
++ handle_vm86_fault((struct kernel_vm86_regs *)regs,
++ error_code);
++ return;
++ }
++ current->thread.trap_no = 19;
++ current->thread.error_code = error_code;
++ die_if_kernel("cache flush denied", regs, error_code);
++ force_sig(SIGSEGV, current);
++ }
++}
++
++#ifndef CONFIG_XEN
++fastcall void do_spurious_interrupt_bug(struct pt_regs * regs,
++ long error_code)
++{
++#if 0
++ /* No need to warn about this any longer. */
++ printk("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
++#endif
++}
++
++fastcall void setup_x86_bogus_stack(unsigned char * stk)
++{
++ unsigned long *switch16_ptr, *switch32_ptr;
++ struct pt_regs *regs;
++ unsigned long stack_top, stack_bot;
++ unsigned short iret_frame16_off;
++ int cpu = smp_processor_id();
++ /* reserve the space on 32bit stack for the magic switch16 pointer */
++ memmove(stk, stk + 8, sizeof(struct pt_regs));
++ switch16_ptr = (unsigned long *)(stk + sizeof(struct pt_regs));
++ regs = (struct pt_regs *)stk;
++ /* now the switch32 on 16bit stack */
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ iret_frame16_off = CPU_16BIT_STACK_SIZE - 8 - 20;
++ /* copy iret frame on 16bit stack */
++ memcpy((void *)(stack_bot + iret_frame16_off), &regs->eip, 20);
++ /* fill in the switch pointers */
++ switch16_ptr[0] = (regs->esp & 0xffff0000) | iret_frame16_off;
++ switch16_ptr[1] = __ESPFIX_SS;
++ switch32_ptr[0] = (unsigned long)stk + sizeof(struct pt_regs) +
++ 8 - CPU_16BIT_STACK_SIZE;
++ switch32_ptr[1] = __KERNEL_DS;
++}
++
++fastcall unsigned char * fixup_x86_bogus_stack(unsigned short sp)
++{
++ unsigned long *switch32_ptr;
++ unsigned char *stack16, *stack32;
++ unsigned long stack_top, stack_bot;
++ int len;
++ int cpu = smp_processor_id();
++ stack_bot = (unsigned long)&per_cpu(cpu_16bit_stack, cpu);
++ stack_top = stack_bot + CPU_16BIT_STACK_SIZE;
++ switch32_ptr = (unsigned long *)(stack_top - 8);
++ /* copy the data from 16bit stack to 32bit stack */
++ len = CPU_16BIT_STACK_SIZE - 8 - sp;
++ stack16 = (unsigned char *)(stack_bot + sp);
++ stack32 = (unsigned char *)
++ (switch32_ptr[0] + CPU_16BIT_STACK_SIZE - 8 - len);
++ memcpy(stack32, stack16, len);
++ return stack32;
++}
++#endif
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful.. There are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ *
++ * Must be called with kernel preemption disabled (in this case,
++ * local interrupts are disabled at the call-site in entry.S).
++ */
++asmlinkage void math_state_restore(struct pt_regs regs)
++{
++ struct thread_info *thread = current_thread_info();
++ struct task_struct *tsk = thread->task;
++
++ /* NB. 'clts' is done for us by Xen during virtual trap. */
++ if (!tsk_used_math(tsk))
++ init_fpu(tsk);
++ restore_fpu(tsk);
++ thread->status |= TS_USEDFPU; /* So we fnsave on switch_to() */
++}
++
++#ifndef CONFIG_MATH_EMULATION
++
++asmlinkage void math_emulate(long arg)
++{
++ printk(KERN_EMERG "math-emulation not enabled and no coprocessor found.\n");
++ printk(KERN_EMERG "killing %s.\n",current->comm);
++ force_sig(SIGFPE,current);
++ schedule();
++}
++
++#endif /* CONFIG_MATH_EMULATION */
++
++#ifdef CONFIG_X86_F00F_BUG
++void __init trap_init_f00f_bug(void)
++{
++ __set_fixmap(FIX_F00F_IDT, __pa(&idt_table), PAGE_KERNEL_RO);
++
++ /*
++ * Update the IDT descriptor and reload the IDT so that
++ * it uses the read-only mapped virtual address.
++ */
++ idt_descr.address = fix_to_virt(FIX_F00F_IDT);
++ load_idt(&idt_descr);
++}
++#endif
++
++
++/*
++ * NB. All these are "trap gates" (i.e. events_mask isn't set) except
++ * for those that specify <dpl>|4 in the second field.
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++ { 0, 0, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun },
++ { 10, 0, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0, __KERNEL_CS, (unsigned long)fixup_4gb_segment },
++ { 16, 0, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++ { SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)system_call },
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++ if (ret)
++ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++ if (cpu_has_fxsr) {
++ /*
++ * Verify that the FXSAVE/FXRSTOR data will be 16-byte aligned.
++ * Generates a compile-time "error: zero width for bit-field" if
++ * the alignment is wrong.
++ */
++ struct fxsrAlignAssert {
++ int _:!(offsetof(struct task_struct,
++ thread.i387.fxsave) & 15);
++ };
++
++ printk(KERN_INFO "Enabling fast FPU save and restore... ");
++ set_in_cr4(X86_CR4_OSFXSR);
++ printk("done.\n");
++ }
++ if (cpu_has_xmm) {
++ printk(KERN_INFO "Enabling unmasked SIMD FPU exception "
++ "support... ");
++ set_in_cr4(X86_CR4_OSXMMEXCPT);
++ printk("done.\n");
++ }
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
++{
++ const trap_info_t *t;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++static int __init kstack_setup(char *s)
++{
++ kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 2)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+Index: head-2008-11-25/arch/x86/mach-xen/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mach-xen/Makefile 2007-06-12 13:12:48.000000000 +0200
+@@ -0,0 +1,5 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := setup.o
+Index: head-2008-11-25/arch/x86/mach-xen/setup.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mach-xen/setup.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,158 @@
++/*
++ * Machine specific setup for generic
++ */
++
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <asm/acpi.h>
++#include <asm/arch_hooks.h>
++#include <asm/e820.h>
++#include <asm/setup.h>
++#include <asm/fixmap.h>
++
++#include <xen/interface/callback.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_HOTPLUG_CPU
++#define DEFAULT_SEND_IPI (1)
++#else
++#define DEFAULT_SEND_IPI (0)
++#endif
++
++int no_broadcast = DEFAULT_SEND_IPI;
++
++static __init int no_ipi_broadcast(char *str)
++{
++ get_option(&str, &no_broadcast);
++ printk ("Using %s mode\n", no_broadcast ? "No IPI Broadcast" :
++ "IPI Broadcast");
++ return 1;
++}
++
++__setup("no_ipi_broadcast", no_ipi_broadcast);
++
++static int __init print_ipi_mode(void)
++{
++ printk ("Using IPI %s mode\n", no_broadcast ? "No-Shortcut" :
++ "Shortcut");
++ return 0;
++}
++
++late_initcall(print_ipi_mode);
++
++/**
++ * machine_specific_memory_setup - Hook for machine specific memory setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++char * __init machine_specific_memory_setup(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable but this early in
++ * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
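++ /*
++ * Hypervisors that predate XENMEM_memory_map return -ENOSYS; in
++ * that case synthesize a single E820 RAM entry covering the pages
++ * granted at boot, plus slack for backend allocations.
++ */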
++ if (rc == -ENOSYS) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = PFN_PHYS((unsigned long long)xen_start_info->nr_pages);
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8ULL << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (char)memmap.nr_entries) < 0);
++
++ return "Xen";
++}
++
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init pre_setup_arch_hook(void)
++{
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ struct xen_platform_parameters pp;
++
++ init_mm.pgd = swapper_pg_dir = (pgd_t *)xen_start_info->pt_base;
++
++ setup_xen_features();
++
++ if (HYPERVISOR_xen_version(XENVER_platform_parameters, &pp) == 0)
++ set_fixaddr_top(pp.virt_start);
++
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ } else
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ machine_to_phys_order = fls(machine_to_phys_nr_ents - 1);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++}
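++
++/*
++ * Editor's example (assumed figures): for a machine-to-phys table
++ * with 1M entries, machine_to_phys_nr_ents = 1 << 20 and
++ * fls((1 << 20) - 1) = 20, so machine_to_phys_order = 20, the
++ * smallest order whose span covers every valid MFN.
++ */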
++
++void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = { __KERNEL_CS, (unsigned long)hypervisor_callback },
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = { __KERNEL_CS, (unsigned long)failsafe_callback },
++ };
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = { __KERNEL_CS, (unsigned long)nmi },
++ };
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address.cs, event.address.eip,
++ failsafe.address.cs, failsafe.address.eip);
++#endif
++ BUG_ON(ret);
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++}
+Index: head-2008-11-25/arch/x86/lib/scrub.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/lib/scrub.c 2008-02-08 12:30:51.000000000 +0100
+@@ -0,0 +1,21 @@
++#include <asm/cpufeature.h>
++#include <asm/page.h>
++#include <asm/processor.h>
++
++void scrub_pages(void *v, unsigned int count)
++{
++ if (likely(cpu_has_xmm2)) {
++ unsigned long n = count * (PAGE_SIZE / sizeof(long) / 4);
++
++ for (; n--; v += sizeof(long) * 4)
++ asm("movnti %1,(%0)\n\t"
++ "movnti %1,%c2(%0)\n\t"
++ "movnti %1,2*%c2(%0)\n\t"
++ "movnti %1,3*%c2(%0)\n\t"
++ : : "r" (v), "r" (0L), "i" (sizeof(long))
++ : "memory");
++ asm volatile("sfence" : : : "memory");
++ } else
++ for (; count--; v += PAGE_SIZE)
++ clear_page(v);
++}
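++
++/*
++ * Editor's note: movnti performs non-temporal (cache-bypassing)
++ * stores, so the trailing sfence is what makes the zeroing globally
++ * visible before the frames are handed back to the hypervisor. A
++ * minimal caller sketch (hypothetical buffer):
++ *
++ *	void *buf = (void *)__get_free_page(GFP_KERNEL);
++ *
++ *	if (buf)
++ *		scrub_pages(buf, 1);
++ */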
+Index: head-2008-11-25/arch/x86/mm/fault_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/fault_32-xen.c 2007-12-10 08:47:31.000000000 +0100
+@@ -0,0 +1,779 @@
++/*
++ * linux/arch/i386/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/desc.h>
++#include <asm/kdebug.h>
++
++extern void die(const char *,struct pt_regs *,long);
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++ return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
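++
++/*
++ * Editor's sketch (hypothetical client, not in the original patch):
++ * a debugger-style consumer would hook the chain as follows.
++ *
++ *	static int my_pf_cb(struct notifier_block *nb, unsigned long val,
++ *			    void *data)
++ *	{
++ *		struct die_args *args = data;
++ *
++ *		if (val == DIE_PAGE_FAULT)
++ *			printk(KERN_DEBUG "fault eip=%lx\n", args->regs->eip);
++ *		return NOTIFY_DONE;
++ *	}
++ *
++ *	static struct notifier_block my_pf_nb = { .notifier_call = my_pf_cb };
++ *	...
++ *	register_page_fault_notifier(&my_pf_nb);
++ */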
++
++
++/*
++ * Unlock any spinlocks which will prevent us from getting the
++ * message out
++ */
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++
++ if (yes) {
++ oops_in_progress = 1;
++ return;
++ }
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++}
++
++/*
++ * Return EIP plus the CS segment base. The segment limit is also
++ * adjusted, clamped to the kernel/user address space (whichever is
++ * appropriate), and returned in *eip_limit.
++ *
++ * The segment is checked, because it might have been changed by another
++ * task between the original faulting instruction and here.
++ *
++ * If CS is no longer a valid code segment, or if EIP is beyond the
++ * limit, or if it is a kernel address when CS is not a kernel segment,
++ * then the returned value will be greater than *eip_limit.
++ *
++ * This is slow, but is very rarely executed.
++ */
++static inline unsigned long get_segment_eip(struct pt_regs *regs,
++ unsigned long *eip_limit)
++{
++ unsigned long eip = regs->eip;
++ unsigned seg = regs->xcs & 0xffff;
++ u32 seg_ar, seg_limit, base, *desc;
++
++ /* Unlikely, but must come before segment checks. */
++ if (unlikely(regs->eflags & VM_MASK)) {
++ base = seg << 4;
++ *eip_limit = base + 0xffff;
++ return base + (eip & 0xffff);
++ }
++
++ /* The standard kernel/user address space limit. */
++ *eip_limit = (seg & 2) ? USER_DS.seg : KERNEL_DS.seg;
++
++ /* By far the most common cases. */
++ if (likely(seg == __USER_CS || seg == GET_KERNEL_CS()))
++ return eip;
++
++ /* Check the segment exists, is within the current LDT/GDT size,
++ that kernel/user (ring 0..3) has the appropriate privilege,
++ that it's a code segment, and get the limit. */
++ __asm__ ("larl %3,%0; lsll %3,%1"
++ : "=&r" (seg_ar), "=r" (seg_limit) : "0" (0), "rm" (seg));
++ if ((~seg_ar & 0x9800) || eip > seg_limit) {
++ *eip_limit = 0;
++ return 1; /* So that returned eip > *eip_limit. */
++ }
++
++ /* Get the GDT/LDT descriptor base.
++ When you look for races in this code remember that
++ LDT and other horrors are only used in user space. */
++ if (seg & (1<<2)) {
++ /* Must lock the LDT while reading it. */
++ down(&current->mm->context.sem);
++ desc = current->mm->context.ldt;
++ desc = (void *)desc + (seg & ~7);
++ } else {
++ /* Must disable preemption while reading the GDT. */
++ desc = (u32 *)get_cpu_gdt_table(get_cpu());
++ desc = (void *)desc + (seg & ~7);
++ }
++
++ /* Decode the code segment base from the descriptor */
++ base = get_desc_base((unsigned long *)desc);
++
++ if (seg & (1<<2)) {
++ up(&current->mm->context.sem);
++ } else
++ put_cpu();
++
++ /* Adjust EIP and segment limit, and clamp at the kernel limit.
++ It's legitimate for segments to wrap at 0xffffffff. */
++ seg_limit += base;
++ if (seg_limit < *eip_limit && seg_limit >= base)
++ *eip_limit = seg_limit;
++ return eip + base;
++}
++
++/*
++ * Sometimes AMD Athlon/Opteron CPUs report invalid exceptions on prefetch.
++ * Check that here and ignore it.
++ */
++static int __is_prefetch(struct pt_regs *regs, unsigned long addr)
++{
++ unsigned long limit;
++ unsigned long instr = get_segment_eip (regs, &limit);
++ int scan_more = 1;
++ int prefetch = 0;
++ int i;
++
++ for (i = 0; scan_more && i < 15; i++) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86 prefixes. */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x60:
++ /* 0x64 thru 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (instr > limit)
++ break;
++ if (__get_user(opcode, (unsigned char __user *) instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
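++
++/*
++ * Editor's example: the AMD prefetch opcodes being matched are
++ * 0x0F 0x0D (3DNow! PREFETCH/PREFETCHW) and 0x0F 0x18
++ * (PREFETCHNTA/T0/T1/T2), possibly behind prefix bytes, e.g.:
++ *
++ *	0f 0d 08	prefetchw (%eax)
++ *	3e 0f 18 01	prefetchnta %ds:(%ecx)
++ *
++ * which is why the loop above skips prefixes and gives up after 15
++ * bytes, the maximum x86 instruction length.
++ */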
++
++static inline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ if (unlikely(boot_cpu_data.x86_vendor == X86_VENDOR_AMD &&
++ boot_cpu_data.x86 >= 6)) {
++ /* Catch an obscure case of prefetch inside an NX page. */
++ if (nx_enabled && (error_code & 16))
++ return 0;
++ return __is_prefetch(regs, addr);
++ }
++ return 0;
++}
++
++static noinline void force_sig_info_fault(int si_signo, int si_code,
++ unsigned long address, struct task_struct *tsk)
++{
++ siginfo_t info;
++
++ info.si_signo = si_signo;
++ info.si_errno = 0;
++ info.si_code = si_code;
++ info.si_addr = (void __user *)address;
++ force_sig_info(si_signo, &info, tsk);
++}
++
++fastcall void do_invalid_op(struct pt_regs *, unsigned long);
++
++#ifdef CONFIG_X86_PAE
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long *p, page;
++ unsigned long mfn;
++
++ page = read_cr3();
++ p = (unsigned long *)__va(page);
++ p += (address >> 30) * 2;
++ printk(KERN_ALERT "%08lx -> *pde = %08lx:%08lx\n", page, p[1], p[0]);
++ if (p[0] & _PAGE_PRESENT) {
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *)__va(page);
++ address &= 0x3fffffff;
++ p += (address >> 21) * 2;
++ printk(KERN_ALERT "%08lx -> *pme = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ mfn = (p[0] >> PAGE_SHIFT) | (p[1] << 20);
++#ifdef CONFIG_HIGHPTE
++ if (mfn_to_pfn(mfn) >= highstart_pfn)
++ return;
++#endif
++ if (p[0] & _PAGE_PRESENT) {
++ page = mfn_to_pfn(mfn) << PAGE_SHIFT;
++ p = (unsigned long *) __va(page);
++ address &= 0x001fffff;
++ p += (address >> 12) * 2;
++ printk(KERN_ALERT "%08lx -> *pte = %08lx:%08lx\n",
++ page, p[1], p[0]);
++ }
++ }
++}
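++
++/*
++ * Editor's example: for a faulting address of 0xc0101234 the walk
++ * above indexes the PDPT with bits 31-30 (3), the page directory
++ * with bits 29-21 (0) and the page table with bits 20-12 (0x101),
++ * printing each 64-bit entry as a high:low pair of longs.
++ */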
++#else
++static void dump_fault_path(unsigned long address)
++{
++ unsigned long page;
++
++ page = read_cr3();
++ page = ((unsigned long *) __va(page))[address >> 22];
++ if (oops_may_print())
++ printk(KERN_ALERT "*pde = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ /*
++ * We must not directly access the pte in the highpte
++ * case if the page table is located in highmem.
++ * And let's rather not kmap-atomic the pte, just in case
++ * it's allocated already.
++ */
++#ifdef CONFIG_HIGHPTE
++ if ((page >> PAGE_SHIFT) >= highstart_pfn)
++ return;
++#endif
++ if ((page & 1) && oops_may_print()) {
++ page &= PAGE_MASK;
++ address &= 0x003ff000;
++ page = machine_to_phys(page);
++ page = ((unsigned long *) __va(page))[address >> PAGE_SHIFT];
++ printk(KERN_ALERT "*pte = ma %08lx pa %08lx\n", page,
++ machine_to_phys(page));
++ }
++}
++#endif
++
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & 0x0c)
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & 0x02) && !pte_write(*pte))
++ return 0;
++#ifdef CONFIG_X86_PAE
++ if ((error_code & 0x10) && (__pte_val(*pte) & _PAGE_NX))
++ return 0;
++#endif
++
++ return 1;
++}
++
++static inline pmd_t *vmalloc_sync_one(pgd_t *pgd, unsigned long address)
++{
++ unsigned index = pgd_index(address);
++ pgd_t *pgd_k;
++ pud_t *pud, *pud_k;
++ pmd_t *pmd, *pmd_k;
++
++ pgd += index;
++ pgd_k = init_mm.pgd + index;
++
++ if (!pgd_present(*pgd_k))
++ return NULL;
++
++ /*
++ * set_pgd(pgd, *pgd_k); here would be useless on PAE
++ * and redundant with the set_pmd() on non-PAE. As would
++ * set_pud.
++ */
++
++ pud = pud_offset(pgd, address);
++ pud_k = pud_offset(pgd_k, address);
++ if (!pud_present(*pud_k))
++ return NULL;
++
++ pmd = pmd_offset(pud, address);
++ pmd_k = pmd_offset(pud_k, address);
++ if (!pmd_present(*pmd_k))
++ return NULL;
++ if (!pmd_present(*pmd))
++#if CONFIG_XEN_COMPAT > 0x030002
++ set_pmd(pmd, *pmd_k);
++#else
++ /*
++ * When running on older Xen we must launder *pmd_k through
++ * pmd_val() to ensure that _PAGE_PRESENT is correctly set.
++ */
++ set_pmd(pmd, __pmd(pmd_val(*pmd_k)));
++#endif
++ else
++ BUG_ON(pmd_page(*pmd) != pmd_page(*pmd_k));
++ return pmd_k;
++}
++
++/*
++ * Handle a fault on the vmalloc or module mapping area
++ *
++ * This assumes no large pages in there.
++ */
++static inline int vmalloc_fault(unsigned long address)
++{
++ unsigned long pgd_paddr;
++ pmd_t *pmd_k;
++ pte_t *pte_k;
++ /*
++ * Synchronize this task's top level page-table
++ * with the 'reference' page table.
++ *
++ * Do _not_ use "current" here. We might be inside
++ * an interrupt in the middle of a task switch..
++ */
++ pgd_paddr = read_cr3();
++ pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
++ if (!pmd_k)
++ return -1;
++ pte_k = pte_offset_kernel(pmd_k, address);
++ if (!pte_present(*pte_k))
++ return -1;
++ return 0;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ *
++ * error_code:
++ * bit 0 == 0 means no page found, 1 means protection fault
++ * bit 1 == 0 means read, 1 means write
++ * bit 2 == 0 means kernel, 1 means user-mode
++ * bit 3 == 1 means use of reserved bit detected
++ * bit 4 == 1 means fault was an instruction fetch
++ */
++fastcall void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ int write, si_code;
++
++ /* get the address */
++ address = read_cr2();
++
++ /* Set the "privileged fault" bit to something sane. */
++ error_code &= ~4;
++ error_code |= (regs->xcs & 2) << 1;
++ if (regs->eflags & X86_EFLAGS_VM)
++ error_code |= 4;
++
++ tsk = current;
++
++ si_code = SEGV_MAPERR;
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++ * (error_code & 4) == 0, and that the fault was not a
++ * protection error (error_code & 9) == 0.
++ */
++ if (unlikely(address >= TASK_SIZE)) {
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area can never be patched up. */
++ if (address >= hypervisor_virt_start)
++ goto bad_area_nosemaphore;
++#endif
++ if (!(error_code & 0x0000000d) && vmalloc_fault(address) >= 0)
++ return;
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ /* It's safe to allow irq's after cr2 has been saved and the vmalloc
++ fault has been handled. */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ mm = tsk->mm;
++
++ /*
++ * If we're in an interrupt, have no user context or are running in an
++ * atomic region then we must not take the fault..
++ */
++ if (in_atomic() || !mm)
++ goto bad_area_nosemaphore;
++
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++ * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++ * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & 4) == 0 &&
++ !search_exception_tables(regs->eip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (vma->vm_start <= address)
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++ if (error_code & 4) {
++ /*
++ * Accessing the stack below %esp is always a bug.
++ * The large cushion allows instructions like enter
++ * and pusha to work. ("enter $65535,$31" pushes
++ * 32 pointers and then decrements %esp by 65535.)
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->esp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & 3) {
++ default: /* 3: write, present */
++#ifdef TEST_VERIFY_AREA
++ if (regs->cs == GET_KERNEL_CS())
++ printk("WP fault at %08lx\n", regs->eip);
++#endif
++ /* fall through */
++ case 2: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case 1: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ survive:
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ case VM_FAULT_OOM:
++ goto out_of_memory;
++ default:
++ BUG();
++ }
++
++ /*
++ * Did it hit the DOS screen memory VA from vm86 mode?
++ */
++ if (regs->eflags & VM_MASK) {
++ unsigned long bit = (address - 0xA0000) >> PAGE_SHIFT;
++ if (bit < 32)
++ tsk->thread.screen_bitmap |= 1 << bit;
++ }
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & 4) {
++ /*
++ * Valid to do another page fault here because this one came
++ * from user space.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGSEGV, si_code, address, tsk);
++ return;
++ }
++
++#ifdef CONFIG_X86_F00F_BUG
++ /*
++ * Pentium F0 0F C7 C8 bug workaround.
++ */
++ if (boot_cpu_data.f00f_bug) {
++ unsigned long nr;
++
++ nr = (address - idt_descr.address) >> 3;
++
++ if (nr == 6) {
++ do_invalid_op(regs, 0);
++ return;
++ }
++ }
++#endif
++
++no_context:
++ /* Are we prepared to handle this kernel fault? */
++ if (fixup_exception(regs))
++ return;
++
++ /*
++ * Valid to do another page fault here, because if this fault
++ * had been triggered by is_prefetch fixup_exception would have
++ * handled it.
++ */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ bust_spinlocks(1);
++
++ if (oops_may_print()) {
++ #ifdef CONFIG_X86_PAE
++ if (error_code & 16) {
++ pte_t *pte = lookup_address(address);
++
++ if (pte && pte_present(*pte) && !pte_exec_kernel(*pte))
++ printk(KERN_CRIT "kernel tried to execute "
++ "NX-protected page - exploit attempt? "
++ "(uid: %d)\n", current->uid);
++ }
++ #endif
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "BUG: unable to handle kernel NULL "
++ "pointer dereference");
++ else
++ printk(KERN_ALERT "BUG: unable to handle kernel paging"
++ " request");
++ printk(" at virtual address %08lx\n",address);
++ printk(KERN_ALERT " printing eip:\n");
++ printk("%08lx\n", regs->eip);
++ }
++ dump_fault_path(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ die("Oops", regs, error_code);
++ bust_spinlocks(0);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (tsk->pid == 1) {
++ yield();
++ down_read(&mm->mmap_sem);
++ goto survive;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++ if (error_code & 4)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & 4))
++ goto no_context;
++
++ /* User space => ok to do another page fault */
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ force_sig_info_fault(SIGBUS, BUS_ADRERR, address, tsk);
++}
++
++#if !HAVE_SHARED_KERNEL_PMD
++void vmalloc_sync_all(void)
++{
++ /*
++ * Note that races in the updates of insync and start aren't
++ * problematic: insync can only get set bits added, and updates to
++ * start are only improving performance (without affecting correctness
++ * if undone).
++ * XEN: To work on PAE, we need to iterate over PMDs rather than PGDs.
++ * This change works just fine with 2-level paging too.
++ */
++#define sync_index(a) ((a) >> PMD_SHIFT)
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD*PTRS_PER_PMD);
++ static unsigned long start = TASK_SIZE;
++ unsigned long address;
++
++ BUILD_BUG_ON(TASK_SIZE & ~PGDIR_MASK);
++ for (address = start;
++ address >= TASK_SIZE && address < hypervisor_virt_start;
++ address += 1UL << PMD_SHIFT) {
++ if (!test_bit(sync_index(address), insync)) {
++ unsigned long flags;
++ struct page *page;
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ /* XEN: failure path assumes non-empty pgd_list. */
++ if (unlikely(!pgd_list)) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ return;
++ }
++ for (page = pgd_list; page; page =
++ (struct page *)page->index)
++ if (!vmalloc_sync_one(page_address(page),
++ address)) {
++ BUG_ON(page != pgd_list);
++ break;
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ if (!page)
++ set_bit(sync_index(address), insync);
++ }
++ if (address == start && test_bit(sync_index(address), insync))
++ start = address + (1UL << PMD_SHIFT);
++ }
++}
++#endif
+Index: head-2008-11-25/arch/x86/mm/highmem_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/highmem_32-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,183 @@
++#include <linux/highmem.h>
++#include <linux/module.h>
++
++void *kmap(struct page *page)
++{
++ might_sleep();
++ if (!PageHighMem(page))
++ return page_address(page);
++ return kmap_high(page);
++}
++
++void kunmap(struct page *page)
++{
++ if (in_interrupt())
++ BUG();
++ if (!PageHighMem(page))
++ return;
++ kunmap_high(page);
++}
++
++/*
++ * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
++ * no global lock is needed and because the kmap code must perform a global TLB
++ * invalidation when the kmap pool wraps.
++ *
++ * However, when holding an atomic kmap it is not legal to sleep, so atomic
++ * kmaps are appropriate for short, tight code paths only.
++ */
++static void *__kmap_atomic(struct page *page, enum km_type type, pgprot_t prot)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
++ inc_preempt_count();
++ if (!PageHighMem(page))
++ return page_address(page);
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++#ifdef CONFIG_DEBUG_HIGHMEM
++ if (!pte_none(*(kmap_pte-idx)))
++ BUG();
++#endif
++ set_pte_at_sync(&init_mm, vaddr, kmap_pte-idx, mk_pte(page, prot));
++
++ return (void*) vaddr;
++}
++
++void *kmap_atomic(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type, kmap_prot);
++}
++
++/* Same as kmap_atomic but with PAGE_KERNEL_RO page protection. */
++void *kmap_atomic_pte(struct page *page, enum km_type type)
++{
++ return __kmap_atomic(page, type,
++ test_bit(PG_pinned, &page->flags)
++ ? PAGE_KERNEL_RO : kmap_prot);
++}
++
++void kunmap_atomic(void *kvaddr, enum km_type type)
++{
++#if defined(CONFIG_DEBUG_HIGHMEM) || defined(CONFIG_XEN)
++ unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
++ enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();
++
++ if (vaddr < FIXADDR_START) { // FIXME
++ dec_preempt_count();
++ preempt_check_resched();
++ return;
++ }
++#endif
++
++#if defined(CONFIG_DEBUG_HIGHMEM)
++ if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
++ BUG();
++
++ /*
++ * force other mappings to Oops if they try to access
++ * this pte without first remapping it
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++ __flush_tlb_one(vaddr);
++#elif defined(CONFIG_XEN)
++ /*
++ * We must ensure there are no dangling pagetable references when
++ * returning memory to Xen (decrease_reservation).
++ * XXX TODO: We could make this faster by only zapping when
++ * kmap_flush_unused is called but that is trickier and more invasive.
++ */
++ pte_clear(&init_mm, vaddr, kmap_pte-idx);
++#endif
++
++ dec_preempt_count();
++ preempt_check_resched();
++}
++
++/* This is the same as kmap_atomic() but can map memory that doesn't
++ * have a struct page associated with it.
++ */
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
++{
++ enum fixed_addresses idx;
++ unsigned long vaddr;
++
++ inc_preempt_count();
++
++ idx = type + KM_TYPE_NR*smp_processor_id();
++ vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
++ set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
++ __flush_tlb_one(vaddr);
++
++ return (void*) vaddr;
++}
++
++struct page *kmap_atomic_to_page(void *ptr)
++{
++ unsigned long idx, vaddr = (unsigned long)ptr;
++ pte_t *pte;
++
++ if (vaddr < FIXADDR_START)
++ return virt_to_page(ptr);
++
++ idx = virt_to_fix(vaddr);
++ pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
++ return pte_page(*pte);
++}
++
++void clear_highpage(struct page *page)
++{
++ void *kaddr;
++
++ if (likely(xen_feature(XENFEAT_highmem_assist))
++ && PageHighMem(page)) {
++ struct mmuext_op meo;
++
++ meo.cmd = MMUEXT_CLEAR_PAGE;
++ meo.arg1.mfn = pfn_to_mfn(page_to_pfn(page));
++ if (HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
++ return;
++ }
++
++ kaddr = kmap_atomic(page, KM_USER0);
++ clear_page(kaddr);
++ kunmap_atomic(kaddr, KM_USER0);
++}
++
++void copy_highpage(struct page *to, struct page *from)
++{
++ void *vfrom, *vto;
++
++ if (likely(xen_feature(XENFEAT_highmem_assist))
++ && (PageHighMem(from) || PageHighMem(to))) {
++ unsigned long from_pfn = page_to_pfn(from);
++ unsigned long to_pfn = page_to_pfn(to);
++ struct mmuext_op meo;
++
++ meo.cmd = MMUEXT_COPY_PAGE;
++ meo.arg1.mfn = pfn_to_mfn(to_pfn);
++ meo.arg2.src_mfn = pfn_to_mfn(from_pfn);
++ if (mfn_to_pfn(meo.arg2.src_mfn) == from_pfn
++ && mfn_to_pfn(meo.arg1.mfn) == to_pfn
++ && HYPERVISOR_mmuext_op(&meo, 1, NULL, DOMID_SELF) == 0)
++ return;
++ }
++
++ vfrom = kmap_atomic(from, KM_USER0);
++ vto = kmap_atomic(to, KM_USER1);
++ copy_page(vto, vfrom);
++ kunmap_atomic(vfrom, KM_USER0);
++ kunmap_atomic(vto, KM_USER1);
++}
++
++EXPORT_SYMBOL(kmap);
++EXPORT_SYMBOL(kunmap);
++EXPORT_SYMBOL(kmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_pte);
++EXPORT_SYMBOL(kunmap_atomic);
++EXPORT_SYMBOL(kmap_atomic_to_page);
++EXPORT_SYMBOL(clear_highpage);
++EXPORT_SYMBOL(copy_highpage);
+Index: head-2008-11-25/arch/x86/mm/hypervisor.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/hypervisor.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,547 @@
++/******************************************************************************
++ * mm/hypervisor.c
++ *
++ * Update page tables via the hypervisor.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/vmalloc.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/features.h>
++#include <xen/interface/memory.h>
++#include <linux/module.h>
++#include <linux/percpu.h>
++#include <asm/tlbflush.h>
++#include <linux/highmem.h>
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val)
++{
++ mmu_update_t u;
++#ifdef CONFIG_HIGHPTE
++ u.ptr = ((unsigned long)ptr >= (unsigned long)high_memory) ?
++ arbitrary_virt_to_machine(ptr) : virt_to_machine(ptr);
++#else
++ u.ptr = virt_to_machine(ptr);
++#endif
++ u.val = __pte_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL_GPL(xen_l1_entry_update);
++
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pmd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++
++#if defined(CONFIG_X86_PAE) || defined(CONFIG_X86_64)
++void xen_l3_entry_update(pud_t *ptr, pud_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pud_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif
++
++#ifdef CONFIG_X86_64
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val)
++{
++ mmu_update_t u;
++ u.ptr = virt_to_machine(ptr);
++ u.val = __pgd_val(val);
++ BUG_ON(HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF) < 0);
++}
++#endif /* CONFIG_X86_64 */
++
++void xen_pt_switch(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_new_user_pt(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_NEW_USER_BASEPTR;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_tlb_flush);
++
++void xen_invlpg(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_LOCAL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++EXPORT_SYMBOL(xen_invlpg);
++
++#ifdef CONFIG_SMP
++
++void xen_tlb_flush_all(void)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_TLB_FLUSH_ALL;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_tlb_flush_mask(cpumask_t *mask)
++{
++ struct mmuext_op op;
++ if (cpus_empty(*mask))
++ return;
++ op.cmd = MMUEXT_TLB_FLUSH_MULTI;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_all(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_INVLPG_ALL;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr)
++{
++ struct mmuext_op op;
++ if (cpus_empty(*mask))
++ return;
++ op.cmd = MMUEXT_INVLPG_MULTI;
++ op.arg1.linear_addr = ptr & PAGE_MASK;
++ set_xen_guest_handle(op.arg2.vcpumask, mask->bits);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++#endif /* CONFIG_SMP */
++
++void xen_pgd_pin(unsigned long ptr)
++{
++ struct mmuext_op op;
++#ifdef CONFIG_X86_64
++ op.cmd = MMUEXT_PIN_L4_TABLE;
++#elif defined(CONFIG_X86_PAE)
++ op.cmd = MMUEXT_PIN_L3_TABLE;
++#else
++ op.cmd = MMUEXT_PIN_L2_TABLE;
++#endif
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_pgd_unpin(unsigned long ptr)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_UNPIN_TABLE;
++ op.arg1.mfn = pfn_to_mfn(ptr >> PAGE_SHIFT);
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++void xen_set_ldt(const void *ptr, unsigned int ents)
++{
++ struct mmuext_op op;
++ op.cmd = MMUEXT_SET_LDT;
++ op.arg1.linear_addr = (unsigned long)ptr;
++ op.arg2.nr_ents = ents;
++ BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0);
++}
++
++/* Protected by balloon_lock. */
++#define MAX_CONTIG_ORDER 9 /* 2MB */
++static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER];
++static unsigned long limited_frames[1<<MAX_CONTIG_ORDER];
++static multicall_entry_t cr_mcl[1<<MAX_CONTIG_ORDER];
++
++/* Ensure multi-page extents are contiguous in machine memory. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits)
++{
++ unsigned long *in_frames = discontig_frames, out_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ /*
++ * Currently an auto-translated guest will not perform I/O, nor will
++ * it require PAE page directories below 4GB. Therefore any calls to
++ * this function are redundant and can be ignored.
++ */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, &out_frame);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs, remembering MFNs. */
++ for (i = 0; i < (1U<<order); i++) {
++ in_frames[i] = pfn_to_mfn((__pa(vstart) >> PAGE_SHIFT) + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 2. Get a new contiguous memory extent. */
++ out_frame = __pa(vstart) >> PAGE_SHIFT;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == (1UL << order));
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != (1UL << order))
++ BUG();
++ success = (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) == 1);
++ if (!success) {
++ /* Couldn't get special memory: fall back to normal. */
++ for (i = 0; i < (1U<<order); i++)
++ in_frames[i] = (__pa(vstart)>>PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.in) != (1UL<<order))
++ BUG();
++ }
++ }
++#endif
++
++ /* 3. Map the new extent in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? (out_frame + i) : in_frames[i];
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_create_contiguous_region);
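++
++/*
++ * Editor's sketch (hypothetical caller): a driver needing a
++ * machine-contiguous, 32-bit addressable buffer might do
++ *
++ *	unsigned long va = __get_free_pages(GFP_KERNEL, 2);
++ *
++ *	if (va && xen_create_contiguous_region(va, 2, 32) != 0)
++ *		free_pages(va, 2);
++ *
++ * where order 2 exchanges 4 pseudo-physical pages for one extent
++ * whose machine frames are consecutive and below 4GB.
++ */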
++
++void xen_destroy_contiguous_region(unsigned long vstart, unsigned int order)
++{
++ unsigned long *out_frames = discontig_frames, in_frame;
++ unsigned long frame, flags;
++ unsigned int i;
++ int rc, success;
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .nr_extents = 1,
++ .extent_order = order,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .nr_extents = 1UL << order,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return;
++
++ set_xen_guest_handle(exchange.in.extent_start, &in_frame);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ scrub_pages((void *)vstart, 1 << order);
++
++ balloon_lock(flags);
++
++ /* 1. Find start MFN of contiguous extent. */
++ in_frame = pfn_to_mfn(__pa(vstart) >> PAGE_SHIFT);
++
++ /* 2. Zap current PTEs. */
++ for (i = 0; i < (1U<<order); i++) {
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ __pte_ma(0), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i,
++ INVALID_P2M_ENTRY);
++ out_frames[i] = (__pa(vstart) >> PAGE_SHIFT) + i;
++ }
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ /* 3. Do the exchange for non-contiguous MFNs. */
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == 1);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != 1)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != (1UL << order))
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 4. Map new pages in place of old pages. */
++ for (i = 0; i < (1U<<order); i++) {
++ frame = success ? out_frames[i] : (in_frame + i);
++ MULTI_update_va_mapping(cr_mcl + i, vstart + (i*PAGE_SIZE),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++ set_phys_to_machine((__pa(vstart)>>PAGE_SHIFT)+i, frame);
++ }
++
++ cr_mcl[i - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, i, NULL))
++ BUG();
++
++ balloon_unlock(flags);
++}
++EXPORT_SYMBOL_GPL(xen_destroy_contiguous_region);
++
++int xen_limit_pages_to_max_mfn(
++ struct page *pages, unsigned int order, unsigned int address_bits)
++{
++ unsigned long flags, frame;
++ unsigned long *in_frames = discontig_frames, *out_frames = limited_frames;
++ struct page *page;
++ unsigned int i, n, nr_mcl;
++ int rc, success;
++ DECLARE_BITMAP(limit_map, 1 << MAX_CONTIG_ORDER);
++
++ struct xen_memory_exchange exchange = {
++ .in = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ },
++ .out = {
++ .extent_order = 0,
++ .address_bits = address_bits,
++ .domid = DOMID_SELF
++ }
++ };
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 0;
++
++ if (unlikely(order > MAX_CONTIG_ORDER))
++ return -ENOMEM;
++
++ bitmap_zero(limit_map, 1U << order);
++ set_xen_guest_handle(exchange.in.extent_start, in_frames);
++ set_xen_guest_handle(exchange.out.extent_start, out_frames);
++
++ /* 0. Scrub the pages. */
++ for (i = 0, n = 0; i < 1U<<order ; i++) {
++ page = &pages[i];
++ if (!(pfn_to_mfn(page_to_pfn(page)) >> (address_bits - PAGE_SHIFT)))
++ continue;
++ __set_bit(i, limit_map);
++
++ if (!PageHighMem(page))
++ scrub_pages(page_address(page), 1);
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ scrub_pages(kmap(page), 1);
++ kunmap(page);
++ ++n;
++ }
++#endif
++ }
++ if (bitmap_empty(limit_map, 1U << order))
++ return 0;
++
++ if (n)
++ kmap_flush_unused();
++
++ balloon_lock(flags);
++
++ /* 1. Zap current PTEs (if any), remembering MFNs. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if (!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ out_frames[n] = page_to_pfn(page);
++ in_frames[n] = pfn_to_mfn(out_frames[n]);
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ __pte_ma(0), 0);
++
++ set_phys_to_machine(out_frames[n], INVALID_P2M_ENTRY);
++ ++n;
++ }
++ if (nr_mcl && HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++
++ /* 2. Get new memory below the required limit. */
++ exchange.in.nr_extents = n;
++ exchange.out.nr_extents = n;
++ rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange);
++ success = (exchange.nr_exchanged == n);
++ BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0)));
++ BUG_ON(success && (rc != 0));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ /* Compatibility when XENMEM_exchange is unsupported. */
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &exchange.in) != n)
++ BUG();
++ if (HYPERVISOR_memory_op(XENMEM_populate_physmap,
++ &exchange.out) != n)
++ BUG();
++ success = 1;
++ }
++#endif
++
++ /* 3. Map the new pages in place of old pages. */
++ for (i = 0, n = 0, nr_mcl = 0; i < (1U<<order); i++) {
++ if (!test_bit(i, limit_map))
++ continue;
++ page = &pages[i];
++
++ frame = success ? out_frames[n] : in_frames[n];
++
++ if (!PageHighMem(page))
++ MULTI_update_va_mapping(cr_mcl + nr_mcl++,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(frame, PAGE_KERNEL), 0);
++
++ set_phys_to_machine(page_to_pfn(page), frame);
++ ++n;
++ }
++ if (nr_mcl) {
++ cr_mcl[nr_mcl - 1].args[MULTI_UVMFLAGS_INDEX] = order
++ ? UVMF_TLB_FLUSH|UVMF_ALL
++ : UVMF_INVLPG|UVMF_ALL;
++ if (HYPERVISOR_multicall_check(cr_mcl, nr_mcl, NULL))
++ BUG();
++ }
++
++ balloon_unlock(flags);
++
++ return success ? 0 : -ENOMEM;
++}
++EXPORT_SYMBOL_GPL(xen_limit_pages_to_max_mfn);
++
++#ifdef __i386__
++int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b)
++{
++ __u32 *lp = (__u32 *)((char *)ldt + entry * 8);
++ maddr_t mach_lp = arbitrary_virt_to_machine(lp);
++ return HYPERVISOR_update_descriptor(
++ mach_lp, (u64)entry_a | ((u64)entry_b<<32));
++}
++#endif
++
++#define MAX_BATCHED_FULL_PTES 32
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot)
++{
++ int rc = 0, i = 0;
++ mmu_update_t u[MAX_BATCHED_FULL_PTES];
++ pte_t *pte;
++ spinlock_t *ptl;
++
++ if (!xen_feature(XENFEAT_mmu_pt_update_preserve_ad))
++ return 0;
++
++ pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
++ do {
++ if (pte_present(*pte)) {
++ u[i].ptr = (__pmd_val(*pmd) & PHYSICAL_PAGE_MASK)
++ | ((unsigned long)pte & ~PAGE_MASK)
++ | MMU_PT_UPDATE_PRESERVE_AD;
++ u[i].val = __pte_val(pte_modify(*pte, newprot));
++ if (++i == MAX_BATCHED_FULL_PTES) {
++ if ((rc = HYPERVISOR_mmu_update(
++ &u[0], i, NULL, DOMID_SELF)) != 0)
++ break;
++ i = 0;
++ }
++ }
++ } while (pte++, addr += PAGE_SIZE, addr != end);
++ if (i)
++ rc = HYPERVISOR_mmu_update( &u[0], i, NULL, DOMID_SELF);
++ pte_unmap_unlock(pte - 1, ptl);
++ BUG_ON(rc && rc != -ENOSYS);
++ return !rc;
++}
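++
++/*
++ * Editor's note: a nonzero return tells the mprotect path that the
++ * PTEs were rewritten by the hypervisor with
++ * MMU_PT_UPDATE_PRESERVE_AD (accessed/dirty bits preserved), while 0
++ * asks the caller to fall back to the generic per-PTE rewrite.
++ */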
+Index: head-2008-11-25/arch/x86/mm/init_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/init_32-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,840 @@
++/*
++ * linux/arch/i386/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#include <linux/module.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/hugetlb.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/poison.h>
++#include <linux/bootmem.h>
++#include <linux/slab.h>
++#include <linux/proc_fs.h>
++#include <linux/efi.h>
++#include <linux/memory_hotplug.h>
++#include <linux/initrd.h>
++#include <linux/cpumask.h>
++#include <linux/dma-mapping.h>
++#include <linux/scatterlist.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/sections.h>
++#include <asm/hypervisor.h>
++#include <asm/swiotlb.h>
++
++unsigned int __VMALLOC_RESERVE = 128 << 20;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++unsigned long highstart_pfn, highend_pfn;
++
++static int noinline do_test_wp_bit(void);
++
++/*
++ * Creates a middle page table and puts a pointer to it in the
++ * given global directory entry. This only returns the gd entry
++ * in non-PAE compilation mode, since the middle layer is folded.
++ */
++static pmd_t * __init one_md_table_init(pgd_t *pgd)
++{
++ pud_t *pud;
++ pmd_t *pmd_table;
++
++#ifdef CONFIG_X86_PAE
++ pmd_table = (pmd_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(pmd_table, XENFEAT_writable_page_tables);
++ set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
++ pud = pud_offset(pgd, 0);
++ if (pmd_table != pmd_offset(pud, 0))
++ BUG();
++#else
++ pud = pud_offset(pgd, 0);
++ pmd_table = pmd_offset(pud, 0);
++#endif
++
++ return pmd_table;
++}
++
++/*
++ * Create a page table and place a pointer to it in a middle page
++ * directory entry.
++ */
++static pte_t * __init one_page_table_init(pmd_t *pmd)
++{
++ if (pmd_none(*pmd)) {
++ pte_t *page_table = (pte_t *) alloc_bootmem_low_pages(PAGE_SIZE);
++ make_lowmem_page_readonly(page_table,
++ XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
++ if (page_table != pte_offset_kernel(pmd, 0))
++ BUG();
++
++ return page_table;
++ }
++
++ return pte_offset_kernel(pmd, 0);
++}
++
++/*
++ * This function initializes a certain range of kernel virtual memory
++ * with new bootmem page tables, everywhere page tables are missing in
++ * the given range.
++ */
++
++/*
++ * NOTE: The pagetables are allocated contiguous on the physical space
++ * so we can cache the place of the first one and move around without
++ * checking the pgd every time.
++ */
++static void __init page_table_range_init (unsigned long start, unsigned long end, pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ int pgd_idx, pmd_idx;
++ unsigned long vaddr;
++
++ vaddr = start;
++ pgd_idx = pgd_index(vaddr);
++ pmd_idx = pmd_index(vaddr);
++ pgd = pgd_base + pgd_idx;
++
++ for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
++ if (pgd_none(*pgd))
++ one_md_table_init(pgd);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) {
++ if (vaddr < hypervisor_virt_start && pmd_none(*pmd))
++ one_page_table_init(pmd);
++
++ vaddr += PMD_SIZE;
++ }
++ pmd_idx = 0;
++ }
++}
++
++static inline int is_kernel_text(unsigned long addr)
++{
++ if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
++ return 1;
++ return 0;
++}
++
++/*
++ * This maps the physical memory to kernel virtual address space, a total
++ * of max_low_pfn pages, by creating page tables starting from address
++ * PAGE_OFFSET.
++ */
++static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
++{
++ unsigned long pfn;
++ pgd_t *pgd;
++ pmd_t *pmd;
++ pte_t *pte;
++ int pgd_idx, pmd_idx, pte_ofs;
++
++ unsigned long max_ram_pfn = xen_start_info->nr_pages;
++ if (max_ram_pfn > max_low_pfn)
++ max_ram_pfn = max_low_pfn;
++
++ pgd_idx = pgd_index(PAGE_OFFSET);
++ pgd = pgd_base + pgd_idx;
++ pfn = 0;
++ pmd_idx = pmd_index(PAGE_OFFSET);
++ pte_ofs = pte_index(PAGE_OFFSET);
++
++ for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
++#ifdef CONFIG_XEN
++ /*
++ * Native Linux doesn't have PAE paging enabled yet at
++ * this point. When running as a Xen domain we are in
++ * PAE mode already, thus we can't simply hook in an
++ * empty pmd. That would kill the mappings we are
++ * currently using ...
++ */
++ pmd = pmd_offset(pud_offset(pgd, PAGE_OFFSET), PAGE_OFFSET);
++#else
++ pmd = one_md_table_init(pgd);
++#endif
++ if (pfn >= max_low_pfn)
++ continue;
++ pmd += pmd_idx;
++ for (; pmd_idx < PTRS_PER_PMD && pfn < max_low_pfn; pmd++, pmd_idx++) {
++ unsigned int address = pfn * PAGE_SIZE + PAGE_OFFSET;
++ if (address >= hypervisor_virt_start)
++ continue;
++
++ /* Map with big pages if possible, otherwise create normal page tables. */
++ if (cpu_has_pse) {
++ unsigned int address2 = (pfn + PTRS_PER_PTE - 1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1;
++
++ if (is_kernel_text(address) || is_kernel_text(address2))
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC));
++ else
++ set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE));
++ pfn += PTRS_PER_PTE;
++ } else {
++ pte = one_page_table_init(pmd);
++
++ pte += pte_ofs;
++ for (; pte_ofs < PTRS_PER_PTE && pfn < max_low_pfn; pte++, pfn++, pte_ofs++) {
++ /* XEN: Only map initial RAM allocation. */
++ if ((pfn >= max_ram_pfn) || pte_present(*pte))
++ continue;
++ if (is_kernel_text(address))
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC));
++ else
++ set_pte(pte, pfn_pte(pfn, PAGE_KERNEL));
++ }
++ pte_ofs = 0;
++ }
++ }
++ pmd_idx = 0;
++ }
++}
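++
++/*
++ * Editor's example: in the large-page branch one pmd entry maps
++ * PTRS_PER_PTE small pages at once, i.e. 1024 x 4KB = 4MB without
++ * PAE or 512 x 4KB = 2MB with PAE, which is why pfn advances by
++ * PTRS_PER_PTE there but only by one per pte in the 4KB branch.
++ */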
++
++#ifndef CONFIG_XEN
++
++static inline int page_kills_ppro(unsigned long pagenr)
++{
++ if (pagenr >= 0x70000 && pagenr <= 0x7003F)
++ return 1;
++ return 0;
++}
++
++#else
++
++#define page_kills_ppro(p) 0
++
++#endif
++
++extern int is_available_memory(efi_memory_desc_t *);
++
++int page_is_ram(unsigned long pagenr)
++{
++ int i;
++ unsigned long addr, end;
++
++ if (efi_enabled) {
++ efi_memory_desc_t *md;
++ void *p;
++
++ for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
++ md = p;
++ if (!is_available_memory(md))
++ continue;
++ addr = (md->phys_addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT)) >> PAGE_SHIFT;
++
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++ }
++
++ for (i = 0; i < e820.nr_map; i++) {
++
++ if (e820.map[i].type != E820_RAM) /* not usable memory */
++ continue;
++ /*
++ * !!!FIXME!!! Some BIOSen report areas as RAM that
++ * are not. Notably the 640->1Mb area. We need a sanity
++ * check here.
++ */
++ addr = (e820.map[i].addr+PAGE_SIZE-1) >> PAGE_SHIFT;
++ end = (e820.map[i].addr+e820.map[i].size) >> PAGE_SHIFT;
++ if ((pagenr >= addr) && (pagenr < end))
++ return 1;
++ }
++ return 0;
++}
++
++#ifdef CONFIG_HIGHMEM
++pte_t *kmap_pte;
++pgprot_t kmap_prot;
++
++#define kmap_get_fixmap_pte(vaddr) \
++ pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), (vaddr)), (vaddr))
++
++static void __init kmap_init(void)
++{
++ unsigned long kmap_vstart;
++
++ /* cache the first kmap pte */
++ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
++ kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
++
++ kmap_prot = PAGE_KERNEL;
++}
++
++static void __init permanent_kmaps_init(pgd_t *pgd_base)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ unsigned long vaddr;
++
++ vaddr = PKMAP_BASE;
++ page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);
++
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ pte = pte_offset_kernel(pmd, vaddr);
++ pkmap_page_table = pte;
++}
++
++static void __meminit free_new_highpage(struct page *page, int pfn)
++{
++ init_page_count(page);
++ if (pfn < xen_start_info->nr_pages)
++ __free_page(page);
++ totalhigh_pages++;
++}
++
++void __init add_one_highpage_init(struct page *page, int pfn, int bad_ppro)
++{
++ if (page_is_ram(pfn) && !(bad_ppro && page_kills_ppro(pfn))) {
++ ClearPageReserved(page);
++ free_new_highpage(page, pfn);
++ } else
++ SetPageReserved(page);
++}
++
++static int add_one_highpage_hotplug(struct page *page, unsigned long pfn)
++{
++ free_new_highpage(page, pfn);
++ totalram_pages++;
++#ifdef CONFIG_FLATMEM
++ max_mapnr = max(pfn, max_mapnr);
++#endif
++ num_physpages++;
++ return 0;
++}
++
++/*
++ * Not currently handling the NUMA case.
++ * Assuming a single node; all memory that has been added
++ * dynamically and would be onlined here is in HIGHMEM.
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ add_one_highpage_hotplug(page, page_to_pfn(page));
++}
++
++
++#ifdef CONFIG_NUMA
++extern void set_highmem_pages_init(int);
++#else
++static void __init set_highmem_pages_init(int bad_ppro)
++{
++ int pfn;
++ for (pfn = highstart_pfn; pfn < highend_pfn; pfn++)
++ add_one_highpage_init(pfn_to_page(pfn), pfn, bad_ppro);
++ totalram_pages += totalhigh_pages;
++}
++#endif /* CONFIG_NUMA */
++
++#else
++#define kmap_init() do { } while (0)
++#define permanent_kmaps_init(pgd_base) do { } while (0)
++#define set_highmem_pages_init(bad_ppro) do { } while (0)
++#endif /* CONFIG_HIGHMEM */
++
++unsigned long long __PAGE_KERNEL = _PAGE_KERNEL;
++EXPORT_SYMBOL(__PAGE_KERNEL);
++unsigned long long __PAGE_KERNEL_EXEC = _PAGE_KERNEL_EXEC;
++
++#ifdef CONFIG_NUMA
++extern void __init remap_numa_kva(void);
++#else
++#define remap_numa_kva() do {} while (0)
++#endif
++
++pgd_t *swapper_pg_dir;
++
++static void __init pagetable_init (void)
++{
++ unsigned long vaddr;
++ pgd_t *pgd_base = (pgd_t *)xen_start_info->pt_base;
++
++ /* Enable PSE if available */
++ if (cpu_has_pse) {
++ set_in_cr4(X86_CR4_PSE);
++ }
++
++ /* Enable PGE if available */
++ if (cpu_has_pge) {
++ set_in_cr4(X86_CR4_PGE);
++ __PAGE_KERNEL |= _PAGE_GLOBAL;
++ __PAGE_KERNEL_EXEC |= _PAGE_GLOBAL;
++ }
++
++ kernel_physical_mapping_init(pgd_base);
++ remap_numa_kva();
++
++ /*
++ * Fixed mappings, only the page table structure has to be
++ * created - mappings will be set by set_fixmap():
++ */
++ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
++ page_table_range_init(vaddr, hypervisor_virt_start, pgd_base);
++
++ permanent_kmaps_init(pgd_base);
++}
++
++#if defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_ACPI_SLEEP)
++/*
++ * Swap suspend & friends need this for resume because things like the intel-agp
++ * driver might have split up a kernel 4MB mapping.
++ */
++char __nosavedata swsusp_pg_dir[PAGE_SIZE]
++ __attribute__ ((aligned (PAGE_SIZE)));
++
++static inline void save_pg_dir(void)
++{
++ memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
++}
++#else
++static inline void save_pg_dir(void)
++{
++}
++#endif
++
++void zap_low_mappings (void)
++{
++ int i;
++
++ save_pg_dir();
++
++ /*
++ * Zap initial low-memory mappings.
++ *
++ * Note that "pgd_clear()" doesn't do it for
++ * us, because pgd_clear() is a no-op on i386.
++ */
++ for (i = 0; i < USER_PTRS_PER_PGD; i++)
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ set_pgd(swapper_pg_dir+i, __pgd(1 + __pa(empty_zero_page)));
++#else
++ set_pgd(swapper_pg_dir+i, __pgd(0));
++#endif
++ flush_tlb_all();
++}
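++/*
++ * In the PAE branch above, the "1 +" sets _PAGE_PRESENT, so the zapped
++ * slots keep pointing at the all-zero empty_zero_page instead of being
++ * cleared; PAE top-level entries stay present this way.
++ */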
++
++static int disable_nx __initdata = 0;
++u64 __supported_pte_mask __read_mostly = ~_PAGE_NX;
++EXPORT_SYMBOL(__supported_pte_mask);
++
++/*
++ * noexec = on|off
++ *
++ * Control non executable mappings.
++ *
++ * on Enable
++ * off Disable
++ */
++void __init noexec_setup(const char *str)
++{
++ if (!strncmp(str, "on", 2) && cpu_has_nx) {
++ __supported_pte_mask |= _PAGE_NX;
++ disable_nx = 0;
++ } else if (!strncmp(str, "off", 3)) {
++ disable_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
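++/* e.g. booting with "noexec=off" forces _PAGE_NX off even on NX-capable CPUs. */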
++
++int nx_enabled = 0;
++#ifdef CONFIG_X86_PAE
++
++static void __init set_nx(void)
++{
++ unsigned int v[4], l, h;
++
++ if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
++ cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
++ if ((v[3] & (1 << 20)) && !disable_nx) {
++ rdmsr(MSR_EFER, l, h);
++ l |= EFER_NX;
++ wrmsr(MSR_EFER, l, h);
++ nx_enabled = 1;
++ __supported_pte_mask |= _PAGE_NX;
++ }
++ }
++}
++
++/*
++ * Enables/disables executability of a given kernel page and
++ * returns the previous setting.
++ */
++int __init set_kernel_exec(unsigned long vaddr, int enable)
++{
++ pte_t *pte;
++ int ret = 1;
++
++ if (!nx_enabled)
++ goto out;
++
++ pte = lookup_address(vaddr);
++ BUG_ON(!pte);
++
++ if (!pte_exec_kernel(*pte))
++ ret = 0;
++
++ if (enable)
++ pte->pte_high &= ~(1 << (_PAGE_BIT_NX - 32));
++ else
++ pte->pte_high |= 1 << (_PAGE_BIT_NX - 32);
++ __flush_tlb_all();
++out:
++ return ret;
++}
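++/*
++ * _PAGE_BIT_NX is bit 63 of a PAE PTE, i.e. bit (63 - 32) = 31 of
++ * pte_high, which is the bit set_kernel_exec() above toggles.
++ */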
++
++#endif
++
++/*
++ * paging_init() sets up the page tables - note that the first 8MB are
++ * already mapped by head.S.
++ *
++ * This routine also unmaps the page at virtual kernel address 0, so
++ * that we can trap those pesky NULL-reference errors in the kernel.
++ */
++void __init paging_init(void)
++{
++ int i;
++
++#ifdef CONFIG_X86_PAE
++ set_nx();
++ if (nx_enabled)
++ printk("NX (Execute Disable) protection: active\n");
++#endif
++
++ pagetable_init();
++
++#if defined(CONFIG_X86_PAE) && !defined(CONFIG_XEN)
++ /*
++ * We will bail out later - printk doesn't work right now so
++ * the user would just see a hanging kernel.
++ * When running as a Xen domain we are already in PAE mode at
++ * this point.
++ */
++ if (cpu_has_pae)
++ set_in_cr4(X86_CR4_PAE);
++#endif
++ __flush_tlb_all();
++
++ kmap_init();
++
++ /* Switch to the real shared_info page, and clear the
++ * dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Setup mapping of lower 1st MB */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_machine(empty_zero_page),
++ PAGE_KERNEL_RO);
++}
++
++/*
++ * Test if the WP bit works in supervisor mode. It isn't supported on 386s
++ * and also on some strange 486s (NexGen etc.). All 586+ CPUs are OK. This
++ * used to involve black magic jumps to work around some nasty CPU bugs,
++ * but fortunately the switch to using exceptions got rid of all that.
++ */
++
++static void __init test_wp_bit(void)
++{
++ printk("Checking if this processor honours the WP bit even in supervisor mode... ");
++
++ /* Any page-aligned address will do, the test is non-destructive */
++ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY);
++ boot_cpu_data.wp_works_ok = do_test_wp_bit();
++ clear_fixmap(FIX_WP_TEST);
++
++ if (!boot_cpu_data.wp_works_ok) {
++ printk("No.\n");
++#ifdef CONFIG_X86_WP_WORKS_OK
++ panic("This kernel doesn't support CPU's with broken WP. Recompile it for a 386!");
++#endif
++ } else {
++ printk("Ok.\n");
++ }
++}
++
++static void __init set_max_mapnr_init(void)
++{
++#ifdef CONFIG_HIGHMEM
++ num_physpages = highend_pfn;
++#else
++ num_physpages = max_low_pfn;
++#endif
++#ifdef CONFIG_FLATMEM
++ max_mapnr = num_physpages;
++#endif
++}
++
++static struct kcore_list kcore_mem, kcore_vmalloc;
++
++void __init mem_init(void)
++{
++ extern int ppro_with_ram_bug(void);
++ int codesize, reservedpages, datasize, initsize;
++ int tmp;
++ int bad_ppro;
++ unsigned long pfn;
++
++#if defined(CONFIG_SWIOTLB)
++ swiotlb_init();
++#endif
++
++#ifdef CONFIG_FLATMEM
++ if (!mem_map)
++ BUG();
++#endif
++
++ bad_ppro = ppro_with_ram_bug();
++
++#ifdef CONFIG_HIGHMEM
++ /* check that fixmap and pkmap do not overlap */
++ if (PKMAP_BASE+LAST_PKMAP*PAGE_SIZE >= FIXADDR_START) {
++ printk(KERN_ERR "fixmap and kmap areas overlap - this will crash\n");
++ printk(KERN_ERR "pkstart: %lxh pkend: %lxh fixstart %lxh\n",
++ PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, FIXADDR_START);
++ BUG();
++ }
++#endif
++
++ set_max_mapnr_init();
++
++#ifdef CONFIG_HIGHMEM
++ high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1;
++#else
++ high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1;
++#endif
++ printk("vmalloc area: %lx-%lx, maxmem %lx\n",
++ VMALLOC_START,VMALLOC_END,MAXMEM);
++ BUG_ON(VMALLOC_START > VMALLOC_END);
++
++ /* this will put all low memory onto the freelists */
++ totalram_pages += free_all_bootmem();
++ /* XEN: init and count low-mem pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_low_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++
++ reservedpages = 0;
++ for (tmp = 0; tmp < max_low_pfn; tmp++)
++ /*
++ * Only count reserved RAM pages
++ */
++ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp)))
++ reservedpages++;
++
++ set_highmem_pages_init(bad_ppro);
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++
++ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ num_physpages << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10,
++ (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10))
++ );
++
++#ifdef CONFIG_X86_PAE
++ if (!cpu_has_pae)
++ panic("cannot execute a PAE-enabled kernel on a PAE-less CPU!");
++#endif
++ if (boot_cpu_data.wp_works_ok < 0)
++ test_wp_bit();
++
++ /*
++ * Subtle. SMP is doing its boot stuff late (because it has to
++ * fork idle threads) - but it also needs low mappings for the
++ * protected-mode entry to work. We zap these entries only after
++ * the WP-bit has been tested.
++ */
++#ifndef CONFIG_SMP
++ zap_low_mappings();
++#endif
++
++ set_bit(PG_pinned, &virt_to_page(init_mm.pgd)->flags);
++}
++
++/*
++ * This is for the non-NUMA, single-node SMP case.
++ * Specifically, on x86 we will always add
++ * memory to highmem for now.
++ */
++#ifdef CONFIG_MEMORY_HOTPLUG
++#ifndef CONFIG_NEED_MULTIPLE_NODES
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdata = &contig_page_data;
++ struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++
++ return __add_pages(zone, start_pfn, nr_pages);
++}
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++#endif
++#endif
++
++kmem_cache_t *pgd_cache;
++kmem_cache_t *pmd_cache;
++
++void __init pgtable_cache_init(void)
++{
++ if (PTRS_PER_PMD > 1) {
++ pmd_cache = kmem_cache_create("pmd",
++ PTRS_PER_PMD*sizeof(pmd_t),
++ PTRS_PER_PMD*sizeof(pmd_t),
++ 0,
++ pmd_ctor,
++ NULL);
++ if (!pmd_cache)
++ panic("pgtable_cache_init(): cannot create pmd cache");
++ }
++ pgd_cache = kmem_cache_create("pgd",
++#ifndef CONFIG_XEN
++ PTRS_PER_PGD*sizeof(pgd_t),
++ PTRS_PER_PGD*sizeof(pgd_t),
++#else
++ PAGE_SIZE,
++ PAGE_SIZE,
++#endif
++ 0,
++ pgd_ctor,
++ PTRS_PER_PMD == 1 ? pgd_dtor : NULL);
++ if (!pgd_cache)
++ panic("pgtable_cache_init(): Cannot create pgd cache");
++}
++
++/*
++ * This function cannot be __init, since exceptions don't work in that
++ * section. Put this after the callers, so that it cannot be inlined.
++ */
++static int noinline do_test_wp_bit(void)
++{
++ char tmp_reg;
++ int flag;
++
++ __asm__ __volatile__(
++ " movb %0,%1 \n"
++ "1: movb %1,%0 \n"
++ " xorl %2,%2 \n"
++ "2: \n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4 \n"
++ " .long 1b,2b \n"
++ ".previous \n"
++ :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)),
++ "=q" (tmp_reg),
++ "=r" (flag)
++ :"2" (1)
++ :"memory");
++
++ return flag;
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr(virt_to_page(addr), 1, PAGE_KERNEL_RO);
++
++ printk("Write protecting the kernel read-only data: %uk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk at least gets out and gives a better debug hint
++ * as to who the culprit is.
++ */
++ global_flush_tlb();
++}
++#endif
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
++ free_page(addr);
++ totalram_pages++;
++ }
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++}
++
++void free_initmem(void)
++{
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
+Index: head-2008-11-25/arch/x86/mm/ioremap_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/ioremap_32-xen.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,443 @@
++/*
++ * arch/i386/mm/ioremap.c
++ *
++ * Re-map IO memory to kernel address space so that we can access it.
++ * This is needed for high PCI addresses that aren't mapped in the
++ * 640k-1MB IO memory area on PCs.
++ *
++ * (C) Copyright 1995 1996 Linus Torvalds
++ */
++
++#include <linux/vmalloc.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/module.h>
++#include <asm/io.h>
++#include <asm/fixmap.h>
++#include <asm/cacheflush.h>
++#include <asm/tlbflush.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++
++#define ISA_START_ADDRESS 0x0
++#define ISA_END_ADDRESS 0x100000
++
++static int direct_remap_area_pte_fn(pte_t *pte,
++ struct page *pmd_page,
++ unsigned long address,
++ void *data)
++{
++ mmu_update_t **v = (mmu_update_t **)data;
++
++ BUG_ON(!pte_none(*pte));
++
++ (*v)->ptr = ((u64)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ (*v)++;
++
++ return 0;
++}
++
++static int __direct_remap_pfn_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ int rc;
++ unsigned long i, start_address;
++ mmu_update_t *u, *v, *w;
++
++ u = v = w = (mmu_update_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
++ if (u == NULL)
++ return -ENOMEM;
++
++ start_address = address;
++
++ flush_cache_all();
++
++ for (i = 0; i < size; i += PAGE_SIZE) {
++ if ((v - u) == (PAGE_SIZE / sizeof(mmu_update_t))) {
++ /* Flush a full batch after filling in the PTE ptrs. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0)
++ goto out;
++ v = w = u;
++ start_address = address;
++ }
++
++ /*
++ * Fill in the machine address: PTE ptr is done later by
++ * apply_to_page_range().
++ */
++ v->val = __pte_val(pfn_pte_ma(mfn, prot)) | _PAGE_IO;
++
++ mfn++;
++ address += PAGE_SIZE;
++ v++;
++ }
++
++ if (v != u) {
++ /* Final batch. */
++ rc = apply_to_page_range(mm, start_address,
++ address - start_address,
++ direct_remap_area_pte_fn, &w);
++ if (rc)
++ goto out;
++ rc = -EFAULT;
++ if (unlikely(HYPERVISOR_mmu_update(u, v - u, NULL, domid) < 0))
++ goto out;
++ }
++
++ rc = 0;
++
++ out:
++ flush_tlb_all();
++
++ free_page((unsigned long)u);
++
++ return rc;
++}
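++/*
++ * A batch above holds PAGE_SIZE/sizeof(mmu_update_t) entries (256 with
++ * a 16-byte ptr/val pair), so a large remap costs one hypercall per
++ * 256 pages plus a final partial batch, not one hypercall per page.
++ */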
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return remap_pfn_range(vma, address, mfn, size, prot);
++
++ if (domid == DOMID_SELF)
++ return -EINVAL;
++
++ vma->vm_flags |= VM_IO | VM_RESERVED;
++
++ vma->vm_mm->context.has_foreign_mappings = 1;
++
++ return __direct_remap_pfn_range(
++ vma->vm_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_remap_pfn_range);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid)
++{
++ return __direct_remap_pfn_range(
++ &init_mm, address, mfn, size, prot, domid);
++}
++EXPORT_SYMBOL(direct_kernel_remap_pfn_range);
++
++static int lookup_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ uint64_t *ptep = (uint64_t *)data;
++ if (ptep)
++ *ptep = ((uint64_t)pfn_to_mfn(page_to_pfn(pmd_page)) <<
++ PAGE_SHIFT) | ((unsigned long)pte & ~PAGE_MASK);
++ return 0;
++}
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep)
++{
++ return apply_to_page_range(mm, address, PAGE_SIZE,
++ lookup_pte_fn, ptep);
++}
++
++EXPORT_SYMBOL(create_lookup_pte_addr);
++
++static int noop_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ return 0;
++}
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size)
++{
++ return apply_to_page_range(mm, address, size, noop_fn, NULL);
++}
++
++EXPORT_SYMBOL(touch_pte_range);
++
++/*
++ * Does @address reside within a non-highmem page that is local to this virtual
++ * machine (i.e., not an I/O page, nor a memory page belonging to another VM)?
++ * See the comment that accompanies mfn_to_local_pfn() in page.h to understand
++ * why this works.
++ */
++static inline int is_local_lowmem(unsigned long address)
++{
++ extern unsigned long max_low_pfn;
++ return (mfn_to_local_pfn(address >> PAGE_SHIFT) < max_low_pfn);
++}
++
++/*
++ * Generic mapping function (not visible outside):
++ */
++
++/*
++ * Remap an arbitrary physical address space into the kernel virtual
++ * address space. Needed when the kernel wants to access high addresses
++ * directly.
++ *
++ * NOTE! We need to allow non-page-aligned mappings too: we will obviously
++ * have to convert them into an offset in a page-aligned mapping, but the
++ * caller shouldn't need to know that small detail.
++ */
++void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
++{
++ void __iomem * addr;
++ struct vm_struct * area;
++ unsigned long offset, last_addr;
++ domid_t domid = DOMID_IO;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return (void __iomem *) isa_bus_to_virt(phys_addr);
++
++ /*
++ * Don't allow anybody to remap normal RAM that we're using..
++ */
++ if (is_local_lowmem(phys_addr)) {
++ char *t_addr, *t_end;
++ struct page *page;
++
++ t_addr = bus_to_virt(phys_addr);
++ t_end = t_addr + (size - 1);
++
++ for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
++ if (!PageReserved(page))
++ return NULL;
++
++ domid = DOMID_SELF;
++ }
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr+1) - phys_addr;
++
++ /*
++ * Ok, go for it..
++ */
++ area = get_vm_area(size, VM_IOREMAP | (flags << 20));
++ if (!area)
++ return NULL;
++ area->phys_addr = phys_addr;
++ addr = (void __iomem *) area->addr;
++ flags |= _KERNPG_TABLE;
++ if (__direct_remap_pfn_range(&init_mm, (unsigned long)addr,
++ phys_addr>>PAGE_SHIFT,
++ size, __pgprot(flags), domid)) {
++ vunmap((void __force *) addr);
++ return NULL;
++ }
++ return (void __iomem *) (offset + (char __iomem *)addr);
++}
++EXPORT_SYMBOL(__ioremap);
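++/*
++ * The (flags << 20) above stashes the caller's PTE flags in the high
++ * bits of the vm_struct flags word; iounmap() tests (p->flags >> 20)
++ * to decide whether change_page_attr() must restore the direct
++ * mapping's attributes.
++ */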
++
++/**
++ * ioremap_nocache - map bus memory into CPU space
++ * @offset: bus address of the memory
++ * @size: size of the resource to map
++ *
++ * ioremap_nocache performs a platform specific sequence of operations to
++ * make bus memory CPU accessible via the readb/readw/readl/writeb/
++ * writew/writel functions and the other mmio helpers. The returned
++ * address is not guaranteed to be usable directly as a virtual
++ * address.
++ *
++ * This version of ioremap ensures that the memory is marked uncachable
++ * on the CPU as well as honouring existing caching rules from things like
++ * the PCI bus. Note that there are other caches and buffers on many
++ * busses. In particular, driver authors should read up on PCI writes.
++ *
++ * It's useful if some control registers are in such an area and
++ * write combining or read caching is not desirable:
++ *
++ * Must be freed with iounmap.
++ */
++
++void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
++{
++ unsigned long last_addr;
++ void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
++ if (!p)
++ return p;
++
++ /* Guaranteed to be > phys_addr, as per __ioremap() */
++ last_addr = phys_addr + size - 1;
++
++ if (is_local_lowmem(last_addr)) {
++ struct page *ppage = virt_to_page(bus_to_virt(phys_addr));
++ unsigned long npages;
++
++ phys_addr &= PAGE_MASK;
++
++ /* This might overflow and become zero.. */
++ last_addr = PAGE_ALIGN(last_addr);
++
++ /* .. but that's ok, because modulo-2**n arithmetic will make
++ * the page-aligned "last - first" come out right.
++ */
++ npages = (last_addr - phys_addr) >> PAGE_SHIFT;
++
++ if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
++ iounmap(p);
++ p = NULL;
++ }
++ global_flush_tlb();
++ }
++
++ return p;
++}
++EXPORT_SYMBOL(ioremap_nocache);
++
++/**
++ * iounmap - Free an IO remapping
++ * @addr: virtual address from ioremap_*
++ *
++ * Caller must ensure there is only one unmapping for the same pointer.
++ */
++void iounmap(volatile void __iomem *addr)
++{
++ struct vm_struct *p, *o;
++
++ if ((void __force *)addr <= high_memory)
++ return;
++
++ /*
++ * __ioremap special-cases the PCI/ISA range by not instantiating a
++ * vm_area and by simply returning an address into the kernel mapping
++ * of ISA space. So handle that here.
++ */
++ if ((unsigned long) addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++
++ addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
++
++ /* Use the vm area unlocked, assuming the caller
++ ensures there isn't another iounmap for the same address
++ in parallel. Reuse of the virtual address is prevented by
++ leaving it in the global lists until we're done with it.
++ cpa takes care of the direct mappings. */
++ read_lock(&vmlist_lock);
++ for (p = vmlist; p; p = p->next) {
++ if (p->addr == addr)
++ break;
++ }
++ read_unlock(&vmlist_lock);
++
++ if (!p) {
++ printk("iounmap: bad address %p\n", addr);
++ dump_stack();
++ return;
++ }
++
++ /* Reset the direct mapping. Can block */
++ if ((p->flags >> 20) && is_local_lowmem(p->phys_addr)) {
++ /* p->size includes the guard page, but cpa doesn't like that */
++ change_page_attr(virt_to_page(bus_to_virt(p->phys_addr)),
++ (p->size - PAGE_SIZE) >> PAGE_SHIFT,
++ PAGE_KERNEL);
++ global_flush_tlb();
++ }
++
++ /* Finally remove it */
++ o = remove_vm_area((void *)addr);
++ BUG_ON(p != o || o == NULL);
++ kfree(p);
++}
++EXPORT_SYMBOL(iounmap);
++
++void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
++{
++ unsigned long offset, last_addr;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ /* Don't allow wraparound or zero size */
++ last_addr = phys_addr + size - 1;
++ if (!size || last_addr < phys_addr)
++ return NULL;
++
++ /*
++ * Don't remap the low PCI/ISA area, it's always mapped..
++ */
++ if (is_initial_xendomain() &&
++ phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
++ return isa_bus_to_virt(phys_addr);
++
++ /*
++ * Mappings have to be page-aligned
++ */
++ offset = phys_addr & ~PAGE_MASK;
++ phys_addr &= PAGE_MASK;
++ size = PAGE_ALIGN(last_addr) - phys_addr;
++
++ /*
++ * Mappings have to fit in the FIX_BTMAP area.
++ */
++ nrpages = size >> PAGE_SHIFT;
++ if (nrpages > NR_FIX_BTMAPS)
++ return NULL;
++
++ /*
++ * Ok, go for it..
++ */
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ set_fixmap(idx, phys_addr);
++ phys_addr += PAGE_SIZE;
++ --idx;
++ --nrpages;
++ }
++ return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
++}
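++/*
++ * bt_ioremap()/bt_iounmap() are for early boot use (hence __init),
++ * before vmalloc-based ioremap is usable: mappings are carved out of
++ * the fixed FIX_BTMAP_* slots, hence the NR_FIX_BTMAPS page limit above.
++ */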
++
++void __init bt_iounmap(void *addr, unsigned long size)
++{
++ unsigned long virt_addr;
++ unsigned long offset;
++ unsigned int nrpages;
++ enum fixed_addresses idx;
++
++ virt_addr = (unsigned long)addr;
++ if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
++ return;
++ if (virt_addr >= fix_to_virt(FIX_ISAMAP_BEGIN))
++ return;
++ offset = virt_addr & ~PAGE_MASK;
++ nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;
++
++ idx = FIX_BTMAP_BEGIN;
++ while (nrpages > 0) {
++ clear_fixmap(idx);
++ --idx;
++ --nrpages;
++ }
++}
+Index: head-2008-11-25/arch/x86/mm/pgtable_32-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/pgtable_32-xen.c 2007-10-09 11:48:25.000000000 +0200
+@@ -0,0 +1,725 @@
++/*
++ * linux/arch/i386/mm/pgtable.c
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/highmem.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/spinlock.h>
++#include <linux/module.h>
++
++#include <asm/system.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/tlb.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++#include <asm/mmu_context.h>
++
++#include <xen/features.h>
++#include <asm/hypervisor.h>
++
++static void pgd_test_and_unpin(pgd_t *pgd);
++
++void show_mem(void)
++{
++ int total = 0, reserved = 0;
++ int shared = 0, cached = 0;
++ int highmem = 0;
++ struct page *page;
++ pg_data_t *pgdat;
++ unsigned long i;
++ unsigned long flags;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++ for_each_online_pgdat(pgdat) {
++ pgdat_resize_lock(pgdat, &flags);
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pgdat_page_nr(pgdat, i);
++ total++;
++ if (PageHighMem(page))
++ highmem++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ pgdat_resize_unlock(pgdat, &flags);
++ }
++ printk(KERN_INFO "%d pages of RAM\n", total);
++ printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
++ printk(KERN_INFO "%d reserved pages\n", reserved);
++ printk(KERN_INFO "%d pages shared\n", shared);
++ printk(KERN_INFO "%d pages swap cached\n", cached);
++
++ printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
++ printk(KERN_INFO "%lu pages writeback\n",
++ global_page_state(NR_WRITEBACK));
++ printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
++ printk(KERN_INFO "%lu pages slab\n", global_page_state(NR_SLAB));
++ printk(KERN_INFO "%lu pages pagetables\n",
++ global_page_state(NR_PAGETABLE));
++}
++
++/*
++ * Associate a large virtual page frame with a given physical page frame
++ * and protection flags for that frame. pfn is for the base of the page,
++ * vaddr is what the page gets mapped to - both must be properly aligned.
++ * The pmd must already be instantiated. Assumes PAE mode.
++ */
++void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++
++ if (vaddr & (PMD_SIZE-1)) { /* vaddr is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
++ return; /* BUG(); */
++ }
++ if (pfn & (PTRS_PER_PTE-1)) { /* pfn is misaligned */
++ printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
++ return; /* BUG(); */
++ }
++ pgd = swapper_pg_dir + pgd_index(vaddr);
++ if (pgd_none(*pgd)) {
++ printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
++ return; /* BUG(); */
++ }
++ pud = pud_offset(pgd, vaddr);
++ pmd = pmd_offset(pud, vaddr);
++ set_pmd(pmd, pfn_pmd(pfn, flags));
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static int nr_fixmaps = 0;
++unsigned long hypervisor_virt_start = HYPERVISOR_VIRT_START;
++unsigned long __FIXADDR_TOP = (HYPERVISOR_VIRT_START - 2 * PAGE_SIZE);
++EXPORT_SYMBOL(__FIXADDR_TOP);
++
++void __init set_fixaddr_top(unsigned long top)
++{
++ BUG_ON(nr_fixmaps > 0);
++ hypervisor_virt_start = top;
++ __FIXADDR_TOP = hypervisor_virt_start - 2 * PAGE_SIZE;
++}
++
++void __set_fixmap (enum fixed_addresses idx, maddr_t phys, pgprot_t flags)
++{
++ unsigned long address = __fix_to_virt(idx);
++ pte_t pte;
++
++ if (idx >= __end_of_fixed_addresses) {
++ BUG();
++ return;
++ }
++ switch (idx) {
++ case FIX_WP_TEST:
++ case FIX_VDSO:
++ pte = pfn_pte(phys >> PAGE_SHIFT, flags);
++ break;
++ default:
++ pte = pfn_pte_ma(phys >> PAGE_SHIFT, flags);
++ break;
++ }
++ if (HYPERVISOR_update_va_mapping(address, pte,
++ UVMF_INVLPG|UVMF_ALL))
++ BUG();
++ nr_fixmaps++;
++}
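++/*
++ * Note the split above: FIX_WP_TEST and FIX_VDSO are given
++ * pseudo-physical frames (pfn_pte()), while every other fixmap slot is
++ * assumed to carry a machine address and goes through pfn_pte_ma().
++ */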
++
++pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
++ if (pte)
++ make_lowmem_page_readonly(pte, XENFEAT_writable_page_tables);
++ return pte;
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++#ifdef CONFIG_HIGHPTE
++ pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
++#else
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++#endif
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long pfn = page_to_pfn(pte);
++
++ if (!PageHighMem(pte)) {
++ unsigned long va = (unsigned long)__va(pfn << PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(pfn, PAGE_KERNEL), 0))
++ BUG();
++ } else
++ clear_bit(PG_pinned, &pte->flags);
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++
++void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
++{
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++}
++
++/*
++ * List of all pgd's needed for non-PAE so it can invalidate entries
++ * in both cached and uncached pgd's; not needed for PAE since the
++ * kernel pmd is shared. If PAE were not to share the pmd a similar
++ * tactic would be needed. This is essentially codepath-based locking
++ * against pageattr.c; it is the unique case in which a valid change
++ * of kernel pagetables can't be lazily synchronized by vmalloc faults.
++ * vmalloc faults work because attached pagetables are never freed.
++ * The locking scheme was chosen on the basis of manfred's
++ * recommendations and having no core impact whatsoever.
++ * -- wli
++ */
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++ page->index = (unsigned long)pgd_list;
++ if (pgd_list)
++ set_page_private(pgd_list, (unsigned long)&page->index);
++ pgd_list = page;
++ set_page_private(page, (unsigned long)&pgd_list);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page_private(page);
++ *pprev = next;
++ if (next)
++ set_page_private(next, (unsigned long)pprev);
++}
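++/*
++ * pgd_list is an intrusive singly-linked list: page->index holds the
++ * next pointer and page_private() points back at the previous link's
++ * next field, which is what makes pgd_list_del() O(1) above.
++ */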
++
++void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags;
++
++ if (PTRS_PER_PMD > 1) {
++ if (HAVE_SHARED_KERNEL_PMD)
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ } else {
++ spin_lock_irqsave(&pgd_lock, flags);
++ clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
++ swapper_pg_dir + USER_PTRS_PER_PGD,
++ KERNEL_PGD_PTRS);
++ memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
++ pgd_list_add(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ }
++}
++
++/* never called when PTRS_PER_PMD > 1 */
++void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
++{
++ unsigned long flags; /* can be called from interrupt context */
++
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ pgd_test_and_unpin(pgd);
++}
++
++pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ int i;
++ pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);
++ pmd_t **pmd;
++ unsigned long flags;
++
++ pgd_test_and_unpin(pgd);
++
++ if (PTRS_PER_PMD == 1 || !pgd)
++ return pgd;
++
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd)
++ goto out_oom;
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
++ }
++ return pgd;
++ }
++
++ /*
++ * We can race save/restore (if we sleep during a GFP_KERNEL memory
++ * allocation). We therefore store virtual addresses of pmds as they
++ * do not change across save/restore, and poke the machine addresses
++ * into the pgdir under the pgd_lock.
++ */
++ pmd = kmalloc(PTRS_PER_PGD * sizeof(pmd_t *), GFP_KERNEL);
++ if (!pmd) {
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++ }
++
++ /* Allocate pmds, remember virtual addresses. */
++ for (i = 0; i < PTRS_PER_PGD; ++i) {
++ pmd[i] = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
++ if (!pmd[i])
++ goto out_oom;
++ }
++
++ spin_lock_irqsave(&pgd_lock, flags);
++
++ /* Protect against save/restore: move below 4GB under pgd_lock. */
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb)) {
++ int rc = xen_create_contiguous_region(
++ (unsigned long)pgd, 0, 32);
++ if (rc) {
++ spin_unlock_irqrestore(&pgd_lock, flags);
++ goto out_oom;
++ }
++ }
++
++ /* Copy kernel pmd contents and write-protect the new pmds. */
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ unsigned long v = (unsigned long)i << PGDIR_SHIFT;
++ pgd_t *kpgd = pgd_offset_k(v);
++ pud_t *kpud = pud_offset(kpgd, v);
++ pmd_t *kpmd = pmd_offset(kpud, v);
++ memcpy(pmd[i], kpmd, PAGE_SIZE);
++ make_lowmem_page_readonly(
++ pmd[i], XENFEAT_writable_page_tables);
++ }
++
++ /* It is safe to poke machine addresses of pmds under the pgd_lock. */
++ for (i = 0; i < PTRS_PER_PGD; i++)
++ set_pgd(&pgd[i], __pgd(1 + __pa(pmd[i])));
++
++ /* Ensure this pgd gets picked up and pinned on save/restore. */
++ pgd_list_add(pgd);
++
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ kfree(pmd);
++
++ return pgd;
++
++out_oom:
++ if (HAVE_SHARED_KERNEL_PMD) {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache,
++ (void *)__va(pgd_val(pgd[i])-1));
++ } else {
++ for (i--; i >= 0; i--)
++ kmem_cache_free(pmd_cache, pmd[i]);
++ kfree(pmd);
++ }
++ kmem_cache_free(pgd_cache, pgd);
++ return NULL;
++}
++
++void pgd_free(pgd_t *pgd)
++{
++ int i;
++
++ /*
++ * After this the pgd should not be pinned for the duration of this
++ * function's execution. We should never sleep and thus never race:
++ * 1. User pmds will not become write-protected under our feet due
++ * to a concurrent mm_pin_all().
++ * 2. The machine addresses in PGD entries will not become invalid
++ * due to a concurrent save/restore.
++ */
++ pgd_test_and_unpin(pgd);
++
++ /* in the PAE case user pgd entries are overwritten before usage */
++ if (PTRS_PER_PMD > 1) {
++ for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!HAVE_SHARED_KERNEL_PMD) {
++ unsigned long flags;
++ spin_lock_irqsave(&pgd_lock, flags);
++ pgd_list_del(pgd);
++ spin_unlock_irqrestore(&pgd_lock, flags);
++
++ for (i = USER_PTRS_PER_PGD; i < PTRS_PER_PGD; i++) {
++ pmd_t *pmd = (void *)__va(pgd_val(pgd[i])-1);
++ make_lowmem_page_writable(
++ pmd, XENFEAT_writable_page_tables);
++ memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
++ kmem_cache_free(pmd_cache, pmd);
++ }
++
++ if (!xen_feature(XENFEAT_pae_pgdir_above_4gb))
++ xen_destroy_contiguous_region(
++ (unsigned long)pgd, 0);
++ }
++ }
++
++ /* in the non-PAE case, free_pgtables() clears user pgd entries */
++ kmem_cache_free(pgd_cache, pgd);
++}
++
++void make_lowmem_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_lowmem_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ BUG_ON(rc);
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_wrprotect(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_wrprotect(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn >= highstart_pfn)
++ kmap_flush_unused(); /* flush stale writable kmaps */
++ else
++#endif
++ make_lowmem_page_readonly(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ pte_t *pte;
++ int rc;
++
++ if (xen_feature(feature))
++ return;
++
++ pte = virt_to_ptep(va);
++ rc = HYPERVISOR_update_va_mapping(
++ (unsigned long)va, pte_mkwrite(*pte), 0);
++ if (rc) /* fallback? */
++ xen_l1_entry_update(pte, pte_mkwrite(*pte));
++ if ((unsigned long)va >= (unsigned long)high_memory) {
++ unsigned long pfn = pte_pfn(*pte);
++#ifdef CONFIG_HIGHMEM
++ if (pfn < highstart_pfn)
++#endif
++ make_lowmem_page_writable(
++ phys_to_virt(pfn << PAGE_SHIFT), feature);
++ }
++}
++
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_readonly(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ make_page_writable(va, feature);
++ va = (void *)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++static void _pin_lock(struct mm_struct *mm, int lock)
++{
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of pte-s. Such updates, however, require the
++ * pte pages to be in consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however,
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no pte lock is ever acquired elsewhere nested within
++ * an already acquired one in the same mm, or with the mm's
++ * page_table_lock already held, as that would break in the
++ * non-split case (where all of these actually resolve to the
++ * one page_table_lock). Thus acquiring all of them here is not
++ * going to result in deadlocks, and the order of acquires
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
++#define PIN_BATCH 4
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int pgd_walk_set_prot(struct page *page, pgprot_t flags,
++ unsigned int cpu, unsigned seq)
++{
++ unsigned long pfn = page_to_pfn(page);
++
++ if (PageHighMem(page)) {
++ if (pgprot_val(flags) & _PAGE_RW)
++ clear_bit(PG_pinned, &page->flags);
++ else
++ set_bit(PG_pinned, &page->flags);
++ } else {
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (unlikely(++seq == PIN_BATCH)) {
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ PIN_BATCH, NULL)))
++ BUG();
++ seq = 0;
++ }
++ }
++
++ return seq;
++}
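++/*
++ * Updates accumulate in a per-CPU batch of PIN_BATCH (4) multicall
++ * entries, flushed whenever the batch fills; pgd_walk() below issues
++ * any final partial batch together with the TLB flush.
++ */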
++
++static void pgd_walk(pgd_t *pgd_base, pgprot_t flags)
++{
++ pgd_t *pgd = pgd_base;
++ pud_t *pud;
++ pmd_t *pmd;
++ int g, u, m;
++ unsigned int cpu, seq;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return;
++
++ cpu = get_cpu();
++
++ for (g = 0, seq = 0; g < USER_PTRS_PER_PGD; g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ seq = pgd_walk_set_prot(virt_to_page(pud), flags, cpu, seq);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ seq = pgd_walk_set_prot(virt_to_page(pmd), flags, cpu, seq);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ seq = pgd_walk_set_prot(pmd_page(*pmd), flags, cpu, seq);
++ }
++ }
++ }
++
++ if (likely(seq != 0)) {
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ seq + 1, NULL)))
++ BUG();
++ } else if (HYPERVISOR_update_va_mapping((unsigned long)pgd_base,
++ pfn_pte(virt_to_phys(pgd_base)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH))
++ BUG();
++
++ put_cpu();
++}
++
++static void __pgd_pin(pgd_t *pgd)
++{
++ pgd_walk(pgd, PAGE_KERNEL_RO);
++ kmap_flush_unused();
++ xen_pgd_pin(__pa(pgd));
++ set_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void __pgd_unpin(pgd_t *pgd)
++{
++ xen_pgd_unpin(__pa(pgd));
++ pgd_walk(pgd, PAGE_KERNEL);
++ clear_bit(PG_pinned, &virt_to_page(pgd)->flags);
++}
++
++static void pgd_test_and_unpin(pgd_t *pgd)
++{
++ if (test_bit(PG_pinned, &virt_to_page(pgd)->flags))
++ __pgd_unpin(pgd);
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ pin_lock(mm);
++ __pgd_pin(mm->pgd);
++ pin_unlock(mm);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++ pin_lock(mm);
++ __pgd_unpin(mm->pgd);
++ pin_unlock(mm);
++}
++
++void mm_pin_all(void)
++{
++ struct page *page;
++ unsigned long flags;
++
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the pgd_list. Also protects
++ * __pgd_pin() by disabling preemption.
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ spin_lock_irqsave(&pgd_lock, flags);
++ for (page = pgd_list; page; page = (struct page *)page->index) {
++ if (!test_bit(PG_pinned, &page->flags))
++ __pgd_pin((pgd_t *)page_address(page));
++ }
++ spin_unlock_irqrestore(&pgd_lock, flags);
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags))
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove the defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as avoiding TLB flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if (test_bit(PG_pinned, &virt_to_page(mm->pgd)->flags) &&
++ (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings)
++ mm_unpin(mm);
++}
+Index: head-2008-11-25/arch/x86/oprofile/xenoprof.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/oprofile/xenoprof.c 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,179 @@
++/**
++ * @file xenoprof.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * x86-specific part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/oprofile.h>
++#include <linux/sched.h>
++#include <asm/pgtable.h>
++
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include <xen/xenoprof.h>
++#include "op_counter.h"
++
++static unsigned int num_events = 0;
++
++void __init xenoprof_arch_init_counter(struct xenoprof_init *init)
++{
++ num_events = init->num_events;
++ /* just in case - make sure we do not overflow event list
++ (i.e. counter_config list) */
++ if (num_events > OP_MAX_COUNTER) {
++ num_events = OP_MAX_COUNTER;
++ init->num_events = num_events;
++ }
++}
++
++void xenoprof_arch_counter(void)
++{
++ int i;
++ struct xenoprof_counter counter;
++
++ for (i = 0; i < num_events; i++) {
++ counter.ind = i;
++ counter.count = (uint64_t)counter_config[i].count;
++ counter.enabled = (uint32_t)counter_config[i].enabled;
++ counter.event = (uint32_t)counter_config[i].event;
++ counter.kernel = (uint32_t)counter_config[i].kernel;
++ counter.user = (uint32_t)counter_config[i].user;
++ counter.unit_mask = (uint64_t)counter_config[i].unit_mask;
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_counter,
++ &counter));
++ }
++}
++
++void xenoprof_arch_start(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_stop(void)
++{
++ /* nothing */
++}
++
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer * sbuf)
++{
++ if (sbuf->buffer) {
++ vunmap(sbuf->buffer);
++ sbuf->buffer = NULL;
++ }
++}
++
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer * get_buffer,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int npages, ret;
++ struct vm_struct *area;
++
++ sbuf->buffer = NULL;
++ if ( (ret = HYPERVISOR_xenoprof_op(XENOPROF_get_buffer, get_buffer)) )
++ return ret;
++
++ npages = (get_buffer->bufsize * get_buffer->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL)
++ return -ENOMEM;
++
++ if ( (ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ get_buffer->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, __pgprot(_KERNPG_TABLE),
++ DOMID_SELF)) ) {
++ vunmap(area->addr);
++ return ret;
++ }
++
++ sbuf->buffer = area->addr;
++ return ret;
++}
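++/*
++ * The npages computation above is a plain ceiling division, i.e.
++ * equivalent to DIV_ROUND_UP(bufsize * nbuf, PAGE_SIZE) for a
++ * non-empty buffer.
++ */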
++
++int xenoprof_arch_set_passive(struct xenoprof_passive * pdomain,
++ struct xenoprof_shared_buffer * sbuf)
++{
++ int ret;
++ int npages;
++ struct vm_struct *area;
++ pgprot_t prot = __pgprot(_KERNPG_TABLE);
++
++ sbuf->buffer = NULL;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_passive, pdomain);
++ if (ret)
++ goto out;
++
++ npages = (pdomain->bufsize * pdomain->nbuf - 1) / PAGE_SIZE + 1;
++
++ area = alloc_vm_area(npages * PAGE_SIZE);
++ if (area == NULL) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ ret = direct_kernel_remap_pfn_range(
++ (unsigned long)area->addr,
++ pdomain->buf_gmaddr >> PAGE_SHIFT,
++ npages * PAGE_SIZE, prot, DOMID_SELF);
++ if (ret) {
++ vunmap(area->addr);
++ goto out;
++ }
++ sbuf->buffer = area->addr;
++
++out:
++ return ret;
++}
++
++struct op_counter_config counter_config[OP_MAX_COUNTER];
++
++int xenoprof_create_files(struct super_block * sb, struct dentry * root)
++{
++ unsigned int i;
++
++ for (i = 0; i < num_events; ++i) {
++ struct dentry * dir;
++ char buf[2];
++
++ snprintf(buf, 2, "%d", i);
++ dir = oprofilefs_mkdir(sb, root, buf);
++ oprofilefs_create_ulong(sb, dir, "enabled",
++ &counter_config[i].enabled);
++ oprofilefs_create_ulong(sb, dir, "event",
++ &counter_config[i].event);
++ oprofilefs_create_ulong(sb, dir, "count",
++ &counter_config[i].count);
++ oprofilefs_create_ulong(sb, dir, "unit_mask",
++ &counter_config[i].unit_mask);
++ oprofilefs_create_ulong(sb, dir, "kernel",
++ &counter_config[i].kernel);
++ oprofilefs_create_ulong(sb, dir, "user",
++ &counter_config[i].user);
++ }
++
++ return 0;
++}
++
++int __init oprofile_arch_init(struct oprofile_operations * ops)
++{
++ return xenoprofile_init(ops);
++}
++
++void oprofile_arch_exit(void)
++{
++ xenoprofile_exit();
++}
+Index: head-2008-11-25/arch/x86/pci/irq-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/pci/irq-xen.c 2008-03-06 08:54:32.000000000 +0100
+@@ -0,0 +1,1211 @@
++/*
++ * Low-Level PCI Support for PC -- Routing of Interrupts
++ *
++ * (c) 1999--2000 Martin Mares <mj@ucw.cz>
++ */
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include <linux/init.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/dmi.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/io_apic.h>
++#include <linux/irq.h>
++#include <linux/acpi.h>
++
++#include "pci.h"
++
++#define PIRQ_SIGNATURE (('$' << 0) + ('P' << 8) + ('I' << 16) + ('R' << 24))
++#define PIRQ_VERSION 0x0100
++
++static int broken_hp_bios_irq9;
++static int acer_tm360_irqrouting;
++
++static struct irq_routing_table *pirq_table;
++
++static int pirq_enable_irq(struct pci_dev *dev);
++
++/*
++ * Never use: 0, 1, 2 (timer, keyboard, and cascade)
++ * Avoid using: 13, 14 and 15 (FP error and IDE).
++ * Penalize: 3, 4, 6, 7, 12 (known ISA uses: serial, floppy, parallel and mouse)
++ */
++unsigned int pcibios_irq_mask = 0xfff8;
++
++static int pirq_penalty[16] = {
++ 1000000, 1000000, 1000000, 1000, 1000, 0, 1000, 1000,
++ 0, 0, 0, 0, 1000, 100000, 100000, 100000
++};
++
++struct irq_router {
++ char *name;
++ u16 vendor, device;
++ int (*get)(struct pci_dev *router, struct pci_dev *dev, int pirq);
++ int (*set)(struct pci_dev *router, struct pci_dev *dev, int pirq, int new);
++};
++
++struct irq_router_handler {
++ u16 vendor;
++ int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
++};
++
++int (*pcibios_enable_irq)(struct pci_dev *dev) = NULL;
++void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
++
++/*
++ * Check passed address for the PCI IRQ Routing Table signature
++ * and perform checksum verification.
++ */
++
++static inline struct irq_routing_table * pirq_check_routing_table(u8 *addr)
++{
++ struct irq_routing_table *rt;
++ int i;
++ u8 sum;
++
++ rt = (struct irq_routing_table *) addr;
++ if (rt->signature != PIRQ_SIGNATURE ||
++ rt->version != PIRQ_VERSION ||
++ rt->size % 16 ||
++ rt->size < sizeof(struct irq_routing_table))
++ return NULL;
++ sum = 0;
++ for (i = 0; i < rt->size; i++)
++ sum += addr[i];
++ if (!sum) {
++ DBG(KERN_DEBUG "PCI: Interrupt Routing Table found at 0x%p\n", rt);
++ return rt;
++ }
++ return NULL;
++}
++
++
++
++/*
++ * Search 0xf0000 -- 0xfffff for the PCI IRQ Routing Table.
++ */
++
++static struct irq_routing_table * __init pirq_find_routing_table(void)
++{
++ u8 *addr;
++ struct irq_routing_table *rt;
++
++#ifdef CONFIG_XEN
++ if (!is_initial_xendomain())
++ return NULL;
++#endif
++ if (pirq_table_addr) {
++ rt = pirq_check_routing_table((u8 *) isa_bus_to_virt(pirq_table_addr));
++ if (rt)
++ return rt;
++ printk(KERN_WARNING "PCI: PIRQ table NOT found at pirqaddr\n");
++ }
++ for (addr = (u8 *) isa_bus_to_virt(0xf0000); addr < (u8 *) isa_bus_to_virt(0x100000); addr += 16) {
++ rt = pirq_check_routing_table(addr);
++ if (rt)
++ return rt;
++ }
++ return NULL;
++}
++
++/*
++ * If we have an IRQ routing table, use it to search for peer host
++ * bridges. It's a gross hack, but since there are no other known
++ * ways to get a list of buses, we have to go this way.
++ */
++
++static void __init pirq_peer_trick(void)
++{
++ struct irq_routing_table *rt = pirq_table;
++ u8 busmap[256];
++ int i;
++ struct irq_info *e;
++
++ memset(busmap, 0, sizeof(busmap));
++ for (i = 0; i < (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info); i++) {
++ e = &rt->slots[i];
++#ifdef DEBUG
++ {
++ int j;
++ DBG(KERN_DEBUG "%02x:%02x slot=%02x", e->bus, e->devfn/8, e->slot);
++ for (j = 0; j < 4; j++)
++ DBG(" %d:%02x/%04x", j, e->irq[j].link, e->irq[j].bitmap);
++ DBG("\n");
++ }
++#endif
++ busmap[e->bus] = 1;
++ }
++ for (i = 1; i < 256; i++) {
++ if (!busmap[i] || pci_find_bus(0, i))
++ continue;
++ if (pci_scan_bus(i, &pci_root_ops, NULL))
++ printk(KERN_INFO "PCI: Discovered primary peer bus %02x [IRQ]\n", i);
++ }
++ pcibios_last_bus = -1;
++}
++
++/*
++ * Code for querying and setting of IRQ routes on various interrupt routers.
++ */
++
++void eisa_set_level_irq(unsigned int irq)
++{
++ unsigned char mask = 1 << (irq & 7);
++ unsigned int port = 0x4d0 + (irq >> 3);
++ unsigned char val;
++ static u16 eisa_irq_mask;
++
++ if (irq >= 16 || (1 << irq) & eisa_irq_mask)
++ return;
++
++ eisa_irq_mask |= (1 << irq);
++ printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
++ val = inb(port);
++ if (!(val & mask)) {
++ DBG(KERN_DEBUG " -> edge");
++ outb(val | mask, port);
++ }
++}
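++/* e.g. for irq 11: port = 0x4d0 + (11 >> 3) = 0x4d1, mask = 1 << (11 & 7) = 0x08. */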
++
++/*
++ * Common IRQ routing practice: nybbles in config space,
++ * offset by some magic constant.
++ */
++static unsigned int read_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ return (nr & 1) ? (x >> 4) : (x & 0xf);
++}
++
++static void write_config_nybble(struct pci_dev *router, unsigned offset, unsigned nr, unsigned int val)
++{
++ u8 x;
++ unsigned reg = offset + (nr >> 1);
++
++ pci_read_config_byte(router, reg, &x);
++ x = (nr & 1) ? ((x & 0x0f) | (val << 4)) : ((x & 0xf0) | val);
++ pci_write_config_byte(router, reg, x);
++}
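++/*
++ * Worked example: read_config_nybble(router, 0x48, 3) reads config
++ * byte 0x48 + (3 >> 1) = 0x49 and, nr being odd, returns its upper
++ * nibble; even nr values select the lower nibble.
++ */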
++
++/*
++ * ALI pirq entries are damn ugly, and completely undocumented.
++ * This has been figured out from pirq tables, and it's not a pretty
++ * picture.
++ */
++static int pirq_ali_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char irqmap[16] = { 0, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 };
++
++ return irqmap[read_config_nybble(router, 0x48, pirq-1)];
++}
++
++static int pirq_ali_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char irqmap[16] = { 0, 8, 0, 2, 4, 5, 7, 6, 0, 1, 3, 9, 11, 0, 13, 15 };
++ unsigned int val = irqmap[irq];
++
++ if (val) {
++ write_config_nybble(router, 0x48, pirq-1, val);
++ return 1;
++ }
++ return 0;
++}
++
++/*
++ * The Intel PIIX4 pirq rules are fairly simple: "pirq" is
++ * just a pointer to the config space.
++ */
++static int pirq_piix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++
++ pci_read_config_byte(router, pirq, &x);
++ return (x < 16) ? x : 0;
++}
++
++static int pirq_piix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ pci_write_config_byte(router, pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, PIRQD is in the upper instead of lower 4 bits.
++ */
++static int pirq_via_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq);
++}
++
++static int pirq_via_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x55, pirq == 4 ? 5 : pirq, irq);
++ return 1;
++}
++
++/*
++ * The VIA pirq rules are nibble-based, like ALI,
++ * but without the ugly irq number munging.
++ * However, for the 82C586, the nibble map is different.
++ */
++static int pirq_via586_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++ return read_config_nybble(router, 0x55, pirqmap[pirq-1]);
++}
++
++static int pirq_via586_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned int pirqmap[5] = { 3, 2, 5, 1, 1 };
++ write_config_nybble(router, 0x55, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * ITE 8330G pirq rules are nibble-based
++ * FIXME: pirqmap may be { 1, 0, 3, 2 },
++ * 2+3 are both mapped to irq 9 on my system
++ */
++static int pirq_ite_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ return read_config_nybble(router, 0x43, pirqmap[pirq-1]);
++}
++
++static int pirq_ite_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ static const unsigned char pirqmap[4] = { 1, 0, 2, 3 };
++ write_config_nybble(router, 0x43, pirqmap[pirq-1], irq);
++ return 1;
++}
++
++/*
++ * OPTI: high four bits are nibble pointer..
++ * I wonder what the low bits do?
++ */
++static int pirq_opti_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0xb8, pirq >> 4);
++}
++
++static int pirq_opti_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0xb8, pirq >> 4, irq);
++ return 1;
++}
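++
++/*
++ * Worked example (illustrative): a link value of 0x12 has nibble
++ * pointer 0x12 >> 4 = 1, i.e. bits 7:4 of config register 0xb8;
++ * whatever the low bits mean, they are ignored here.
++ */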
++
++/*
++ * Cyrix: nibble offset 0x5C
++ * 0x5C bits 7:4 is INTB bits 3:0 is INTA
++ * 0x5D bits 7:4 is INTD bits 3:0 is INTC
++ */
++static int pirq_cyrix_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ return read_config_nybble(router, 0x5C, (pirq-1)^1);
++}
++
++static int pirq_cyrix_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ write_config_nybble(router, 0x5C, (pirq-1)^1, irq);
++ return 1;
++}
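++
++/*
++ * Worked example (illustrative): the (pirq-1)^1 term swaps adjacent
++ * nybbles so the layout above comes out right: link 2 selects
++ * nybble 0 (0x5C bits 3:0, INTA) while link 1 selects nybble 1
++ * (0x5C bits 7:4, INTB).
++ */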
++
++/*
++ * PIRQ routing for SiS 85C503 router used in several SiS chipsets.
++ * We have to deal with the following issues here:
++ * - vendors have different ideas about the meaning of link values
++ * - some onboard devices (integrated in the chipset) have special
++ * links and are thus routed differently (i.e. not via PCI INTA-INTD)
++ * - different revisions of the router have different layouts for
++ * the routing registers, particularly for the onchip devices
++ *
++ * For all routing registers the common thing is we have one byte
++ * per routeable link which is defined as:
++ * bit 7 IRQ mapping enabled (0) or disabled (1)
++ * bits [6:4] reserved (sometimes used for onchip devices)
++ * bits [3:0] IRQ to map to
++ * allowed: 3-7, 9-12, 14-15
++ * reserved: 0, 1, 2, 8, 13
++ *
++ * The config-space registers located at 0x41/0x42/0x43/0x44 are
++ * always used to route the normal PCI INT A/B/C/D respectively.
++ * Apparently there are systems implementing the PCI routing table using
++ * link values 0x01-0x04 and others using 0x41-0x44 for PCI INTA..D.
++ * We try our best to handle both link mappings.
++ *
++ * Currently (2003-05-21) it appears most SiS chipsets follow the
++ * definition of routing registers from the SiS-5595 southbridge.
++ * According to the SiS 5595 datasheets the revision IDs of the
++ * router (ISA-bridge) should be 0x01 or 0xb0.
++ *
++ * Furthermore we've also seen lspci dumps with revision 0x00 and 0xb1.
++ * Looks like these are used in a number of SiS 5xx/6xx/7xx chipsets.
++ * They seem to work with the current routing code. However there is
++ * some concern because of the two USB-OHCI HCs (original SiS 5595
++ * had only one). YMMV.
++ *
++ * Onchip routing for router rev-id 0x01/0xb0 and probably 0x00/0xb1:
++ *
++ * 0x61: IDEIRQ:
++ * bits [6:5] must be written 01
++ * bit 4 channel-select primary (0), secondary (1)
++ *
++ * 0x62: USBIRQ:
++ * bit 6 OHCI function disabled (0), enabled (1)
++ *
++ * 0x6a: ACPI/SCI IRQ: bits 4-6 reserved
++ *
++ * 0x7e: Data Acq. Module IRQ - bits 4-6 reserved
++ *
++ * We support USBIRQ (in addition to INTA-INTD) and keep the
++ * IDE, ACPI and DAQ routing untouched as set by the BIOS.
++ *
++ * Currently the only reported exception is the new SiS 65x chipset
++ * which includes the SiS 69x southbridge. Here we have the 85C503
++ * router revision 0x04 and there are changes in the register layout
++ * mostly related to the different USB HCs with USB 2.0 support.
++ *
++ * Onchip routing for router rev-id 0x04 (trial-and-error observation)
++ *
++ * 0x60/0x61/0x62/0x63: 1xEHCI and 3xOHCI (companion) USB-HCs
++ * bits 6:4 are probably unused, unlike the 5595
++ */
++
++#define PIRQ_SIS_IRQ_MASK 0x0f
++#define PIRQ_SIS_IRQ_DISABLE 0x80
++#define PIRQ_SIS_USB_ENABLE 0x40
++
++static int pirq_sis_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ return (x & PIRQ_SIS_IRQ_DISABLE) ? 0 : (x & PIRQ_SIS_IRQ_MASK);
++}
++
++static int pirq_sis_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ u8 x;
++ int reg;
++
++ reg = pirq;
++ if (reg >= 0x01 && reg <= 0x04)
++ reg += 0x40;
++ pci_read_config_byte(router, reg, &x);
++ x &= ~(PIRQ_SIS_IRQ_MASK | PIRQ_SIS_IRQ_DISABLE);
++ x |= irq ? irq : PIRQ_SIS_IRQ_DISABLE;
++ pci_write_config_byte(router, reg, x);
++ return 1;
++}
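++
++/*
++ * Worked example (illustrative): routing link 0x02 (or 0x42 -- both
++ * select register 0x42) to IRQ 11 clears bits 7 and 3:0 and writes
++ * back 0x0b; passing irq == 0 writes PIRQ_SIS_IRQ_DISABLE (0x80)
++ * instead, disabling the mapping per the layout described above.
++ */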
++
++
++/*
++ * VLSI: nibble offset 0x74 - educated guess due to routing table and
++ * config space of VLSI 82C534 PCI-bridge/router (1004:0102)
++ * Tested on HP OmniBook 800 covering PIRQ 1, 2, 4, 8 for onboard
++ * devices, PIRQ 3 for non-pci(!) soundchip and (untested) PIRQ 6
++ * for the busbridge to the docking station.
++ */
++
++static int pirq_vlsi_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ return read_config_nybble(router, 0x74, pirq-1);
++}
++
++static int pirq_vlsi_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ if (pirq > 8) {
++ printk(KERN_INFO "VLSI router pirq escape (%d)\n", pirq);
++ return 0;
++ }
++ write_config_nybble(router, 0x74, pirq-1, irq);
++ return 1;
++}
++
++/*
++ * ServerWorks: PCI interrupts mapped to system IRQ lines through Index
++ * and Redirect I/O registers (0x0c00 and 0x0c01). The Index register
++ * format is (PCIIRQ## | 0x10), e.g.: PCIIRQ10=0x1a. The Redirect
++ * register is a straight binary coding of desired PIC IRQ (low nibble).
++ *
++ * The 'link' value in the PIRQ table is already in the correct format
++ * for the Index register. There are some special index values:
++ * 0x00 for ACPI (SCI), 0x01 for USB, 0x02 for IDE0, 0x04 for IDE1,
++ * and 0x03 for SMBus.
++ */
++static int pirq_serverworks_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ outb_p(pirq, 0xc00);
++ return inb(0xc01) & 0xf;
++}
++
++static int pirq_serverworks_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ outb_p(pirq, 0xc00);
++ outb_p(irq, 0xc01);
++ return 1;
++}
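++
++/*
++ * Worked example (illustrative): a link value of 0x1a selects PCIIRQ10
++ * per the (PCIIRQ## | 0x10) encoding above: the get routine writes
++ * 0x1a to the Index register at 0x0c00 and reads the routed PIC IRQ
++ * from the low nybble of the Redirect register at 0x0c01.
++ */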
++
++/* Support for AMD756 PCI IRQ Routing
++ * Jhon H. Caicedo <jhcaiced@osso.org.co>
++ * Jun/21/2001 0.2.0 Release, fixed to use "nybble" functions... (jhcaiced)
++ * Jun/19/2001 Alpha Release 0.1.0 (jhcaiced)
++ * The AMD756 pirq rules are nibble-based
++ * offset 0x56 0-3 PIRQA 4-7 PIRQB
++ * offset 0x57 0-3 PIRQC 4-7 PIRQD
++ */
++static int pirq_amd756_get(struct pci_dev *router, struct pci_dev *dev, int pirq)
++{
++ u8 irq;
++ irq = 0;
++ if (pirq <= 4)
++ {
++ irq = read_config_nybble(router, 0x56, pirq - 1);
++ }
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d get irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ return irq;
++}
++
++static int pirq_amd756_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ printk(KERN_INFO "AMD756: dev %04x:%04x, router pirq : %d SET irq : %2d\n",
++ dev->vendor, dev->device, pirq, irq);
++ if (pirq <= 4)
++ {
++ write_config_nybble(router, 0x56, pirq - 1, irq);
++ }
++ return 1;
++}
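++
++/*
++ * Worked example (illustrative): pirq 2 (PIRQB) resolves to
++ * read_config_nybble(router, 0x56, 1), i.e. bits 7:4 of config
++ * register 0x56, matching the offsets listed above.
++ */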
++
++#ifdef CONFIG_PCI_BIOS
++
++static int pirq_bios_set(struct pci_dev *router, struct pci_dev *dev, int pirq, int irq)
++{
++ struct pci_dev *bridge;
++ int pin = pci_get_interrupt_pin(dev, &bridge);
++ return pcibios_set_irq_routing(bridge, pin, irq);
++}
++
++#endif
++
++static __init int intel_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ static struct pci_device_id __initdata pirq_440gx[] = {
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) },
++ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) },
++ { },
++ };
++
++ /* 440GX has a proprietary PIRQ router -- don't use it */
++ if (pci_dev_present(pirq_440gx))
++ return 0;
++
++ switch(device)
++ {
++ case PCI_DEVICE_ID_INTEL_82371FB_0:
++ case PCI_DEVICE_ID_INTEL_82371SB_0:
++ case PCI_DEVICE_ID_INTEL_82371AB_0:
++ case PCI_DEVICE_ID_INTEL_82371MX:
++ case PCI_DEVICE_ID_INTEL_82443MX_0:
++ case PCI_DEVICE_ID_INTEL_82801AA_0:
++ case PCI_DEVICE_ID_INTEL_82801AB_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_0:
++ case PCI_DEVICE_ID_INTEL_82801BA_10:
++ case PCI_DEVICE_ID_INTEL_82801CA_0:
++ case PCI_DEVICE_ID_INTEL_82801CA_12:
++ case PCI_DEVICE_ID_INTEL_82801DB_0:
++ case PCI_DEVICE_ID_INTEL_82801E_0:
++ case PCI_DEVICE_ID_INTEL_82801EB_0:
++ case PCI_DEVICE_ID_INTEL_ESB_1:
++ case PCI_DEVICE_ID_INTEL_ICH6_0:
++ case PCI_DEVICE_ID_INTEL_ICH6_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_0:
++ case PCI_DEVICE_ID_INTEL_ICH7_1:
++ case PCI_DEVICE_ID_INTEL_ICH7_30:
++ case PCI_DEVICE_ID_INTEL_ICH7_31:
++ case PCI_DEVICE_ID_INTEL_ESB2_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_0:
++ case PCI_DEVICE_ID_INTEL_ICH8_1:
++ case PCI_DEVICE_ID_INTEL_ICH8_2:
++ case PCI_DEVICE_ID_INTEL_ICH8_3:
++ case PCI_DEVICE_ID_INTEL_ICH8_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_0:
++ case PCI_DEVICE_ID_INTEL_ICH9_1:
++ case PCI_DEVICE_ID_INTEL_ICH9_2:
++ case PCI_DEVICE_ID_INTEL_ICH9_3:
++ case PCI_DEVICE_ID_INTEL_ICH9_4:
++ case PCI_DEVICE_ID_INTEL_ICH9_5:
++ r->name = "PIIX/ICH";
++ r->get = pirq_piix_get;
++ r->set = pirq_piix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int via_router_probe(struct irq_router *r,
++ struct pci_dev *router, u16 device)
++{
++ /* FIXME: We should move some of the quirk fixup stuff here */
++
++ /*
++ * work arounds for some buggy BIOSes
++ */
++ if (device == PCI_DEVICE_ID_VIA_82C586_0) {
++ switch(router->device) {
++ case PCI_DEVICE_ID_VIA_82C686:
++ /*
++ * Asus k7m bios wrongly reports 82C686A
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_82C686;
++ break;
++ case PCI_DEVICE_ID_VIA_8235:
++ /**
++ * Asus a7v-x bios wrongly reports 8235
++ * as 586-compatible
++ */
++ device = PCI_DEVICE_ID_VIA_8235;
++ break;
++ }
++ }
++
++ switch(device) {
++ case PCI_DEVICE_ID_VIA_82C586_0:
++ r->name = "VIA";
++ r->get = pirq_via586_get;
++ r->set = pirq_via586_set;
++ return 1;
++ case PCI_DEVICE_ID_VIA_82C596:
++ case PCI_DEVICE_ID_VIA_82C686:
++ case PCI_DEVICE_ID_VIA_8231:
++ case PCI_DEVICE_ID_VIA_8233A:
++ case PCI_DEVICE_ID_VIA_8235:
++ case PCI_DEVICE_ID_VIA_8237:
++ /* FIXME: add new ones for 8233/5 */
++ r->name = "VIA";
++ r->get = pirq_via_get;
++ r->set = pirq_via_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int vlsi_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_VLSI_82C534:
++ r->name = "VLSI 82C534";
++ r->get = pirq_vlsi_get;
++ r->set = pirq_vlsi_set;
++ return 1;
++ }
++ return 0;
++}
++
++
++static __init int serverworks_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_SERVERWORKS_OSB4:
++ case PCI_DEVICE_ID_SERVERWORKS_CSB5:
++ r->name = "ServerWorks";
++ r->get = pirq_serverworks_get;
++ r->set = pirq_serverworks_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int sis_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ if (device != PCI_DEVICE_ID_SI_503)
++ return 0;
++
++ r->name = "SIS";
++ r->get = pirq_sis_get;
++ r->set = pirq_sis_set;
++ return 1;
++}
++
++static __init int cyrix_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_CYRIX_5520:
++ r->name = "NatSemi";
++ r->get = pirq_cyrix_get;
++ r->set = pirq_cyrix_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int opti_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_OPTI_82C700:
++ r->name = "OPTI";
++ r->get = pirq_opti_get;
++ r->set = pirq_opti_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ite_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_ITE_IT8330G_0:
++ r->name = "ITE";
++ r->get = pirq_ite_get;
++ r->set = pirq_ite_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int ali_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AL_M1533:
++ case PCI_DEVICE_ID_AL_M1563:
++ printk(KERN_DEBUG "PCI: Using ALI IRQ Router\n");
++ r->name = "ALI";
++ r->get = pirq_ali_get;
++ r->set = pirq_ali_set;
++ return 1;
++ }
++ return 0;
++}
++
++static __init int amd_router_probe(struct irq_router *r, struct pci_dev *router, u16 device)
++{
++ switch(device)
++ {
++ case PCI_DEVICE_ID_AMD_VIPER_740B:
++ r->name = "AMD756";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7413:
++ r->name = "AMD766";
++ break;
++ case PCI_DEVICE_ID_AMD_VIPER_7443:
++ r->name = "AMD768";
++ break;
++ default:
++ return 0;
++ }
++ r->get = pirq_amd756_get;
++ r->set = pirq_amd756_set;
++ return 1;
++}
++
++static __initdata struct irq_router_handler pirq_routers[] = {
++ { PCI_VENDOR_ID_INTEL, intel_router_probe },
++ { PCI_VENDOR_ID_AL, ali_router_probe },
++ { PCI_VENDOR_ID_ITE, ite_router_probe },
++ { PCI_VENDOR_ID_VIA, via_router_probe },
++ { PCI_VENDOR_ID_OPTI, opti_router_probe },
++ { PCI_VENDOR_ID_SI, sis_router_probe },
++ { PCI_VENDOR_ID_CYRIX, cyrix_router_probe },
++ { PCI_VENDOR_ID_VLSI, vlsi_router_probe },
++ { PCI_VENDOR_ID_SERVERWORKS, serverworks_router_probe },
++ { PCI_VENDOR_ID_AMD, amd_router_probe },
++ /* Someone with docs needs to add the ATI Radeon IGP */
++ { 0, NULL }
++};
++static struct irq_router pirq_router;
++static struct pci_dev *pirq_router_dev;
++
++
++/*
++ * FIXME: should we have an option to say "generic for chipset"?
++ */
++
++static void __init pirq_find_router(struct irq_router *r)
++{
++ struct irq_routing_table *rt = pirq_table;
++ struct irq_router_handler *h;
++
++#ifdef CONFIG_PCI_BIOS
++ if (!rt->signature) {
++ printk(KERN_INFO "PCI: Using BIOS for IRQ routing\n");
++ r->set = pirq_bios_set;
++ r->name = "BIOS";
++ return;
++ }
++#endif
++
++ /* Default unless a driver reloads it */
++ r->name = "default";
++ r->get = NULL;
++ r->set = NULL;
++
++ DBG(KERN_DEBUG "PCI: Attempting to find IRQ router for %04x:%04x\n",
++ rt->rtr_vendor, rt->rtr_device);
++
++ pirq_router_dev = pci_find_slot(rt->rtr_bus, rt->rtr_devfn);
++ if (!pirq_router_dev) {
++ DBG(KERN_DEBUG "PCI: Interrupt router not found at "
++ "%02x:%02x\n", rt->rtr_bus, rt->rtr_devfn);
++ return;
++ }
++
++ for (h = pirq_routers; h->vendor; h++) {
++ /* First look for a router match */
++ if (rt->rtr_vendor == h->vendor && h->probe(r, pirq_router_dev, rt->rtr_device))
++ break;
++ /* Fall back to a device match */
++ if (pirq_router_dev->vendor == h->vendor && h->probe(r, pirq_router_dev, pirq_router_dev->device))
++ break;
++ }
++ printk(KERN_INFO "PCI: Using IRQ router %s [%04x/%04x] at %s\n",
++ pirq_router.name,
++ pirq_router_dev->vendor,
++ pirq_router_dev->device,
++ pci_name(pirq_router_dev));
++}
++
++static struct irq_info *pirq_get_info(struct pci_dev *dev)
++{
++ struct irq_routing_table *rt = pirq_table;
++ int entries = (rt->size - sizeof(struct irq_routing_table)) / sizeof(struct irq_info);
++ struct irq_info *info;
++
++ for (info = rt->slots; entries--; info++)
++ if (info->bus == dev->bus->number && PCI_SLOT(info->devfn) == PCI_SLOT(dev->devfn))
++ return info;
++ return NULL;
++}
++
++static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
++{
++ u8 pin;
++ struct irq_info *info;
++ int i, pirq, newirq;
++ int irq = 0;
++ u32 mask;
++ struct irq_router *r = &pirq_router;
++ struct pci_dev *dev2 = NULL;
++ char *msg = NULL;
++
++ /* Find IRQ pin */
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (!pin) {
++ DBG(KERN_DEBUG " -> no interrupt pin\n");
++ return 0;
++ }
++ pin = pin - 1;
++
++ /* Find IRQ routing entry */
++
++ if (!pirq_table)
++ return 0;
++
++ DBG(KERN_DEBUG "IRQ for %s[%c]", pci_name(dev), 'A' + pin);
++ info = pirq_get_info(dev);
++ if (!info) {
++ DBG(" -> not found in routing table\n" KERN_DEBUG);
++ return 0;
++ }
++ pirq = info->irq[pin].link;
++ mask = info->irq[pin].bitmap;
++ if (!pirq) {
++ DBG(" -> not routed\n" KERN_DEBUG);
++ return 0;
++ }
++ DBG(" -> PIRQ %02x, mask %04x, excl %04x", pirq, mask, pirq_table->exclusive_irqs);
++ mask &= pcibios_irq_mask;
++
++ /* Work around broken HP Pavilion Notebooks which assign USB to
++ IRQ 9 even though it is actually wired to IRQ 11 */
++
++ if (broken_hp_bios_irq9 && pirq == 0x59 && dev->irq == 9) {
++ dev->irq = 11;
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, 11);
++ r->set(pirq_router_dev, dev, pirq, 11);
++ }
++
++ /* same for Acer Travelmate 360, but with CB and irq 11 -> 10 */
++ if (acer_tm360_irqrouting && dev->irq == 11 && dev->vendor == PCI_VENDOR_ID_O2) {
++ pirq = 0x68;
++ mask = 0x400;
++ dev->irq = r->get(pirq_router_dev, dev, pirq);
++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq);
++ }
++
++ /*
++ * Find the best IRQ to assign: use the one
++ * reported by the device if possible.
++ */
++ newirq = dev->irq;
++ if (newirq && !((1 << newirq) & mask)) {
++ if (pci_probe & PCI_USE_PIRQ_MASK)
++ newirq = 0;
++ else printk("\n" KERN_WARNING
++ "PCI: IRQ %i for device %s doesn't match PIRQ mask "
++ "- try pci=usepirqmask\n" KERN_DEBUG, newirq,
++ pci_name(dev));
++ }
++ if (!newirq && assign) {
++ for (i = 0; i < 16; i++) {
++ if (!(mask & (1 << i)))
++ continue;
++ if (pirq_penalty[i] < pirq_penalty[newirq] && can_request_irq(i, IRQF_SHARED))
++ newirq = i;
++ }
++ }
++ DBG(" -> newirq=%d", newirq);
++
++ /* Check if it is hardcoded */
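++ /* e.g. a table entry with link 0xfb decodes to IRQ 0xfb & 0xf = 11 */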
++ if ((pirq & 0xf0) == 0xf0) {
++ irq = pirq & 0xf;
++ DBG(" -> hardcoded IRQ %d\n", irq);
++ msg = "Hardcoded";
++ } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) &&
++ ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
++ DBG(" -> got IRQ %d\n", irq);
++ msg = "Found";
++ eisa_set_level_irq(irq);
++ } else if (newirq && r->set && (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
++ DBG(" -> assigning IRQ %d", newirq);
++ if (r->set(pirq_router_dev, dev, pirq, newirq)) {
++ eisa_set_level_irq(newirq);
++ DBG(" ... OK\n");
++ msg = "Assigned";
++ irq = newirq;
++ }
++ }
++
++ if (!irq) {
++ DBG(" ... failed\n");
++ if (newirq && mask == (1 << newirq)) {
++ msg = "Guessed";
++ irq = newirq;
++ } else
++ return 0;
++ }
++ printk(KERN_INFO "PCI: %s IRQ %d for device %s\n", msg, irq, pci_name(dev));
++
++ /* Update IRQ for all devices with the same pirq value */
++ while ((dev2 = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev2)) != NULL) {
++ pci_read_config_byte(dev2, PCI_INTERRUPT_PIN, &pin);
++ if (!pin)
++ continue;
++ pin--;
++ info = pirq_get_info(dev2);
++ if (!info)
++ continue;
++ if (info->irq[pin].link == pirq) {
++ /* We refuse to override the dev->irq information. Give a warning! */
++ if (dev2->irq && dev2->irq != irq &&
++ (!(pci_probe & PCI_USE_PIRQ_MASK) ||
++ ((1 << dev2->irq) & mask))) {
++#ifndef CONFIG_PCI_MSI
++ printk(KERN_INFO "IRQ routing conflict for %s, have irq %d, want irq %d\n",
++ pci_name(dev2), dev2->irq, irq);
++#endif
++ continue;
++ }
++ dev2->irq = irq;
++ pirq_penalty[irq]++;
++ if (dev != dev2)
++ printk(KERN_INFO "PCI: Sharing IRQ %d with %s\n", irq, pci_name(dev2));
++ }
++ }
++ return 1;
++}
++
++static void __init pcibios_fixup_irqs(void)
++{
++ struct pci_dev *dev = NULL;
++ u8 pin;
++
++ DBG(KERN_DEBUG "PCI: IRQ fixup\n");
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ /*
++ * If the BIOS has set an out-of-range IRQ number, just ignore it.
++ * Also keep track of which IRQs are already in use.
++ */
++ if (dev->irq >= 16) {
++ DBG(KERN_DEBUG "%s: ignoring bogus IRQ %d\n", pci_name(dev), dev->irq);
++ dev->irq = 0;
++ }
++ /* If the IRQ is already assigned to a PCI device, ignore its ISA use penalty */
++ if (pirq_penalty[dev->irq] >= 100 && pirq_penalty[dev->irq] < 100000)
++ pirq_penalty[dev->irq] = 0;
++ pirq_penalty[dev->irq]++;
++ }
++
++ dev = NULL;
++ while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Recalculate IRQ numbers if we use the I/O APIC.
++ */
++ if (io_apic_assign_pci_irqs)
++ {
++ int irq;
++
++ if (pin) {
++ pin--; /* interrupt pins are numbered starting from 1 */
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
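++ /*
++ * Illustrative example: a device in slot 3 behind a bridge that
++ * reports INTB (pin == 1 after the decrement above) swizzles to
++ * (1 + 3) % 4 = 0, i.e. INTA at the bridge, and the lookup is
++ * retried with the bridge's bus and slot.
++ */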
++ if (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ }
++ if (irq >= 0) {
++ if (use_pci_vector() &&
++ !platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ }
++ }
++ }
++#endif
++ /*
++ * Still no IRQ? Try to lookup one...
++ */
++ if (pin && !dev->irq)
++ pcibios_lookup_irq(dev, 0);
++ }
++}
++
++/*
++ * Work around broken HP Pavilion Notebooks which assign USB to
++ * IRQ 9 even though it is actually wired to IRQ 11
++ */
++static int __init fix_broken_hp_bios_irq9(struct dmi_system_id *d)
++{
++ if (!broken_hp_bios_irq9) {
++ broken_hp_bios_irq9 = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++/*
++ * Work around broken Acer TravelMate 360 Notebooks which assign
++ * Cardbus to IRQ 11 even though it is actually wired to IRQ 10
++ */
++static int __init fix_acer_tm360_irqrouting(struct dmi_system_id *d)
++{
++ if (!acer_tm360_irqrouting) {
++ acer_tm360_irqrouting = 1;
++ printk(KERN_INFO "%s detected - fixing broken IRQ routing\n", d->ident);
++ }
++ return 0;
++}
++
++static struct dmi_system_id __initdata pciirq_dmi_table[] = {
++ {
++ .callback = fix_broken_hp_bios_irq9,
++ .ident = "HP Pavilion N5400 Series Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
++ DMI_MATCH(DMI_BIOS_VERSION, "GE.M1.03"),
++ DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook Model GE"),
++ DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"),
++ },
++ },
++ {
++ .callback = fix_acer_tm360_irqrouting,
++ .ident = "Acer TravelMate 36x Laptop",
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"),
++ },
++ },
++ { }
++};
++
++static int __init pcibios_irq_init(void)
++{
++ DBG(KERN_DEBUG "PCI: IRQ init\n");
++
++ if (pcibios_enable_irq || raw_pci_ops == NULL)
++ return 0;
++
++ dmi_check_system(pciirq_dmi_table);
++
++ pirq_table = pirq_find_routing_table();
++
++#ifdef CONFIG_PCI_BIOS
++ if (!pirq_table && (pci_probe & PCI_BIOS_IRQ_SCAN))
++ pirq_table = pcibios_get_irq_routing_table();
++#endif
++ if (pirq_table) {
++ pirq_peer_trick();
++ pirq_find_router(&pirq_router);
++ if (pirq_table->exclusive_irqs) {
++ int i;
++ for (i=0; i<16; i++)
++ if (!(pirq_table->exclusive_irqs & (1 << i)))
++ pirq_penalty[i] += 100;
++ }
++ /* If we're using the I/O APIC, avoid using the PCI IRQ routing table */
++ if (io_apic_assign_pci_irqs)
++ pirq_table = NULL;
++ }
++
++ pcibios_enable_irq = pirq_enable_irq;
++
++ pcibios_fixup_irqs();
++ return 0;
++}
++
++subsys_initcall(pcibios_irq_init);
++
++
++static void pirq_penalize_isa_irq(int irq, int active)
++{
++ /*
++ * If any ISAPnP device reports an IRQ in its list of possible
++ * IRQs, we try to avoid assigning it to PCI devices.
++ */
++ if (irq < 16) {
++ if (active)
++ pirq_penalty[irq] += 1000;
++ else
++ pirq_penalty[irq] += 100;
++ }
++}
++
++void pcibios_penalize_isa_irq(int irq, int active)
++{
++#ifdef CONFIG_ACPI
++ if (!acpi_noirq)
++ acpi_penalize_isa_irq(irq, active);
++ else
++#endif
++ pirq_penalize_isa_irq(irq, active);
++}
++
++static int pirq_enable_irq(struct pci_dev *dev)
++{
++ u8 pin;
++ struct pci_dev *temp_dev;
++
++ pci_read_config_byte(dev, PCI_INTERRUPT_PIN, &pin);
++ if (pin && !pcibios_lookup_irq(dev, 1) && !dev->irq) {
++ char *msg = "";
++
++ pin--; /* interrupt pins are numbered starting from 1 */
++
++ if (io_apic_assign_pci_irqs) {
++ int irq;
++
++ irq = IO_APIC_get_PCI_irq_vector(dev->bus->number, PCI_SLOT(dev->devfn), pin);
++ /*
++ * Busses behind bridges are typically not listed in the MP-table.
++ * In this case we have to look up the IRQ based on the parent bus,
++ * parent slot, and pin number. The SMP code detects such bridged
++ * busses itself so we should get into this branch reliably.
++ */
++ temp_dev = dev;
++ while (irq < 0 && dev->bus->parent) { /* go back to the bridge */
++ struct pci_dev * bridge = dev->bus->self;
++
++ pin = (pin + PCI_SLOT(dev->devfn)) % 4;
++ irq = IO_APIC_get_PCI_irq_vector(bridge->bus->number,
++ PCI_SLOT(bridge->devfn), pin);
++ if (irq >= 0)
++ printk(KERN_WARNING "PCI: using PPB %s[%c] to get irq %d\n",
++ pci_name(bridge), 'A' + pin, irq);
++ dev = bridge;
++ }
++ dev = temp_dev;
++ if (irq >= 0) {
++#ifdef CONFIG_PCI_MSI
++ if (!platform_legacy_irq(irq))
++ irq = IO_APIC_VECTOR(irq);
++#endif
++ printk(KERN_INFO "PCI->APIC IRQ transform: %s[%c] -> IRQ %d\n",
++ pci_name(dev), 'A' + pin, irq);
++ dev->irq = irq;
++ return 0;
++ } else
++ msg = " Probably buggy MP table.";
++ } else if (pci_probe & PCI_BIOS_IRQ_SCAN)
++ msg = "";
++ else
++ msg = " Please try using pci=biosirq.";
++
++ /* With IDE legacy devices the IRQ lookup failure is not a problem. */
++ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE && !(dev->class & 0x5))
++ return 0;
++
++ printk(KERN_WARNING "PCI: No IRQ known for interrupt pin %c of device %s.%s\n",
++ 'A' + pin, pci_name(dev), msg);
++ }
++ return 0;
++}
++
++int pci_vector_resources(int last, int nr_released)
++{
++ int count = nr_released;
++
++ int next = last;
++ int offset = (last % 8);
++
++ while (next < FIRST_SYSTEM_VECTOR) {
++ next += 8;
++#ifdef CONFIG_X86_64
++ if (next == IA32_SYSCALL_VECTOR)
++ continue;
++#else
++ if (next == SYSCALL_VECTOR)
++ continue;
++#endif
++ count++;
++ if (next >= FIRST_SYSTEM_VECTOR) {
++ if (offset%8) {
++ next = FIRST_DEVICE_VECTOR + offset;
++ offset++;
++ continue;
++ }
++ count--;
++ }
++ }
++
++ return count;
++}
+Index: head-2008-11-25/arch/x86/pci/pcifront.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/pci/pcifront.c 2007-06-12 13:12:49.000000000 +0200
+@@ -0,0 +1,55 @@
++/*
++ * PCI Frontend Stub - puts some "dummy" functions into the Linux x86 PCI core
++ * to support the Xen PCI Frontend's operation
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <asm/acpi.h>
++#include "pci.h"
++
++static int pcifront_enable_irq(struct pci_dev *dev)
++{
++ u8 irq;
++ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &irq);
++ dev->irq = irq;
++
++ return 0;
++}
++
++extern u8 pci_cache_line_size;
++
++static int __init pcifront_x86_stub_init(void)
++{
++ struct cpuinfo_x86 *c = &boot_cpu_data;
++
++ /* Only install our method if we haven't found real hardware already */
++ if (raw_pci_ops)
++ return 0;
++
++ printk(KERN_INFO "PCI: setting up Xen PCI frontend stub\n");
++
++ /* Copied from arch/i386/pci/common.c */
++ pci_cache_line_size = 32 >> 2;
++ if (c->x86 >= 6 && c->x86_vendor == X86_VENDOR_AMD)
++ pci_cache_line_size = 64 >> 2; /* K7 & K8 */
++ else if (c->x86 > 6 && c->x86_vendor == X86_VENDOR_INTEL)
++ pci_cache_line_size = 128 >> 2; /* P4 */
++
++ /* On x86, we need to disable the normal IRQ routing table and
++ * just ask the backend
++ */
++ pcibios_enable_irq = pcifront_enable_irq;
++ pcibios_disable_irq = NULL;
++
++#ifdef CONFIG_ACPI
++ /* Keep ACPI out of the picture */
++ acpi_noirq = 1;
++#endif
++
++ return 0;
++}
++
++arch_initcall(pcifront_x86_stub_init);
+Index: head-2008-11-25/arch/x86/ia32/ia32entry-xen.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/ia32/ia32entry-xen.S 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,666 @@
++/*
++ * Compatibility mode system call entry point for x86-64.
++ *
++ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/ia32_unistd.h>
++#include <asm/thread_info.h>
++#include <asm/segment.h>
++#include <asm/vsyscall32.h>
++#include <asm/irqflags.h>
++#include <linux/linkage.h>
++
++#define IA32_NR_syscalls ((ia32_syscall_end - ia32_sys_call_table)/8)
++
++ .macro IA32_ARG_FIXUP noebp=0
++ movl %edi,%r8d
++ .if \noebp
++ .else
++ movl %ebp,%r9d
++ .endif
++ xchg %ecx,%esi
++ movl %ebx,%edi
++ movl %edx,%edx /* zero extension */
++ .endm
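++
++ /*
++ * Net effect of IA32_ARG_FIXUP with the default noebp=0 (for
++ * reference): the i386 argument registers ebx/ecx/edx/esi/edi/ebp
++ * end up in rdi/rsi/rdx/rcx/r8/r9, i.e. the x86-64 C calling
++ * convention expected by the handlers in ia32_sys_call_table.
++ */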
++
++ /* clobbers %eax */
++ .macro CLEAR_RREGS
++ xorl %eax,%eax
++ movq %rax,R11(%rsp)
++ movq %rax,R10(%rsp)
++ movq %rax,R9(%rsp)
++ movq %rax,R8(%rsp)
++ .endm
++
++ .macro LOAD_ARGS32 offset
++ movl \offset(%rsp),%r11d
++ movl \offset+8(%rsp),%r10d
++ movl \offset+16(%rsp),%r9d
++ movl \offset+24(%rsp),%r8d
++ movl \offset+40(%rsp),%ecx
++ movl \offset+48(%rsp),%edx
++ movl \offset+56(%rsp),%esi
++ movl \offset+64(%rsp),%edi
++ movl \offset+72(%rsp),%eax
++ .endm
++
++ .macro CFI_STARTPROC32 simple
++ CFI_STARTPROC \simple
++ CFI_UNDEFINED r8
++ CFI_UNDEFINED r9
++ CFI_UNDEFINED r10
++ CFI_UNDEFINED r11
++ CFI_UNDEFINED r12
++ CFI_UNDEFINED r13
++ CFI_UNDEFINED r14
++ CFI_UNDEFINED r15
++ .endm
++
++/*
++ * 32bit SYSENTER instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp user stack
++ * 0(%ebp) Arg6
++ *
++ * Interrupts on.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_sysenter_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ CFI_REL_OFFSET r11,8
++ CFI_REL_OFFSET rcx,0
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ popq %rcx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ movl %ebp,%ebp /* zero extension */
++ movl %eax,%eax
++ movl $__USER32_DS,40(%rsp)
++ movq %rbp,32(%rsp)
++ movl $__USER32_CS,16(%rsp)
++ movl $VSYSCALL32_SYSEXIT,8(%rsp)
++ movq %rax,(%rsp)
++ cld
++ SAVE_ARGS 0,0,0
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz sysenter_tracesys
++sysenter_do_call:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++sysenter_tracesys:
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl %ebp, %ebp
++ /* no need to do an access_ok check here because rbp has been
++ 32bit zero extended */
++1: movl (%rbp),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp sysenter_do_call
++ CFI_ENDPROC
++ENDPROC(ia32_sysenter_target)
++
++/*
++ * 32bit SYSCALL instruction entry.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx return EIP
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg2 [note: not saved in the stack frame, should not be touched]
++ * %esp user stack
++ * 0(%esp) Arg6
++ *
++ * Interrupts on.
++ *
++ * This is purely a fast path. For anything complicated we use the int 0x80
++ * path below. Set up a complete hardware stack frame to share code
++ * with the int 0x80 path.
++ */
++ENTRY(ia32_cstar_target)
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ movl %eax,%eax /* zero extension */
++ movl RSP-RIP+16(%rsp),%r8d
++ SAVE_ARGS -8,1,1
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ movq %rbp,RCX-ARGOFFSET(%rsp) /* this lies slightly to ptrace */
++ movl %ebp,%ecx
++ movl $__USER32_CS,CS-ARGOFFSET(%rsp)
++ movl $__USER32_DS,SS-ARGOFFSET(%rsp)
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++ /* hardware stack frame is complete now */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz cstar_tracesys
++cstar_do_call:
++ cmpl $IA32_NR_syscalls-1,%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP 1
++ call *ia32_sys_call_table(,%rax,8)
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++cstar_tracesys:
++ SAVE_REST
++ CLEAR_RREGS
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ movl RSP-ARGOFFSET(%rsp), %r8d
++ /* no need to do an access_ok check here because r8 has been
++ 32bit zero extended */
++1: movl (%r8),%r9d
++ .section __ex_table,"a"
++ .quad 1b,ia32_badarg
++ .previous
++ jmp cstar_do_call
++END(ia32_cstar_target)
++
++ia32_badarg:
++ movq $-EFAULT,%rax
++ jmp ia32_sysret
++ CFI_ENDPROC
++
++/*
++ * Emulated IA32 system calls via int 0x80.
++ *
++ * Arguments:
++ * %eax System call number.
++ * %ebx Arg1
++ * %ecx Arg2
++ * %edx Arg3
++ * %esi Arg4
++ * %edi Arg5
++ * %ebp Arg6 [note: not saved in the stack frame, should not be touched]
++ *
++ * Notes:
++ * Uses the same stack frame as the x86-64 version.
++ * All registers except %eax must be saved (but ptrace may violate that)
++ * Arguments are zero extended. For system calls that want sign extension and
++ * take long arguments a wrapper is needed. Most calls can just be called
++ * directly.
++ * Assumes it is only called from user space and entered with interrupts on.
++ */
++
++ENTRY(ia32_syscall)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-RIP+16
++ /*CFI_REL_OFFSET ss,SS-RIP+16*/
++ CFI_REL_OFFSET rsp,RSP-RIP+16
++ /*CFI_REL_OFFSET rflags,EFLAGS-RIP+16*/
++ /*CFI_REL_OFFSET cs,CS-RIP+16*/
++ CFI_REL_OFFSET rip,RIP-RIP+16
++ CFI_REL_OFFSET r11,8
++ CFI_REL_OFFSET rcx,0
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ popq %rcx
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ movl %eax,%eax
++ movq %rax,(%rsp)
++ cld
++ /* note the registers are not zero-extended in the stack frame;
++ this could be a problem. */
++ SAVE_ARGS 0,0,1
++ GET_THREAD_INFO(%r10)
++ orl $TS_COMPAT,threadinfo_status(%r10)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%r10)
++ jnz ia32_tracesys
++ia32_do_syscall:
++ cmpl $(IA32_NR_syscalls-1),%eax
++ ja ia32_badsys
++ IA32_ARG_FIXUP
++ call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
++ia32_sysret:
++ movq %rax,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++ia32_tracesys:
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp) /* really needed? */
++ movq %rsp,%rdi /* &pt_regs -> arg1 */
++ call syscall_trace_enter
++ LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ jmp ia32_do_syscall
++END(ia32_syscall)
++
++ia32_badsys:
++ movq $0,ORIG_RAX-ARGOFFSET(%rsp)
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp int_ret_from_sys_call
++
++quiet_ni_syscall:
++ movq $-ENOSYS,%rax
++ ret
++ CFI_ENDPROC
++
++ .macro PTREGSCALL label, func, arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ia32_ptregs_common
++ .endm
++
++ CFI_STARTPROC32
++
++ PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn, %rdi
++ PTREGSCALL stub32_sigreturn, sys32_sigreturn, %rdi
++ PTREGSCALL stub32_sigaltstack, sys32_sigaltstack, %rdx
++ PTREGSCALL stub32_sigsuspend, sys32_sigsuspend, %rcx
++ PTREGSCALL stub32_execve, sys32_execve, %rcx
++ PTREGSCALL stub32_fork, sys_fork, %rdi
++ PTREGSCALL stub32_clone, sys32_clone, %rdx
++ PTREGSCALL stub32_vfork, sys_vfork, %rdi
++ PTREGSCALL stub32_iopl, sys_iopl, %rsi
++ PTREGSCALL stub32_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++
++ENTRY(ia32_ptregs_common)
++ popq %r11
++ CFI_ENDPROC
++ CFI_STARTPROC32 simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++/* CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++/* CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++/* CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ SAVE_REST
++ call *%rax
++ RESTORE_REST
++ jmp ia32_sysret /* misbalances the return cache */
++ CFI_ENDPROC
++END(ia32_ptregs_common)
++
++ .section .rodata,"a"
++ .align 8
++ia32_sys_call_table:
++ .quad sys_restart_syscall
++ .quad sys_exit
++ .quad stub32_fork
++ .quad sys_read
++ .quad sys_write
++ .quad compat_sys_open /* 5 */
++ .quad sys_close
++ .quad sys32_waitpid
++ .quad sys_creat
++ .quad sys_link
++ .quad sys_unlink /* 10 */
++ .quad stub32_execve
++ .quad sys_chdir
++ .quad compat_sys_time
++ .quad sys_mknod
++ .quad sys_chmod /* 15 */
++ .quad sys_lchown16
++ .quad quiet_ni_syscall /* old break syscall holder */
++ .quad sys_stat
++ .quad sys32_lseek
++ .quad sys_getpid /* 20 */
++ .quad compat_sys_mount /* mount */
++ .quad sys_oldumount /* old_umount */
++ .quad sys_setuid16
++ .quad sys_getuid16
++ .quad compat_sys_stime /* stime */ /* 25 */
++ .quad sys32_ptrace /* ptrace */
++ .quad sys_alarm
++ .quad sys_fstat /* (old)fstat */
++ .quad sys_pause
++ .quad compat_sys_utime /* 30 */
++ .quad quiet_ni_syscall /* old stty syscall holder */
++ .quad quiet_ni_syscall /* old gtty syscall holder */
++ .quad sys_access
++ .quad sys_nice
++ .quad quiet_ni_syscall /* 35 */ /* old ftime syscall holder */
++ .quad sys_sync
++ .quad sys32_kill
++ .quad sys_rename
++ .quad sys_mkdir
++ .quad sys_rmdir /* 40 */
++ .quad sys_dup
++ .quad sys32_pipe
++ .quad compat_sys_times
++ .quad quiet_ni_syscall /* old prof syscall holder */
++ .quad sys_brk /* 45 */
++ .quad sys_setgid16
++ .quad sys_getgid16
++ .quad sys_signal
++ .quad sys_geteuid16
++ .quad sys_getegid16 /* 50 */
++ .quad sys_acct
++ .quad sys_umount /* new_umount */
++ .quad quiet_ni_syscall /* old lock syscall holder */
++ .quad compat_sys_ioctl
++ .quad compat_sys_fcntl64 /* 55 */
++ .quad quiet_ni_syscall /* old mpx syscall holder */
++ .quad sys_setpgid
++ .quad quiet_ni_syscall /* old ulimit syscall holder */
++ .quad sys32_olduname
++ .quad sys_umask /* 60 */
++ .quad sys_chroot
++ .quad sys32_ustat
++ .quad sys_dup2
++ .quad sys_getppid
++ .quad sys_getpgrp /* 65 */
++ .quad sys_setsid
++ .quad sys32_sigaction
++ .quad sys_sgetmask
++ .quad sys_ssetmask
++ .quad sys_setreuid16 /* 70 */
++ .quad sys_setregid16
++ .quad stub32_sigsuspend
++ .quad compat_sys_sigpending
++ .quad sys_sethostname
++ .quad compat_sys_setrlimit /* 75 */
++ .quad compat_sys_old_getrlimit /* old_getrlimit */
++ .quad compat_sys_getrusage
++ .quad sys32_gettimeofday
++ .quad sys32_settimeofday
++ .quad sys_getgroups16 /* 80 */
++ .quad sys_setgroups16
++ .quad sys32_old_select
++ .quad sys_symlink
++ .quad sys_lstat
++ .quad sys_readlink /* 85 */
++#ifdef CONFIG_IA32_AOUT
++ .quad sys_uselib
++#else
++ .quad quiet_ni_syscall
++#endif
++ .quad sys_swapon
++ .quad sys_reboot
++ .quad compat_sys_old_readdir
++ .quad sys32_mmap /* 90 */
++ .quad sys_munmap
++ .quad sys_truncate
++ .quad sys_ftruncate
++ .quad sys_fchmod
++ .quad sys_fchown16 /* 95 */
++ .quad sys_getpriority
++ .quad sys_setpriority
++ .quad quiet_ni_syscall /* old profil syscall holder */
++ .quad compat_sys_statfs
++ .quad compat_sys_fstatfs /* 100 */
++ .quad sys_ioperm
++ .quad compat_sys_socketcall
++ .quad sys_syslog
++ .quad compat_sys_setitimer
++ .quad compat_sys_getitimer /* 105 */
++ .quad compat_sys_newstat
++ .quad compat_sys_newlstat
++ .quad compat_sys_newfstat
++ .quad sys32_uname
++ .quad stub32_iopl /* 110 */
++ .quad sys_vhangup
++ .quad quiet_ni_syscall /* old "idle" system call */
++ .quad sys32_vm86_warning /* vm86old */
++ .quad compat_sys_wait4
++ .quad sys_swapoff /* 115 */
++ .quad sys32_sysinfo
++ .quad sys32_ipc
++ .quad sys_fsync
++ .quad stub32_sigreturn
++ .quad stub32_clone /* 120 */
++ .quad sys_setdomainname
++ .quad sys_uname
++ .quad sys_modify_ldt
++ .quad compat_sys_adjtimex
++ .quad sys32_mprotect /* 125 */
++ .quad compat_sys_sigprocmask
++ .quad quiet_ni_syscall /* create_module */
++ .quad sys_init_module
++ .quad sys_delete_module
++ .quad quiet_ni_syscall /* 130 get_kernel_syms */
++ .quad sys_quotactl
++ .quad sys_getpgid
++ .quad sys_fchdir
++ .quad quiet_ni_syscall /* bdflush */
++ .quad sys_sysfs /* 135 */
++ .quad sys_personality
++ .quad quiet_ni_syscall /* for afs_syscall */
++ .quad sys_setfsuid16
++ .quad sys_setfsgid16
++ .quad sys_llseek /* 140 */
++ .quad compat_sys_getdents
++ .quad compat_sys_select
++ .quad sys_flock
++ .quad sys_msync
++ .quad compat_sys_readv /* 145 */
++ .quad compat_sys_writev
++ .quad sys_getsid
++ .quad sys_fdatasync
++ .quad sys32_sysctl /* sysctl */
++ .quad sys_mlock /* 150 */
++ .quad sys_munlock
++ .quad sys_mlockall
++ .quad sys_munlockall
++ .quad sys_sched_setparam
++ .quad sys_sched_getparam /* 155 */
++ .quad sys_sched_setscheduler
++ .quad sys_sched_getscheduler
++ .quad sys_sched_yield
++ .quad sys_sched_get_priority_max
++ .quad sys_sched_get_priority_min /* 160 */
++ .quad sys_sched_rr_get_interval
++ .quad compat_sys_nanosleep
++ .quad sys_mremap
++ .quad sys_setresuid16
++ .quad sys_getresuid16 /* 165 */
++ .quad sys32_vm86_warning /* vm86 */
++ .quad quiet_ni_syscall /* query_module */
++ .quad sys_poll
++ .quad compat_sys_nfsservctl
++ .quad sys_setresgid16 /* 170 */
++ .quad sys_getresgid16
++ .quad sys_prctl
++ .quad stub32_rt_sigreturn
++ .quad sys32_rt_sigaction
++ .quad sys32_rt_sigprocmask /* 175 */
++ .quad sys32_rt_sigpending
++ .quad compat_sys_rt_sigtimedwait
++ .quad sys32_rt_sigqueueinfo
++ .quad stub32_rt_sigsuspend
++ .quad sys32_pread /* 180 */
++ .quad sys32_pwrite
++ .quad sys_chown16
++ .quad sys_getcwd
++ .quad sys_capget
++ .quad sys_capset
++ .quad stub32_sigaltstack
++ .quad sys32_sendfile
++ .quad quiet_ni_syscall /* streams1 */
++ .quad quiet_ni_syscall /* streams2 */
++ .quad stub32_vfork /* 190 */
++ .quad compat_sys_getrlimit
++ .quad sys32_mmap2
++ .quad sys32_truncate64
++ .quad sys32_ftruncate64
++ .quad sys32_stat64 /* 195 */
++ .quad sys32_lstat64
++ .quad sys32_fstat64
++ .quad sys_lchown
++ .quad sys_getuid
++ .quad sys_getgid /* 200 */
++ .quad sys_geteuid
++ .quad sys_getegid
++ .quad sys_setreuid
++ .quad sys_setregid
++ .quad sys_getgroups /* 205 */
++ .quad sys_setgroups
++ .quad sys_fchown
++ .quad sys_setresuid
++ .quad sys_getresuid
++ .quad sys_setresgid /* 210 */
++ .quad sys_getresgid
++ .quad sys_chown
++ .quad sys_setuid
++ .quad sys_setgid
++ .quad sys_setfsuid /* 215 */
++ .quad sys_setfsgid
++ .quad sys_pivot_root
++ .quad sys_mincore
++ .quad sys_madvise
++ .quad compat_sys_getdents64 /* 220 getdents64 */
++ .quad compat_sys_fcntl64
++ .quad quiet_ni_syscall /* tux */
++ .quad quiet_ni_syscall /* security */
++ .quad sys_gettid
++ .quad sys_readahead /* 225 */
++ .quad sys_setxattr
++ .quad sys_lsetxattr
++ .quad sys_fsetxattr
++ .quad sys_getxattr
++ .quad sys_lgetxattr /* 230 */
++ .quad sys_fgetxattr
++ .quad sys_listxattr
++ .quad sys_llistxattr
++ .quad sys_flistxattr
++ .quad sys_removexattr /* 235 */
++ .quad sys_lremovexattr
++ .quad sys_fremovexattr
++ .quad sys_tkill
++ .quad sys_sendfile64
++ .quad compat_sys_futex /* 240 */
++ .quad compat_sys_sched_setaffinity
++ .quad compat_sys_sched_getaffinity
++ .quad sys32_set_thread_area
++ .quad sys32_get_thread_area
++ .quad compat_sys_io_setup /* 245 */
++ .quad sys_io_destroy
++ .quad compat_sys_io_getevents
++ .quad compat_sys_io_submit
++ .quad sys_io_cancel
++ .quad sys_fadvise64 /* 250 */
++ .quad quiet_ni_syscall /* free_huge_pages */
++ .quad sys_exit_group
++ .quad sys32_lookup_dcookie
++ .quad sys_epoll_create
++ .quad sys_epoll_ctl /* 255 */
++ .quad sys_epoll_wait
++ .quad sys_remap_file_pages
++ .quad sys_set_tid_address
++ .quad compat_sys_timer_create
++ .quad compat_sys_timer_settime /* 260 */
++ .quad compat_sys_timer_gettime
++ .quad sys_timer_getoverrun
++ .quad sys_timer_delete
++ .quad compat_sys_clock_settime
++ .quad compat_sys_clock_gettime /* 265 */
++ .quad compat_sys_clock_getres
++ .quad compat_sys_clock_nanosleep
++ .quad compat_sys_statfs64
++ .quad compat_sys_fstatfs64
++ .quad sys_tgkill /* 270 */
++ .quad compat_sys_utimes
++ .quad sys32_fadvise64_64
++ .quad quiet_ni_syscall /* sys_vserver */
++ .quad sys_mbind
++ .quad compat_sys_get_mempolicy /* 275 */
++ .quad sys_set_mempolicy
++ .quad compat_sys_mq_open
++ .quad sys_mq_unlink
++ .quad compat_sys_mq_timedsend
++ .quad compat_sys_mq_timedreceive /* 280 */
++ .quad compat_sys_mq_notify
++ .quad compat_sys_mq_getsetattr
++ .quad compat_sys_kexec_load /* reserved for kexec */
++ .quad compat_sys_waitid
++ .quad quiet_ni_syscall /* 285: sys_altroot */
++ .quad sys_add_key
++ .quad sys_request_key
++ .quad sys_keyctl
++ .quad sys_ioprio_set
++ .quad sys_ioprio_get /* 290 */
++ .quad sys_inotify_init
++ .quad sys_inotify_add_watch
++ .quad sys_inotify_rm_watch
++ .quad sys_migrate_pages
++ .quad compat_sys_openat /* 295 */
++ .quad sys_mkdirat
++ .quad sys_mknodat
++ .quad sys_fchownat
++ .quad compat_sys_futimesat
++ .quad sys32_fstatat /* 300 */
++ .quad sys_unlinkat
++ .quad sys_renameat
++ .quad sys_linkat
++ .quad sys_symlinkat
++ .quad sys_readlinkat /* 305 */
++ .quad sys_fchmodat
++ .quad sys_faccessat
++ .quad quiet_ni_syscall /* pselect6 for now */
++ .quad quiet_ni_syscall /* ppoll for now */
++ .quad sys_unshare /* 310 */
++ .quad compat_sys_set_robust_list
++ .quad compat_sys_get_robust_list
++ .quad sys_splice
++ .quad sys_sync_file_range
++ .quad sys_tee
++ .quad compat_sys_vmsplice
++ .quad compat_sys_move_pages
++ia32_syscall_end:
+Index: head-2008-11-25/arch/x86/kernel/acpi/sleep_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/acpi/sleep_64-xen.c 2008-04-15 09:29:41.000000000 +0200
+@@ -0,0 +1,146 @@
++/*
++ * acpi.c - Architecture-Specific Low-Level ACPI Support
++ *
++ * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
++ * Copyright (C) 2001 Jun Nakajima <jun.nakajima@intel.com>
++ * Copyright (C) 2001 Patrick Mochel <mochel@osdl.org>
++ * Copyright (C) 2002 Andi Kleen, SuSE Labs (x86-64 port)
++ * Copyright (C) 2003 Pavel Machek, SuSE Labs
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/types.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/bootmem.h>
++#include <linux/acpi.h>
++#include <linux/cpumask.h>
++
++#include <asm/mpspec.h>
++#include <asm/io.h>
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/tlbflush.h>
++
++/* --------------------------------------------------------------------------
++ Low-Level Sleep Support
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI_SLEEP
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++/* address in low memory of the wakeup routine. */
++unsigned long acpi_wakeup_address = 0;
++unsigned long acpi_video_flags;
++extern char wakeup_start, wakeup_end;
++
++extern unsigned long FASTCALL(acpi_copy_wakeup_routine(unsigned long));
++
++static pgd_t low_ptr;
++
++static void init_low_mapping(void)
++{
++ pgd_t *slot0 = pgd_offset(current->mm, 0UL);
++ low_ptr = *slot0;
++ set_pgd(slot0, *pgd_offset(current->mm, PAGE_OFFSET));
++ WARN_ON(num_online_cpus() != 1);
++ local_flush_tlb();
++}
++#endif
++
++/**
++ * acpi_save_state_mem - save kernel state
++ *
++ * Create an identity mapped page table and copy the wakeup routine to
++ * low memory.
++ */
++int acpi_save_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ init_low_mapping();
++
++ memcpy((void *)acpi_wakeup_address, &wakeup_start,
++ &wakeup_end - &wakeup_start);
++ acpi_copy_wakeup_routine(acpi_wakeup_address);
++#endif
++ return 0;
++}
++
++/*
++ * acpi_restore_state
++ */
++void acpi_restore_state_mem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ set_pgd(pgd_offset(current->mm, 0UL), low_ptr);
++ local_flush_tlb();
++#endif
++}
++
++/**
++ * acpi_reserve_bootmem - do _very_ early ACPI initialisation
++ *
++ * We allocate a page in low memory for the wakeup
++ * routine for when we come back from a sleep state. The
++ * runtime allocator allows specification of <16M pages, but not
++ * <1M pages.
++ */
++void __init acpi_reserve_bootmem(void)
++{
++#ifndef CONFIG_ACPI_PV_SLEEP
++ acpi_wakeup_address = (unsigned long)alloc_bootmem_low(PAGE_SIZE);
++ if ((&wakeup_end - &wakeup_start) > PAGE_SIZE)
++ printk(KERN_CRIT
++ "ACPI: Wakeup code way too big, will crash on attempt to suspend\n");
++#endif
++}
++
++#ifndef CONFIG_ACPI_PV_SLEEP
++static int __init acpi_sleep_setup(char *str)
++{
++ while ((str != NULL) && (*str != '\0')) {
++ if (strncmp(str, "s3_bios", 7) == 0)
++ acpi_video_flags = 1;
++ if (strncmp(str, "s3_mode", 7) == 0)
++ acpi_video_flags |= 2;
++ str = strchr(str, ',');
++ if (str != NULL)
++ str += strspn(str, ", \t");
++ }
++
++ return 1;
++}
++
++__setup("acpi_sleep=", acpi_sleep_setup);
++#endif /* CONFIG_ACPI_PV_SLEEP */
++
++#endif /*CONFIG_ACPI_SLEEP */
++
++void acpi_pci_link_exit(void)
++{
++}
+Index: head-2008-11-25/arch/x86/kernel/apic_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/apic_64-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,197 @@
++/*
++ * Local APIC handling, local APIC timers
++ *
++ * (c) 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively.
++ * Maciej W. Rozycki : Various updates and fixes.
++ * Mikael Pettersson : Power Management for UP-APIC.
++ * Pavel Machek and
++ * Mikael Pettersson : PM converted to driver model.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/mc146818rtc.h>
++#include <linux/kernel_stat.h>
++#include <linux/sysdev.h>
++#include <linux/module.h>
++
++#include <asm/atomic.h>
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/hpet.h>
++#include <asm/idle.h>
++
++int apic_verbosity;
++
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector?'
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++ /*
++ * Currently unexpected vectors happen only on SMP and APIC.
++ * We _must_ ack these because every local APIC has only N
++ * irq slots per priority level, and a 'hanging, unacked' IRQ
++ * holds up an irq slot - in excessive cases (when multiple
++ * unexpected vectors occur) that might lock up the APIC
++ * completely.
++ * But don't ack when the APIC is disabled. -AK
++ */
++ if (!disable_apic)
++ ack_APIC_irq();
++}
++
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++
++void smp_local_timer_interrupt(struct pt_regs *regs)
++{
++ profile_tick(CPU_PROFILING, regs);
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ update_process_times(user_mode(regs));
++#endif
++#endif
++ /*
++ * We take the 'long' return path, and there every subsystem
++ * grabs the appropriate locks (kernel lock/ irq lock).
++ *
++ * we might want to decouple profiling from the 'long path',
++ * and do the profiling totally in assembly.
++ *
++ * Currently this isn't too much of an issue (performance wise),
++ * we can take more than 100K local irqs per second on a 100 MHz P5.
++ */
++}
++
++/*
++ * Local APIC timer interrupt. This is the most natural way for doing
++ * local interrupts, but local timer interrupts can be emulated by
++ * broadcast interrupts too. [in case the hw doesn't support APIC timers]
++ *
++ * [ if a single-CPU system runs an SMP kernel then we call the local
++ * interrupt as well. Thus we cannot inline the local irq ... ]
++ */
++void smp_apic_timer_interrupt(struct pt_regs *regs)
++{
++ /*
++ * the NMI deadlock-detector uses this.
++ */
++ add_pda(apic_timer_irqs, 1);
++
++ /*
++ * NOTE! We'd better ACK the irq immediately,
++ * because timer handling can be slow.
++ */
++ ack_APIC_irq();
++ /*
++ * update_process_times() expects us to have done irq_enter().
++ * Besides, if we don't, timer interrupts ignore the global
++ * interrupt lock, which is the WrongThing (tm) to do.
++ */
++ exit_idle();
++ irq_enter();
++ smp_local_timer_interrupt(regs);
++ irq_exit();
++}
++
++/*
++ * This interrupt should _never_ happen with our APIC/SMP architecture
++ */
++asmlinkage void smp_spurious_interrupt(void)
++{
++ unsigned int v;
++ exit_idle();
++ irq_enter();
++ /*
++ * Check if this really is a spurious interrupt and ACK it
++ * if it is a vectored one. Just in case...
++ * Spurious interrupts should not be ACKed.
++ */
++ v = apic_read(APIC_ISR + ((SPURIOUS_APIC_VECTOR & ~0x1f) >> 1));
++ if (v & (1 << (SPURIOUS_APIC_VECTOR & 0x1f)))
++ ack_APIC_irq();
++
++#if 0
++ static unsigned long last_warning;
++ static unsigned long skipped;
++
++ /* see sw-dev-man vol 3, chapter 7.4.13.5 */
++ if (time_before(last_warning+30*HZ,jiffies)) {
++ printk(KERN_INFO "spurious APIC interrupt on CPU#%d, %ld skipped.\n",
++ smp_processor_id(), skipped);
++ last_warning = jiffies;
++ skipped = 0;
++ } else {
++ skipped++;
++ }
++#endif
++ irq_exit();
++}
++
++/*
++ * This interrupt should never happen with our APIC/SMP architecture
++ */
++
++asmlinkage void smp_error_interrupt(void)
++{
++ unsigned int v, v1;
++
++ exit_idle();
++ irq_enter();
++ /* First tickle the hardware, only then report what went on. -- REW */
++ v = apic_read(APIC_ESR);
++ apic_write(APIC_ESR, 0);
++ v1 = apic_read(APIC_ESR);
++ ack_APIC_irq();
++ atomic_inc(&irq_err_count);
++
++ /* Here is what the APIC error bits mean:
++ 0: Send CS error
++ 1: Receive CS error
++ 2: Send accept error
++ 3: Receive accept error
++ 4: Reserved
++ 5: Send illegal vector
++ 6: Received illegal vector
++ 7: Illegal register address
++ */
++ printk (KERN_DEBUG "APIC error on CPU%d: %02x(%02x)\n",
++ smp_processor_id(), v , v1);
++ irq_exit();
++}
++
++int disable_apic;
++
++/*
++ * This initializes the IO-APIC and APIC hardware if this is
++ * a UP kernel.
++ */
++int __init APIC_init_uniprocessor (void)
++{
++#ifdef CONFIG_X86_IO_APIC
++ if (smp_found_config)
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++
++ return 1;
++}
+Index: head-2008-11-25/arch/x86/kernel/e820_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/e820_64-xen.c 2008-04-22 19:56:27.000000000 +0200
+@@ -0,0 +1,798 @@
++/*
++ * Handle the memory map.
++ * The functions here do the job until bootmem takes over.
++ *
++ * Getting sanitize_e820_map() in sync with i386 version by applying change:
++ * - Provisions for empty E820 memory regions (reported by certain BIOSes).
++ * Alex Achenbach <xela@slit.de>, December 2002.
++ * Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
++ *
++ */
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/ioport.h>
++#include <linux/string.h>
++#include <linux/kexec.h>
++#include <linux/module.h>
++
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/e820.h>
++#include <asm/proto.h>
++#include <asm/bootsetup.h>
++#include <asm/sections.h>
++#include <xen/interface/memory.h>
++
++/*
++ * PFN of last memory page.
++ */
++unsigned long end_pfn;
++EXPORT_SYMBOL(end_pfn);
++
++/*
++ * end_pfn only includes RAM, while end_pfn_map includes all e820 entries.
++ * The direct mapping extends to end_pfn_map, so that we can directly access
++ * apertures, ACPI and other tables without having to play with fixmaps.
++ */
++unsigned long end_pfn_map;
++
++/*
++ * Last pfn which the user wants to use.
++ */
++unsigned long end_user_pfn = MAXMEM>>PAGE_SHIFT;
++
++extern struct resource code_resource, data_resource;
++
++#ifdef CONFIG_XEN
++extern struct e820map machine_e820;
++#endif
++
++/* Check for some hardcoded bad areas that early boot is not allowed to touch */
++static inline int bad_addr(unsigned long *addrp, unsigned long size)
++{
++ unsigned long addr = *addrp, last = addr + size;
++
++#ifndef CONFIG_XEN
++ /* various gunk below that needed for SMP startup */
++ if (addr < 0x8000) {
++ *addrp = 0x8000;
++ return 1;
++ }
++
++ /* direct mapping tables of the kernel */
++ if (last >= table_start<<PAGE_SHIFT && addr < table_end<<PAGE_SHIFT) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++
++ /* initrd */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START && last >= INITRD_START &&
++ addr < INITRD_START+INITRD_SIZE) {
++ *addrp = INITRD_START + INITRD_SIZE;
++ return 1;
++ }
++#endif
++ /* kernel code + 640k memory hole (later should not be needed, but
++ be paranoid for now) */
++ if (last >= 640*1024 && addr < 1024*1024) {
++ *addrp = 1024*1024;
++ return 1;
++ }
++ if (last >= __pa_symbol(&_text) && last < __pa_symbol(&_end)) {
++ *addrp = __pa_symbol(&_end);
++ return 1;
++ }
++
++ if (last >= ebda_addr && addr < ebda_addr + ebda_size) {
++ *addrp = ebda_addr + ebda_size;
++ return 1;
++ }
++
++ /* XXX ramdisk image here? */
++#else
++ if (last < (table_end<<PAGE_SHIFT)) {
++ *addrp = table_end << PAGE_SHIFT;
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++/*
++ * This function checks if any part of the range <start,end> is mapped
++ * with type.
++ */
++int e820_any_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++ return 1;
++ }
++ return 0;
++}
++EXPORT_SYMBOL_GPL(e820_any_mapped);
++
++/*
++ * This function checks if the entire range <start,end> is mapped with type.
++ *
++ * Note: this function only works correctly if the e820 table is sorted
++ * and non-overlapping, which is the case.
++ */
++int __init e820_all_mapped(unsigned long start, unsigned long end, unsigned type)
++{
++ int i;
++
++#ifndef CONFIG_XEN
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++#else
++ if (!is_initial_xendomain())
++ return 0;
++ for (i = 0; i < machine_e820.nr_map; i++) {
++ const struct e820entry *ei = &machine_e820.map[i];
++#endif
++
++ if (type && ei->type != type)
++ continue;
++ /* does the region (at least partially) overlap the current entry? */
++ if (ei->addr >= end || ei->addr + ei->size <= start)
++ continue;
++
++ /* if the region is at the beginning of <start,end> we move
++ * start to the end of the region since it's ok until there
++ */
++ if (ei->addr <= start)
++ start = ei->addr + ei->size;
++ /* if start is now at or beyond end, we're done, full coverage */
++ if (start >= end)
++ return 1; /* we're done */
++ }
++ return 0;
++}
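++
++/*
++ * Worked example (illustrative, not part of the original patch): with a
++ * sorted map of RAM at [0, 640k) and [1M, 2M), a query such as
++ *
++ *	e820_all_mapped(0, 0x180000, E820_RAM)
++ *
++ * advances start to 640k via the first entry, but the second entry only
++ * begins at 1M, so start never reaches end and the function returns 0:
++ * the 640k-1M hole breaks full coverage.
++ */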
++
++/*
++ * Find a free area in a specific range.
++ */
++unsigned long __init find_e820_area(unsigned long start, unsigned long end, unsigned size)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long addr = ei->addr, last;
++ if (ei->type != E820_RAM)
++ continue;
++ if (addr < start)
++ addr = start;
++ if (addr > ei->addr + ei->size)
++ continue;
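++ /* skip forbidden sub-ranges; bad_addr() advances addr past each one */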
++ while (bad_addr(&addr, size) && addr+size <= ei->addr+ei->size)
++ ;
++ last = addr + size;
++ if (last > ei->addr + ei->size)
++ continue;
++ if (last > end)
++ continue;
++ return addr;
++ }
++ return -1UL;
++}
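++
++/*
++ * Usage sketch (illustrative only, not part of this patch): a caller
++ * reserving an early scratch buffer might do
++ *
++ *	unsigned long addr = find_e820_area(0x8000, end_pfn << PAGE_SHIFT,
++ *					    PAGE_SIZE);
++ *	if (addr == -1UL)
++ *		panic("no free e820 area for scratch buffer");
++ *
++ * -1UL is the "not found" sentinel, so callers must check for it before
++ * using the returned address.
++ */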
++
++/*
++ * Free bootmem based on the e820 table for a node.
++ */
++void __init e820_bootmem_free(pg_data_t *pgdat, unsigned long start,unsigned long end)
++{
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr && last-addr >= PAGE_SIZE)
++ free_bootmem_node(pgdat, addr, last-addr);
++ }
++}
++
++/*
++ * Find the highest page frame number we have available
++ */
++unsigned long __init e820_end_of_ram(void)
++{
++ int i;
++ unsigned long end_pfn = 0;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long start, end;
++
++ start = round_up(ei->addr, PAGE_SIZE);
++ end = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (start >= end)
++ continue;
++ if (ei->type == E820_RAM) {
++ if (end > end_pfn<<PAGE_SHIFT)
++ end_pfn = end>>PAGE_SHIFT;
++ } else {
++ if (end > end_pfn_map<<PAGE_SHIFT)
++ end_pfn_map = end>>PAGE_SHIFT;
++ }
++ }
++
++ if (end_pfn > end_pfn_map)
++ end_pfn_map = end_pfn;
++ if (end_pfn_map > MAXMEM>>PAGE_SHIFT)
++ end_pfn_map = MAXMEM>>PAGE_SHIFT;
++ if (end_pfn > end_user_pfn)
++ end_pfn = end_user_pfn;
++ if (end_pfn > end_pfn_map)
++ end_pfn = end_pfn_map;
++
++ return end_pfn;
++}
++
++/*
++ * Compute how much memory is missing in a range.
++ * Unlike the other functions in this file the arguments are in page numbers.
++ */
++unsigned long __init
++e820_hole_size(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long ram = 0;
++ unsigned long start = start_pfn << PAGE_SHIFT;
++ unsigned long end = end_pfn << PAGE_SHIFT;
++ int i;
++ for (i = 0; i < e820.nr_map; i++) {
++ struct e820entry *ei = &e820.map[i];
++ unsigned long last, addr;
++
++ if (ei->type != E820_RAM ||
++ ei->addr+ei->size <= start ||
++ ei->addr >= end)
++ continue;
++
++ addr = round_up(ei->addr, PAGE_SIZE);
++ if (addr < start)
++ addr = start;
++
++ last = round_down(ei->addr + ei->size, PAGE_SIZE);
++ if (last >= end)
++ last = end;
++
++ if (last > addr)
++ ram += last - addr;
++ }
++ return ((end - start) - ram) >> PAGE_SHIFT;
++}
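++
++/*
++ * Worked example (illustrative): with RAM at [0, 640k) and [1M, 2M),
++ * e820_hole_size(0, 0x200) covers bytes [0, 2M) and finds
++ * 640k + 1M = 0x1A0000 bytes of RAM, so it reports the 640k-1M hole:
++ * (0x200000 - 0x1A0000) >> PAGE_SHIFT = 96 pages.
++ */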
++
++/*
++ * Mark e820 reserved areas as busy for the resource manager.
++ */
++void __init e820_reserve_resources(struct e820entry *e820, int nr_map)
++{
++ int i;
++ for (i = 0; i < nr_map; i++) {
++ struct resource *res;
++ res = alloc_bootmem_low(sizeof(struct resource));
++ switch (e820[i].type) {
++ case E820_RAM: res->name = "System RAM"; break;
++ case E820_ACPI: res->name = "ACPI Tables"; break;
++ case E820_NVS: res->name = "ACPI Non-volatile Storage"; break;
++ default: res->name = "reserved";
++ }
++ res->start = e820[i].addr;
++ res->end = res->start + e820[i].size - 1;
++ res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
++ request_resource(&iomem_resource, res);
++ if (e820[i].type == E820_RAM) {
++ /*
++ * We don't know which RAM region contains kernel data,
++ * so we try it repeatedly and let the resource manager
++ * test it.
++ */
++#ifndef CONFIG_XEN
++ request_resource(res, &code_resource);
++ request_resource(res, &data_resource);
++#endif
++#ifdef CONFIG_KEXEC
++ if (crashk_res.start != crashk_res.end)
++ request_resource(res, &crashk_res);
++#ifdef CONFIG_XEN
++ xen_machine_kexec_register_resources(res);
++#endif
++#endif
++ }
++ }
++}
++
++/*
++ * Add a memory region to the kernel e820 map.
++ */
++void __init add_memory_region(unsigned long start, unsigned long size, int type)
++{
++ int x = e820.nr_map;
++
++ if (x == E820MAX) {
++ printk(KERN_ERR "Ooops! Too many entries in the memory map!\n");
++ return;
++ }
++
++ e820.map[x].addr = start;
++ e820.map[x].size = size;
++ e820.map[x].type = type;
++ e820.nr_map++;
++}
++
++void __init e820_print_map(char *who)
++{
++ int i;
++
++ for (i = 0; i < e820.nr_map; i++) {
++ printk(" %s: %016Lx - %016Lx ", who,
++ (unsigned long long) e820.map[i].addr,
++ (unsigned long long) (e820.map[i].addr + e820.map[i].size));
++ switch (e820.map[i].type) {
++ case E820_RAM: printk("(usable)\n");
++ break;
++ case E820_RESERVED:
++ printk("(reserved)\n");
++ break;
++ case E820_ACPI:
++ printk("(ACPI data)\n");
++ break;
++ case E820_NVS:
++ printk("(ACPI NVS)\n");
++ break;
++ default: printk("type %u\n", e820.map[i].type);
++ break;
++ }
++ }
++}
++
++/*
++ * Sanitize the BIOS e820 map.
++ *
++ * Some e820 responses include overlapping entries. The following
++ * replaces the original e820 map with a new one, removing overlaps.
++ *
++ */
++static int __init sanitize_e820_map(struct e820entry * biosmap, char * pnr_map)
++{
++ struct change_member {
++ struct e820entry *pbios; /* pointer to original bios entry */
++ unsigned long long addr; /* address for this change point */
++ };
++ static struct change_member change_point_list[2*E820MAX] __initdata;
++ static struct change_member *change_point[2*E820MAX] __initdata;
++ static struct e820entry *overlap_list[E820MAX] __initdata;
++ static struct e820entry new_bios[E820MAX] __initdata;
++ struct change_member *change_tmp;
++ unsigned long current_type, last_type;
++ unsigned long long last_addr;
++ int chgidx, still_changing;
++ int overlap_entries;
++ int new_bios_entry;
++ int old_nr, new_nr, chg_nr;
++ int i;
++
++ /*
++ Visually we're performing the following (1,2,3,4 = memory types)...
++
++ Sample memory map (w/overlaps):
++ ____22__________________
++ ______________________4_
++ ____1111________________
++ _44_____________________
++ 11111111________________
++ ____________________33__
++ ___________44___________
++ __________33333_________
++ ______________22________
++ ___________________2222_
++ _________111111111______
++ _____________________11_
++ _________________4______
++
++ Sanitized equivalent (no overlap):
++ 1_______________________
++ _44_____________________
++ ___1____________________
++ ____22__________________
++ ______11________________
++ _________1______________
++ __________3_____________
++ ___________44___________
++ _____________33_________
++ _______________2________
++ ________________1_______
++ _________________4______
++ ___________________2____
++ ____________________33__
++ ______________________4_
++ */
++
++ /* if there's only one memory region, don't bother */
++ if (*pnr_map < 2)
++ return -1;
++
++ old_nr = *pnr_map;
++
++ /* bail out if we find any unreasonable addresses in bios map */
++ for (i=0; i<old_nr; i++)
++ if (biosmap[i].addr + biosmap[i].size < biosmap[i].addr)
++ return -1;
++
++ /* create pointers for initial change-point information (for sorting) */
++ for (i=0; i < 2*old_nr; i++)
++ change_point[i] = &change_point_list[i];
++
++ /* record all known change-points (starting and ending addresses),
++ omitting those that are for empty memory regions */
++ chgidx = 0;
++ for (i=0; i < old_nr; i++) {
++ if (biosmap[i].size != 0) {
++ change_point[chgidx]->addr = biosmap[i].addr;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ change_point[chgidx]->addr = biosmap[i].addr + biosmap[i].size;
++ change_point[chgidx++]->pbios = &biosmap[i];
++ }
++ }
++ chg_nr = chgidx;
++
++ /* sort change-point list by memory addresses (low -> high) */
++ still_changing = 1;
++ while (still_changing) {
++ still_changing = 0;
++ for (i=1; i < chg_nr; i++) {
++ /* if <current_addr> > <last_addr>, swap */
++ /* or, if current=<start_addr> & last=<end_addr>, swap */
++ if ((change_point[i]->addr < change_point[i-1]->addr) ||
++ ((change_point[i]->addr == change_point[i-1]->addr) &&
++ (change_point[i]->addr == change_point[i]->pbios->addr) &&
++ (change_point[i-1]->addr != change_point[i-1]->pbios->addr))
++ )
++ {
++ change_tmp = change_point[i];
++ change_point[i] = change_point[i-1];
++ change_point[i-1] = change_tmp;
++ still_changing=1;
++ }
++ }
++ }
++
++ /* create a new bios memory map, removing overlaps */
++ overlap_entries=0; /* number of entries in the overlap table */
++ new_bios_entry=0; /* index for creating new bios map entries */
++ last_type = 0; /* start with undefined memory type */
++ last_addr = 0; /* start with 0 as last starting address */
++ /* loop through change-points, determining effect on the new bios map */
++ for (chgidx=0; chgidx < chg_nr; chgidx++)
++ {
++ /* keep track of all overlapping bios entries */
++ if (change_point[chgidx]->addr == change_point[chgidx]->pbios->addr)
++ {
++ /* add map entry to overlap list (> 1 entry implies an overlap) */
++ overlap_list[overlap_entries++]=change_point[chgidx]->pbios;
++ }
++ else
++ {
++ /* remove entry from list (order independent, so swap with last) */
++ for (i=0; i<overlap_entries; i++)
++ {
++ if (overlap_list[i] == change_point[chgidx]->pbios)
++ overlap_list[i] = overlap_list[overlap_entries-1];
++ }
++ overlap_entries--;
++ }
++ /* if there are overlapping entries, decide which "type" to use */
++ /* (larger value takes precedence -- 1=usable, 2,3,4,4+=unusable) */
++ current_type = 0;
++ for (i=0; i<overlap_entries; i++)
++ if (overlap_list[i]->type > current_type)
++ current_type = overlap_list[i]->type;
++ /* continue building up new bios map based on this information */
++ if (current_type != last_type) {
++ if (last_type != 0) {
++ new_bios[new_bios_entry].size =
++ change_point[chgidx]->addr - last_addr;
++ /* move forward only if the new size was non-zero */
++ if (new_bios[new_bios_entry].size != 0)
++ if (++new_bios_entry >= E820MAX)
++ break; /* no more space left for new bios entries */
++ }
++ if (current_type != 0) {
++ new_bios[new_bios_entry].addr = change_point[chgidx]->addr;
++ new_bios[new_bios_entry].type = current_type;
++ last_addr=change_point[chgidx]->addr;
++ }
++ last_type = current_type;
++ }
++ }
++ new_nr = new_bios_entry; /* retain count for new bios entries */
++
++ /* copy new bios mapping into original location */
++ memcpy(biosmap, new_bios, new_nr*sizeof(struct e820entry));
++ *pnr_map = new_nr;
++
++ return 0;
++}
++
++/*
++ * Copy the BIOS e820 map into a safe place.
++ *
++ * Sanity-check it while we're at it...
++ *
++ * If we're lucky and live on a modern system, the setup code
++ * will have given us a memory map that we can use to properly
++ * set up memory. If we aren't, we'll fake a memory map.
++ *
++ * We check to see that the memory map contains at least 2 elements
++ * before we'll use it, because the detection code in setup.S may
++ * not be perfect and almost every PC known to man has two memory
++ * regions: one from 0 to 640k, and one from 1mb up. (The IBM
++ * thinkpad 560x, for example, does not cooperate with the memory
++ * detection code.)
++ */
++static int __init copy_e820_map(struct e820entry * biosmap, int nr_map)
++{
++#ifndef CONFIG_XEN
++ /* Only one memory region (or negative)? Ignore it */
++ if (nr_map < 2)
++ return -1;
++#else
++ BUG_ON(nr_map < 1);
++#endif
++
++ do {
++ unsigned long start = biosmap->addr;
++ unsigned long size = biosmap->size;
++ unsigned long end = start + size;
++ unsigned long type = biosmap->type;
++
++ /* Overflow in 64 bits? Ignore the memory map. */
++ if (start > end)
++ return -1;
++
++#ifndef CONFIG_XEN
++ /*
++ * Some BIOSes claim RAM in the 640k - 1M region.
++ * Not right. Fix it up.
++ *
++ * This should be removed on Hammer which is supposed to not
++ * have non e820 covered ISA mappings there, but I had some strange
++ * problems so it stays for now. -AK
++ */
++ if (type == E820_RAM) {
++ if (start < 0x100000ULL && end > 0xA0000ULL) {
++ if (start < 0xA0000ULL)
++ add_memory_region(start, 0xA0000ULL-start, type);
++ if (end <= 0x100000ULL)
++ continue;
++ start = 0x100000ULL;
++ size = end - start;
++ }
++ }
++#endif
++
++ add_memory_region(start, size, type);
++ } while (biosmap++,--nr_map);
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain()) {
++ struct xen_memory_map memmap;
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, machine_e820.map);
++
++ if (HYPERVISOR_memory_op(XENMEM_machine_memory_map, &memmap))
++ BUG();
++ machine_e820.nr_map = memmap.nr_entries;
++ } else
++ machine_e820 = e820;
++#endif
++
++ return 0;
++}
++
++#ifndef CONFIG_XEN
++void __init setup_memory_region(void)
++{
++ char *who = "BIOS-e820";
++
++ /*
++ * Try to copy the BIOS-supplied E820-map.
++ *
++ * Otherwise fake a memory map; one section from 0k->640k,
++ * the next section from 1mb->appropriate_mem_k
++ */
++ sanitize_e820_map(E820_MAP, &E820_MAP_NR);
++ if (copy_e820_map(E820_MAP, E820_MAP_NR) < 0) {
++ unsigned long mem_size;
++
++ /* compare results from other methods and take the greater */
++ if (ALT_MEM_K < EXT_MEM_K) {
++ mem_size = EXT_MEM_K;
++ who = "BIOS-88";
++ } else {
++ mem_size = ALT_MEM_K;
++ who = "BIOS-e801";
++ }
++
++ e820.nr_map = 0;
++ add_memory_region(0, LOWMEMSIZE(), E820_RAM);
++ add_memory_region(HIGH_MEMORY, mem_size << 10, E820_RAM);
++ }
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map(who);
++}
++
++#else /* CONFIG_XEN */
++
++void __init setup_memory_region(void)
++{
++ int rc;
++ struct xen_memory_map memmap;
++ /*
++ * This is rather large for a stack variable, but this early in
++ * the boot process we know we have plenty of slack space.
++ */
++ struct e820entry map[E820MAX];
++
++ memmap.nr_entries = E820MAX;
++ set_xen_guest_handle(memmap.buffer, map);
++
++ rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap);
++ if ( rc == -ENOSYS ) {
++ memmap.nr_entries = 1;
++ map[0].addr = 0ULL;
++ map[0].size = xen_start_info->nr_pages << PAGE_SHIFT;
++ /* 8MB slack (to balance backend allocations). */
++ map[0].size += 8 << 20;
++ map[0].type = E820_RAM;
++ rc = 0;
++ }
++ BUG_ON(rc);
++
++ sanitize_e820_map(map, (char *)&memmap.nr_entries);
++
++ BUG_ON(copy_e820_map(map, (int)memmap.nr_entries) < 0);
++
++ printk(KERN_INFO "BIOS-provided physical RAM map:\n");
++ e820_print_map("Xen");
++}
++#endif
++
++void __init parse_memopt(char *p, char **from)
++{
++ int i;
++ unsigned long current_end;
++ unsigned long end;
++
++ end_user_pfn = memparse(p, from);
++ end_user_pfn >>= PAGE_SHIFT;
++
++ end = end_user_pfn<<PAGE_SHIFT;
++ i = e820.nr_map-1;
++ current_end = e820.map[i].addr + e820.map[i].size;
++
++ if (current_end < end) {
++ /*
++ * The e820 map ends before our requested size so
++ * extend the final entry to the requested address.
++ */
++ if (e820.map[i].type == E820_RAM)
++ e820.map[i].size = end - e820.map[i].addr;
++ else
++ add_memory_region(current_end, end - current_end, E820_RAM);
++ }
++}
++
++void __init parse_memmapopt(char *p, char **from)
++{
++ unsigned long long start_at, mem_size;
++
++ mem_size = memparse(p, from);
++ p = *from;
++ if (*p == '@') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RAM);
++ } else if (*p == '#') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_ACPI);
++ } else if (*p == '$') {
++ start_at = memparse(p+1, from);
++ add_memory_region(start_at, mem_size, E820_RESERVED);
++ } else {
++ end_user_pfn = (mem_size >> PAGE_SHIFT);
++ }
++ p = *from;
++}
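++
++/*
++ * Example command lines (illustrative): "mem=512M" caps usable memory at
++ * 512MB; "memmap=64M@1G" adds 64MB of RAM at 1GB; '#' instead of '@' marks
++ * the range as ACPI data and '$' reserves it outright.
++ */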
++
++unsigned long pci_mem_start = 0xaeedbabe;
++EXPORT_SYMBOL(pci_mem_start);
++
++/*
++ * Search for the biggest gap in the low 32 bits of the e820
++ * memory space. We pass this space to PCI to assign MMIO resources
++ * for hotplug or unconfigured devices.
++ * Hopefully the BIOS left enough space.
++ */
++__init void e820_setup_gap(struct e820entry *e820, int nr_map)
++{
++ unsigned long gapstart, gapsize, round;
++ unsigned long last;
++ int i;
++ int found = 0;
++
++ last = 0x100000000ull;
++ gapstart = 0x10000000;
++ gapsize = 0x400000;
++ i = nr_map;
++ while (--i >= 0) {
++ unsigned long long start = e820[i].addr;
++ unsigned long long end = start + e820[i].size;
++
++ /*
++ * Since "last" is at most 4GB, we know we'll
++ * fit in 32 bits if this condition is true
++ */
++ if (last > end) {
++ unsigned long gap = last - end;
++
++ if (gap > gapsize) {
++ gapsize = gap;
++ gapstart = end;
++ found = 1;
++ }
++ }
++ if (start < last)
++ last = start;
++ }
++
++ if (!found) {
++ gapstart = (end_pfn << PAGE_SHIFT) + 1024*1024;
++ printk(KERN_ERR "PCI: Warning: Cannot find a gap in the 32bit address range\n"
++ KERN_ERR "PCI: Unassigned devices with 32bit resource registers may break!\n");
++ }
++
++ /*
++ * See how much we want to round up: start off with
++ * rounding to the next 1MB area.
++ */
++ round = 0x100000;
++ while ((gapsize >> 4) > round)
++ round += round;
++ /* Fun with two's complement */
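++ /*
++ * (gapstart + round) & -round rounds gapstart up to the next
++ * multiple of round; e.g. gapstart 0xbff00000 with round 0x100000
++ * gives pci_mem_start 0xc0000000.
++ */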
++ pci_mem_start = (gapstart + round) & -round;
++
++ printk(KERN_INFO "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n",
++ pci_mem_start, gapstart, gapsize);
++}
+Index: head-2008-11-25/arch/x86/kernel/early_printk-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/early_printk-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,302 @@
++#include <linux/console.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/string.h>
++#include <linux/screen_info.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/fcntl.h>
++
++/* Simple VGA output */
++
++#ifdef __i386__
++#include <asm/setup.h>
++#define VGABASE (__ISA_IO_base + 0xb8000)
++#else
++#include <asm/bootsetup.h>
++#define VGABASE ((void __iomem *)0xffffffff800b8000UL)
++#endif
++
++#ifndef CONFIG_XEN
++static int max_ypos = 25, max_xpos = 80;
++static int current_ypos = 25, current_xpos = 0;
++
++static void early_vga_write(struct console *con, const char *str, unsigned n)
++{
++ char c;
++ int i, k, j;
++
++ while ((c = *str++) != '\0' && n-- > 0) {
++ if (current_ypos >= max_ypos) {
++ /* scroll 1 line up */
++ for (k = 1, j = 0; k < max_ypos; k++, j++) {
++ for (i = 0; i < max_xpos; i++) {
++ writew(readw(VGABASE+2*(max_xpos*k+i)),
++ VGABASE + 2*(max_xpos*j + i));
++ }
++ }
++ for (i = 0; i < max_xpos; i++)
++ writew(0x720, VGABASE + 2*(max_xpos*j + i));
++ current_ypos = max_ypos-1;
++ }
++ if (c == '\n') {
++ current_xpos = 0;
++ current_ypos++;
++ } else if (c != '\r') {
++ writew(((0x7 << 8) | (unsigned short) c),
++ VGABASE + 2*(max_xpos*current_ypos +
++ current_xpos++));
++ if (current_xpos >= max_xpos) {
++ current_xpos = 0;
++ current_ypos++;
++ }
++ }
++ }
++}
++
++static struct console early_vga_console = {
++ .name = "earlyvga",
++ .write = early_vga_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Serial functions loosely based on a similar package from Klaus P. Gerlicher */
++
++static int early_serial_base = 0x3f8; /* ttyS0 */
++
++#define XMTRDY 0x20
++
++#define DLAB 0x80
++
++#define TXR 0 /* Transmit register (WRITE) */
++#define RXR 0 /* Receive register (READ) */
++#define IER 1 /* Interrupt Enable */
++#define IIR 2 /* Interrupt ID */
++#define FCR 2 /* FIFO control */
++#define LCR 3 /* Line control */
++#define MCR 4 /* Modem control */
++#define LSR 5 /* Line Status */
++#define MSR 6 /* Modem Status */
++#define DLL 0 /* Divisor Latch Low */
++#define DLH 1 /* Divisor latch High */
++
++static int early_serial_putc(unsigned char ch)
++{
++ unsigned timeout = 0xffff;
++ while ((inb(early_serial_base + LSR) & XMTRDY) == 0 && --timeout)
++ cpu_relax();
++ outb(ch, early_serial_base + TXR);
++ return timeout ? 0 : -1;
++}
++
++static void early_serial_write(struct console *con, const char *s, unsigned n)
++{
++ while (*s && n-- > 0) {
++ early_serial_putc(*s);
++ if (*s == '\n')
++ early_serial_putc('\r');
++ s++;
++ }
++}
++
++#define DEFAULT_BAUD 9600
++
++static __init void early_serial_init(char *s)
++{
++ unsigned char c;
++ unsigned divisor;
++ unsigned baud = DEFAULT_BAUD;
++ char *e;
++
++ if (*s == ',')
++ ++s;
++
++ if (*s) {
++ unsigned port;
++ if (!strncmp(s,"0x",2)) {
++ early_serial_base = simple_strtoul(s, &e, 16);
++ } else {
++ static int bases[] = { 0x3f8, 0x2f8 };
++
++ if (!strncmp(s,"ttyS",4))
++ s += 4;
++ port = simple_strtoul(s, &e, 10);
++ if (port > 1 || s == e)
++ port = 0;
++ early_serial_base = bases[port];
++ }
++ s += strcspn(s, ",");
++ if (*s == ',')
++ s++;
++ }
++
++ outb(0x3, early_serial_base + LCR); /* 8n1 */
++ outb(0, early_serial_base + IER); /* no interrupt */
++ outb(0, early_serial_base + FCR); /* no fifo */
++ outb(0x3, early_serial_base + MCR); /* DTR + RTS */
++
++ if (*s) {
++ baud = simple_strtoul(s, &e, 0);
++ if (baud == 0 || s == e)
++ baud = DEFAULT_BAUD;
++ }
++
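++ /* UART base clock is 115200; e.g. 9600 baud -> divisor 12 */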
++ divisor = 115200 / baud;
++ c = inb(early_serial_base + LCR);
++ outb(c | DLAB, early_serial_base + LCR);
++ outb(divisor & 0xff, early_serial_base + DLL);
++ outb((divisor >> 8) & 0xff, early_serial_base + DLH);
++ outb(c & ~DLAB, early_serial_base + LCR);
++}
++
++#else /* CONFIG_XEN */
++
++static void
++early_serial_write(struct console *con, const char *s, unsigned count)
++{
++ int n;
++
++ while (count > 0) {
++ n = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++ if (n <= 0)
++ break;
++ count -= n;
++ s += n;
++ }
++}
++
++static __init void early_serial_init(char *s)
++{
++}
++
++/*
++ * No early VGA console on Xen, as we do not have convenient ISA-space
++ * mappings. Someone should fix this for domain 0. For now, use fake serial.
++ */
++#define early_vga_console early_serial_console
++
++#endif
++
++static struct console early_serial_console = {
++ .name = "earlyser",
++ .write = early_serial_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Console interface to a host file on AMD's SimNow! */
++
++static int simnow_fd;
++
++enum {
++ MAGIC1 = 0xBACCD00A,
++ MAGIC2 = 0xCA110000,
++ XOPEN = 5,
++ XWRITE = 4,
++};
++
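++/*
++ * SimNow intercepts CPUID when %eax holds MAGIC1 and %edi holds
++ * MAGIC2 + command; %ebx/%ecx/%edx carry the arguments and the result
++ * comes back in %eax (see the asm constraints below).
++ */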
++static noinline long simnow(long cmd, long a, long b, long c)
++{
++ long ret;
++ asm volatile("cpuid" :
++ "=a" (ret) :
++ "b" (a), "c" (b), "d" (c), "0" (MAGIC1), "D" (cmd + MAGIC2));
++ return ret;
++}
++
++void __init simnow_init(char *str)
++{
++ char *fn = "klog";
++ if (*str == '=')
++ fn = ++str;
++ /* error ignored */
++ simnow_fd = simnow(XOPEN, (unsigned long)fn, O_WRONLY|O_APPEND|O_CREAT, 0644);
++}
++
++static void simnow_write(struct console *con, const char *s, unsigned n)
++{
++ simnow(XWRITE, simnow_fd, (unsigned long)s, n);
++}
++
++static struct console simnow_console = {
++ .name = "simnow",
++ .write = simnow_write,
++ .flags = CON_PRINTBUFFER,
++ .index = -1,
++};
++
++/* Direct interface for emergencies */
++struct console *early_console = &early_vga_console;
++static int early_console_initialized = 0;
++
++void early_printk(const char *fmt, ...)
++{
++ char buf[512];
++ int n;
++ va_list ap;
++
++ va_start(ap,fmt);
++ n = vscnprintf(buf,512,fmt,ap);
++ early_console->write(early_console,buf,n);
++ va_end(ap);
++}
++
++static int __initdata keep_early;
++
++int __init setup_early_printk(char *opt)
++{
++ char *space;
++ char buf[256];
++
++ if (early_console_initialized)
++ return 1;
++
++ strlcpy(buf,opt,sizeof(buf));
++ space = strchr(buf, ' ');
++ if (space)
++ *space = 0;
++
++ if (strstr(buf,"keep"))
++ keep_early = 1;
++
++ if (!strncmp(buf, "serial", 6)) {
++ early_serial_init(buf + 6);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "ttyS", 4)) {
++ early_serial_init(buf);
++ early_console = &early_serial_console;
++ } else if (!strncmp(buf, "vga", 3)
++#ifndef CONFIG_XEN
++ && SCREEN_INFO.orig_video_isVGA == 1) {
++ max_xpos = SCREEN_INFO.orig_video_cols;
++ max_ypos = SCREEN_INFO.orig_video_lines;
++ current_ypos = SCREEN_INFO.orig_y;
++#else
++ || !strncmp(buf, "xen", 3)) {
++#endif
++ early_console = &early_vga_console;
++ } else if (!strncmp(buf, "simnow", 6)) {
++ simnow_init(buf + 6);
++ early_console = &simnow_console;
++ keep_early = 1;
++ }
++ early_console_initialized = 1;
++ register_console(early_console);
++ return 0;
++}
++
++void __init disable_early_printk(void)
++{
++ if (!early_console_initialized || !early_console)
++ return;
++ if (!keep_early) {
++ printk("disabling early console\n");
++ unregister_console(early_console);
++ early_console_initialized = 0;
++ } else {
++ printk("keeping early console\n");
++ }
++}
++
++__setup("earlyprintk=", setup_early_printk);
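++
++/*
++ * Example boot parameters (illustrative): "earlyprintk=serial,ttyS0,9600",
++ * "earlyprintk=vga" or "earlyprintk=simnow"; adding ",keep" anywhere in
++ * the option string leaves the early console registered after the real
++ * console takes over.
++ */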
+Index: head-2008-11-25/arch/x86/kernel/entry_64-xen.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/entry_64-xen.S 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,1322 @@
++/*
++ * linux/arch/x86_64/entry.S
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen SuSE Labs
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ *
++ * $Id$
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Asit Mallick <asit.k.mallick@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * entry.S contains the system-call and fault low-level handling routines.
++ *
++ * NOTE: This code handles signal-recognition, which happens every time
++ * after an interrupt and after each system call.
++ *
++ * Normal syscalls and interrupts don't save a full stack frame, this is
++ * only done for syscall tracing, signals or fork/exec et al.
++ *
++ * A note on terminology:
++ * - top of stack: Architecture defined interrupt frame from SS to RIP
++ * at the top of the kernel process stack.
++ * - partial stack frame: partially saved registers up to R11.
++ * - full stack frame: Like partial stack frame, but all register saved.
++ *
++ * TODO:
++ * - schedule it carefully for the final hardware.
++ */
++
++#define ASSEMBLY 1
++#include <linux/linkage.h>
++#include <asm/segment.h>
++#include <asm/smp.h>
++#include <asm/cache.h>
++#include <asm/errno.h>
++#include <asm/dwarf2.h>
++#include <asm/calling.h>
++#include <asm/asm-offsets.h>
++#include <asm/msr.h>
++#include <asm/unistd.h>
++#include <asm/thread_info.h>
++#include <asm/hw_irq.h>
++#include <asm/page.h>
++#include <asm/irqflags.h>
++#include <asm/errno.h>
++#include <xen/interface/arch-x86_64.h>
++#include <xen/interface/features.h>
++
++#include "xen_entry.S"
++
++ .code64
++
++#ifndef CONFIG_PREEMPT
++#define retint_kernel retint_restore_args
++#endif
++
++
++.macro TRACE_IRQS_IRETQ offset=ARGOFFSET
++#ifdef CONFIG_TRACE_IRQFLAGS
++ bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
++ jnc 1f
++ TRACE_IRQS_ON
++1:
++#endif
++.endm
++
++NMI_MASK = 0x80000000
++
++/*
++ * C code is not supposed to know about undefined top of stack. Every time
++ * a C function with a pt_regs argument is called from the SYSCALL based
++ * fast path, FIXUP_TOP_OF_STACK is needed.
++ * RESTORE_TOP_OF_STACK syncs the syscall state after any possible ptregs
++ * manipulation.
++ */
++
++ /* %rsp:at FRAMEEND */
++ .macro FIXUP_TOP_OF_STACK tmp
++ movq $__USER_CS,CS(%rsp)
++ movq $-1,RCX(%rsp)
++ .endm
++
++ .macro RESTORE_TOP_OF_STACK tmp,offset=0
++ .endm
++
++ .macro FAKE_STACK_FRAME child_rip
++ /* push in order ss, rsp, eflags, cs, rip */
++ xorl %eax, %eax
++ pushq %rax /* ss */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET ss,0*/
++ pushq %rax /* rsp */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rsp,0
++ pushq $(1<<9) /* eflags - interrupts on */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET rflags,0*/
++ pushq $__KERNEL_CS /* cs */
++ CFI_ADJUST_CFA_OFFSET 8
++ /*CFI_REL_OFFSET cs,0*/
++ pushq \child_rip /* rip */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip,0
++ pushq %rax /* orig rax */
++ CFI_ADJUST_CFA_OFFSET 8
++ .endm
++
++ .macro UNFAKE_STACK_FRAME
++ addq $8*6, %rsp
++ CFI_ADJUST_CFA_OFFSET -(6*8)
++ .endm
++
++ .macro CFI_DEFAULT_STACK start=1,adj=0
++ .if \start
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8 - \adj*ARGOFFSET
++ .else
++ CFI_DEF_CFA_OFFSET SS+8 - \adj*ARGOFFSET
++ .endif
++ .if \adj == 0
++ CFI_REL_OFFSET r15,R15
++ CFI_REL_OFFSET r14,R14
++ CFI_REL_OFFSET r13,R13
++ CFI_REL_OFFSET r12,R12
++ CFI_REL_OFFSET rbp,RBP
++ CFI_REL_OFFSET rbx,RBX
++ .endif
++ CFI_REL_OFFSET r11,R11 - \adj*ARGOFFSET
++ CFI_REL_OFFSET r10,R10 - \adj*ARGOFFSET
++ CFI_REL_OFFSET r9,R9 - \adj*ARGOFFSET
++ CFI_REL_OFFSET r8,R8 - \adj*ARGOFFSET
++ CFI_REL_OFFSET rax,RAX - \adj*ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX - \adj*ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX - \adj*ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI - \adj*ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI - \adj*ARGOFFSET
++ CFI_REL_OFFSET rip,RIP - \adj*ARGOFFSET
++ /*CFI_REL_OFFSET cs,CS - \adj*ARGOFFSET*/
++ /*CFI_REL_OFFSET rflags,EFLAGS - \adj*ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP - \adj*ARGOFFSET
++ /*CFI_REL_OFFSET ss,SS - \adj*ARGOFFSET*/
++ .endm
++
++ /*
++ * Must be consistent with the definition in arch-x86/xen-x86_64.h:
++ * struct iret_context {
++ * u64 rax, r11, rcx, flags, rip, cs, rflags, rsp, ss;
++ * };
++ * with rax, r11, and rcx being taken care of in the hypercall stub.
++ */
++ .macro HYPERVISOR_IRET flag
++ testb $3,1*8(%rsp)
++ jnz 2f
++ testl $NMI_MASK,2*8(%rsp)
++ jnz 2f
++
++ cmpb $0,(xen_features+XENFEAT_supervisor_mode_kernel)(%rip)
++ jne 1f
++
++ /* Direct iret to kernel space. Correct CS and SS. */
++ orl $3,1*8(%rsp)
++ orl $3,4*8(%rsp)
++1: iretq
++
++2: /* Slow iret via hypervisor. */
++ andl $~NMI_MASK, 2*8(%rsp)
++ pushq $\flag
++ jmp hypercall_page + (__HYPERVISOR_iret * 32)
++ .endm
++
++/*
++ * A newly forked process directly context switches into this.
++ */
++/* rdi: prev */
++ENTRY(ret_from_fork)
++ CFI_DEFAULT_STACK
++ push kernel_eflags(%rip)
++ CFI_ADJUST_CFA_OFFSET 4
++ popf # reset kernel eflags
++ CFI_ADJUST_CFA_OFFSET -4
++ call schedule_tail
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),threadinfo_flags(%rcx)
++ jnz rff_trace
++rff_action:
++ RESTORE_REST
++ testl $3,CS-ARGOFFSET(%rsp) # from kernel_thread?
++ je int_ret_from_sys_call
++ testl $_TIF_IA32,threadinfo_flags(%rcx)
++ jnz int_ret_from_sys_call
++ RESTORE_TOP_OF_STACK %rdi,ARGOFFSET
++ jmp ret_from_sys_call
++rff_trace:
++ movq %rsp,%rdi
++ call syscall_trace_leave
++ GET_THREAD_INFO(%rcx)
++ jmp rff_action
++ CFI_ENDPROC
++END(ret_from_fork)
++
++/*
++ * initial frame state for interrupts and exceptions
++ */
++ .macro _frame ref
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-\ref
++ /*CFI_REL_OFFSET ss,SS-\ref*/
++ CFI_REL_OFFSET rsp,RSP-\ref
++ /*CFI_REL_OFFSET rflags,EFLAGS-\ref*/
++ /*CFI_REL_OFFSET cs,CS-\ref*/
++ CFI_REL_OFFSET rip,RIP-\ref
++ .endm
++
++/*
++ * System call entry. Up to 6 arguments in registers are supported.
++ *
++ * SYSCALL does not save anything on the stack and does not change the
++ * stack pointer.
++ */
++
++/*
++ * Register setup:
++ * rax system call number
++ * rdi arg0
++ * rcx return address for syscall/sysret, C arg3
++ * rsi arg1
++ * rdx arg2
++ * r10 arg3 (--> moved to rcx for C)
++ * r8 arg4
++ * r9 arg5
++ * r11 eflags for syscall/sysret, temporary for C
++ * r12-r15,rbp,rbx saved by C code, not touched.
++ *
++ * Interrupts are enabled on entry.
++ * Only called from user space.
++ *
++ * XXX if we had a free scratch register we could save the RSP into the stack frame
++ * and report it properly in ps. Unfortunately we don't have one.
++ *
++ * When the user can change the frame, always force IRET. That is because
++ * IRET deals with non-canonical addresses better. SYSRET has trouble
++ * with them due to bugs in both AMD and Intel CPUs.
++ */
++
++ENTRY(system_call)
++ _frame (RIP-0x10)
++ SAVE_ARGS -8,0
++ movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
++ GET_THREAD_INFO(%rcx)
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SECCOMP),threadinfo_flags(%rcx)
++ CFI_REMEMBER_STATE
++ jnz tracesys
++ cmpq $__NR_syscall_max,%rax
++ ja badsys
++ movq %r10,%rcx
++ call *sys_call_table(,%rax,8) # XXX: rip relative
++ movq %rax,RAX-ARGOFFSET(%rsp)
++/*
++ * Syscall return path ending with SYSRET (fast path)
++ * Has incomplete stack frame and undefined top of stack.
++ */
++ .globl ret_from_sys_call
++ret_from_sys_call:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: flagmask */
++sysret_check:
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz sysret_careful
++ /*
++ * sysretq will re-enable interrupts:
++ */
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET VGCF_IN_SYSCALL
++
++ /* Handle reschedules */
++ /* edx: work, edi: workmask */
++sysret_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc sysret_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ jmp sysret_check
++
++ /* Handle a signal */
++sysret_signal:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz 1f
++
++ /* Really a signal */
++ /* edx: work flags (arg3) */
++ leaq do_notify_resume(%rip),%rax
++ leaq -ARGOFFSET(%rsp),%rdi # &pt_regs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call ptregscall_common
++1: movl $_TIF_NEED_RESCHED,%edi
++ /* Use IRET because user could have changed frame. This
++ works because ptregscall_common has called FIXUP_TOP_OF_STACK. */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++badsys:
++ movq $-ENOSYS,RAX-ARGOFFSET(%rsp)
++ jmp ret_from_sys_call
++
++ /* Do syscall tracing */
++tracesys:
++ CFI_RESTORE_STATE
++ SAVE_REST
++ movq $-ENOSYS,RAX(%rsp)
++ FIXUP_TOP_OF_STACK %rdi
++ movq %rsp,%rdi
++ call syscall_trace_enter
++ LOAD_ARGS ARGOFFSET /* reload args from stack in case ptrace changed it */
++ RESTORE_REST
++ cmpq $__NR_syscall_max,%rax
++ ja 1f
++ movq %r10,%rcx /* fixup for C */
++ call *sys_call_table(,%rax,8)
++1: movq %rax,RAX-ARGOFFSET(%rsp)
++ /* Use IRET because user could have changed frame */
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(system_call)
++
++/*
++ * Syscall return path ending with IRET.
++ * Has correct top of stack, but partial stack frame.
++ */
++ENTRY(int_ret_from_sys_call)
++ CFI_STARTPROC simple
++ CFI_DEF_CFA rsp,SS+8-ARGOFFSET
++ /*CFI_REL_OFFSET ss,SS-ARGOFFSET*/
++ CFI_REL_OFFSET rsp,RSP-ARGOFFSET
++ /*CFI_REL_OFFSET rflags,EFLAGS-ARGOFFSET*/
++ /*CFI_REL_OFFSET cs,CS-ARGOFFSET*/
++ CFI_REL_OFFSET rip,RIP-ARGOFFSET
++ CFI_REL_OFFSET rdx,RDX-ARGOFFSET
++ CFI_REL_OFFSET rcx,RCX-ARGOFFSET
++ CFI_REL_OFFSET rax,RAX-ARGOFFSET
++ CFI_REL_OFFSET rdi,RDI-ARGOFFSET
++ CFI_REL_OFFSET rsi,RSI-ARGOFFSET
++ CFI_REL_OFFSET r8,R8-ARGOFFSET
++ CFI_REL_OFFSET r9,R9-ARGOFFSET
++ CFI_REL_OFFSET r10,R10-ARGOFFSET
++ CFI_REL_OFFSET r11,R11-ARGOFFSET
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ testb $3,CS-ARGOFFSET(%rsp)
++ jnz 1f
++ /* Need to set the proper %ss (not NULL) for ring 3 iretq */
++ movl $__KERNEL_DS,SS-ARGOFFSET(%rsp)
++ jmp retint_restore_args # return from ring 3 kernel
++1:
++ movl $_TIF_ALLWORK_MASK,%edi
++ /* edi: mask to check */
++int_with_check:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ jnz int_careful
++ andl $~TS_COMPAT,threadinfo_status(%rcx)
++ jmp retint_restore_args
++
++ /* Either reschedule or signal or syscall exit tracking needed. */
++ /* First do a reschedule test. */
++ /* edx: work, edi: workmask */
++int_careful:
++ bt $TIF_NEED_RESCHED,%edx
++ jnc int_very_careful
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++
++ /* handle signals and tracing -- both require a full stack frame */
++int_very_careful:
++ TRACE_IRQS_ON
++/* sti */
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ /* Check for syscall exit trace */
++ testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edx
++ jz int_signal
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ leaq 8(%rsp),%rdi # &ptregs -> arg1
++ call syscall_trace_leave
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ andl $~(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT|_TIF_SINGLESTEP),%edi
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_restore_rest
++
++int_signal:
++ testl $(_TIF_NOTIFY_RESUME|_TIF_SIGPENDING|_TIF_SINGLESTEP),%edx
++ jz 1f
++ movq %rsp,%rdi # &ptregs -> arg1
++ xorl %esi,%esi # oldset -> arg2
++ call do_notify_resume
++1: movl $_TIF_NEED_RESCHED,%edi
++int_restore_rest:
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ jmp int_with_check
++ CFI_ENDPROC
++END(int_ret_from_sys_call)
++
++/*
++ * Certain special system calls that need to save a complete full stack frame.
++ */
++
++ .macro PTREGSCALL label,func,arg
++ .globl \label
++\label:
++ leaq \func(%rip),%rax
++ leaq -ARGOFFSET+8(%rsp),\arg /* 8 for return address */
++ jmp ptregscall_common
++END(\label)
++ .endm
++
++ CFI_STARTPROC
++
++ PTREGSCALL stub_clone, sys_clone, %r8
++ PTREGSCALL stub_fork, sys_fork, %rdi
++ PTREGSCALL stub_vfork, sys_vfork, %rdi
++ PTREGSCALL stub_rt_sigsuspend, sys_rt_sigsuspend, %rdx
++ PTREGSCALL stub_sigaltstack, sys_sigaltstack, %rdx
++ PTREGSCALL stub_iopl, sys_iopl, %rsi
++
++ENTRY(ptregscall_common)
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ movq %r11, %r15
++ CFI_REGISTER rip, r15
++ FIXUP_TOP_OF_STACK %r11
++ call *%rax
++ RESTORE_TOP_OF_STACK %r11
++ movq %r15, %r11
++ CFI_REGISTER rip, r11
++ RESTORE_REST
++ pushq %r11
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rip, 0
++ ret
++ CFI_ENDPROC
++END(ptregscall_common)
++
++ENTRY(stub_execve)
++ CFI_STARTPROC
++ popq %r11
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_REGISTER rip, r11
++ SAVE_REST
++ FIXUP_TOP_OF_STACK %r11
++ call sys_execve
++ RESTORE_TOP_OF_STACK %r11
++ movq %rax,RAX(%rsp)
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_execve)
++
++/*
++ * sigreturn is special because it needs to restore all registers on return.
++ * This cannot be done with SYSRET, so use the IRET return path instead.
++ */
++ENTRY(stub_rt_sigreturn)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ SAVE_REST
++ movq %rsp,%rdi
++ FIXUP_TOP_OF_STACK %r11
++ call sys_rt_sigreturn
++ movq %rax,RAX(%rsp) # fixme, this could be done at the higher layer
++ RESTORE_REST
++ jmp int_ret_from_sys_call
++ CFI_ENDPROC
++END(stub_rt_sigreturn)
++
++/* initial frame state for interrupts (and exceptions without error code) */
++#define INTR_FRAME _frame (RIP-0x10); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/* initial frame state for exceptions with error code (and interrupts with
++ vector already pushed) */
++#define XCPT_FRAME _frame (RIP-0x18); \
++ CFI_REL_OFFSET rcx,0; \
++ CFI_REL_OFFSET r11,8
++
++/*
++ * Interrupt exit.
++ *
++ */
++
++retint_check:
++ CFI_DEFAULT_STACK adj=1
++ movl threadinfo_flags(%rcx),%edx
++ andl %edi,%edx
++ CFI_REMEMBER_STATE
++ jnz retint_careful
++retint_restore_args:
++ movl EFLAGS-REST_SKIP(%rsp), %eax
++ shr $9, %eax # EAX[0] == IRET_EFLAGS.IF
++ XEN_GET_VCPU_INFO(%rsi)
++ andb evtchn_upcall_mask(%rsi),%al
++ andb $1,%al # EAX[0] == IRET_EFLAGS.IF & event_mask
++ jnz restore_all_enable_events # != 0 => enable event delivery
++ XEN_PUT_VCPU_INFO(%rsi)
++
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ /* edi: workmask, edx: work */
++retint_careful:
++ CFI_RESTORE_STATE
++ bt $TIF_NEED_RESCHED,%edx
++ jnc retint_signal
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++/* sti */
++ pushq %rdi
++ CFI_ADJUST_CFA_OFFSET 8
++ call schedule
++ popq %rdi
++ CFI_ADJUST_CFA_OFFSET -8
++ GET_THREAD_INFO(%rcx)
++ XEN_BLOCK_EVENTS(%rsi)
++/* cli */
++ TRACE_IRQS_OFF
++ jmp retint_check
++
++retint_signal:
++ testl $(_TIF_SIGPENDING|_TIF_NOTIFY_RESUME|_TIF_SINGLESTEP),%edx
++ jz retint_restore_args
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi)
++ SAVE_REST
++ movq $-1,ORIG_RAX(%rsp)
++ xorl %esi,%esi # oldset
++ movq %rsp,%rdi # &pt_regs
++ call do_notify_resume
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ movl $_TIF_NEED_RESCHED,%edi
++ GET_THREAD_INFO(%rcx)
++ jmp retint_check
++
++#ifdef CONFIG_PREEMPT
++ /* Returning to kernel space. Check if we need preemption */
++ /* rcx: threadinfo. interrupts off. */
++ .p2align
++retint_kernel:
++ cmpl $0,threadinfo_preempt_count(%rcx)
++ jnz retint_restore_args
++ bt $TIF_NEED_RESCHED,threadinfo_flags(%rcx)
++ jnc retint_restore_args
++ bt $9,EFLAGS-ARGOFFSET(%rsp) /* interrupts off? */
++ jnc retint_restore_args
++ call preempt_schedule_irq
++ jmp retint_kernel /* check again */
++#endif
++
++ CFI_ENDPROC
++END(retint_check)
++
++#ifndef CONFIG_XEN
++/*
++ * APIC interrupts.
++ */
++ .macro apicinterrupt num,func
++ INTR_FRAME
++ pushq $~(\num)
++ CFI_ADJUST_CFA_OFFSET 8
++ interrupt \func
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ENTRY(thermal_interrupt)
++ apicinterrupt THERMAL_APIC_VECTOR,smp_thermal_interrupt
++END(thermal_interrupt)
++
++ENTRY(threshold_interrupt)
++ apicinterrupt THRESHOLD_APIC_VECTOR,mce_threshold_interrupt
++END(threshold_interrupt)
++
++#ifdef CONFIG_SMP
++ENTRY(reschedule_interrupt)
++ apicinterrupt RESCHEDULE_VECTOR,smp_reschedule_interrupt
++END(reschedule_interrupt)
++
++ .macro INVALIDATE_ENTRY num
++ENTRY(invalidate_interrupt\num)
++ apicinterrupt INVALIDATE_TLB_VECTOR_START+\num,smp_invalidate_interrupt
++END(invalidate_interrupt\num)
++ .endm
++
++ INVALIDATE_ENTRY 0
++ INVALIDATE_ENTRY 1
++ INVALIDATE_ENTRY 2
++ INVALIDATE_ENTRY 3
++ INVALIDATE_ENTRY 4
++ INVALIDATE_ENTRY 5
++ INVALIDATE_ENTRY 6
++ INVALIDATE_ENTRY 7
++
++ENTRY(call_function_interrupt)
++ apicinterrupt CALL_FUNCTION_VECTOR,smp_call_function_interrupt
++END(call_function_interrupt)
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ENTRY(apic_timer_interrupt)
++ apicinterrupt LOCAL_TIMER_VECTOR,smp_apic_timer_interrupt
++END(apic_timer_interrupt)
++
++ENTRY(error_interrupt)
++ apicinterrupt ERROR_APIC_VECTOR,smp_error_interrupt
++END(error_interrupt)
++
++ENTRY(spurious_interrupt)
++ apicinterrupt SPURIOUS_APIC_VECTOR,smp_spurious_interrupt
++END(spurious_interrupt)
++#endif
++#endif /* !CONFIG_XEN */
++
++/*
++ * Exception entry points.
++ */
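++/*
++ * zeroentry pushes a fake 0 error code for exceptions that don't supply
++ * one; errorentry relies on the error code already pushed on the stack.
++ * Both stash the handler address in %rax and jump to error_entry.
++ */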
++ .macro zeroentry sym
++ INTR_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq $0 /* push error code/oldrax */
++ CFI_ADJUST_CFA_OFFSET 8
++ pushq %rax /* push real oldrax to the rdi slot */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++ .macro errorentry sym
++ XCPT_FRAME
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x10,%rsp /* rsp points to the error code */
++ CFI_ADJUST_CFA_OFFSET -0x10
++ pushq %rax
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rax,0
++ leaq \sym(%rip),%rax
++ jmp error_entry
++ CFI_ENDPROC
++ .endm
++
++#if 0 /* not XEN */
++ /* error code is on the stack already */
++ /* handle NMI like exceptions that can happen everywhere */
++ .macro paranoidentry sym, ist=0, irqtrace=1
++ movq (%rsp),%rcx
++ movq 8(%rsp),%r11
++ addq $0x10,%rsp /* skip rcx and r11 */
++ SAVE_ALL
++ cld
++#if 0 /* not XEN */
++ movl $1,%ebx
++ movl $MSR_GS_BASE,%ecx
++ rdmsr
++ testl %edx,%edx
++ js 1f
++ swapgs
++ xorl %ebx,%ebx
++1:
++#endif
++ .if \ist
++ movq %gs:pda_data_offset, %rbp
++ .endif
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi
++ movq $-1,ORIG_RAX(%rsp)
++ .if \ist
++ subq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++ call \sym
++ .if \ist
++ addq $EXCEPTION_STKSZ, per_cpu__init_tss + TSS_ist + (\ist - 1) * 8(%rbp)
++ .endif
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ .if \irqtrace
++ TRACE_IRQS_OFF
++ .endif
++ .endm
++
++ /*
++ * "Paranoid" exit path from exception stack.
++ * Paranoid because this is used by NMIs and cannot take
++ * any kernel state for granted.
++ * We don't do kernel preemption checks here, because only
++ * NMI should be common and it does not enable IRQs and
++ * cannot get reschedule ticks.
++ *
++ * "trace" is 0 for the NMI handler only, because irq-tracing
++ * is fundamentally NMI-unsafe. (we cannot change the soft and
++ * hard flags at once, atomically)
++ */
++ .macro paranoidexit trace=1
++ /* ebx: no swapgs flag */
++paranoid_exit\trace:
++ testl %ebx,%ebx /* swapgs needed? */
++ jnz paranoid_restore\trace
++ testl $3,CS(%rsp)
++ jnz paranoid_userspace\trace
++paranoid_swapgs\trace:
++ TRACE_IRQS_IRETQ 0
++ swapgs
++paranoid_restore\trace:
++ RESTORE_ALL 8
++ iretq
++paranoid_userspace\trace:
++ GET_THREAD_INFO(%rcx)
++ movl threadinfo_flags(%rcx),%ebx
++ andl $_TIF_WORK_MASK,%ebx
++ jz paranoid_swapgs\trace
++ movq %rsp,%rdi /* &pt_regs */
++ call sync_regs
++ movq %rax,%rsp /* switch stack for scheduling */
++ testl $_TIF_NEED_RESCHED,%ebx
++ jnz paranoid_schedule\trace
++ movl %ebx,%edx /* arg3: thread flags */
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ xorl %esi,%esi /* arg2: oldset */
++ movq %rsp,%rdi /* arg1: &pt_regs */
++ call do_notify_resume
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++paranoid_schedule\trace:
++ .if \trace
++ TRACE_IRQS_ON
++ .endif
++ sti
++ call schedule
++ cli
++ .if \trace
++ TRACE_IRQS_OFF
++ .endif
++ jmp paranoid_userspace\trace
++ CFI_ENDPROC
++ .endm
++#endif
++
++/*
++ * Exception entry point. This expects an error code/orig_rax on the stack
++ * and the exception handler in %rax.
++ */
++ENTRY(error_entry)
++ _frame RDI
++ CFI_REL_OFFSET rax,0
++ /* rdi slot contains rax, oldrax contains error code */
++ cld
++ subq $14*8,%rsp
++ CFI_ADJUST_CFA_OFFSET (14*8)
++ movq %rsi,13*8(%rsp)
++ CFI_REL_OFFSET rsi,RSI
++ movq 14*8(%rsp),%rsi /* load rax from rdi slot */
++ CFI_REGISTER rax,rsi
++ movq %rdx,12*8(%rsp)
++ CFI_REL_OFFSET rdx,RDX
++ movq %rcx,11*8(%rsp)
++ CFI_REL_OFFSET rcx,RCX
++ movq %rsi,10*8(%rsp) /* store rax */
++ CFI_REL_OFFSET rax,RAX
++ movq %r8, 9*8(%rsp)
++ CFI_REL_OFFSET r8,R8
++ movq %r9, 8*8(%rsp)
++ CFI_REL_OFFSET r9,R9
++ movq %r10,7*8(%rsp)
++ CFI_REL_OFFSET r10,R10
++ movq %r11,6*8(%rsp)
++ CFI_REL_OFFSET r11,R11
++ movq %rbx,5*8(%rsp)
++ CFI_REL_OFFSET rbx,RBX
++ movq %rbp,4*8(%rsp)
++ CFI_REL_OFFSET rbp,RBP
++ movq %r12,3*8(%rsp)
++ CFI_REL_OFFSET r12,R12
++ movq %r13,2*8(%rsp)
++ CFI_REL_OFFSET r13,R13
++ movq %r14,1*8(%rsp)
++ CFI_REL_OFFSET r14,R14
++ movq %r15,(%rsp)
++ CFI_REL_OFFSET r15,R15
++#if 0
++ cmpl $__KERNEL_CS,CS(%rsp)
++ CFI_REMEMBER_STATE
++ je error_kernelspace
++#endif
++error_call_handler:
++ movq %rdi, RDI(%rsp)
++ CFI_REL_OFFSET rdi,RDI
++ movq %rsp,%rdi
++ movq ORIG_RAX(%rsp),%rsi # get error code
++ movq $-1,ORIG_RAX(%rsp)
++ call *%rax
++error_exit:
++ RESTORE_REST
++/* cli */
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ testb $3,CS-ARGOFFSET(%rsp)
++ jz retint_kernel
++ movl threadinfo_flags(%rcx),%edx
++ movl $_TIF_WORK_MASK,%edi
++ andl %edi,%edx
++ jnz retint_careful
++ /*
++ * The iret might restore flags:
++ */
++ TRACE_IRQS_IRETQ
++ jmp retint_restore_args
++
++#if 0
++ /*
++ * We need to re-write the logic here because we don't do iretq
++ * to return to user mode. It's still possible that we get a trap/fault
++ * in the kernel (when accessing buffers pointed to by system calls,
++ * for example).
++ *
++ */
++ CFI_RESTORE_STATE
++error_kernelspace:
++ incl %ebx
++ /* There are two places in the kernel that can potentially fault with
++ usergs. Handle them here. The exception handlers after
++ iret run with kernel gs again, so don't set the user space flag.
++ B stepping K8s sometimes report a truncated RIP for IRET
++ exceptions returning to compat mode. Check for these here too. */
++ leaq iret_label(%rip),%rbp
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ movl %ebp,%ebp /* zero extend */
++ cmpq %rbp,RIP(%rsp)
++ je error_swapgs
++ cmpq $gs_change,RIP(%rsp)
++ je error_swapgs
++ jmp error_sti
++#endif
++ CFI_ENDPROC
++END(error_entry)
++
++ENTRY(hypervisor_callback)
++ zeroentry do_hypervisor_callback
++END(hypervisor_callback)
++
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++# A note on the "critical region" in our callback handler.
++# We want to avoid stacking callback handlers due to events occurring
++# during handling of the last event. To do this, we keep events disabled
++# until we've done all processing. HOWEVER, we must enable events before
++# popping the stack frame (can't be done atomically) and so it would still
++# be possible to get enough handler activations to overflow the stack.
++# Although unlikely, bugs of that kind are hard to track down, so we'd
++# like to avoid the possibility.
++# So, on entry to the handler we detect whether we interrupted an
++# existing activation in its critical region -- if so, we pop the current
++# activation and restart the handler using the previous one.
++ENTRY(do_hypervisor_callback) # do_hypervisor_callback(struct pt_regs *)
++ CFI_STARTPROC
++# Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
++# see the correct pointer to the pt_regs
++ movq %rdi, %rsp # we don't return, adjust the stack frame
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++11: incl %gs:pda_irqcount
++ movq %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ cmovzq %gs:pda_irqstackptr,%rsp
++ pushq %rbp # backlink for old unwinder
++ call evtchn_do_upcall
++ popq %rsp
++ CFI_DEF_CFA_REGISTER rsp
++ decl %gs:pda_irqcount
++ jmp error_exit
++ CFI_ENDPROC
++END(do_hypervisor_callback)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++KPROBE_ENTRY(nmi)
++ zeroentry do_nmi_callback
++ENTRY(do_nmi_callback)
++ CFI_STARTPROC
++ addq $8, %rsp
++ CFI_ENDPROC
++ CFI_DEFAULT_STACK
++ call do_nmi
++ orl $NMI_MASK,EFLAGS(%rsp)
++ RESTORE_REST
++ XEN_BLOCK_EVENTS(%rsi)
++ TRACE_IRQS_OFF
++ GET_THREAD_INFO(%rcx)
++ jmp retint_restore_args
++ CFI_ENDPROC
++ .previous .text
++END(nmi)
++#endif
++
++ ALIGN
++restore_all_enable_events:
++ CFI_DEFAULT_STACK adj=1
++ TRACE_IRQS_ON
++ XEN_UNBLOCK_EVENTS(%rsi) # %rsi is already set up...
++
++scrit: /**** START OF CRITICAL REGION ****/
++ XEN_TEST_PENDING(%rsi)
++ CFI_REMEMBER_STATE
++ jnz 14f # process more events if necessary...
++ XEN_PUT_VCPU_INFO(%rsi)
++ RESTORE_ARGS 0,8,0
++ HYPERVISOR_IRET 0
++
++ CFI_RESTORE_STATE
++14: XEN_LOCKED_BLOCK_EVENTS(%rsi)
++ XEN_PUT_VCPU_INFO(%rsi)
++ SAVE_REST
++ movq %rsp,%rdi # set the argument again
++ jmp 11b
++ CFI_ENDPROC
++ecrit: /**** END OF CRITICAL REGION ****/
++# At this point, unlike on x86-32, we don't do the fixup to simplify the
++# code and the stack frame is more complex on x86-64.
++# When the kernel is interrupted in the critical section, the kernel
++# will do IRET in that case, and everything will be restored at that point,
++# i.e. it just resumes from the next instruction interrupted with the same context.
++
++# Hypervisor uses this for application faults while it executes.
++# We get here for two reasons:
++# 1. Fault while reloading DS, ES, FS or GS
++# 2. Fault while executing IRET
++# Category 1 we do not need to fix up as Xen has already reloaded all segment
++# registers that could be reloaded and zeroed the others.
++# Category 2 we fix up by killing the current process. We cannot use the
++# normal Linux return path in this case because if we use the IRET hypercall
++# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
++# We distinguish between categories by comparing each saved segment register
++# with its current contents: any discrepancy means we are in category 1.
++ENTRY(failsafe_callback)
++ _frame (RIP-0x30)
++ CFI_REL_OFFSET rcx, 0
++ CFI_REL_OFFSET r11, 8
++ movw %ds,%cx
++ cmpw %cx,0x10(%rsp)
++ CFI_REMEMBER_STATE
++ jne 1f
++ movw %es,%cx
++ cmpw %cx,0x18(%rsp)
++ jne 1f
++ movw %fs,%cx
++ cmpw %cx,0x20(%rsp)
++ jne 1f
++ movw %gs,%cx
++ cmpw %cx,0x28(%rsp)
++ jne 1f
++ /* All segments match their saved values => Category 2 (Bad IRET). */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ movq $11,%rdi /* SIGSEGV */
++ jmp do_exit
++ CFI_RESTORE_STATE
++1: /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
++ movq (%rsp),%rcx
++ CFI_RESTORE rcx
++ movq 8(%rsp),%r11
++ CFI_RESTORE r11
++ addq $0x30,%rsp
++ CFI_ADJUST_CFA_OFFSET -0x30
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ SAVE_ALL
++ jmp error_exit
++ CFI_ENDPROC
++#if 0
++ .section __ex_table,"a"
++ .align 8
++ .quad gs_change,bad_gs
++ .previous
++ .section .fixup,"ax"
++ /* running with kernelgs */
++bad_gs:
++/* swapgs */ /* switch back to user gs */
++ xorl %eax,%eax
++ movl %eax,%gs
++ jmp 2b
++ .previous
++#endif
++
++/*
++ * Create a kernel thread.
++ *
++ * C extern interface:
++ * extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
++ *
++ * asm input arguments:
++ * rdi: fn, rsi: arg, rdx: flags
++ */
++ENTRY(kernel_thread)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $child_rip
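++ # build a fake pt_regs frame whose return address is child_rip, so
++ # the child created by do_fork starts executing there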
++ SAVE_ALL
++
++ # rdi: flags, rsi: usp, rdx: will be &pt_regs
++ movq %rdx,%rdi
++ orq kernel_thread_flags(%rip),%rdi
++ movq $-1, %rsi
++ movq %rsp, %rdx
++
++ xorl %r8d,%r8d
++ xorl %r9d,%r9d
++
++ # clone now
++ call do_fork
++ movq %rax,RAX(%rsp)
++ xorl %edi,%edi
++
++ /*
++ * It isn't worth checking for a reschedule here, so within the x86_64
++ * port you can rely on kernel_thread() not rescheduling the child
++ * before returning; this avoids the need for hacks, e.g. to fork off
++ * the per-CPU idle tasks.
++ * [Hopefully no generic code relies on the reschedule -AK]
++ */
++ RESTORE_ALL
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(kernel_thread)
++
++child_rip:
++ pushq $0 # fake return address
++ CFI_STARTPROC
++ /*
++ * Here we are in the child and the registers are set as they were
++ * at kernel_thread() invocation in the parent.
++ */
++ movq %rdi, %rax
++ movq %rsi, %rdi
++ call *%rax
++ # exit
++ xorl %edi, %edi
++ call do_exit
++ CFI_ENDPROC
++ENDPROC(child_rip)
++
++/*
++ * execve(). This function needs to use IRET, not SYSRET, to set up all state properly.
++ *
++ * C extern interface:
++ * extern long execve(char *name, char **argv, char **envp)
++ *
++ * asm input arguments:
++ * rdi: name, rsi: argv, rdx: envp
++ *
++ * We want to fall back into:
++ * extern long sys_execve(char *name, char **argv,char **envp, struct pt_regs regs)
++ *
++ * do_sys_execve asm fallback arguments:
++ * rdi: name, rsi: argv, rdx: envp, fake frame on the stack
++ */
++ENTRY(execve)
++ CFI_STARTPROC
++ FAKE_STACK_FRAME $0
++ SAVE_ALL
++ call sys_execve
++ movq %rax, RAX(%rsp)
++ RESTORE_REST
++ testq %rax,%rax
++ jne 1f
++ jmp int_ret_from_sys_call
++1: RESTORE_ARGS
++ UNFAKE_STACK_FRAME
++ ret
++ CFI_ENDPROC
++ENDPROC(execve)
++
++KPROBE_ENTRY(page_fault)
++ errorentry do_page_fault
++END(page_fault)
++ .previous .text
++
++ENTRY(coprocessor_error)
++ zeroentry do_coprocessor_error
++END(coprocessor_error)
++
++ENTRY(simd_coprocessor_error)
++ zeroentry do_simd_coprocessor_error
++END(simd_coprocessor_error)
++
++ENTRY(device_not_available)
++ zeroentry math_state_restore
++END(device_not_available)
++
++ /* runs on exception stack */
++KPROBE_ENTRY(debug)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_debug
++/* paranoidexit
++ CFI_ENDPROC */
++END(debug)
++ .previous .text
++
++#if 0
++ /* runs on exception stack */
++KPROBE_ENTRY(nmi)
++ INTR_FRAME
++ pushq $-1
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_nmi, 0, 0
++#ifdef CONFIG_TRACE_IRQFLAGS
++ paranoidexit 0
++#else
++ jmp paranoid_exit1
++ CFI_ENDPROC
++#endif
++END(nmi)
++ .previous .text
++#endif
++
++KPROBE_ENTRY(int3)
++/* INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8 */
++ zeroentry do_int3
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(int3)
++ .previous .text
++
++ENTRY(overflow)
++ zeroentry do_overflow
++END(overflow)
++
++ENTRY(bounds)
++ zeroentry do_bounds
++END(bounds)
++
++ENTRY(invalid_op)
++ zeroentry do_invalid_op
++END(invalid_op)
++
++ENTRY(coprocessor_segment_overrun)
++ zeroentry do_coprocessor_segment_overrun
++END(coprocessor_segment_overrun)
++
++ENTRY(reserved)
++ zeroentry do_reserved
++END(reserved)
++
++#if 0
++ /* runs on exception stack */
++ENTRY(double_fault)
++ XCPT_FRAME
++ paranoidentry do_double_fault
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(double_fault)
++#endif
++
++ENTRY(invalid_TSS)
++ errorentry do_invalid_TSS
++END(invalid_TSS)
++
++ENTRY(segment_not_present)
++ errorentry do_segment_not_present
++END(segment_not_present)
++
++ /* runs on exception stack */
++ENTRY(stack_segment)
++/* XCPT_FRAME
++ paranoidentry do_stack_segment */
++ errorentry do_stack_segment
++/* jmp paranoid_exit1
++ CFI_ENDPROC */
++END(stack_segment)
++
++KPROBE_ENTRY(general_protection)
++ errorentry do_general_protection
++END(general_protection)
++ .previous .text
++
++ENTRY(alignment_check)
++ errorentry do_alignment_check
++END(alignment_check)
++
++ENTRY(divide_error)
++ zeroentry do_divide_error
++END(divide_error)
++
++ENTRY(spurious_interrupt_bug)
++ zeroentry do_spurious_interrupt_bug
++END(spurious_interrupt_bug)
++
++#ifdef CONFIG_X86_MCE
++ /* runs on exception stack */
++ENTRY(machine_check)
++ INTR_FRAME
++ pushq $0
++ CFI_ADJUST_CFA_OFFSET 8
++ paranoidentry do_machine_check
++ jmp paranoid_exit1
++ CFI_ENDPROC
++END(machine_check)
++#endif
++
++/* Call softirq on interrupt stack. Interrupts are off. */
++ENTRY(call_softirq)
++ CFI_STARTPROC
++ push %rbp
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rbp,0
++ mov %rsp,%rbp
++ CFI_DEF_CFA_REGISTER rbp
++ incl %gs:pda_irqcount
++ cmove %gs:pda_irqstackptr,%rsp
++ push %rbp # backlink for old unwinder
++ call __do_softirq
++ leaveq
++ CFI_DEF_CFA_REGISTER rsp
++ CFI_ADJUST_CFA_OFFSET -8
++ decl %gs:pda_irqcount
++ ret
++ CFI_ENDPROC
++ENDPROC(call_softirq)
++
++#ifdef CONFIG_STACK_UNWIND
++ENTRY(arch_unwind_init_running)
++ CFI_STARTPROC
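++ # entry: %rdi = register frame to fill, %rsi = callback, %rdx = its arg.
++ # Callee-saved registers are stored as-is and caller-saved ones zeroed;
++ # the xchgq swaps %rsi/%rdx so the final jmpq *%rdx tail-calls the
++ # callback with the frame in %rdi and the argument in %rsi.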
++ movq %r15, R15(%rdi)
++ movq %r14, R14(%rdi)
++ xchgq %rsi, %rdx
++ movq %r13, R13(%rdi)
++ movq %r12, R12(%rdi)
++ xorl %eax, %eax
++ movq %rbp, RBP(%rdi)
++ movq %rbx, RBX(%rdi)
++ movq (%rsp), %rcx
++ movq %rax, R11(%rdi)
++ movq %rax, R10(%rdi)
++ movq %rax, R9(%rdi)
++ movq %rax, R8(%rdi)
++ movq %rax, RAX(%rdi)
++ movq %rax, RCX(%rdi)
++ movq %rax, RDX(%rdi)
++ movq %rax, RSI(%rdi)
++ movq %rax, RDI(%rdi)
++ movq %rax, ORIG_RAX(%rdi)
++ movq %rcx, RIP(%rdi)
++ leaq 8(%rsp), %rcx
++ movq $__KERNEL_CS, CS(%rdi)
++ movq %rax, EFLAGS(%rdi)
++ movq %rcx, RSP(%rdi)
++ movq $__KERNEL_DS, SS(%rdi)
++ jmpq *%rdx
++ CFI_ENDPROC
++ENDPROC(arch_unwind_init_running)
++#endif
+Index: head-2008-11-25/arch/x86/kernel/genapic_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/genapic_64-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,143 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Generic APIC sub-arch probe layer.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/ipi.h>
++
++#if defined(CONFIG_ACPI)
++#include <acpi/acpi_bus.h>
++#endif
++
++/* which logical CPU number maps to which CPU (physical APIC ID) */
++u8 x86_cpu_to_apicid[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++u8 x86_cpu_to_log_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++extern struct genapic apic_cluster;
++extern struct genapic apic_flat;
++extern struct genapic apic_physflat;
++
++#ifndef CONFIG_XEN
++struct genapic *genapic = &apic_flat;
++#else
++extern struct genapic apic_xen;
++struct genapic *genapic = &apic_xen;
++#endif
++
++
++/*
++ * Check the APIC IDs in bios_cpu_apicid and choose the APIC mode.
++ */
++void __init clustered_apic_check(void)
++{
++#ifndef CONFIG_XEN
++ long i;
++ u8 clusters, max_cluster;
++ u8 id;
++ u8 cluster_cnt[NUM_APIC_CLUSTERS];
++ int max_apic = 0;
++
++#if defined(CONFIG_ACPI)
++ /*
++ * Some x86_64 machines use physical APIC mode regardless of how many
++ * procs/clusters are present (x86_64 ES7000 is an example).
++ */
++ if (acpi_fadt.revision > FADT2_REVISION_ID)
++ if (acpi_fadt.force_apic_physical_destination_mode) {
++ genapic = &apic_cluster;
++ goto print;
++ }
++#endif
++
++ memset(cluster_cnt, 0, sizeof(cluster_cnt));
++ for (i = 0; i < NR_CPUS; i++) {
++ id = bios_cpu_apicid[i];
++ if (id == BAD_APICID)
++ continue;
++ if (id > max_apic)
++ max_apic = id;
++ cluster_cnt[APIC_CLUSTERID(id)]++;
++ }
++
++ /* Don't use clustered mode on AMD platforms. */
++ if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
++ genapic = &apic_physflat;
++#ifndef CONFIG_HOTPLUG_CPU
++ /* In the CPU hotplug case we cannot use broadcast mode
++ because that opens a race when a CPU is removed.
++ Stay at physflat mode in this case.
++ It is bad to do this unconditionally though. Once
++ we have ACPI platform support for CPU hotplug
++ we should detect hotplug capability from ACPI tables and
++ only do this when really needed. -AK */
++ if (max_apic <= 8)
++ genapic = &apic_flat;
++#endif
++ goto print;
++ }
++
++ clusters = 0;
++ max_cluster = 0;
++
++ for (i = 0; i < NUM_APIC_CLUSTERS; i++) {
++ if (cluster_cnt[i] > 0) {
++ ++clusters;
++ if (cluster_cnt[i] > max_cluster)
++ max_cluster = cluster_cnt[i];
++ }
++ }
++
++ /*
++ * If we have clusters <= 1 and CPUs <= 8 in cluster 0, then flat mode,
++ * else if max_cluster <= 4 and cluster_cnt[15] == 0, clustered logical
++ * else physical mode.
++ * (We don't use lowest priority delivery + HW APIC IRQ steering, so
++ * can ignore the clustered logical case and go straight to physical.)
++ */
++ if (clusters <= 1 && max_cluster <= 8 && cluster_cnt[0] == max_cluster) {
++#ifdef CONFIG_HOTPLUG_CPU
++ /* Don't use APIC shortcuts in CPU hotplug to avoid races */
++ genapic = &apic_physflat;
++#else
++ genapic = &apic_flat;
++#endif
++ } else
++ genapic = &apic_cluster;
++
++print:
++#else
++ /* hardcode to xen apic functions */
++ genapic = &apic_xen;
++#endif
++ printk(KERN_INFO "Setting APIC routing to %s\n", genapic->name);
++}
++
++/* Same for both flat and clustered. */
++
++#ifdef CONFIG_XEN
++extern void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest);
++#endif
++
++void send_IPI_self(int vector)
++{
++#ifndef CONFIG_XEN
++ __send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#else
++ xen_send_IPI_shortcut(APIC_DEST_SELF, vector, APIC_DEST_PHYSICAL);
++#endif
++}
+Index: head-2008-11-25/arch/x86/kernel/genapic_xen_64.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/genapic_xen_64.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,161 @@
++/*
++ * Copyright 2004 James Cleverdon, IBM.
++ * Subject to the GNU Public License, v.2
++ *
++ * Xen APIC subarch code. Maximum 8 CPUs, logical delivery.
++ *
++ * Hacked for x86-64 by James Cleverdon from i386 architecture code by
++ * Martin Bligh, Andi Kleen, James Bottomley, John Stultz, and
++ * James Cleverdon.
++ *
++ * Hacked to pieces for Xen by Chris Wright.
++ */
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/string.h>
++#include <linux/kernel.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#include <asm/smp.h>
++#include <asm/ipi.h>
++#else
++#include <asm/apic.h>
++#include <asm/apicdef.h>
++#include <asm/genapic.h>
++#endif
++#include <xen/evtchn.h>
++
++DECLARE_PER_CPU(int, ipi_to_irq[NR_IPIS]);
++
++static inline void __send_IPI_one(unsigned int cpu, int vector)
++{
++ int irq = per_cpu(ipi_to_irq, cpu)[vector];
++ BUG_ON(irq < 0);
++ notify_remote_via_irq(irq);
++}
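++/* Under Xen an IPI is just an event-channel notification: each (cpu,
++ * vector) pair was bound to an irq at setup time and recorded in
++ * ipi_to_irq above. */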
++
++void xen_send_IPI_shortcut(unsigned int shortcut, int vector, unsigned int dest)
++{
++ int cpu;
++
++ switch (shortcut) {
++ case APIC_DEST_SELF:
++ __send_IPI_one(smp_processor_id(), vector);
++ break;
++ case APIC_DEST_ALLBUT:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu == smp_processor_id())
++ continue;
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ case APIC_DEST_ALLINC:
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpu_online_map)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ break;
++ default:
++ printk("XXXXXX __send_IPI_shortcut %08x vector %d\n", shortcut,
++ vector);
++ break;
++ }
++}
++
++static cpumask_t xen_target_cpus(void)
++{
++ return cpu_online_map;
++}
++
++/*
++ * Set up the logical destination ID.
++ * Do nothing, not called now.
++ */
++static void xen_init_apic_ldr(void)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return;
++}
++
++static void xen_send_IPI_allbutself(int vector)
++{
++ /*
++ * If there are no other CPUs in the system then we get an APIC send
++ * error if we try to broadcast; thus we have to avoid sending IPIs
++ * in this case.
++ */
++ Dprintk("%s\n", __FUNCTION__);
++ if (num_online_cpus() > 1)
++ xen_send_IPI_shortcut(APIC_DEST_ALLBUT, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_all(int vector)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ xen_send_IPI_shortcut(APIC_DEST_ALLINC, vector, APIC_DEST_LOGICAL);
++}
++
++static void xen_send_IPI_mask(cpumask_t cpumask, int vector)
++{
++ unsigned long mask = cpus_addr(cpumask)[0];
++ unsigned int cpu;
++ unsigned long flags;
++
++ Dprintk("%s\n", __FUNCTION__);
++ local_irq_save(flags);
++ WARN_ON(mask & ~cpus_addr(cpu_online_map)[0]);
++
++ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
++ if (cpu_isset(cpu, cpumask)) {
++ __send_IPI_one(cpu, vector);
++ }
++ }
++ local_irq_restore(flags);
++}
++
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++static int xen_apic_id_registered(void)
++{
++ /* better be set */
++ Dprintk("%s\n", __FUNCTION__);
++ return physid_isset(smp_processor_id(), phys_cpu_present_map);
++}
++#endif
++
++static unsigned int xen_cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ Dprintk("%s\n", __FUNCTION__);
++ return cpus_addr(cpumask)[0] & APIC_ALL_CPUS;
++}
++
++static unsigned int phys_pkg_id(int index_msb)
++{
++ u32 ebx;
++
++ Dprintk("%s\n", __FUNCTION__);
++ ebx = cpuid_ebx(1);
++ return ((ebx >> 24) & 0xFF) >> index_msb;
++}
++
++struct genapic apic_xen = {
++ .name = "xen",
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .int_delivery_mode = dest_LowestPrio,
++#endif
++ .int_dest_mode = (APIC_DEST_LOGICAL != 0),
++ .int_delivery_dest = APIC_DEST_LOGICAL | APIC_DM_LOWEST,
++ .target_cpus = xen_target_cpus,
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++ .apic_id_registered = xen_apic_id_registered,
++#endif
++ .init_apic_ldr = xen_init_apic_ldr,
++ .send_IPI_all = xen_send_IPI_all,
++ .send_IPI_allbutself = xen_send_IPI_allbutself,
++ .send_IPI_mask = xen_send_IPI_mask,
++ .cpu_mask_to_apicid = xen_cpu_mask_to_apicid,
++ .phys_pkg_id = phys_pkg_id,
++};
+Index: head-2008-11-25/arch/x86/kernel/head_64-xen.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/head_64-xen.S 2007-08-06 15:10:49.000000000 +0200
+@@ -0,0 +1,214 @@
++/*
++ * linux/arch/x86_64/kernel/head.S -- start in 32bit and switch to 64bit
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2000 Karsten Keil <kkeil@suse.de>
++ * Copyright (C) 2001,2002 Andi Kleen <ak@suse.de>
++ *
++ * $Id: head.S,v 1.49 2002/03/19 17:39:25 ak Exp $
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++
++#include <linux/linkage.h>
++#include <linux/threads.h>
++#include <linux/init.h>
++#include <linux/elfnote.h>
++#include <asm/desc.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/msr.h>
++#include <asm/cache.h>
++#include <asm/dwarf2.h>
++#include <xen/interface/elfnote.h>
++
++ .section .bootstrap.text, "ax", @progbits
++ .code64
++ .globl startup_64
++startup_64:
++ movq $(init_thread_union+THREAD_SIZE-8),%rsp
++
++ /* rsi is pointer to startup info structure.
++ pass it to C */
++ movq %rsi,%rdi
++ pushq $0 # fake return address
++ jmp x86_64_start_kernel
++
++#ifdef CONFIG_ACPI_SLEEP
++.org 0xf00
++ .globl pGDT32
++pGDT32:
++ .word gdt_end-cpu_gdt_table-1
++ .long cpu_gdt_table-__START_KERNEL_map
++#endif
++ENTRY(stext)
++ENTRY(_stext)
++
++ $page = 0
++#define NEXT_PAGE(name) \
++ $page = $page + 1; \
++ .org $page * 0x1000; \
++ phys_##name = $page * 0x1000 + __PHYSICAL_START; \
++ENTRY(name)
++
++NEXT_PAGE(init_level4_pgt)
++ /* This gets initialized in x86_64_start_kernel */
++ .fill 512,8,0
++NEXT_PAGE(init_level4_user_pgt)
++ /*
++ * We update two pgd entries to keep the kernel and user pgds consistent
++ * at pgd_populate(). The entries can be used by kernel modules, so we
++ * place this page here for those cases to avoid memory corruption.
++ * We also use this page to establish the initial mapping for the
++ * vsyscall area.
++ */
++ .fill 512,8,0
++
++NEXT_PAGE(level3_kernel_pgt)
++ .fill 512,8,0
++
++ /*
++ * This is used for vsyscall area mapping as we have a different
++ * level4 page table for user.
++ */
++NEXT_PAGE(level3_user_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(level2_kernel_pgt)
++ .fill 512,8,0
++
++NEXT_PAGE(hypercall_page)
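++ /* Xen fills this page at boot: the entry point for hypercall N lives
++ * at offset N*32. The .skip directives below merely reserve space for
++ * the instruction sequence the hypervisor installs (push %rcx; push
++ * %r11; mov $N,%eax; syscall; pop %r11; pop %rcx; ret), so that the
++ * CFI annotations line up with it. */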
++ CFI_STARTPROC
++ .rept 0x1000 / 0x20
++ .skip 1 /* push %rcx */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET rcx,0
++ .skip 2 /* push %r11 */
++ CFI_ADJUST_CFA_OFFSET 8
++ CFI_REL_OFFSET r11,0
++ .skip 5 /* mov $#,%eax */
++ .skip 2 /* syscall */
++ .skip 2 /* pop %r11 */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE r11
++ .skip 1 /* pop %rcx */
++ CFI_ADJUST_CFA_OFFSET -8
++ CFI_RESTORE rcx
++ .align 0x20,0 /* ret */
++ .endr
++ CFI_ENDPROC
++
++#undef NEXT_PAGE
++
++ .data
++/* Just a dummy symbol to allow compilation. Not used in the sleep path. */
++#ifdef CONFIG_ACPI_SLEEP
++ .align PAGE_SIZE
++ENTRY(wakeup_level4_pgt)
++ .fill 512,8,0
++#endif
++
++ .data
++
++ .align 16
++ .globl cpu_gdt_descr
++cpu_gdt_descr:
++ .word gdt_end-cpu_gdt_table-1
++gdt:
++ .quad cpu_gdt_table
++#ifdef CONFIG_SMP
++ .rept NR_CPUS-1
++ .word 0
++ .quad 0
++ .endr
++#endif
++
++/* We need valid kernel segments for data and code in long mode too.
++ * IRET will check the segment types (kkeil 2000/10/28).
++ * Also sysret mandates a special GDT layout.
++ */
++
++ .section .data.page_aligned, "aw"
++ .align PAGE_SIZE
++
++/* The TLS descriptors are currently at a different place compared to i386.
++ Hopefully nobody expects them at a fixed place (Wine?) */
++
++ENTRY(cpu_gdt_table)
++ .quad 0x0000000000000000 /* NULL descriptor */
++ .quad 0x0 /* unused */
++ .quad 0x00af9a000000ffff /* __KERNEL_CS */
++ .quad 0x00cf92000000ffff /* __KERNEL_DS */
++ .quad 0x00cffa000000ffff /* __USER32_CS */
++ .quad 0x00cff2000000ffff /* __USER_DS, __USER32_DS */
++ .quad 0x00affa000000ffff /* __USER_CS */
++ .quad 0x00cf9a000000ffff /* __KERNEL32_CS */
++ .quad 0,0 /* TSS */
++ .quad 0,0 /* LDT */
++ .quad 0,0,0 /* three TLS descriptors */
++ .quad 0 /* unused */
++gdt_end:
++ /* asm/segment.h:GDT_ENTRIES must match this */
++ /* This should be a multiple of the cache line size */
++ /* GDTs of other CPUs are now dynamically allocated */
++
++ /* zero the remaining page */
++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
++
++ .section .bss.page_aligned, "aw", @nobits
++ .align PAGE_SIZE
++ENTRY(empty_zero_page)
++ .skip PAGE_SIZE
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++/*
++ * __xen_guest information
++ */
++.macro utoh value
++ .if (\value) < 0 || (\value) >= 0x10
++ utoh (((\value)>>4)&0x0fffffffffffffff)
++ .endif
++ .if ((\value) & 0xf) < 10
++ .byte '0' + ((\value) & 0xf)
++ .else
++ .byte 'A' + ((\value) & 0xf) - 10
++ .endif
++.endm
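++/* (utoh above recurses on the high-order nibbles first, so the hex
++ * digits of \value are emitted most-significant first; callers supply
++ * the "0x" prefix themselves) */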
++
++.section __xen_guest
++ .ascii "GUEST_OS=linux,GUEST_VER=2.6"
++ .ascii ",XEN_VER=xen-3.0"
++ .ascii ",VIRT_BASE=0x"
++ utoh __START_KERNEL_map
++ .ascii ",ELF_PADDR_OFFSET=0x"
++ utoh __START_KERNEL_map
++ .ascii ",VIRT_ENTRY=0x"
++ utoh (__START_KERNEL_map + __PHYSICAL_START)
++ .ascii ",HYPERCALL_PAGE=0x"
++ utoh (phys_hypercall_page >> PAGE_SHIFT)
++ .ascii ",FEATURES=writable_page_tables"
++ .ascii "|writable_descriptor_tables"
++ .ascii "|auto_translated_physmap"
++ .ascii "|supervisor_mode_kernel"
++ .ascii ",LOADER=generic"
++ .byte 0
++#endif /* CONFIG_XEN_COMPAT <= 0x030002 */
++
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz, "linux")
++ ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz, "2.6")
++ ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz, "xen-3.0")
++ ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, .quad, __START_KERNEL_map)
++#if CONFIG_XEN_COMPAT <= 0x030002
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, __START_KERNEL_map)
++#else
++ ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, .quad, 0)
++#endif
++ ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, .quad, startup_64)
++ ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, .quad, hypercall_page)
++ ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, .quad, _PAGE_PRESENT,_PAGE_PRESENT)
++ ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .asciz, "writable_page_tables|writable_descriptor_tables|auto_translated_physmap|pae_pgdir_above_4gb|supervisor_mode_kernel")
++ ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz, "generic")
++ ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long, 1)
+Index: head-2008-11-25/arch/x86/kernel/head64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/head64-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,162 @@
++/*
++ * linux/arch/x86_64/kernel/head64.c -- prepare to run common code
++ *
++ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/init.h>
++#include <linux/linkage.h>
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++#include <asm/processor.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/bootsetup.h>
++#include <asm/setup.h>
++#include <asm/desc.h>
++#include <asm/pgtable.h>
++#include <asm/sections.h>
++
++unsigned long start_pfn;
++
++/* Don't add a printk in there. printk relies on the PDA which is not initialized
++ yet. */
++#if 0
++static void __init clear_bss(void)
++{
++ memset(__bss_start, 0,
++ (unsigned long) __bss_stop - (unsigned long) __bss_start);
++}
++#endif
++
++#define NEW_CL_POINTER 0x228 /* Relative to real mode data */
++#define OLD_CL_MAGIC_ADDR 0x90020
++#define OLD_CL_MAGIC 0xA33F
++#define OLD_CL_BASE_ADDR 0x90000
++#define OLD_CL_OFFSET 0x90022
++
++extern char saved_command_line[];
++
++static void __init copy_bootdata(char *real_mode_data)
++{
++#ifndef CONFIG_XEN
++ int new_data;
++ char * command_line;
++
++ memcpy(x86_boot_params, real_mode_data, BOOT_PARAM_SIZE);
++ new_data = *(int *) (x86_boot_params + NEW_CL_POINTER);
++ if (!new_data) {
++ if (OLD_CL_MAGIC != * (u16 *) OLD_CL_MAGIC_ADDR) {
++ printk("so old bootloader that it does not support commandline?!\n");
++ return;
++ }
++ new_data = OLD_CL_BASE_ADDR + * (u16 *) OLD_CL_OFFSET;
++ printk("old bootloader convention, maybe loadlin?\n");
++ }
++ command_line = (char *) ((u64)(new_data));
++ memcpy(saved_command_line, command_line, COMMAND_LINE_SIZE);
++#else
++ int max_cmdline;
++
++ if ((max_cmdline = MAX_GUEST_CMDLINE) > COMMAND_LINE_SIZE)
++ max_cmdline = COMMAND_LINE_SIZE;
++ memcpy(saved_command_line, xen_start_info->cmd_line, max_cmdline);
++ saved_command_line[max_cmdline-1] = '\0';
++#endif
++ printk("Bootdata ok (command line is %s)\n", saved_command_line);
++}
++
++static void __init setup_boot_cpu_data(void)
++{
++ unsigned int dummy, eax;
++
++ /* get vendor info */
++ cpuid(0, (unsigned int *)&boot_cpu_data.cpuid_level,
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[0],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[8],
++ (unsigned int *)&boot_cpu_data.x86_vendor_id[4]);
++
++ /* get cpu type */
++ cpuid(1, &eax, &dummy, &dummy,
++ (unsigned int *) &boot_cpu_data.x86_capability);
++ boot_cpu_data.x86 = (eax >> 8) & 0xf;
++ boot_cpu_data.x86_model = (eax >> 4) & 0xf;
++ boot_cpu_data.x86_mask = eax & 0xf;
++}
++
++#include <xen/interface/memory.h>
++unsigned long *machine_to_phys_mapping;
++EXPORT_SYMBOL(machine_to_phys_mapping);
++unsigned int machine_to_phys_order;
++EXPORT_SYMBOL(machine_to_phys_order);
++
++void __init x86_64_start_kernel(char * real_mode_data)
++{
++ struct xen_machphys_mapping mapping;
++ unsigned long machine_to_phys_nr_ents;
++ char *s;
++ int i;
++
++ setup_xen_features();
++
++ xen_start_info = (struct start_info *)real_mode_data;
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ phys_to_machine_mapping =
++ (unsigned long *)xen_start_info->mfn_list;
++ start_pfn = (__pa(xen_start_info->pt_base) >> PAGE_SHIFT) +
++ xen_start_info->nr_pt_frames;
++
++ machine_to_phys_mapping = (unsigned long *)MACH2PHYS_VIRT_START;
++ machine_to_phys_nr_ents = MACH2PHYS_NR_ENTRIES;
++ if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) {
++ machine_to_phys_mapping = (unsigned long *)mapping.v_start;
++ machine_to_phys_nr_ents = mapping.max_mfn + 1;
++ }
++ while ((1UL << machine_to_phys_order) < machine_to_phys_nr_ents )
++ machine_to_phys_order++;
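++ /* machine_to_phys_order is now the smallest n such that
++ (1UL << n) >= machine_to_phys_nr_ents, i.e. ceil(log2(nr_ents)). */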
++
++#if 0
++ for (i = 0; i < 256; i++)
++ set_intr_gate(i, early_idt_handler);
++ asm volatile("lidt %0" :: "m" (idt_descr));
++#endif
++
++ /*
++ * This must be called really, really early:
++ */
++ lockdep_init();
++
++ for (i = 0; i < NR_CPUS; i++)
++ cpu_pda(i) = &boot_cpu_pda[i];
++
++ pda_init(0);
++ copy_bootdata(real_mode_data);
++#ifdef CONFIG_SMP
++ cpu_set(0, cpu_online_map);
++#endif
++ s = strstr(saved_command_line, "earlyprintk=");
++ if (s != NULL)
++ setup_early_printk(strchr(s, '=') + 1);
++#ifdef CONFIG_NUMA
++ s = strstr(saved_command_line, "numa=");
++ if (s != NULL)
++ numa_setup(s+5);
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ if (strstr(saved_command_line, "disableapic"))
++ disable_apic = 1;
++#endif
++ /* You need early console to see that */
++ if (__pa_symbol(&_end) >= KERNEL_TEXT_SIZE)
++ panic("Kernel too big for kernel mapping\n");
++
++ setup_boot_cpu_data();
++ start_kernel();
++}
+Index: head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/io_apic_64-xen.c 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,2268 @@
++/*
++ * Intel IO-APIC support for multi-Pentium hosts.
++ *
++ * Copyright (C) 1997, 1998, 1999, 2000 Ingo Molnar, Hajnalka Szabo
++ *
++ * Many thanks to Stig Venaas for trying out countless experimental
++ * patches and reporting/debugging problems patiently!
++ *
++ * (c) 1999, Multiple IO-APIC support, developed by
++ * Ken-ichi Yaku <yaku@css1.kbnes.nec.co.jp> and
++ * Hidemi Kishimoto <kisimoto@css1.kbnes.nec.co.jp>,
++ * further tested and cleaned up by Zach Brown <zab@redhat.com>
++ * and Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Maciej W. Rozycki : Bits for genuine 82489DX APICs;
++ * thanks to Eric Gilmore
++ * and Rolf G. Tews
++ * for testing these extensively
++ * Paul Diefenbaugh : Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/sysdev.h>
++#ifdef CONFIG_ACPI
++#include <acpi/acpi_bus.h>
++#endif
++
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/mach_apic.h>
++#include <asm/acpi.h>
++#include <asm/dma.h>
++#include <asm/nmi.h>
++
++#define __apicdebuginit __init
++
++int sis_apic_bug; /* not actually supported, dummy for compile */
++
++static int no_timer_check;
++
++int disable_timer_pin_1 __initdata;
++
++#ifndef CONFIG_XEN
++int timer_over_8254 __initdata = 0;
++
++/* Where if anywhere is the i8259 connect in external int mode */
++static struct { int pin, apic; } ioapic_i8259 = { -1, -1 };
++#endif
++
++static DEFINE_SPINLOCK(ioapic_lock);
++static DEFINE_SPINLOCK(vector_lock);
++
++/*
++ * # of IRQ routing registers
++ */
++int nr_ioapic_registers[MAX_IO_APICS];
++
++/*
++ * Rough estimation of how many shared IRQs there are, can
++ * be changed anytime.
++ */
++#define MAX_PLUS_SHARED_IRQS NR_IRQ_VECTORS
++#define PIN_MAP_SIZE (MAX_PLUS_SHARED_IRQS + NR_IRQS)
++
++/*
++ * This is performance-critical, we want to do it O(1)
++ *
++ * the indexing order of this array favors 1:1 mappings
++ * between pins and IRQs.
++ */
++
++static struct irq_pin_list {
++ short apic, pin, next;
++} irq_2_pin[PIN_MAP_SIZE];
++
++int vector_irq[NR_VECTORS] __read_mostly = { [0 ... NR_VECTORS - 1] = -1};
++#ifdef CONFIG_PCI_MSI
++#define vector_to_irq(vector) \
++ (platform_legacy_irq(vector) ? vector : vector_irq[vector])
++#else
++#define vector_to_irq(vector) (vector)
++#endif
++
++#ifdef CONFIG_XEN
++
++#include <xen/interface/xen.h>
++#include <xen/interface/physdev.h>
++#include <xen/evtchn.h>
++
++/* Fake i8259 */
++#define make_8259A_irq(_irq) (io_apic_irqs &= ~(1UL<<(_irq)))
++#define disable_8259A_irq(_irq) ((void)0)
++#define i8259A_irq_pending(_irq) (0)
++
++unsigned long io_apic_irqs;
++
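++/* Under Xen the IO-APIC is owned by the hypervisor: the register reads
++ * and writes below are proxied through the PHYSDEVOP_apic_read and
++ * PHYSDEVOP_apic_write hypercalls rather than touching hardware
++ * directly. */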
++static inline unsigned int xen_io_apic_read(unsigned int apic, unsigned int reg)
++{
++ struct physdev_apic apic_op;
++ int ret;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op);
++ if (ret)
++ return ret;
++ return apic_op.value;
++}
++
++static inline void xen_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
++{
++ struct physdev_apic apic_op;
++
++ apic_op.apic_physbase = mp_ioapics[apic].mpc_apicaddr;
++ apic_op.reg = reg;
++ apic_op.value = value;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_apic_write, &apic_op));
++}
++
++#define io_apic_read(a,r) xen_io_apic_read(a,r)
++#define io_apic_write(a,r,v) xen_io_apic_write(a,r,v)
++
++#define clear_IO_APIC() ((void)0)
++
++#else
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_irq(unsigned int irq, cpumask_t mask)
++{
++ unsigned long flags;
++ unsigned int dest;
++ cpumask_t tmp;
++
++ cpus_and(tmp, mask, cpu_online_map);
++ if (cpus_empty(tmp))
++ tmp = TARGET_CPUS;
++
++ cpus_and(mask, tmp, CPU_MASK_ALL);
++
++ dest = cpu_mask_to_apicid(mask);
++
++ /*
++ * Only the high 8 bits are valid.
++ */
++ dest = SET_APIC_LOGICAL_ID(dest);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __DO_ACTION(1, = dest, )
++ set_irq_info(irq, mask);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++#endif
++
++#endif /* !CONFIG_XEN */
++
++/*
++ * The common case is 1:1 IRQ<->pin mappings. Sometimes there are
++ * shared ISA-space IRQs, so we have to support them. We are super
++ * fast in the common case, and fast for shared ISA-space IRQs.
++ */
++static void add_pin_to_irq(unsigned int irq, int apic, int pin)
++{
++ static int first_free_entry = NR_IRQS;
++ struct irq_pin_list *entry = irq_2_pin + irq;
++
++ BUG_ON(irq >= NR_IRQS);
++ while (entry->next)
++ entry = irq_2_pin + entry->next;
++
++ if (entry->pin != -1) {
++ entry->next = first_free_entry;
++ entry = irq_2_pin + entry->next;
++ if (++first_free_entry >= PIN_MAP_SIZE)
++ panic("io_apic.c: ran out of irq_2_pin entries!");
++ }
++ entry->apic = apic;
++ entry->pin = pin;
++}
++
++#ifndef CONFIG_XEN
++#define __DO_ACTION(R, ACTION, FINAL) \
++ \
++{ \
++ int pin; \
++ struct irq_pin_list *entry = irq_2_pin + irq; \
++ \
++ BUG_ON(irq >= NR_IRQS); \
++ for (;;) { \
++ unsigned int reg; \
++ pin = entry->pin; \
++ if (pin == -1) \
++ break; \
++ reg = io_apic_read(entry->apic, 0x10 + R + pin*2); \
++ reg ACTION; \
++ io_apic_modify(entry->apic, reg); \
++ if (!entry->next) \
++ break; \
++ entry = irq_2_pin + entry->next; \
++ } \
++ FINAL; \
++}
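++/* __DO_ACTION walks the irq_2_pin chain for `irq', applying ACTION to
++ * dword R (0 = low, 1 = high) of each listed pin's redirection entry,
++ * then runs FINAL once after the loop. */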
++
++#define DO_ACTION(name,R,ACTION, FINAL) \
++ \
++ static void name##_IO_APIC_irq (unsigned int irq) \
++ __DO_ACTION(R, ACTION, FINAL)
++
++DO_ACTION( __mask, 0, |= 0x00010000, io_apic_sync(entry->apic) )
++ /* mask = 1 */
++DO_ACTION( __unmask, 0, &= 0xfffeffff, )
++ /* mask = 0 */
++
++static void mask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __mask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void unmask_IO_APIC_irq (unsigned int irq)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ /* Check delivery_mode to be sure we're not clearing an SMI pin */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int*)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int*)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (entry.delivery_mode == dest_SMI)
++ return;
++ /*
++ * Disable it in the IO-APIC irq-routing table:
++ */
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 1;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry) + 0));
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry) + 1));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++static void clear_IO_APIC (void)
++{
++ int apic, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++)
++ clear_IO_APIC_pin(apic, pin);
++}
++
++#endif /* !CONFIG_XEN */
++
++static u8 gsi_2_irq[NR_IRQ_VECTORS] = { [0 ... NR_IRQ_VECTORS-1] = 0xFF };
++
++/*
++ * support for broken MP BIOSs, enables hand-redirection of PIRQ0-7 to
++ * specific CPU-side IRQs.
++ */
++
++#define MAX_PIRQS 8
++static int pirq_entries [MAX_PIRQS];
++static int pirqs_enabled;
++int skip_ioapic_setup;
++int ioapic_force;
++
++/* dummy parsing: see setup.c */
++
++static int __init disable_ioapic_setup(char *str)
++{
++ skip_ioapic_setup = 1;
++ return 1;
++}
++
++static int __init enable_ioapic_setup(char *str)
++{
++ ioapic_force = 1;
++ skip_ioapic_setup = 0;
++ return 1;
++}
++
++__setup("noapic", disable_ioapic_setup);
++__setup("apic", enable_ioapic_setup);
++
++#ifndef CONFIG_XEN
++static int __init setup_disable_8254_timer(char *s)
++{
++ timer_over_8254 = -1;
++ return 1;
++}
++static int __init setup_enable_8254_timer(char *s)
++{
++ timer_over_8254 = 2;
++ return 1;
++}
++
++__setup("disable_8254_timer", setup_disable_8254_timer);
++__setup("enable_8254_timer", setup_enable_8254_timer);
++#endif /* !CONFIG_XEN */
++
++#include <asm/pci-direct.h>
++#include <linux/pci_ids.h>
++#include <linux/pci.h>
++
++
++#ifdef CONFIG_ACPI
++
++static int nvidia_hpet_detected __initdata;
++
++static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
++{
++ nvidia_hpet_detected = 1;
++ return 0;
++}
++#endif
++
++/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
++ off. Check for an Nvidia or VIA PCI bridge and turn it off.
++ Use pci direct infrastructure because this runs before the PCI subsystem.
++
++ Can be overridden with "apic"
++
++ And another hack to disable the IOMMU on VIA chipsets.
++
++ ... and others. Really should move this somewhere else.
++
++ Kludge-O-Rama. */
++void __init check_ioapic(void)
++{
++ int num,slot,func;
++ /* Poor man's PCI discovery */
++ for (num = 0; num < 32; num++) {
++ for (slot = 0; slot < 32; slot++) {
++ for (func = 0; func < 8; func++) {
++ u32 class;
++ u32 vendor;
++ u8 type;
++ class = read_pci_config(num,slot,func,
++ PCI_CLASS_REVISION);
++ if (class == 0xffffffff)
++ break;
++
++ if ((class >> 16) != PCI_CLASS_BRIDGE_PCI)
++ continue;
++
++ vendor = read_pci_config(num, slot, func,
++ PCI_VENDOR_ID);
++ vendor &= 0xffff;
++ switch (vendor) {
++ case PCI_VENDOR_ID_VIA:
++#ifdef CONFIG_IOMMU
++ if ((end_pfn > MAX_DMA32_PFN ||
++ force_iommu) &&
++ !iommu_aperture_allowed) {
++ printk(KERN_INFO
++ "Looks like a VIA chipset. Disabling IOMMU. Override with \"iommu=allowed\"\n");
++ iommu_aperture_disabled = 1;
++ }
++#endif
++ return;
++ case PCI_VENDOR_ID_NVIDIA:
++#ifdef CONFIG_ACPI
++ /*
++ * All timer overrides on Nvidia are
++ * wrong unless HPET is enabled.
++ */
++ nvidia_hpet_detected = 0;
++ acpi_table_parse(ACPI_HPET,
++ nvidia_hpet_check);
++ if (nvidia_hpet_detected == 0) {
++ acpi_skip_timer_override = 1;
++ printk(KERN_INFO "Nvidia board "
++ "detected. Ignoring ACPI "
++ "timer override.\n");
++ }
++#endif
++ /* RED-PEN skip them on mptables too? */
++ return;
++ case PCI_VENDOR_ID_ATI:
++
++ /* This should be actually default, but
++ for 2.6.16 let's do it for ATI only where
++ it's really needed. */
++#ifndef CONFIG_XEN
++ if (timer_over_8254 == 1) {
++ timer_over_8254 = 0;
++ printk(KERN_INFO
++ "ATI board detected. Disabling timer routing over 8254.\n");
++ }
++#endif
++ return;
++ }
++
++
++ /* No multi-function device? */
++ type = read_pci_config_byte(num,slot,func,
++ PCI_HEADER_TYPE);
++ if (!(type & 0x80))
++ break;
++ }
++ }
++ }
++}
++
++static int __init ioapic_pirq_setup(char *str)
++{
++ int i, max;
++ int ints[MAX_PIRQS+1];
++
++ get_options(str, ARRAY_SIZE(ints), ints);
++
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ pirqs_enabled = 1;
++ apic_printk(APIC_VERBOSE, "PIRQ redirection, working around broken MP-BIOS.\n");
++ max = MAX_PIRQS;
++ if (ints[0] < MAX_PIRQS)
++ max = ints[0];
++
++ for (i = 0; i < max; i++) {
++ apic_printk(APIC_VERBOSE, "... PIRQ%d -> IRQ %d\n", i, ints[i+1]);
++ /*
++ * PIRQs are mapped upside down, usually.
++ */
++ pirq_entries[MAX_PIRQS-i-1] = ints[i+1];
++ }
++ return 1;
++}
++
++__setup("pirq=", ioapic_pirq_setup);
++
++/*
++ * Find the IRQ entry number of a certain pin.
++ */
++static int find_irq_entry(int apic, int pin, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_irqtype == type &&
++ (mp_irqs[i].mpc_dstapic == mp_ioapics[apic].mpc_apicid ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL) &&
++ mp_irqs[i].mpc_dstirq == pin)
++ return i;
++
++ return -1;
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Find the pin to which IRQ[irq] (ISA) is connected
++ */
++static int __init find_isa_irq_pin(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++
++ return mp_irqs[i].mpc_dstirq;
++ }
++ return -1;
++}
++
++static int __init find_isa_irq_apic(int irq, int type)
++{
++ int i;
++
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_ISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_EISA ||
++ mp_bus_id_to_type[lbus] == MP_BUS_MCA) &&
++ (mp_irqs[i].mpc_irqtype == type) &&
++ (mp_irqs[i].mpc_srcbusirq == irq))
++ break;
++ }
++ if (i < mp_irq_entries) {
++ int apic;
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic)
++ return apic;
++ }
++ }
++
++ return -1;
++}
++#endif
++
++/*
++ * Find a specific PCI IRQ entry.
++ * Not an __init, possibly needed by modules
++ */
++static int pin_2_irq(int idx, int apic, int pin);
++
++int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin)
++{
++ int apic, i, best_guess = -1;
++
++ apic_printk(APIC_DEBUG, "querying PCI -> IRQ mapping bus:%d, slot:%d, pin:%d.\n",
++ bus, slot, pin);
++ if (mp_bus_id_to_pci_bus[bus] == -1) {
++ apic_printk(APIC_VERBOSE, "PCI BIOS passed nonexistent PCI bus %d!\n", bus);
++ return -1;
++ }
++ for (i = 0; i < mp_irq_entries; i++) {
++ int lbus = mp_irqs[i].mpc_srcbus;
++
++ for (apic = 0; apic < nr_ioapics; apic++)
++ if (mp_ioapics[apic].mpc_apicid == mp_irqs[i].mpc_dstapic ||
++ mp_irqs[i].mpc_dstapic == MP_APIC_ALL)
++ break;
++
++ if ((mp_bus_id_to_type[lbus] == MP_BUS_PCI) &&
++ !mp_irqs[i].mpc_irqtype &&
++ (bus == lbus) &&
++ (slot == ((mp_irqs[i].mpc_srcbusirq >> 2) & 0x1f))) {
++ int irq = pin_2_irq(i,apic,mp_irqs[i].mpc_dstirq);
++
++ if (!(apic || IO_APIC_IRQ(irq)))
++ continue;
++
++ if (pin == (mp_irqs[i].mpc_srcbusirq & 3))
++ return irq;
++ /*
++ * Use the first all-but-pin matching entry as a
++ * best-guess fuzzy result for broken mptables.
++ */
++ if (best_guess < 0)
++ best_guess = irq;
++ }
++ }
++ BUG_ON(best_guess >= NR_IRQS);
++ return best_guess;
++}
++
++/*
++ * EISA Edge/Level control register, ELCR
++ */
++static int EISA_ELCR(unsigned int irq)
++{
++ if (irq < 16) {
++ unsigned int port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++ }
++ apic_printk(APIC_VERBOSE, "Broken MPtable reports ISA irq %d\n", irq);
++ return 0;
++}
++
++/* EISA interrupts are always polarity zero and can be edge or level
++ * trigger depending on the ELCR value. If an interrupt is listed as
++ * EISA conforming in the MP table, that means its trigger type must
++ * be read in from the ELCR */
++
++#define default_EISA_trigger(idx) (EISA_ELCR(mp_irqs[idx].mpc_srcbusirq))
++#define default_EISA_polarity(idx) (0)
++
++/* ISA interrupts are always polarity zero edge triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_ISA_trigger(idx) (0)
++#define default_ISA_polarity(idx) (0)
++
++/* PCI interrupts are always polarity one level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_PCI_trigger(idx) (1)
++#define default_PCI_polarity(idx) (1)
++
++/* MCA interrupts are always polarity zero level triggered,
++ * when listed as conforming in the MP table. */
++
++#define default_MCA_trigger(idx) (1)
++#define default_MCA_polarity(idx) (0)
++
++static int __init MPBIOS_polarity(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int polarity;
++
++ /*
++ * Determine IRQ line polarity (high active or low active):
++ */
++ switch (mp_irqs[idx].mpc_irqflag & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent polarity */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ polarity = default_ISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ polarity = default_EISA_polarity(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ polarity = default_PCI_polarity(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ polarity = default_MCA_polarity(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* high active */
++ {
++ polarity = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ case 3: /* low active */
++ {
++ polarity = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ polarity = 1;
++ break;
++ }
++ }
++ return polarity;
++}
++
++static int MPBIOS_trigger(int idx)
++{
++ int bus = mp_irqs[idx].mpc_srcbus;
++ int trigger;
++
++ /*
++ * Determine IRQ trigger mode (edge or level sensitive):
++ */
++ switch ((mp_irqs[idx].mpc_irqflag>>2) & 3)
++ {
++ case 0: /* conforms, ie. bus-type dependent */
++ {
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ {
++ trigger = default_ISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_EISA: /* EISA pin */
++ {
++ trigger = default_EISA_trigger(idx);
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ trigger = default_PCI_trigger(idx);
++ break;
++ }
++ case MP_BUS_MCA: /* MCA pin */
++ {
++ trigger = default_MCA_trigger(idx);
++ break;
++ }
++ default:
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ }
++ break;
++ }
++ case 1: /* edge */
++ {
++ trigger = 0;
++ break;
++ }
++ case 2: /* reserved */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 1;
++ break;
++ }
++ case 3: /* level */
++ {
++ trigger = 1;
++ break;
++ }
++ default: /* invalid */
++ {
++ printk(KERN_WARNING "broken BIOS!!\n");
++ trigger = 0;
++ break;
++ }
++ }
++ return trigger;
++}
++
++static inline int irq_polarity(int idx)
++{
++ return MPBIOS_polarity(idx);
++}
++
++static inline int irq_trigger(int idx)
++{
++ return MPBIOS_trigger(idx);
++}
++
++static int next_irq = 16;
++
++/*
++ * gsi_irq_sharing -- Name overload! "irq" can be either a legacy IRQ
++ * in the range 0-15, a linux IRQ in the range 0-223, or a GSI number
++ * from ACPI, which can reach 800 in large boxen.
++ *
++ * Compact the sparse GSI space into a sequential IRQ series and reuse
++ * vectors if possible.
++ */
++int gsi_irq_sharing(int gsi)
++{
++ int i, tries, vector;
++
++ BUG_ON(gsi >= NR_IRQ_VECTORS);
++
++ if (platform_legacy_irq(gsi))
++ return gsi;
++
++ if (gsi_2_irq[gsi] != 0xFF)
++ return (int)gsi_2_irq[gsi];
++
++ tries = NR_IRQS;
++ try_again:
++ vector = assign_irq_vector(gsi);
++
++ /*
++ * Sharing vectors means sharing IRQs, so scan irq_vectors for previous
++ * use of vector and if found, return that IRQ. However, we never want
++ * to share legacy IRQs, which usually have a different trigger mode
++ * than PCI.
++ */
++ for (i = 0; i < NR_IRQS; i++)
++ if (IO_APIC_VECTOR(i) == vector)
++ break;
++ if (platform_legacy_irq(i)) {
++ if (--tries >= 0) {
++ IO_APIC_VECTOR(i) = 0;
++ goto try_again;
++ }
++ panic("gsi_irq_sharing: didn't find an IRQ using vector 0x%02X for GSI %d", vector, gsi);
++ }
++ if (i < NR_IRQS) {
++ gsi_2_irq[gsi] = i;
++ printk(KERN_INFO "GSI %d sharing vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++ }
++
++ i = next_irq++;
++ BUG_ON(i >= NR_IRQS);
++ gsi_2_irq[gsi] = i;
++ IO_APIC_VECTOR(i) = vector;
++ printk(KERN_INFO "GSI %d assigned vector 0x%02X and IRQ %d\n",
++ gsi, vector, i);
++ return i;
++}
++
++static int pin_2_irq(int idx, int apic, int pin)
++{
++ int irq, i;
++ int bus = mp_irqs[idx].mpc_srcbus;
++
++ /*
++ * Debugging check, we are in big trouble if this message pops up!
++ */
++ if (mp_irqs[idx].mpc_dstirq != pin)
++ printk(KERN_ERR "broken BIOS or MPTABLE parser, ayiee!!\n");
++
++ switch (mp_bus_id_to_type[bus])
++ {
++ case MP_BUS_ISA: /* ISA pin */
++ case MP_BUS_EISA:
++ case MP_BUS_MCA:
++ {
++ irq = mp_irqs[idx].mpc_srcbusirq;
++ break;
++ }
++ case MP_BUS_PCI: /* PCI pin */
++ {
++ /*
++ * PCI IRQs are mapped in order
++ */
++ i = irq = 0;
++ while (i < apic)
++ irq += nr_ioapic_registers[i++];
++ irq += pin;
++ irq = gsi_irq_sharing(irq);
++ break;
++ }
++ default:
++ {
++ printk(KERN_ERR "unknown bus type %d.\n",bus);
++ irq = 0;
++ break;
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++
++ /*
++ * PCI IRQ command line redirection. Yes, limits are hardcoded.
++ */
++ if ((pin >= 16) && (pin <= 23)) {
++ if (pirq_entries[pin-16] != -1) {
++ if (!pirq_entries[pin-16]) {
++ apic_printk(APIC_VERBOSE, "disabling PIRQ%d\n", pin-16);
++ } else {
++ irq = pirq_entries[pin-16];
++ apic_printk(APIC_VERBOSE, "using PIRQ%d -> IRQ %d\n",
++ pin-16, irq);
++ }
++ }
++ }
++ BUG_ON(irq >= NR_IRQS);
++ return irq;
++}
++
++static inline int IO_APIC_irq_trigger(int irq)
++{
++ int apic, idx, pin;
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if ((idx != -1) && (irq == pin_2_irq(idx,apic,pin)))
++ return irq_trigger(idx);
++ }
++ }
++ /*
++ * nonexistent IRQs are edge default
++ */
++ return 0;
++}
++
++/* irq_vectors is indexed by the sum of all RTEs in all I/O APICs. */
++u8 irq_vector[NR_IRQ_VECTORS] __read_mostly;
++
++int assign_irq_vector(int irq)
++{
++ unsigned long flags;
++ int vector;
++ struct physdev_irq irq_op;
++
++ BUG_ON(irq != AUTO_ASSIGN && (unsigned)irq >= NR_IRQ_VECTORS);
++
++ if (irq < PIRQ_BASE || irq - PIRQ_BASE > NR_PIRQS)
++ return -EINVAL;
++
++ spin_lock_irqsave(&vector_lock, flags);
++
++ if (irq != AUTO_ASSIGN && IO_APIC_VECTOR(irq) > 0) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return IO_APIC_VECTOR(irq);
++ }
++
++ irq_op.irq = irq;
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
++ spin_unlock_irqrestore(&vector_lock, flags);
++ return -ENOSPC;
++ }
++
++ vector = irq_op.vector;
++ vector_irq[vector] = irq;
++ if (irq != AUTO_ASSIGN)
++ IO_APIC_VECTOR(irq) = vector;
++
++ spin_unlock_irqrestore(&vector_lock, flags);
++
++ return vector;
++}
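++/* (Under Xen the vector itself is chosen by the hypervisor via
++ * PHYSDEVOP_alloc_irq_vector above; vector_irq and IO_APIC_VECTOR only
++ * cache the result so repeated calls for the same irq are stable.) */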
++
++extern void (*interrupt[NR_IRQS])(void);
++#ifndef CONFIG_XEN
++static struct hw_interrupt_type ioapic_level_type;
++static struct hw_interrupt_type ioapic_edge_type;
++
++#define IOAPIC_AUTO -1
++#define IOAPIC_EDGE 0
++#define IOAPIC_LEVEL 1
++
++static void ioapic_register_intr(int irq, int vector, unsigned long trigger)
++{
++ unsigned idx;
++
++ idx = use_pci_vector() && !platform_legacy_irq(irq) ? vector : irq;
++
++ if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
++ trigger == IOAPIC_LEVEL)
++ irq_desc[idx].chip = &ioapic_level_type;
++ else
++ irq_desc[idx].chip = &ioapic_edge_type;
++ set_intr_gate(vector, interrupt[idx]);
++}
++#else
++#define ioapic_register_intr(irq, vector, trigger) evtchn_register_pirq(irq)
++#endif /* !CONFIG_XEN */
++
++static void __init setup_IO_APIC_irqs(void)
++{
++ struct IO_APIC_route_entry entry;
++ int apic, pin, idx, irq, first_notcon = 1, vector;
++ unsigned long flags;
++
++ apic_printk(APIC_VERBOSE, KERN_DEBUG "init IO_APIC IRQs\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++
++ /*
++ * add it to the IO-APIC irq-routing table:
++ */
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* enable IRQ */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++
++ idx = find_irq_entry(apic,pin,mp_INT);
++ if (idx == -1) {
++ if (first_notcon) {
++ apic_printk(APIC_VERBOSE, KERN_DEBUG " IO-APIC (apicid-pin) %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ first_notcon = 0;
++ } else
++ apic_printk(APIC_VERBOSE, ", %d-%d", mp_ioapics[apic].mpc_apicid, pin);
++ continue;
++ }
++
++ entry.trigger = irq_trigger(idx);
++ entry.polarity = irq_polarity(idx);
++
++ if (irq_trigger(idx)) {
++ entry.trigger = 1;
++ entry.mask = 1;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ }
++
++ irq = pin_2_irq(idx, apic, pin);
++ add_pin_to_irq(irq, apic, pin);
++
++ if (/* !apic && */ !IO_APIC_IRQ(irq))
++ continue;
++
++ if (IO_APIC_IRQ(irq)) {
++ vector = assign_irq_vector(irq);
++ entry.vector = vector;
++
++ ioapic_register_intr(irq, vector, IOAPIC_AUTO);
++ if (!apic && (irq < 16))
++ disable_8259A_irq(irq);
++ }
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++ }
++
++ if (!first_notcon)
++ apic_printk(APIC_VERBOSE," not connected.\n");
++}
++
++#ifndef CONFIG_XEN
++/*
++ * Set up the 8259A-master output pin as broadcast to all
++ * CPUs.
++ */
++static void __init setup_ExtINT_IRQ0_pin(unsigned int apic, unsigned int pin, int vector)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry,0,sizeof(entry));
++
++ disable_8259A_irq(0);
++
++ /* mask LVT0 */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++
++ /*
++ * We use logical delivery to get the timer IRQ
++ * to the first CPU.
++ */
++ entry.dest_mode = INT_DEST_MODE;
++ entry.mask = 0; /* unmask IRQ now */
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.polarity = 0;
++ entry.trigger = 0;
++ entry.vector = vector;
++
++ /*
++ * The timer IRQ doesn't have to know that behind the
++ * scene we have a 8259A-master in AEOI mode ...
++ */
++ irq_desc[0].chip = &ioapic_edge_type;
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(apic, 0x10+2*pin, *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ enable_8259A_irq(0);
++}
++
++void __init UNEXPECTED_IO_APIC(void)
++{
++}
++
++void __apicdebuginit print_IO_APIC(void)
++{
++ int apic, i;
++ union IO_APIC_reg_00 reg_00;
++ union IO_APIC_reg_01 reg_01;
++ union IO_APIC_reg_02 reg_02;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
++ for (i = 0; i < nr_ioapics; i++)
++ printk(KERN_DEBUG "number of IO-APIC #%d registers: %d.\n",
++ mp_ioapics[i].mpc_apicid, nr_ioapic_registers[i]);
++
++ /*
++ * We are a bit conservative about what we expect. We have to
++ * know about every hardware change ASAP.
++ */
++ printk(KERN_INFO "testing the IO APIC.......................\n");
++
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ reg_01.raw = io_apic_read(apic, 1);
++ if (reg_01.bits.version >= 0x10)
++ reg_02.raw = io_apic_read(apic, 2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk("\n");
++ printk(KERN_DEBUG "IO APIC #%d......\n", mp_ioapics[apic].mpc_apicid);
++ printk(KERN_DEBUG ".... register #00: %08X\n", reg_00.raw);
++ printk(KERN_DEBUG "....... : physical APIC id: %02X\n", reg_00.bits.ID);
++ if (reg_00.bits.__reserved_1 || reg_00.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG ".... register #01: %08X\n", *(int *)®_01);
++ printk(KERN_DEBUG "....... : max redirection entries: %04X\n", reg_01.bits.entries);
++ if ( (reg_01.bits.entries != 0x0f) && /* older (Neptune) boards */
++ (reg_01.bits.entries != 0x17) && /* typical ISA+PCI boards */
++ (reg_01.bits.entries != 0x1b) && /* Compaq Proliant boards */
++ (reg_01.bits.entries != 0x1f) && /* dual Xeon boards */
++ (reg_01.bits.entries != 0x22) && /* bigger Xeon boards */
++ (reg_01.bits.entries != 0x2E) &&
++ (reg_01.bits.entries != 0x3F) &&
++ (reg_01.bits.entries != 0x03)
++ )
++ UNEXPECTED_IO_APIC();
++
++ printk(KERN_DEBUG "....... : PRQ implemented: %X\n", reg_01.bits.PRQ);
++ printk(KERN_DEBUG "....... : IO APIC version: %04X\n", reg_01.bits.version);
++ if ( (reg_01.bits.version != 0x01) && /* 82489DX IO-APICs */
++ (reg_01.bits.version != 0x02) && /* 82801BA IO-APICs (ICH2) */
++ (reg_01.bits.version != 0x10) && /* oldest IO-APICs */
++ (reg_01.bits.version != 0x11) && /* Pentium/Pro IO-APICs */
++ (reg_01.bits.version != 0x13) && /* Xeon IO-APICs */
++ (reg_01.bits.version != 0x20) /* Intel P64H (82806 AA) */
++ )
++ UNEXPECTED_IO_APIC();
++ if (reg_01.bits.__reserved_1 || reg_01.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++
++ if (reg_01.bits.version >= 0x10) {
++ printk(KERN_DEBUG ".... register #02: %08X\n", reg_02.raw);
++ printk(KERN_DEBUG "....... : arbitration: %02X\n", reg_02.bits.arbitration);
++ if (reg_02.bits.__reserved_1 || reg_02.bits.__reserved_2)
++ UNEXPECTED_IO_APIC();
++ }
++
++ printk(KERN_DEBUG ".... IRQ redirection table:\n");
++
++ printk(KERN_DEBUG " NR Log Phy Mask Trig IRR Pol"
++ " Stat Dest Deli Vect: \n");
++
++ for (i = 0; i <= reg_01.bits.entries; i++) {
++ struct IO_APIC_route_entry entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry)+0) = io_apic_read(apic, 0x10+i*2);
++ *(((int *)&entry)+1) = io_apic_read(apic, 0x11+i*2);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ printk(KERN_DEBUG " %02x %03X %02X ",
++ i,
++ entry.dest.logical.logical_dest,
++ entry.dest.physical.physical_dest
++ );
++
++ printk("%1d %1d %1d %1d %1d %1d %1d %02X\n",
++ entry.mask,
++ entry.trigger,
++ entry.irr,
++ entry.polarity,
++ entry.delivery_status,
++ entry.dest_mode,
++ entry.delivery_mode,
++ entry.vector
++ );
++ }
++ }
++ if (use_pci_vector())
++ printk(KERN_INFO "Using vector-based indexing\n");
++ printk(KERN_DEBUG "IRQ to pin mappings:\n");
++ for (i = 0; i < NR_IRQS; i++) {
++ struct irq_pin_list *entry = irq_2_pin + i;
++ if (entry->pin < 0)
++ continue;
++ if (use_pci_vector() && !platform_legacy_irq(i))
++ printk(KERN_DEBUG "IRQ%d ", IO_APIC_VECTOR(i));
++ else
++ printk(KERN_DEBUG "IRQ%d ", i);
++ for (;;) {
++ printk("-> %d:%d", entry->apic, entry->pin);
++ if (!entry->next)
++ break;
++ entry = irq_2_pin + entry->next;
++ }
++ printk("\n");
++ }
++
++ printk(KERN_INFO ".................................... done.\n");
++
++ return;
++}
++
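++/* Dump a 256-bit local APIC register array (ISR/TMR/IRR) as 8 rows of 32 bits. */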
++static __apicdebuginit void print_APIC_bitfield (int base)
++{
++ unsigned int v;
++ int i, j;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "0123456789abcdef0123456789abcdef\n" KERN_DEBUG);
++ for (i = 0; i < 8; i++) {
++ v = apic_read(base + i*0x10);
++ for (j = 0; j < 32; j++) {
++ if (v & (1<<j))
++ printk("1");
++ else
++ printk("0");
++ }
++ printk("\n");
++ }
++}
++
++void __apicdebuginit print_local_APIC(void * dummy)
++{
++ unsigned int v, ver, maxlvt;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk("\n" KERN_DEBUG "printing local APIC contents on CPU#%d/%d:\n",
++ smp_processor_id(), hard_smp_processor_id());
++ v = apic_read(APIC_ID);
++ printk(KERN_INFO "... APIC ID: %08x (%01x)\n", v, GET_APIC_ID(v));
++ v = apic_read(APIC_LVR);
++ printk(KERN_INFO "... APIC VERSION: %08x\n", v);
++ ver = GET_APIC_VERSION(v);
++ maxlvt = get_maxlvt();
++
++ v = apic_read(APIC_TASKPRI);
++ printk(KERN_DEBUG "... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK);
++
++ v = apic_read(APIC_ARBPRI);
++ printk(KERN_DEBUG "... APIC ARBPRI: %08x (%02x)\n", v,
++ v & APIC_ARBPRI_MASK);
++ v = apic_read(APIC_PROCPRI);
++ printk(KERN_DEBUG "... APIC PROCPRI: %08x\n", v);
++
++ v = apic_read(APIC_EOI);
++ printk(KERN_DEBUG "... APIC EOI: %08x\n", v);
++ v = apic_read(APIC_RRR);
++ printk(KERN_DEBUG "... APIC RRR: %08x\n", v);
++ v = apic_read(APIC_LDR);
++ printk(KERN_DEBUG "... APIC LDR: %08x\n", v);
++ v = apic_read(APIC_DFR);
++ printk(KERN_DEBUG "... APIC DFR: %08x\n", v);
++ v = apic_read(APIC_SPIV);
++ printk(KERN_DEBUG "... APIC SPIV: %08x\n", v);
++
++ printk(KERN_DEBUG "... APIC ISR field:\n");
++ print_APIC_bitfield(APIC_ISR);
++ printk(KERN_DEBUG "... APIC TMR field:\n");
++ print_APIC_bitfield(APIC_TMR);
++ printk(KERN_DEBUG "... APIC IRR field:\n");
++ print_APIC_bitfield(APIC_IRR);
++
++ v = apic_read(APIC_ESR);
++ printk(KERN_DEBUG "... APIC ESR: %08x\n", v);
++
++ v = apic_read(APIC_ICR);
++ printk(KERN_DEBUG "... APIC ICR: %08x\n", v);
++ v = apic_read(APIC_ICR2);
++ printk(KERN_DEBUG "... APIC ICR2: %08x\n", v);
++
++ v = apic_read(APIC_LVTT);
++ printk(KERN_DEBUG "... APIC LVTT: %08x\n", v);
++
++ if (maxlvt > 3) { /* PC is LVT#4. */
++ v = apic_read(APIC_LVTPC);
++ printk(KERN_DEBUG "... APIC LVTPC: %08x\n", v);
++ }
++ v = apic_read(APIC_LVT0);
++ printk(KERN_DEBUG "... APIC LVT0: %08x\n", v);
++ v = apic_read(APIC_LVT1);
++ printk(KERN_DEBUG "... APIC LVT1: %08x\n", v);
++
++ if (maxlvt > 2) { /* ERR is LVT#3. */
++ v = apic_read(APIC_LVTERR);
++ printk(KERN_DEBUG "... APIC LVTERR: %08x\n", v);
++ }
++
++ v = apic_read(APIC_TMICT);
++ printk(KERN_DEBUG "... APIC TMICT: %08x\n", v);
++ v = apic_read(APIC_TMCCT);
++ printk(KERN_DEBUG "... APIC TMCCT: %08x\n", v);
++ v = apic_read(APIC_TDCR);
++ printk(KERN_DEBUG "... APIC TDCR: %08x\n", v);
++ printk("\n");
++}
++
++void print_all_local_APICs (void)
++{
++ on_each_cpu(print_local_APIC, NULL, 1, 1);
++}
++
++void __apicdebuginit print_PIC(void)
++{
++ unsigned int v;
++ unsigned long flags;
++
++ if (apic_verbosity == APIC_QUIET)
++ return;
++
++ printk(KERN_DEBUG "\nprinting PIC contents\n");
++
++ spin_lock_irqsave(&i8259A_lock, flags);
++
++ v = inb(0xa1) << 8 | inb(0x21);
++ printk(KERN_DEBUG "... PIC IMR: %04x\n", v);
++
++ v = inb(0xa0) << 8 | inb(0x20);
++ printk(KERN_DEBUG "... PIC IRR: %04x\n", v);
++
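++	/* OCW3: select ISR for the next read, then switch back to IRR */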
++ outb(0x0b,0xa0);
++ outb(0x0b,0x20);
++ v = inb(0xa0) << 8 | inb(0x20);
++ outb(0x0a,0xa0);
++ outb(0x0a,0x20);
++
++ spin_unlock_irqrestore(&i8259A_lock, flags);
++
++ printk(KERN_DEBUG "... PIC ISR: %04x\n", v);
++
++ v = inb(0x4d1) << 8 | inb(0x4d0);
++ printk(KERN_DEBUG "... PIC ELCR: %04x\n", v);
++}
++#endif /* !CONFIG_XEN */
++
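++/*
++ * Initialise the irq_2_pin map, record each IO-APIC's redirection
++ * entry count and, outside Xen, find the pin the i8259 is routed to.
++ */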
++static void __init enable_IO_APIC(void)
++{
++ union IO_APIC_reg_01 reg_01;
++#ifndef CONFIG_XEN
++ int i8259_apic, i8259_pin;
++#endif
++ int i, apic;
++ unsigned long flags;
++
++ for (i = 0; i < PIN_MAP_SIZE; i++) {
++ irq_2_pin[i].pin = -1;
++ irq_2_pin[i].next = 0;
++ }
++ if (!pirqs_enabled)
++ for (i = 0; i < MAX_PIRQS; i++)
++ pirq_entries[i] = -1;
++
++ /*
++ * The number of IO-APIC IRQ registers (== #pins):
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(apic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ nr_ioapic_registers[apic] = reg_01.bits.entries+1;
++ }
++#ifndef CONFIG_XEN
++ for(apic = 0; apic < nr_ioapics; apic++) {
++ int pin;
++ /* See if any of the pins is in ExtINT mode */
++ for (pin = 0; pin < nr_ioapic_registers[apic]; pin++) {
++ struct IO_APIC_route_entry entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ *(((int *)&entry) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++
++ /* If the interrupt line is enabled and in ExtInt mode
++ * I have found the pin where the i8259 is connected.
++ */
++ if ((entry.mask == 0) && (entry.delivery_mode == dest_ExtINT)) {
++ ioapic_i8259.apic = apic;
++ ioapic_i8259.pin = pin;
++ goto found_i8259;
++ }
++ }
++ }
++ found_i8259:
++	/* Look to see if the MP table has reported the ExtINT */
++ i8259_pin = find_isa_irq_pin(0, mp_ExtINT);
++ i8259_apic = find_isa_irq_apic(0, mp_ExtINT);
++ /* Trust the MP table if nothing is setup in the hardware */
++ if ((ioapic_i8259.pin == -1) && (i8259_pin >= 0)) {
++ printk(KERN_WARNING "ExtINT not setup in hardware but reported by MP table\n");
++ ioapic_i8259.pin = i8259_pin;
++ ioapic_i8259.apic = i8259_apic;
++ }
++ /* Complain if the MP table and the hardware disagree */
++ if (((ioapic_i8259.apic != i8259_apic) || (ioapic_i8259.pin != i8259_pin)) &&
++ (i8259_pin >= 0) && (ioapic_i8259.pin >= 0))
++ {
++ printk(KERN_WARNING "ExtINT in hardware and MP table differ\n");
++ }
++#endif
++
++ /*
++ * Do not trust the IO-APIC being empty at bootup
++ */
++ clear_IO_APIC();
++}
++
++/*
++ * Not an __init, needed by the reboot code
++ */
++void disable_IO_APIC(void)
++{
++ /*
++ * Clear the IO-APIC before rebooting:
++ */
++ clear_IO_APIC();
++
++#ifndef CONFIG_XEN
++ /*
++ * If the i8259 is routed through an IOAPIC
++ * Put that IOAPIC in virtual wire mode
++ * so legacy interrupts can be delivered.
++ */
++ if (ioapic_i8259.pin != -1) {
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ memset(&entry, 0, sizeof(entry));
++ entry.mask = 0; /* Enabled */
++ entry.trigger = 0; /* Edge */
++ entry.irr = 0;
++ entry.polarity = 0; /* High */
++ entry.delivery_status = 0;
++ entry.dest_mode = 0; /* Physical */
++ entry.delivery_mode = dest_ExtINT; /* ExtInt */
++ entry.vector = 0;
++ entry.dest.physical.physical_dest =
++ GET_APIC_ID(apic_read(APIC_ID));
++
++ /*
++ * Add it to the IO-APIC irq-routing table:
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic_i8259.apic, 0x11+2*ioapic_i8259.pin,
++ *(((int *)&entry)+1));
++ io_apic_write(ioapic_i8259.apic, 0x10+2*ioapic_i8259.pin,
++ *(((int *)&entry)+0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ }
++
++ disconnect_bsp_APIC(ioapic_i8259.pin != -1);
++#endif
++}
++
++/*
++ * function to set the IO-APIC physical IDs based on the
++ * values stored in the MPC table.
++ *
++ * by Matt Domsch <Matt_Domsch@dell.com> Tue Dec 21 12:25:05 CST 1999
++ */
++
++#ifndef CONFIG_XEN
++static void __init setup_ioapic_ids_from_mpc (void)
++{
++ union IO_APIC_reg_00 reg_00;
++ int apic;
++ int i;
++ unsigned char old_id;
++ unsigned long flags;
++
++ /*
++ * Set the IOAPIC ID to the value stored in the MPC table.
++ */
++ for (apic = 0; apic < nr_ioapics; apic++) {
++
++ /* Read the register 0 value */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ old_id = mp_ioapics[apic].mpc_apicid;
++
++
++ printk(KERN_INFO "Using IO-APIC %d\n", mp_ioapics[apic].mpc_apicid);
++
++
++ /*
++ * We need to adjust the IRQ routing table
++ * if the ID changed.
++ */
++ if (old_id != mp_ioapics[apic].mpc_apicid)
++ for (i = 0; i < mp_irq_entries; i++)
++ if (mp_irqs[i].mpc_dstapic == old_id)
++ mp_irqs[i].mpc_dstapic
++ = mp_ioapics[apic].mpc_apicid;
++
++ /*
++ * Read the right value from the MPC table and
++ * write it into the ID register.
++ */
++ apic_printk(APIC_VERBOSE,KERN_INFO "...changing IO-APIC physical APIC ID to %d ...",
++ mp_ioapics[apic].mpc_apicid);
++
++ reg_00.bits.ID = mp_ioapics[apic].mpc_apicid;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0, reg_00.raw);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ /*
++ * Sanity check
++ */
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(apic, 0);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ if (reg_00.bits.ID != mp_ioapics[apic].mpc_apicid)
++ printk("could not set ID!\n");
++ else
++ apic_printk(APIC_VERBOSE," ok.\n");
++ }
++}
++#else
++static void __init setup_ioapic_ids_from_mpc(void) { }
++#endif
++
++/*
++ * There is a nasty bug in some older SMP boards: their mptable lies
++ * about the timer IRQ. We do the following to work around the situation:
++ *
++ * - timer IRQ defaults to IO-APIC IRQ
++ * - if this function detects that timer IRQs are defunct, then we fall
++ * back to ISA timer IRQs
++ */
++#ifndef CONFIG_XEN
++static int __init timer_irq_works(void)
++{
++ unsigned long t1 = jiffies;
++
++ local_irq_enable();
++ /* Let ten ticks pass... */
++ mdelay((10 * 1000) / HZ);
++
++ /*
++ * Expect a few ticks at least, to be sure some possible
++ * glue logic does not lock up after one or two first
++ * ticks in a non-ExtINT mode. Also the local APIC
++ * might have cached one ExtINT interrupt. Finally, at
++ * least one tick may be lost due to delays.
++ */
++
++ /* jiffies wrap? */
++ if (jiffies - t1 > 4)
++ return 1;
++ return 0;
++}
++
++/*
++ * In the SMP+IOAPIC case it might happen that there are an unspecified
++ * number of pending IRQ events unhandled. These cases are very rare,
++ * so we 'resend' these IRQs via IPIs, to the same CPU. It's much
++ * better to do it this way as thus we do not have to be aware of
++ * 'pending' interrupts in the IRQ path, except at this point.
++ */
++/*
++ * Edge triggered needs to resend any interrupt
++ * that was delayed but this is now handled in the device
++ * independent code.
++ */
++
++/*
++ * Starting up an edge-triggered IO-APIC interrupt is
++ * nasty - we need to make sure that we get the edge.
++ * If it is already asserted for some reason, we need to
++ * return 1 to indicate that it was pending.
++ *
++ * This is not complete - we should be able to fake
++ * an edge even if it isn't on the 8259A...
++ */
++
++static unsigned int startup_edge_ioapic_irq(unsigned int irq)
++{
++ int was_pending = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ if (irq < 16) {
++ disable_8259A_irq(irq);
++ if (i8259A_irq_pending(irq))
++ was_pending = 1;
++ }
++ __unmask_IO_APIC_irq(irq);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return was_pending;
++}
++
++/*
++ * Once we have recorded IRQ_PENDING already, we can mask the
++ * interrupt for real. This prevents IRQ storms from unhandled
++ * devices.
++ */
++static void ack_edge_ioapic_irq(unsigned int irq)
++{
++ move_irq(irq);
++ if ((irq_desc[irq].status & (IRQ_PENDING | IRQ_DISABLED))
++ == (IRQ_PENDING | IRQ_DISABLED))
++ mask_IO_APIC_irq(irq);
++ ack_APIC_irq();
++}
++
++/*
++ * Level triggered interrupts can just be masked,
++ * and shutting down and starting up the interrupt
++ * is the same as enabling and disabling them -- except
++ * that startup needs to return a "was pending" value.
++ *
++ * Level triggered interrupts are special because we
++ * do not touch any IO-APIC register while handling
++ * them. We ack the APIC in the end-IRQ handler, not
++ * in the start-IRQ-handler. Protection against reentrance
++ * from the same interrupt is still provided, both by the
++ * generic IRQ layer and by the fact that an unacked local
++ * APIC does not accept IRQs.
++ */
++static unsigned int startup_level_ioapic_irq (unsigned int irq)
++{
++ unmask_IO_APIC_irq(irq);
++
++ return 0; /* don't check for pending */
++}
++
++static void end_level_ioapic_irq (unsigned int irq)
++{
++ move_irq(irq);
++ ack_APIC_irq();
++}
++
++#ifdef CONFIG_PCI_MSI
++static unsigned int startup_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_edge_ioapic_irq(irq);
++}
++
++static void ack_edge_ioapic_vector(unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ ack_edge_ioapic_irq(irq);
++}
++
++static unsigned int startup_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ return startup_level_ioapic_irq (irq);
++}
++
++static void end_level_ioapic_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ move_native_irq(vector);
++ end_level_ioapic_irq(irq);
++}
++
++static void mask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ mask_IO_APIC_irq(irq);
++}
++
++static void unmask_IO_APIC_vector (unsigned int vector)
++{
++ int irq = vector_to_irq(vector);
++
++ unmask_IO_APIC_irq(irq);
++}
++
++#ifdef CONFIG_SMP
++static void set_ioapic_affinity_vector (unsigned int vector,
++ cpumask_t cpu_mask)
++{
++ int irq = vector_to_irq(vector);
++
++ set_native_irq_info(vector, cpu_mask);
++ set_ioapic_affinity_irq(irq, cpu_mask);
++}
++#endif // CONFIG_SMP
++#endif // CONFIG_PCI_MSI
++
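++/* Re-trigger an IRQ by sending its vector back to this CPU as a self-IPI. */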
++static int ioapic_retrigger(unsigned int irq)
++{
++ send_IPI_self(IO_APIC_VECTOR(irq));
++
++ return 1;
++}
++
++/*
++ * Level and edge triggered IO-APIC interrupts need different handling,
++ * so we use two separate IRQ descriptors. Edge triggered IRQs can be
++ * handled with the level-triggered descriptor, but that one has slightly
++ * more overhead. Level-triggered interrupts cannot be handled with the
++ * edge-triggered handler, without risking IRQ storms and other ugly
++ * races.
++ */
++
++static struct hw_interrupt_type ioapic_edge_type __read_mostly = {
++ .typename = "IO-APIC-edge",
++ .startup = startup_edge_ioapic,
++ .shutdown = shutdown_edge_ioapic,
++ .enable = enable_edge_ioapic,
++ .disable = disable_edge_ioapic,
++ .ack = ack_edge_ioapic,
++ .end = end_edge_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++
++static struct hw_interrupt_type ioapic_level_type __read_mostly = {
++ .typename = "IO-APIC-level",
++ .startup = startup_level_ioapic,
++ .shutdown = shutdown_level_ioapic,
++ .enable = enable_level_ioapic,
++ .disable = disable_level_ioapic,
++ .ack = mask_and_ack_level_ioapic,
++ .end = end_level_ioapic,
++#ifdef CONFIG_SMP
++ .set_affinity = set_ioapic_affinity,
++#endif
++ .retrigger = ioapic_retrigger,
++};
++#endif /* !CONFIG_XEN */
++
++static inline void init_IO_APIC_traps(void)
++{
++ int irq;
++
++ /*
++ * NOTE! The local APIC isn't very good at handling
++ * multiple interrupts at the same interrupt level.
++ * As the interrupt level is determined by taking the
++ * vector number and shifting that right by 4, we
++ * want to spread these out a bit so that they don't
++ * all fall in the same interrupt level.
++ *
++ * Also, we've got to be careful not to trash gate
++ * 0x80, because int 0x80 is hm, kind of importantish. ;)
++ */
++ for (irq = 0; irq < NR_IRQS ; irq++) {
++ int tmp = irq;
++ if (use_pci_vector()) {
++ if (!platform_legacy_irq(tmp))
++ if ((tmp = vector_to_irq(tmp)) == -1)
++ continue;
++ }
++ if (IO_APIC_IRQ(tmp) && !IO_APIC_VECTOR(tmp)) {
++ /*
++ * Hmm.. We don't have an entry for this,
++ * so default to an old-fashioned 8259
++ * interrupt if we can..
++ */
++ if (irq < 16)
++ make_8259A_irq(irq);
++#ifndef CONFIG_XEN
++ else
++ /* Strange. Oh, well.. */
++ irq_desc[irq].chip = &no_irq_type;
++#endif
++ }
++ }
++}
++
++#ifndef CONFIG_XEN
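++/*
++ * Handlers used when the timer interrupt is delivered through the
++ * local APIC's LVT0 instead of the IO-APIC (see check_timer()).
++ */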
++static void enable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v & ~APIC_LVT_MASKED);
++}
++
++static void disable_lapic_irq (unsigned int irq)
++{
++ unsigned long v;
++
++ v = apic_read(APIC_LVT0);
++ apic_write(APIC_LVT0, v | APIC_LVT_MASKED);
++}
++
++static void ack_lapic_irq (unsigned int irq)
++{
++ ack_APIC_irq();
++}
++
++static void end_lapic_irq (unsigned int i) { /* nothing */ }
++
++static struct hw_interrupt_type lapic_irq_type __read_mostly = {
++ .typename = "local-APIC-edge",
++ .startup = NULL, /* startup_irq() not used for IRQ0 */
++ .shutdown = NULL, /* shutdown_irq() not used for IRQ0 */
++ .enable = enable_lapic_irq,
++ .disable = disable_lapic_irq,
++ .ack = ack_lapic_irq,
++ .end = end_lapic_irq,
++};
++
++static void setup_nmi (void)
++{
++ /*
++ * Dirty trick to enable the NMI watchdog ...
++ * We put the 8259A master into AEOI mode and
++ * unmask on all local APICs LVT0 as NMI.
++ *
++ * The idea to use the 8259A in AEOI mode ('8259A Virtual Wire')
++ * is from Maciej W. Rozycki - so we do not have to EOI from
++ * the NMI handler or the timer interrupt.
++ */
++ printk(KERN_INFO "activating NMI Watchdog ...");
++
++ enable_NMI_through_LVT0(NULL);
++
++ printk(" done.\n");
++}
++
++/*
++ * This looks a bit hackish but it's about the only way of sending
++ * a few INTA cycles to 8259As and any associated glue logic. ICR does
++ * not support the ExtINT mode, unfortunately. We need to send these
++ * cycles as some i82489DX-based boards have glue logic that keeps the
++ * 8259A interrupt line asserted until INTA. --macro
++ */
++static inline void unlock_ExtINT_logic(void)
++{
++ int apic, pin, i;
++ struct IO_APIC_route_entry entry0, entry1;
++ unsigned char save_control, save_freq_select;
++ unsigned long flags;
++
++ pin = find_isa_irq_pin(8, mp_INT);
++ apic = find_isa_irq_apic(8, mp_INT);
++ if (pin == -1)
++ return;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ *(((int *)&entry0) + 1) = io_apic_read(apic, 0x11 + 2 * pin);
++ *(((int *)&entry0) + 0) = io_apic_read(apic, 0x10 + 2 * pin);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++ clear_IO_APIC_pin(apic, pin);
++
++ memset(&entry1, 0, sizeof(entry1));
++
++ entry1.dest_mode = 0; /* physical delivery */
++ entry1.mask = 0; /* unmask IRQ now */
++ entry1.dest.physical.physical_dest = hard_smp_processor_id();
++ entry1.delivery_mode = dest_ExtINT;
++ entry1.polarity = entry0.polarity;
++ entry1.trigger = 0;
++ entry1.vector = 0;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry1) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry1) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ save_control = CMOS_READ(RTC_CONTROL);
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT);
++ CMOS_WRITE((save_freq_select & ~RTC_RATE_SELECT) | 0x6,
++ RTC_FREQ_SELECT);
++ CMOS_WRITE(save_control | RTC_PIE, RTC_CONTROL);
++
++ i = 100;
++ while (i-- > 0) {
++ mdelay(10);
++ if ((CMOS_READ(RTC_INTR_FLAGS) & RTC_PF) == RTC_PF)
++ i -= 10;
++ }
++
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++ clear_IO_APIC_pin(apic, pin);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(apic, 0x11 + 2 * pin, *(((int *)&entry0) + 1));
++ io_apic_write(apic, 0x10 + 2 * pin, *(((int *)&entry0) + 0));
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++}
++
++int timer_uses_ioapic_pin_0;
++
++/*
++ * This code may look a bit paranoid, but it's supposed to cooperate with
++ * a wide range of boards and BIOS bugs. Fortunately only the timer IRQ
++ * is so screwy. Thanks to Brian Perkins for testing/hacking this beast
++ * fanatically on his truly buggy board.
++ *
++ * FIXME: really need to revamp this for modern platforms only.
++ */
++static inline void check_timer(void)
++{
++ int apic1, pin1, apic2, pin2;
++ int vector;
++
++ /*
++ * get/set the timer IRQ vector:
++ */
++ disable_8259A_irq(0);
++ vector = assign_irq_vector(0);
++ set_intr_gate(vector, interrupt[0]);
++
++ /*
++ * Subtle, code in do_timer_interrupt() expects an AEOI
++ * mode for the 8259A whenever interrupts are routed
++ * through I/O APICs. Also IRQ0 has to be enabled in
++ * the 8259A which implies the virtual wire has to be
++ * disabled in the local APIC.
++ */
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_EXTINT);
++ init_8259A(1);
++ if (timer_over_8254 > 0)
++ enable_8259A_irq(0);
++
++ pin1 = find_isa_irq_pin(0, mp_INT);
++ apic1 = find_isa_irq_apic(0, mp_INT);
++ pin2 = ioapic_i8259.pin;
++ apic2 = ioapic_i8259.apic;
++
++ if (pin1 == 0)
++ timer_uses_ioapic_pin_0 = 1;
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "..TIMER: vector=0x%02X apic1=%d pin1=%d apic2=%d pin2=%d\n",
++ vector, apic1, pin1, apic2, pin2);
++
++ if (pin1 != -1) {
++ /*
++ * Ok, does IRQ0 through the IOAPIC work?
++ */
++ unmask_IO_APIC_irq(0);
++ if (!no_timer_check && timer_irq_works()) {
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ disable_8259A_irq(0);
++ setup_nmi();
++ enable_8259A_irq(0);
++ }
++ if (disable_timer_pin_1 > 0)
++ clear_IO_APIC_pin(0, pin1);
++ return;
++ }
++ clear_IO_APIC_pin(apic1, pin1);
++ apic_printk(APIC_QUIET,KERN_ERR "..MP-BIOS bug: 8254 timer not "
++ "connected to IO-APIC\n");
++ }
++
++ apic_printk(APIC_VERBOSE,KERN_INFO "...trying to set up timer (IRQ0) "
++ "through the 8259A ... ");
++ if (pin2 != -1) {
++ apic_printk(APIC_VERBOSE,"\n..... (found apic %d pin %d) ...",
++ apic2, pin2);
++ /*
++ * legacy devices should be connected to IO APIC #0
++ */
++ setup_ExtINT_IRQ0_pin(apic2, pin2, vector);
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ nmi_watchdog_default();
++ if (nmi_watchdog == NMI_IO_APIC) {
++ setup_nmi();
++ }
++ return;
++ }
++ /*
++ * Cleanup, just in case ...
++ */
++ clear_IO_APIC_pin(apic2, pin2);
++ }
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ if (nmi_watchdog == NMI_IO_APIC) {
++ printk(KERN_WARNING "timer doesn't work through the IO-APIC - disabling NMI Watchdog!\n");
++ nmi_watchdog = 0;
++ }
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as Virtual Wire IRQ...");
++
++ disable_8259A_irq(0);
++ irq_desc[0].chip = &lapic_irq_type;
++ apic_write(APIC_LVT0, APIC_DM_FIXED | vector); /* Fixed mode */
++ enable_8259A_irq(0);
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | vector);
++ apic_printk(APIC_VERBOSE," failed.\n");
++
++ apic_printk(APIC_VERBOSE, KERN_INFO "...trying to set up timer as ExtINT IRQ...");
++
++ init_8259A(0);
++ make_8259A_irq(0);
++ apic_write(APIC_LVT0, APIC_DM_EXTINT);
++
++ unlock_ExtINT_logic();
++
++ if (timer_irq_works()) {
++ apic_printk(APIC_VERBOSE," works.\n");
++ return;
++ }
++ apic_printk(APIC_VERBOSE," failed :(.\n");
++ panic("IO-APIC + timer doesn't work! Try using the 'noapic' kernel parameter\n");
++}
++#else
++#define check_timer() ((void)0)
++int timer_uses_ioapic_pin_0 = 0;
++#endif /* !CONFIG_XEN */
++
++static int __init notimercheck(char *s)
++{
++ no_timer_check = 1;
++ return 1;
++}
++__setup("no_timer_check", notimercheck);
++
++/*
++ *
++ * IRQs that are handled by the PIC in the MPS IOAPIC case.
++ * - IRQ2 is the cascade IRQ, and cannot be an io-apic IRQ.
++ * Linux doesn't really care, as it's not actually used
++ * for any interrupt handling anyway.
++ */
++#define PIC_IRQS (1<<2)
++
++void __init setup_IO_APIC(void)
++{
++ enable_IO_APIC();
++
++ if (acpi_ioapic)
++ io_apic_irqs = ~0; /* all IRQs go through IOAPIC */
++ else
++ io_apic_irqs = ~PIC_IRQS;
++
++ apic_printk(APIC_VERBOSE, "ENABLING IO-APIC IRQs\n");
++
++ /*
++ * Set up the IO-APIC IRQ routing table.
++ */
++ if (!acpi_ioapic)
++ setup_ioapic_ids_from_mpc();
++#ifndef CONFIG_XEN
++ sync_Arb_IDs();
++#endif /* !CONFIG_XEN */
++ setup_IO_APIC_irqs();
++ init_IO_APIC_traps();
++ check_timer();
++ if (!acpi_ioapic)
++ print_IO_APIC();
++}
++
++struct sysfs_ioapic_data {
++ struct sys_device dev;
++ struct IO_APIC_route_entry entry[0];
++};
++static struct sysfs_ioapic_data * mp_ioapic_data[MAX_IO_APICS];
++
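++/*
++ * Save the IO-APIC redirection entries at suspend time so that
++ * ioapic_resume() can restore them (and the IO-APIC ID) on wakeup.
++ */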
++static int ioapic_suspend(struct sys_device *dev, pm_message_t state)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++ spin_lock_irqsave(&ioapic_lock, flags);
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ *(((int *)entry) + 1) = io_apic_read(dev->id, 0x11 + 2 * i);
++ *(((int *)entry) + 0) = io_apic_read(dev->id, 0x10 + 2 * i);
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static int ioapic_resume(struct sys_device *dev)
++{
++ struct IO_APIC_route_entry *entry;
++ struct sysfs_ioapic_data *data;
++ unsigned long flags;
++ union IO_APIC_reg_00 reg_00;
++ int i;
++
++ data = container_of(dev, struct sysfs_ioapic_data, dev);
++ entry = data->entry;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_00.raw = io_apic_read(dev->id, 0);
++ if (reg_00.bits.ID != mp_ioapics[dev->id].mpc_apicid) {
++ reg_00.bits.ID = mp_ioapics[dev->id].mpc_apicid;
++ io_apic_write(dev->id, 0, reg_00.raw);
++ }
++ for (i = 0; i < nr_ioapic_registers[dev->id]; i ++, entry ++ ) {
++ io_apic_write(dev->id, 0x11+2*i, *(((int *)entry)+1));
++ io_apic_write(dev->id, 0x10+2*i, *(((int *)entry)+0));
++ }
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++static struct sysdev_class ioapic_sysdev_class = {
++ set_kset_name("ioapic"),
++#ifndef CONFIG_XEN
++ .suspend = ioapic_suspend,
++ .resume = ioapic_resume,
++#endif
++};
++
++static int __init ioapic_init_sysfs(void)
++{
++ struct sys_device * dev;
++ int i, size, error = 0;
++
++ error = sysdev_class_register(&ioapic_sysdev_class);
++ if (error)
++ return error;
++
++ for (i = 0; i < nr_ioapics; i++ ) {
++ size = sizeof(struct sys_device) + nr_ioapic_registers[i]
++ * sizeof(struct IO_APIC_route_entry);
++ mp_ioapic_data[i] = kmalloc(size, GFP_KERNEL);
++ if (!mp_ioapic_data[i]) {
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ memset(mp_ioapic_data[i], 0, size);
++ dev = &mp_ioapic_data[i]->dev;
++ dev->id = i;
++ dev->cls = &ioapic_sysdev_class;
++ error = sysdev_register(dev);
++ if (error) {
++ kfree(mp_ioapic_data[i]);
++ mp_ioapic_data[i] = NULL;
++ printk(KERN_ERR "Can't suspend/resume IOAPIC %d\n", i);
++ continue;
++ }
++ }
++
++ return 0;
++}
++
++device_initcall(ioapic_init_sysfs);
++
++/* --------------------------------------------------------------------------
++ ACPI-based IOAPIC Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++#define IO_APIC_MAX_ID 0xFE
++
++int __init io_apic_get_version (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.version;
++}
++
++
++int __init io_apic_get_redir_entries (int ioapic)
++{
++ union IO_APIC_reg_01 reg_01;
++ unsigned long flags;
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ reg_01.raw = io_apic_read(ioapic, 1);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return reg_01.bits.entries;
++}
++
++
++int io_apic_set_pci_routing (int ioapic, int pin, int irq, int edge_level, int active_high_low)
++{
++ struct IO_APIC_route_entry entry;
++ unsigned long flags;
++
++ if (!IO_APIC_IRQ(irq)) {
++ apic_printk(APIC_QUIET,KERN_ERR "IOAPIC[%d]: Invalid reference to IRQ 0\n",
++ ioapic);
++ return -EINVAL;
++ }
++
++ /*
++ * Generate a PCI IRQ routing entry and program the IOAPIC accordingly.
++ * Note that we mask (disable) IRQs now -- these get enabled when the
++ * corresponding device driver registers for this IRQ.
++ */
++
++ memset(&entry,0,sizeof(entry));
++
++ entry.delivery_mode = INT_DELIVERY_MODE;
++ entry.dest_mode = INT_DEST_MODE;
++ entry.dest.logical.logical_dest = cpu_mask_to_apicid(TARGET_CPUS);
++ entry.trigger = edge_level;
++ entry.polarity = active_high_low;
++ entry.mask = 1; /* Disabled (masked) */
++
++ irq = gsi_irq_sharing(irq);
++ /*
++ * IRQs < 16 are already in the irq_2_pin[] map
++ */
++ if (irq >= 16)
++ add_pin_to_irq(irq, ioapic, pin);
++
++ entry.vector = assign_irq_vector(irq);
++
++ apic_printk(APIC_VERBOSE,KERN_DEBUG "IOAPIC[%d]: Set PCI routing entry (%d-%d -> 0x%x -> "
++ "IRQ %d Mode:%i Active:%i)\n", ioapic,
++ mp_ioapics[ioapic].mpc_apicid, pin, entry.vector, irq,
++ edge_level, active_high_low);
++
++ ioapic_register_intr(irq, entry.vector, edge_level);
++
++ if (!ioapic && (irq < 16))
++ disable_8259A_irq(irq);
++
++ spin_lock_irqsave(&ioapic_lock, flags);
++ io_apic_write(ioapic, 0x11+2*pin, *(((int *)&entry)+1));
++ io_apic_write(ioapic, 0x10+2*pin, *(((int *)&entry)+0));
++ set_native_irq_info(use_pci_vector() ? entry.vector : irq, TARGET_CPUS);
++ spin_unlock_irqrestore(&ioapic_lock, flags);
++
++ return 0;
++}
++
++#endif /* CONFIG_ACPI */
++
++
++#ifndef CONFIG_XEN
++/*
++ * This function is currently only a helper for the SMP boot process: we
++ * need to reprogram the ioredtbls to cater for the cpus which have come
++ * online, so the mask in all cases should simply be TARGET_CPUS.
++ */
++#ifdef CONFIG_SMP
++void __init setup_ioapic_dest(void)
++{
++ int pin, ioapic, irq, irq_entry;
++
++ if (skip_ioapic_setup == 1)
++ return;
++
++ for (ioapic = 0; ioapic < nr_ioapics; ioapic++) {
++ for (pin = 0; pin < nr_ioapic_registers[ioapic]; pin++) {
++ irq_entry = find_irq_entry(ioapic, pin, mp_INT);
++ if (irq_entry == -1)
++ continue;
++ irq = pin_2_irq(irq_entry, ioapic, pin);
++ set_ioapic_affinity_irq(irq, TARGET_CPUS);
++ }
++
++ }
++}
++#endif
++#endif /* !CONFIG_XEN */
+Index: head-2008-11-25/arch/x86/kernel/ioport_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/ioport_64-xen.c 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,101 @@
++/*
++ * linux/arch/x86_64/kernel/ioport.c
++ *
++ * This contains the io-permission bitmap code - written by obz, with changes
++ * by Linus.
++ */
++
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/types.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/stddef.h>
++#include <linux/slab.h>
++#include <linux/thread_info.h>
++#include <xen/interface/physdev.h>
++
++/* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
++static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
++{
++ int i;
++
++ if (new_value)
++ for (i = base; i < base + extent; i++)
++ __set_bit(i, bitmap);
++ else
++ for (i = base; i < base + extent; i++)
++ clear_bit(i, bitmap);
++}
++
++/*
++ * this changes the io permissions bitmap in the current task.
++ */
++asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
++{
++	struct thread_struct * t = &current->thread;
++ unsigned long *bitmap;
++ struct physdev_set_iobitmap set_iobitmap;
++
++ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
++ return -EINVAL;
++ if (turn_on && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /*
++ * If it's the first ioperm() call in this thread's lifetime, set the
++ * IO bitmap up. ioperm() is much less timing critical than clone(),
++ * this is why we delay this operation until now:
++ */
++ if (!t->io_bitmap_ptr) {
++ bitmap = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!bitmap)
++ return -ENOMEM;
++
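++		/* A set bit in the I/O bitmap means "access denied", so start fully restricted. */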
++ memset(bitmap, 0xff, IO_BITMAP_BYTES);
++ t->io_bitmap_ptr = bitmap;
++
++ set_xen_guest_handle(set_iobitmap.bitmap, (char *)bitmap);
++ set_iobitmap.nr_ports = IO_BITMAP_BITS;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &set_iobitmap));
++ }
++
++ set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
++
++ return 0;
++}
++
++/*
++ * sys_iopl has to be used when you want to access the IO ports
++ * beyond the 0x3ff range: to get the full 65536 ports bitmapped
++ * you'd need 8kB of bitmaps/process, which is a bit excessive.
++ *
++ */
++
++asmlinkage long sys_iopl(unsigned int new_iopl, struct pt_regs *regs)
++{
++ unsigned int old_iopl = current->thread.iopl;
++ struct physdev_set_iopl set_iopl;
++
++ if (new_iopl > 3)
++ return -EINVAL;
++
++ /* Need "raw I/O" privileges for direct port access. */
++ if ((new_iopl > old_iopl) && !capable(CAP_SYS_RAWIO))
++ return -EPERM;
++
++ /* Change our version of the privilege levels. */
++ current->thread.iopl = new_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (new_iopl == 0) ? 1 : new_iopl;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++ return 0;
++}
+Index: head-2008-11-25/arch/x86/kernel/irq_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/irq_64-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,198 @@
++/*
++ * linux/arch/x86_64/kernel/irq.c
++ *
++ * Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
++ *
++ * This file contains the lowest level x86_64-specific interrupt
++ * entry and irq statistics code. All the remaining irq logic is
++ * done by the generic kernel/irq/ code and in the
++ * x86_64-specific irq controller code. (e.g. i8259.c and
++ * io_apic.c.)
++ */
++
++#include <linux/kernel_stat.h>
++#include <linux/interrupt.h>
++#include <linux/seq_file.h>
++#include <linux/module.h>
++#include <linux/delay.h>
++#include <asm/uaccess.h>
++#include <asm/io_apic.h>
++#include <asm/idle.h>
++
++atomic_t irq_err_count;
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++atomic_t irq_mis_count;
++#endif
++#endif
++
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++/*
++ * Probabilistic stack overflow check:
++ *
++ * Only check the stack in process context, because everything else
++ * runs on the big interrupt stacks. Checking reliably is too expensive,
++ * so we just check from interrupts.
++ */
++static inline void stack_overflow_check(struct pt_regs *regs)
++{
++ u64 curbase = (u64) current->thread_info;
++ static unsigned long warned = -60*HZ;
++
++ if (regs->rsp >= curbase && regs->rsp <= curbase + THREAD_SIZE &&
++ regs->rsp < curbase + sizeof(struct thread_info) + 128 &&
++ time_after(jiffies, warned + 60*HZ)) {
++ printk("do_IRQ: %s near stack overflow (cur:%Lx,rsp:%lx)\n",
++ current->comm, curbase, regs->rsp);
++ show_stack(NULL,NULL);
++ warned = jiffies;
++ }
++}
++#endif
++
++/*
++ * Generic, controller-independent functions:
++ */
++
++int show_interrupts(struct seq_file *p, void *v)
++{
++ int i = *(loff_t *) v, j;
++ struct irqaction * action;
++ unsigned long flags;
++
++ if (i == 0) {
++ seq_printf(p, " ");
++ for_each_online_cpu(j)
++ seq_printf(p, "CPU%-8d",j);
++ seq_putc(p, '\n');
++ }
++
++ if (i < NR_IRQS) {
++ spin_lock_irqsave(&irq_desc[i].lock, flags);
++ action = irq_desc[i].action;
++ if (!action)
++ goto skip;
++ seq_printf(p, "%3d: ",i);
++#ifndef CONFIG_SMP
++ seq_printf(p, "%10u ", kstat_irqs(i));
++#else
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
++#endif
++ seq_printf(p, " %14s", irq_desc[i].chip->typename);
++
++ seq_printf(p, " %s", action->name);
++ for (action=action->next; action; action = action->next)
++ seq_printf(p, ", %s", action->name);
++ seq_putc(p, '\n');
++skip:
++ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
++ } else if (i == NR_IRQS) {
++ seq_printf(p, "NMI: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->__nmi_count);
++ seq_putc(p, '\n');
++#ifdef CONFIG_X86_LOCAL_APIC
++ seq_printf(p, "LOC: ");
++ for_each_online_cpu(j)
++ seq_printf(p, "%10u ", cpu_pda(j)->apic_timer_irqs);
++ seq_putc(p, '\n');
++#endif
++ seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
++#ifdef CONFIG_X86_IO_APIC
++#ifdef APIC_MISMATCH_DEBUG
++ seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
++#endif
++#endif
++ }
++ return 0;
++}
++
++/*
++ * do_IRQ handles all normal device IRQ's (the special
++ * SMP cross-CPU interrupts have their own specific
++ * handlers).
++ */
++asmlinkage unsigned int do_IRQ(struct pt_regs *regs)
++{
++ /* high bit used in ret_from_ code */
++ unsigned irq = ~regs->orig_rax;
++
++ if (unlikely(irq >= NR_IRQS)) {
++ printk(KERN_EMERG "%s: cannot handle IRQ %d\n",
++ __FUNCTION__, irq);
++ BUG();
++ }
++
++ /*exit_idle();*/
++ /*irq_enter();*/
++#ifdef CONFIG_DEBUG_STACKOVERFLOW
++ stack_overflow_check(regs);
++#endif
++ __do_IRQ(irq, regs);
++ /*irq_exit();*/
++
++ return 1;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
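++/* A CPU is going down: reroute its IRQs onto the CPUs remaining in @map. */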
++void fixup_irqs(cpumask_t map)
++{
++ unsigned int irq;
++ static int warned;
++
++ for (irq = 0; irq < NR_IRQS; irq++) {
++ cpumask_t mask;
++ if (irq == 2)
++ continue;
++
++ cpus_and(mask, irq_desc[irq].affinity, map);
++ if (any_online_cpu(mask) == NR_CPUS) {
++ /*printk("Breaking affinity for irq %i\n", irq);*/
++ mask = map;
++ }
++ if (irq_desc[irq].chip->set_affinity)
++ irq_desc[irq].chip->set_affinity(irq, mask);
++ else if (irq_desc[irq].action && !(warned++))
++ printk("Cannot set affinity for irq %i\n", irq);
++ }
++
++ /* That doesn't seem sufficient. Give it 1ms. */
++ local_irq_enable();
++ mdelay(1);
++ local_irq_disable();
++}
++#endif
++
++extern void call_softirq(void);
++
++asmlinkage void do_softirq(void)
++{
++ __u32 pending;
++ unsigned long flags;
++
++ if (in_interrupt())
++ return;
++
++ local_irq_save(flags);
++ pending = local_softirq_pending();
++ /* Switch to interrupt stack */
++ if (pending) {
++ call_softirq();
++ WARN_ON_ONCE(softirq_count());
++ }
++ local_irq_restore(flags);
++}
++EXPORT_SYMBOL(do_softirq);
++
++#ifndef CONFIG_X86_LOCAL_APIC
++/*
++ * 'what should we do if we get a hw irq event on an illegal vector'.
++ * Each architecture has to answer this itself.
++ */
++void ack_bad_irq(unsigned int irq)
++{
++ printk("unexpected IRQ trap at vector %02x\n", irq);
++}
++#endif
+Index: head-2008-11-25/arch/x86/kernel/ldt_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/ldt_64-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,283 @@
++/*
++ * linux/arch/x86_64/kernel/ldt.c
++ *
++ * Copyright (C) 1992 Krishna Balasubramanian and Linus Torvalds
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ * Copyright (C) 2002 Andi Kleen
++ *
++ * This handles calls from both 32bit and 64bit mode.
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/vmalloc.h>
++#include <linux/slab.h>
++
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/ldt.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/pgalloc.h>
++
++#ifdef CONFIG_SMP /* avoids "defined but not used" warning */
++static void flush_ldt(void *null)
++{
++ if (current->active_mm)
++		load_LDT(&current->active_mm->context);
++}
++#endif
++
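++/* Grow the LDT to at least "mincount" entries; Xen requires descriptor pages to be read-only before they are loaded. */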
++static int alloc_ldt(mm_context_t *pc, unsigned mincount, int reload)
++{
++ void *oldldt;
++ void *newldt;
++ unsigned oldsize;
++
++ if (mincount <= (unsigned)pc->size)
++ return 0;
++ oldsize = pc->size;
++ mincount = (mincount+511)&(~511);
++ if (mincount*LDT_ENTRY_SIZE > PAGE_SIZE)
++ newldt = vmalloc(mincount*LDT_ENTRY_SIZE);
++ else
++ newldt = kmalloc(mincount*LDT_ENTRY_SIZE, GFP_KERNEL);
++
++ if (!newldt)
++ return -ENOMEM;
++
++ if (oldsize)
++ memcpy(newldt, pc->ldt, oldsize*LDT_ENTRY_SIZE);
++ oldldt = pc->ldt;
++ memset(newldt+oldsize*LDT_ENTRY_SIZE, 0, (mincount-oldsize)*LDT_ENTRY_SIZE);
++ wmb();
++ pc->ldt = newldt;
++ wmb();
++ pc->size = mincount;
++ wmb();
++ if (reload) {
++#ifdef CONFIG_SMP
++ cpumask_t mask;
++
++ preempt_disable();
++#endif
++ make_pages_readonly(
++ pc->ldt,
++ (pc->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ load_LDT(pc);
++#ifdef CONFIG_SMP
++ mask = cpumask_of_cpu(smp_processor_id());
++ if (!cpus_equal(current->mm->cpu_vm_mask, mask))
++ smp_call_function(flush_ldt, NULL, 1, 1);
++ preempt_enable();
++#endif
++ }
++ if (oldsize) {
++ make_pages_writable(
++ oldldt,
++ (oldsize * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (oldsize*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(oldldt);
++ else
++ kfree(oldldt);
++ }
++ return 0;
++}
++
++static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
++{
++ int err = alloc_ldt(new, old->size, 0);
++ if (err < 0)
++ return err;
++ memcpy(new->ldt, old->ldt, old->size*LDT_ENTRY_SIZE);
++ make_pages_readonly(
++ new->ldt,
++ (new->size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ return 0;
++}
++
++/*
++ * we do not have to muck with descriptors here; that is
++ * done in switch_mm() as needed.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
++{
++ struct mm_struct * old_mm;
++ int retval = 0;
++
++ memset(&mm->context, 0, sizeof(mm->context));
++ init_MUTEX(&mm->context.sem);
++ old_mm = current->mm;
++ if (old_mm && old_mm->context.size > 0) {
++ down(&old_mm->context.sem);
++ retval = copy_ldt(&mm->context, &old_mm->context);
++ up(&old_mm->context.sem);
++ }
++ if (retval == 0) {
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++ return retval;
++}
++
++/*
++ *
++ * Don't touch the LDT register - we're already in the next thread.
++ */
++void destroy_context(struct mm_struct *mm)
++{
++ if (mm->context.size) {
++ if (mm == current->active_mm)
++ clear_LDT();
++ make_pages_writable(
++ mm->context.ldt,
++ (mm->context.size * LDT_ENTRY_SIZE) / PAGE_SIZE,
++ XENFEAT_writable_descriptor_tables);
++ if (mm->context.size*LDT_ENTRY_SIZE > PAGE_SIZE)
++ vfree(mm->context.ldt);
++ else
++ kfree(mm->context.ldt);
++ mm->context.size = 0;
++ }
++ if (!mm->context.pinned) {
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++ }
++}
++
++static int read_ldt(void __user * ptr, unsigned long bytecount)
++{
++ int err;
++ unsigned long size;
++ struct mm_struct * mm = current->mm;
++
++ if (!mm->context.size)
++ return 0;
++ if (bytecount > LDT_ENTRY_SIZE*LDT_ENTRIES)
++ bytecount = LDT_ENTRY_SIZE*LDT_ENTRIES;
++
++ down(&mm->context.sem);
++ size = mm->context.size*LDT_ENTRY_SIZE;
++ if (size > bytecount)
++ size = bytecount;
++
++ err = 0;
++ if (copy_to_user(ptr, mm->context.ldt, size))
++ err = -EFAULT;
++ up(&mm->context.sem);
++ if (err < 0)
++ goto error_return;
++ if (size != bytecount) {
++ /* zero-fill the rest */
++ if (clear_user(ptr+size, bytecount-size) != 0) {
++ err = -EFAULT;
++ goto error_return;
++ }
++ }
++ return bytecount;
++error_return:
++ return err;
++}
++
++static int read_default_ldt(void __user * ptr, unsigned long bytecount)
++{
++ /* Arbitrary number */
++ /* x86-64 default LDT is all zeros */
++ if (bytecount > 128)
++ bytecount = 128;
++ if (clear_user(ptr, bytecount))
++ return -EFAULT;
++ return bytecount;
++}
++
++static int write_ldt(void __user * ptr, unsigned long bytecount, int oldmode)
++{
++ struct task_struct *me = current;
++ struct mm_struct * mm = me->mm;
++ __u32 entry_1, entry_2, *lp;
++ unsigned long mach_lp;
++ int error;
++ struct user_desc ldt_info;
++
++ error = -EINVAL;
++
++ if (bytecount != sizeof(ldt_info))
++ goto out;
++ error = -EFAULT;
++ if (copy_from_user(&ldt_info, ptr, bytecount))
++ goto out;
++
++ error = -EINVAL;
++ if (ldt_info.entry_number >= LDT_ENTRIES)
++ goto out;
++ if (ldt_info.contents == 3) {
++ if (oldmode)
++ goto out;
++ if (ldt_info.seg_not_present == 0)
++ goto out;
++ }
++
++ down(&mm->context.sem);
++ if (ldt_info.entry_number >= (unsigned)mm->context.size) {
++		error = alloc_ldt(&current->mm->context, ldt_info.entry_number+1, 1);
++ if (error < 0)
++ goto out_unlock;
++ }
++
++ lp = (__u32 *) ((ldt_info.entry_number << 3) + (char *) mm->context.ldt);
++ mach_lp = arbitrary_virt_to_machine(lp);
++
++ /* Allow LDTs to be cleared by the user. */
++ if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
++ if (oldmode || LDT_empty(&ldt_info)) {
++ entry_1 = 0;
++ entry_2 = 0;
++ goto install;
++ }
++ }
++
++ entry_1 = LDT_entry_a(&ldt_info);
++ entry_2 = LDT_entry_b(&ldt_info);
++ if (oldmode)
++ entry_2 &= ~(1 << 20);
++
++ /* Install the new entry ... */
++install:
++ error = HYPERVISOR_update_descriptor(mach_lp, (unsigned long)((entry_1 | (unsigned long) entry_2 << 32)));
++
++out_unlock:
++ up(&mm->context.sem);
++out:
++ return error;
++}
++
++asmlinkage int sys_modify_ldt(int func, void __user *ptr, unsigned long bytecount)
++{
++ int ret = -ENOSYS;
++
++ switch (func) {
++ case 0:
++ ret = read_ldt(ptr, bytecount);
++ break;
++ case 1:
++ ret = write_ldt(ptr, bytecount, 1);
++ break;
++ case 2:
++ ret = read_default_ldt(ptr, bytecount);
++ break;
++ case 0x11:
++ ret = write_ldt(ptr, bytecount, 0);
++ break;
++ }
++ return ret;
++}
+Index: head-2008-11-25/arch/x86/kernel/mpparse_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/mpparse_64-xen.c 2007-06-12 13:13:01.000000000 +0200
+@@ -0,0 +1,1013 @@
++/*
++ * Intel Multiprocessor Specification 1.1 and 1.4
++ * compliant MP-table parsing routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998, 1999, 2000 Ingo Molnar <mingo@redhat.com>
++ *
++ * Fixes
++ * Erich Boleyn : MP v1.4 and additional changes.
++ * Alan Cox : Added EBDA scanning
++ * Ingo Molnar : various cleanups and rewrites
++ * Maciej W. Rozycki: Bits for default MP configurations
++ * Paul Diefenbaugh: Added full ACPI support
++ */
++
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/bootmem.h>
++#include <linux/smp_lock.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/acpi.h>
++#include <linux/module.h>
++
++#include <asm/smp.h>
++#include <asm/mtrr.h>
++#include <asm/mpspec.h>
++#include <asm/pgalloc.h>
++#include <asm/io_apic.h>
++#include <asm/proto.h>
++#include <asm/acpi.h>
++
++/* Have we found an MP table */
++int smp_found_config;
++unsigned int __initdata maxcpus = NR_CPUS;
++
++int acpi_found_madt;
++
++/*
++ * Various Linux-internal data structures created from the
++ * MP-table.
++ */
++unsigned char apic_version [MAX_APICS];
++unsigned char mp_bus_id_to_type [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++int mp_bus_id_to_pci_bus [MAX_MP_BUSSES] = { [0 ... MAX_MP_BUSSES-1] = -1 };
++
++static int mp_current_pci_id = 0;
++/* I/O APIC entries */
++struct mpc_config_ioapic mp_ioapics[MAX_IO_APICS];
++
++/* # of MP IRQ source entries */
++struct mpc_config_intsrc mp_irqs[MAX_IRQ_SOURCES];
++
++/* MP IRQ source entries */
++int mp_irq_entries;
++
++int nr_ioapics;
++int pic_mode;
++unsigned long mp_lapic_addr = 0;
++
++
++
++/* Processor that is doing the boot up */
++unsigned int boot_cpu_id = -1U;
++/* Internal processor count */
++unsigned int num_processors __initdata = 0;
++
++unsigned disabled_cpus __initdata;
++
++/* Bitmask of physically existing CPUs */
++physid_mask_t phys_cpu_present_map = PHYSID_MASK_NONE;
++
++/* ACPI MADT entry parsing functions */
++#ifdef CONFIG_ACPI
++extern struct acpi_boot_flags acpi_boot;
++#ifdef CONFIG_X86_LOCAL_APIC
++extern int acpi_parse_lapic (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_addr_ovr (acpi_table_entry_header *header);
++extern int acpi_parse_lapic_nmi (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_LOCAL_APIC*/
++#ifdef CONFIG_X86_IO_APIC
++extern int acpi_parse_ioapic (acpi_table_entry_header *header);
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
++
++u8 bios_cpu_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++
++/*
++ * Intel MP BIOS table parsing routines:
++ */
++
++/*
++ * Checksum an MP configuration block.
++ */
++
++static int __init mpf_checksum(unsigned char *mp, int len)
++{
++ int sum = 0;
++
++ while (len--)
++ sum += *mp++;
++
++ return sum & 0xFF;
++}
++
++#ifndef CONFIG_XEN
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ int cpu;
++ unsigned char ver;
++ cpumask_t tmp_map;
++
++ if (!(m->mpc_cpuflag & CPU_ENABLED)) {
++ disabled_cpus++;
++ return;
++ }
++
++ printk(KERN_INFO "Processor #%d %d:%d APIC version %d\n",
++ m->mpc_apicid,
++ (m->mpc_cpufeature & CPU_FAMILY_MASK)>>8,
++ (m->mpc_cpufeature & CPU_MODEL_MASK)>>4,
++ m->mpc_apicver);
++
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ Dprintk(" Bootup CPU\n");
++ boot_cpu_id = m->mpc_apicid;
++ }
++ if (num_processors >= NR_CPUS) {
++ printk(KERN_WARNING "WARNING: NR_CPUS limit of %i reached."
++ " Processor ignored.\n", NR_CPUS);
++ return;
++ }
++
++ num_processors++;
++ cpus_complement(tmp_map, cpu_present_map);
++ cpu = first_cpu(tmp_map);
++
++#if MAX_APICS < 255
++ if ((int)m->mpc_apicid > MAX_APICS) {
++ printk(KERN_ERR "Processor #%d INVALID. (Max ID: %d).\n",
++ m->mpc_apicid, MAX_APICS);
++ return;
++ }
++#endif
++ ver = m->mpc_apicver;
++
++ physid_set(m->mpc_apicid, phys_cpu_present_map);
++ /*
++ * Validate version
++ */
++ if (ver == 0x0) {
++ printk(KERN_ERR "BIOS bug, APIC version is 0 for CPU#%d! fixing up to 0x10. (tell your hw vendor)\n", m->mpc_apicid);
++ ver = 0x10;
++ }
++ apic_version[m->mpc_apicid] = ver;
++ if (m->mpc_cpuflag & CPU_BOOTPROCESSOR) {
++ /*
++ * bios_cpu_apicid is required to have processors listed
++ * in same order as logical cpu numbers. Hence the first
++ * entry is BSP, and so on.
++ */
++ cpu = 0;
++ }
++ bios_cpu_apicid[cpu] = m->mpc_apicid;
++ x86_cpu_to_apicid[cpu] = m->mpc_apicid;
++
++ cpu_set(cpu, cpu_possible_map);
++ cpu_set(cpu, cpu_present_map);
++}
++#else
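++/* Under Xen the hypervisor manages the (v)CPUs; just count the table entries. */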
++static void __cpuinit MP_processor_info (struct mpc_config_processor *m)
++{
++ num_processors++;
++}
++#endif /* CONFIG_XEN */
++
++static void __init MP_bus_info (struct mpc_config_bus *m)
++{
++ char str[7];
++
++ memcpy(str, m->mpc_bustype, 6);
++ str[6] = 0;
++ Dprintk("Bus #%d is %s\n", m->mpc_busid, str);
++
++ if (strncmp(str, "ISA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_ISA;
++ } else if (strncmp(str, "EISA", 4) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_EISA;
++ } else if (strncmp(str, "PCI", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_PCI;
++ mp_bus_id_to_pci_bus[m->mpc_busid] = mp_current_pci_id;
++ mp_current_pci_id++;
++ } else if (strncmp(str, "MCA", 3) == 0) {
++ mp_bus_id_to_type[m->mpc_busid] = MP_BUS_MCA;
++ } else {
++ printk(KERN_ERR "Unknown bustype %s\n", str);
++ }
++}
++
++static void __init MP_ioapic_info (struct mpc_config_ioapic *m)
++{
++ if (!(m->mpc_flags & MPC_APIC_USABLE))
++ return;
++
++ printk("I/O APIC #%d Version %d at 0x%X.\n",
++ m->mpc_apicid, m->mpc_apicver, m->mpc_apicaddr);
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "Max # of I/O APICs (%d) exceeded (found %d).\n",
++ MAX_IO_APICS, nr_ioapics);
++		panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!m->mpc_apicaddr) {
++ printk(KERN_ERR "WARNING: bogus zero I/O APIC address"
++ " found in MP table, skipping!\n");
++ return;
++ }
++ mp_ioapics[nr_ioapics] = *m;
++ nr_ioapics++;
++}
++
++static void __init MP_intsrc_info (struct mpc_config_intsrc *m)
++{
++ mp_irqs [mp_irq_entries] = *m;
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC INT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) & 3, m->mpc_srcbus,
++ m->mpc_srcbusirq, m->mpc_dstapic, m->mpc_dstirq);
++ if (++mp_irq_entries >= MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!!\n");
++}
++
++static void __init MP_lintsrc_info (struct mpc_config_lintsrc *m)
++{
++ Dprintk("Lint: type %d, pol %d, trig %d, bus %d,"
++ " IRQ %02x, APIC ID %x, APIC LINT %02x\n",
++ m->mpc_irqtype, m->mpc_irqflag & 3,
++ (m->mpc_irqflag >> 2) &3, m->mpc_srcbusid,
++ m->mpc_srcbusirq, m->mpc_destapic, m->mpc_destapiclint);
++ /*
++ * Well it seems all SMP boards in existence
++ * use ExtINT/LVT1 == LINT0 and
++ * NMI/LVT2 == LINT1 - the following check
++ * will show us if this assumption is false.
++ * Until then we do not have to add baggage.
++ */
++ if ((m->mpc_irqtype == mp_ExtINT) &&
++ (m->mpc_destapiclint != 0))
++ BUG();
++ if ((m->mpc_irqtype == mp_NMI) &&
++ (m->mpc_destapiclint != 1))
++ BUG();
++}
++
++/*
++ * Read/parse the MPC
++ */
++
++static int __init smp_read_mpc(struct mp_config_table *mpc)
++{
++ char str[16];
++ int count=sizeof(*mpc);
++ unsigned char *mpt=((unsigned char *)mpc)+count;
++
++ if (memcmp(mpc->mpc_signature,MPC_SIGNATURE,4)) {
++ printk("SMP mptable: bad signature [%c%c%c%c]!\n",
++ mpc->mpc_signature[0],
++ mpc->mpc_signature[1],
++ mpc->mpc_signature[2],
++ mpc->mpc_signature[3]);
++ return 0;
++ }
++ if (mpf_checksum((unsigned char *)mpc,mpc->mpc_length)) {
++ printk("SMP mptable: checksum error!\n");
++ return 0;
++ }
++ if (mpc->mpc_spec!=0x01 && mpc->mpc_spec!=0x04) {
++ printk(KERN_ERR "SMP mptable: bad table version (%d)!!\n",
++ mpc->mpc_spec);
++ return 0;
++ }
++ if (!mpc->mpc_lapic) {
++ printk(KERN_ERR "SMP mptable: null local APIC address!\n");
++ return 0;
++ }
++ memcpy(str,mpc->mpc_oem,8);
++ str[8]=0;
++ printk(KERN_INFO "OEM ID: %s ",str);
++
++ memcpy(str,mpc->mpc_productid,12);
++ str[12]=0;
++ printk("Product ID: %s ",str);
++
++ printk("APIC at: 0x%X\n",mpc->mpc_lapic);
++
++ /* save the local APIC address, it might be non-default */
++ if (!acpi_lapic)
++ mp_lapic_addr = mpc->mpc_lapic;
++
++ /*
++ * Now process the configuration blocks.
++ */
++ while (count < mpc->mpc_length) {
++ switch(*mpt) {
++ case MP_PROCESSOR:
++ {
++ struct mpc_config_processor *m=
++ (struct mpc_config_processor *)mpt;
++ if (!acpi_lapic)
++ MP_processor_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_BUS:
++ {
++ struct mpc_config_bus *m=
++ (struct mpc_config_bus *)mpt;
++ MP_bus_info(m);
++ mpt += sizeof(*m);
++ count += sizeof(*m);
++ break;
++ }
++ case MP_IOAPIC:
++ {
++ struct mpc_config_ioapic *m=
++ (struct mpc_config_ioapic *)mpt;
++ MP_ioapic_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_INTSRC:
++ {
++ struct mpc_config_intsrc *m=
++ (struct mpc_config_intsrc *)mpt;
++
++ MP_intsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ case MP_LINTSRC:
++ {
++ struct mpc_config_lintsrc *m=
++ (struct mpc_config_lintsrc *)mpt;
++ MP_lintsrc_info(m);
++ mpt+=sizeof(*m);
++ count+=sizeof(*m);
++ break;
++ }
++ }
++ }
++ clustered_apic_check();
++ if (!num_processors)
++ printk(KERN_ERR "SMP mptable: no processors registered!\n");
++ return num_processors;
++}
++
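++/* Read an IRQ's bit in the i8259 ELCR; a set bit means level-triggered. */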
++static int __init ELCR_trigger(unsigned int irq)
++{
++ unsigned int port;
++
++ port = 0x4d0 + (irq >> 3);
++ return (inb(port) >> (irq & 7)) & 1;
++}
++
++static void __init construct_default_ioirq_mptable(int mpc_default_type)
++{
++ struct mpc_config_intsrc intsrc;
++ int i;
++ int ELCR_fallback = 0;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* conforming */
++ intsrc.mpc_srcbus = 0;
++ intsrc.mpc_dstapic = mp_ioapics[0].mpc_apicid;
++
++ intsrc.mpc_irqtype = mp_INT;
++
++ /*
++ * If true, we have an ISA/PCI system with no IRQ entries
++ * in the MP table. To prevent the PCI interrupts from being set up
++ * incorrectly, we try to use the ELCR. The sanity check to see if
++ * there is good ELCR data is very simple - IRQ0, 1, 2 and 13 can
++ * never be level sensitive, so we simply see if the ELCR agrees.
++ * If it does, we assume it's valid.
++ */
++ if (mpc_default_type == 5) {
++ printk(KERN_INFO "ISA/PCI bus type with no IRQ information... falling back to ELCR\n");
++
++ if (ELCR_trigger(0) || ELCR_trigger(1) || ELCR_trigger(2) || ELCR_trigger(13))
++ printk(KERN_ERR "ELCR contains invalid data... not using ELCR\n");
++ else {
++ printk(KERN_INFO "Using ELCR to identify PCI interrupts\n");
++ ELCR_fallback = 1;
++ }
++ }
++
++ for (i = 0; i < 16; i++) {
++ switch (mpc_default_type) {
++ case 2:
++ if (i == 0 || i == 13)
++ continue; /* IRQ0 & IRQ13 not connected */
++ /* fall through */
++ default:
++ if (i == 2)
++ continue; /* IRQ2 is never connected */
++ }
++
++ if (ELCR_fallback) {
++ /*
++ * If the ELCR indicates a level-sensitive interrupt, we
++ * copy that information over to the MP table in the
++ * irqflag field (level sensitive, active high polarity).
++ */
++ if (ELCR_trigger(i))
++ intsrc.mpc_irqflag = 13;
++ else
++ intsrc.mpc_irqflag = 0;
++ }
++
++ intsrc.mpc_srcbusirq = i;
++ intsrc.mpc_dstirq = i ? i : 2; /* IRQ0 to INTIN2 */
++ MP_intsrc_info(&intsrc);
++ }
++
++ intsrc.mpc_irqtype = mp_ExtINT;
++ intsrc.mpc_srcbusirq = 0;
++ intsrc.mpc_dstirq = 0; /* 8259A to INTIN0 */
++ MP_intsrc_info(&intsrc);
++}
++
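++/*
++ * No MP configuration table was provided, only a default type code:
++ * synthesize the table entries (CPUs, buses, IO-APIC, local interrupts)
++ * that such a standard configuration implies.
++ */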
++static inline void __init construct_default_ISA_mptable(int mpc_default_type)
++{
++ struct mpc_config_processor processor;
++ struct mpc_config_bus bus;
++ struct mpc_config_ioapic ioapic;
++ struct mpc_config_lintsrc lintsrc;
++ int linttypes[2] = { mp_ExtINT, mp_NMI };
++ int i;
++
++ /*
++ * local APIC has default address
++ */
++ mp_lapic_addr = APIC_DEFAULT_PHYS_BASE;
++
++ /*
++ * 2 CPUs, numbered 0 & 1.
++ */
++ processor.mpc_type = MP_PROCESSOR;
++ /* Either an integrated APIC or a discrete 82489DX. */
++ processor.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ processor.mpc_cpuflag = CPU_ENABLED;
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) |
++ boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++ for (i = 0; i < 2; i++) {
++ processor.mpc_apicid = i;
++ MP_processor_info(&processor);
++ }
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ switch (mpc_default_type) {
++ default:
++ printk(KERN_ERR "???\nUnknown standard configuration %d\n",
++ mpc_default_type);
++ /* fall through */
++ case 1:
++ case 5:
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ break;
++ case 2:
++ case 6:
++ case 3:
++ memcpy(bus.mpc_bustype, "EISA ", 6);
++ break;
++ case 4:
++ case 7:
++ memcpy(bus.mpc_bustype, "MCA ", 6);
++ }
++ MP_bus_info(&bus);
++ if (mpc_default_type > 4) {
++ bus.mpc_busid = 1;
++ memcpy(bus.mpc_bustype, "PCI ", 6);
++ MP_bus_info(&bus);
++ }
++
++ ioapic.mpc_type = MP_IOAPIC;
++ ioapic.mpc_apicid = 2;
++ ioapic.mpc_apicver = mpc_default_type > 4 ? 0x10 : 0x01;
++ ioapic.mpc_flags = MPC_APIC_USABLE;
++ ioapic.mpc_apicaddr = 0xFEC00000;
++ MP_ioapic_info(&ioapic);
++
++ /*
++ * We set up most of the low 16 IO-APIC pins according to MPS rules.
++ */
++ construct_default_ioirq_mptable(mpc_default_type);
++
++ lintsrc.mpc_type = MP_LINTSRC;
++ lintsrc.mpc_irqflag = 0; /* conforming */
++ lintsrc.mpc_srcbusid = 0;
++ lintsrc.mpc_srcbusirq = 0;
++ lintsrc.mpc_destapic = MP_APIC_ALL;
++ for (i = 0; i < 2; i++) {
++ lintsrc.mpc_irqtype = linttypes[i];
++ lintsrc.mpc_destapiclint = i;
++ MP_lintsrc_info(&lintsrc);
++ }
++}
++
++static struct intel_mp_floating *mpf_found;
++
++/*
++ * Scan the memory blocks for an SMP configuration block.
++ */
++void __init get_smp_config (void)
++{
++ struct intel_mp_floating *mpf = mpf_found;
++
++ /*
++ * ACPI supports both logical (e.g. Hyper-Threading) and physical
++ * processors, where MPS only supports physical.
++ */
++ if (acpi_lapic && acpi_ioapic) {
++ printk(KERN_INFO "Using ACPI (MADT) for SMP configuration information\n");
++ return;
++ }
++ else if (acpi_lapic)
++ printk(KERN_INFO "Using ACPI for processor (LAPIC) configuration information\n");
++
++ printk("Intel MultiProcessor Specification v1.%d\n", mpf->mpf_specification);
++ if (mpf->mpf_feature2 & (1<<7)) {
++ printk(KERN_INFO " IMCR and PIC compatibility mode.\n");
++ pic_mode = 1;
++ } else {
++ printk(KERN_INFO " Virtual Wire compatibility mode.\n");
++ pic_mode = 0;
++ }
++
++ /*
++ * Now see if we need to read further.
++ */
++ if (mpf->mpf_feature1 != 0) {
++
++ printk(KERN_INFO "Default MP configuration #%d\n", mpf->mpf_feature1);
++ construct_default_ISA_mptable(mpf->mpf_feature1);
++
++ } else if (mpf->mpf_physptr) {
++
++ /*
++ * Read the physical hardware table. Anything here will
++ * override the defaults.
++ */
++ if (!smp_read_mpc(isa_bus_to_virt(mpf->mpf_physptr))) {
++ smp_found_config = 0;
++ printk(KERN_ERR "BIOS bug, MP table errors detected!...\n");
++ printk(KERN_ERR "... disabling SMP support. (tell your hw vendor)\n");
++ return;
++ }
++ /*
++ * If there are no explicit MP IRQ entries, then we are
++ * broken. We set up most of the low 16 IO-APIC pins to
++ * ISA defaults and hope it will work.
++ */
++ if (!mp_irq_entries) {
++ struct mpc_config_bus bus;
++
++ printk(KERN_ERR "BIOS bug, no explicit IRQ entries, using default mptable. (tell your hw vendor)\n");
++
++ bus.mpc_type = MP_BUS;
++ bus.mpc_busid = 0;
++ memcpy(bus.mpc_bustype, "ISA ", 6);
++ MP_bus_info(&bus);
++
++ construct_default_ioirq_mptable(0);
++ }
++
++ } else
++ BUG();
++
++ printk(KERN_INFO "Processors: %d\n", num_processors);
++ /*
++ * Only use the first configuration found.
++ */
++}
++
++static int __init smp_scan_config (unsigned long base, unsigned long length)
++{
++ extern void __bad_mpf_size(void);
++ unsigned int *bp = isa_bus_to_virt(base);
++ struct intel_mp_floating *mpf;
++
++ Dprintk("Scan SMP from %p for %ld bytes.\n", bp,length);
++ if (sizeof(*mpf) != 16)
++ __bad_mpf_size();
++
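++ /*
++ * The MP floating pointer structure is 16 bytes, starts with the
++ * "_MP_" signature and is 16-byte aligned; bp is an unsigned int
++ * pointer, so bp += 4 advances one candidate structure.
++ */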
++ while (length > 0) {
++ mpf = (struct intel_mp_floating *)bp;
++ if ((*bp == SMP_MAGIC_IDENT) &&
++ (mpf->mpf_length == 1) &&
++ !mpf_checksum((unsigned char *)bp, 16) &&
++ ((mpf->mpf_specification == 1)
++ || (mpf->mpf_specification == 4)) ) {
++
++ smp_found_config = 1;
++ mpf_found = mpf;
++ return 1;
++ }
++ bp += 4;
++ length -= 16;
++ }
++ return 0;
++}
++
++void __init find_intel_smp (void)
++{
++ unsigned int address;
++
++ /*
++ * FIXME: Linux assumes you have 640K of base RAM;
++ * this continues the error...
++ *
++ * 1) Scan the bottom 1K for a signature
++ * 2) Scan the top 1K of base RAM
++ * 3) Scan the 64K of bios
++ */
++ if (smp_scan_config(0x0,0x400) ||
++ smp_scan_config(639*0x400,0x400) ||
++ smp_scan_config(0xF0000,0x10000))
++ return;
++ /*
++ * If it is an SMP machine we should know now, unless the
++ * configuration is in an EISA/MCA bus machine with an
++ * extended bios data area.
++ *
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E; calculate and scan it here.
++ *
++ * NOTE! There are Linux loaders that will corrupt the EBDA
++ * area, and as such this kind of SMP config may be less
++ * trustworthy, simply because the SMP table may have been
++ * stomped on during early boot. These loaders are buggy and
++ * should be fixed.
++ */
++
++ address = *(unsigned short *)phys_to_virt(0x40E);
++ address <<= 4;
++ if (smp_scan_config(address, 0x1000))
++ return;
++
++ /* If we have come this far, we did not find an MP table */
++ printk(KERN_INFO "No mptable found.\n");
++}
++
++/*
++ * - Intel MP Configuration Table
++ */
++void __init find_smp_config (void)
++{
++#ifdef CONFIG_X86_LOCAL_APIC
++ find_intel_smp();
++#endif
++}
++
++
++/* --------------------------------------------------------------------------
++ ACPI-based MP Configuration
++ -------------------------------------------------------------------------- */
++
++#ifdef CONFIG_ACPI
++
++void __init mp_register_lapic_address (
++ u64 address)
++{
++#ifndef CONFIG_XEN
++ mp_lapic_addr = (unsigned long) address;
++
++ set_fixmap_nocache(FIX_APIC_BASE, mp_lapic_addr);
++
++ if (boot_cpu_id == -1U)
++ boot_cpu_id = GET_APIC_ID(apic_read(APIC_ID));
++
++ Dprintk("Boot CPU = %d\n", boot_cpu_physical_apicid);
++#endif
++}
++
++
++void __cpuinit mp_register_lapic (
++ u8 id,
++ u8 enabled)
++{
++ struct mpc_config_processor processor;
++ int boot_cpu = 0;
++
++ if (id >= MAX_APICS) {
++ printk(KERN_WARNING "Processor #%d invalid (max %d)\n",
++ id, MAX_APICS);
++ return;
++ }
++
++ if (id == boot_cpu_physical_apicid)
++ boot_cpu = 1;
++
++#ifndef CONFIG_XEN
++ processor.mpc_type = MP_PROCESSOR;
++ processor.mpc_apicid = id;
++ processor.mpc_apicver = GET_APIC_VERSION(apic_read(APIC_LVR));
++ processor.mpc_cpuflag = (enabled ? CPU_ENABLED : 0);
++ processor.mpc_cpuflag |= (boot_cpu ? CPU_BOOTPROCESSOR : 0);
++ processor.mpc_cpufeature = (boot_cpu_data.x86 << 8) |
++ (boot_cpu_data.x86_model << 4) | boot_cpu_data.x86_mask;
++ processor.mpc_featureflag = boot_cpu_data.x86_capability[0];
++ processor.mpc_reserved[0] = 0;
++ processor.mpc_reserved[1] = 0;
++#endif
++
++ MP_processor_info(&processor);
++}
++
++#ifdef CONFIG_X86_IO_APIC
++
++#define MP_ISA_BUS 0
++#define MP_MAX_IOAPIC_PIN 127
++
++static struct mp_ioapic_routing {
++ int apic_id;
++ int gsi_start;
++ int gsi_end;
++ u32 pin_programmed[4];
++} mp_ioapic_routing[MAX_IO_APICS];
++
++
++static int mp_find_ioapic (
++ int gsi)
++{
++ int i = 0;
++
++ /* Find the IOAPIC that manages this GSI. */
++ for (i = 0; i < nr_ioapics; i++) {
++ if ((gsi >= mp_ioapic_routing[i].gsi_start)
++ && (gsi <= mp_ioapic_routing[i].gsi_end))
++ return i;
++ }
++
++ printk(KERN_ERR "ERROR: Unable to locate IOAPIC for GSI %d\n", gsi);
++
++ return -1;
++}
++
++
++void __init mp_register_ioapic (
++ u8 id,
++ u32 address,
++ u32 gsi_base)
++{
++ int idx = 0;
++
++ if (nr_ioapics >= MAX_IO_APICS) {
++ printk(KERN_ERR "ERROR: Max # of I/O APICs (%d) exceeded "
++ "(found %d)\n", MAX_IO_APICS, nr_ioapics);
++ panic("Recompile kernel with bigger MAX_IO_APICS!\n");
++ }
++ if (!address) {
++ printk(KERN_ERR "WARNING: Bogus (zero) I/O APIC address"
++ " found in MADT table, skipping!\n");
++ return;
++ }
++
++ idx = nr_ioapics++;
++
++ mp_ioapics[idx].mpc_type = MP_IOAPIC;
++ mp_ioapics[idx].mpc_flags = MPC_APIC_USABLE;
++ mp_ioapics[idx].mpc_apicaddr = address;
++
++#ifndef CONFIG_XEN
++ set_fixmap_nocache(FIX_IO_APIC_BASE_0 + idx, address);
++#endif
++ mp_ioapics[idx].mpc_apicid = id;
++ mp_ioapics[idx].mpc_apicver = io_apic_get_version(idx);
++
++ /*
++ * Build basic IRQ lookup table to facilitate gsi->io_apic lookups
++ * and to prevent reprogramming of IOAPIC pins (PCI IRQs).
++ */
++ mp_ioapic_routing[idx].apic_id = mp_ioapics[idx].mpc_apicid;
++ mp_ioapic_routing[idx].gsi_start = gsi_base;
++ mp_ioapic_routing[idx].gsi_end = gsi_base +
++ io_apic_get_redir_entries(idx);
++
++ printk(KERN_INFO "IOAPIC[%d]: apic_id %d, version %d, address 0x%x, "
++ "GSI %d-%d\n", idx, mp_ioapics[idx].mpc_apicid,
++ mp_ioapics[idx].mpc_apicver, mp_ioapics[idx].mpc_apicaddr,
++ mp_ioapic_routing[idx].gsi_start,
++ mp_ioapic_routing[idx].gsi_end);
++
++ return;
++}
++
++
++void __init mp_override_legacy_irq (
++ u8 bus_irq,
++ u8 polarity,
++ u8 trigger,
++ u32 gsi)
++{
++ struct mpc_config_intsrc intsrc;
++ int ioapic = -1;
++ int pin = -1;
++
++ /*
++ * Convert 'gsi' to 'ioapic.pin'.
++ */
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0)
++ return;
++ pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * TBD: This check is for faulty timer entries, where the override
++ * erroneously sets the trigger to level, resulting in a HUGE
++ * increase of timer interrupts!
++ */
++ if ((bus_irq == 0) && (trigger == 3))
++ trigger = 1;
++
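++ /*
++ * MPS irqflag layout: bits 0-1 hold the polarity and bits 2-3 the
++ * trigger mode (1 = edge, 3 = level), hence the shift below.
++ */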
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_irqflag = (trigger << 2) | polarity;
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_srcbusirq = bus_irq; /* IRQ */
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid; /* APIC ID */
++ intsrc.mpc_dstirq = pin; /* INTIN# */
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, %d-%d\n",
++ intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic, intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++
++ return;
++}
++
++
++void __init mp_config_acpi_legacy_irqs (void)
++{
++ struct mpc_config_intsrc intsrc;
++ int i = 0;
++ int ioapic = -1;
++
++ /*
++ * Fabricate the legacy ISA bus (bus #31).
++ */
++ mp_bus_id_to_type[MP_ISA_BUS] = MP_BUS_ISA;
++ Dprintk("Bus #%d is ISA\n", MP_ISA_BUS);
++
++ /*
++ * Locate the IOAPIC that manages the ISA IRQs (0-15).
++ */
++ ioapic = mp_find_ioapic(0);
++ if (ioapic < 0)
++ return;
++
++ intsrc.mpc_type = MP_INTSRC;
++ intsrc.mpc_irqflag = 0; /* Conforming */
++ intsrc.mpc_srcbus = MP_ISA_BUS;
++ intsrc.mpc_dstapic = mp_ioapics[ioapic].mpc_apicid;
++
++ /*
++ * Use the default configuration for the IRQs 0-15, unless
++ * overridden by (MADT) interrupt source override entries.
++ */
++ for (i = 0; i < 16; i++) {
++ int idx;
++
++ for (idx = 0; idx < mp_irq_entries; idx++) {
++ struct mpc_config_intsrc *irq = mp_irqs + idx;
++
++ /* Do we already have a mapping for this ISA IRQ? */
++ if (irq->mpc_srcbus == MP_ISA_BUS && irq->mpc_srcbusirq == i)
++ break;
++
++ /* Do we already have a mapping for this IOAPIC pin */
++ if ((irq->mpc_dstapic == intsrc.mpc_dstapic) &&
++ (irq->mpc_dstirq == i))
++ break;
++ }
++
++ if (idx != mp_irq_entries) {
++ printk(KERN_DEBUG "ACPI: IRQ%d used by override.\n", i);
++ continue; /* IRQ already used */
++ }
++
++ intsrc.mpc_irqtype = mp_INT;
++ intsrc.mpc_srcbusirq = i; /* Identity mapped */
++ intsrc.mpc_dstirq = i;
++
++ Dprintk("Int: type %d, pol %d, trig %d, bus %d, irq %d, "
++ "%d-%d\n", intsrc.mpc_irqtype, intsrc.mpc_irqflag & 3,
++ (intsrc.mpc_irqflag >> 2) & 3, intsrc.mpc_srcbus,
++ intsrc.mpc_srcbusirq, intsrc.mpc_dstapic,
++ intsrc.mpc_dstirq);
++
++ mp_irqs[mp_irq_entries] = intsrc;
++ if (++mp_irq_entries == MAX_IRQ_SOURCES)
++ panic("Max # of irq sources exceeded!\n");
++ }
++
++ return;
++}
++
++#define MAX_GSI_NUM 4096
++
++int mp_register_gsi(u32 gsi, int triggering, int polarity)
++{
++ int ioapic = -1;
++ int ioapic_pin = 0;
++ int idx, bit = 0;
++ static int pci_irq = 16;
++ /*
++ * Mapping from Global System Interrupts, which
++ * represent all possible interrupts, to the IRQs
++ * assigned to actual devices.
++ */
++ static int gsi_to_irq[MAX_GSI_NUM];
++
++ if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
++ return gsi;
++
++ /* Don't set up the ACPI SCI because it's already set up */
++ if (acpi_fadt.sci_int == gsi)
++ return gsi;
++
++ ioapic = mp_find_ioapic(gsi);
++ if (ioapic < 0) {
++ printk(KERN_WARNING "No IOAPIC for GSI %u\n", gsi);
++ return gsi;
++ }
++
++ ioapic_pin = gsi - mp_ioapic_routing[ioapic].gsi_start;
++
++ /*
++ * Avoid pin reprogramming. PRTs typically include entries
++ * with redundant pin->gsi mappings (but unique PCI devices);
++ * we only program the IOAPIC on the first.
++ */
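++ /* Split the pin number into a 32-bit word index and a bit index. */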
++ bit = ioapic_pin % 32;
++ idx = (ioapic_pin < 32) ? 0 : (ioapic_pin / 32);
++ if (idx > 3) {
++ printk(KERN_ERR "Invalid reference to IOAPIC pin "
++ "%d-%d\n", mp_ioapic_routing[ioapic].apic_id,
++ ioapic_pin);
++ return gsi;
++ }
++ if ((1<<bit) & mp_ioapic_routing[ioapic].pin_programmed[idx]) {
++ Dprintk(KERN_DEBUG "Pin %d-%d already programmed\n",
++ mp_ioapic_routing[ioapic].apic_id, ioapic_pin);
++ return gsi_to_irq[gsi];
++ }
++
++ mp_ioapic_routing[ioapic].pin_programmed[idx] |= (1<<bit);
++
++ if (triggering == ACPI_LEVEL_SENSITIVE) {
++ /*
++ * For PCI devices assign IRQs in order, avoiding gaps
++ * due to unused I/O APIC pins.
++ */
++ int irq = gsi;
++ if (gsi < MAX_GSI_NUM) {
++ /*
++ * Retain the VIA chipset work-around (gsi > 15), but
++ * avoid a problem where the 8254 timer (IRQ0) is set up
++ * via an override (so it's not on pin 0 of the ioapic),
++ * and at the same time, the pin 0 interrupt is a PCI
++ * type. The gsi > 15 test could cause these two pins
++ * to be shared as IRQ0, and they are not shareable.
++ * So test for this condition, and if necessary, avoid
++ * the pin collision.
++ */
++ if (gsi > 15 || (gsi == 0 && !timer_uses_ioapic_pin_0))
++ gsi = pci_irq++;
++ /*
++ * Don't assign IRQ used by ACPI SCI
++ */
++ if (gsi == acpi_fadt.sci_int)
++ gsi = pci_irq++;
++ gsi_to_irq[irq] = gsi;
++ } else {
++ printk(KERN_ERR "GSI %u is too high\n", gsi);
++ return gsi;
++ }
++ }
++
++ io_apic_set_pci_routing(ioapic, ioapic_pin, gsi,
++ triggering == ACPI_EDGE_SENSITIVE ? 0 : 1,
++ polarity == ACPI_ACTIVE_HIGH ? 0 : 1);
++ return gsi;
++}
++
++#endif /*CONFIG_X86_IO_APIC*/
++#endif /*CONFIG_ACPI*/
+Index: head-2008-11-25/arch/x86/kernel/process_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/process_64-xen.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,848 @@
++/*
++ * linux/arch/x86-64/kernel/process.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ *
++ * X86-64 port
++ * Andi Kleen.
++ *
++ * CPU hotplug support - ashok.raj@intel.com
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ */
++
++/*
++ * This file handles the architecture-dependent parts of process handling..
++ */
++
++#include <stdarg.h>
++
++#include <linux/cpu.h>
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/elfcore.h>
++#include <linux/smp.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/module.h>
++#include <linux/a.out.h>
++#include <linux/interrupt.h>
++#include <linux/delay.h>
++#include <linux/ptrace.h>
++#include <linux/utsname.h>
++#include <linux/random.h>
++#include <linux/notifier.h>
++#include <linux/kprobes.h>
++
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/processor.h>
++#include <asm/i387.h>
++#include <asm/mmu_context.h>
++#include <asm/pda.h>
++#include <asm/prctl.h>
++#include <asm/kdebug.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/vcpu.h>
++#include <asm/desc.h>
++#include <asm/proto.h>
++#include <asm/hardirq.h>
++#include <asm/ia32.h>
++#include <asm/idle.h>
++
++#include <xen/cpu_hotplug.h>
++
++asmlinkage extern void ret_from_fork(void);
++
++unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;
++
++unsigned long boot_option_idle_override = 0;
++EXPORT_SYMBOL(boot_option_idle_override);
++
++/*
++ * Power management idle function, if any.
++ */
++void (*pm_idle)(void);
++EXPORT_SYMBOL(pm_idle);
++static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
++
++static ATOMIC_NOTIFIER_HEAD(idle_notifier);
++
++void idle_notifier_register(struct notifier_block *n)
++{
++ atomic_notifier_chain_register(&idle_notifier, n);
++}
++EXPORT_SYMBOL_GPL(idle_notifier_register);
++
++void idle_notifier_unregister(struct notifier_block *n)
++{
++ atomic_notifier_chain_unregister(&idle_notifier, n);
++}
++EXPORT_SYMBOL(idle_notifier_unregister);
++
++enum idle_state { CPU_IDLE, CPU_NOT_IDLE };
++static DEFINE_PER_CPU(enum idle_state, idle_state) = CPU_NOT_IDLE;
++
++void enter_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_START, NULL);
++}
++
++static void __exit_idle(void)
++{
++ __get_cpu_var(idle_state) = CPU_NOT_IDLE;
++ atomic_notifier_call_chain(&idle_notifier, IDLE_END, NULL);
++}
++
++/* Called from interrupts to signify idle end */
++void exit_idle(void)
++{
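++ /*
++ * Only act for the idle task (pid 0) at the outermost interrupt
++ * level (pda irqcount 0); the bitwise OR is a cheap test for
++ * "either value nonzero".
++ */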
++ if (current->pid | read_pda(irqcount))
++ return;
++ __exit_idle();
++}
++
++/*
++ * On SMP it's slightly faster (but much more power-consuming!)
++ * to poll the ->need_resched flag instead of waiting for the
++ * cross-CPU IPI to arrive. Use this option with caution.
++ */
++static void poll_idle (void)
++{
++ local_irq_enable();
++
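++ /*
++ * Spin on TIF_NEED_RESCHED with interrupts enabled; "rep; nop" is
++ * the PAUSE instruction, which relaxes the CPU inside the loop.
++ */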
++ asm volatile(
++ "2:"
++ "testl %0,%1;"
++ "rep; nop;"
++ "je 2b;"
++ : :
++ "i" (_TIF_NEED_RESCHED),
++ "m" (current_thread_info()->flags));
++}
++
++static void xen_idle(void)
++{
++ local_irq_disable();
++
++ if (need_resched())
++ local_irq_enable();
++ else {
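++ /*
++ * Clear TS_POLLING so a remote resched sends a real event/IPI
++ * instead of relying on the flag being polled; the barrier
++ * orders the flag change before the halt.
++ */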
++ current_thread_info()->status &= ~TS_POLLING;
++ smp_mb__after_clear_bit();
++ safe_halt();
++ current_thread_info()->status |= TS_POLLING;
++ }
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static inline void play_dead(void)
++{
++ idle_task_exit();
++ local_irq_disable();
++ cpu_clear(smp_processor_id(), cpu_initialized);
++ preempt_enable_no_resched();
++ VOID(HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL));
++ cpu_bringup();
++}
++#else
++static inline void play_dead(void)
++{
++ BUG();
++}
++#endif /* CONFIG_HOTPLUG_CPU */
++
++/*
++ * The idle thread. There's no useful work to be
++ * done, so just try to conserve power and have a
++ * low exit latency (ie sit in a loop waiting for
++ * somebody to say that they'd like to reschedule)
++ */
++void cpu_idle (void)
++{
++ current_thread_info()->status |= TS_POLLING;
++ /* endless idle loop with no priority at all */
++ while (1) {
++ while (!need_resched()) {
++ void (*idle)(void);
++
++ if (__get_cpu_var(cpu_idle_state))
++ __get_cpu_var(cpu_idle_state) = 0;
++ rmb();
++ idle = xen_idle; /* no alternatives */
++ if (cpu_is_offline(smp_processor_id()))
++ play_dead();
++ enter_idle();
++ idle();
++ __exit_idle();
++ }
++
++ preempt_enable_no_resched();
++ schedule();
++ preempt_disable();
++ }
++}
++
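++/*
++ * Wait until every online CPU has passed through its idle loop at least
++ * once: set a per-CPU flag, then poll until each idle loop has observed
++ * and cleared it.
++ */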
++void cpu_idle_wait(void)
++{
++ unsigned int cpu, this_cpu = get_cpu();
++ cpumask_t map;
++
++ set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
++ put_cpu();
++
++ cpus_clear(map);
++ for_each_online_cpu(cpu) {
++ per_cpu(cpu_idle_state, cpu) = 1;
++ cpu_set(cpu, map);
++ }
++
++ __get_cpu_var(cpu_idle_state) = 0;
++
++ wmb();
++ do {
++ ssleep(1);
++ for_each_online_cpu(cpu) {
++ if (cpu_isset(cpu, map) &&
++ !per_cpu(cpu_idle_state, cpu))
++ cpu_clear(cpu, map);
++ }
++ cpus_and(map, map, cpu_online_map);
++ } while (!cpus_empty(map));
++}
++EXPORT_SYMBOL_GPL(cpu_idle_wait);
++
++void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
++{
++}
++
++static int __init idle_setup (char *str)
++{
++ if (!strncmp(str, "poll", 4)) {
++ printk("using polling idle threads.\n");
++ pm_idle = poll_idle;
++ }
++
++ boot_option_idle_override = 1;
++ return 1;
++}
++
++__setup("idle=", idle_setup);
++
++/* Prints also some state that isn't saved in the pt_regs */
++void __show_regs(struct pt_regs * regs)
++{
++ unsigned long fs, gs, shadowgs;
++ unsigned int fsindex,gsindex;
++ unsigned int ds,cs,es;
++
++ printk("\n");
++ print_modules();
++ printk("Pid: %d, comm: %.20s %s %s %.*s\n",
++ current->pid, current->comm, print_tainted(),
++ system_utsname.release,
++ (int)strcspn(system_utsname.version, " "),
++ system_utsname.version);
++ printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
++ printk_address(regs->rip);
++ printk("RSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
++ regs->eflags);
++ printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
++ regs->rax, regs->rbx, regs->rcx);
++ printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
++ regs->rdx, regs->rsi, regs->rdi);
++ printk("RBP: %016lx R08: %016lx R09: %016lx\n",
++ regs->rbp, regs->r8, regs->r9);
++ printk("R10: %016lx R11: %016lx R12: %016lx\n",
++ regs->r10, regs->r11, regs->r12);
++ printk("R13: %016lx R14: %016lx R15: %016lx\n",
++ regs->r13, regs->r14, regs->r15);
++
++ asm("mov %%ds,%0" : "=r" (ds));
++ asm("mov %%cs,%0" : "=r" (cs));
++ asm("mov %%es,%0" : "=r" (es));
++ asm("mov %%fs,%0" : "=r" (fsindex));
++ asm("mov %%gs,%0" : "=r" (gsindex));
++
++ rdmsrl(MSR_FS_BASE, fs);
++ rdmsrl(MSR_GS_BASE, gs);
++ rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);
++
++ printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
++ fs,fsindex,gs,gsindex,shadowgs);
++ printk("CS: %04x DS: %04x ES: %04x\n", cs, ds, es);
++
++}
++
++void show_regs(struct pt_regs *regs)
++{
++ printk("CPU %d:", smp_processor_id());
++ __show_regs(regs);
++ show_trace(NULL, regs, (void *)(regs + 1));
++}
++
++/*
++ * Free current thread data structures etc..
++ */
++void exit_thread(void)
++{
++ struct task_struct *me = current;
++ struct thread_struct *t = &me->thread;
++
++ if (me->thread.io_bitmap_ptr) {
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
++#endif
++#ifdef CONFIG_XEN
++ struct physdev_set_iobitmap iobmp_op;
++ memset(&iobmp_op, 0, sizeof(iobmp_op));
++#endif
++
++ kfree(t->io_bitmap_ptr);
++ t->io_bitmap_ptr = NULL;
++ /*
++ * Careful, clear this in the TSS too:
++ */
++#ifndef CONFIG_X86_NO_TSS
++ memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
++ put_cpu();
++#endif
++#ifdef CONFIG_XEN
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap,
++ &iobmp_op));
++#endif
++ t->io_bitmap_max = 0;
++ }
++}
++
++void load_gs_index(unsigned gs)
++{
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, gs));
++}
++
++void flush_thread(void)
++{
++ struct task_struct *tsk = current;
++ struct thread_info *t = current_thread_info();
++
++ if (t->flags & _TIF_ABI_PENDING) {
++ t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);
++ if (t->flags & _TIF_IA32)
++ current_thread_info()->status |= TS_COMPAT;
++ }
++
++ tsk->thread.debugreg0 = 0;
++ tsk->thread.debugreg1 = 0;
++ tsk->thread.debugreg2 = 0;
++ tsk->thread.debugreg3 = 0;
++ tsk->thread.debugreg6 = 0;
++ tsk->thread.debugreg7 = 0;
++ memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
++ /*
++ * Forget coprocessor state..
++ */
++ clear_fpu(tsk);
++ clear_used_math();
++}
++
++void release_thread(struct task_struct *dead_task)
++{
++ if (dead_task->mm) {
++ if (dead_task->mm->context.size) {
++ printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
++ dead_task->comm,
++ dead_task->mm->context.ldt,
++ dead_task->mm->context.size);
++ BUG();
++ }
++ }
++}
++
++static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
++{
++ struct user_desc ud = {
++ .base_addr = addr,
++ .limit = 0xfffff,
++ .seg_32bit = 1,
++ .limit_in_pages = 1,
++ .useable = 1,
++ };
++ struct n_desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ desc->a = LDT_entry_a(&ud);
++ desc->b = LDT_entry_b(&ud);
++}
++
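++/* Reassemble the 32-bit segment base from the split descriptor fields. */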
++static inline u32 read_32bit_tls(struct task_struct *t, int tls)
++{
++ struct desc_struct *desc = (void *)t->thread.tls_array;
++ desc += tls;
++ return desc->base0 |
++ (((u32)desc->base1) << 16) |
++ (((u32)desc->base2) << 24);
++}
++
++/*
++ * This gets called before we allocate a new thread and copy
++ * the current task into it.
++ */
++void prepare_to_copy(struct task_struct *tsk)
++{
++ unlazy_fpu(tsk);
++}
++
++int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
++ unsigned long unused,
++ struct task_struct * p, struct pt_regs * regs)
++{
++ int err;
++ struct pt_regs * childregs;
++ struct task_struct *me = current;
++
++ childregs = ((struct pt_regs *)
++ (THREAD_SIZE + task_stack_page(p))) - 1;
++ *childregs = *regs;
++
++ childregs->rax = 0;
++ childregs->rsp = rsp;
++ if (rsp == ~0UL)
++ childregs->rsp = (unsigned long)childregs;
++
++ p->thread.rsp = (unsigned long) childregs;
++ p->thread.rsp0 = (unsigned long) (childregs+1);
++ p->thread.userrsp = me->thread.userrsp;
++
++ set_tsk_thread_flag(p, TIF_FORK);
++
++ p->thread.fs = me->thread.fs;
++ p->thread.gs = me->thread.gs;
++
++ asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
++ asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
++ asm("mov %%es,%0" : "=m" (p->thread.es));
++ asm("mov %%ds,%0" : "=m" (p->thread.ds));
++
++ if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
++ p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
++ if (!p->thread.io_bitmap_ptr) {
++ p->thread.io_bitmap_max = 0;
++ return -ENOMEM;
++ }
++ memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
++ IO_BITMAP_BYTES);
++ }
++
++ /*
++ * Set a new TLS for the child thread?
++ */
++ if (clone_flags & CLONE_SETTLS) {
++#ifdef CONFIG_IA32_EMULATION
++ if (test_thread_flag(TIF_IA32))
++ err = ia32_child_tls(p, childregs);
++ else
++#endif
++ err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
++ if (err)
++ goto out;
++ }
++ p->thread.iopl = current->thread.iopl;
++
++ err = 0;
++out:
++ if (err && p->thread.io_bitmap_ptr) {
++ kfree(p->thread.io_bitmap_ptr);
++ p->thread.io_bitmap_max = 0;
++ }
++ return err;
++}
++
++static inline void __save_init_fpu( struct task_struct *tsk )
++{
++ asm volatile( "rex64 ; fxsave %0 ; fnclex"
++ : "=m" (tsk->thread.i387.fxsave));
++ tsk->thread_info->status &= ~TS_USEDFPU;
++}
++
++/*
++ * switch_to(x,y) should switch tasks from x to y.
++ *
++ * This could still be optimized:
++ * - fold all the options into a flag word and test it with a single test.
++ * - could test fs/gs bitsliced
++ *
++ * Kprobes not supported here. Set the probe on schedule instead.
++ */
++__kprobes struct task_struct *
++__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
++{
++ struct thread_struct *prev = &prev_p->thread,
++ *next = &next_p->thread;
++ int cpu = smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *tss = &per_cpu(init_tss, cpu);
++#endif
++#if CONFIG_XEN_COMPAT > 0x030002
++ struct physdev_set_iopl iopl_op;
++ struct physdev_set_iobitmap iobmp_op;
++#else
++ struct physdev_op _pdo[2], *pdo = _pdo;
++#define iopl_op pdo->u.set_iopl
++#define iobmp_op pdo->u.set_iobitmap
++#endif
++ multicall_entry_t _mcl[8], *mcl = _mcl;
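++ /* Up to 8 hypercalls are queued here and issued as one multicall. */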
++
++ /*
++ * This is basically '__unlazy_fpu', except that we queue a
++ * multicall to indicate FPU task switch, rather than
++ * synchronously trapping to Xen.
++ * The AMD workaround requires it to be after DS reload, or
++ * after DS has been cleared, which we do in __prepare_arch_switch.
++ */
++ if (prev_p->thread_info->status & TS_USEDFPU) {
++ __save_init_fpu(prev_p); /* _not_ save_init_fpu() */
++ mcl->op = __HYPERVISOR_fpu_taskswitch;
++ mcl->args[0] = 1;
++ mcl++;
++ }
++
++ /*
++ * Reload esp0, LDT and the page table pointer:
++ */
++ mcl->op = __HYPERVISOR_stack_switch;
++ mcl->args[0] = __KERNEL_DS;
++ mcl->args[1] = next->rsp0;
++ mcl++;
++
++ /*
++ * Load the per-thread Thread-Local Storage descriptor.
++ * This is load_TLS(next, cpu) with multicalls.
++ */
++#define C(i) do { \
++ if (unlikely(next->tls_array[i] != prev->tls_array[i])) { \
++ mcl->op = __HYPERVISOR_update_descriptor; \
++ mcl->args[0] = virt_to_machine( \
++ &cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]); \
++ mcl->args[1] = next->tls_array[i]; \
++ mcl++; \
++ } \
++} while (0)
++ C(0); C(1); C(2);
++#undef C
++
++ if (unlikely(prev->iopl != next->iopl)) {
++ iopl_op.iopl = (next->iopl == 0) ? 1 : next->iopl;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iopl;
++ mcl->args[1] = (unsigned long)&iopl_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iopl;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++ if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
++ set_xen_guest_handle(iobmp_op.bitmap,
++ (char *)next->io_bitmap_ptr);
++ iobmp_op.nr_ports = next->io_bitmap_ptr ? IO_BITMAP_BITS : 0;
++#if CONFIG_XEN_COMPAT > 0x030002
++ mcl->op = __HYPERVISOR_physdev_op;
++ mcl->args[0] = PHYSDEVOP_set_iobitmap;
++ mcl->args[1] = (unsigned long)&iobmp_op;
++#else
++ mcl->op = __HYPERVISOR_physdev_op_compat;
++ pdo->cmd = PHYSDEVOP_set_iobitmap;
++ mcl->args[0] = (unsigned long)pdo++;
++#endif
++ mcl++;
++ }
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ BUG_ON(pdo > _pdo + ARRAY_SIZE(_pdo));
++#endif
++ BUG_ON(mcl > _mcl + ARRAY_SIZE(_mcl));
++ if (unlikely(HYPERVISOR_multicall_check(_mcl, mcl - _mcl, NULL)))
++ BUG();
++
++ /*
++ * Switch DS and ES.
++ * This won't pick up thread selector changes, but I guess that is ok.
++ */
++ if (unlikely(next->es))
++ loadsegment(es, next->es);
++
++ if (unlikely(next->ds))
++ loadsegment(ds, next->ds);
++
++ /*
++ * Switch FS and GS.
++ */
++ if (unlikely(next->fsindex))
++ loadsegment(fs, next->fsindex);
++
++ if (next->fs)
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_FS, next->fs));
++
++ if (unlikely(next->gsindex))
++ load_gs_index(next->gsindex);
++
++ if (next->gs)
++ WARN_ON(HYPERVISOR_set_segment_base(SEGBASE_GS_USER, next->gs));
++
++ /*
++ * Switch the PDA context.
++ */
++ prev->userrsp = read_pda(oldrsp);
++ write_pda(oldrsp, next->userrsp);
++ write_pda(pcurrent, next_p);
++ write_pda(kernelstack,
++ task_stack_page(next_p) + THREAD_SIZE - PDA_STACKOFFSET);
++
++ /*
++ * Now maybe reload the debug registers
++ */
++ if (unlikely(next->debugreg7)) {
++ set_debugreg(next->debugreg0, 0);
++ set_debugreg(next->debugreg1, 1);
++ set_debugreg(next->debugreg2, 2);
++ set_debugreg(next->debugreg3, 3);
++ /* no 4 and 5 */
++ set_debugreg(next->debugreg6, 6);
++ set_debugreg(next->debugreg7, 7);
++ }
++
++ return prev_p;
++}
++
++/*
++ * sys_execve() executes a new program.
++ */
++asmlinkage
++long sys_execve(char __user *name, char __user * __user *argv,
++ char __user * __user *envp, struct pt_regs regs)
++{
++ long error;
++ char * filename;
++
++ filename = getname(name);
++ error = PTR_ERR(filename);
++ if (IS_ERR(filename))
++ return error;
++ error = do_execve(filename, argv, envp, &regs);
++ if (error == 0) {
++ task_lock(current);
++ current->ptrace &= ~PT_DTRACE;
++ task_unlock(current);
++ }
++ putname(filename);
++ return error;
++}
++
++void set_personality_64bit(void)
++{
++ /* inherit personality from parent */
++
++ /* Make sure to be in 64bit mode */
++ clear_thread_flag(TIF_IA32);
++
++ /* TBD: overwrites user setup. Should have two bits.
++ But 64bit processes have always behaved this way,
++ so it's not too bad. The main problem is just that
++ 32bit children are affected again. */
++ current->personality &= ~READ_IMPLIES_EXEC;
++}
++
++asmlinkage long sys_fork(struct pt_regs *regs)
++{
++ return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
++}
++
++asmlinkage long
++sys_clone(unsigned long clone_flags, unsigned long newsp,
++ void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
++{
++ if (!newsp)
++ newsp = regs->rsp;
++ return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
++}
++
++/*
++ * This is trivial, and on the face of it looks like it
++ * could equally well be done in user mode.
++ *
++ * Not so, for quite unobvious reasons - register pressure.
++ * In user mode vfork() cannot have a stack frame, and if
++ * done by calling the "clone()" system call directly, you
++ * do not have enough call-clobbered registers to hold all
++ * the information you need.
++ */
++asmlinkage long sys_vfork(struct pt_regs *regs)
++{
++ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
++ NULL, NULL);
++}
++
++unsigned long get_wchan(struct task_struct *p)
++{
++ unsigned long stack;
++ u64 fp,rip;
++ int count = 0;
++
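++ /*
++ * Walk the saved frame pointers on the task's stack (at most 16
++ * frames) until a return address outside the scheduler is found.
++ */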
++ if (!p || p == current || p->state==TASK_RUNNING)
++ return 0;
++ stack = (unsigned long)task_stack_page(p);
++ if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
++ return 0;
++ fp = *(u64 *)(p->thread.rsp);
++ do {
++ if (fp < (unsigned long)stack ||
++ fp > (unsigned long)stack+THREAD_SIZE)
++ return 0;
++ rip = *(u64 *)(fp+8);
++ if (!in_sched_functions(rip))
++ return rip;
++ fp = *(u64 *)fp;
++ } while (count++ < 16);
++ return 0;
++}
++
++long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
++{
++ int ret = 0;
++ int doit = task == current;
++ int cpu;
++
++ switch (code) {
++ case ARCH_SET_GS:
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, GS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ load_gs_index(GS_TLS_SEL);
++ }
++ task->thread.gsindex = GS_TLS_SEL;
++ task->thread.gs = 0;
++ } else {
++ task->thread.gsindex = 0;
++ task->thread.gs = addr;
++ if (doit) {
++ load_gs_index(0);
++ ret = HYPERVISOR_set_segment_base(
++ SEGBASE_GS_USER, addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_SET_FS:
++ /* Not strictly needed for fs, but do it for symmetry
++ with gs */
++ if (addr >= TASK_SIZE_OF(task))
++ return -EPERM;
++ cpu = get_cpu();
++ /* handle small bases via the GDT because that's faster to
++ switch. */
++ if (addr <= 0xffffffff) {
++ set_32bit_tls(task, FS_TLS, addr);
++ if (doit) {
++ load_TLS(&task->thread, cpu);
++ asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
++ }
++ task->thread.fsindex = FS_TLS_SEL;
++ task->thread.fs = 0;
++ } else {
++ task->thread.fsindex = 0;
++ task->thread.fs = addr;
++ if (doit) {
++ /* set the selector to 0 to not confuse
++ __switch_to */
++ asm volatile("movl %0,%%fs" :: "r" (0));
++ ret = HYPERVISOR_set_segment_base(SEGBASE_FS,
++ addr);
++ }
++ }
++ put_cpu();
++ break;
++ case ARCH_GET_FS: {
++ unsigned long base;
++ if (task->thread.fsindex == FS_TLS_SEL)
++ base = read_32bit_tls(task, FS_TLS);
++ else if (doit)
++ rdmsrl(MSR_FS_BASE, base);
++ else
++ base = task->thread.fs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++ case ARCH_GET_GS: {
++ unsigned long base;
++ unsigned gsindex;
++ if (task->thread.gsindex == GS_TLS_SEL)
++ base = read_32bit_tls(task, GS_TLS);
++ else if (doit) {
++ asm("movl %%gs,%0" : "=r" (gsindex));
++ if (gsindex)
++ rdmsrl(MSR_KERNEL_GS_BASE, base);
++ else
++ base = task->thread.gs;
++ }
++ else
++ base = task->thread.gs;
++ ret = put_user(base, (unsigned long __user *)addr);
++ break;
++ }
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++long sys_arch_prctl(int code, unsigned long addr)
++{
++ return do_arch_prctl(current, code, addr);
++}
++
++/*
++ * Capture the user space registers if the task is not running (in user space)
++ */
++int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
++{
++ struct pt_regs *pp, ptregs;
++
++ pp = task_pt_regs(tsk);
++
++ ptregs = *pp;
++ ptregs.cs &= 0xffff;
++ ptregs.ss &= 0xffff;
++
++ elf_core_copy_regs(regs, &ptregs);
++
++ return 1;
++}
++
++unsigned long arch_align_stack(unsigned long sp)
++{
++ if (randomize_va_space)
++ sp -= get_random_int() % 8192;
++ return sp & ~0xf;
++}
+Index: head-2008-11-25/arch/x86/kernel/setup_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/setup_64-xen.c 2008-04-22 15:41:51.000000000 +0200
+@@ -0,0 +1,1652 @@
++/*
++ * linux/arch/x86-64/kernel/setup.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ *
++ * Nov 2001 Dave Jones <davej@suse.de>
++ * Forked from i386 setup code.
++ */
++
++/*
++ * This file handles the architecture-dependent parts of initialization
++ */
++
++#include <linux/errno.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/stddef.h>
++#include <linux/unistd.h>
++#include <linux/ptrace.h>
++#include <linux/slab.h>
++#include <linux/user.h>
++#include <linux/a.out.h>
++#include <linux/screen_info.h>
++#include <linux/ioport.h>
++#include <linux/delay.h>
++#include <linux/init.h>
++#include <linux/initrd.h>
++#include <linux/highmem.h>
++#include <linux/bootmem.h>
++#include <linux/module.h>
++#include <asm/processor.h>
++#include <linux/console.h>
++#include <linux/seq_file.h>
++#include <linux/crash_dump.h>
++#include <linux/root_dev.h>
++#include <linux/pci.h>
++#include <linux/acpi.h>
++#include <linux/kallsyms.h>
++#include <linux/edd.h>
++#include <linux/mmzone.h>
++#include <linux/kexec.h>
++#include <linux/cpufreq.h>
++#include <linux/dmi.h>
++#include <linux/dma-mapping.h>
++#include <linux/ctype.h>
++
++#include <asm/mtrr.h>
++#include <asm/uaccess.h>
++#include <asm/system.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++#include <asm/msr.h>
++#include <asm/desc.h>
++#include <video/edid.h>
++#include <asm/e820.h>
++#include <asm/dma.h>
++#include <asm/mpspec.h>
++#include <asm/mmu_context.h>
++#include <asm/bootsetup.h>
++#include <asm/proto.h>
++#include <asm/setup.h>
++#include <asm/mach_apic.h>
++#include <asm/numa.h>
++#include <asm/sections.h>
++#include <asm/dmi.h>
++#ifdef CONFIG_XEN
++#include <linux/percpu.h>
++#include <xen/interface/physdev.h>
++#include "setup_arch_pre.h"
++#include <asm/hypervisor.h>
++#include <xen/interface/nmi.h>
++#include <xen/features.h>
++#include <xen/firmware.h>
++#include <xen/xencons.h>
++#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
++#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
++#include <asm/mach-xen/setup_arch_post.h>
++#include <xen/interface/memory.h>
++
++#ifdef CONFIG_XEN
++#include <xen/interface/kexec.h>
++#endif
++
++extern unsigned long start_pfn;
++extern struct edid_info edid_info;
++
++shared_info_t *HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++EXPORT_SYMBOL(HYPERVISOR_shared_info);
++
++extern char hypercall_page[PAGE_SIZE];
++EXPORT_SYMBOL(hypercall_page);
++
++static int xen_panic_event(struct notifier_block *, unsigned long, void *);
++static struct notifier_block xen_panic_block = {
++ xen_panic_event, NULL, 0 /* try to go last */
++};
++
++unsigned long *phys_to_machine_mapping;
++unsigned long *pfn_to_mfn_frame_list_list, *pfn_to_mfn_frame_list[512];
++
++EXPORT_SYMBOL(phys_to_machine_mapping);
++
++DEFINE_PER_CPU(multicall_entry_t, multicall_list[8]);
++DEFINE_PER_CPU(int, nr_multicall_ents);
++
++/* Raw start-of-day parameters from the hypervisor. */
++start_info_t *xen_start_info;
++EXPORT_SYMBOL(xen_start_info);
++#endif
++
++/*
++ * Machine setup..
++ */
++
++struct cpuinfo_x86 boot_cpu_data __read_mostly;
++EXPORT_SYMBOL(boot_cpu_data);
++
++unsigned long mmu_cr4_features;
++
++int acpi_disabled;
++EXPORT_SYMBOL(acpi_disabled);
++#ifdef CONFIG_ACPI
++extern int __initdata acpi_ht;
++extern acpi_interrupt_flags acpi_sci_flags;
++int __initdata acpi_force = 0;
++#endif
++
++int acpi_numa __initdata;
++
++/* Boot loader ID as an integer, for the benefit of proc_dointvec */
++int bootloader_type;
++
++unsigned long saved_video_mode;
++
++/*
++ * Early DMI memory
++ */
++int dmi_alloc_index;
++char dmi_alloc_data[DMI_MAX_DATA];
++
++/*
++ * Setup options
++ */
++struct screen_info screen_info;
++EXPORT_SYMBOL(screen_info);
++struct sys_desc_table_struct {
++ unsigned short length;
++ unsigned char table[0];
++};
++
++struct edid_info edid_info;
++EXPORT_SYMBOL_GPL(edid_info);
++struct e820map e820;
++#ifdef CONFIG_XEN
++struct e820map machine_e820;
++#endif
++
++extern int root_mountflags;
++
++char command_line[COMMAND_LINE_SIZE];
++
++struct resource standard_io_resources[] = {
++ { .name = "dma1", .start = 0x00, .end = 0x1f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic1", .start = 0x20, .end = 0x21,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer0", .start = 0x40, .end = 0x43,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "timer1", .start = 0x50, .end = 0x53,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "keyboard", .start = 0x60, .end = 0x6f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma page reg", .start = 0x80, .end = 0x8f,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "pic2", .start = 0xa0, .end = 0xa1,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "dma2", .start = 0xc0, .end = 0xdf,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO },
++ { .name = "fpu", .start = 0xf0, .end = 0xff,
++ .flags = IORESOURCE_BUSY | IORESOURCE_IO }
++};
++
++#define STANDARD_IO_RESOURCES \
++ (sizeof standard_io_resources / sizeof standard_io_resources[0])
++
++#define IORESOURCE_RAM (IORESOURCE_BUSY | IORESOURCE_MEM)
++
++struct resource data_resource = {
++ .name = "Kernel data",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++struct resource code_resource = {
++ .name = "Kernel code",
++ .start = 0,
++ .end = 0,
++ .flags = IORESOURCE_RAM,
++};
++
++#define IORESOURCE_ROM (IORESOURCE_BUSY | IORESOURCE_READONLY | IORESOURCE_MEM)
++
++static struct resource system_rom_resource = {
++ .name = "System ROM",
++ .start = 0xf0000,
++ .end = 0xfffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource extension_rom_resource = {
++ .name = "Extension ROM",
++ .start = 0xe0000,
++ .end = 0xeffff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource adapter_rom_resources[] = {
++ { .name = "Adapter ROM", .start = 0xc8000, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM },
++ { .name = "Adapter ROM", .start = 0, .end = 0,
++ .flags = IORESOURCE_ROM }
++};
++
++#define ADAPTER_ROM_RESOURCES \
++ (sizeof adapter_rom_resources / sizeof adapter_rom_resources[0])
++
++static struct resource video_rom_resource = {
++ .name = "Video ROM",
++ .start = 0xc0000,
++ .end = 0xc7fff,
++ .flags = IORESOURCE_ROM,
++};
++
++static struct resource video_ram_resource = {
++ .name = "Video RAM area",
++ .start = 0xa0000,
++ .end = 0xbffff,
++ .flags = IORESOURCE_RAM,
++};
++
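++/*
++ * Option ROMs start with the 0xAA55 signature word; a valid ROM's bytes
++ * sum to zero modulo 256.
++ */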
++#define romsignature(x) (*(unsigned short *)(x) == 0xaa55)
++
++static int __init romchecksum(unsigned char *rom, unsigned long length)
++{
++ unsigned char *p, sum = 0;
++
++ for (p = rom; p < rom + length; p++)
++ sum += *p;
++ return sum == 0;
++}
++
++static void __init probe_roms(void)
++{
++ unsigned long start, length, upper;
++ unsigned char *rom;
++ int i;
++
++#ifdef CONFIG_XEN
++ /* Nothing to do if not running in dom0. */
++ if (!is_initial_xendomain())
++ return;
++#endif
++
++ /* video rom */
++ upper = adapter_rom_resources[0].start;
++ for (start = video_rom_resource.start; start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ video_rom_resource.start = start;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* if checksum okay, trust length byte */
++ if (length && romchecksum(rom, length))
++ video_rom_resource.end = start + length - 1;
++
++ request_resource(&iomem_resource, &video_rom_resource);
++ break;
++ }
++
++ start = (video_rom_resource.end + 1 + 2047) & ~2047UL;
++ if (start < upper)
++ start = upper;
++
++ /* system rom */
++ request_resource(&iomem_resource, &system_rom_resource);
++ upper = system_rom_resource.start;
++
++ /* check for extension rom (ignore length byte!) */
++ rom = isa_bus_to_virt(extension_rom_resource.start);
++ if (romsignature(rom)) {
++ length = extension_rom_resource.end - extension_rom_resource.start + 1;
++ if (romchecksum(rom, length)) {
++ request_resource(&iomem_resource, &extension_rom_resource);
++ upper = extension_rom_resource.start;
++ }
++ }
++
++ /* check for adapter roms on 2k boundaries */
++ for (i = 0; i < ADAPTER_ROM_RESOURCES && start < upper; start += 2048) {
++ rom = isa_bus_to_virt(start);
++ if (!romsignature(rom))
++ continue;
++
++ /* 0 < length <= 0x7f * 512, historically */
++ length = rom[2] * 512;
++
++ /* but accept any length that fits if checksum okay */
++ if (!length || start + length > upper || !romchecksum(rom, length))
++ continue;
++
++ adapter_rom_resources[i].start = start;
++ adapter_rom_resources[i].end = start + length - 1;
++ request_resource(&iomem_resource, &adapter_rom_resources[i]);
++
++ start = adapter_rom_resources[i++].end & ~2047UL;
++ }
++}
++
++/* Check for full argument with no trailing characters */
++static int fullarg(char *p, char *arg)
++{
++ int l = strlen(arg);
++ return !memcmp(p, arg, l) && (p[l] == 0 || isspace(p[l]));
++}
++
++static __init void parse_cmdline_early (char ** cmdline_p)
++{
++ char c = ' ', *to = command_line, *from = COMMAND_LINE;
++ int len = 0;
++ int userdef = 0;
++
++ for (;;) {
++ if (c != ' ')
++ goto next_char;
++
++#ifdef CONFIG_SMP
++ /*
++ * If the BIOS enumerates physical processors before logical,
++ * maxcpus=N at enumeration-time can be used to disable HT.
++ */
++ else if (!memcmp(from, "maxcpus=", 8)) {
++ extern unsigned int maxcpus;
++
++ maxcpus = simple_strtoul(from + 8, NULL, 0);
++ }
++#endif
++#ifdef CONFIG_ACPI
++ /* "acpi=off" disables both ACPI table parsing and interpreter init */
++ if (fullarg(from,"acpi=off"))
++ disable_acpi();
++
++ if (fullarg(from, "acpi=force")) {
++ /* add later when we do DMI horrors: */
++ acpi_force = 1;
++ acpi_disabled = 0;
++ }
++
++ /* acpi=ht just means: do ACPI MADT parsing
++ at bootup, but don't enable the full ACPI interpreter */
++ if (fullarg(from, "acpi=ht")) {
++ if (!acpi_force)
++ disable_acpi();
++ acpi_ht = 1;
++ }
++ else if (fullarg(from, "pci=noacpi"))
++ acpi_disable_pci();
++ else if (fullarg(from, "acpi=noirq"))
++ acpi_noirq_set();
++
++ else if (fullarg(from, "acpi_sci=edge"))
++ acpi_sci_flags.trigger = 1;
++ else if (fullarg(from, "acpi_sci=level"))
++ acpi_sci_flags.trigger = 3;
++ else if (fullarg(from, "acpi_sci=high"))
++ acpi_sci_flags.polarity = 1;
++ else if (fullarg(from, "acpi_sci=low"))
++ acpi_sci_flags.polarity = 3;
++
++ /* acpi=strict disables out-of-spec workarounds */
++ else if (fullarg(from, "acpi=strict")) {
++ acpi_strict = 1;
++ }
++#ifdef CONFIG_X86_IO_APIC
++ else if (fullarg(from, "acpi_skip_timer_override"))
++ acpi_skip_timer_override = 1;
++#endif
++#endif
++
++#ifndef CONFIG_XEN
++ if (fullarg(from, "nolapic") || fullarg(from, "disableapic")) {
++ clear_bit(X86_FEATURE_APIC, boot_cpu_data.x86_capability);
++ disable_apic = 1;
++ }
++
++ if (fullarg(from, "noapic"))
++ skip_ioapic_setup = 1;
++
++ if (fullarg(from,"apic")) {
++ skip_ioapic_setup = 0;
++ ioapic_force = 1;
++ }
++#endif
++
++ if (!memcmp(from, "mem=", 4))
++ parse_memopt(from+4, &from);
++
++ if (!memcmp(from, "memmap=", 7)) {
++ /* exactmap option is for user-defined memory */
++ if (!memcmp(from+7, "exactmap", 8)) {
++#ifdef CONFIG_CRASH_DUMP
++ /* If we are doing a crash dump, we
++ * still need to know the real mem
++ * size before original memory map is
++ * reset.
++ */
++ saved_max_pfn = e820_end_of_ram();
++#endif
++ from += 8+7;
++ end_pfn_map = 0;
++ e820.nr_map = 0;
++ userdef = 1;
++ }
++ else {
++ parse_memmapopt(from+7, &from);
++ userdef = 1;
++ }
++ }
++
++#ifdef CONFIG_NUMA
++ if (!memcmp(from, "numa=", 5))
++ numa_setup(from+5);
++#endif
++
++ if (!memcmp(from,"iommu=",6)) {
++ iommu_setup(from+6);
++ }
++
++ if (fullarg(from,"oops=panic"))
++ panic_on_oops = 1;
++
++ if (!memcmp(from, "noexec=", 7))
++ nonx_setup(from + 7);
++
++#ifdef CONFIG_KEXEC
++ /* crashkernel=size@addr specifies the location to reserve for
++ * a crash kernel. By reserving this memory we guarantee
++ * that Linux never sets it up as a DMA target.
++ * Useful for holding code to do something appropriate
++ * after a kernel panic.
++ */
++ else if (!memcmp(from, "crashkernel=", 12)) {
++#ifndef CONFIG_XEN
++ unsigned long size, base;
++ size = memparse(from+12, &from);
++ if (*from == '@') {
++ base = memparse(from+1, &from);
++ /* FIXME: Do I want a sanity check
++ * to validate the memory range?
++ */
++ crashk_res.start = base;
++ crashk_res.end = base + size - 1;
++ }
++#else
++ printk("Ignoring crashkernel command line, "
++ "parameter will be supplied by xen\n");
++#endif
++ }
++#endif
++
++#ifdef CONFIG_PROC_VMCORE
++ /* elfcorehdr= specifies the location of elf core header
++ * stored by the crashed kernel. This option will be passed
++ * by kexec loader to the capture kernel.
++ */
++ else if(!memcmp(from, "elfcorehdr=", 11))
++ elfcorehdr_addr = memparse(from+11, &from);
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU) && !defined(CONFIG_XEN)
++ else if (!memcmp(from, "additional_cpus=", 16))
++ setup_additional_cpus(from+16);
++#endif
++
++ next_char:
++ c = *(from++);
++ if (!c)
++ break;
++ if (COMMAND_LINE_SIZE <= ++len)
++ break;
++ *(to++) = c;
++ }
++ if (userdef) {
++ printk(KERN_INFO "user-defined physical RAM map:\n");
++ e820_print_map("user");
++ }
++ *to = '\0';
++ *cmdline_p = command_line;
++}
++
++#ifndef CONFIG_NUMA
++static void __init
++contig_initmem_init(unsigned long start_pfn, unsigned long end_pfn)
++{
++ unsigned long bootmap_size, bootmap;
++
++ bootmap_size = bootmem_bootmap_pages(end_pfn)<<PAGE_SHIFT;
++ bootmap = find_e820_area(0, end_pfn<<PAGE_SHIFT, bootmap_size);
++ if (bootmap == -1L)
++ panic("Cannot find bootmem map of size %ld\n",bootmap_size);
++ bootmap_size = init_bootmem(bootmap >> PAGE_SHIFT, end_pfn);
++#ifdef CONFIG_XEN
++ e820_bootmem_free(NODE_DATA(0), 0, xen_start_info->nr_pages<<PAGE_SHIFT);
++#else
++ e820_bootmem_free(NODE_DATA(0), 0, end_pfn << PAGE_SHIFT);
++#endif
++ reserve_bootmem(bootmap, bootmap_size);
++}
++#endif
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++struct edd edd;
++#ifdef CONFIG_EDD_MODULE
++EXPORT_SYMBOL(edd);
++#endif
++#ifndef CONFIG_XEN
++/**
++ * copy_edd() - Copy the BIOS EDD information
++ * from boot_params into a safe place.
++ *
++ */
++static inline void copy_edd(void)
++{
++ memcpy(edd.mbr_signature, EDD_MBR_SIGNATURE, sizeof(edd.mbr_signature));
++ memcpy(edd.edd_info, EDD_BUF, sizeof(edd.edd_info));
++ edd.mbr_signature_nr = EDD_MBR_SIG_NR;
++ edd.edd_info_nr = EDD_NR;
++}
++#endif
++#else
++static inline void copy_edd(void)
++{
++}
++#endif
++
++#ifndef CONFIG_XEN
++#define EBDA_ADDR_POINTER 0x40E
++
++unsigned __initdata ebda_addr;
++unsigned __initdata ebda_size;
++
++static void discover_ebda(void)
++{
++ /*
++ * there is a real-mode segmented pointer pointing to the
++ * 4K EBDA area at 0x40E
++ */
++ ebda_addr = *(unsigned short *)EBDA_ADDR_POINTER;
++ ebda_addr <<= 4;
++
++ ebda_size = *(unsigned short *)(unsigned long)ebda_addr;
++
++ /* Round EBDA up to pages */
++ if (ebda_size == 0)
++ ebda_size = 1;
++ ebda_size <<= 10;
++ ebda_size = round_up(ebda_size + (ebda_addr & ~PAGE_MASK), PAGE_SIZE);
++ if (ebda_size > 64*1024)
++ ebda_size = 64*1024;
++}
++#else
++#define discover_ebda() ((void)0)
++#endif
++
++void __init setup_arch(char **cmdline_p)
++{
++#ifdef CONFIG_XEN
++ /* Register a call for panic conditions. */
++ atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block);
++
++ ROOT_DEV = MKDEV(RAMDISK_MAJOR,0);
++ screen_info = SCREEN_INFO;
++
++ if (is_initial_xendomain()) {
++ const struct dom0_vga_console_info *info =
++ (void *)((char *)xen_start_info +
++ xen_start_info->console.dom0.info_off);
++
++ dom0_init_screen_info(info,
++ xen_start_info->console.dom0.info_size);
++ xen_start_info->console.domU.mfn = 0;
++ xen_start_info->console.domU.evtchn = 0;
++ } else
++ screen_info.orig_video_isVGA = 0;
++
++ copy_edid();
++
++ WARN_ON(HYPERVISOR_vm_assist(VMASST_CMD_enable,
++ VMASST_TYPE_writable_pagetables));
++
++ ARCH_SETUP
++#else
++ ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);
++ screen_info = SCREEN_INFO;
++ edid_info = EDID_INFO;
++#endif /* !CONFIG_XEN */
++ saved_video_mode = SAVED_VIDEO_MODE;
++ bootloader_type = LOADER_TYPE;
++
++#ifdef CONFIG_BLK_DEV_RAM
++ rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
++ rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
++ rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
++#endif
++ setup_memory_region();
++ copy_edd();
++
++ if (!MOUNT_ROOT_RDONLY)
++ root_mountflags &= ~MS_RDONLY;
++ init_mm.start_code = (unsigned long) &_text;
++ init_mm.end_code = (unsigned long) &_etext;
++ init_mm.end_data = (unsigned long) &_edata;
++ init_mm.brk = (unsigned long) &_end;
++
++ code_resource.start = virt_to_phys(&_text);
++ code_resource.end = virt_to_phys(&_etext)-1;
++ data_resource.start = virt_to_phys(&_etext);
++ data_resource.end = virt_to_phys(&_edata)-1;
++
++ parse_cmdline_early(cmdline_p);
++
++ early_identify_cpu(&boot_cpu_data);
++
++ /*
++ * partially used pages are not usable - thus
++ * we are rounding upwards:
++ */
++ end_pfn = e820_end_of_ram();
++ num_physpages = end_pfn; /* for pfn_valid */
++
++ check_efer();
++
++ discover_ebda();
++
++ init_memory_mapping(0, (end_pfn_map << PAGE_SHIFT));
++
++ if (is_initial_xendomain())
++ dmi_scan_machine();
++
++#ifdef CONFIG_ACPI_NUMA
++ /*
++ * Parse SRAT to discover nodes.
++ */
++ acpi_numa_init();
++#endif
++
++#ifdef CONFIG_NUMA
++ numa_initmem_init(0, end_pfn);
++#else
++ contig_initmem_init(0, end_pfn);
++#endif
++
++#ifdef CONFIG_XEN
++ /*
++ * Reserve kernel, physmap, start info, initial page tables, and
++ * direct mapping.
++ */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ (table_end << PAGE_SHIFT) - __pa_symbol(&_text));
++#else
++ /* Reserve direct mapping */
++ reserve_bootmem_generic(table_start << PAGE_SHIFT,
++ (table_end - table_start) << PAGE_SHIFT);
++
++ /* reserve kernel */
++ reserve_bootmem_generic(__pa_symbol(&_text),
++ __pa_symbol(&_end) - __pa_symbol(&_text));
++
++ /*
++ * reserve physical page 0 - it's a special BIOS page on many boxes,
++ * enabling clean reboots, SMP operation, laptop functions.
++ */
++ reserve_bootmem_generic(0, PAGE_SIZE);
++
++ /* reserve ebda region */
++ if (ebda_addr)
++ reserve_bootmem_generic(ebda_addr, ebda_size);
++
++#ifdef CONFIG_SMP
++ /*
++ * But first pinch a few for the stack/trampoline stuff
++ * FIXME: Don't need the extra page at 4K, but need to fix
++ * trampoline before removing it. (see the GDT stuff)
++ */
++ reserve_bootmem_generic(PAGE_SIZE, PAGE_SIZE);
++
++ /* Reserve SMP trampoline */
++ reserve_bootmem_generic(SMP_TRAMPOLINE_BASE, PAGE_SIZE);
++#endif
++#endif
++
++#ifdef CONFIG_ACPI_SLEEP
++ /*
++ * Reserve low memory region for sleep support.
++ */
++ acpi_reserve_bootmem();
++#endif
++#ifdef CONFIG_XEN
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (xen_start_info->mod_start) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ /*reserve_bootmem_generic(INITRD_START, INITRD_SIZE);*/
++ initrd_start = INITRD_START + PAGE_OFFSET;
++ initrd_end = initrd_start+INITRD_SIZE;
++ initrd_below_start_ok = 1;
++ } else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#else /* CONFIG_XEN */
++#ifdef CONFIG_BLK_DEV_INITRD
++ if (LOADER_TYPE && INITRD_START) {
++ if (INITRD_START + INITRD_SIZE <= (end_pfn << PAGE_SHIFT)) {
++ reserve_bootmem_generic(INITRD_START, INITRD_SIZE);
++ initrd_start =
++ INITRD_START ? INITRD_START + PAGE_OFFSET : 0;
++ initrd_end = initrd_start+INITRD_SIZE;
++		} else {
++ printk(KERN_ERR "initrd extends beyond end of memory "
++ "(0x%08lx > 0x%08lx)\ndisabling initrd\n",
++ (unsigned long)(INITRD_START + INITRD_SIZE),
++ (unsigned long)(end_pfn << PAGE_SHIFT));
++ initrd_start = 0;
++ }
++ }
++#endif
++#endif /* !CONFIG_XEN */
++#ifdef CONFIG_KEXEC
++#ifdef CONFIG_XEN
++ xen_machine_kexec_setup_resources();
++#else
++ if (crashk_res.start != crashk_res.end) {
++ reserve_bootmem_generic(crashk_res.start,
++ crashk_res.end - crashk_res.start + 1);
++ }
++#endif
++#endif
++
++ paging_init();
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Find and reserve possible boot-time SMP configuration:
++ */
++ find_smp_config();
++#endif
++#ifdef CONFIG_XEN
++ {
++ int i, j, k, fpp;
++ unsigned long p2m_pages;
++
++ p2m_pages = end_pfn;
++ if (xen_start_info->nr_pages > end_pfn) {
++ /*
++ * the end_pfn was shrunk (probably by mem= or highmem=
++ * kernel parameter); shrink reservation with the HV
++ */
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ unsigned int difference;
++ int ret;
++
++ difference = xen_start_info->nr_pages - end_pfn;
++
++ set_xen_guest_handle(reservation.extent_start,
++ ((unsigned long *)xen_start_info->mfn_list) + end_pfn);
++ reservation.nr_extents = difference;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++			BUG_ON(ret != difference);
++		} else if (end_pfn > xen_start_info->nr_pages)
++ p2m_pages = xen_start_info->nr_pages;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Make sure we have a large enough P->M table. */
++ phys_to_machine_mapping = alloc_bootmem_pages(
++ end_pfn * sizeof(unsigned long));
++ memset(phys_to_machine_mapping, ~0,
++ end_pfn * sizeof(unsigned long));
++ memcpy(phys_to_machine_mapping,
++ (unsigned long *)xen_start_info->mfn_list,
++ p2m_pages * sizeof(unsigned long));
++ free_bootmem(
++ __pa(xen_start_info->mfn_list),
++ PFN_PHYS(PFN_UP(xen_start_info->nr_pages *
++ sizeof(unsigned long))));
++
++ /*
++ * Initialise the list of the frames that specify the
++ * list of frames that make up the p2m table. Used by
++ * save/restore.
++ */
++ pfn_to_mfn_frame_list_list = alloc_bootmem_pages(PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++		for (i = 0, j = 0, k = -1; i < end_pfn; i += fpp, j++) {
++			if ((j % fpp) == 0) {
++				k++;
++				BUG_ON(k >= fpp);
++				pfn_to_mfn_frame_list[k] =
++					alloc_bootmem_pages(PAGE_SIZE);
++				pfn_to_mfn_frame_list_list[k] =
++					virt_to_mfn(pfn_to_mfn_frame_list[k]);
++				j = 0;
++			}
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = end_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++ }
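++		/*
++		 * Illustrative sizing for the loop above (assumed 4K pages,
++		 * 8-byte entries): fpp is 512, so a guest with 1M pages
++		 * (4GB) needs 2048 p2m frames, which in turn fill four
++		 * entries of pfn_to_mfn_frame_list_list.
++		 */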
++
++ /* Mark all ISA DMA channels in-use - using them wouldn't work. */
++ for (i = 0; i < MAX_DMA_CHANNELS; ++i)
++ if (i != 4 && request_dma(i, "xen") != 0)
++ BUG();
++ }
++
++ if (!is_initial_xendomain()) {
++ acpi_disabled = 1;
++#ifdef CONFIG_ACPI
++ acpi_ht = 0;
++#endif
++ }
++#endif
++
++#ifndef CONFIG_XEN
++ check_ioapic();
++#endif
++
++ zap_low_mappings(0);
++
++ /*
++	 * set this early, so we don't allocate cpu0
++	 * if the MADT list doesn't list the BSP first.
++ * mpparse.c/MP_processor_info() allocates logical cpu numbers.
++ */
++ cpu_set(0, cpu_present_map);
++#ifdef CONFIG_ACPI
++ /*
++ * Initialize the ACPI boot-time table parser (gets the RSDP and SDT).
++ * Call this early for SRAT node setup.
++ */
++ acpi_boot_table_init();
++
++ /*
++ * Read APIC and some other early information from ACPI tables.
++ */
++ acpi_boot_init();
++#endif
++
++ init_cpu_to_node();
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * get boot-time SMP configuration:
++ */
++ if (smp_found_config)
++ get_smp_config();
++#ifndef CONFIG_XEN
++ init_apic_mappings();
++#endif
++#endif
++#if defined(CONFIG_XEN) && defined(CONFIG_SMP) && !defined(CONFIG_HOTPLUG_CPU)
++ prefill_possible_map();
++#endif
++
++ /*
++ * Request address space for all standard RAM and ROM resources
++ * and also for regions reported as reserved by the e820.
++ */
++ probe_roms();
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_reserve_resources(machine_e820.map, machine_e820.nr_map);
++#else
++ e820_reserve_resources(e820.map, e820.nr_map);
++#endif
++
++ request_resource(&iomem_resource, &video_ram_resource);
++
++ {
++ unsigned i;
++ /* request I/O space for devices used on all i[345]86 PCs */
++ for (i = 0; i < STANDARD_IO_RESOURCES; i++)
++ request_resource(&ioport_resource, &standard_io_resources[i]);
++ }
++
++#ifdef CONFIG_XEN
++ if (is_initial_xendomain())
++ e820_setup_gap(machine_e820.map, machine_e820.nr_map);
++#else
++ e820_setup_gap(e820.map, e820.nr_map);
++#endif
++
++#ifdef CONFIG_XEN
++ {
++ struct physdev_set_iopl set_iopl;
++
++ set_iopl.iopl = 1;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++
++ if (is_initial_xendomain()) {
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++ } else {
++#if defined(CONFIG_VT) && defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++ }
++ }
++#else /* CONFIG_XEN */
++
++#ifdef CONFIG_VT
++#if defined(CONFIG_VGA_CONSOLE)
++ conswitchp = &vga_con;
++#elif defined(CONFIG_DUMMY_CONSOLE)
++ conswitchp = &dummy_con;
++#endif
++#endif
++
++#endif /* !CONFIG_XEN */
++}
++
++#ifdef CONFIG_XEN
++static int
++xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ HYPERVISOR_shutdown(SHUTDOWN_crash);
++ /* we're never actually going to get here... */
++ return NOTIFY_DONE;
++}
++#endif /* CONFIG_XEN */
++
++
++static int __cpuinit get_model_name(struct cpuinfo_x86 *c)
++{
++ unsigned int *v;
++
++ if (c->extended_cpuid_level < 0x80000004)
++ return 0;
++
++ v = (unsigned int *) c->x86_model_id;
++ cpuid(0x80000002, &v[0], &v[1], &v[2], &v[3]);
++ cpuid(0x80000003, &v[4], &v[5], &v[6], &v[7]);
++ cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
++ c->x86_model_id[48] = 0;
++ return 1;
++}
++
++
++static void __cpuinit display_cacheinfo(struct cpuinfo_x86 *c)
++{
++ unsigned int n, dummy, eax, ebx, ecx, edx;
++
++ n = c->extended_cpuid_level;
++
++ if (n >= 0x80000005) {
++ cpuid(0x80000005, &dummy, &ebx, &ecx, &edx);
++ printk(KERN_INFO "CPU: L1 I Cache: %dK (%d bytes/line), D cache %dK (%d bytes/line)\n",
++ edx>>24, edx&0xFF, ecx>>24, ecx&0xFF);
++		c->x86_cache_size = (ecx >> 24) + (edx >> 24);
++ /* On K8 L1 TLB is inclusive, so don't count it */
++ c->x86_tlbsize = 0;
++ }
++
++ if (n >= 0x80000006) {
++ cpuid(0x80000006, &dummy, &ebx, &ecx, &edx);
++ ecx = cpuid_ecx(0x80000006);
++ c->x86_cache_size = ecx >> 16;
++ c->x86_tlbsize += ((ebx >> 16) & 0xfff) + (ebx & 0xfff);
++
++ printk(KERN_INFO "CPU: L2 Cache: %dK (%d bytes/line)\n",
++ c->x86_cache_size, ecx & 0xFF);
++ }
++
++ if (n >= 0x80000007)
++ cpuid(0x80000007, &dummy, &dummy, &dummy, &c->x86_power);
++ if (n >= 0x80000008) {
++ cpuid(0x80000008, &eax, &dummy, &dummy, &dummy);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ }
++}
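++
++/*
++ * Decoding example for the 0x80000006 leaf above (illustrative value,
++ * not from a specific part): ECX = 0x02008140 means a 512K L2
++ * (ecx >> 16) with 64-byte lines (ecx & 0xff); the associativity bits
++ * in between are not printed here.
++ */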
++
++#ifdef CONFIG_NUMA
++static int nearby_node(int apicid)
++{
++ int i;
++ for (i = apicid - 1; i >= 0; i--) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
++ int node = apicid_to_node[i];
++ if (node != NUMA_NO_NODE && node_online(node))
++ return node;
++ }
++ return first_node(node_online_map); /* Shouldn't happen */
++}
++#endif
++
++/*
++ * On an AMD dual core setup the lower bits of the APIC id distinguish the cores.
++ * Assumes number of cores is a power of two.
++ */
++static void __init amd_detect_cmp(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ unsigned bits;
++#ifdef CONFIG_NUMA
++ int cpu = smp_processor_id();
++ int node = 0;
++ unsigned apicid = hard_smp_processor_id();
++#endif
++ unsigned ecx = cpuid_ecx(0x80000008);
++
++ c->x86_max_cores = (ecx & 0xff) + 1;
++
++ /* CPU telling us the core id bits shift? */
++ bits = (ecx >> 12) & 0xF;
++
++ /* Otherwise recompute */
++ if (bits == 0) {
++ while ((1 << bits) < c->x86_max_cores)
++ bits++;
++ }
++
++ /* Low order bits define the core id (index of core in socket) */
++ c->cpu_core_id = c->phys_proc_id & ((1 << bits)-1);
++ /* Convert the APIC ID into the socket ID */
++ c->phys_proc_id = phys_pkg_id(bits);
++
++#ifdef CONFIG_NUMA
++ node = c->phys_proc_id;
++ if (apicid_to_node[apicid] != NUMA_NO_NODE)
++ node = apicid_to_node[apicid];
++ if (!node_online(node)) {
++ /* Two possibilities here:
++ - The CPU is missing memory and no node was created.
++ In that case try picking one from a nearby CPU
++ - The APIC IDs differ from the HyperTransport node IDs
++ which the K8 northbridge parsing fills in.
++ Assume they are all increased by a constant offset,
++ but in the same order as the HT nodeids.
++ If that doesn't result in a usable node fall back to the
++ path for the previous case. */
++ int ht_nodeid = apicid - (cpu_data[0].phys_proc_id << bits);
++ if (ht_nodeid >= 0 &&
++ apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
++ node = apicid_to_node[ht_nodeid];
++ /* Pick a nearby node */
++ if (!node_online(node))
++ node = nearby_node(apicid);
++ }
++ numa_set_node(cpu, node);
++
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++#endif
++}
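++
++/*
++ * Worked example for the core id split above (hypothetical APIC id):
++ * with x86_max_cores = 2 the CPU reports bits = 1, so an initial
++ * phys_proc_id of 5 (0b101) yields cpu_core_id = 5 & 1 = 1, and
++ * phys_pkg_id(1) drops the low bit to leave socket id 2 (0b10).
++ */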
++
++static void __init init_amd(struct cpuinfo_x86 *c)
++{
++ unsigned level;
++
++#ifdef CONFIG_SMP
++ unsigned long value;
++
++ /*
++ * Disable TLB flush filter by setting HWCR.FFDIS on K8
++ * bit 6 of msr C001_0015
++ *
++ * Errata 63 for SH-B3 steppings
++ * Errata 122 for all steppings (F+ have it disabled by default)
++ */
++ if (c->x86 == 15) {
++ rdmsrl(MSR_K8_HWCR, value);
++ value |= 1 << 6;
++ wrmsrl(MSR_K8_HWCR, value);
++ }
++#endif
++
++	/* Bit 31 in normal CPUID used for nonstandard 3DNow ID;
++	   3DNow is identified by bit 31 in extended CPUID (1*32+31) anyway */
++ clear_bit(0*32+31, &c->x86_capability);
++
++ /* On C+ stepping K8 rep microcode works well for copy/memset */
++ level = cpuid_eax(1);
++ if (c->x86 == 15 && ((level >= 0x0f48 && level < 0x0f50) || level >= 0x0f58))
++ set_bit(X86_FEATURE_REP_GOOD, &c->x86_capability);
++
++ /* Enable workaround for FXSAVE leak */
++ if (c->x86 >= 6)
++ set_bit(X86_FEATURE_FXSAVE_LEAK, &c->x86_capability);
++
++ level = get_model_name(c);
++ if (!level) {
++ switch (c->x86) {
++ case 15:
++			/* Should distinguish models here, but this is only
++			   a fallback anyway. */
++ strcpy(c->x86_model_id, "Hammer");
++ break;
++ }
++ }
++ display_cacheinfo(c);
++
++ /* c->x86_power is 8000_0007 edx. Bit 8 is constant TSC */
++ if (c->x86_power & (1<<8))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++
++ /* Multi core CPU? */
++ if (c->extended_cpuid_level >= 0x80000008)
++ amd_detect_cmp(c);
++
++ /* Fix cpuid4 emulation for more */
++ num_cache_leaves = 3;
++}
++
++static void __cpuinit detect_ht(struct cpuinfo_x86 *c)
++{
++#ifdef CONFIG_SMP
++ u32 eax, ebx, ecx, edx;
++ int index_msb, core_bits;
++
++ cpuid(1, &eax, &ebx, &ecx, &edx);
++
++
++ if (!cpu_has(c, X86_FEATURE_HT))
++ return;
++ if (cpu_has(c, X86_FEATURE_CMP_LEGACY))
++ goto out;
++
++ smp_num_siblings = (ebx & 0xff0000) >> 16;
++
++ if (smp_num_siblings == 1) {
++ printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
++	} else if (smp_num_siblings > 1) {
++
++ if (smp_num_siblings > NR_CPUS) {
++			printk(KERN_WARNING "CPU: Unsupported number of siblings %d", smp_num_siblings);
++ smp_num_siblings = 1;
++ return;
++ }
++
++ index_msb = get_count_order(smp_num_siblings);
++ c->phys_proc_id = phys_pkg_id(index_msb);
++
++ smp_num_siblings = smp_num_siblings / c->x86_max_cores;
++
++		index_msb = get_count_order(smp_num_siblings);
++
++ core_bits = get_count_order(c->x86_max_cores);
++
++ c->cpu_core_id = phys_pkg_id(index_msb) &
++ ((1 << core_bits) - 1);
++ }
++out:
++ if ((c->x86_max_cores * smp_num_siblings) > 1) {
++ printk(KERN_INFO "CPU: Physical Processor ID: %d\n", c->phys_proc_id);
++ printk(KERN_INFO "CPU: Processor Core ID: %d\n", c->cpu_core_id);
++ }
++
++#endif
++}
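++
++/*
++ * Worked example for the HT decoding above (hypothetical package):
++ * 4 logical CPUs with x86_max_cores = 2 gives index_msb = 2 for the
++ * package id; 4 / 2 = 2 siblings per core then leave core_bits = 1,
++ * so APIC id bit 1 selects the core and bit 0 the thread.
++ */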
++
++/*
++ * find out the number of processor cores on the die
++ */
++static int __cpuinit intel_num_cpu_cores(struct cpuinfo_x86 *c)
++{
++ unsigned int eax, t;
++
++ if (c->cpuid_level < 4)
++ return 1;
++
++ cpuid_count(4, 0, &eax, &t, &t, &t);
++
++ if (eax & 0x1f)
++ return ((eax >> 26) + 1);
++ else
++ return 1;
++}
++
++static void srat_detect_node(void)
++{
++#ifdef CONFIG_NUMA
++ unsigned node;
++ int cpu = smp_processor_id();
++ int apicid = hard_smp_processor_id();
++
++ /* Don't do the funky fallback heuristics the AMD version employs
++ for now. */
++ node = apicid_to_node[apicid];
++ if (node == NUMA_NO_NODE)
++ node = first_node(node_online_map);
++ numa_set_node(cpu, node);
++
++ if (acpi_numa > 0)
++ printk(KERN_INFO "CPU %d/%x -> Node %d\n", cpu, apicid, node);
++#endif
++}
++
++static void __cpuinit init_intel(struct cpuinfo_x86 *c)
++{
++ /* Cache sizes */
++ unsigned n;
++
++ init_intel_cacheinfo(c);
++	if (c->cpuid_level > 9) {
++ unsigned eax = cpuid_eax(10);
++ /* Check for version and the number of counters */
++ if ((eax & 0xff) && (((eax>>8) & 0xff) > 1))
++ set_bit(X86_FEATURE_ARCH_PERFMON, &c->x86_capability);
++ }
++
++ n = c->extended_cpuid_level;
++ if (n >= 0x80000008) {
++ unsigned eax = cpuid_eax(0x80000008);
++ c->x86_virt_bits = (eax >> 8) & 0xff;
++ c->x86_phys_bits = eax & 0xff;
++ /* CPUID workaround for Intel 0F34 CPU */
++ if (c->x86_vendor == X86_VENDOR_INTEL &&
++ c->x86 == 0xF && c->x86_model == 0x3 &&
++ c->x86_mask == 0x4)
++ c->x86_phys_bits = 36;
++ }
++
++ if (c->x86 == 15)
++ c->x86_cache_alignment = c->x86_clflush_size * 2;
++ if ((c->x86 == 0xf && c->x86_model >= 0x03) ||
++ (c->x86 == 0x6 && c->x86_model >= 0x0e))
++ set_bit(X86_FEATURE_CONSTANT_TSC, &c->x86_capability);
++ set_bit(X86_FEATURE_SYNC_RDTSC, &c->x86_capability);
++ c->x86_max_cores = intel_num_cpu_cores(c);
++
++ srat_detect_node();
++}
++
++static void __cpuinit get_cpu_vendor(struct cpuinfo_x86 *c)
++{
++ char *v = c->x86_vendor_id;
++
++ if (!strcmp(v, "AuthenticAMD"))
++ c->x86_vendor = X86_VENDOR_AMD;
++ else if (!strcmp(v, "GenuineIntel"))
++ c->x86_vendor = X86_VENDOR_INTEL;
++ else
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++}
++
++struct cpu_model_info {
++ int vendor;
++ int family;
++ char *model_names[16];
++};
++
++/* Do some early cpuid on the boot CPU to get the parameters that are
++ needed before check_bugs. Everything advanced is in identify_cpu
++ below. */
++void __cpuinit early_identify_cpu(struct cpuinfo_x86 *c)
++{
++ u32 tfms;
++
++ c->loops_per_jiffy = loops_per_jiffy;
++ c->x86_cache_size = -1;
++ c->x86_vendor = X86_VENDOR_UNKNOWN;
++ c->x86_model = c->x86_mask = 0; /* So far unknown... */
++ c->x86_vendor_id[0] = '\0'; /* Unset */
++ c->x86_model_id[0] = '\0'; /* Unset */
++ c->x86_clflush_size = 64;
++ c->x86_cache_alignment = c->x86_clflush_size;
++ c->x86_max_cores = 1;
++ c->extended_cpuid_level = 0;
++ memset(&c->x86_capability, 0, sizeof c->x86_capability);
++
++ /* Get vendor name */
++ cpuid(0x00000000, (unsigned int *)&c->cpuid_level,
++ (unsigned int *)&c->x86_vendor_id[0],
++ (unsigned int *)&c->x86_vendor_id[8],
++ (unsigned int *)&c->x86_vendor_id[4]);
++
++ get_cpu_vendor(c);
++
++ /* Initialize the standard set of capabilities */
++ /* Note that the vendor-specific code below might override */
++
++ /* Intel-defined flags: level 0x00000001 */
++ if (c->cpuid_level >= 0x00000001) {
++ __u32 misc;
++ cpuid(0x00000001, &tfms, &misc, &c->x86_capability[4],
++ &c->x86_capability[0]);
++ c->x86 = (tfms >> 8) & 0xf;
++ c->x86_model = (tfms >> 4) & 0xf;
++ c->x86_mask = tfms & 0xf;
++ if (c->x86 == 0xf)
++ c->x86 += (tfms >> 20) & 0xff;
++ if (c->x86 >= 0x6)
++ c->x86_model += ((tfms >> 16) & 0xF) << 4;
++ if (c->x86_capability[0] & (1<<19))
++ c->x86_clflush_size = ((misc >> 8) & 0xff) * 8;
++ } else {
++ /* Have CPUID level 0 only - unheard of */
++ c->x86 = 4;
++ }
++
++#ifdef CONFIG_SMP
++ c->phys_proc_id = (cpuid_ebx(1) >> 24) & 0xff;
++#endif
++}
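++
++/*
++ * Example of the family/model extension above: a signature of
++ * tfms = 0x00000f41 decodes to base family 0xf, so the extended family
++ * ((tfms >> 20) & 0xff) = 0 is added and the extended model
++ * ((tfms >> 16) & 0xf) << 4 = 0 is merged in, leaving family 15,
++ * model 4, stepping 1.
++ */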
++
++/*
++ * This does the hard work of actually picking apart the CPU stuff...
++ */
++void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
++{
++ int i;
++ u32 xlvl;
++
++ early_identify_cpu(c);
++
++ /* AMD-defined flags: level 0x80000001 */
++ xlvl = cpuid_eax(0x80000000);
++ c->extended_cpuid_level = xlvl;
++ if ((xlvl & 0xffff0000) == 0x80000000) {
++ if (xlvl >= 0x80000001) {
++ c->x86_capability[1] = cpuid_edx(0x80000001);
++ c->x86_capability[6] = cpuid_ecx(0x80000001);
++ }
++ if (xlvl >= 0x80000004)
++ get_model_name(c); /* Default name */
++ }
++
++ /* Transmeta-defined flags: level 0x80860001 */
++ xlvl = cpuid_eax(0x80860000);
++ if ((xlvl & 0xffff0000) == 0x80860000) {
++		/* Don't set extended_cpuid_level here for now to avoid confusion. */
++ if (xlvl >= 0x80860001)
++ c->x86_capability[2] = cpuid_edx(0x80860001);
++ }
++
++ c->apicid = phys_pkg_id(0);
++
++ /*
++ * Vendor-specific initialization. In this section we
++ * canonicalize the feature flags, meaning if there are
++ * features a certain CPU supports which CPUID doesn't
++ * tell us, CPUID claiming incorrect flags, or other bugs,
++ * we handle them here.
++ *
++ * At the end of this section, c->x86_capability better
++ * indicate the features this CPU genuinely supports!
++ */
++ switch (c->x86_vendor) {
++ case X86_VENDOR_AMD:
++ init_amd(c);
++ break;
++
++ case X86_VENDOR_INTEL:
++ init_intel(c);
++ break;
++
++ case X86_VENDOR_UNKNOWN:
++ default:
++ display_cacheinfo(c);
++ break;
++ }
++
++ select_idle_routine(c);
++ detect_ht(c);
++
++ /*
++ * On SMP, boot_cpu_data holds the common feature set between
++ * all CPUs; so make sure that we indicate which features are
++ * common between the CPUs. The first time this routine gets
++ * executed, c == &boot_cpu_data.
++ */
++ if (c != &boot_cpu_data) {
++ /* AND the already accumulated flags with these */
++ for (i = 0 ; i < NCAPINTS ; i++)
++ boot_cpu_data.x86_capability[i] &= c->x86_capability[i];
++ }
++
++#ifdef CONFIG_X86_MCE
++ mcheck_init(c);
++#endif
++ if (c == &boot_cpu_data)
++ mtrr_bp_init();
++ else
++ mtrr_ap_init();
++#ifdef CONFIG_NUMA
++ numa_add_cpu(smp_processor_id());
++#endif
++}
++
++
++void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
++{
++ if (c->x86_model_id[0])
++ printk("%s", c->x86_model_id);
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ printk(" stepping %02x\n", c->x86_mask);
++ else
++ printk("\n");
++}
++
++/*
++ * Get CPU information for use by the procfs.
++ */
++
++static int show_cpuinfo(struct seq_file *m, void *v)
++{
++ struct cpuinfo_x86 *c = v;
++
++ /*
++ * These flag bits must match the definitions in <asm/cpufeature.h>.
++ * NULL means this bit is undefined or reserved; either way it doesn't
++ * have meaning as far as Linux is concerned. Note that it's important
++ * to realize there is a difference between this table and CPUID -- if
++ * applications want to get the raw CPUID data, they should access
++ * /dev/cpu/<cpu_nr>/cpuid instead.
++ */
++ static char *x86_cap_flags[] = {
++ /* Intel-defined */
++ "fpu", "vme", "de", "pse", "tsc", "msr", "pae", "mce",
++ "cx8", "apic", NULL, "sep", "mtrr", "pge", "mca", "cmov",
++ "pat", "pse36", "pn", "clflush", NULL, "dts", "acpi", "mmx",
++ "fxsr", "sse", "sse2", "ss", "ht", "tm", "ia64", NULL,
++
++ /* AMD-defined */
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, "syscall", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, "nx", NULL, "mmxext", NULL,
++ NULL, "fxsr_opt", NULL, "rdtscp", NULL, "lm", "3dnowext", "3dnow",
++
++ /* Transmeta-defined */
++ "recovery", "longrun", NULL, "lrti", NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Other (Linux-defined) */
++ "cxmmx", NULL, "cyrix_arr", "centaur_mcr", NULL,
++ "constant_tsc", NULL, NULL,
++ "up", NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* Intel-defined (#2) */
++ "pni", NULL, NULL, "monitor", "ds_cpl", "vmx", "smx", "est",
++ "tm2", NULL, "cid", NULL, NULL, "cx16", "xtpr", NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* VIA/Cyrix/Centaur-defined */
++ NULL, NULL, "rng", "rng_en", NULL, NULL, "ace", "ace_en",
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++
++ /* AMD-defined (#2) */
++ "lahf_lm", "cmp_legacy", "svm", NULL, "cr8_legacy", NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
++ };
++ static char *x86_power_flags[] = {
++ "ts", /* temperature sensor */
++ "fid", /* frequency id control */
++ "vid", /* voltage id control */
++ "ttp", /* thermal trip */
++ "tm",
++ "stc",
++ NULL,
++ /* nothing */ /* constant_tsc - moved to flags */
++ };
++
++
++#ifdef CONFIG_SMP
++ if (!cpu_online(c-cpu_data))
++ return 0;
++#endif
++
++	seq_printf(m, "processor\t: %u\n"
++ "vendor_id\t: %s\n"
++ "cpu family\t: %d\n"
++ "model\t\t: %d\n"
++ "model name\t: %s\n",
++ (unsigned)(c-cpu_data),
++ c->x86_vendor_id[0] ? c->x86_vendor_id : "unknown",
++ c->x86,
++ (int)c->x86_model,
++ c->x86_model_id[0] ? c->x86_model_id : "unknown");
++
++ if (c->x86_mask || c->cpuid_level >= 0)
++ seq_printf(m, "stepping\t: %d\n", c->x86_mask);
++ else
++ seq_printf(m, "stepping\t: unknown\n");
++
++	if (cpu_has(c, X86_FEATURE_TSC)) {
++ unsigned int freq = cpufreq_quick_get((unsigned)(c-cpu_data));
++ if (!freq)
++ freq = cpu_khz;
++ seq_printf(m, "cpu MHz\t\t: %u.%03u\n",
++ freq / 1000, (freq % 1000));
++ }
++
++ /* Cache size */
++ if (c->x86_cache_size >= 0)
++ seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
++
++#ifdef CONFIG_SMP
++ if (smp_num_siblings * c->x86_max_cores > 1) {
++ int cpu = c - cpu_data;
++ seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
++ seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
++ seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
++ seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
++ }
++#endif
++
++ seq_printf(m,
++ "fpu\t\t: yes\n"
++ "fpu_exception\t: yes\n"
++ "cpuid level\t: %d\n"
++ "wp\t\t: yes\n"
++ "flags\t\t:",
++ c->cpuid_level);
++
++ {
++ int i;
++		for (i = 0; i < 32*NCAPINTS; i++)
++ if (cpu_has(c, i) && x86_cap_flags[i] != NULL)
++ seq_printf(m, " %s", x86_cap_flags[i]);
++ }
++
++ seq_printf(m, "\nbogomips\t: %lu.%02lu\n",
++ c->loops_per_jiffy/(500000/HZ),
++ (c->loops_per_jiffy/(5000/HZ)) % 100);
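++	/*
++	 * The two divisions above print BogoMIPS, i.e.
++	 * loops_per_jiffy * HZ / 500000, with two decimals in pure
++	 * integer math; e.g. at HZ=250, loops_per_jiffy = 2000000
++	 * prints "1000.00".
++	 */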
++
++ if (c->x86_tlbsize > 0)
++ seq_printf(m, "TLB size\t: %d 4K pages\n", c->x86_tlbsize);
++ seq_printf(m, "clflush size\t: %d\n", c->x86_clflush_size);
++ seq_printf(m, "cache_alignment\t: %d\n", c->x86_cache_alignment);
++
++ seq_printf(m, "address sizes\t: %u bits physical, %u bits virtual\n",
++ c->x86_phys_bits, c->x86_virt_bits);
++
++ seq_printf(m, "power management:");
++ {
++ unsigned i;
++ for (i = 0; i < 32; i++)
++ if (c->x86_power & (1 << i)) {
++ if (i < ARRAY_SIZE(x86_power_flags) &&
++ x86_power_flags[i])
++ seq_printf(m, "%s%s",
++ x86_power_flags[i][0]?" ":"",
++ x86_power_flags[i]);
++ else
++ seq_printf(m, " [%d]", i);
++ }
++ }
++
++ seq_printf(m, "\n\n");
++
++ return 0;
++}
++
++static void *c_start(struct seq_file *m, loff_t *pos)
++{
++ return *pos < NR_CPUS ? cpu_data + *pos : NULL;
++}
++
++static void *c_next(struct seq_file *m, void *v, loff_t *pos)
++{
++ ++*pos;
++ return c_start(m, pos);
++}
++
++static void c_stop(struct seq_file *m, void *v)
++{
++}
++
++struct seq_operations cpuinfo_op = {
++	.start = c_start,
++ .next = c_next,
++ .stop = c_stop,
++ .show = show_cpuinfo,
++};
++
++#if defined(CONFIG_INPUT_PCSPKR) || defined(CONFIG_INPUT_PCSPKR_MODULE)
++#include <linux/platform_device.h>
++static __init int add_pcspkr(void)
++{
++ struct platform_device *pd;
++ int ret;
++
++ if (!is_initial_xendomain())
++ return 0;
++
++ pd = platform_device_alloc("pcspkr", -1);
++ if (!pd)
++ return -ENOMEM;
++
++ ret = platform_device_add(pd);
++ if (ret)
++ platform_device_put(pd);
++
++ return ret;
++}
++device_initcall(add_pcspkr);
++#endif
+Index: head-2008-11-25/arch/x86/kernel/setup64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/setup64-xen.c 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,367 @@
++/*
++ * X86-64 specific CPU setup.
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright 2001, 2002, 2003 SuSE Labs / Andi Kleen.
++ * See setup.c for older changelog.
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen
++ *
++ */
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/string.h>
++#include <linux/bootmem.h>
++#include <linux/bitops.h>
++#include <linux/module.h>
++#include <asm/bootsetup.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/processor.h>
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/mmu_context.h>
++#include <asm/smp.h>
++#include <asm/i387.h>
++#include <asm/percpu.h>
++#include <asm/proto.h>
++#include <asm/sections.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++#endif
++
++char x86_boot_params[BOOT_PARAM_SIZE] __initdata = {0,};
++
++cpumask_t cpu_initialized __cpuinitdata = CPU_MASK_NONE;
++
++struct x8664_pda *_cpu_pda[NR_CPUS] __read_mostly;
++EXPORT_SYMBOL(_cpu_pda);
++struct x8664_pda boot_cpu_pda[NR_CPUS] __cacheline_aligned;
++
++#ifndef CONFIG_X86_NO_IDT
++struct desc_ptr idt_descr = { 256 * 16 - 1, (unsigned long) idt_table };
++#endif
++
++char boot_cpu_stack[IRQSTACKSIZE] __attribute__((section(".bss.page_aligned")));
++
++unsigned long __supported_pte_mask __read_mostly = ~0UL;
++EXPORT_SYMBOL(__supported_pte_mask);
++static int do_not_nx __cpuinitdata = 0;
++
++/* noexec=on|off
++Control non-executable mappings for 64bit processes.
++
++on	Enable (default)
++off	Disable
++*/
++int __init nonx_setup(char *str)
++{
++ if (!strncmp(str, "on", 2)) {
++ __supported_pte_mask |= _PAGE_NX;
++ do_not_nx = 0;
++ } else if (!strncmp(str, "off", 3)) {
++ do_not_nx = 1;
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++ return 1;
++}
++__setup("noexec=", nonx_setup); /* parsed early actually */
++
++int force_personality32 = 0;
++
++/* noexec32=on|off
++Control non-executable heap for 32bit processes.
++To control the stack too use noexec=off
++
++on PROT_READ does not imply PROT_EXEC for 32bit processes
++off PROT_READ implies PROT_EXEC (default)
++*/
++static int __init nonx32_setup(char *str)
++{
++ if (!strcmp(str, "on"))
++ force_personality32 &= ~READ_IMPLIES_EXEC;
++ else if (!strcmp(str, "off"))
++ force_personality32 |= READ_IMPLIES_EXEC;
++ return 1;
++}
++__setup("noexec32=", nonx32_setup);
++
++/*
++ * Great future plan:
++ * Declare PDA itself and support (irqstack,tss,pgd) as per cpu data.
++ * Always point %gs to its beginning
++ */
++void __init setup_per_cpu_areas(void)
++{
++ int i;
++ unsigned long size;
++
++#ifdef CONFIG_HOTPLUG_CPU
++ prefill_possible_map();
++#endif
++
++ /* Copy section for each CPU (we discard the original) */
++ size = ALIGN(__per_cpu_end - __per_cpu_start, SMP_CACHE_BYTES);
++#ifdef CONFIG_MODULES
++ if (size < PERCPU_ENOUGH_ROOM)
++ size = PERCPU_ENOUGH_ROOM;
++#endif
++
++ for_each_cpu_mask (i, cpu_possible_map) {
++ char *ptr;
++
++ if (!NODE_DATA(cpu_to_node(i))) {
++ printk("cpu with no node %d, num_online_nodes %d\n",
++ i, num_online_nodes());
++ ptr = alloc_bootmem(size);
++ } else {
++ ptr = alloc_bootmem_node(NODE_DATA(cpu_to_node(i)), size);
++ }
++ if (!ptr)
++ panic("Cannot allocate cpu data for CPU %d\n", i);
++ cpu_pda(i)->data_offset = ptr - __per_cpu_start;
++ memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
++ }
++}
++
++#ifdef CONFIG_XEN
++static void switch_pt(void)
++{
++ xen_pt_switch(__pa_symbol(init_level4_pgt));
++ xen_new_user_pt(__pa_symbol(__user_pgd(init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++ unsigned long frames[16];
++ unsigned long va;
++ int f;
++
++ for (va = gdt_descr->address, f = 0;
++ va < gdt_descr->address + gdt_descr->size;
++ va += PAGE_SIZE, f++) {
++ frames[f] = virt_to_mfn(va);
++ make_page_readonly(
++ (void *)va, XENFEAT_writable_descriptor_tables);
++ }
++ if (HYPERVISOR_set_gdt(frames, (gdt_descr->size + 1) /
++ sizeof (struct desc_struct)))
++ BUG();
++}
++#else
++static void switch_pt(void)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (__pa_symbol(&init_level4_pgt)));
++}
++
++static void __cpuinit cpu_gdt_init(const struct desc_ptr *gdt_descr)
++{
++ asm volatile("lgdt %0" :: "m" (*gdt_descr));
++ asm volatile("lidt %0" :: "m" (idt_descr));
++}
++#endif
++
++void pda_init(int cpu)
++{
++ struct x8664_pda *pda = cpu_pda(cpu);
++
++	/* Set up data that may be needed in __get_free_pages early */
++ asm volatile("movl %0,%%fs ; movl %0,%%gs" :: "r" (0));
++#ifndef CONFIG_XEN
++ wrmsrl(MSR_GS_BASE, pda);
++#else
++ if (HYPERVISOR_set_segment_base(SEGBASE_GS_KERNEL,
++ (unsigned long)pda))
++ BUG();
++#endif
++ pda->cpunumber = cpu;
++ pda->irqcount = -1;
++ pda->kernelstack =
++ (unsigned long)stack_thread_info() - PDA_STACKOFFSET + THREAD_SIZE;
++ pda->active_mm = &init_mm;
++ pda->mmu_state = 0;
++
++ if (cpu == 0) {
++#ifdef CONFIG_XEN
++ xen_init_pt();
++#endif
++ /* others are initialized in smpboot.c */
++ pda->pcurrent = &init_task;
++ pda->irqstackptr = boot_cpu_stack;
++ } else {
++ pda->irqstackptr = (char *)
++ __get_free_pages(GFP_ATOMIC, IRQSTACK_ORDER);
++ if (!pda->irqstackptr)
++ panic("cannot allocate irqstack for cpu %d", cpu);
++ }
++
++ switch_pt();
++
++ pda->irqstackptr += IRQSTACKSIZE-64;
++}
++
++#ifndef CONFIG_X86_NO_TSS
++char boot_exception_stacks[(N_EXCEPTION_STACKS - 1) * EXCEPTION_STKSZ + DEBUG_STKSZ]
++__attribute__((section(".bss.page_aligned")));
++#endif
++
++/* May not be marked __init: used by software suspend */
++void syscall_init(void)
++{
++#ifndef CONFIG_XEN
++ /*
++	 * LSTAR and STAR live in a somewhat strange symbiosis.
++	 * They both write to the same internal register. STAR allows setting CS/DS
++ * but only a 32bit target. LSTAR sets the 64bit rip.
++ */
++ wrmsrl(MSR_STAR, ((u64)__USER32_CS)<<48 | ((u64)__KERNEL_CS)<<32);
++ wrmsrl(MSR_LSTAR, system_call);
++
++ /* Flags to clear on syscall */
++ wrmsrl(MSR_SYSCALL_MASK, EF_TF|EF_DF|EF_IE|0x3000);
++#endif
++#ifdef CONFIG_IA32_EMULATION
++	syscall32_cpu_init();
++#endif
++}
++
++void __cpuinit check_efer(void)
++{
++ unsigned long efer;
++
++ rdmsrl(MSR_EFER, efer);
++ if (!(efer & EFER_NX) || do_not_nx) {
++ __supported_pte_mask &= ~_PAGE_NX;
++ }
++}
++
++unsigned long kernel_eflags;
++
++/*
++ * cpu_init() initializes state that is per-CPU. Some data is already
++ * initialized (naturally) in the bootstrap process, such as the GDT
++ * and IDT. We reload them nevertheless, this function acts as a
++ * 'CPU state barrier', nothing should get across.
++ * A lot of state is already set up in PDA init.
++ */
++void __cpuinit cpu_init (void)
++{
++ int cpu = stack_smp_processor_id();
++#ifndef CONFIG_X86_NO_TSS
++ struct tss_struct *t = &per_cpu(init_tss, cpu);
++ struct orig_ist *orig_ist = &per_cpu(orig_ist, cpu);
++ unsigned long v;
++ char *estacks = NULL;
++ unsigned i;
++#endif
++ struct task_struct *me;
++
++ /* CPU 0 is initialised in head64.c */
++ if (cpu != 0) {
++ pda_init(cpu);
++ zap_low_mappings(cpu);
++ }
++#ifndef CONFIG_X86_NO_TSS
++ else
++ estacks = boot_exception_stacks;
++#endif
++
++ me = current;
++
++ if (cpu_test_and_set(cpu, cpu_initialized))
++ panic("CPU#%d already initialized!\n", cpu);
++
++ printk("Initializing CPU#%d\n", cpu);
++
++ clear_in_cr4(X86_CR4_VME|X86_CR4_PVI|X86_CR4_TSD|X86_CR4_DE);
++
++ /*
++ * Initialize the per-CPU GDT with the boot GDT,
++ * and set up the GDT descriptor:
++ */
++#ifndef CONFIG_XEN
++ if (cpu)
++ memcpy(cpu_gdt(cpu), cpu_gdt_table, GDT_SIZE);
++#endif
++
++ cpu_gdt_descr[cpu].size = GDT_SIZE;
++ cpu_gdt_init(&cpu_gdt_descr[cpu]);
++
++ memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
++ syscall_init();
++
++ wrmsrl(MSR_FS_BASE, 0);
++ wrmsrl(MSR_KERNEL_GS_BASE, 0);
++ barrier();
++
++ check_efer();
++
++#ifndef CONFIG_X86_NO_TSS
++ /*
++ * set up and load the per-CPU TSS
++ */
++ for (v = 0; v < N_EXCEPTION_STACKS; v++) {
++ if (cpu) {
++ static const unsigned int order[N_EXCEPTION_STACKS] = {
++ [0 ... N_EXCEPTION_STACKS - 1] = EXCEPTION_STACK_ORDER,
++ [DEBUG_STACK - 1] = DEBUG_STACK_ORDER
++ };
++
++ estacks = (char *)__get_free_pages(GFP_ATOMIC, order[v]);
++ if (!estacks)
++ panic("Cannot allocate exception stack %ld %d\n",
++ v, cpu);
++ }
++ switch (v + 1) {
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ cpu_pda(cpu)->debugstack = (unsigned long)estacks;
++ estacks += DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ estacks += EXCEPTION_STKSZ;
++ break;
++ }
++ orig_ist->ist[v] = t->ist[v] = (unsigned long)estacks;
++ }
++
++ t->io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
++ /*
++ * <= is required because the CPU will access up to
++ * 8 bits beyond the end of the IO permission bitmap.
++ */
++ for (i = 0; i <= IO_BITMAP_LONGS; i++)
++ t->io_bitmap[i] = ~0UL;
++#endif
++
++ atomic_inc(&init_mm.mm_count);
++ me->active_mm = &init_mm;
++ if (me->mm)
++ BUG();
++ enter_lazy_tlb(&init_mm, me);
++
++#ifndef CONFIG_X86_NO_TSS
++ set_tss_desc(cpu, t);
++#endif
++#ifndef CONFIG_XEN
++ load_TR_desc();
++#endif
++ load_LDT(&init_mm.context);
++
++ /*
++ * Clear all 6 debug registers:
++ */
++
++ set_debugreg(0UL, 0);
++ set_debugreg(0UL, 1);
++ set_debugreg(0UL, 2);
++ set_debugreg(0UL, 3);
++ set_debugreg(0UL, 6);
++ set_debugreg(0UL, 7);
++
++ fpu_init();
++
++ raw_local_save_flags(kernel_eflags);
++}
+Index: head-2008-11-25/arch/x86/kernel/smp_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/smp_64-xen.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,575 @@
++/*
++ * Intel SMP support routines.
++ *
++ * (c) 1995 Alan Cox, Building #3 <alan@redhat.com>
++ * (c) 1998-99, 2000 Ingo Molnar <mingo@redhat.com>
++ * (c) 2002,2003 Andi Kleen, SuSE Labs.
++ *
++ * This code is released under the GNU General Public License version 2 or
++ * later.
++ */
++
++#include <linux/init.h>
++
++#include <linux/mm.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/smp_lock.h>
++#include <linux/smp.h>
++#include <linux/kernel_stat.h>
++#include <linux/mc146818rtc.h>
++#include <linux/interrupt.h>
++
++#include <asm/mtrr.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++#include <asm/mach_apic.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/apicdef.h>
++#include <asm/idle.h>
++#ifdef CONFIG_XEN
++#include <xen/evtchn.h>
++#endif
++
++#ifndef CONFIG_XEN
++/*
++ * Smarter SMP flushing macros.
++ * c/o Linus Torvalds.
++ *
++ * These mean you can really definitely utterly forget about
++ * writing to user space from interrupts. (It's not allowed anyway.)
++ *
++ * Optimizations Manfred Spraul <manfred@colorfullife.com>
++ *
++ * More scalable flush, from Andi Kleen
++ *
++ * To avoid global state use 8 different call vectors.
++ * Each CPU uses a specific vector to trigger flushes on other
++ * CPUs. Depending on the received vector the target CPUs look into
++ * the right per cpu variable for the flush data.
++ *
++ * With more than 8 CPUs they are hashed to the 8 available
++ * vectors. The limited global vector space forces us to this right now.
++ * In future when interrupts are split into per CPU domains this could be
++ * fixed, at the cost of triggering multiple IPIs in some cases.
++ */
++
++union smp_flush_state {
++ struct {
++ cpumask_t flush_cpumask;
++ struct mm_struct *flush_mm;
++ unsigned long flush_va;
++#define FLUSH_ALL -1ULL
++ spinlock_t tlbstate_lock;
++ };
++ char pad[SMP_CACHE_BYTES];
++} ____cacheline_aligned;
++
++/* State is put into the per CPU data section, but padded
++ to a full cache line because other CPUs can access it and we don't
++ want false sharing in the per cpu data segment. */
++static DEFINE_PER_CPU(union smp_flush_state, flush_state);
++
++/*
++ * We cannot call mmdrop() because we are in interrupt context,
++ * instead update mm->cpu_vm_mask.
++ */
++static inline void leave_mm(unsigned long cpu)
++{
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ BUG();
++ cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
++ load_cr3(swapper_pg_dir);
++}
++
++/*
++ *
++ * The flush IPI assumes that a thread switch happens in this order:
++ * [cpu0: the cpu that switches]
++ * 1) switch_mm() either 1a) or 1b)
++ * 1a) thread switch to a different mm
++ * 1a1) cpu_clear(cpu, old_mm->cpu_vm_mask);
++ * Stop ipi delivery for the old mm. This is not synchronized with
++ * the other cpus, but smp_invalidate_interrupt ignores flush ipis
++ * for the wrong mm, and in the worst case we perform a superfluous
++ * tlb flush.
++ * 1a2) set cpu mmu_state to TLBSTATE_OK
++ * Now the smp_invalidate_interrupt won't call leave_mm if cpu0
++ * was in lazy tlb mode.
++ * 1a3) update cpu active_mm
++ * Now cpu0 accepts tlb flushes for the new mm.
++ * 1a4) cpu_set(cpu, new_mm->cpu_vm_mask);
++ * Now the other cpus will send tlb flush ipis.
++ * 1a5) change cr3.
++ * 1b) thread switch without mm change
++ * cpu active_mm is correct, cpu0 already handles
++ * flush ipis.
++ * 1b1) set cpu mmu_state to TLBSTATE_OK
++ * 1b2) test_and_set the cpu bit in cpu_vm_mask.
++ * Atomically set the bit [other cpus will start sending flush ipis],
++ * and test the bit.
++ * 1b3) if the bit was 0: leave_mm was called, flush the tlb.
++ * 2) switch %%esp, i.e. current
++ *
++ * The interrupt must handle 2 special cases:
++ * - cr3 is changed before %%esp, i.e. it cannot use current->{active_,}mm.
++ * - the cpu performs speculative tlb reads, i.e. even if the cpu only
++ * runs in kernel space, the cpu could load tlb entries for user space
++ * pages.
++ *
++ * The good news is that cpu mmu_state is local to each cpu, no
++ * write/read ordering problems.
++ */
++
++/*
++ * TLB flush IPI:
++ *
++ * 1) Flush the tlb entries if the cpu uses the mm that's being flushed.
++ * 2) Leave the mm if we are in the lazy tlb mode.
++ *
++ * Interrupts are disabled.
++ */
++
++asmlinkage void smp_invalidate_interrupt(struct pt_regs *regs)
++{
++ int cpu;
++ int sender;
++ union smp_flush_state *f;
++
++ cpu = smp_processor_id();
++ /*
++ * orig_rax contains the negated interrupt vector.
++ * Use that to determine where the sender put the data.
++ */
++ sender = ~regs->orig_rax - INVALIDATE_TLB_VECTOR_START;
++ f = &per_cpu(flush_state, sender);
++
++ if (!cpu_isset(cpu, f->flush_cpumask))
++ goto out;
++ /*
++ * This was a BUG() but until someone can quote me the
++ * line from the intel manual that guarantees an IPI to
++ * multiple CPUs is retried _only_ on the erroring CPUs
++	 * it's staying as a return
++ *
++ * BUG();
++ */
++
++ if (f->flush_mm == read_pda(active_mm)) {
++ if (read_pda(mmu_state) == TLBSTATE_OK) {
++ if (f->flush_va == FLUSH_ALL)
++ local_flush_tlb();
++ else
++ __flush_tlb_one(f->flush_va);
++ } else
++ leave_mm(cpu);
++ }
++out:
++ ack_APIC_irq();
++ cpu_clear(cpu, f->flush_cpumask);
++}
++
++static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
++ unsigned long va)
++{
++ int sender;
++ union smp_flush_state *f;
++
++ /* Caller has disabled preemption */
++ sender = smp_processor_id() % NUM_INVALIDATE_TLB_VECTORS;
++ f = &per_cpu(flush_state, sender);
++
++ /* Could avoid this lock when
++ num_online_cpus() <= NUM_INVALIDATE_TLB_VECTORS, but it is
++ probably not worth checking this for a cache-hot lock. */
++ spin_lock(&f->tlbstate_lock);
++
++ f->flush_mm = mm;
++ f->flush_va = va;
++ cpus_or(f->flush_cpumask, cpumask, f->flush_cpumask);
++
++ /*
++ * We have to send the IPI only to
++ * CPUs affected.
++ */
++ send_IPI_mask(cpumask, INVALIDATE_TLB_VECTOR_START + sender);
++
++ while (!cpus_empty(f->flush_cpumask))
++ cpu_relax();
++
++ f->flush_mm = NULL;
++ f->flush_va = 0;
++ spin_unlock(&f->tlbstate_lock);
++}
++
++int __cpuinit init_smp_flush(void)
++{
++ int i;
++ for_each_cpu_mask(i, cpu_possible_map) {
++ spin_lock_init(&per_cpu(flush_state, i).tlbstate_lock);
++ }
++ return 0;
++}
++
++core_initcall(init_smp_flush);
++
++void flush_tlb_current_task(void)
++{
++ struct mm_struct *mm = current->mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ local_flush_tlb();
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_current_task);
++
++void flush_tlb_mm (struct mm_struct * mm)
++{
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++ if (current->mm)
++ local_flush_tlb();
++ else
++ leave_mm(smp_processor_id());
++ }
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, FLUSH_ALL);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_mm);
++
++void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
++{
++ struct mm_struct *mm = vma->vm_mm;
++ cpumask_t cpu_mask;
++
++ preempt_disable();
++ cpu_mask = mm->cpu_vm_mask;
++ cpu_clear(smp_processor_id(), cpu_mask);
++
++ if (current->active_mm == mm) {
++		if (current->mm)
++ __flush_tlb_one(va);
++ else
++ leave_mm(smp_processor_id());
++ }
++
++ if (!cpus_empty(cpu_mask))
++ flush_tlb_others(cpu_mask, mm, va);
++
++ preempt_enable();
++}
++EXPORT_SYMBOL(flush_tlb_page);
++
++static void do_flush_tlb_all(void* info)
++{
++ unsigned long cpu = smp_processor_id();
++
++ __flush_tlb_all();
++ if (read_pda(mmu_state) == TLBSTATE_LAZY)
++ leave_mm(cpu);
++}
++
++void flush_tlb_all(void)
++{
++ on_each_cpu(do_flush_tlb_all, NULL, 1, 1);
++}
++#endif /* !CONFIG_XEN */
++
++/*
++ * This function sends a 'reschedule' IPI to another CPU.
++ * It goes straight through and wastes no time serializing
++ * anything. Worst case is that we lose a reschedule.
++ */
++
++void smp_send_reschedule(int cpu)
++{
++ send_IPI_mask(cpumask_of_cpu(cpu), RESCHEDULE_VECTOR);
++}
++
++/*
++ * Structure and data for smp_call_function(). This is designed to minimise
++ * static memory requirements. It also looks cleaner.
++ */
++static DEFINE_SPINLOCK(call_lock);
++
++struct call_data_struct {
++ void (*func) (void *info);
++ void *info;
++ atomic_t started;
++ atomic_t finished;
++ int wait;
++};
++
++static struct call_data_struct * call_data;
++
++void lock_ipi_call_lock(void)
++{
++ spin_lock_irq(&call_lock);
++}
++
++void unlock_ipi_call_lock(void)
++{
++ spin_unlock_irq(&call_lock);
++}
++
++/*
++ * This function sends a 'generic call function' IPI to one other CPU
++ * in the system.
++ *
++ * cpu is a standard Linux logical CPU number.
++ */
++static void
++__smp_call_function_single(int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = 1;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_mask(cpumask_of_cpu(cpu), CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function_single - Run a function on another CPU
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: Currently unused.
++ * @wait: If true, wait until function has completed on other CPUs.
++ *
++ * Returns 0 on success, else a negative status code.
++ *
++ * Does not return until the remote CPU is nearly ready to execute <func>,
++ * or has already executed it.
++ */
++
++int smp_call_function_single (int cpu, void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ /* prevent preemption and reschedule on another processor */
++ int me = get_cpu();
++ if (cpu == me) {
++ WARN_ON(1);
++ put_cpu();
++ return -EBUSY;
++ }
++ spin_lock_bh(&call_lock);
++ __smp_call_function_single(cpu, func, info, nonatomic, wait);
++ spin_unlock_bh(&call_lock);
++ put_cpu();
++ return 0;
++}
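++
++/*
++ * Usage sketch (drain_local_queue is a hypothetical helper): run a
++ * fast, non-blocking function on CPU 1 and wait for completion:
++ *
++ *	smp_call_function_single(1, drain_local_queue, NULL, 0, 1);
++ */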
++
++/*
++ * This function sends a 'generic call function' IPI to all other CPUs
++ * in the system.
++ */
++static void __smp_call_function (void (*func) (void *info), void *info,
++ int nonatomic, int wait)
++{
++ struct call_data_struct data;
++ int cpus = num_online_cpus()-1;
++
++ if (!cpus)
++ return;
++
++ data.func = func;
++ data.info = info;
++ atomic_set(&data.started, 0);
++ data.wait = wait;
++ if (wait)
++ atomic_set(&data.finished, 0);
++
++ call_data = &data;
++ wmb();
++ /* Send a message to all other CPUs and wait for them to respond */
++ send_IPI_allbutself(CALL_FUNCTION_VECTOR);
++
++ /* Wait for response */
++ while (atomic_read(&data.started) != cpus)
++ cpu_relax();
++
++ if (!wait)
++ return;
++
++ while (atomic_read(&data.finished) != cpus)
++ cpu_relax();
++}
++
++/*
++ * smp_call_function - run a function on all other CPUs.
++ * @func: The function to run. This must be fast and non-blocking.
++ * @info: An arbitrary pointer to pass to the function.
++ * @nonatomic: currently unused.
++ * @wait: If true, wait (atomically) until function has completed on other
++ * CPUs.
++ *
++ * Returns 0 on success, else a negative status code. Does not return until
++ * remote CPUs are nearly ready to execute func or are or have executed.
++ *
++ * You must not call this function with disabled interrupts or from a
++ * hardware interrupt handler or from a bottom half handler.
++ * Actually there are a few legal cases, like panic.
++ */
++int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
++ int wait)
++{
++ spin_lock(&call_lock);
++	__smp_call_function(func, info, nonatomic, wait);
++ spin_unlock(&call_lock);
++ return 0;
++}
++EXPORT_SYMBOL(smp_call_function);
++
++void smp_stop_cpu(void)
++{
++ unsigned long flags;
++ /*
++ * Remove this CPU:
++ */
++ cpu_clear(smp_processor_id(), cpu_online_map);
++ local_irq_save(flags);
++ disable_all_local_evtchn();
++ local_irq_restore(flags);
++}
++
++static void smp_really_stop_cpu(void *dummy)
++{
++ smp_stop_cpu();
++ for (;;)
++ halt();
++}
++
++void smp_send_stop(void)
++{
++ int nolock = 0;
++#ifndef CONFIG_XEN
++ if (reboot_force)
++ return;
++#endif
++ /* Don't deadlock on the call lock in panic */
++ if (!spin_trylock(&call_lock)) {
++		/* ignore locking because we have panicked anyway */
++ nolock = 1;
++ }
++ __smp_call_function(smp_really_stop_cpu, NULL, 0, 0);
++ if (!nolock)
++ spin_unlock(&call_lock);
++
++ local_irq_disable();
++ disable_all_local_evtchn();
++ local_irq_enable();
++}
++
++/*
++ * Reschedule callback. Nothing to do,
++ * all the work is done automatically when
++ * we return from the interrupt.
++ */
++#ifndef CONFIG_XEN
++asmlinkage void smp_reschedule_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_reschedule_interrupt(void)
++#endif
++{
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#else
++ return IRQ_HANDLED;
++#endif
++}
++
++#ifndef CONFIG_XEN
++asmlinkage void smp_call_function_interrupt(void)
++#else
++asmlinkage irqreturn_t smp_call_function_interrupt(void)
++#endif
++{
++ void (*func) (void *info) = call_data->func;
++ void *info = call_data->info;
++ int wait = call_data->wait;
++
++#ifndef CONFIG_XEN
++ ack_APIC_irq();
++#endif
++ /*
++ * Notify initiating CPU that I've grabbed the data and am
++ * about to execute the function
++ */
++ mb();
++ atomic_inc(&call_data->started);
++ /*
++ * At this point the info structure may be out of scope unless wait==1
++ */
++ exit_idle();
++ irq_enter();
++ (*func)(info);
++ irq_exit();
++ if (wait) {
++ mb();
++ atomic_inc(&call_data->finished);
++ }
++#ifdef CONFIG_XEN
++ return IRQ_HANDLED;
++#endif
++}
++
++int safe_smp_processor_id(void)
++{
++#ifdef CONFIG_XEN
++ return smp_processor_id();
++#else
++ unsigned apicid, i;
++
++ if (disable_apic)
++ return 0;
++
++ apicid = hard_smp_processor_id();
++ if (apicid < NR_CPUS && x86_cpu_to_apicid[apicid] == apicid)
++ return apicid;
++
++ for (i = 0; i < NR_CPUS; ++i) {
++ if (x86_cpu_to_apicid[i] == apicid)
++ return i;
++ }
++
++ /* No entries in x86_cpu_to_apicid? Either no MPS|ACPI,
++ * or called too early. Either way, we must be CPU 0. */
++ if (x86_cpu_to_apicid[0] == BAD_APICID)
++ return 0;
++
++ return 0; /* Should not happen */
++#endif
++}
+Index: head-2008-11-25/arch/x86/kernel/traps_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/traps_64-xen.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,1173 @@
++/*
++ * linux/arch/x86-64/traps.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
++ *
++ * Pentium III FXSR, SSE support
++ * Gareth Hughes <gareth@valinux.com>, May 2000
++ */
++
++/*
++ * 'Traps.c' handles hardware traps and faults after we have saved some
++ * state in 'entry.S'.
++ */
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/ptrace.h>
++#include <linux/timer.h>
++#include <linux/mm.h>
++#include <linux/init.h>
++#include <linux/delay.h>
++#include <linux/spinlock.h>
++#include <linux/interrupt.h>
++#include <linux/module.h>
++#include <linux/moduleparam.h>
++#include <linux/nmi.h>
++#include <linux/kprobes.h>
++#include <linux/kexec.h>
++#include <linux/unwind.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/debugreg.h>
++#include <asm/desc.h>
++#include <asm/i387.h>
++#include <asm/kdebug.h>
++#include <asm/processor.h>
++#include <asm/unwind.h>
++#include <asm/smp.h>
++#include <asm/pgalloc.h>
++#include <asm/pda.h>
++#include <asm/proto.h>
++#include <asm/nmi.h>
++
++asmlinkage void divide_error(void);
++asmlinkage void debug(void);
++asmlinkage void nmi(void);
++asmlinkage void int3(void);
++asmlinkage void overflow(void);
++asmlinkage void bounds(void);
++asmlinkage void invalid_op(void);
++asmlinkage void device_not_available(void);
++asmlinkage void double_fault(void);
++asmlinkage void coprocessor_segment_overrun(void);
++asmlinkage void invalid_TSS(void);
++asmlinkage void segment_not_present(void);
++asmlinkage void stack_segment(void);
++asmlinkage void general_protection(void);
++asmlinkage void page_fault(void);
++asmlinkage void coprocessor_error(void);
++asmlinkage void simd_coprocessor_error(void);
++asmlinkage void reserved(void);
++asmlinkage void alignment_check(void);
++asmlinkage void machine_check(void);
++asmlinkage void spurious_interrupt_bug(void);
++
++ATOMIC_NOTIFIER_HEAD(die_chain);
++EXPORT_SYMBOL(die_chain);
++
++int register_die_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++ return atomic_notifier_chain_register(&die_chain, nb);
++}
++EXPORT_SYMBOL(register_die_notifier); /* used modular by kdb */
++
++int unregister_die_notifier(struct notifier_block *nb)
++{
++ return atomic_notifier_chain_unregister(&die_chain, nb);
++}
++EXPORT_SYMBOL(unregister_die_notifier); /* used modular by kdb */
++
++static inline void conditional_sti(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_sti(struct pt_regs *regs)
++{
++ preempt_disable();
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_enable();
++}
++
++static inline void preempt_conditional_cli(struct pt_regs *regs)
++{
++ if (regs->eflags & X86_EFLAGS_IF)
++ local_irq_disable();
++ /* Make sure to not schedule here because we could be running
++ on an exception stack. */
++ preempt_enable_no_resched();
++}
++
++static int kstack_depth_to_print = 12;
++#ifdef CONFIG_STACK_UNWIND
++static int call_trace = 1;
++#else
++#define call_trace (-1)
++#endif
++
++#ifdef CONFIG_KALLSYMS
++# include <linux/kallsyms.h>
++void printk_address(unsigned long address)
++{
++ unsigned long offset = 0, symsize;
++ const char *symname;
++ char *modname;
++ char *delim = ":";
++ char namebuf[128];
++
++ symname = kallsyms_lookup(address, &symsize, &offset,
++ &modname, namebuf);
++ if (!symname) {
++ printk(" [<%016lx>]\n", address);
++ return;
++ }
++ if (!modname)
++ modname = delim = "";
++ printk(" [<%016lx>] %s%s%s%s+0x%lx/0x%lx\n",
++ address, delim, modname, delim, symname, offset, symsize);
++}
++#else
++void printk_address(unsigned long address)
++{
++ printk(" [<%016lx>]\n", address);
++}
++#endif
++
++static unsigned long *in_exception_stack(unsigned cpu, unsigned long stack,
++ unsigned *usedp, const char **idp)
++{
++#ifndef CONFIG_X86_NO_TSS
++ static char ids[][8] = {
++ [DEBUG_STACK - 1] = "#DB",
++ [NMI_STACK - 1] = "NMI",
++ [DOUBLEFAULT_STACK - 1] = "#DF",
++ [STACKFAULT_STACK - 1] = "#SS",
++ [MCE_STACK - 1] = "#MC",
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ [N_EXCEPTION_STACKS ... N_EXCEPTION_STACKS + DEBUG_STKSZ / EXCEPTION_STKSZ - 2] = "#DB[?]"
++#endif
++ };
++ unsigned k;
++
++ /*
++ * Iterate over all exception stacks, and figure out whether
++ * 'stack' is in one of them:
++ */
++ for (k = 0; k < N_EXCEPTION_STACKS; k++) {
++ unsigned long end;
++
++ /*
++ * set 'end' to the end of the exception stack.
++ */
++ switch (k + 1) {
++ /*
++		 * TODO: this block is not needed, I think, because
++ * setup64.c:cpu_init() sets up t->ist[DEBUG_STACK]
++ * properly too.
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ case DEBUG_STACK:
++ end = cpu_pda(cpu)->debugstack + DEBUG_STKSZ;
++ break;
++#endif
++ default:
++ end = per_cpu(orig_ist, cpu).ist[k];
++ break;
++ }
++ /*
++ * Is 'stack' above this exception frame's end?
++ * If yes then skip to the next frame.
++ */
++ if (stack >= end)
++ continue;
++ /*
++ * Is 'stack' above this exception frame's start address?
++ * If yes then we found the right frame.
++ */
++ if (stack >= end - EXCEPTION_STKSZ) {
++ /*
++ * Make sure we only iterate through an exception
++ * stack once. If it comes up for the second time
++ * then there's something wrong going on - just
++ * break out and return NULL:
++ */
++ if (*usedp & (1U << k))
++ break;
++ *usedp |= 1U << k;
++ *idp = ids[k];
++ return (unsigned long *)end;
++ }
++ /*
++ * If this is a debug stack, and if it has a larger size than
++ * the usual exception stacks, then 'stack' might still
++ * be within the lower portion of the debug stack:
++ */
++#if DEBUG_STKSZ > EXCEPTION_STKSZ
++ if (k == DEBUG_STACK - 1 && stack >= end - DEBUG_STKSZ) {
++ unsigned j = N_EXCEPTION_STACKS - 1;
++
++ /*
++ * Black magic. A large debug stack is composed of
++ * multiple exception stack entries, which we
++			 * iterate through now. Don't look:
++ */
++ do {
++ ++j;
++ end -= EXCEPTION_STKSZ;
++ ids[j][4] = '1' + (j - N_EXCEPTION_STACKS);
++ } while (stack < end - EXCEPTION_STKSZ);
++ if (*usedp & (1U << j))
++ break;
++ *usedp |= 1U << j;
++ *idp = ids[j];
++ return (unsigned long *)end;
++ }
++#endif
++ }
++#endif
++ return NULL;
++}
++
++static int show_trace_unwind(struct unwind_frame_info *info, void *context)
++{
++ int n = 0;
++
++ while (unwind(info) == 0 && UNW_PC(info)) {
++ n++;
++ printk_address(UNW_PC(info));
++ if (arch_unw_user_mode(info))
++ break;
++ }
++ return n;
++}
++
++/*
++ * x86-64 can have up to three kernel stacks:
++ * process stack
++ * interrupt stack
++ * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
++ */
++
++void show_trace(struct task_struct *tsk, struct pt_regs *regs, unsigned long * stack)
++{
++ const unsigned cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *)cpu_pda(cpu)->irqstackptr;
++ unsigned used = 0;
++
++ printk("\nCall Trace:\n");
++
++ if (!tsk)
++ tsk = current;
++
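++	/*
++	 * call_trace selects the unwinder: -1 = old stack scanner only,
++	 * 0 = both, 1 = DWARF2 unwinder falling back to the old scanner,
++	 * 2 = DWARF2 unwinder only (see call_trace_setup() below).
++	 */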
++ if (call_trace >= 0) {
++ int unw_ret = 0;
++ struct unwind_frame_info info;
++
++ if (regs) {
++ if (unwind_init_frame_info(&info, tsk, regs) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ } else if (tsk == current)
++ unw_ret = unwind_init_running(&info, show_trace_unwind, NULL);
++ else {
++ if (unwind_init_blocked(&info, tsk) == 0)
++ unw_ret = show_trace_unwind(&info, NULL);
++ }
++ if (unw_ret > 0) {
++ if (call_trace == 1 && !arch_unw_user_mode(&info)) {
++ print_symbol("DWARF2 unwinder stuck at %s\n",
++ UNW_PC(&info));
++ if ((long)UNW_SP(&info) < 0) {
++ printk("Leftover inexact backtrace:\n");
++ stack = (unsigned long *)UNW_SP(&info);
++ } else
++ printk("Full inexact backtrace again:\n");
++ } else if (call_trace >= 1)
++ return;
++ else
++ printk("Full inexact backtrace again:\n");
++ } else
++ printk("Inexact backtrace:\n");
++ }
++
++ /*
++ * Print function call entries within a stack. 'cond' is the
++ * "end of stackframe" condition, that the 'stack++'
++ * iteration will eventually trigger.
++ */
++#define HANDLE_STACK(cond) \
++ do while (cond) { \
++ unsigned long addr = *stack++; \
++ if (kernel_text_address(addr)) { \
++ /* \
++ * If the address is either in the text segment of the \
++ * kernel, or in the region which contains vmalloc'ed \
++ * memory, it *may* be the address of a calling \
++ * routine; if so, print it so that someone tracing \
++ * down the cause of the crash will be able to figure \
++ * out the call path that was taken. \
++ */ \
++ printk_address(addr); \
++ } \
++ } while (0)
++
++ /*
++ * Print function call entries in all stacks, starting at the
++ * current stack address. If the stacks consist of nested
++	 * exceptions, each stack is walked in turn, following its saved
++	 * link to the next stack, until the process stack is reached.
++	 */
++ for ( ; ; ) {
++ const char *id;
++ unsigned long *estack_end;
++ estack_end = in_exception_stack(cpu, (unsigned long)stack,
++ &used, &id);
++
++ if (estack_end) {
++ printk(" <%s>", id);
++ HANDLE_STACK (stack < estack_end);
++ printk(" <EOE>");
++ /*
++ * We link to the next stack via the
++ * second-to-last pointer (index -2 to end) in the
++ * exception stack:
++ */
++ stack = (unsigned long *) estack_end[-2];
++ continue;
++ }
++ if (irqstack_end) {
++ unsigned long *irqstack;
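++			/*
++			 * cpu_init() points irqstackptr 64 bytes below the
++			 * true top of the IRQ stack (scratch space), so
++			 * stepping back by IRQSTACKSIZE - 64 bytes recovers
++			 * the usable base of the stack.
++			 */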
++ irqstack = irqstack_end -
++ (IRQSTACKSIZE - 64) / sizeof(*irqstack);
++
++ if (stack >= irqstack && stack < irqstack_end) {
++ printk(" <IRQ>");
++ HANDLE_STACK (stack < irqstack_end);
++ /*
++ * We link to the next stack (which would be
++ * the process stack normally) the last
++ * pointer (index -1 to end) in the IRQ stack:
++ */
++ stack = (unsigned long *) (irqstack_end[-1]);
++ irqstack_end = NULL;
++ printk(" <EOI>");
++ continue;
++ }
++ }
++ break;
++ }
++
++ /*
++ * This prints the process stack:
++ */
++ HANDLE_STACK (((long) stack & (THREAD_SIZE-1)) != 0);
++#undef HANDLE_STACK
++
++ printk("\n");
++}
++
++static void _show_stack(struct task_struct *tsk, struct pt_regs *regs, unsigned long * rsp)
++{
++ unsigned long *stack;
++ int i;
++ const int cpu = safe_smp_processor_id();
++ unsigned long *irqstack_end = (unsigned long *) (cpu_pda(cpu)->irqstackptr);
++ unsigned long *irqstack = (unsigned long *) (cpu_pda(cpu)->irqstackptr - IRQSTACKSIZE);
++
++	/*
++	 * Debugging aid: "show_stack(NULL, NULL);" prints the
++	 * back trace for this cpu.
++	 */
++
++ if (rsp == NULL) {
++ if (tsk)
++ rsp = (unsigned long *)tsk->thread.rsp;
++ else
++ rsp = (unsigned long *)&rsp;
++ }
++
++ stack = rsp;
++	for (i = 0; i < kstack_depth_to_print; i++) {
++ if (stack >= irqstack && stack <= irqstack_end) {
++ if (stack == irqstack_end) {
++ stack = (unsigned long *) (irqstack_end[-1]);
++ printk(" <EOI> ");
++ }
++ } else {
++ if (((long) stack & (THREAD_SIZE-1)) == 0)
++ break;
++ }
++ if (i && ((i % 4) == 0))
++ printk("\n");
++ printk(" %016lx", *stack++);
++ touch_nmi_watchdog();
++ }
++ show_trace(tsk, regs, rsp);
++}
++
++void show_stack(struct task_struct *tsk, unsigned long * rsp)
++{
++ _show_stack(tsk, NULL, rsp);
++}
++
++/*
++ * The architecture-independent dump_stack generator
++ */
++void dump_stack(void)
++{
++ unsigned long dummy;
++ show_trace(NULL, NULL, &dummy);
++}
++
++EXPORT_SYMBOL(dump_stack);
++
++void show_registers(struct pt_regs *regs)
++{
++ int i;
++ int in_kernel = !user_mode(regs);
++ unsigned long rsp;
++ const int cpu = safe_smp_processor_id();
++ struct task_struct *cur = cpu_pda(cpu)->pcurrent;
++
++ rsp = regs->rsp;
++
++ printk("CPU %d ", cpu);
++ __show_regs(regs);
++ printk("Process %s (pid: %d, threadinfo %p, task %p)\n",
++ cur->comm, cur->pid, task_thread_info(cur), cur);
++
++ /*
++ * When in-kernel, we also print out the stack and code at the
++ * time of the fault..
++ */
++ if (in_kernel) {
++
++ printk("Stack: ");
++ _show_stack(NULL, regs, (unsigned long*)rsp);
++
++ printk("\nCode: ");
++ if (regs->rip < PAGE_OFFSET)
++ goto bad;
++
++		for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (__get_user(c, &((unsigned char*)regs->rip)[i])) {
++bad:
++ printk(" Bad RIP value.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ }
++ printk("\n");
++}
++
++void handle_BUG(struct pt_regs *regs)
++{
++ struct bug_frame f;
++ long len;
++ const char *prefix = "";
++
++ if (user_mode(regs))
++ return;
++ if (__copy_from_user(&f, (const void __user *) regs->rip,
++ sizeof(struct bug_frame)))
++ return;
++ if (f.filename >= 0 ||
++ f.ud2[0] != 0x0f || f.ud2[1] != 0x0b)
++ return;
++ len = __strnlen_user((char *)(long)f.filename, PATH_MAX) - 1;
++ if (len < 0 || len >= PATH_MAX)
++ f.filename = (int)(long)"unmapped filename";
++ else if (len > 50) {
++ f.filename += len - 50;
++ prefix = "...";
++ }
++ printk("----------- [cut here ] --------- [please bite here ] ---------\n");
++ printk(KERN_ALERT "Kernel BUG at %s%.50s:%d\n", prefix, (char *)(long)f.filename, f.line);
++}
++
++#ifdef CONFIG_BUG
++void out_of_line_bug(void)
++{
++ BUG();
++}
++EXPORT_SYMBOL(out_of_line_bug);
++#endif
++
++static DEFINE_SPINLOCK(die_lock);
++static int die_owner = -1;
++static unsigned int die_nest_count;
++
++unsigned __kprobes long oops_begin(void)
++{
++ int cpu = safe_smp_processor_id();
++ unsigned long flags;
++
++ /* racy, but better than risking deadlock. */
++ local_irq_save(flags);
++ if (!spin_trylock(&die_lock)) {
++ if (cpu == die_owner)
++ /* nested oops. should stop eventually */;
++ else
++ spin_lock(&die_lock);
++ }
++ die_nest_count++;
++ die_owner = cpu;
++ console_verbose();
++ bust_spinlocks(1);
++ return flags;
++}
++
++void __kprobes oops_end(unsigned long flags)
++{
++ die_owner = -1;
++ bust_spinlocks(0);
++ die_nest_count--;
++ if (die_nest_count)
++ /* We still own the lock */
++ local_irq_restore(flags);
++ else
++ /* Nest count reaches zero, release the lock. */
++ spin_unlock_irqrestore(&die_lock, flags);
++ if (panic_on_oops)
++ panic("Fatal exception");
++}
++
++void __kprobes __die(const char * str, struct pt_regs * regs, long err)
++{
++ static int die_counter;
++	printk(KERN_EMERG "%s: %04lx [%u] ", str, err & 0xffff, ++die_counter);
++#ifdef CONFIG_PREEMPT
++ printk("PREEMPT ");
++#endif
++#ifdef CONFIG_SMP
++ printk("SMP ");
++#endif
++#ifdef CONFIG_DEBUG_PAGEALLOC
++ printk("DEBUG_PAGEALLOC");
++#endif
++ printk("\n");
++ notify_die(DIE_OOPS, str, regs, err, current->thread.trap_no, SIGSEGV);
++ show_registers(regs);
++ /* Executive summary in case the oops scrolled away */
++ printk(KERN_ALERT "RIP ");
++ printk_address(regs->rip);
++ printk(" RSP <%016lx>\n", regs->rsp);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++}
++
++void die(const char * str, struct pt_regs * regs, long err)
++{
++ unsigned long flags = oops_begin();
++
++ handle_BUG(regs);
++ __die(str, regs, err);
++ oops_end(flags);
++ do_exit(SIGSEGV);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++void __kprobes die_nmi(char *str, struct pt_regs *regs)
++{
++ unsigned long flags = oops_begin();
++
++ /*
++	 * We are in trouble anyway, let's at least try
++ * to get a message out.
++ */
++ printk(str, safe_smp_processor_id());
++ show_registers(regs);
++ if (kexec_should_crash(current))
++ crash_kexec(regs);
++ if (panic_on_timeout || panic_on_oops)
++ panic("nmi watchdog");
++ printk("console shuts up ...\n");
++ oops_end(flags);
++ nmi_exit();
++ local_irq_enable();
++ do_exit(SIGSEGV);
++}
++#endif
++
++static void __kprobes do_trap(int trapnr, int signr, char *str,
++ struct pt_regs * regs, long error_code,
++ siginfo_t *info)
++{
++ struct task_struct *tsk = current;
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = trapnr;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, signr))
++ printk(KERN_INFO
++ "%s[%d] trap %s rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid, str,
++ regs->rip, regs->rsp, error_code);
++
++ if (info)
++ force_sig_info(signr, info, tsk);
++ else
++ force_sig(signr, tsk);
++ return;
++ }
++
++
++ /* kernel trap */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup)
++ regs->rip = fixup->fixup;
++ else
++ die(str, regs, error_code);
++ return;
++ }
++}
++
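++/*
++ * Each DO_ERROR/DO_ERROR_INFO invocation below expands into an asmlinkage
++ * handler that runs the die notifier chain, re-enables interrupts if the
++ * interrupted context had them enabled, and hands the trap to do_trap().
++ */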
++#define DO_ERROR(trapnr, signr, str, name) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, NULL); \
++}
++
++#define DO_ERROR_INFO(trapnr, signr, str, name, sicode, siaddr) \
++asmlinkage void do_##name(struct pt_regs * regs, long error_code) \
++{ \
++ siginfo_t info; \
++ info.si_signo = signr; \
++ info.si_errno = 0; \
++ info.si_code = sicode; \
++ info.si_addr = (void __user *)siaddr; \
++ if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) \
++ == NOTIFY_STOP) \
++ return; \
++ conditional_sti(regs); \
++ do_trap(trapnr, signr, str, regs, error_code, &info); \
++}
++
++DO_ERROR_INFO( 0, SIGFPE, "divide error", divide_error, FPE_INTDIV, regs->rip)
++DO_ERROR( 4, SIGSEGV, "overflow", overflow)
++DO_ERROR( 5, SIGSEGV, "bounds", bounds)
++DO_ERROR_INFO( 6, SIGILL, "invalid opcode", invalid_op, ILL_ILLOPN, regs->rip)
++DO_ERROR( 7, SIGSEGV, "device not available", device_not_available)
++DO_ERROR( 9, SIGFPE, "coprocessor segment overrun", coprocessor_segment_overrun)
++DO_ERROR(10, SIGSEGV, "invalid TSS", invalid_TSS)
++DO_ERROR(11, SIGBUS, "segment not present", segment_not_present)
++DO_ERROR_INFO(17, SIGBUS, "alignment check", alignment_check, BUS_ADRALN, 0)
++DO_ERROR(18, SIGSEGV, "reserved", reserved)
++
++/* Runs on IST stack */
++asmlinkage void do_stack_segment(struct pt_regs *regs, long error_code)
++{
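++	/*
++	 * preempt_conditional_sti() disables preemption before potentially
++	 * re-enabling interrupts: this handler runs on a per-CPU IST stack
++	 * and must not be scheduled away while still using it.
++	 */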
++ if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
++ 12, SIGBUS) == NOTIFY_STOP)
++ return;
++ preempt_conditional_sti(regs);
++ do_trap(12, SIGBUS, "stack segment", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++asmlinkage void do_double_fault(struct pt_regs * regs, long error_code)
++{
++ static const char str[] = "double fault";
++ struct task_struct *tsk = current;
++
++	/* Return value not checked because a double fault cannot be ignored */
++ notify_die(DIE_TRAP, str, regs, error_code, 8, SIGSEGV);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 8;
++
++ /* This is always a kernel trap and never fixable (and thus must
++ never return). */
++ for (;;)
++ die(str, regs, error_code);
++}
++
++asmlinkage void __kprobes do_general_protection(struct pt_regs * regs,
++ long error_code)
++{
++ struct task_struct *tsk = current;
++
++ conditional_sti(regs);
++
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 13;
++
++ if (user_mode(regs)) {
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV))
++ printk(KERN_INFO
++ "%s[%d] general protection rip:%lx rsp:%lx error:%lx\n",
++ tsk->comm, tsk->pid,
++ regs->rip, regs->rsp, error_code);
++
++ force_sig(SIGSEGV, tsk);
++ return;
++ }
++
++ /* kernel gp */
++ {
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++ if (notify_die(DIE_GPF, "general protection fault", regs,
++ error_code, 13, SIGSEGV) == NOTIFY_STOP)
++ return;
++ die("general protection fault", regs, error_code);
++ }
++}
++
++static __kprobes void
++mem_parity_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("Uhhuh. NMI received. Dazed and confused, but trying to continue\n");
++ printk("You probably have a hardware problem with your RAM chips\n");
++
++#if 0 /* XEN */
++ /* Clear and disable the memory parity error line. */
++ reason = (reason & 0xf) | 4;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++io_check_error(unsigned char reason, struct pt_regs * regs)
++{
++ printk("NMI: IOCK error (debug interrupt?)\n");
++ show_registers(regs);
++
++#if 0 /* XEN */
++ /* Re-enable the IOCK line, wait for a few seconds */
++ reason = (reason & 0xf) | 8;
++ outb(reason, 0x61);
++ mdelay(2000);
++ reason &= ~8;
++ outb(reason, 0x61);
++#endif /* XEN */
++}
++
++static __kprobes void
++unknown_nmi_error(unsigned char reason, struct pt_regs * regs)
++{
++	printk("Uhhuh. NMI received for unknown reason %02x.\n", reason);
++ printk("Dazed and confused, but trying to continue\n");
++ printk("Do you have a strange power saving mode enabled?\n");
++}
++
++/* Runs on IST stack. This code must keep interrupts off all the time.
++ Nested NMIs are prevented by the CPU. */
++asmlinkage __kprobes void default_do_nmi(struct pt_regs *regs)
++{
++ unsigned char reason = 0;
++ int cpu;
++
++ cpu = smp_processor_id();
++
++ /* Only the BSP gets external NMIs from the system. */
++ if (!cpu)
++ reason = get_nmi_reason();
++
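++	/*
++	 * NMI reason bits from port 0x61: 0x80 = memory parity/SERR error,
++	 * 0x40 = I/O check error. If neither is set, the NMI has no
++	 * documented source (NMI IPI, watchdog or genuinely unknown).
++	 */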
++ if (!(reason & 0xc0)) {
++ if (notify_die(DIE_NMI_IPI, "nmi_ipi", regs, reason, 2, SIGINT)
++ == NOTIFY_STOP)
++ return;
++#ifdef CONFIG_X86_LOCAL_APIC
++ /*
++ * Ok, so this is none of the documented NMI sources,
++ * so it must be the NMI watchdog.
++ */
++ if (nmi_watchdog > 0) {
++ 			nmi_watchdog_tick(regs, reason);
++ return;
++ }
++#endif
++ unknown_nmi_error(reason, regs);
++ return;
++ }
++ if (notify_die(DIE_NMI, "nmi", regs, reason, 2, SIGINT) == NOTIFY_STOP)
++ return;
++
++ /* AK: following checks seem to be broken on modern chipsets. FIXME */
++
++ if (reason & 0x80)
++ mem_parity_error(reason, regs);
++ if (reason & 0x40)
++ io_check_error(reason, regs);
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_int3(struct pt_regs * regs, long error_code)
++{
++ if (notify_die(DIE_INT3, "int3", regs, error_code, 3, SIGTRAP) == NOTIFY_STOP) {
++ return;
++ }
++ preempt_conditional_sti(regs);
++ do_trap(3, SIGTRAP, "int3", regs, error_code, NULL);
++ preempt_conditional_cli(regs);
++}
++
++/* Help handler running on IST stack to switch back to user stack
++ for scheduling or signal handling. The actual stack switch is done in
++ entry.S */
++asmlinkage __kprobes struct pt_regs *sync_regs(struct pt_regs *eregs)
++{
++ struct pt_regs *regs = eregs;
++	/* Already synced */
++ if (eregs == (struct pt_regs *)eregs->rsp)
++ ;
++ /* Exception from user space */
++ else if (user_mode(eregs))
++ regs = task_pt_regs(current);
++ /* Exception from kernel and interrupts are enabled. Move to
++ kernel process stack. */
++ else if (eregs->eflags & X86_EFLAGS_IF)
++ regs = (struct pt_regs *)(eregs->rsp -= sizeof(struct pt_regs));
++ if (eregs != regs)
++ *regs = *eregs;
++ return regs;
++}
++
++/* runs on IST stack. */
++asmlinkage void __kprobes do_debug(struct pt_regs * regs,
++ unsigned long error_code)
++{
++ unsigned long condition;
++ struct task_struct *tsk = current;
++ siginfo_t info;
++
++ get_debugreg(condition, 6);
++
++ if (notify_die(DIE_DEBUG, "debug", regs, condition, error_code,
++ SIGTRAP) == NOTIFY_STOP)
++ return;
++
++ preempt_conditional_sti(regs);
++
++ /* Mask out spurious debug traps due to lazy DR7 setting */
++ if (condition & (DR_TRAP0|DR_TRAP1|DR_TRAP2|DR_TRAP3)) {
++ if (!tsk->thread.debugreg7) {
++ goto clear_dr7;
++ }
++ }
++
++ tsk->thread.debugreg6 = condition;
++
++ /* Mask out spurious TF errors due to lazy TF clearing */
++ if (condition & DR_STEP) {
++ /*
++ * The TF error should be masked out only if the current
++ * process is not traced and if the TRAP flag has been set
++ * previously by a tracing process (condition detected by
++ * the PT_DTRACE flag); remember that the i386 TRAP flag
++ * can be modified by the process itself in user mode,
++ * allowing programs to debug themselves without the ptrace()
++ * interface.
++ */
++ if (!user_mode(regs))
++ goto clear_TF_reenable;
++ /*
++ * Was the TF flag set by a debugger? If so, clear it now,
++ * so that register information is correct.
++ */
++ if (tsk->ptrace & PT_DTRACE) {
++ regs->eflags &= ~TF_MASK;
++ tsk->ptrace &= ~PT_DTRACE;
++ }
++ }
++
++ /* Ok, finally something we can handle */
++ tsk->thread.trap_no = 1;
++ tsk->thread.error_code = error_code;
++ info.si_signo = SIGTRAP;
++ info.si_errno = 0;
++ info.si_code = TRAP_BRKPT;
++ info.si_addr = user_mode(regs) ? (void __user *)regs->rip : NULL;
++ force_sig_info(SIGTRAP, &info, tsk);
++
++clear_dr7:
++ set_debugreg(0UL, 7);
++ preempt_conditional_cli(regs);
++ return;
++
++clear_TF_reenable:
++ set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
++ regs->eflags &= ~TF_MASK;
++ preempt_conditional_cli(regs);
++}
++
++static int kernel_math_error(struct pt_regs *regs, const char *str, int trapnr)
++{
++ const struct exception_table_entry *fixup;
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return 1;
++ }
++ notify_die(DIE_GPF, str, regs, 0, trapnr, SIGFPE);
++ /* Illegal floating point operation in the kernel */
++ current->thread.trap_no = trapnr;
++ die(str, regs, 0);
++ return 0;
++}
++
++/*
++ * Note that we play around with the 'TS' bit in an attempt to get
++ * the correct behaviour even in the presence of the asynchronous
++ * IRQ13 behaviour
++ */
++asmlinkage void do_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short cwd, swd;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel x87 math error", 16))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 16;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * (~cwd & swd) will mask out exceptions that are not set to unmasked
++ * status. 0x3f is the exception bits in these regs, 0x200 is the
++ * C1 reg you need in case of a stack fault, 0x040 is the stack
++ * fault bit. We should only be taking one exception at a time,
++ * so if this combination doesn't produce any single exception,
++ * then we have a bad program that isn't synchronizing its FPU usage
++ * and it will suffer the consequences since we won't be able to
++ * fully reproduce the context of the exception
++ */
++ cwd = get_fpu_cwd(task);
++ swd = get_fpu_swd(task);
++ switch (swd & ~cwd & 0x3f) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ /*
++ * swd & 0x240 == 0x040: Stack Underflow
++ * swd & 0x240 == 0x240: Stack Overflow
++ * User must clear the SF bit (0x40) if set
++ */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void bad_intr(void)
++{
++ printk("bad interrupt");
++}
++
++asmlinkage void do_simd_coprocessor_error(struct pt_regs *regs)
++{
++ void __user *rip = (void __user *)(regs->rip);
++ struct task_struct * task;
++ siginfo_t info;
++ unsigned short mxcsr;
++
++ conditional_sti(regs);
++ if (!user_mode(regs) &&
++ kernel_math_error(regs, "kernel simd math error", 19))
++ return;
++
++ /*
++ * Save the info for the exception handler and clear the error.
++ */
++ task = current;
++ save_init_fpu(task);
++ task->thread.trap_no = 19;
++ task->thread.error_code = 0;
++ info.si_signo = SIGFPE;
++ info.si_errno = 0;
++ info.si_code = __SI_FAULT;
++ info.si_addr = rip;
++ /*
++ * The SIMD FPU exceptions are handled a little differently, as there
++ * is only a single status/control register. Thus, to determine which
++ * unmasked exception was caught we must mask the exception mask bits
++ * at 0x1f80, and then use these to mask the exception bits at 0x3f.
++ */
++ mxcsr = get_fpu_mxcsr(task);
++ switch (~((mxcsr & 0x1f80) >> 7) & (mxcsr & 0x3f)) {
++ case 0x000:
++ default:
++ break;
++ case 0x001: /* Invalid Op */
++ info.si_code = FPE_FLTINV;
++ break;
++ case 0x002: /* Denormalize */
++ case 0x010: /* Underflow */
++ info.si_code = FPE_FLTUND;
++ break;
++ case 0x004: /* Zero Divide */
++ info.si_code = FPE_FLTDIV;
++ break;
++ case 0x008: /* Overflow */
++ info.si_code = FPE_FLTOVF;
++ break;
++ case 0x020: /* Precision */
++ info.si_code = FPE_FLTRES;
++ break;
++ }
++ force_sig_info(SIGFPE, &info, task);
++}
++
++asmlinkage void do_spurious_interrupt_bug(struct pt_regs * regs)
++{
++}
++
++#if 0
++asmlinkage void __attribute__((weak)) smp_thermal_interrupt(void)
++{
++}
++#endif
++
++asmlinkage void __attribute__((weak)) mce_threshold_interrupt(void)
++{
++}
++
++/*
++ * 'math_state_restore()' saves the current math information in the
++ * old math state array, and gets the new ones from the current task
++ *
++ * Careful: there are problems with IBM-designed IRQ13 behaviour.
++ * Don't touch unless you *really* know how it works.
++ */
++asmlinkage void math_state_restore(void)
++{
++ struct task_struct *me = current;
++ /* clts(); */ /* 'clts' is done for us by Xen during virtual trap. */
++
++ if (!used_math())
++ init_fpu(me);
++ restore_fpu_checking(&me->thread.i387.fxsave);
++ task_thread_info(me)->status |= TS_USEDFPU;
++}
++
++
++/*
++ * NB. All these are "interrupt gates" (i.e. events_mask is set) because we
++ * specify <dpl>|4 in the second field.
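++ * Vectors 3 (int3) and 4 (overflow) use DPL 3 so that the int3 and
++ * into instructions may be issued from user mode.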
++ */
++static trap_info_t __cpuinitdata trap_table[] = {
++ { 0, 0|4, __KERNEL_CS, (unsigned long)divide_error },
++ { 1, 0|4, __KERNEL_CS, (unsigned long)debug },
++ { 3, 3|4, __KERNEL_CS, (unsigned long)int3 },
++ { 4, 3|4, __KERNEL_CS, (unsigned long)overflow },
++ { 5, 0|4, __KERNEL_CS, (unsigned long)bounds },
++ { 6, 0|4, __KERNEL_CS, (unsigned long)invalid_op },
++ { 7, 0|4, __KERNEL_CS, (unsigned long)device_not_available },
++ { 9, 0|4, __KERNEL_CS, (unsigned long)coprocessor_segment_overrun},
++ { 10, 0|4, __KERNEL_CS, (unsigned long)invalid_TSS },
++ { 11, 0|4, __KERNEL_CS, (unsigned long)segment_not_present },
++ { 12, 0|4, __KERNEL_CS, (unsigned long)stack_segment },
++ { 13, 0|4, __KERNEL_CS, (unsigned long)general_protection },
++ { 14, 0|4, __KERNEL_CS, (unsigned long)page_fault },
++ { 15, 0|4, __KERNEL_CS, (unsigned long)spurious_interrupt_bug },
++ { 16, 0|4, __KERNEL_CS, (unsigned long)coprocessor_error },
++ { 17, 0|4, __KERNEL_CS, (unsigned long)alignment_check },
++#ifdef CONFIG_X86_MCE
++ { 18, 0|4, __KERNEL_CS, (unsigned long)machine_check },
++#endif
++ { 19, 0|4, __KERNEL_CS, (unsigned long)simd_coprocessor_error },
++#ifdef CONFIG_IA32_EMULATION
++ { IA32_SYSCALL_VECTOR, 3, __KERNEL_CS, (unsigned long)ia32_syscall},
++#endif
++ { 0, 0, 0, 0 }
++};
++
++void __init trap_init(void)
++{
++ int ret;
++
++ ret = HYPERVISOR_set_trap_table(trap_table);
++ if (ret)
++ printk("HYPERVISOR_set_trap_table failed: error %d\n", ret);
++
++ /*
++ * Should be a barrier for any external CPU state.
++ */
++ cpu_init();
++}
++
++void __cpuinit smp_trap_init(trap_info_t *trap_ctxt)
++{
++	const trap_info_t *t;
++
++ for (t = trap_table; t->address; t++) {
++ trap_ctxt[t->vector].flags = t->flags;
++ trap_ctxt[t->vector].cs = t->cs;
++ trap_ctxt[t->vector].address = t->address;
++ }
++}
++
++
++/* Actual parsing is done early in setup.c. */
++static int __init oops_dummy(char *s)
++{
++ panic_on_oops = 1;
++ return 1;
++}
++__setup("oops=", oops_dummy);
++
++static int __init kstack_setup(char *s)
++{
++	kstack_depth_to_print = simple_strtoul(s, NULL, 0);
++ return 1;
++}
++__setup("kstack=", kstack_setup);
++
++#ifdef CONFIG_STACK_UNWIND
++static int __init call_trace_setup(char *s)
++{
++ if (strcmp(s, "old") == 0)
++ call_trace = -1;
++ else if (strcmp(s, "both") == 0)
++ call_trace = 0;
++ else if (strcmp(s, "newfallback") == 0)
++ call_trace = 1;
++ else if (strcmp(s, "new") == 0)
++ call_trace = 2;
++ return 1;
++}
++__setup("call_trace=", call_trace_setup);
++#endif
+Index: head-2008-11-25/arch/x86/kernel/vsyscall_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/vsyscall_64-xen.c 2007-06-18 08:38:13.000000000 +0200
+@@ -0,0 +1,227 @@
++/*
++ * linux/arch/x86_64/kernel/vsyscall.c
++ *
++ * Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
++ * Copyright 2003 Andi Kleen, SuSE Labs.
++ *
++ * Thanks to hpa@transmeta.com for some useful hints.
++ * Special thanks to Ingo Molnar for his early experience with
++ * a different vsyscall implementation for Linux/IA32 and for the name.
++ *
++ * vsyscall 1 is located at -10Mbyte, vsyscall 2 is located
++ * at virtual address -10Mbyte+1024bytes etc... There are at max 4
++ * vsyscalls. One vsyscall can reserve more than 1 slot to avoid
++ * jumping out of line if necessary. We cannot add more with this
++ * mechanism because older kernels won't return -ENOSYS.
++ * If we want more than four we need a vDSO.
++ *
++ * Note: the concept clashes with user mode linux. If you use UML and
++ * want per guest time just set the kernel.vsyscall64 sysctl to 0.
++ */
++
++#include <linux/time.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/timer.h>
++#include <linux/seqlock.h>
++#include <linux/jiffies.h>
++#include <linux/sysctl.h>
++
++#include <asm/vsyscall.h>
++#include <asm/pgtable.h>
++#include <asm/page.h>
++#include <asm/fixmap.h>
++#include <asm/errno.h>
++#include <asm/io.h>
++
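++/*
++ * Each vsyscall is emitted into its own .vsyscall_<nr> section; the linker
++ * script places section <nr> at the fixed user-visible address
++ * VSYSCALL_ADDR(<nr>), i.e. -10Mbyte + <nr>*1024.
++ */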
++#define __vsyscall(nr) __attribute__ ((unused,__section__(".vsyscall_" #nr)))
++
++int __sysctl_vsyscall __section_sysctl_vsyscall = 1;
++seqlock_t __xtime_lock __section_xtime_lock = SEQLOCK_UNLOCKED;
++
++#include <asm/unistd.h>
++
++static __always_inline void timeval_normalize(struct timeval * tv)
++{
++ time_t __sec;
++
++ __sec = tv->tv_usec / 1000000;
++ if (__sec) {
++ tv->tv_usec %= 1000000;
++ tv->tv_sec += __sec;
++ }
++}
++
++static __always_inline void do_vgettimeofday(struct timeval * tv)
++{
++ long sequence, t;
++ unsigned long sec, usec;
++
++ do {
++ sequence = read_seqbegin(&__xtime_lock);
++
++ sec = __xtime.tv_sec;
++ usec = (__xtime.tv_nsec / 1000) +
++ (__jiffies - __wall_jiffies) * (1000000 / HZ);
++
++ if (__vxtime.mode != VXTIME_HPET) {
++ t = get_cycles_sync();
++ if (t < __vxtime.last_tsc)
++ t = __vxtime.last_tsc;
++ usec += ((t - __vxtime.last_tsc) *
++ __vxtime.tsc_quot) >> 32;
++ /* See comment in x86_64 do_gettimeofday. */
++ } else {
++ usec += ((readl((void *)fix_to_virt(VSYSCALL_HPET) + 0xf0) -
++ __vxtime.last) * __vxtime.quot) >> 32;
++ }
++ } while (read_seqretry(&__xtime_lock, sequence));
++
++ tv->tv_sec = sec + usec / 1000000;
++ tv->tv_usec = usec % 1000000;
++}
++
++/* RED-PEN: may want to re-add seq locking, but then the variable should be write-once. */
++static __always_inline void do_get_tz(struct timezone * tz)
++{
++ *tz = __sys_tz;
++}
++
++static __always_inline int gettimeofday(struct timeval *tv, struct timezone *tz)
++{
++ int ret;
++ asm volatile("vsysc2: syscall"
++ : "=a" (ret)
++ : "0" (__NR_gettimeofday),"D" (tv),"S" (tz) : __syscall_clobber );
++ return ret;
++}
++
++static __always_inline long time_syscall(long *t)
++{
++ long secs;
++ asm volatile("vsysc1: syscall"
++ : "=a" (secs)
++ : "0" (__NR_time),"D" (t) : __syscall_clobber);
++ return secs;
++}
++
++int __vsyscall(0) vgettimeofday(struct timeval * tv, struct timezone * tz)
++{
++ if (!__sysctl_vsyscall)
++ return gettimeofday(tv,tz);
++ if (tv)
++ do_vgettimeofday(tv);
++ if (tz)
++ do_get_tz(tz);
++ return 0;
++}
++
++/* This will break when the xtime seconds get inaccurate, but that is
++ * unlikely */
++time_t __vsyscall(1) vtime(time_t *t)
++{
++ if (!__sysctl_vsyscall)
++ return time_syscall(t);
++ else if (t)
++ *t = __xtime.tv_sec;
++ return __xtime.tv_sec;
++}
++
++long __vsyscall(2) venosys_0(void)
++{
++ return -ENOSYS;
++}
++
++long __vsyscall(3) venosys_1(void)
++{
++ return -ENOSYS;
++}
++
++#ifdef CONFIG_SYSCTL
++
++#define SYSCALL 0x050f
++#define NOP2 0x9090
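++/*
++ * Little-endian 16-bit opcode pairs: 0x050f is the two-byte syscall
++ * instruction (0x0f 0x05) and 0x9090 is two one-byte NOPs; one or the
++ * other is patched over the vsysc1/vsysc2 sites below.
++ */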
++
++/*
++ * NOP out syscall in vsyscall page when not needed.
++ */
++static int vsyscall_sysctl_change(ctl_table *ctl, int write, struct file * filp,
++ void __user *buffer, size_t *lenp, loff_t *ppos)
++{
++ extern u16 vsysc1, vsysc2;
++ u16 *map1, *map2;
++ int ret = proc_dointvec(ctl, write, filp, buffer, lenp, ppos);
++ if (!write)
++ return ret;
++ /* gcc has some trouble with __va(__pa()), so just do it this
++ way. */
++ map1 = ioremap(__pa_symbol(&vsysc1), 2);
++ if (!map1)
++ return -ENOMEM;
++ map2 = ioremap(__pa_symbol(&vsysc2), 2);
++ if (!map2) {
++ ret = -ENOMEM;
++ goto out;
++ }
++ if (!sysctl_vsyscall) {
++ *map1 = SYSCALL;
++ *map2 = SYSCALL;
++ } else {
++ *map1 = NOP2;
++ *map2 = NOP2;
++ }
++ iounmap(map2);
++out:
++ iounmap(map1);
++ return ret;
++}
++
++static int vsyscall_sysctl_nostrat(ctl_table *t, int __user *name, int nlen,
++ void __user *oldval, size_t __user *oldlenp,
++ void __user *newval, size_t newlen,
++ void **context)
++{
++ return -ENOSYS;
++}
++
++static ctl_table kernel_table2[] = {
++ { .ctl_name = 99, .procname = "vsyscall64",
++ .data = &sysctl_vsyscall, .maxlen = sizeof(int), .mode = 0644,
++ .strategy = vsyscall_sysctl_nostrat,
++ .proc_handler = vsyscall_sysctl_change },
++ { 0, }
++};
++
++static ctl_table kernel_root_table2[] = {
++ { .ctl_name = CTL_KERN, .procname = "kernel", .mode = 0555,
++ .child = kernel_table2 },
++ { 0 },
++};
++
++#endif
++
++static void __init map_vsyscall(void)
++{
++ extern char __vsyscall_0;
++ unsigned long physaddr_page0 = __pa_symbol(&__vsyscall_0);
++
++ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_page0, PAGE_KERNEL_VSYSCALL);
++}
++
++static int __init vsyscall_init(void)
++{
++ BUG_ON(((unsigned long) &vgettimeofday !=
++ VSYSCALL_ADDR(__NR_vgettimeofday)));
++ BUG_ON((unsigned long) &vtime != VSYSCALL_ADDR(__NR_vtime));
++ BUG_ON((VSYSCALL_ADDR(0) != __fix_to_virt(VSYSCALL_FIRST_PAGE)));
++ map_vsyscall();
++#ifdef CONFIG_XEN
++	sysctl_vsyscall = 0; /* disable vgettimeofday() */
++#endif
++#ifdef CONFIG_SYSCTL
++ register_sysctl_table(kernel_root_table2, 0);
++#endif
++ return 0;
++}
++
++__initcall(vsyscall_init);
+Index: head-2008-11-25/arch/x86/kernel/xen_entry_64.S
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/kernel/xen_entry_64.S 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,36 @@
++/*
++ * Copied from arch/xen/i386/kernel/entry.S
++ */
++/* Offsets into shared_info_t. */
++#define evtchn_upcall_pending /* 0 */
++#define evtchn_upcall_mask 1
++
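++/* log2 of sizeof(vcpu_info_t) (64 bytes): scales a CPU number into an
++   offset into the shared-info vcpu_info array. */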
++#define sizeof_vcpu_shift 6
++
++#ifdef CONFIG_SMP
++//#define preempt_disable(reg) incl threadinfo_preempt_count(reg)
++//#define preempt_enable(reg) decl threadinfo_preempt_count(reg)
++#define preempt_disable(reg)
++#define preempt_enable(reg)
++#define XEN_GET_VCPU_INFO(reg) preempt_disable(%rbp) ; \
++ movq %gs:pda_cpunumber,reg ; \
++ shl $32, reg ; \
++ shr $32-sizeof_vcpu_shift,reg ; \
++ addq HYPERVISOR_shared_info,reg
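++/* The shl/shr pair discards the upper 32 bits picked up by the movq and
++   multiplies the CPU number by sizeof(vcpu_info_t) (1 << sizeof_vcpu_shift). */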
++#define XEN_PUT_VCPU_INFO(reg) preempt_enable(%rbp) ; \
++#define XEN_PUT_VCPU_INFO_fixup .byte 0xff,0xff,0xff
++#else
++#define XEN_GET_VCPU_INFO(reg) movq HYPERVISOR_shared_info,reg
++#define XEN_PUT_VCPU_INFO(reg)
++#define XEN_PUT_VCPU_INFO_fixup
++#endif
++
++#define XEN_LOCKED_BLOCK_EVENTS(reg) movb $1,evtchn_upcall_mask(reg)
++#define XEN_LOCKED_UNBLOCK_EVENTS(reg) movb $0,evtchn_upcall_mask(reg)
++#define XEN_BLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_BLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_UNBLOCK_EVENTS(reg) XEN_GET_VCPU_INFO(reg) ; \
++ XEN_LOCKED_UNBLOCK_EVENTS(reg) ; \
++ XEN_PUT_VCPU_INFO(reg)
++#define XEN_TEST_PENDING(reg) testb $0xFF,evtchn_upcall_pending(reg)
+Index: head-2008-11-25/arch/x86/mm/fault_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/fault_64-xen.c 2007-11-02 17:34:23.000000000 +0100
+@@ -0,0 +1,724 @@
++/*
++ * linux/arch/x86-64/mm/fault.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2001,2002 Andi Kleen, SuSE Labs.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/smp.h>
++#include <linux/smp_lock.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/tty.h>
++#include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/compiler.h>
++#include <linux/module.h>
++#include <linux/kprobes.h>
++
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgalloc.h>
++#include <asm/smp.h>
++#include <asm/tlbflush.h>
++#include <asm/proto.h>
++#include <asm/kdebug.h>
++#include <asm-generic/sections.h>
++
++/* Page fault error code bits */
++#define PF_PROT	(1<<0)	/* set: protection violation, clear: page not present */
++#define PF_WRITE (1<<1)
++#define PF_USER (1<<2)
++#define PF_RSVD (1<<3)
++#define PF_INSTR (1<<4)
++
++#ifdef CONFIG_KPROBES
++ATOMIC_NOTIFIER_HEAD(notify_page_fault_chain);
++
++/* Hook to register for page fault notifications */
++int register_page_fault_notifier(struct notifier_block *nb)
++{
++ vmalloc_sync_all();
++	return atomic_notifier_chain_register(&notify_page_fault_chain, nb);
++}
++
++int unregister_page_fault_notifier(struct notifier_block *nb)
++{
++	return atomic_notifier_chain_unregister(&notify_page_fault_chain, nb);
++}
++
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ struct die_args args = {
++ .regs = regs,
++ .str = str,
++ .err = err,
++ .trapnr = trap,
++ .signr = sig
++ };
++	return atomic_notifier_call_chain(&notify_page_fault_chain, val, &args);
++}
++#else
++static inline int notify_page_fault(enum die_val val, const char *str,
++ struct pt_regs *regs, long err, int trap, int sig)
++{
++ return NOTIFY_DONE;
++}
++#endif
++
++void bust_spinlocks(int yes)
++{
++ int loglevel_save = console_loglevel;
++ if (yes) {
++ oops_in_progress = 1;
++ } else {
++#ifdef CONFIG_VT
++ unblank_screen();
++#endif
++ oops_in_progress = 0;
++ /*
++ * OK, the message is on the console. Now we call printk()
++ * without oops_in_progress set so that printk will give klogd
++ * a poke. Hold onto your hats...
++ */
++ console_loglevel = 15; /* NMI oopser may have shut the console up */
++ printk(" ");
++ console_loglevel = loglevel_save;
++ }
++}
++
++/* Sometimes the CPU reports invalid exceptions on prefetch.
++   Check for that here and ignore it.
++ Opcode checker based on code by Richard Brunner */
++static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
++ unsigned long error_code)
++{
++ unsigned char *instr;
++ int scan_more = 1;
++ int prefetch = 0;
++ unsigned char *max_instr;
++
++	/* If it was an exec fault, ignore it */
++ if (error_code & PF_INSTR)
++ return 0;
++
++ instr = (unsigned char *)convert_rip_to_linear(current, regs);
++ max_instr = instr + 15;
++
++ if (user_mode(regs) && instr >= (unsigned char *)TASK_SIZE)
++ return 0;
++
++ while (scan_more && instr < max_instr) {
++ unsigned char opcode;
++ unsigned char instr_hi;
++ unsigned char instr_lo;
++
++ if (__get_user(opcode, instr))
++ break;
++
++ instr_hi = opcode & 0xf0;
++ instr_lo = opcode & 0x0f;
++ instr++;
++
++ switch (instr_hi) {
++ case 0x20:
++ case 0x30:
++ /* Values 0x26,0x2E,0x36,0x3E are valid x86
++ prefixes. In long mode, the CPU will signal
++ invalid opcode if some of these prefixes are
++ present so we will never get here anyway */
++ scan_more = ((instr_lo & 7) == 0x6);
++ break;
++
++ case 0x40:
++ /* In AMD64 long mode, 0x40 to 0x4F are valid REX prefixes
++ Need to figure out under what instruction mode the
++ instruction was issued ... */
++ /* Could check the LDT for lm, but for now it's good
++			   enough to assume that long mode only uses well-known
++			   segments or the kernel. */
++ scan_more = (!user_mode(regs)) || (regs->cs == __USER_CS);
++ break;
++
++ case 0x60:
++		/* 0x64 through 0x67 are valid prefixes in all modes. */
++ scan_more = (instr_lo & 0xC) == 0x4;
++ break;
++ case 0xF0:
++ /* 0xF0, 0xF2, and 0xF3 are valid prefixes in all modes. */
++ scan_more = !instr_lo || (instr_lo>>1) == 1;
++ break;
++ case 0x00:
++ /* Prefetch instruction is 0x0F0D or 0x0F18 */
++ scan_more = 0;
++ if (__get_user(opcode, instr))
++ break;
++ prefetch = (instr_lo == 0xF) &&
++ (opcode == 0x0D || opcode == 0x18);
++ break;
++ default:
++ scan_more = 0;
++ break;
++ }
++ }
++ return prefetch;
++}
++
++static int bad_address(void *p)
++{
++ unsigned long dummy;
++ return __get_user(dummy, (unsigned long *)p);
++}
++
++void dump_pagetable(unsigned long address)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ if (bad_address(pgd)) goto bad;
++ printk("PGD %lx ", pgd_val(*pgd));
++ if (!pgd_present(*pgd)) goto ret;
++
++ pud = pud_offset(pgd, address);
++ if (bad_address(pud)) goto bad;
++ printk("PUD %lx ", pud_val(*pud));
++ if (!pud_present(*pud)) goto ret;
++
++ pmd = pmd_offset(pud, address);
++ if (bad_address(pmd)) goto bad;
++ printk("PMD %lx ", pmd_val(*pmd));
++ if (!pmd_present(*pmd)) goto ret;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (bad_address(pte)) goto bad;
++ printk("PTE %lx", pte_val(*pte));
++ret:
++ printk("\n");
++ return;
++bad:
++ printk("BAD\n");
++}
++
++static const char errata93_warning[] =
++KERN_ERR "******* Your BIOS seems to not contain a fix for K8 errata #93\n"
++KERN_ERR "******* Working around it, but it may cause SEGVs or burn power.\n"
++KERN_ERR "******* Please consider a BIOS update.\n"
++KERN_ERR "******* Disabling USB legacy in the BIOS may also help.\n";
++
++/* Workaround for K8 erratum #93 & buggy BIOS.
++ BIOS SMM functions are required to use a specific workaround
++ to avoid corruption of the 64bit RIP register on C stepping K8.
++   Many BIOSes that weren't tested properly miss this.
++   The OS sees this as a page fault with the upper 32 bits of RIP cleared.
++   Try to work around it here.
++   Note we only handle faults in the kernel here. */
++
++static int is_errata93(struct pt_regs *regs, unsigned long address)
++{
++ static int warned;
++ if (address != regs->rip)
++ return 0;
++ if ((address >> 32) != 0)
++ return 0;
++ address |= 0xffffffffUL << 32;
++ if ((address >= (u64)_stext && address <= (u64)_etext) ||
++ (address >= MODULES_VADDR && address <= MODULES_END)) {
++ if (!warned) {
++ printk(errata93_warning);
++ warned = 1;
++ }
++ regs->rip = address;
++ return 1;
++ }
++ return 0;
++}
++
++int unhandled_signal(struct task_struct *tsk, int sig)
++{
++ if (tsk->pid == 1)
++ return 1;
++ if (tsk->ptrace & PT_PTRACED)
++ return 0;
++ return (tsk->sighand->action[sig-1].sa.sa_handler == SIG_IGN) ||
++ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
++}
++
++static noinline void pgtable_bad(unsigned long address, struct pt_regs *regs,
++ unsigned long error_code)
++{
++ unsigned long flags = oops_begin();
++ struct task_struct *tsk;
++
++ printk(KERN_ALERT "%s: Corrupted page table at address %lx\n",
++ current->comm, address);
++ dump_pagetable(address);
++ tsk = current;
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Bad pagetable", regs, error_code);
++ oops_end(flags);
++ do_exit(SIGKILL);
++}
++
++/*
++ * Handle a fault on the vmalloc area
++ *
++ * This assumes no large pages in there.
++ */
++static int vmalloc_fault(unsigned long address)
++{
++ pgd_t *pgd, *pgd_ref;
++ pud_t *pud, *pud_ref;
++ pmd_t *pmd, *pmd_ref;
++ pte_t *pte, *pte_ref;
++
++ /* Copy kernel mappings over when needed. This can also
++	   happen within a race in page table update. In the latter
++ case just flush. */
++
++ /* On Xen the line below does not always work. Needs investigating! */
++ /*pgd = pgd_offset(current->mm ?: &init_mm, address);*/
++ pgd = __va(read_cr3() & PHYSICAL_PAGE_MASK);
++ pgd += pgd_index(address);
++ pgd_ref = pgd_offset_k(address);
++ if (pgd_none(*pgd_ref))
++ return -1;
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++
++ /* Below here mismatches are bugs because these lower tables
++ are shared */
++
++ pud = pud_offset(pgd, address);
++ pud_ref = pud_offset(pgd_ref, address);
++ if (pud_none(*pud_ref))
++ return -1;
++ if (pud_none(*pud) || pud_page(*pud) != pud_page(*pud_ref))
++ BUG();
++ pmd = pmd_offset(pud, address);
++ pmd_ref = pmd_offset(pud_ref, address);
++ if (pmd_none(*pmd_ref))
++ return -1;
++ if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
++ BUG();
++ pte_ref = pte_offset_kernel(pmd_ref, address);
++ if (!pte_present(*pte_ref))
++ return -1;
++ pte = pte_offset_kernel(pmd, address);
++ /* Don't use pte_page here, because the mappings can point
++ outside mem_map, and the NUMA hash lookup cannot handle
++ that. */
++ if (!pte_present(*pte) || pte_pfn(*pte) != pte_pfn(*pte_ref))
++ BUG();
++ return 0;
++}
++
++int page_fault_trace = 0;
++int exception_trace = 1;
++
++
++#define MEM_VERBOSE 1
++
++#ifdef MEM_VERBOSE
++#define MEM_LOG(_f, _a...) \
++ printk("fault.c:[%d]-> " _f "\n", \
++ __LINE__ , ## _a )
++#else
++#define MEM_LOG(_f, _a...) ((void)0)
++#endif
++
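++/*
++ * A fault is "spurious" when the page tables already grant the attempted
++ * access: this happens when a PTE was upgraded (e.g. read-only to
++ * read/write) but the TLB still holds the stale, more restrictive entry.
++ * Such faults need no handling beyond retrying the access.
++ */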
++static int spurious_fault(struct pt_regs *regs,
++ unsigned long address,
++ unsigned long error_code)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++#ifdef CONFIG_XEN
++ /* Faults in hypervisor area are never spurious. */
++ if ((address >= HYPERVISOR_VIRT_START) &&
++ (address < HYPERVISOR_VIRT_END))
++ return 0;
++#endif
++
++ /* Reserved-bit violation or user access to kernel space? */
++ if (error_code & (PF_RSVD|PF_USER))
++ return 0;
++
++ pgd = init_mm.pgd + pgd_index(address);
++ if (!pgd_present(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return 0;
++
++ pte = pte_offset_kernel(pmd, address);
++ if (!pte_present(*pte))
++ return 0;
++ if ((error_code & PF_WRITE) && !pte_write(*pte))
++ return 0;
++ if ((error_code & PF_INSTR) && (__pte_val(*pte) & _PAGE_NX))
++ return 0;
++
++ return 1;
++}
++
++/*
++ * This routine handles page faults. It determines the address,
++ * and the problem, and then passes it off to one of the appropriate
++ * routines.
++ */
++asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
++ unsigned long error_code)
++{
++ struct task_struct *tsk;
++ struct mm_struct *mm;
++ struct vm_area_struct * vma;
++ unsigned long address;
++ const struct exception_table_entry *fixup;
++ int write;
++ unsigned long flags;
++ siginfo_t info;
++
++ if (!user_mode(regs))
++ error_code &= ~PF_USER; /* means kernel */
++
++ tsk = current;
++ mm = tsk->mm;
++ prefetchw(&mm->mmap_sem);
++
++ /* get the address */
++ address = current_vcpu_info()->arch.cr2;
++
++ info.si_code = SEGV_MAPERR;
++
++
++ /*
++ * We fault-in kernel-space virtual memory on-demand. The
++ * 'reference' page table is init_mm.pgd.
++ *
++ * NOTE! We MUST NOT take any locks for this case. We may
++ * be in an interrupt or a critical region, and should
++ * only copy the information from the master page table,
++ * nothing more.
++ *
++ * This verifies that the fault happens in kernel space
++	 * ((error_code & PF_USER) == 0), and that the fault was not a
++	 * protection error ((error_code & (PF_PROT|PF_RSVD)) == 0).
++ */
++ if (unlikely(address >= TASK_SIZE64)) {
++ /*
++ * Don't check for the module range here: its PML4
++ * is always initialized because it's shared with the main
++ * kernel text. Only vmalloc may need PML4 syncups.
++ */
++ if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
++ ((address >= VMALLOC_START && address < VMALLOC_END))) {
++ if (vmalloc_fault(address) >= 0)
++ return;
++ }
++ /* Can take a spurious fault if mapping changes R/O -> R/W. */
++ if (spurious_fault(regs, address, error_code))
++ return;
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++ /*
++ * Don't take the mm semaphore here. If we fixup a prefetch
++ * fault we could otherwise deadlock.
++ */
++ goto bad_area_nosemaphore;
++ }
++
++ if (notify_page_fault(DIE_PAGE_FAULT, "page fault", regs, error_code, 14,
++ SIGSEGV) == NOTIFY_STOP)
++ return;
++
++ if (likely(regs->eflags & X86_EFLAGS_IF))
++ local_irq_enable();
++
++ if (unlikely(page_fault_trace))
++ printk("pagefault rip:%lx rsp:%lx cs:%lu ss:%lu address %lx error %lx\n",
++		       regs->rip, regs->rsp, regs->cs, regs->ss, address, error_code);
++
++ if (unlikely(error_code & PF_RSVD))
++ pgtable_bad(address, regs, error_code);
++
++ /*
++ * If we're in an interrupt or have no user
++ * context, we must not take the fault..
++ */
++ if (unlikely(in_atomic() || !mm))
++ goto bad_area_nosemaphore;
++
++ again:
++ /* When running in the kernel we expect faults to occur only to
++ * addresses in user space. All other faults represent errors in the
++	 * kernel and should generate an OOPS. Unfortunately, in the case of an
++ * erroneous fault occurring in a code path which already holds mmap_sem
++ * we will deadlock attempting to validate the fault against the
++ * address space. Luckily the kernel only validly references user
++ * space from well defined areas of code, which are listed in the
++ * exceptions table.
++ *
++ * As the vast majority of faults will be valid we will only perform
++	 * the source reference check when there is a possibility of a deadlock.
++ * Attempt to lock the address space, if we cannot we then validate the
++ * source. If this is invalid we can skip the address space check,
++ * thus avoiding the deadlock.
++ */
++ if (!down_read_trylock(&mm->mmap_sem)) {
++ if ((error_code & PF_USER) == 0 &&
++ !search_exception_tables(regs->rip))
++ goto bad_area_nosemaphore;
++ down_read(&mm->mmap_sem);
++ }
++
++ vma = find_vma(mm, address);
++ if (!vma)
++ goto bad_area;
++ if (likely(vma->vm_start <= address))
++ goto good_area;
++ if (!(vma->vm_flags & VM_GROWSDOWN))
++ goto bad_area;
++	if (error_code & PF_USER) {
++ /* Allow userspace just enough access below the stack pointer
++ * to let the 'enter' instruction work.
++ */
++ if (address + 65536 + 32 * sizeof(unsigned long) < regs->rsp)
++ goto bad_area;
++ }
++ if (expand_stack(vma, address))
++ goto bad_area;
++/*
++ * Ok, we have a good vm_area for this memory access, so
++ * we can handle it..
++ */
++good_area:
++ info.si_code = SEGV_ACCERR;
++ write = 0;
++ switch (error_code & (PF_PROT|PF_WRITE)) {
++ default: /* 3: write, present */
++ /* fall through */
++ case PF_WRITE: /* write, not present */
++ if (!(vma->vm_flags & VM_WRITE))
++ goto bad_area;
++ write++;
++ break;
++ case PF_PROT: /* read, present */
++ goto bad_area;
++ case 0: /* read, not present */
++ if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
++ goto bad_area;
++ }
++
++ /*
++ * If for any reason at all we couldn't handle the fault,
++ * make sure we exit gracefully rather than endlessly redo
++ * the fault.
++ */
++ switch (handle_mm_fault(mm, vma, address, write)) {
++ case VM_FAULT_MINOR:
++ tsk->min_flt++;
++ break;
++ case VM_FAULT_MAJOR:
++ tsk->maj_flt++;
++ break;
++ case VM_FAULT_SIGBUS:
++ goto do_sigbus;
++ default:
++ goto out_of_memory;
++ }
++
++ up_read(&mm->mmap_sem);
++ return;
++
++/*
++ * Something tried to access memory that isn't in our memory map..
++ * Fix it, but check if it's kernel or user first..
++ */
++bad_area:
++ up_read(&mm->mmap_sem);
++
++bad_area_nosemaphore:
++ /* User mode accesses just cause a SIGSEGV */
++ if (error_code & PF_USER) {
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++		/* Work around K8 erratum #100: K8 in compat mode
++ occasionally jumps to illegal addresses >4GB. We
++ catch this here in the page fault handler because
++ these addresses are not reachable. Just detect this
++ case and return. Any code segment in LDT is
++ compatibility mode. */
++ if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) &&
++ (address >> 32))
++ return;
++
++ if (exception_trace && unhandled_signal(tsk, SIGSEGV)) {
++ printk(
++ "%s%s[%d]: segfault at %016lx rip %016lx rsp %016lx error %lx\n",
++ tsk->pid > 1 ? KERN_INFO : KERN_EMERG,
++ tsk->comm, tsk->pid, address, regs->rip,
++ regs->rsp, error_code);
++ }
++
++ tsk->thread.cr2 = address;
++ /* Kernel addresses are always protection faults */
++ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGSEGV;
++ info.si_errno = 0;
++ /* info.si_code has been set above */
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGSEGV, &info, tsk);
++ return;
++ }
++
++no_context:
++
++ /* Are we prepared to handle this kernel fault? */
++ fixup = search_exception_tables(regs->rip);
++ if (fixup) {
++ regs->rip = fixup->fixup;
++ return;
++ }
++
++ /*
++ * Hall of shame of CPU/BIOS bugs.
++ */
++
++ if (is_prefetch(regs, address, error_code))
++ return;
++
++ if (is_errata93(regs, address))
++ return;
++
++/*
++ * Oops. The kernel tried to access some bad page. We'll have to
++ * terminate things with extreme prejudice.
++ */
++
++ flags = oops_begin();
++
++ if (address < PAGE_SIZE)
++ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++ else
++ printk(KERN_ALERT "Unable to handle kernel paging request");
++ printk(" at %016lx RIP: \n" KERN_ALERT,address);
++ printk_address(regs->rip);
++ dump_pagetable(address);
++ tsk->thread.cr2 = address;
++ tsk->thread.trap_no = 14;
++ tsk->thread.error_code = error_code;
++ __die("Oops", regs, error_code);
++ /* Executive summary in case the body of the oops scrolled away */
++ printk(KERN_EMERG "CR2: %016lx\n", address);
++ oops_end(flags);
++ do_exit(SIGKILL);
++
++/*
++ * We ran out of memory, or some other thing happened to us that made
++ * us unable to handle the page fault gracefully.
++ */
++out_of_memory:
++ up_read(&mm->mmap_sem);
++ if (current->pid == 1) {
++ yield();
++ goto again;
++ }
++ printk("VM: killing process %s\n", tsk->comm);
++	if (error_code & PF_USER)
++ do_exit(SIGKILL);
++ goto no_context;
++
++do_sigbus:
++ up_read(&mm->mmap_sem);
++
++ /* Kernel mode? Handle exceptions or die */
++ if (!(error_code & PF_USER))
++ goto no_context;
++
++ tsk->thread.cr2 = address;
++ tsk->thread.error_code = error_code;
++ tsk->thread.trap_no = 14;
++ info.si_signo = SIGBUS;
++ info.si_errno = 0;
++ info.si_code = BUS_ADRERR;
++ info.si_addr = (void __user *)address;
++ force_sig_info(SIGBUS, &info, tsk);
++ return;
++}
++
++DEFINE_SPINLOCK(pgd_lock);
++struct page *pgd_list;
++
++void vmalloc_sync_all(void)
++{
++ /* Note that races in the updates of insync and start aren't
++ problematic:
++ insync can only get set bits added, and updates to start are only
++ improving performance (without affecting correctness if undone). */
++ static DECLARE_BITMAP(insync, PTRS_PER_PGD);
++ static unsigned long start = VMALLOC_START & PGDIR_MASK;
++ unsigned long address;
++
++ for (address = start; address <= VMALLOC_END; address += PGDIR_SIZE) {
++ if (!test_bit(pgd_index(address), insync)) {
++ const pgd_t *pgd_ref = pgd_offset_k(address);
++ struct page *page;
++
++ if (pgd_none(*pgd_ref))
++ continue;
++ spin_lock(&pgd_lock);
++ for (page = pgd_list; page;
++ page = (struct page *)page->index) {
++ pgd_t *pgd;
++ pgd = (pgd_t *)page_address(page) + pgd_index(address);
++ if (pgd_none(*pgd))
++ set_pgd(pgd, *pgd_ref);
++ else
++ BUG_ON(pgd_page(*pgd) != pgd_page(*pgd_ref));
++ }
++ spin_unlock(&pgd_lock);
++ set_bit(pgd_index(address), insync);
++ }
++ if (address == start)
++ start = address + PGDIR_SIZE;
++ }
++ /* Check that there is no need to do the same for the modules area. */
++ BUILD_BUG_ON(!(MODULES_VADDR > __START_KERNEL));
++ BUILD_BUG_ON(!(((MODULES_END - 1) & PGDIR_MASK) ==
++ (__START_KERNEL & PGDIR_MASK)));
++}
++
++static int __init enable_pagefaulttrace(char *str)
++{
++ page_fault_trace = 1;
++ return 1;
++}
++__setup("pagefaulttrace", enable_pagefaulttrace);
+Index: head-2008-11-25/arch/x86/mm/init_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/init_64-xen.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,1206 @@
++/*
++ * linux/arch/x86_64/mm/init.c
++ *
++ * Copyright (C) 1995 Linus Torvalds
++ * Copyright (C) 2000 Pavel Machek <pavel@suse.cz>
++ * Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
++ *
++ * Jun Nakajima <jun.nakajima@intel.com>
++ * Modified for Xen.
++ */
++
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ptrace.h>
++#include <linux/mman.h>
++#include <linux/mm.h>
++#include <linux/swap.h>
++#include <linux/smp.h>
++#include <linux/init.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/proc_fs.h>
++#include <linux/pci.h>
++#include <linux/poison.h>
++#include <linux/dma-mapping.h>
++#include <linux/module.h>
++#include <linux/memory_hotplug.h>
++
++#include <asm/processor.h>
++#include <asm/system.h>
++#include <asm/uaccess.h>
++#include <asm/pgtable.h>
++#include <asm/pgalloc.h>
++#include <asm/dma.h>
++#include <asm/fixmap.h>
++#include <asm/e820.h>
++#include <asm/apic.h>
++#include <asm/tlb.h>
++#include <asm/mmu_context.h>
++#include <asm/proto.h>
++#include <asm/smp.h>
++#include <asm/sections.h>
++
++#include <xen/features.h>
++
++#ifndef Dprintk
++#define Dprintk(x...)
++#endif
++
++struct dma_mapping_ops* dma_ops;
++EXPORT_SYMBOL(dma_ops);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++unsigned int __kernel_page_user;
++EXPORT_SYMBOL(__kernel_page_user);
++#endif
++
++int after_bootmem;
++
++static unsigned long dma_reserve __initdata;
++
++DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
++extern unsigned long start_pfn;
++
++/*
++ * Use this until direct mapping is established, i.e. before __va() is
++ * available in init_memory_mapping().
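++ *
++ * addr_to_page() converts the machine address found in a pagetable entry
++ * into the kernel-virtual address (via the __START_KERNEL_map alias) of
++ * the pagetable page it references, translating the machine frame to a
++ * pseudo-physical frame with mfn_to_pfn().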
++ */
++
++#define addr_to_page(addr, page) \
++ (addr) &= PHYSICAL_PAGE_MASK; \
++ (page) = ((unsigned long *) ((unsigned long) \
++ (((mfn_to_pfn((addr) >> PAGE_SHIFT)) << PAGE_SHIFT) + \
++ __START_KERNEL_map)))
++
++static void __meminit early_make_page_readonly(void *va, unsigned int feature)
++{
++ unsigned long addr, _va = (unsigned long)va;
++ pte_t pte, *ptep;
++ unsigned long *page = (unsigned long *) init_level4_pgt;
++
++ BUG_ON(after_bootmem);
++
++ if (xen_feature(feature))
++ return;
++
++ addr = (unsigned long) page[pgd_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pud_index(_va)];
++ addr_to_page(addr, page);
++
++ addr = page[pmd_index(_va)];
++ addr_to_page(addr, page);
++
++ ptep = (pte_t *) &page[pte_index(_va)];
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(_va, pte, 0))
++ BUG();
++}
++
++static void __make_page_readonly(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte & ~_PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
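++	/*
++	 * A pagetable page reached through vmalloc must have its
++	 * direct-mapping alias adjusted as well: Xen does not allow any
++	 * writable mapping of a live pagetable page.
++	 */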
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_readonly(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++static void __make_page_writable(void *va)
++{
++ pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t pte, *ptep;
++ unsigned long addr = (unsigned long) va;
++
++ pgd = pgd_offset_k(addr);
++ pud = pud_offset(pgd, addr);
++ pmd = pmd_offset(pud, addr);
++ ptep = pte_offset_kernel(pmd, addr);
++
++ pte.pte = ptep->pte | _PAGE_RW;
++ if (HYPERVISOR_update_va_mapping(addr, pte, 0))
++ xen_l1_entry_update(ptep, pte); /* fallback */
++
++ if ((addr >= VMALLOC_START) && (addr < VMALLOC_END))
++ __make_page_writable(__va(pte_pfn(pte) << PAGE_SHIFT));
++}
++
++void make_page_readonly(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_readonly(va);
++}
++
++void make_page_writable(void *va, unsigned int feature)
++{
++ if (!xen_feature(feature))
++ __make_page_writable(va);
++}
++
++void make_pages_readonly(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_readonly(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
++
++void make_pages_writable(void *va, unsigned nr, unsigned int feature)
++{
++ if (xen_feature(feature))
++ return;
++
++ while (nr-- != 0) {
++ __make_page_writable(va);
++ va = (void*)((unsigned long)va + PAGE_SIZE);
++ }
++}
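++
++#if 0
++/*
++ * Illustrative sketch only (not part of this patch, never compiled):
++ * a typical caller turns a freshly allocated page into a pagetable
++ * page by dropping its writable mapping first, unless the hypervisor
++ * provides writable page tables.
++ */
++static void example_prepare_pt_page(void *pt)
++{
++ make_page_readonly(pt, XENFEAT_writable_page_tables);
++ /* ... now safe to install pt via set_pmd()/set_pud() ... */
++}
++#endif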
++
++/*
++ * NOTE: pagetable_init allocates all the fixmap pagetables contiguously
++ * in physical space, so we can cache the location of the first one and
++ * move around without checking the pgd every time.
++ */
++
++void show_mem(void)
++{
++ long i, total = 0, reserved = 0;
++ long shared = 0, cached = 0;
++ pg_data_t *pgdat;
++ struct page *page;
++
++ printk(KERN_INFO "Mem-info:\n");
++ show_free_areas();
++ printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
++
++ for_each_online_pgdat(pgdat) {
++ for (i = 0; i < pgdat->node_spanned_pages; ++i) {
++ page = pfn_to_page(pgdat->node_start_pfn + i);
++ total++;
++ if (PageReserved(page))
++ reserved++;
++ else if (PageSwapCache(page))
++ cached++;
++ else if (page_count(page))
++ shared += page_count(page) - 1;
++ }
++ }
++ printk(KERN_INFO "%lu pages of RAM\n", total);
++ printk(KERN_INFO "%lu reserved pages\n",reserved);
++ printk(KERN_INFO "%lu pages shared\n",shared);
++ printk(KERN_INFO "%lu pages swap cached\n",cached);
++}
++
++
++static __init void *spp_getpage(void)
++{
++ void *ptr;
++ if (after_bootmem)
++ ptr = (void *) get_zeroed_page(GFP_ATOMIC);
++ else if (start_pfn < table_end) {
++ ptr = __va(start_pfn << PAGE_SHIFT);
++ start_pfn++;
++ memset(ptr, 0, PAGE_SIZE);
++ } else
++ ptr = alloc_bootmem_pages(PAGE_SIZE);
++ if (!ptr || ((unsigned long)ptr & ~PAGE_MASK))
++ panic("set_pte_phys: cannot allocate page data %s\n", after_bootmem?"after bootmem":"");
++
++ Dprintk("spp_getpage %p\n", ptr);
++ return ptr;
++}
++
++#define pgd_offset_u(address) (__user_pgd(init_level4_pgt) + pgd_index(address))
++#define pud_offset_u(address) (level3_user_pgt + pud_index(address))
++
++static __init void set_pte_phys(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot, int user_mode)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = (user_mode ? pgd_offset_u(vaddr) : pgd_offset_k(vaddr));
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = (user_mode ? pud_offset_u(vaddr) : pud_offset(pgd, vaddr));
++ if (pud_none(*pud)) {
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ if (pgprot_val(prot))
++ new_pte = pfn_pte(phys >> PAGE_SHIFT, prot);
++ else
++ new_pte = __pte(0);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (!pte_none(*pte) && __pte_val(new_pte) &&
++ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++ pte_ERROR(*pte);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++static __init void set_pte_phys_ma(unsigned long vaddr,
++ unsigned long phys, pgprot_t prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++
++ Dprintk("set_pte_phys %lx to %lx\n", vaddr, phys);
++
++ pgd = pgd_offset_k(vaddr);
++ if (pgd_none(*pgd)) {
++ printk("PGD FIXMAP MISSING, it should be setup in head.S!\n");
++ return;
++ }
++ pud = pud_offset(pgd, vaddr);
++ if (pud_none(*pud)) {
++
++ pmd = (pmd_t *) spp_getpage();
++ make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | _PAGE_USER));
++ if (pmd != pmd_offset(pud, 0)) {
++ printk("PAGETABLE BUG #01! %p <-> %p\n", pmd, pmd_offset(pud,0));
++ return;
++ }
++ }
++ pmd = pmd_offset(pud, vaddr);
++ if (pmd_none(*pmd)) {
++ pte = (pte_t *) spp_getpage();
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE | _PAGE_USER));
++ if (pte != pte_offset_kernel(pmd, 0)) {
++ printk("PAGETABLE BUG #02!\n");
++ return;
++ }
++ }
++ new_pte = pfn_pte_ma(phys >> PAGE_SHIFT, prot);
++
++ pte = pte_offset_kernel(pmd, vaddr);
++ if (!pte_none(*pte) && __pte_val(new_pte) &&
++#ifdef CONFIG_ACPI
++ /* __acpi_map_table() fails to properly call clear_fixmap() */
++ (vaddr < __fix_to_virt(FIX_ACPI_END) ||
++ vaddr > __fix_to_virt(FIX_ACPI_BEGIN)) &&
++#endif
++ __pte_val(*pte) != (__pte_val(new_pte) & __supported_pte_mask))
++ pte_ERROR(*pte);
++ set_pte(pte, new_pte);
++
++ /*
++ * It's enough to flush this one mapping.
++ * (PGE mappings get flushed as well)
++ */
++ __flush_tlb_one(vaddr);
++}
++
++/* NOTE: this is meant to be run only at boot */
++void __init
++__set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t prot)
++{
++ unsigned long address = __fix_to_virt(idx);
++
++ if (idx >= __end_of_fixed_addresses) {
++ printk("Invalid __set_fixmap\n");
++ return;
++ }
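++ /*
++ * Note: except for the vsyscall pages, which are mapped into both
++ * the kernel and user pagetables below, 'phys' is a machine address
++ * here and is installed via set_pte_phys_ma().
++ */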
++ switch (idx) {
++ case VSYSCALL_LAST_PAGE ... VSYSCALL_FIRST_PAGE:
++ set_pte_phys(address, phys, prot, 0);
++ set_pte_phys(address, phys, prot, 1);
++ break;
++ default:
++ set_pte_phys_ma(address, phys, prot);
++ break;
++ }
++}
++
++unsigned long __initdata table_start, table_end;
++
++static __meminit void *alloc_static_page(unsigned long *phys)
++{
++ unsigned long va = (start_pfn << PAGE_SHIFT) + __START_KERNEL_map;
++
++ if (after_bootmem) {
++ void *adr = (void *)get_zeroed_page(GFP_ATOMIC);
++
++ *phys = __pa(adr);
++ return adr;
++ }
++
++ *phys = start_pfn << PAGE_SHIFT;
++ start_pfn++;
++ memset((void *)va, 0, PAGE_SIZE);
++ return (void *)va;
++}
++
++#define PTE_SIZE PAGE_SIZE
++
++static inline int make_readonly(unsigned long paddr)
++{
++ extern char __vsyscall_0;
++ int readonly = 0;
++
++ /* Make new page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (table_start << PAGE_SHIFT))
++ && (paddr < (table_end << PAGE_SHIFT)))
++ readonly = 1;
++ /* Make old page tables read-only. */
++ if (!xen_feature(XENFEAT_writable_page_tables)
++ && (paddr >= (xen_start_info->pt_base - __START_KERNEL_map))
++ && (paddr < (start_pfn << PAGE_SHIFT)))
++ readonly = 1;
++
++ /*
++ * No need for writable mapping of kernel image. This also ensures that
++ * page and descriptor tables embedded inside don't have writable
++ * mappings. Exclude the vsyscall area here, allowing alternative
++ * instruction patching to work.
++ */
++ if ((paddr >= __pa_symbol(&_text)) && (paddr < __pa_symbol(&_end))
++ && !(paddr >= __pa_symbol(&__vsyscall_0)
++ && paddr < __pa_symbol(&__vsyscall_0) + PAGE_SIZE))
++ readonly = 1;
++
++ return readonly;
++}
++
++#ifndef CONFIG_XEN
++/* Must run before zap_low_mappings */
++__init void *early_ioremap(unsigned long addr, unsigned long size)
++{
++ unsigned long map = round_down(addr, LARGE_PAGE_SIZE);
++
++ /* actually usually some more */
++ if (size >= LARGE_PAGE_SIZE) {
++ printk("SMBIOS area too long %lu\n", size);
++ return NULL;
++ }
++ set_pmd(temp_mappings[0].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ map += LARGE_PAGE_SIZE;
++ set_pmd(temp_mappings[1].pmd, __pmd(map | _KERNPG_TABLE | _PAGE_PSE));
++ __flush_tlb();
++ return temp_mappings[0].address + (addr & (LARGE_PAGE_SIZE-1));
++}
++
++/* To avoid virtual aliases later */
++__init void early_iounmap(void *addr, unsigned long size)
++{
++ if ((void *)round_down((unsigned long)addr, LARGE_PAGE_SIZE) != temp_mappings[0].address)
++ printk("early_iounmap: bad address %p\n", addr);
++ set_pmd(temp_mappings[0].pmd, __pmd(0));
++ set_pmd(temp_mappings[1].pmd, __pmd(0));
++ __flush_tlb();
++}
++#endif
++
++static void __meminit
++phys_pmd_init(pmd_t *pmd, unsigned long address, unsigned long end)
++{
++ int i, k;
++
++ for (i = 0; i < PTRS_PER_PMD; pmd++, i++) {
++ unsigned long pte_phys;
++ pte_t *pte, *pte_save;
++
++ if (address >= end)
++ break;
++ pte = alloc_static_page(&pte_phys);
++ pte_save = pte;
++ for (k = 0; k < PTRS_PER_PTE; pte++, k++, address += PTE_SIZE) {
++ unsigned long pteval = address | _PAGE_NX | _KERNPG_TABLE;
++
++ if (address >= (after_bootmem
++ ? end
++ : xen_start_info->nr_pages << PAGE_SHIFT))
++ pteval = 0;
++ else if (make_readonly(address))
++ pteval &= ~_PAGE_RW;
++ set_pte(pte, __pte(pteval & __supported_pte_mask));
++ }
++ if (!after_bootmem) {
++ early_make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++ *pmd = __pmd(pte_phys | _KERNPG_TABLE);
++ } else {
++ make_page_readonly(pte_save, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(pte_phys | _KERNPG_TABLE));
++ }
++ }
++}
++
++static void __meminit
++phys_pmd_update(pud_t *pud, unsigned long address, unsigned long end)
++{
++ pmd_t *pmd = pmd_offset(pud, (unsigned long)__va(address));
++
++ if (pmd_none(*pmd)) {
++ spin_lock(&init_mm.page_table_lock);
++ phys_pmd_init(pmd, address, end);
++ spin_unlock(&init_mm.page_table_lock);
++ __flush_tlb_all();
++ }
++}
++
++static void __meminit phys_pud_init(pud_t *pud, unsigned long address, unsigned long end)
++{
++ long i = pud_index(address);
++
++ pud = pud + i;
++
++ if (after_bootmem && pud_val(*pud)) {
++ phys_pmd_update(pud, address, end);
++ return;
++ }
++
++ for (; i < PTRS_PER_PUD; pud++, i++) {
++ unsigned long paddr, pmd_phys;
++ pmd_t *pmd;
++
++ paddr = (address & PGDIR_MASK) + i*PUD_SIZE;
++ if (paddr >= end)
++ break;
++
++ pmd = alloc_static_page(&pmd_phys);
++
++ spin_lock(&init_mm.page_table_lock);
++ *pud = __pud(pmd_phys | _KERNPG_TABLE);
++ phys_pmd_init(pmd, paddr, end);
++ spin_unlock(&init_mm.page_table_lock);
++
++ early_make_page_readonly(pmd, XENFEAT_writable_page_tables);
++ }
++ __flush_tlb();
++}
++
++void __init xen_init_pt(void)
++{
++ unsigned long addr, *page;
++
++ /* Find the initial pte page that was built for us. */
++ page = (unsigned long *)xen_start_info->pt_base;
++ addr = page[pgd_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(__START_KERNEL_map)];
++ addr_to_page(addr, page);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ /* On Xen 3.0.2 and older we may need to explicitly specify _PAGE_USER
++ in kernel PTEs. We check that here. */
++ if (HYPERVISOR_xen_version(XENVER_version, NULL) <= 0x30000) {
++ unsigned long *pg;
++ pte_t pte;
++
++ /* Mess with the initial mapping of page 0. It's not needed. */
++ BUILD_BUG_ON(__START_KERNEL <= __START_KERNEL_map);
++ addr = page[pmd_index(__START_KERNEL_map)];
++ addr_to_page(addr, pg);
++ pte.pte = pg[pte_index(__START_KERNEL_map)];
++ BUG_ON(!(pte.pte & _PAGE_PRESENT));
++
++ /* If _PAGE_USER isn't set, we obviously do not need it. */
++ if (pte.pte & _PAGE_USER) {
++ /* _PAGE_USER is needed, but is it set implicitly? */
++ pte.pte &= ~_PAGE_USER;
++ if ((HYPERVISOR_update_va_mapping(__START_KERNEL_map,
++ pte, 0) != 0) ||
++ !(pg[pte_index(__START_KERNEL_map)] & _PAGE_USER))
++ /* We need to explicitly specify _PAGE_USER. */
++ __kernel_page_user = _PAGE_USER;
++ }
++ }
++#endif
++
++ /* Construct mapping of initial pte page in our own directories. */
++ init_level4_pgt[pgd_index(__START_KERNEL_map)] =
++ __pgd(__pa_symbol(level3_kernel_pgt) | _PAGE_TABLE);
++ level3_kernel_pgt[pud_index(__START_KERNEL_map)] =
++ __pud(__pa_symbol(level2_kernel_pgt) | _PAGE_TABLE);
++ memcpy(level2_kernel_pgt, page, PAGE_SIZE);
++
++ __user_pgd(init_level4_pgt)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++
++ early_make_page_readonly(init_level4_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(__user_pgd(init_level4_pgt),
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_kernel_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level3_user_pgt,
++ XENFEAT_writable_page_tables);
++ early_make_page_readonly(level2_kernel_pgt,
++ XENFEAT_writable_page_tables);
++
++ if (!xen_feature(XENFEAT_writable_page_tables)) {
++ xen_pgd_pin(__pa_symbol(init_level4_pgt));
++ xen_pgd_pin(__pa_symbol(__user_pgd(init_level4_pgt)));
++ }
++}
++
++static void __init extend_init_mapping(unsigned long tables_space)
++{
++ unsigned long va = __START_KERNEL_map;
++ unsigned long phys, addr, *pte_page;
++ pmd_t *pmd;
++ pte_t *pte, new_pte;
++ unsigned long *page = (unsigned long *)init_level4_pgt;
++
++ addr = page[pgd_index(va)];
++ addr_to_page(addr, page);
++ addr = page[pud_index(va)];
++ addr_to_page(addr, page);
++
++ /* Kill mapping of low 1MB. */
++ while (va < (unsigned long)&_text) {
++ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++ BUG();
++ va += PAGE_SIZE;
++ }
++
++ /* Ensure init mappings cover kernel text/data and initial tables. */
++ while (va < (__START_KERNEL_map
++ + (start_pfn << PAGE_SHIFT)
++ + tables_space)) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd)) {
++ pte_page = alloc_static_page(&phys);
++ early_make_page_readonly(
++ pte_page, XENFEAT_writable_page_tables);
++ set_pmd(pmd, __pmd(phys | _KERNPG_TABLE));
++ } else {
++ addr = page[pmd_index(va)];
++ addr_to_page(addr, pte_page);
++ }
++ pte = (pte_t *)&pte_page[pte_index(va)];
++ if (pte_none(*pte)) {
++ new_pte = pfn_pte(
++ (va - __START_KERNEL_map) >> PAGE_SHIFT,
++ __pgprot(_KERNPG_TABLE));
++ xen_l1_entry_update(pte, new_pte);
++ }
++ va += PAGE_SIZE;
++ }
++
++ /* Finally, blow away any spurious initial mappings. */
++ while (1) {
++ pmd = (pmd_t *)&page[pmd_index(va)];
++ if (pmd_none(*pmd))
++ break;
++ if (HYPERVISOR_update_va_mapping(va, __pte_ma(0), 0))
++ BUG();
++ va += PAGE_SIZE;
++ }
++}
++
++static void __init find_early_table_space(unsigned long end)
++{
++ unsigned long puds, pmds, ptes, tables;
++
++ puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
++ pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT;
++ ptes = (end + PTE_SIZE - 1) >> PAGE_SHIFT;
++
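++ /*
++ * Each pagetable entry is 8 bytes on x86-64, hence the '* 8' below
++ * when sizing the worst-case pud/pmd/pte table space.
++ */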
++ tables = round_up(puds * 8, PAGE_SIZE) +
++ round_up(pmds * 8, PAGE_SIZE) +
++ round_up(ptes * 8, PAGE_SIZE);
++
++ extend_init_mapping(tables);
++
++ table_start = start_pfn;
++ table_end = table_start + (tables>>PAGE_SHIFT);
++
++ early_printk("kernel direct mapping tables up to %lx @ %lx-%lx\n",
++ end, table_start << PAGE_SHIFT,
++ (table_start << PAGE_SHIFT) + tables);
++}
++
++static void xen_finish_init_mapping(void)
++{
++ unsigned long i, start, end;
++
++ /* Re-vector virtual addresses pointing into the initial
++ mapping to the just-established permanent ones. */
++ xen_start_info = __va(__pa(xen_start_info));
++ xen_start_info->pt_base = (unsigned long)
++ __va(__pa(xen_start_info->pt_base));
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ phys_to_machine_mapping =
++ __va(__pa(xen_start_info->mfn_list));
++ xen_start_info->mfn_list = (unsigned long)
++ phys_to_machine_mapping;
++ }
++ if (xen_start_info->mod_start)
++ xen_start_info->mod_start = (unsigned long)
++ __va(__pa(xen_start_info->mod_start));
++
++ /* Destroy the Xen-created mappings beyond the kernel image as
++ * well as the temporary mappings created above. Prevents
++ * overlap with modules area (if init mapping is very big).
++ */
++ start = PAGE_ALIGN((unsigned long)_end);
++ end = __START_KERNEL_map + (table_end << PAGE_SHIFT);
++ for (; start < end; start += PAGE_SIZE)
++ if (HYPERVISOR_update_va_mapping(start, __pte_ma(0), 0))
++ BUG();
++
++ /* Allocate pte's for initial fixmaps from 'start_pfn' allocator. */
++ table_end = ~0UL;
++
++ /*
++ * Prefetch pte's for the bt_ioremap() area. It gets used before the
++ * boot-time allocator is online, so allocate-on-demand would fail.
++ */
++ for (i = FIX_BTMAP_END; i <= FIX_BTMAP_BEGIN; i++)
++ __set_fixmap(i, 0, __pgprot(0));
++
++ /* Switch to the real shared_info page, and clear the dummy page. */
++ set_fixmap(FIX_SHARED_INFO, xen_start_info->shared_info);
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++ memset(empty_zero_page, 0, sizeof(empty_zero_page));
++
++ /* Set up mapping of lowest 1MB of physical memory. */
++ for (i = 0; i < NR_FIX_ISAMAPS; i++)
++ if (is_initial_xendomain())
++ set_fixmap(FIX_ISAMAP_BEGIN - i, i * PAGE_SIZE);
++ else
++ __set_fixmap(FIX_ISAMAP_BEGIN - i,
++ virt_to_mfn(empty_zero_page)
++ << PAGE_SHIFT,
++ PAGE_KERNEL_RO);
++
++ /* Disable the 'start_pfn' allocator. */
++ table_end = start_pfn;
++}
++
++/* Setup the direct mapping of the physical memory at PAGE_OFFSET.
++ This runs before bootmem is initialized and gets pages directly from the
++ physical memory. To access them they are temporarily mapped. */
++void __meminit init_memory_mapping(unsigned long start, unsigned long end)
++{
++ unsigned long next;
++
++ Dprintk("init_memory_mapping\n");
++
++ /*
++ * Find space for the kernel direct mapping tables.
++ * Later we should allocate these tables in the node local to the
++ * memory being mapped. Unfortunately this currently happens before
++ * the nodes are discovered.
++ */
++ if (!after_bootmem)
++ find_early_table_space(end);
++
++ start = (unsigned long)__va(start);
++ end = (unsigned long)__va(end);
++
++ for (; start < end; start = next) {
++ unsigned long pud_phys;
++ pgd_t *pgd = pgd_offset_k(start);
++ pud_t *pud;
++
++ if (after_bootmem)
++ pud = pud_offset(pgd, start & PGDIR_MASK);
++ else
++ pud = alloc_static_page(&pud_phys);
++ next = start + PGDIR_SIZE;
++ if (next > end)
++ next = end;
++ phys_pud_init(pud, __pa(start), __pa(next));
++ if (!after_bootmem) {
++ early_make_page_readonly(pud, XENFEAT_writable_page_tables);
++ set_pgd(pgd_offset_k(start), mk_kernel_pgd(pud_phys));
++ }
++ }
++
++ if (!after_bootmem) {
++ BUG_ON(start_pfn != table_end);
++ xen_finish_init_mapping();
++ }
++
++ __flush_tlb_all();
++}
++
++void __cpuinit zap_low_mappings(int cpu)
++{
++ /* this is not required for Xen */
++#if 0
++ swap_low_mappings();
++#endif
++}
++
++/* Compute zone sizes for the DMA and DMA32 zones in a node. */
++__init void
++size_zones(unsigned long *z, unsigned long *h,
++ unsigned long start_pfn, unsigned long end_pfn)
++{
++ int i;
++ unsigned long w;
++
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ z[i] = 0;
++
++ if (start_pfn < MAX_DMA_PFN)
++ z[ZONE_DMA] = MAX_DMA_PFN - start_pfn;
++ if (start_pfn < MAX_DMA32_PFN) {
++ unsigned long dma32_pfn = MAX_DMA32_PFN;
++ if (dma32_pfn > end_pfn)
++ dma32_pfn = end_pfn;
++ z[ZONE_DMA32] = dma32_pfn - start_pfn;
++ }
++ z[ZONE_NORMAL] = end_pfn - start_pfn;
++
++ /* Remove lower zones from higher ones. */
++ w = 0;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ if (z[i])
++ z[i] -= w;
++ w += z[i];
++ }
++
++ /* Compute holes */
++ w = start_pfn;
++ for (i = 0; i < MAX_NR_ZONES; i++) {
++ unsigned long s = w;
++ w += z[i];
++ h[i] = e820_hole_size(s, w);
++ }
++
++ /* Add the space needed for mem_map to the holes too. */
++ for (i = 0; i < MAX_NR_ZONES; i++)
++ h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
++
++ /* The 16MB DMA zone has the kernel and other misc mappings.
++ Account them too */
++ if (h[ZONE_DMA]) {
++ h[ZONE_DMA] += dma_reserve;
++ if (h[ZONE_DMA] >= z[ZONE_DMA]) {
++ printk(KERN_WARNING
++ "Kernel too large and filling up ZONE_DMA?\n");
++ h[ZONE_DMA] = z[ZONE_DMA];
++ }
++ }
++}
++
++#ifndef CONFIG_NUMA
++void __init paging_init(void)
++{
++ unsigned long zones[MAX_NR_ZONES], holes[MAX_NR_ZONES];
++
++ memory_present(0, 0, end_pfn);
++ sparse_init();
++ size_zones(zones, holes, 0, end_pfn);
++ free_area_init_node(0, NODE_DATA(0), zones,
++ __pa(PAGE_OFFSET) >> PAGE_SHIFT, holes);
++
++ init_mm.context.pinned = 1;
++}
++#endif
++
++/* Unmap a kernel mapping if it exists. This is useful to avoid prefetches
++ from the CPU leading to inconsistent cache lines. address and size
++ must be aligned to 2MB boundaries.
++ Does nothing when the mapping doesn't exist. */
++void __init clear_kernel_mapping(unsigned long address, unsigned long size)
++{
++ unsigned long end = address + size;
++
++ BUG_ON(address & ~LARGE_PAGE_MASK);
++ BUG_ON(size & ~LARGE_PAGE_MASK);
++
++ for (; address < end; address += LARGE_PAGE_SIZE) {
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, address);
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, address);
++ if (!pmd || pmd_none(*pmd))
++ continue;
++ if (0 == (__pmd_val(*pmd) & _PAGE_PSE)) {
++ /* Could handle this, but it should not happen currently. */
++ printk(KERN_ERR
++ "clear_kernel_mapping: mapping has been split. will leak memory\n");
++ pmd_ERROR(*pmd);
++ }
++ set_pmd(pmd, __pmd(0));
++ }
++ __flush_tlb_all();
++}
++
++/*
++ * Memory hotplug specific functions
++ */
++void online_page(struct page *page)
++{
++ ClearPageReserved(page);
++ init_page_count(page);
++ __free_page(page);
++ totalram_pages++;
++ num_physpages++;
++}
++
++#ifdef CONFIG_MEMORY_HOTPLUG
++/*
++ * XXX: memory_add_physaddr_to_nid() finds the node id for a physical
++ * address handed in via the sysfs probe interface. When ACPI notifies a
++ * hot-add event it can determine the node id by searching the DSDT, but
++ * the probe interface carries no node id, so return node 0 for now.
++ */
++#ifdef CONFIG_NUMA
++int memory_add_physaddr_to_nid(u64 start)
++{
++ return 0;
++}
++#endif
++
++/*
++ * Memory is always added to the NORMAL zone. This means you will never get
++ * additional DMA/DMA32 memory.
++ */
++int arch_add_memory(int nid, u64 start, u64 size)
++{
++ struct pglist_data *pgdat = NODE_DATA(nid);
++ struct zone *zone = pgdat->node_zones + MAX_NR_ZONES-2;
++ unsigned long start_pfn = start >> PAGE_SHIFT;
++ unsigned long nr_pages = size >> PAGE_SHIFT;
++ int ret;
++
++ ret = __add_pages(zone, start_pfn, nr_pages);
++ if (ret)
++ goto error;
++
++ init_memory_mapping(start, (start + size -1));
++
++ return ret;
++error:
++ printk("%s: Problem encountered in __add_pages!\n", __func__);
++ return ret;
++}
++EXPORT_SYMBOL_GPL(arch_add_memory);
++
++int remove_memory(u64 start, u64 size)
++{
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(remove_memory);
++
++#else /* CONFIG_MEMORY_HOTPLUG */
++/*
++ * Memory hot-add without sparsemem: the mem_maps have been allocated in
++ * advance, so just online the pages.
++ */
++int __add_pages(struct zone *z, unsigned long start_pfn, unsigned long nr_pages)
++{
++ int err = -EIO;
++ unsigned long pfn;
++ unsigned long total = 0, mem = 0;
++ for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
++ if (pfn_valid(pfn)) {
++ online_page(pfn_to_page(pfn));
++ err = 0;
++ mem++;
++ }
++ total++;
++ }
++ if (!err) {
++ z->spanned_pages += total;
++ z->present_pages += mem;
++ z->zone_pgdat->node_spanned_pages += total;
++ z->zone_pgdat->node_present_pages += mem;
++ }
++ return err;
++}
++#endif /* CONFIG_MEMORY_HOTPLUG */
++
++static struct kcore_list kcore_mem, kcore_vmalloc, kcore_kernel, kcore_modules,
++ kcore_vsyscall;
++
++void __init mem_init(void)
++{
++ long codesize, reservedpages, datasize, initsize;
++ unsigned long pfn;
++
++ pci_iommu_alloc();
++
++ /* How many end-of-memory variables you have, grandma! */
++ max_low_pfn = end_pfn;
++ max_pfn = end_pfn;
++ num_physpages = end_pfn;
++ high_memory = (void *) __va(end_pfn * PAGE_SIZE);
++
++ /* clear the zero-page */
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
++ reservedpages = 0;
++
++ /* this will put all low memory onto the freelists */
++#ifdef CONFIG_NUMA
++ totalram_pages = numa_free_all_bootmem();
++#else
++ totalram_pages = free_all_bootmem();
++#endif
++ /* XEN: init and count pages outside initial allocation. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ ClearPageReserved(pfn_to_page(pfn));
++ init_page_count(pfn_to_page(pfn));
++ totalram_pages++;
++ }
++ reservedpages = end_pfn - totalram_pages - e820_hole_size(0, end_pfn);
++
++ after_bootmem = 1;
++
++ codesize = (unsigned long) &_etext - (unsigned long) &_text;
++ datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
++
++ /* Register memory areas for /proc/kcore */
++ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
++ kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
++ VMALLOC_END-VMALLOC_START);
++ kclist_add(&kcore_kernel, &_stext, _end - _stext);
++ kclist_add(&kcore_modules, (void *)MODULES_VADDR, MODULES_LEN);
++ kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
++ VSYSCALL_END - VSYSCALL_START);
++
++ printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, %ldk data, %ldk init)\n",
++ (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
++ end_pfn << (PAGE_SHIFT-10),
++ codesize >> 10,
++ reservedpages << (PAGE_SHIFT-10),
++ datasize >> 10,
++ initsize >> 10);
++
++#ifndef CONFIG_XEN
++#ifdef CONFIG_SMP
++ /*
++ * Sync boot_level4_pgt mappings with the init_level4_pgt
++ * except for the low identity mappings which are already zapped
++ * in init_level4_pgt. This sync-up is essential for AP's bringup
++ */
++ memcpy(boot_level4_pgt+1, init_level4_pgt+1, (PTRS_PER_PGD-1)*sizeof(pgd_t));
++#endif
++#endif
++}
++
++void free_init_pages(char *what, unsigned long begin, unsigned long end)
++{
++ unsigned long addr;
++
++ if (begin >= end)
++ return;
++
++ printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
++ for (addr = begin; addr < end; addr += PAGE_SIZE) {
++ ClearPageReserved(virt_to_page(addr));
++ init_page_count(virt_to_page(addr));
++ memset((void *)(addr & ~(PAGE_SIZE-1)),
++ POISON_FREE_INITMEM, PAGE_SIZE);
++ if (addr >= __START_KERNEL_map) {
++ /* make_readonly() reports all kernel addresses. */
++ __make_page_writable(__va(__pa(addr)));
++ if (HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ pgd_t *pgd = pgd_offset_k(addr);
++ pud_t *pud = pud_offset(pgd, addr);
++ pmd_t *pmd = pmd_offset(pud, addr);
++ pte_t *pte = pte_offset_kernel(pmd, addr);
++
++ xen_l1_entry_update(pte, __pte(0)); /* fallback */
++ }
++ }
++ free_page(addr);
++ totalram_pages++;
++ }
++}
++
++void free_initmem(void)
++{
++ memset(__initdata_begin, POISON_FREE_INITDATA,
++ __initdata_end - __initdata_begin);
++ free_init_pages("unused kernel memory",
++ (unsigned long)(&__init_begin),
++ (unsigned long)(&__init_end));
++}
++
++#ifdef CONFIG_DEBUG_RODATA
++
++void mark_rodata_ro(void)
++{
++ unsigned long addr = (unsigned long)__start_rodata;
++
++ for (; addr < (unsigned long)__end_rodata; addr += PAGE_SIZE)
++ change_page_attr_addr(addr, 1, PAGE_KERNEL_RO);
++
++ printk ("Write protecting the kernel read-only data: %luk\n",
++ (__end_rodata - __start_rodata) >> 10);
++
++ /*
++ * change_page_attr_addr() requires a global_flush_tlb() call after it.
++ * We do this after the printk so that if something went wrong in the
++ * change, the printk gets out at least to give a better debug hint
++ * of who is the culprit.
++ */
++ global_flush_tlb();
++}
++#endif
++
++#ifdef CONFIG_BLK_DEV_INITRD
++void free_initrd_mem(unsigned long start, unsigned long end)
++{
++ free_init_pages("initrd memory", start, end);
++}
++#endif
++
++void __init reserve_bootmem_generic(unsigned long phys, unsigned len)
++{
++ /* Should check here against the e820 map to avoid double free */
++#ifdef CONFIG_NUMA
++ int nid = phys_to_nid(phys);
++ reserve_bootmem_node(NODE_DATA(nid), phys, len);
++#else
++ reserve_bootmem(phys, len);
++#endif
++ if (phys+len <= MAX_DMA_PFN*PAGE_SIZE)
++ dma_reserve += len / PAGE_SIZE;
++}
++
++int kern_addr_valid(unsigned long addr)
++{
++ unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++
++ if (above != 0 && above != -1UL)
++ return 0;
++
++ pgd = pgd_offset_k(addr);
++ if (pgd_none(*pgd))
++ return 0;
++
++ pud = pud_offset(pgd, addr);
++ if (pud_none(*pud))
++ return 0;
++
++ pmd = pmd_offset(pud, addr);
++ if (pmd_none(*pmd))
++ return 0;
++ if (pmd_large(*pmd))
++ return pfn_valid(pmd_pfn(*pmd));
++
++ pte = pte_offset_kernel(pmd, addr);
++ if (pte_none(*pte))
++ return 0;
++ return pfn_valid(pte_pfn(*pte));
++}
++
++#ifdef CONFIG_SYSCTL
++#include <linux/sysctl.h>
++
++extern int exception_trace, page_fault_trace;
++
++static ctl_table debug_table2[] = {
++ { 99, "exception-trace", &exception_trace, sizeof(int), 0644, NULL,
++ proc_dointvec },
++ { 0, }
++};
++
++static ctl_table debug_root_table2[] = {
++ { .ctl_name = CTL_DEBUG, .procname = "debug", .mode = 0555,
++ .child = debug_table2 },
++ { 0 },
++};
++
++static __init int x8664_sysctl_init(void)
++{
++ register_sysctl_table(debug_root_table2, 1);
++ return 0;
++}
++__initcall(x8664_sysctl_init);
++#endif
++
++/* A pseudo VMA to allow ptrace access to the vsyscall page. This only
++ covers the 64-bit vsyscall page now; 32-bit has a real VMA and no
++ longer needs special handling. */
++
++static struct vm_area_struct gate_vma = {
++ .vm_start = VSYSCALL_START,
++ .vm_end = VSYSCALL_END,
++ .vm_page_prot = PAGE_READONLY
++};
++
++struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
++{
++#ifdef CONFIG_IA32_EMULATION
++ if (test_tsk_thread_flag(tsk, TIF_IA32))
++ return NULL;
++#endif
++ return &gate_vma;
++}
++
++int in_gate_area(struct task_struct *task, unsigned long addr)
++{
++ struct vm_area_struct *vma = get_gate_vma(task);
++ if (!vma)
++ return 0;
++ return (addr >= vma->vm_start) && (addr < vma->vm_end);
++}
++
++/* Use this when you have no reliable task/vma, typically from interrupt
++ * context. It is less reliable than using the task's vma and may give
++ * false positives.
++ */
++int in_gate_area_no_task(unsigned long addr)
++{
++ return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
++}
+Index: head-2008-11-25/arch/x86/mm/pageattr_64-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/arch/x86/mm/pageattr_64-xen.c 2008-07-21 11:00:32.000000000 +0200
+@@ -0,0 +1,502 @@
++/*
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ * Thanks to Ben LaHaise for precious feedback.
++ */
++
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/highmem.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <asm/uaccess.h>
++#include <asm/processor.h>
++#include <asm/tlbflush.h>
++#include <asm/io.h>
++
++#ifdef CONFIG_XEN
++#include <asm/pgalloc.h>
++#include <asm/mmu_context.h>
++
++LIST_HEAD(mm_unpinned);
++DEFINE_SPINLOCK(mm_unpinned_lock);
++
++static void _pin_lock(struct mm_struct *mm, int lock) {
++ if (lock)
++ spin_lock(&mm->page_table_lock);
++#if NR_CPUS >= CONFIG_SPLIT_PTLOCK_CPUS
++ /* While mm->page_table_lock protects us against insertions and
++ * removals of higher level page table pages, it doesn't protect
++ * against updates of pte-s. Such updates, however, require the
++ * pte pages to be in consistent state (unpinned+writable or
++ * pinned+readonly). The pinning and attribute changes, however
++ * cannot be done atomically, which is why such updates must be
++ * prevented from happening concurrently.
++ * Note that no pte lock can ever elsewhere be acquired nesting
++ * with an already acquired one in the same mm, or with the mm's
++ * page_table_lock already acquired, as that would break in the
++ * non-split case (where all these are actually resolving to the
++ * one page_table_lock). Thus acquiring all of them here is not
++ * going to result in deadlocks, and the order of acquires
++ * doesn't matter.
++ */
++ {
++ pgd_t *pgd = mm->pgd;
++ unsigned g;
++
++ for (g = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ pud_t *pud;
++ unsigned u;
++
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ pmd_t *pmd;
++ unsigned m;
++
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ spinlock_t *ptl;
++
++ if (pmd_none(*pmd))
++ continue;
++ ptl = pte_lockptr(0, pmd);
++ if (lock)
++ spin_lock(ptl);
++ else
++ spin_unlock(ptl);
++ }
++ }
++ }
++ }
++#endif
++ if (!lock)
++ spin_unlock(&mm->page_table_lock);
++}
++#define pin_lock(mm) _pin_lock(mm, 1)
++#define pin_unlock(mm) _pin_lock(mm, 0)
++
++#define PIN_BATCH 8
++static DEFINE_PER_CPU(multicall_entry_t[PIN_BATCH], pb_mcl);
++
++static inline unsigned int mm_walk_set_prot(void *pt, pgprot_t flags,
++ unsigned int cpu, unsigned int seq)
++{
++ struct page *page = virt_to_page(pt);
++ unsigned long pfn = page_to_pfn(page);
++
++ MULTI_update_va_mapping(per_cpu(pb_mcl, cpu) + seq,
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte(pfn, flags), 0);
++ if (unlikely(++seq == PIN_BATCH)) {
++ if (unlikely(HYPERVISOR_multicall_check(per_cpu(pb_mcl, cpu),
++ PIN_BATCH, NULL)))
++ BUG();
++ seq = 0;
++ }
++
++ return seq;
++}
++
++static void mm_walk(struct mm_struct *mm, pgprot_t flags)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ int g,u,m;
++ unsigned int cpu, seq;
++ multicall_entry_t *mcl;
++
++ pgd = mm->pgd;
++ cpu = get_cpu();
++
++ /*
++ * Cannot iterate up to USER_PTRS_PER_PGD as these pagetables may not
++ * be the 'current' task's pagetables (e.g., current may be 32-bit,
++ * but the pagetables may be for a 64-bit task).
++ * Subtracting 1 from TASK_SIZE64 means the loop limit is correct
++ * regardless of whether TASK_SIZE64 is a multiple of PGDIR_SIZE.
++ */
++ for (g = 0, seq = 0; g <= ((TASK_SIZE64-1) / PGDIR_SIZE); g++, pgd++) {
++ if (pgd_none(*pgd))
++ continue;
++ pud = pud_offset(pgd, 0);
++ if (PTRS_PER_PUD > 1) /* not folded */
++ seq = mm_walk_set_prot(pud,flags,cpu,seq);
++ for (u = 0; u < PTRS_PER_PUD; u++, pud++) {
++ if (pud_none(*pud))
++ continue;
++ pmd = pmd_offset(pud, 0);
++ if (PTRS_PER_PMD > 1) /* not folded */
++ seq = mm_walk_set_prot(pmd,flags,cpu,seq);
++ for (m = 0; m < PTRS_PER_PMD; m++, pmd++) {
++ if (pmd_none(*pmd))
++ continue;
++ pte = pte_offset_kernel(pmd,0);
++ seq = mm_walk_set_prot(pte,flags,cpu,seq);
++ }
++ }
++ }
++
++ mcl = per_cpu(pb_mcl, cpu);
++ if (unlikely(seq > PIN_BATCH - 2)) {
++ if (unlikely(HYPERVISOR_multicall_check(mcl, seq, NULL)))
++ BUG();
++ seq = 0;
++ }
++ MULTI_update_va_mapping(mcl + seq,
++ (unsigned long)__user_pgd(mm->pgd),
++ pfn_pte(virt_to_phys(__user_pgd(mm->pgd))>>PAGE_SHIFT, flags),
++ 0);
++ MULTI_update_va_mapping(mcl + seq + 1,
++ (unsigned long)mm->pgd,
++ pfn_pte(virt_to_phys(mm->pgd)>>PAGE_SHIFT, flags),
++ UVMF_TLB_FLUSH);
++ if (unlikely(HYPERVISOR_multicall_check(mcl, seq + 2, NULL)))
++ BUG();
++
++ put_cpu();
++}
++
++void mm_pin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ pin_lock(mm);
++
++ mm_walk(mm, PAGE_KERNEL_RO);
++ xen_pgd_pin(__pa(mm->pgd)); /* kernel */
++ xen_pgd_pin(__pa(__user_pgd(mm->pgd))); /* user */
++ mm->context.pinned = 1;
++ spin_lock(&mm_unpinned_lock);
++ list_del(&mm->context.unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ pin_unlock(mm);
++}
++
++void mm_unpin(struct mm_struct *mm)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ pin_lock(mm);
++
++ xen_pgd_unpin(__pa(mm->pgd));
++ xen_pgd_unpin(__pa(__user_pgd(mm->pgd)));
++ mm_walk(mm, PAGE_KERNEL);
++ mm->context.pinned = 0;
++ spin_lock(&mm_unpinned_lock);
++ list_add(&mm->context.unpinned, &mm_unpinned);
++ spin_unlock(&mm_unpinned_lock);
++
++ pin_unlock(mm);
++}
++
++void mm_pin_all(void)
++{
++ if (xen_feature(XENFEAT_writable_page_tables))
++ return;
++
++ /*
++ * Allow uninterrupted access to the mm_unpinned list. We don't
++ * actually take the mm_unpinned_lock as it is taken inside mm_pin().
++ * All other CPUs must be at a safe point (e.g., in stop_machine
++ * or offlined entirely).
++ */
++ preempt_disable();
++ while (!list_empty(&mm_unpinned))
++ mm_pin(list_entry(mm_unpinned.next, struct mm_struct,
++ context.unpinned));
++ preempt_enable();
++}
++
++void _arch_dup_mmap(struct mm_struct *mm)
++{
++ if (!mm->context.pinned)
++ mm_pin(mm);
++}
++
++void _arch_exit_mmap(struct mm_struct *mm)
++{
++ struct task_struct *tsk = current;
++
++ task_lock(tsk);
++
++ /*
++ * We aggressively remove defunct pgd from cr3. We execute unmap_vmas()
++ * *much* faster this way, as no tlb flushes means bigger wrpt batches.
++ */
++ if (tsk->active_mm == mm) {
++ tsk->active_mm = &init_mm;
++ atomic_inc(&init_mm.mm_count);
++
++ switch_mm(mm, &init_mm, tsk);
++
++ atomic_dec(&mm->mm_count);
++ BUG_ON(atomic_read(&mm->mm_count) == 0);
++ }
++
++ task_unlock(tsk);
++
++ if ( mm->context.pinned && (atomic_read(&mm->mm_count) == 1) &&
++ !mm->context.has_foreign_mappings )
++ mm_unpin(mm);
++}
++
++struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
++{
++ struct page *pte;
++
++ pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
++ if (pte) {
++ SetPageForeign(pte, pte_free);
++ init_page_count(pte);
++ }
++ return pte;
++}
++
++void pte_free(struct page *pte)
++{
++ unsigned long va = (unsigned long)__va(page_to_pfn(pte)<<PAGE_SHIFT);
++
++ if (!pte_write(*virt_to_ptep(va)))
++ if (HYPERVISOR_update_va_mapping(
++ va, pfn_pte(page_to_pfn(pte), PAGE_KERNEL), 0))
++ BUG();
++
++ ClearPageForeign(pte);
++ init_page_count(pte);
++
++ __free_page(pte);
++}
++#endif /* CONFIG_XEN */
++
++pte_t *lookup_address(unsigned long address)
++{
++ pgd_t *pgd = pgd_offset_k(address);
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t *pte;
++ if (pgd_none(*pgd))
++ return NULL;
++ pud = pud_offset(pgd, address);
++ if (!pud_present(*pud))
++ return NULL;
++ pmd = pmd_offset(pud, address);
++ if (!pmd_present(*pmd))
++ return NULL;
++ if (pmd_large(*pmd))
++ return (pte_t *)pmd;
++ pte = pte_offset_kernel(pmd, address);
++ if (pte && !pte_present(*pte))
++ pte = NULL;
++ return pte;
++}
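++
++#if 0
++/*
++ * Illustrative sketch only (not part of this patch, never compiled):
++ * for a 2MB mapping lookup_address() returns the pmd entry cast to
++ * pte_t *, so a caller that cares can re-check the PSE bit itself.
++ */
++static int example_mapped_large(unsigned long address)
++{
++ pte_t *pte = lookup_address(address);
++ return pte && (pte_val(*pte) & _PAGE_PSE);
++}
++#endif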
++
++static struct page *split_large_page(unsigned long address, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ int i;
++ unsigned long addr;
++ struct page *base = alloc_pages(GFP_KERNEL, 0);
++ pte_t *pbase;
++ if (!base)
++ return NULL;
++ /*
++ * page_private is used to track the number of entries in this
++ * page table page that have non-standard attributes.
++ */
++ SetPagePrivate(base);
++ page_private(base) = 0;
++
++ address = __pa(address);
++ addr = address & LARGE_PAGE_MASK;
++ pbase = (pte_t *)page_address(base);
++ for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
++ pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
++ addr == address ? prot : ref_prot);
++ }
++ return base;
++}
++
++
++static void flush_kernel_map(void *address)
++{
++ if (0 && address && cpu_has_clflush) {
++ /* is this worth it? */
++ int i;
++ for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
++ asm volatile("clflush (%0)" :: "r" (address + i));
++ } else
++ asm volatile("wbinvd":::"memory");
++ if (address)
++ __flush_tlb_one(address);
++ else
++ __flush_tlb_all();
++}
++
++
++static inline void flush_map(unsigned long address)
++{
++ on_each_cpu(flush_kernel_map, (void *)address, 1, 1);
++}
++
++static struct page *deferred_pages; /* protected by init_mm.mmap_sem */
++
++static inline void save_page(struct page *fpage)
++{
++ fpage->lru.next = (struct list_head *)deferred_pages;
++ deferred_pages = fpage;
++}
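++
++/*
++ * Note: save_page() threads pages onto deferred_pages LIFO-fashion
++ * through page->lru.next; global_flush_tlb() later unlinks and frees
++ * them once the TLBs have been flushed on all CPUs.
++ */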
++
++/*
++ * No more special protections in this 2/4MB area - revert to a
++ * large page again.
++ */
++static void revert_page(unsigned long address, pgprot_t ref_prot)
++{
++ pgd_t *pgd;
++ pud_t *pud;
++ pmd_t *pmd;
++ pte_t large_pte;
++
++ pgd = pgd_offset_k(address);
++ BUG_ON(pgd_none(*pgd));
++ pud = pud_offset(pgd,address);
++ BUG_ON(pud_none(*pud));
++ pmd = pmd_offset(pud, address);
++ BUG_ON(__pmd_val(*pmd) & _PAGE_PSE);
++ pgprot_val(ref_prot) |= _PAGE_PSE;
++ large_pte = mk_pte_phys(__pa(address) & LARGE_PAGE_MASK, ref_prot);
++ set_pte((pte_t *)pmd, large_pte);
++}
++
++static int
++__change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
++ pgprot_t ref_prot)
++{
++ pte_t *kpte;
++ struct page *kpte_page;
++ unsigned kpte_flags;
++ pgprot_t ref_prot2;
++ kpte = lookup_address(address);
++ if (!kpte) return 0;
++ kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
++ kpte_flags = pte_val(*kpte);
++ if (pgprot_val(prot) != pgprot_val(ref_prot)) {
++ if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, prot));
++ } else {
++ /*
++ * split_large_page will take the reference for this
++ * change_page_attr on the split page.
++ */
++
++ struct page *split;
++ ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
++
++ split = split_large_page(address, prot, ref_prot2);
++ if (!split)
++ return -ENOMEM;
++ set_pte(kpte,mk_pte(split, ref_prot2));
++ kpte_page = split;
++ }
++ page_private(kpte_page)++;
++ } else if ((kpte_flags & _PAGE_PSE) == 0) {
++ set_pte(kpte, pfn_pte(pfn, ref_prot));
++ BUG_ON(page_private(kpte_page) == 0);
++ page_private(kpte_page)--;
++ } else
++ BUG();
++
++ /* on x86-64 the direct mapping set up at boot does not use 4k pages */
++ /*
++ * ..., but the XEN guest kernels (currently) do:
++ * If the pte was reserved, it means it was created at boot
++ * time (not via split_large_page) and in turn we must not
++ * replace it with a large page.
++ */
++#ifndef CONFIG_XEN
++ BUG_ON(PageReserved(kpte_page));
++#else
++ if (PageReserved(kpte_page))
++ return 0;
++#endif
++
++ if (page_private(kpte_page) == 0) {
++ save_page(kpte_page);
++ revert_page(address, ref_prot);
++ }
++ return 0;
++}
++
++/*
++ * Change the page attributes of a page in the linear mapping.
++ *
++ * This should be used when a page is mapped with a different caching policy
++ * than write-back somewhere - some CPUs do not like it when mappings with
++ * different caching policies exist. This changes the page attributes of the
++ * kernel linear mapping too.
++ *
++ * The caller needs to ensure that there are no conflicting mappings elsewhere.
++ * This function only deals with the kernel linear map.
++ *
++ * Caller must call global_flush_tlb() after this.
++ */
++int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
++{
++ int err = 0;
++ int i;
++
++ down_write(&init_mm.mmap_sem);
++ for (i = 0; i < numpages; i++, address += PAGE_SIZE) {
++ unsigned long pfn = __pa(address) >> PAGE_SHIFT;
++
++ err = __change_page_attr(address, pfn, prot, PAGE_KERNEL);
++ if (err)
++ break;
++ /* Handle kernel mapping too which aliases part of the
++ * lowmem */
++ if (__pa(address) < KERNEL_TEXT_SIZE) {
++ unsigned long addr2;
++ pgprot_t prot2 = prot;
++ addr2 = __START_KERNEL_map + __pa(address);
++ pgprot_val(prot2) &= ~_PAGE_NX;
++ err = __change_page_attr(addr2, pfn, prot2, PAGE_KERNEL_EXEC);
++ }
++ }
++ up_write(&init_mm.mmap_sem);
++ return err;
++}
++
++/* Don't call this for MMIO areas that may not have a mem_map entry */
++int change_page_attr(struct page *page, int numpages, pgprot_t prot)
++{
++ unsigned long addr = (unsigned long)page_address(page);
++ return change_page_attr_addr(addr, numpages, prot);
++}
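++
++#if 0
++/*
++ * Illustrative sketch only (not part of this patch, never compiled):
++ * change_page_attr() defers the flush, so every caller must follow
++ * up with global_flush_tlb(), as mark_rodata_ro() does.
++ */
++static void example_write_protect(struct page *page, int numpages)
++{
++ change_page_attr(page, numpages, PAGE_KERNEL_RO);
++ global_flush_tlb();
++}
++#endif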
++
++void global_flush_tlb(void)
++{
++ struct page *dpage;
++
++ down_read(&init_mm.mmap_sem);
++ dpage = xchg(&deferred_pages, NULL);
++ up_read(&init_mm.mmap_sem);
++
++ flush_map((dpage && !dpage->lru.next) ? (unsigned long)page_address(dpage) : 0);
++ while (dpage) {
++ struct page *tmp = dpage;
++ dpage = (struct page *)dpage->lru.next;
++ ClearPagePrivate(tmp);
++ __free_page(tmp);
++ }
++}
++
++EXPORT_SYMBOL(change_page_attr);
++EXPORT_SYMBOL(global_flush_tlb);
+Index: head-2008-11-25/drivers/pci/msi-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/pci/msi-xen.c 2008-10-13 13:43:45.000000000 +0200
+@@ -0,0 +1,809 @@
++/*
++ * File: msi.c
++ * Purpose: PCI Message Signaled Interrupt (MSI)
++ *
++ * Copyright (C) 2003-2004 Intel
++ * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com)
++ */
++
++#include <linux/mm.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/init.h>
++#include <linux/ioport.h>
++#include <linux/smp_lock.h>
++#include <linux/pci.h>
++#include <linux/proc_fs.h>
++
++#include <xen/evtchn.h>
++
++#include <asm/errno.h>
++#include <asm/io.h>
++#include <asm/smp.h>
++
++#include "pci.h"
++#include "msi.h"
++
++static int pci_msi_enable = 1;
++
++static struct msi_ops *msi_ops;
++
++int msi_register(struct msi_ops *ops)
++{
++ msi_ops = ops;
++ return 0;
++}
++
++static LIST_HEAD(msi_dev_head);
++DEFINE_SPINLOCK(msi_dev_lock);
++
++struct msi_dev_list {
++ struct pci_dev *dev;
++ struct list_head list;
++ spinlock_t pirq_list_lock;
++ struct list_head pirq_list_head;
++};
++
++struct msi_pirq_entry {
++ struct list_head list;
++ int pirq;
++ int entry_nr;
++};
++
++static struct msi_dev_list *get_msi_dev_pirq_list(struct pci_dev *dev)
++{
++ struct msi_dev_list *msi_dev_list, *ret = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&msi_dev_lock, flags);
++
++ list_for_each_entry(msi_dev_list, &msi_dev_head, list)
++ if ( msi_dev_list->dev == dev )
++ ret = msi_dev_list;
++
++ if ( ret ) {
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return ret;
++ }
++
++ /* No msi_dev has been allocated for this device yet. */
++ ret = kzalloc(sizeof(struct msi_dev_list), GFP_ATOMIC);
++
++ /* Failed to allocate msi_dev structure */
++ if ( !ret ) {
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return NULL;
++ }
++
++ ret->dev = dev;
++ spin_lock_init(&ret->pirq_list_lock);
++ INIT_LIST_HEAD(&ret->pirq_list_head);
++ list_add_tail(&ret->list, &msi_dev_head);
++ spin_unlock_irqrestore(&msi_dev_lock, flags);
++ return ret;
++}
++
++static int attach_pirq_entry(int pirq, int entry_nr,
++ struct msi_dev_list *msi_dev_entry)
++{
++ struct msi_pirq_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
++ unsigned long flags;
++
++ if (!entry)
++ return -ENOMEM;
++ entry->pirq = pirq;
++ entry->entry_nr = entry_nr;
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_add_tail(&entry->list, &msi_dev_entry->pirq_list_head);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ return 0;
++}
++
++static void detach_pirq_entry(int entry_nr,
++ struct msi_dev_list *msi_dev_entry)
++{
++ unsigned long flags;
++ struct msi_pirq_entry *pirq_entry;
++
++ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++ if (pirq_entry->entry_nr == entry_nr) {
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_del(&pirq_entry->list);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ kfree(pirq_entry);
++ return;
++ }
++ }
++}
++
++/*
++ * pciback will provide device's owner
++ */
++static int (*get_owner)(struct pci_dev *dev);
++
++int register_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++ if (get_owner) {
++ printk(KERN_WARNING "register msi_get_owner again\n");
++ return -EEXIST;
++ }
++ get_owner = func;
++ return 0;
++}
++
++int unregister_msi_get_owner(int (*func)(struct pci_dev *dev))
++{
++ if (get_owner != func)
++ return -EINVAL;
++ get_owner = NULL;
++ return 0;
++}
++
++static int msi_get_dev_owner(struct pci_dev *dev)
++{
++ int owner;
++
++ BUG_ON(!is_initial_xendomain());
++ if (get_owner && (owner = get_owner(dev)) >= 0) {
++ printk(KERN_INFO "get owner for dev %x get %x \n",
++ dev->devfn, owner);
++ return owner;
++ }
++
++ return DOMID_SELF;
++}
++
++static int msi_unmap_pirq(struct pci_dev *dev, int pirq)
++{
++ struct physdev_unmap_pirq unmap;
++ int rc;
++
++ unmap.domid = msi_get_dev_owner(dev);
++ /* See the comments in msi_map_pirq_to_vector(): the input parameter
++ * pirq means a Linux irq number only if the device belongs to dom0
++ * itself.
++ */
++ unmap.pirq = (unmap.domid != DOMID_SELF)
++ ? pirq : evtchn_get_xen_pirq(pirq);
++
++ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap)))
++ printk(KERN_WARNING "unmap irq %x failed\n", pirq);
++
++ if (rc < 0)
++ return rc;
++
++ if (unmap.domid == DOMID_SELF)
++ evtchn_map_pirq(pirq, 0);
++
++ return 0;
++}
++
++static u64 find_table_base(struct pci_dev *dev, int pos)
++{
++ u8 bar;
++ u32 reg;
++ unsigned long flags;
++
++ pci_read_config_dword(dev, msix_table_offset_reg(pos), ®);
++ bar = reg & PCI_MSIX_FLAGS_BIRMASK;
++
++ flags = pci_resource_flags(dev, bar);
++ if (flags & (IORESOURCE_DISABLED | IORESOURCE_UNSET | IORESOURCE_BUSY))
++ return 0;
++
++ return pci_resource_start(dev, bar);
++}
++
++/*
++ * Protected by msi_lock
++ */
++static int msi_map_pirq_to_vector(struct pci_dev *dev, int pirq,
++ int entry_nr, u64 table_base)
++{
++ struct physdev_map_pirq map_irq;
++ int rc;
++ domid_t domid = DOMID_SELF;
++
++ domid = msi_get_dev_owner(dev);
++
++ map_irq.domid = domid;
++ map_irq.type = MAP_PIRQ_TYPE_MSI;
++ map_irq.index = -1;
++ map_irq.pirq = pirq < 0 ? -1 : evtchn_get_xen_pirq(pirq);
++ map_irq.bus = dev->bus->number;
++ map_irq.devfn = dev->devfn;
++ map_irq.entry_nr = entry_nr;
++ map_irq.table_base = table_base;
++
++ if ((rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq)))
++ printk(KERN_WARNING "map irq failed\n");
++
++ if (rc < 0)
++ return rc;
++ /* This happens when MSI support is not enabled in Xen. */
++ if (rc == 0 && map_irq.pirq < 0)
++ return -ENOSYS;
++
++ BUG_ON(map_irq.pirq <= 0);
++
++ /* If mapping of this particular MSI is on behalf of another domain,
++ * we do not need to get an irq in dom0. This also implies:
++ * dev->irq in dom0 will be 'Xen pirq' if this device belongs to
++ * to another domain, and will be 'Linux irq' if it belongs to dom0.
++ */
++ return ((domid != DOMID_SELF) ?
++ map_irq.pirq : evtchn_map_pirq(pirq, map_irq.pirq));
++}
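++
++#if 0
++/*
++ * Illustrative sketch only (not part of this patch, never compiled):
++ * how a dom0 caller interprets the return value -- a Linux irq
++ * (already passed through evtchn_map_pirq()) for DOMID_SELF, a raw
++ * Xen pirq when mapping on behalf of another domain.
++ */
++static void example_use_mapping(struct pci_dev *dev, domid_t domid, int rc)
++{
++ if (domid == DOMID_SELF)
++ dev->irq = rc; /* usable with request_irq() in dom0 */
++ /* else rc is a Xen pirq belonging to the other domain */
++}
++#endif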
++
++static int msi_map_vector(struct pci_dev *dev, int entry_nr, u64 table_base)
++{
++ return msi_map_pirq_to_vector(dev, -1, entry_nr, table_base);
++}
++
++static int msi_init(void)
++{
++ static int status = 0;
++
++ if (pci_msi_quirk) {
++ pci_msi_enable = 0;
++ printk(KERN_WARNING "PCI: MSI quirk detected. MSI disabled.\n");
++ status = -EINVAL;
++ }
++
++ return status;
++}
++
++void pci_scan_msi_device(struct pci_dev *dev) { }
++
++void disable_msi_mode(struct pci_dev *dev, int pos, int type)
++{
++ u16 control;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (type == PCI_CAP_ID_MSI) {
++ /* Clear the MSI enable bit */
++ msi_disable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msi_enabled = 0;
++ } else {
++ msix_disable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msix_enabled = 0;
++ }
++ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++ /* PCI Express Endpoint device detected */
++ pci_intx(dev, 1); /* enable intx */
++ }
++}
++
++static void enable_msi_mode(struct pci_dev *dev, int pos, int type)
++{
++ u16 control;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (type == PCI_CAP_ID_MSI) {
++ /* Set enabled bits to single MSI & enable MSI_enable bit */
++ msi_enable(control, 1);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msi_enabled = 1;
++ } else {
++ msix_enable(control);
++ pci_write_config_word(dev, msi_control_reg(pos), control);
++ dev->msix_enabled = 1;
++ }
++ if (pci_find_capability(dev, PCI_CAP_ID_EXP)) {
++ /* PCI Express Endpoint device detected */
++ pci_intx(dev, 0); /* disable intx */
++ }
++}
++
++#ifdef CONFIG_PM
++int pci_save_msi_state(struct pci_dev *dev)
++{
++ int pos;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (pos <= 0 || dev->no_msi)
++ return 0;
++
++ if (!dev->msi_enabled)
++ return 0;
++
++ /* Restore dev->irq to its default pin-assertion vector */
++ msi_unmap_pirq(dev, dev->irq);
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++ /* Set the flags for use of restore */
++ dev->msi_enabled = 1;
++ return 0;
++}
++
++void pci_restore_msi_state(struct pci_dev *dev)
++{
++ int pos, pirq;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (pos <= 0)
++ return;
++
++ if (!dev->msi_enabled)
++ return;
++
++ pirq = msi_map_pirq_to_vector(dev, dev->irq, 0, 0);
++ if (pirq < 0)
++ return;
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++}
++
++int pci_save_msix_state(struct pci_dev *dev)
++{
++ int pos;
++ unsigned long flags;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (pos <= 0 || dev->no_msi)
++ return 0;
++
++ /* save the capability */
++ if (!dev->msix_enabled)
++ return 0;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list)
++ msi_unmap_pirq(dev, pirq_entry->pirq);
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++ /* Set the flags for use of restore */
++ dev->msix_enabled = 1;
++
++ return 0;
++}
++
++void pci_restore_msix_state(struct pci_dev *dev)
++{
++ int pos;
++ unsigned long flags;
++ u64 table_base;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (pos <= 0)
++ return;
++
++ if (!dev->msix_enabled)
++ return;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++ table_base = find_table_base(dev, pos);
++ if (!table_base)
++ return;
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list) {
++ int rc = msi_map_pirq_to_vector(dev, pirq_entry->pirq,
++ pirq_entry->entry_nr, table_base);
++ if (rc < 0)
++ printk(KERN_WARNING
++ "%s: re-mapping irq #%d (pirq%d) failed: %d\n",
++ pci_name(dev), pirq_entry->entry_nr,
++ pirq_entry->pirq, rc);
++ }
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++}
++#endif
++
++/**
++ * msi_capability_init - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Set up the MSI capability structure of the device function with a
++ * single MSI vector, regardless of whether the device function is
++ * capable of handling multiple messages. A return of zero indicates
++ * successful setup of entry zero with the new MSI vector; non-zero
++ * otherwise.
++ **/
++static int msi_capability_init(struct pci_dev *dev)
++{
++ int pos, pirq;
++ u16 control;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++
++ pirq = msi_map_vector(dev, 0, 0);
++ if (pirq < 0)
++ return -EBUSY;
++
++ dev->irq = pirq;
++ /* Set MSI enabled bits */
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++ dev->msi_enabled = 1;
++
++ return 0;
++}
++
++/**
++ * msix_capability_init - configure device's MSI-X capability
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of struct msix_entry entries
++ * @nvec: number of @entries
++ *
++ * Set up the MSI-X capability structure of the device function with the
++ * requested MSI-X entries. A return of zero indicates successful setup of
++ * the requested entries with allocated vectors; non-zero otherwise.
++ **/
++static int msix_capability_init(struct pci_dev *dev,
++ struct msix_entry *entries, int nvec)
++{
++ u64 table_base;
++ int pirq, i, j, mapped, pos;
++ struct msi_dev_list *msi_dev_entry = get_msi_dev_pirq_list(dev);
++ struct msi_pirq_entry *pirq_entry;
++
++ if (!msi_dev_entry)
++ return -ENOMEM;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ table_base = find_table_base(dev, pos);
++ if (!table_base)
++ return -ENODEV;
++
++ /* MSI-X Table Initialization */
++ for (i = 0; i < nvec; i++) {
++ mapped = 0;
++ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++ if (pirq_entry->entry_nr == entries[i].entry) {
++ printk(KERN_WARNING "msix entry %d for dev %02x:%02x:%01x are \
++ not freed before acquire again.\n", entries[i].entry,
++ dev->bus->number, PCI_SLOT(dev->devfn),
++ PCI_FUNC(dev->devfn));
++ (entries + i)->vector = pirq_entry->pirq;
++ mapped = 1;
++ break;
++ }
++ }
++ if (mapped)
++ continue;
++ pirq = msi_map_vector(dev, entries[i].entry, table_base);
++ if (pirq < 0)
++ break;
++ attach_pirq_entry(pirq, entries[i].entry, msi_dev_entry);
++ (entries + i)->vector = pirq;
++ }
++
++ if (i != nvec) {
++ for (j = --i; j >= 0; j--) {
++ msi_unmap_pirq(dev, entries[j].vector);
++ detach_pirq_entry(entries[j].entry, msi_dev_entry);
++ entries[j].vector = 0;
++ }
++ return -EBUSY;
++ }
++
++ enable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++ dev->msix_enabled = 1;
++
++ return 0;
++}
++
++/**
++ * pci_enable_msi - configure device's MSI capability structure
++ * @dev: pointer to the pci_dev data structure of MSI device function
++ *
++ * Set up the MSI capability structure of the device function with a
++ * single MSI vector when the software driver requests MSI mode to be
++ * enabled on the hardware device function. A return of zero indicates
++ * successful setup of entry zero with the new MSI vector; a non-zero
++ * return indicates failure.
++ **/
++extern int pci_frontend_enable_msi(struct pci_dev *dev);
++int pci_enable_msi(struct pci_dev *dev)
++{
++ struct pci_bus *bus;
++ int pos, temp, status = -EINVAL;
++
++ if (!pci_msi_enable || !dev)
++ return status;
++
++ if (dev->no_msi)
++ return status;
++
++ for (bus = dev->bus; bus; bus = bus->parent)
++ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++ return -EINVAL;
++
++ status = msi_init();
++ if (status < 0)
++ return status;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ int ret;
++
++ temp = dev->irq;
++ ret = pci_frontend_enable_msi(dev);
++ if (ret)
++ return ret;
++
++ dev->irq = evtchn_map_pirq(-1, dev->irq);
++ dev->irq_old = temp;
++
++ return ret;
++ }
++#endif
++
++ temp = dev->irq;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (!pos)
++ return -EINVAL;
++
++ /* Check whether the driver already requested MSI-X vectors */
++ if (dev->msix_enabled) {
++ printk(KERN_INFO "PCI: %s: Can't enable MSI. "
++ "Device already has MSI-X vectors assigned\n",
++ pci_name(dev));
++ dev->irq = temp;
++ return -EINVAL;
++ }
++
++ status = msi_capability_init(dev);
++ if (!status)
++ dev->irq_old = temp;
++ else
++ dev->irq = temp;
++
++ return status;
++}
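++
++/*
++ * Illustrative driver-side use (sketch only; handler and device names
++ * are hypothetical):
++ *
++ *	if (!pci_enable_msi(pdev))
++ *		err = request_irq(pdev->irq, my_isr, 0, "mydev", mydev);
++ */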
++
++extern void pci_frontend_disable_msi(struct pci_dev *dev);
++void pci_disable_msi(struct pci_dev *dev)
++{
++ int pos;
++ int pirq;
++
++ if (!pci_msi_enable)
++ return;
++ if (!dev)
++ return;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ evtchn_map_pirq(dev->irq, 0);
++ pci_frontend_disable_msi(dev);
++ dev->irq = dev->irq_old;
++ return;
++ }
++#endif
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
++ if (!pos)
++ return;
++
++ pirq = dev->irq;
++ /* Restore dev->irq to its default pin-assertion vector */
++ dev->irq = dev->irq_old;
++ msi_unmap_pirq(dev, pirq);
++
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSI);
++}
++
++/**
++ * pci_enable_msix - configure device's MSI-X capability structure
++ * @dev: pointer to the pci_dev data structure of MSI-X device function
++ * @entries: pointer to an array of MSI-X entries
++ * @nvec: number of MSI-X vectors requested for allocation by device driver
++ *
++ * Set up the MSI-X capability structure of the device function with the
++ * number of requested vectors when the software driver requests MSI-X
++ * mode to be enabled on the hardware device function. A return of zero
++ * indicates successful configuration of the MSI-X capability structure
++ * with newly allocated MSI-X vectors. A return of < 0 indicates a failure,
++ * while a return of > 0 indicates that the driver requested more vectors
++ * than are available. The driver should use the returned value to re-send
++ * its request.
++ **/
++extern int pci_frontend_enable_msix(struct pci_dev *dev,
++ struct msix_entry *entries, int nvec);
++int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
++{
++ struct pci_bus *bus;
++ int status, pos, nr_entries;
++ int i, j, temp;
++ u16 control;
++
++ if (!pci_msi_enable || !dev || !entries)
++ return -EINVAL;
++
++ if (dev->no_msi)
++ return -EINVAL;
++
++ for (bus = dev->bus; bus; bus = bus->parent)
++ if (bus->bus_flags & PCI_BUS_FLAGS_NO_MSI)
++ return -EINVAL;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry;
++ int ret, irq;
++
++ ret = pci_frontend_enable_msix(dev, entries, nvec);
++ if (ret) {
++ printk("get %x from pci_frontend_enable_msix\n", ret);
++ return ret;
++ }
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++ for (i = 0; i < nvec; i++) {
++ int mapped = 0;
++
++ list_for_each_entry(pirq_entry, &msi_dev_entry->pirq_list_head, list) {
++ if (pirq_entry->entry_nr == entries[i].entry) {
++ irq = pirq_entry->pirq;
++ BUG_ON(entries[i].vector != evtchn_get_xen_pirq(irq));
++ entries[i].vector = irq;
++ mapped = 1;
++ break;
++ }
++ }
++ if (mapped)
++ continue;
++ irq = evtchn_map_pirq(-1, entries[i].vector);
++ attach_pirq_entry(irq, entries[i].entry, msi_dev_entry);
++ entries[i].vector = irq;
++ }
++ return 0;
++ }
++#endif
++
++ status = msi_init();
++ if (status < 0)
++ return status;
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (!pos)
++ return -EINVAL;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ nr_entries = multi_msix_capable(control);
++ if (nvec > nr_entries)
++ return -EINVAL;
++
++ /* Check for any invalid entries */
++ for (i = 0; i < nvec; i++) {
++ if (entries[i].entry >= nr_entries)
++ return -EINVAL; /* invalid entry */
++ for (j = i + 1; j < nvec; j++) {
++ if (entries[i].entry == entries[j].entry)
++ return -EINVAL; /* duplicate entry */
++ }
++ }
++
++ temp = dev->irq;
++ /* Check whether the driver already requested an MSI vector */
++ if (dev->msi_enabled) {
++ printk(KERN_INFO "PCI: %s: Can't enable MSI-X. "
++ "Device already has an MSI vector assigned\n",
++ pci_name(dev));
++ dev->irq = temp;
++ return -EINVAL;
++ }
++
++ status = msix_capability_init(dev, entries, nvec);
++
++ if (!status)
++ dev->irq_old = temp;
++ else
++ dev->irq = temp;
++
++ return status;
++}
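++
++/*
++ * Per the contract documented above, a positive return is a hint to
++ * retry with fewer vectors (sketch only):
++ *
++ *	rc = pci_enable_msix(pdev, entries, nvec);
++ *	if (rc > 0)
++ *		rc = pci_enable_msix(pdev, entries, rc);
++ */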
++
++extern void pci_frontend_disable_msix(struct pci_dev *dev);
++void pci_disable_msix(struct pci_dev *dev)
++{
++ int pos;
++ u16 control;
++
++ if (!pci_msi_enable)
++ return;
++ if (!dev)
++ return;
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++ if (!is_initial_xendomain()) {
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ pci_frontend_disable_msix(dev);
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list) {
++ evtchn_map_pirq(pirq_entry->pirq, 0);
++ list_del(&pirq_entry->list);
++ kfree(pirq_entry);
++ }
++
++ dev->irq = dev->irq_old;
++ return;
++ }
++#endif
++
++ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
++ if (!pos)
++ return;
++
++ pci_read_config_word(dev, msi_control_reg(pos), &control);
++ if (!(control & PCI_MSIX_FLAGS_ENABLE))
++ return;
++
++ msi_remove_pci_irq_vectors(dev);
++
++ /* Disable MSI mode */
++ disable_msi_mode(dev, pos, PCI_CAP_ID_MSIX);
++}
++
++/**
++ * msi_remove_pci_irq_vectors - reclaim MSI(X) vectors to unused state
++ * @dev: pointer to the pci_dev data structure of MSI(X) device function
++ *
++ * Called during hotplug removal of the device function. All MSI/MSI-X
++ * vectors previously assigned to this device function are reclaimed to
++ * the unused state, so they may be reused later.
++ **/
++void msi_remove_pci_irq_vectors(struct pci_dev *dev)
++{
++ unsigned long flags;
++ struct msi_dev_list *msi_dev_entry;
++ struct msi_pirq_entry *pirq_entry, *tmp;
++
++ if (!pci_msi_enable || !dev)
++ return;
++
++ msi_dev_entry = get_msi_dev_pirq_list(dev);
++
++ spin_lock_irqsave(&msi_dev_entry->pirq_list_lock, flags);
++ if (!list_empty(&msi_dev_entry->pirq_list_head)) {
++ printk(KERN_WARNING "msix pirqs for dev %02x:%02x:%01x "
++ "were not freed before device removal\n",
++ dev->bus->number, PCI_SLOT(dev->devfn),
++ PCI_FUNC(dev->devfn));
++ list_for_each_entry_safe(pirq_entry, tmp,
++ &msi_dev_entry->pirq_list_head, list) {
++ msi_unmap_pirq(dev, pirq_entry->pirq);
++ list_del(&pirq_entry->list);
++ kfree(pirq_entry);
++ }
++ }
++ spin_unlock_irqrestore(&msi_dev_entry->pirq_list_lock, flags);
++ dev->irq = dev->irq_old;
++}
++
++void pci_no_msi(void)
++{
++ pci_msi_enable = 0;
++}
++
++EXPORT_SYMBOL(pci_enable_msi);
++EXPORT_SYMBOL(pci_disable_msi);
++EXPORT_SYMBOL(pci_enable_msix);
++EXPORT_SYMBOL(pci_disable_msix);
++#ifdef CONFIG_XEN
++EXPORT_SYMBOL(register_msi_get_owner);
++EXPORT_SYMBOL(unregister_msi_get_owner);
++#endif
++
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/agp.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/agp.h 2007-06-22 09:08:06.000000000 +0200
+@@ -0,0 +1,44 @@
++#ifndef AGP_H
++#define AGP_H 1
++
++#include <asm/pgtable.h>
++#include <asm/cacheflush.h>
++#include <asm/system.h>
++
++/*
++ * Functions to keep the agpgart mappings coherent with the MMU.
++ * The GART gives the CPU a physical alias of pages in memory. The alias region is
++ * mapped uncacheable. Make sure there are no conflicting mappings
++ * with different cachability attributes for the same page. This avoids
++ * data corruption on some CPUs.
++ */
++
++/* Caller's responsibility to call global_flush_tlb() for
++ * performance reasons */
++#define map_page_into_agp(page) ( \
++ xen_create_contiguous_region((unsigned long)page_address(page), 0, 32) \
++ ?: change_page_attr(page, 1, PAGE_KERNEL_NOCACHE))
++#define unmap_page_from_agp(page) ( \
++ xen_destroy_contiguous_region((unsigned long)page_address(page), 0), \
++ /* only a fallback: xen_destroy_contiguous_region uses PAGE_KERNEL */ \
++ change_page_attr(page, 1, PAGE_KERNEL))
++#define flush_agp_mappings() global_flush_tlb()
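++
++/*
++ * Typical usage (sketch only): map each page of a batch, then issue a
++ * single TLB flush for the whole batch, e.g.
++ *
++ *	if (map_page_into_agp(page))
++ *		goto fail;
++ *	...
++ *	flush_agp_mappings();
++ */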
++
++/* Could use CLFLUSH here if the cpu supports it. But then it would
++ need to be called for each cacheline of the whole page so it may not be
++ worth it. Would need a page for it. */
++#define flush_agp_cache() wbinvd()
++
++/* Convert a physical address to an address suitable for the GART. */
++#define phys_to_gart(x) phys_to_machine(x)
++#define gart_to_phys(x) machine_to_phys(x)
++
++/* GATT allocation. Returns/accepts GATT kernel virtual address. */
++#define alloc_gatt_pages(order) ({ \
++ char *_t; dma_addr_t _d; \
++ _t = dma_alloc_coherent(NULL,PAGE_SIZE<<(order),&_d,GFP_KERNEL); \
++ _t; })
++#define free_gatt_pages(table, order) \
++ dma_free_coherent(NULL,PAGE_SIZE<<(order),(table),virt_to_bus(table))
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/desc_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/desc_32.h 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,166 @@
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <asm/ldt.h>
++#include <asm/segment.h>
++
++#define CPU_16BIT_STACK_SIZE 1024
++
++#ifndef __ASSEMBLY__
++
++#include <linux/preempt.h>
++#include <linux/smp.h>
++
++#include <asm/mmu.h>
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++DECLARE_PER_CPU(unsigned char, cpu_16bit_stack[CPU_16BIT_STACK_SIZE]);
++
++struct Xgt_desc_struct {
++ unsigned short size;
++ unsigned long address __attribute__((packed));
++ unsigned short pad;
++} __attribute__ ((packed));
++
++extern struct Xgt_desc_struct idt_descr;
++DECLARE_PER_CPU(struct Xgt_desc_struct, cpu_gdt_descr);
++
++static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
++{
++ return (struct desc_struct *)per_cpu(cpu_gdt_descr, cpu).address;
++}
++
++#define load_TR_desc() __asm__ __volatile__("ltr %w0"::"q" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() __asm__ __volatile__("lldt %w0"::"q" (GDT_ENTRY_LDT*8))
++
++#define load_gdt(dtr) __asm__ __volatile("lgdt %0"::"m" (*dtr))
++#define load_idt(dtr) __asm__ __volatile("lidt %0"::"m" (*dtr))
++#define load_tr(tr) __asm__ __volatile("ltr %0"::"mr" (tr))
++#define load_ldt(ldt) __asm__ __volatile("lldt %0"::"mr" (ldt))
++
++#define store_gdt(dtr) __asm__ ("sgdt %0":"=m" (*dtr))
++#define store_idt(dtr) __asm__ ("sidt %0":"=m" (*dtr))
++#define store_tr(tr) __asm__ ("str %0":"=mr" (tr))
++#define store_ldt(ldt) __asm__ ("sldt %0":"=mr" (ldt))
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++extern void set_intr_gate(unsigned int irq, void * addr);
++
++#define _set_tssldt_desc(n,addr,limit,type) \
++__asm__ __volatile__ ("movw %w3,0(%2)\n\t" \
++ "movw %w1,2(%2)\n\t" \
++ "rorl $16,%1\n\t" \
++ "movb %b1,4(%2)\n\t" \
++ "movb %4,5(%2)\n\t" \
++ "movb $0,6(%2)\n\t" \
++ "movb %h1,7(%2)\n\t" \
++ "rorl $16,%1" \
++ : "=m"(*(n)) : "q" (addr), "r"(n), "ir"(limit), "i"(type))
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[entry], (int)addr,
++ offsetof(struct tss_struct, __cacheline_filler) - 1, 0x89);
++}
++
++#define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
++#endif
++
++static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++{
++ _set_tssldt_desc(&get_cpu_gdt_table(cpu)[GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ 0x7000)
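++
++/*
++ * LDT_entry_a/LDT_entry_b assemble the low and high 32-bit words of an
++ * x86 segment descriptor: base bits 15:0 and limit bits 15:0 go into
++ * the low word; the remaining base/limit bits and the attribute flags
++ * go into the high word.
++ */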
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 )
++
++extern int write_ldt_entry(void *ldt, int entry, __u32 entry_a, __u32 entry_b);
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#define C(i) if (HYPERVISOR_update_descriptor(virt_to_machine(&get_cpu_gdt_table(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++ *(u64 *)&t->tls_array[i])) \
++ BUG();
++ C(0); C(1); C(2);
++#undef C
++}
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++ * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(NULL, 0);
++ put_cpu();
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt(segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++static inline unsigned long get_desc_base(unsigned long *desc)
++{
++ unsigned long base;
++ base = ((desc[0] >> 16) & 0x0000ffff) |
++ ((desc[1] << 16) & 0x00ff0000) |
++ (desc[1] & 0xff000000);
++ return base;
++}
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_32.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,151 @@
++#ifndef _ASM_I386_DMA_MAPPING_H
++#define _ASM_I386_DMA_MAPPING_H
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++#include <linux/mm.h>
++#include <asm/cache.h>
++#include <asm/io.h>
++#include <asm/scatterlist.h>
++#include <asm/swiotlb.h>
++
++static inline int
++address_needs_mapping(struct device *hwdev, dma_addr_t addr)
++{
++ dma_addr_t mask = 0xffffffff;
++ /* If the device has a mask, use it, otherwise default to 32 bits */
++ if (hwdev && hwdev->dma_mask)
++ mask = *hwdev->dma_mask;
++ return (addr & ~mask) != 0;
++}
++
++extern int range_straddles_page_boundary(paddr_t p, size_t size);
++
++#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
++#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
++
++void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t flag);
++
++void dma_free_coherent(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++
++extern dma_addr_t
++dma_map_single(struct device *dev, void *ptr, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
++ enum dma_data_direction direction);
++
++extern int dma_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++extern void dma_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, enum dma_data_direction direction);
++
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t
++dma_map_page(struct device *dev, struct page *page, unsigned long offset,
++ size_t size, enum dma_data_direction direction);
++
++extern void
++dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
++ enum dma_data_direction direction);
++#else
++#define dma_map_page(dev, page, offset, size, dir) \
++ dma_map_single(dev, page_address(page) + (offset), (size), (dir))
++#define dma_unmap_page dma_unmap_single
++#endif
++
++extern void
++dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++extern void
++dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
++ enum dma_data_direction direction);
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_cpu(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ dma_sync_single_for_device(dev, dma_handle+offset, size, direction);
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_cpu(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
++ enum dma_data_direction direction)
++{
++ if (swiotlb)
++ swiotlb_sync_sg_for_device(dev,sg,nelems,direction);
++ flush_write_buffers();
++}
++
++extern int
++dma_mapping_error(dma_addr_t dma_addr);
++
++extern int
++dma_supported(struct device *dev, u64 mask);
++
++static inline int
++dma_set_mask(struct device *dev, u64 mask)
++{
++ if(!dev->dma_mask || !dma_supported(dev, mask))
++ return -EIO;
++
++ *dev->dma_mask = mask;
++
++ return 0;
++}
++
++static inline int
++dma_get_cache_alignment(void)
++{
++ /* no easy way to get cache size on all x86, so return the
++ * maximum possible, to be safe */
++ return (1 << INTERNODE_CACHE_SHIFT);
++}
++
++#define dma_is_consistent(d) (1)
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size,
++ enum dma_data_direction direction)
++{
++ flush_write_buffers();
++}
++
++#define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
++extern int
++dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
++ dma_addr_t device_addr, size_t size, int flags);
++
++extern void
++dma_release_declared_memory(struct device *dev);
++
++extern void *
++dma_mark_declared_memory_occupied(struct device *dev,
++ dma_addr_t device_addr, size_t size);
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,155 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ *
++ * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++
++/* used by vmalloc.c, vsyscall.lds.S.
++ *
++ * Leave one empty page between vmalloc'ed areas and
++ * the start of the fixmap.
++ */
++extern unsigned long __FIXADDR_TOP;
++
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <asm/acpi.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#ifdef CONFIG_HIGHMEM
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#endif
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process. We allocate these special addresses
++ * from the end of virtual memory (0xfffff000) backwards.
++ * Also this lets us do fail-safe vmalloc(), we
++ * can guarantee that these special addresses and
++ * vmalloc()-ed addresses never overlap.
++ *
++ * these 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages (or larger if used with an increment
++ * higher than 1). Use set_fixmap(idx, phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++enum fixed_addresses {
++ FIX_HOLE,
++ FIX_VDSO,
++#ifdef CONFIG_X86_LOCAL_APIC
++ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_X86_VISWS_APIC
++ FIX_CO_CPU, /* Cobalt timer */
++ FIX_CO_APIC, /* Cobalt APIC Redirection Table */
++ FIX_LI_PCIA, /* Lithium PCI Bridge A */
++ FIX_LI_PCIB, /* Lithium PCI Bridge B */
++#endif
++#ifdef CONFIG_X86_F00F_BUG
++ FIX_F00F_IDT, /* Virtual mapping for IDT */
++#endif
++#ifdef CONFIG_X86_CYCLONE_TIMER
++ FIX_CYCLONE_TIMER, /*cyclone timer register*/
++#endif
++#ifdef CONFIG_HIGHMEM
++ FIX_KMAP_BEGIN, /* reserved pte's for temporary kernel mappings */
++ FIX_KMAP_END = FIX_KMAP_BEGIN+(KM_TYPE_NR*NR_CPUS)-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++#ifdef CONFIG_PCI_MMCONFIG
++ FIX_PCIE_MCFG,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ FIX_WP_TEST,
++ __end_of_fixed_addresses
++};
++
++extern void set_fixaddr_top(unsigned long top);
++
++extern void __set_fixmap(enum fixed_addresses idx,
++ maddr_t phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP ((unsigned long)__FIXADDR_TOP)
++
++#define __FIXADDR_SIZE (__end_of_permanent_fixed_addresses << PAGE_SHIFT)
++#define __FIXADDR_BOOT_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - __FIXADDR_SIZE)
++#define FIXADDR_BOOT_START (FIXADDR_TOP - __FIXADDR_BOOT_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++#define __virt_to_fix(x) ((FIXADDR_TOP - ((x)&PAGE_MASK)) >> PAGE_SHIFT)
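++
++/*
++ * Worked example (assuming FIXADDR_TOP == 0xfffff000 and 4k pages):
++ * __fix_to_virt(0) == 0xfffff000 and __fix_to_virt(1) == 0xffffe000,
++ * i.e. indices grow downwards from the top of the fixmap.
++ */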
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * this branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++ * illegal way. (such as mixing up address types or using
++ * out-of-range indices).
++ *
++ * If it doesn't get removed, the linker will complain
++ * loudly with a reasonably clear error message.
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
++
++static inline unsigned long virt_to_fix(const unsigned long vaddr)
++{
++ BUG_ON(vaddr >= FIXADDR_TOP || vaddr < FIXADDR_START);
++ return __virt_to_fix(vaddr);
++}
++
++#endif /* !__ASSEMBLY__ */
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/gnttab_dma.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/gnttab_dma.h 2007-08-06 15:10:49.000000000 +0200
+@@ -0,0 +1,41 @@
++/*
++ * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
++ * Copyright (c) 2007 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#ifndef _ASM_I386_GNTTAB_DMA_H
++#define _ASM_I386_GNTTAB_DMA_H
++
++static inline int gnttab_dma_local_pfn(struct page *page)
++{
++ /* Has it become a local MFN? */
++ return pfn_valid(mfn_to_local_pfn(pfn_to_mfn(page_to_pfn(page))));
++}
++
++static inline maddr_t gnttab_dma_map_page(struct page *page)
++{
++ __gnttab_dma_map_page(page);
++ return ((maddr_t)pfn_to_mfn(page_to_pfn(page)) << PAGE_SHIFT);
++}
++
++static inline void gnttab_dma_unmap_page(maddr_t maddr)
++{
++ __gnttab_dma_unmap_page(virt_to_page(bus_to_virt(maddr)));
++}
++
++#endif /* _ASM_I386_GNTTAB_DMA_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/highmem.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/highmem.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,97 @@
++/*
++ * highmem.h: virtual kernel memory mappings for high memory
++ *
++ * Used in CONFIG_HIGHMEM systems for memory pages which
++ * are not addressable by direct kernel virtual addresses.
++ *
++ * Copyright (C) 1999 Gerhard Wichert, Siemens AG
++ * Gerhard.Wichert@pdb.siemens.de
++ *
++ *
++ * Redesigned the x86 32-bit VM architecture to deal with
++ * up to 16 Terabyte physical memory. With current x86 CPUs
++ * we now support up to 64 Gigabytes physical RAM.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#ifndef _ASM_HIGHMEM_H
++#define _ASM_HIGHMEM_H
++
++#ifdef __KERNEL__
++
++#include <linux/interrupt.h>
++#include <linux/threads.h>
++#include <asm/kmap_types.h>
++#include <asm/tlbflush.h>
++
++/* declarations for highmem.c */
++extern unsigned long highstart_pfn, highend_pfn;
++
++extern pte_t *kmap_pte;
++extern pgprot_t kmap_prot;
++extern pte_t *pkmap_page_table;
++
++/*
++ * Right now we initialize only a single pte table. It can be extended
++ * easily, subsequent pte tables have to be allocated in one physical
++ * chunk of RAM.
++ */
++#ifdef CONFIG_X86_PAE
++#define LAST_PKMAP 512
++#else
++#define LAST_PKMAP 1024
++#endif
++/*
++ * Ordering is:
++ *
++ * FIXADDR_TOP
++ * fixed_addresses
++ * FIXADDR_START
++ * temp fixed addresses
++ * FIXADDR_BOOT_START
++ * Persistent kmap area
++ * PKMAP_BASE
++ * VMALLOC_END
++ * Vmalloc area
++ * VMALLOC_START
++ * high_memory
++ */
++#define PKMAP_BASE ( (FIXADDR_BOOT_START - PAGE_SIZE*(LAST_PKMAP + 1)) & PMD_MASK )
++#define LAST_PKMAP_MASK (LAST_PKMAP-1)
++#define PKMAP_NR(virt) ((virt-PKMAP_BASE) >> PAGE_SHIFT)
++#define PKMAP_ADDR(nr) (PKMAP_BASE + ((nr) << PAGE_SHIFT))
++
++extern void * FASTCALL(kmap_high(struct page *page));
++extern void FASTCALL(kunmap_high(struct page *page));
++
++void *kmap(struct page *page);
++void kunmap(struct page *page);
++void *kmap_atomic(struct page *page, enum km_type type);
++void *kmap_atomic_pte(struct page *page, enum km_type type);
++void kunmap_atomic(void *kvaddr, enum km_type type);
++void *kmap_atomic_pfn(unsigned long pfn, enum km_type type);
++struct page *kmap_atomic_to_page(void *ptr);
++
++#define flush_cache_kmaps() do { } while (0)
++
++void clear_highpage(struct page *);
++static inline void clear_user_highpage(struct page *page, unsigned long vaddr)
++{
++ clear_highpage(page);
++}
++#define __HAVE_ARCH_CLEAR_HIGHPAGE
++#define __HAVE_ARCH_CLEAR_USER_HIGHPAGE
++
++void copy_highpage(struct page *to, struct page *from);
++static inline void copy_user_highpage(struct page *to, struct page *from,
++ unsigned long vaddr)
++{
++ copy_highpage(to, from);
++}
++#define __HAVE_ARCH_COPY_HIGHPAGE
++#define __HAVE_ARCH_COPY_USER_HIGHPAGE
++
++#endif /* __KERNEL__ */
++
++#endif /* _ASM_HIGHMEM_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_32.h 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,409 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov hypercall_stubs,%%eax; " \
++ "add $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++ "call *%%eax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ type __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ type __res; \
++ long __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ type __res; \
++ long __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3, __ign4; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3, __ign4, __ign5; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=b" (__ign1), "=c" (__ign2), \
++ "=d" (__ign3), "=S" (__ign4), "=D" (__ign5) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)), "4" ((long)(a4)), \
++ "5" ((long)(a5)) \
++ : "memory" ); \
++ __res; \
++})
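++
++/*
++ * As the constraints above encode, i386 hypercall arguments are passed
++ * in %ebx, %ecx, %edx, %esi and %edi (in that order) and the result is
++ * returned in %eax.
++ */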
++
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++ const trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int __must_check
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, unsigned int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int __must_check
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++ unsigned long event_selector, unsigned long event_address,
++ unsigned long failsafe_selector, unsigned long failsafe_address)
++{
++ return _hypercall4(int, set_callbacks,
++ event_selector, event_address,
++ failsafe_selector, failsafe_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ unsigned long timeout_hi = (unsigned long)(timeout>>32);
++ unsigned long timeout_lo = (unsigned long)timeout;
++ return _hypercall2(long, set_timer_op, timeout_lo, timeout_hi);
++}
++
++static inline int __must_check
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++ unsigned int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++ unsigned int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++ u64 ma, u64 desc)
++{
++ return _hypercall4(int, update_descriptor, ma, ma>>32, desc, desc>>32);
++}
++
++static inline int __must_check
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, unsigned int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall4(int, update_va_mapping, va,
++ new_val.pte_low, pte_hi, flags);
++}
++
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_console_io(
++ int cmd, unsigned int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int __must_check
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ unsigned long pte_hi = 0;
++#ifdef CONFIG_X86_PAE
++ pte_hi = new_val.pte_high;
++#endif
++ return _hypercall5(int, update_va_mapping_otherdomain, va,
++ new_val.pte_low, pte_hi, flags, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++ int cmd, unsigned int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int __must_check
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++#endif
++
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++#endif
++
++static inline int __must_check
++HYPERVISOR_callback_op(
++ int cmd, const void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++#endif /* __HYPERCALL_H__ */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypervisor.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypervisor.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,259 @@
++/******************************************************************************
++ * hypervisor.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERVISOR_H__
++#define __HYPERVISOR_H__
++
++#include <linux/types.h>
++#include <linux/kernel.h>
++#include <linux/version.h>
++#include <linux/errno.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/platform.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <xen/interface/sched.h>
++#include <xen/interface/nmi.h>
++#include <asm/ptrace.h>
++#include <asm/page.h>
++#if defined(__i386__)
++# ifdef CONFIG_X86_PAE
++# include <asm-generic/pgtable-nopud.h>
++# else
++# include <asm-generic/pgtable-nopmd.h>
++# endif
++#elif defined(__x86_64__) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++# include <asm-generic/pgtable-nopud.h>
++#endif
++
++extern shared_info_t *HYPERVISOR_shared_info;
++
++#define vcpu_info(cpu) (HYPERVISOR_shared_info->vcpu_info + (cpu))
++#ifdef CONFIG_SMP
++#define current_vcpu_info() vcpu_info(smp_processor_id())
++#else
++#define current_vcpu_info() vcpu_info(0)
++#endif
++
++#ifdef CONFIG_X86_32
++extern unsigned long hypervisor_virt_start;
++#endif
++
++/* arch/xen/i386/kernel/setup.c */
++extern start_info_t *xen_start_info;
++#ifdef CONFIG_XEN_PRIVILEGED_GUEST
++#define is_initial_xendomain() (xen_start_info->flags & SIF_INITDOMAIN)
++#else
++#define is_initial_xendomain() 0
++#endif
++
++/* arch/xen/kernel/evtchn.c */
++/* Force a proper event-channel callback from Xen. */
++void force_evtchn_callback(void);
++
++/* arch/xen/kernel/process.c */
++void xen_cpu_idle (void);
++
++/* arch/xen/i386/kernel/hypervisor.c */
++void do_hypervisor_callback(struct pt_regs *regs);
++
++/* arch/xen/i386/mm/hypervisor.c */
++/*
++ * NB. ptr values should be PHYSICAL, not MACHINE. 'vals' should already
++ * be MACHINE addresses.
++ */
++
++void xen_pt_switch(unsigned long ptr);
++void xen_new_user_pt(unsigned long ptr); /* x86_64 only */
++void xen_load_gs(unsigned int selector); /* x86_64 only */
++void xen_tlb_flush(void);
++void xen_invlpg(unsigned long ptr);
++
++void xen_l1_entry_update(pte_t *ptr, pte_t val);
++void xen_l2_entry_update(pmd_t *ptr, pmd_t val);
++void xen_l3_entry_update(pud_t *ptr, pud_t val); /* x86_64/PAE */
++void xen_l4_entry_update(pgd_t *ptr, pgd_t val); /* x86_64 only */
++void xen_pgd_pin(unsigned long ptr);
++void xen_pgd_unpin(unsigned long ptr);
++
++void xen_set_ldt(const void *ptr, unsigned int ents);
++
++#ifdef CONFIG_SMP
++#include <linux/cpumask.h>
++void xen_tlb_flush_all(void);
++void xen_invlpg_all(unsigned long ptr);
++void xen_tlb_flush_mask(cpumask_t *mask);
++void xen_invlpg_mask(cpumask_t *mask, unsigned long ptr);
++#endif
++
++/* Returns zero on success else negative errno. */
++int xen_create_contiguous_region(
++ unsigned long vstart, unsigned int order, unsigned int address_bits);
++void xen_destroy_contiguous_region(
++ unsigned long vstart, unsigned int order);
++
++struct page;
++
++int xen_limit_pages_to_max_mfn(
++ struct page *pages, unsigned int order, unsigned int address_bits);
++
++/* Turn jiffies into Xen system time. */
++u64 jiffies_to_st(unsigned long jiffies);
++
++#ifdef CONFIG_XEN_SCRUB_PAGES
++void scrub_pages(void *, unsigned int);
++#else
++#define scrub_pages(_p,_n) ((void)0)
++#endif
++
++#include <xen/hypercall.h>
++
++#if defined(CONFIG_X86_64)
++#define MULTI_UVMFLAGS_INDEX 2
++#define MULTI_UVMDOMID_INDEX 3
++#else
++#define MULTI_UVMFLAGS_INDEX 3
++#define MULTI_UVMDOMID_INDEX 4
++#endif
++
++#ifdef CONFIG_XEN
++#define is_running_on_xen() 1
++#else
++extern char *hypercall_stubs;
++#define is_running_on_xen() (!!hypercall_stubs)
++#endif
++
++static inline int
++HYPERVISOR_yield(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++static inline int
++HYPERVISOR_block(
++ void)
++{
++ int rc = HYPERVISOR_sched_op(SCHEDOP_block, NULL);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_block, 0);
++#endif
++
++ return rc;
++}
++
++static inline void /*__noreturn*/
++HYPERVISOR_shutdown(
++ unsigned int reason)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = reason
++ };
++
++ VOID(HYPERVISOR_sched_op(SCHEDOP_shutdown, &sched_shutdown));
++#if CONFIG_XEN_COMPAT <= 0x030002
++ VOID(HYPERVISOR_sched_op_compat(SCHEDOP_shutdown, reason));
++#endif
++ /* Don't recurse needlessly. */
++ BUG_ON(reason != SHUTDOWN_crash);
++ for(;;);
++}
++
++static inline int __must_check
++HYPERVISOR_poll(
++ evtchn_port_t *ports, unsigned int nr_ports, u64 timeout)
++{
++ int rc;
++ struct sched_poll sched_poll = {
++ .nr_ports = nr_ports,
++ .timeout = jiffies_to_st(timeout)
++ };
++ set_xen_guest_handle(sched_poll.ports, ports);
++
++ rc = HYPERVISOR_sched_op(SCHEDOP_poll, &sched_poll);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = HYPERVISOR_sched_op_compat(SCHEDOP_yield, 0);
++#endif
++
++ return rc;
++}
++
++#ifdef CONFIG_XEN
++
++static inline void
++MULTI_update_va_mapping(
++ multicall_entry_t *mcl, unsigned long va,
++ pte_t new_val, unsigned long flags)
++{
++ mcl->op = __HYPERVISOR_update_va_mapping;
++ mcl->args[0] = va;
++#if defined(CONFIG_X86_64)
++ mcl->args[1] = new_val.pte;
++#elif defined(CONFIG_X86_PAE)
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = new_val.pte_high;
++#else
++ mcl->args[1] = new_val.pte_low;
++ mcl->args[2] = 0;
++#endif
++ mcl->args[MULTI_UVMFLAGS_INDEX] = flags;
++}
++
++static inline void
++MULTI_grant_table_op(multicall_entry_t *mcl, unsigned int cmd,
++ void *uop, unsigned int count)
++{
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = cmd;
++ mcl->args[1] = (unsigned long)uop;
++ mcl->args[2] = count;
++}
++
++#else /* !defined(CONFIG_XEN) */
++
++/* Multicalls not supported for HVM guests. */
++#define MULTI_update_va_mapping(a,b,c,d) ((void)0)
++#define MULTI_grant_table_op(a,b,c,d) ((void)0)
++
++#endif
++
++#endif /* __HYPERVISOR_H__ */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,127 @@
++/*
++ * include/asm-i386/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++
++/*
++ * The use of 'barrier' in the following reflects their use as local-lock
++ * operations. Reentrancy must be prevented (e.g., __cli()) /before/ the
++ * following critical operations are executed. All critical operations
++ * must complete /before/ reentrancy is permitted (e.g., __sti()). The
++ * Alpha architecture, for example, also includes these barriers.
++ */
++
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
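++
++/*
++ * Note that "flags" here is the vCPU's event-channel upcall mask, not
++ * the hardware EFLAGS word: non-zero means event delivery (interrupts)
++ * is disabled.
++ */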
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if (unlikely(_vcpu->evtchn_upcall_pending)) \
++ force_evtchn_callback(); \
++} while (0)
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shutdown the processor:
++ */
++void halt(void);
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * For spinlocks, etc:
++ */
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
++
++#endif /* __ASSEMBLY__ */
++
++/*
++ * Do the CPU's IRQ-state tracing from assembly code. We call a
++ * C function, so save all the C-clobbered registers:
++ */
++#ifdef CONFIG_TRACE_IRQFLAGS
++
++# define TRACE_IRQS_ON \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_on; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++# define TRACE_IRQS_OFF \
++ pushl %eax; \
++ pushl %ecx; \
++ pushl %edx; \
++ call trace_hardirqs_off; \
++ popl %edx; \
++ popl %ecx; \
++ popl %eax;
++
++#else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++#endif
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_32.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,193 @@
++#ifndef _I386_MADDR_H
++#define _I386_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<31)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++#ifdef CONFIG_X86_PAE
++typedef unsigned long long paddr_t;
++typedef unsigned long long maddr_t;
++#else
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++#endif
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++extern unsigned long max_mapnr;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return max_mapnr;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movl %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movl %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 4\n"
++ " .long 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (max_mapnr) );
++
++ return pfn;
++}
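++
++/*
++ * The fixup entry above makes a faulting m2p lookup return max_mapnr,
++ * which callers treat as "no valid PFN" (it forces !pfn_valid()).
++ */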
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < max_mapnr)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return max_mapnr; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(max_mapnr && pfn >= max_mapnr);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
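++
++/*
++ * Example (illustrative): for phys 0x12345 with 4k pages, frame 0x12 is
++ * translated via pfn_to_mfn() and the in-page offset 0x345 is kept, so
++ * machine == (pfn_to_mfn(0x12) << PAGE_SHIFT) | 0x345.
++ */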
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
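++
++/*
++ * Sketch of the conversions above: the frame number is translated and
++ * the intra-page offset preserved, so (assuming pfn is in our p2m)
++ *
++ *	paddr_t pa = ((paddr_t)pfn << PAGE_SHIFT) | 0x123;
++ *	maddr_t ma = phys_to_machine(pa);
++ *
++ * satisfies machine_to_phys(ma) == pa and (ma & ~PAGE_MASK) == 0x123.
++ */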
++
++#ifdef CONFIG_X86_PAE
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to pfn_to_mfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ /*
++ * In PAE mode, the NX bit needs to be dealt with in the value
++ * passed to mfn_to_pfn(). On x86_64, we need to mask it off,
++ * but for i386 the conversion to ulong for the argument will
++ * clip it off.
++ */
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
++#endif
++
++#ifdef CONFIG_X86_PAE
++#define __pte_ma(x) ((pte_t) { (x), (maddr_t)(x) >> 32 } )
++static inline pte_t pfn_pte_ma(unsigned long page_nr, pgprot_t pgprot)
++{
++ pte_t pte;
++
++ pte.pte_high = (page_nr >> (32 - PAGE_SHIFT)) | \
++ (pgprot_val(pgprot) >> 32);
++ pte.pte_high &= (__supported_pte_mask >> 32);
++ pte.pte_low = ((page_nr << PAGE_SHIFT) | pgprot_val(pgprot)) & \
++ __supported_pte_mask;
++ return pte;
++}
++#else
++#define __pte_ma(x) ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot) __pte_ma(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
++#endif
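++
++/*
++ * pfn_pte_ma() is the machine-frame analogue of pfn_pte(): the frame
++ * number is used as-is rather than being translated through the p2m
++ * table. An illustrative use, for a frame whose MFN is already known
++ * (e.g. a granted foreign page):
++ *
++ *	pte_t pte = pfn_pte_ma(mfn, PAGE_KERNEL);
++ *	rc = HYPERVISOR_update_va_mapping(va, pte, UVMF_INVLPG);
++ */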
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* _I386_MADDR_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,108 @@
++#ifndef __I386_SCHED_H
++#define __I386_SCHED_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/tlbflush.h>
++
++/*
++ * Used for LDT copy/destruction.
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if 0 /* XEN: no lazy tlb */
++ unsigned cpu = smp_processor_id();
++ if (per_cpu(cpu_tlbstate, cpu).state == TLBSTATE_OK)
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_LAZY;
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %fs and %gs. No need to save %es and %ds, as those
++ * are always kernel segments while inside the kernel. Must
++ * happen before reload of cr3/ldt (i.e., not in __switch_to).
++ */
++ asm volatile ( "mov %%fs,%0 ; mov %%gs,%1"
++ : "=m" (current->thread.fs),
++ "=m" (current->thread.gs));
++ asm volatile ( "movl %0,%%fs ; movl %0,%%gs"
++ : : "r" (0) );
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void switch_mm(struct mm_struct *prev,
++ struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ int cpu = smp_processor_id();
++ struct mmuext_op _op[2], *op = _op;
++
++ if (likely(prev != next)) {
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !test_bit(PG_pinned, &virt_to_page(next->pgd)->flags));
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if 0 /* XEN: no lazy tlb */
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ per_cpu(cpu_tlbstate, cpu).active_mm = next;
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* Re-load page tables: load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /*
++ * load the LDT, if the LDT is different:
++ */
++ if (unlikely(prev->context.ldt != next->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if 0 /* XEN: no lazy tlb */
++ else {
++ per_cpu(cpu_tlbstate, cpu).state = TLBSTATE_OK;
++ BUG_ON(per_cpu(cpu_tlbstate, cpu).active_mm != next);
++
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload %cr3.
++ */
++ load_cr3(next->pgd);
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
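++
++/*
++ * Note: the cr3 switch and (optional) LDT load above are queued into
++ * _op[] and issued as a single batched hypercall. The unbatched
++ * equivalent would cost one hypercall per operation, e.g.:
++ *
++ *	struct mmuext_op op = { .cmd = MMUEXT_NEW_BASEPTR };
++ *	op.arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ *	BUG_ON(HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF));
++ */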
++
++#define deactivate_mm(tsk, mm) \
++ asm("movl %0,%%fs ; movl %0,%%gs": :"r" (0))
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!test_bit(PG_pinned, &virt_to_page(next->pgd)->flags))
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pci_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pci_32.h 2007-09-14 11:14:51.000000000 +0200
+@@ -0,0 +1,148 @@
++#ifndef __i386_PCI_H
++#define __i386_PCI_H
++
++
++#ifdef __KERNEL__
++#include <linux/mm.h> /* for struct page */
++
++/* Can be used to override the logic in pci_scan_bus for skipping
++ already-configured bus numbers - to be used for buggy BIOSes
++ or architectures with incomplete PCI setup by the loader. */
++
++#ifdef CONFIG_PCI
++extern unsigned int pcibios_assign_all_busses(void);
++#else
++#define pcibios_assign_all_busses() 0
++#endif
++
++#include <asm/hypervisor.h>
++#define pcibios_scan_all_fns(a, b) (!is_initial_xendomain())
++
++extern unsigned long pci_mem_start;
++#define PCIBIOS_MIN_IO 0x1000
++#define PCIBIOS_MIN_MEM (pci_mem_start)
++
++#define PCIBIOS_MIN_CARDBUS_IO 0x4000
++
++void pcibios_config_init(void);
++struct pci_bus * pcibios_scan_root(int bus);
++
++void pcibios_set_master(struct pci_dev *dev);
++void pcibios_penalize_isa_irq(int irq, int active);
++struct irq_routing_table *pcibios_get_irq_routing_table(void);
++int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq);
++
++/* Dynamic DMA mapping stuff.
++ * i386 has everything mapped statically.
++ */
++
++#include <linux/types.h>
++#include <linux/slab.h>
++#include <asm/scatterlist.h>
++#include <linux/string.h>
++#include <asm/io.h>
++
++struct pci_dev;
++
++#ifdef CONFIG_SWIOTLB
++
++
++/* On Xen we use SWIOTLB instead of blk-specific bounce buffers. */
++#define PCI_DMA_BUS_IS_PHYS (0)
++
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \
++ dma_addr_t ADDR_NAME;
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \
++ __u32 LEN_NAME;
++#define pci_unmap_addr(PTR, ADDR_NAME) \
++ ((PTR)->ADDR_NAME)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \
++ (((PTR)->ADDR_NAME) = (VAL))
++#define pci_unmap_len(PTR, LEN_NAME) \
++ ((PTR)->LEN_NAME)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \
++ (((PTR)->LEN_NAME) = (VAL))
++
++#else
++
++/* The PCI address space does equal the physical memory
++ * address space. The networking and block device layers use
++ * this boolean for bounce buffer decisions.
++ */
++#define PCI_DMA_BUS_IS_PHYS (1)
++
++/* pci_unmap_{page,single} are nops, so... */
++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
++#define pci_unmap_addr(PTR, ADDR_NAME) (0)
++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
++#define pci_unmap_len(PTR, LEN_NAME) (0)
++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) do { } while (0)
++
++#endif
++
++/* This is always fine. */
++#define pci_dac_dma_supported(pci_dev, mask) (1)
++
++static inline dma64_addr_t
++pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page, unsigned long offset, int direction)
++{
++ return ((dma64_addr_t) page_to_phys(page) +
++ (dma64_addr_t) offset);
++}
++
++static inline struct page *
++pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return pfn_to_page(dma_addr >> PAGE_SHIFT);
++}
++
++static inline unsigned long
++pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
++{
++ return (dma_addr & ~PAGE_MASK);
++}
++
++static inline void
++pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++}
++
++static inline void
++pci_dac_dma_sync_single_for_device(struct pci_dev *pdev, dma64_addr_t dma_addr, size_t len, int direction)
++{
++ flush_write_buffers();
++}
++
++#define HAVE_PCI_MMAP
++extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
++ enum pci_mmap_state mmap_state, int write_combine);
++
++
++static inline void pcibios_add_platform_entries(struct pci_dev *dev)
++{
++}
++
++#ifdef CONFIG_PCI
++static inline void pci_dma_burst_advice(struct pci_dev *pdev,
++ enum pci_dma_burst_strategy *strat,
++ unsigned long *strategy_parameter)
++{
++ *strat = PCI_DMA_BURST_INFINITY;
++ *strategy_parameter = ~0UL;
++}
++#endif
++
++#endif /* __KERNEL__ */
++
++#ifdef CONFIG_XEN_PCIDEV_FRONTEND
++#include <xen/pcifront.h>
++#endif /* CONFIG_XEN_PCIDEV_FRONTEND */
++
++/* implement the pci_ DMA API in terms of the generic device dma_ one */
++#include <asm-generic/pci-dma-compat.h>
++
++/* generic pci stuff */
++#include <asm-generic/pci.h>
++
++#endif /* __i386_PCI_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_32.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,59 @@
++#ifndef _I386_PGALLOC_H
++#define _I386_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++#include <linux/mm.h> /* for struct page */
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#define pmd_populate_kernel(mm, pmd, pte) \
++ set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++
++#define pmd_populate(mm, pmd, pte) \
++do { \
++ unsigned long pfn = page_to_pfn(pte); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) { \
++ if (!PageHighMem(pte)) \
++ BUG_ON(HYPERVISOR_update_va_mapping( \
++ (unsigned long)__va(pfn << PAGE_SHIFT), \
++ pfn_pte(pfn, PAGE_KERNEL_RO), 0)); \
++ else if (!test_and_set_bit(PG_pinned, &pte->flags)) \
++ kmap_flush_unused(); \
++ set_pmd(pmd, \
++ __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT))); \
++ } else \
++ *(pmd) = __pmd(_PAGE_TABLE + ((paddr_t)pfn << PAGE_SHIFT)); \
++} while (0)
++
++/*
++ * Allocate and free page tables.
++ */
++extern pgd_t *pgd_alloc(struct mm_struct *);
++extern void pgd_free(pgd_t *pgd);
++
++extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
++extern struct page *pte_alloc_one(struct mm_struct *, unsigned long);
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ make_lowmem_page_writable(pte, XENFEAT_writable_page_tables);
++ free_page((unsigned long)pte);
++}
++
++extern void pte_free(struct page *pte);
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++
++#ifdef CONFIG_X86_PAE
++/*
++ * In the PAE case we free the pmds as part of the pgd.
++ */
++#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
++#define pmd_free(x) do { } while (0)
++#define __pmd_free_tlb(tlb,x) do { } while (0)
++#define pud_populate(mm, pmd, pte) BUG()
++#endif
++
++#define check_pgt_cache() do { } while (0)
++
++#endif /* _I386_PGALLOC_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level-defs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level-defs.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,24 @@
++#ifndef _I386_PGTABLE_3LEVEL_DEFS_H
++#define _I386_PGTABLE_3LEVEL_DEFS_H
++
++#define HAVE_SHARED_KERNEL_PMD 0
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 30
++#define PTRS_PER_PGD 4
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
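++
++/*
++ * Resulting split of a 32-bit virtual address under PAE (with
++ * PAGE_SHIFT == 12), as a sanity check of the constants above:
++ *
++ *	bits 31..30	pgd index	(PTRS_PER_PGD == 4)
++ *	bits 29..21	pmd index	(PTRS_PER_PMD == 512)
++ *	bits 20..12	pte index	(PTRS_PER_PTE == 512)
++ *	bits 11..0	offset into the 4kB page
++ */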
++
++#endif /* _I386_PGTABLE_3LEVEL_DEFS_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable-3level.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,211 @@
++#ifndef _I386_PGTABLE_3LEVEL_H
++#define _I386_PGTABLE_3LEVEL_H
++
++#include <asm-generic/pgtable-nopud.h>
++
++/*
++ * Intel Physical Address Extension (PAE) Mode - three-level page
++ * tables on PPro+ CPUs.
++ *
++ * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
++ */
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%016Lx pfn %08lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), (pmd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016Lx pfn %08Lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & PTE_MASK) >> PAGE_SHIFT)
++
++#define pud_none(pud) 0
++#define pud_bad(pud) 0
++#define pud_present(pud) 1
++
++/*
++ * Is the pte executable?
++ */
++static inline int pte_x(pte_t pte)
++{
++ return !(__pte_val(pte) & _PAGE_NX);
++}
++
++/*
++ * All present user-pages with !NX bit are user-executable:
++ */
++static inline int pte_exec(pte_t pte)
++{
++ return pte_user(pte) && pte_x(pte);
++}
++/*
++ * All present pages with !NX bit are kernel-executable:
++ */
++static inline int pte_exec_kernel(pte_t pte)
++{
++ return pte_x(pte);
++}
++
++/* Rules for using set_pte: the pte being assigned *must* be
++ * either not present or in a state where the hardware will
++ * not attempt to update the pte. In places where this is
++ * not possible, use ptep_get_and_clear to obtain the old pte
++ * value and then use set_pte to update it. -ben
++ */
++#define __HAVE_ARCH_SET_PTE_ATOMIC
++
++static inline void set_pte(pte_t *ptep, pte_t pte)
++{
++ ptep->pte_high = pte.pte_high;
++ smp_wmb();
++ ptep->pte_low = pte.pte_low;
++}
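++
++/*
++ * Why this ordering is safe: the present bit lives in pte_low, so given
++ * the rule above (the old entry is not present, or the hardware will
++ * not touch it), writing pte_high first and pte_low last - with the
++ * write barrier in between - means a concurrent walker observes either
++ * the old entry or the complete new one, never a present pte paired
++ * with a stale high word.
++ */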
++#define set_pte_atomic(pteptr,pteval) \
++ set_64bit((unsigned long long *)(pteptr),__pte_val(pteval))
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define set_pte_at_sync(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), UVMF_INVLPG)) { \
++ set_pte((ptep), (pteval)); \
++ xen_invlpg((addr)); \
++ } \
++} while (0)
++
++#define set_pmd(pmdptr,pmdval) \
++ xen_l2_entry_update((pmdptr), (pmdval))
++#define set_pud(pudptr,pudval) \
++ xen_l3_entry_update((pudptr), (pudval))
++
++/*
++ * Pentium-II erratum A13: in PAE mode we explicitly have to flush
++ * the TLB via cr3 if the top-level pgd is changed...
++ * We do not let the generic code free and clear pgd entries due to
++ * this erratum.
++ */
++static inline void pud_clear (pud_t * pud) { }
++
++#define pud_page(pud) \
++((struct page *) __va(pud_val(pud) & PAGE_MASK))
++
++#define pud_page_kernel(pud) \
++((unsigned long) __va(pud_val(pud) & PAGE_MASK))
++
++
++/* Find an entry in the second-level page table. */
++#define pmd_offset(pud, address) ((pmd_t *) pud_page(*(pud)) + \
++ pmd_index(address))
++
++static inline int pte_none(pte_t pte)
++{
++ return !(pte.pte_low | pte.pte_high);
++}
++
++/*
++ * For PTEs and PDEs, we must clear the P-bit first when clearing a page table
++ * entry, so clear the bottom half first and enforce ordering with a compiler
++ * barrier.
++ */
++static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ if ((mm != current->mm && mm != &init_mm)
++ || HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ ptep->pte_low = 0;
++ smp_wmb();
++ ptep->pte_high = 0;
++ }
++}
++
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0)) {
++ uint64_t val = __pte_val(pte);
++ if (__cmpxchg64(ptep, val, 0) != val) {
++ /* xchg acts as a barrier before the setting of the high bits */
++ pte.pte_low = xchg(&ptep->pte_low, 0);
++ pte.pte_high = ptep->pte_high;
++ ptep->pte_high = 0;
++ }
++ }
++ }
++ return pte;
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte_low = 0; \
++ smp_wmb(); \
++ __ptep->pte_high = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
++
++static inline int pte_same(pte_t a, pte_t b)
++{
++ return a.pte_low == b.pte_low && a.pte_high == b.pte_high;
++}
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++#define __pte_mfn(_pte) (((_pte).pte_low >> PAGE_SHIFT) | \
++ ((_pte).pte_high << (32-PAGE_SHIFT)))
++#define pte_mfn(_pte) ((_pte).pte_low & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte_low & _PAGE_IO ? max_mapnr : \
++ (_pte).pte_low & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : \
++ __pte_mfn(_pte))
++
++extern unsigned long long __supported_pte_mask;
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pte((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++static inline pmd_t pfn_pmd(unsigned long page_nr, pgprot_t pgprot)
++{
++ return __pmd((((unsigned long long)page_nr << PAGE_SHIFT) |
++ pgprot_val(pgprot)) & __supported_pte_mask);
++}
++
++/*
++ * Bits 0, 6 and 7 are taken in the low part of the pte,
++ * so we put the 32 bits of offset into the high part.
++ */
++#define pte_to_pgoff(pte) ((pte).pte_high)
++#define pgoff_to_pte(off) ((pte_t) { _PAGE_FILE, (off) })
++#define PTE_FILE_MAX_BITS 32
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val) & 0x1f)
++#define __swp_offset(x) ((x).val >> 5)
++#define __swp_entry(type, offset) ((swp_entry_t){(type) | (offset) << 5})
++#define __pte_to_swp_entry(pte) ((swp_entry_t){ (pte).pte_high })
++#define __swp_entry_to_pte(x) ((pte_t){ 0, (x).val })
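++
++/*
++ * Illustrative round-trip of the swap encoding above (type in bits
++ * 0..4, offset in the remaining 27 bits, all held in pte_high so
++ * pte_low stays 0 and the entry remains non-present):
++ *
++ *	swp_entry_t e = __swp_entry(7, 0x1000);
++ *	__swp_type(e) == 7, __swp_offset(e) == 0x1000
++ *	pte_t pte = __swp_entry_to_pte(e);
++ *	pte.pte_low == 0, pte.pte_high == e.val
++ */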
++
++#define __pmd_free_tlb(tlb, x) do { } while (0)
++
++void vmalloc_sync_all(void);
++
++#endif /* _I386_PGTABLE_3LEVEL_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_32.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,537 @@
++#ifndef _I386_PGTABLE_H
++#define _I386_PGTABLE_H
++
++#include <asm/hypervisor.h>
++
++/*
++ * The Linux memory management assumes a three-level page table setup. On
++ * the i386, we use that, but "fold" the mid level into the top-level page
++ * table, so that we physically have the same two-level page table as the
++ * i386 mmu expects.
++ *
++ * This file contains the functions and defines necessary to modify and use
++ * the i386 page table tree.
++ */
++#ifndef __ASSEMBLY__
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <linux/threads.h>
++
++#ifndef _I386_BITOPS_H
++#include <asm/bitops.h>
++#endif
++
++#include <linux/slab.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++
++/* Is this pagetable pinned? */
++#define PG_pinned PG_arch_1
++
++struct mm_struct;
++struct vm_area_struct;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t *swapper_pg_dir;
++extern kmem_cache_t *pgd_cache;
++extern kmem_cache_t *pmd_cache;
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++
++void pmd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_ctor(void *, kmem_cache_t *, unsigned long);
++void pgd_dtor(void *, kmem_cache_t *, unsigned long);
++void pgtable_cache_init(void);
++void paging_init(void);
++
++/*
++ * The Linux x86 paging architecture is 'compile-time dual-mode': it
++ * implements both the traditional 2-level x86 page tables and the
++ * newer 3-level PAE-mode page tables.
++ */
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level-defs.h>
++# define PMD_SIZE (1UL << PMD_SHIFT)
++# define PMD_MASK (~(PMD_SIZE-1))
++#else
++# include <asm/pgtable-2level-defs.h>
++#endif
++
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
++#define FIRST_USER_ADDRESS 0
++
++#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
++#define KERNEL_PGD_PTRS (PTRS_PER_PGD-USER_PGD_PTRS)
++
++#define TWOLEVEL_PGDIR_SHIFT 22
++#define BOOT_USER_PGD_PTRS (__PAGE_OFFSET >> TWOLEVEL_PGDIR_SHIFT)
++#define BOOT_KERNEL_PGD_PTRS (1024-BOOT_USER_PGD_PTRS)
++
++/* Just any arbitrary offset to the start of the vmalloc VM area: the
++ * current 8MB value just means that there will be an 8MB "hole" after the
++ * physical memory until the kernel virtual memory starts. That means that
++ * any out-of-bounds memory accesses will hopefully be caught.
++ * The vmalloc() routines leave a hole of 4kB between each vmalloced
++ * area for the same reason. ;)
++ */
++#define VMALLOC_OFFSET (8*1024*1024)
++#define VMALLOC_START (((unsigned long) high_memory + vmalloc_earlyreserve + \
++ 2*VMALLOC_OFFSET-1) & ~(VMALLOC_OFFSET-1))
++#ifdef CONFIG_HIGHMEM
++# define VMALLOC_END (PKMAP_BASE-2*PAGE_SIZE)
++#else
++# define VMALLOC_END (FIXADDR_START-2*PAGE_SIZE)
++#endif
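++
++/*
++ * Worked example of the VMALLOC_START rounding (a sketch assuming
++ * PAGE_OFFSET == 0xC0000000, 512MB of lowmem, vmalloc_earlyreserve
++ * == 0, i.e. high_memory == 0xE0000000):
++ *
++ *	(0xE0000000 + 2*0x800000 - 1) & ~0x7FFFFF == 0xE0800000
++ *
++ * so the vmalloc mappings begin one aligned 8MB guard hole above the
++ * end of lowmem.
++ */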
++
++/*
++ * _PAGE_PSE set in the page directory entry just means that
++ * the page directory entry points directly to a 4MB-aligned block of
++ * memory.
++ */
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++/*#define _PAGE_BIT_UNUSED1 9*/ /* available for programmer */
++#define _PAGE_BIT_UNUSED2 10
++#define _PAGE_BIT_UNUSED3 11
++#define _PAGE_BIT_NX 63
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 4 MB (or 2MB) page, Pentium+, if present.. */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry PPro+ */
++/*#define _PAGE_UNUSED1 0x200*/ /* available for programmer */
++#define _PAGE_UNUSED2 0x400
++#define _PAGE_UNUSED3 0x800
++
++/* If _PAGE_PRESENT is clear, we use these: */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_PROTNONE 0x080 /* if the user mapped it with PROT_NONE;
++ pte_present gives true */
++#ifdef CONFIG_X86_PAE
++#define _PAGE_NX (1ULL<<_PAGE_BIT_NX)
++#else
++#define _PAGE_NX 0
++#endif
++
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO 0x200
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
++
++#define PAGE_NONE \
++ __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_SHARED_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY \
++ PAGE_COPY_NOEXEC
++#define PAGE_READONLY \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC \
++ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define _PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
++#define _PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
++
++extern unsigned long long __PAGE_KERNEL, __PAGE_KERNEL_EXEC;
++#define __PAGE_KERNEL_RO (__PAGE_KERNEL & ~_PAGE_RW)
++#define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++#define PAGE_KERNEL __pgprot(__PAGE_KERNEL)
++#define PAGE_KERNEL_RO __pgprot(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_NOCACHE __pgprot(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_LARGE __pgprot(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_LARGE_EXEC __pgprot(__PAGE_KERNEL_LARGE_EXEC)
++
++/*
++ * The i386 can't do page protection for execute separately from read:
++ * exec permission is treated the same as read. Also, write permissions
++ * imply read permissions.
++ * This is the closest we can get.
++ */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
++/*
++ * Define this if things work differently on an i386 and an i486:
++ * it will (on an i486) warn about kernel memory accesses that are
++ * done without an 'access_ok(VERIFY_WRITE,..)'
++ */
++#undef TEST_ACCESS_OK
++
++/* The boot page tables (all created as a single array) */
++extern unsigned long pg0[];
++
++#define pte_present(x) ((x).pte_low & (_PAGE_PRESENT | _PAGE_PROTNONE))
++
++/* To avoid harmful races, pmd_none(x) should check only the lower word when PAE is enabled */
++#define pmd_none(x) (!(unsigned long)__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit, since writable
++ page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x) (__pmd_val(x))
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER & ~_PAGE_PRESENT)) != (_KERNPG_TABLE & ~_PAGE_PRESENT))
++#else
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
++#define pmd_bad(x) ((__pmd_val(x) & (~PAGE_MASK & ~_PAGE_USER)) != _KERNPG_TABLE)
++#endif
++
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not..
++ */
++static inline int pte_user(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return (pte).pte_low & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return (pte).pte_low & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return (pte).pte_low & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return (pte).pte_low & _PAGE_RW; }
++static inline int pte_huge(pte_t pte) { return (pte).pte_low & _PAGE_PSE; }
++
++/*
++ * The following only works if pte_present() is not true.
++ */
++static inline int pte_file(pte_t pte) { return (pte).pte_low & _PAGE_FILE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { (pte).pte_low &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { (pte).pte_low &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { (pte).pte_low &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { (pte).pte_low |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { (pte).pte_low |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { (pte).pte_low |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { (pte).pte_low |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { (pte).pte_low |= _PAGE_PSE; return pte; }
++
++#ifdef CONFIG_X86_PAE
++# include <asm/pgtable-3level.h>
++#else
++# include <asm/pgtable-2level.h>
++#endif
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) { \
++ __pte = pte_mkclean(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ } \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ __pte = pte_mkold(__pte); \
++ if ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte, 0)) \
++ (ptep)->pte_low = __pte.pte_low; \
++ __ret; \
++})
++
++#define ptep_get_and_clear_full(mm, addr, ptep, full) \
++ ((full) ? ({ \
++ pte_t __res = *(ptep); \
++ if (test_bit(PG_pinned, &virt_to_page((mm)->pgd)->flags)) \
++ xen_l1_entry_update(ptep, __pte(0)); \
++ else \
++ *(ptep) = __pte(0); \
++ __res; \
++ }) : \
++ ptep_get_and_clear(mm, addr, ptep))
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * clone_pgd_range(pgd_t *dst, pgd_t *src, int count);
++ *
++ * dst - pointer to pgd range anywhere on a pgd page
++ * src - ""
++ * count - the number of pgds to copy.
++ *
++ * dst and src can be on the same page, but the range must not overlap,
++ * and must not cross a page boundary.
++ */
++static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
++{
++ memcpy(dst, src, count * sizeof(pgd_t));
++}
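++
++/*
++ * Illustrative use (a sketch): copying the kernel's pgd slots into a
++ * freshly allocated pgd:
++ *
++ *	clone_pgd_range(new_pgd + USER_PTRS_PER_PGD,
++ *			swapper_pg_dir + USER_PTRS_PER_PGD,
++ *			KERNEL_PGD_PTRS);
++ */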
++
++/*
++ * Macro to mark a page protection value as "uncacheable". On processors which do not support
++ * it, this is a no-op.
++ */
++#define pgprot_noncached(prot) ((boot_cpu_data.x86 > 3) \
++ ? (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT)) : (prot))
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ paddr_t pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++#ifdef CONFIG_X86_PAE
++ pteval &= __supported_pte_mask;
++#endif
++ return __pte(pteval);
++}
++
++#define pmd_large(pmd) \
++((__pmd_val(pmd) & (_PAGE_PSE|_PAGE_PRESENT)) == (_PAGE_PSE|_PAGE_PRESENT))
++
++/*
++ * the pgd page can be thought of as an array like this: pgd_t[PTRS_PER_PGD]
++ *
++ * this macro returns the index of the entry in the pgd page which would
++ * control the given virtual address
++ */
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index_k(addr) pgd_index(addr)
++
++/*
++ * pgd_offset() returns a (pgd_t *)
++ * pgd_index() is used to get the offset into the pgd page's array of pgd_t's;
++ */
++#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
++
++/*
++ * a shortcut which implies the use of the kernel's pgd, instead
++ * of a process's
++ */
++#define pgd_offset_k(address) pgd_offset(&init_mm, address)
++
++/*
++ * the pmd page can be thought of as an array like this: pmd_t[PTRS_PER_PMD]
++ *
++ * this macro returns the index of the entry in the pmd page which would
++ * control the given virtual address
++ */
++#define pmd_index(address) \
++ (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++
++/*
++ * the pte page can be thought of as an array like this: pte_t[PTRS_PER_PTE]
++ *
++ * this macro returns the index of the entry in the pte page which would
++ * control the given virtual address
++ */
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) \
++ ((pte_t *) pmd_page_kernel(*(dir)) + pte_index(address))
++
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_page_kernel(pmd) \
++ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
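++
++/*
++ * Sketch of a manual walk of the kernel page tables with the helpers
++ * above (lookup_address() below does this for real, including PSE
++ * handling):
++ *
++ *	pgd_t *pgd = pgd_offset_k(addr);
++ *	pud_t *pud = pud_offset(pgd, addr);
++ *	pmd_t *pmd = pmd_offset(pud, addr);
++ *	pte_t *pte = pte_offset_kernel(pmd, addr);
++ */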
++
++/*
++ * Helper function that returns the kernel pagetable entry controlling
++ * the virtual address 'address'. NULL means no pagetable entry present.
++ * NOTE: the return type is pte_t but if the pmd is PSE then we return it
++ * as a pte too.
++ */
++extern pte_t *lookup_address(unsigned long address);
++
++/*
++ * Make a given kernel text page executable/non-executable.
++ * Returns the previous executability setting of that page (which
++ * is used to restore the previous state). Used by the SMP bootup code.
++ * NOTE: this is an __init function for security reasons.
++ */
++#ifdef CONFIG_X86_PAE
++ extern int set_kernel_exec(unsigned long vaddr, int enable);
++#else
++ static inline int set_kernel_exec(unsigned long vaddr, int enable) { return 0;}
++#endif
++
++extern void noexec_setup(const char *str);
++
++#if defined(CONFIG_HIGHPTE)
++#define pte_offset_map(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE0) + \
++ pte_index(address))
++#define pte_offset_map_nested(dir, address) \
++ ((pte_t *)kmap_atomic_pte(pmd_page(*(dir)),KM_PTE1) + \
++ pte_index(address))
++#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
++#define pte_unmap_nested(pte) kunmap_atomic(pte, KM_PTE1)
++#else
++#define pte_offset_map(dir, address) \
++ ((pte_t *)page_address(pmd_page(*(dir))) + pte_index(address))
++#define pte_offset_map_nested(dir, address) pte_offset_map(dir, address)
++#define pte_unmap(pte) do { } while (0)
++#define pte_unmap_nested(pte) do { } while (0)
++#endif
++
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/*
++ * The i386 doesn't have any external MMU info: the kernel page
++ * tables contain all the necessary information.
++ *
++ * Also, we only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time.
++ */
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
++
++#include <xen/features.h>
++void make_lowmem_page_readonly(void *va, unsigned int feature);
++void make_lowmem_page_writable(void *va, unsigned int feature);
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define virt_to_ptep(va) \
++({ \
++ pte_t *__ptep = lookup_address((unsigned long)(va)); \
++ BUG_ON(!__ptep || !pte_present(*__ptep)); \
++ __ptep; \
++})
++
++#define arbitrary_virt_to_machine(va) \
++ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
++ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
++
++#endif /* !__ASSEMBLY__ */
++
++#ifdef CONFIG_FLATMEM
++#define kern_addr_valid(addr) (1)
++#endif /* CONFIG_FLATMEM */
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot);
++
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
++ xen_change_pte_range(mm, pmd, addr, end, newprot)
++
++#define io_remap_pfn_range(vma,from,pfn,size,prot) \
++direct_remap_pfn_range(vma,from,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _I386_PGTABLE_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/processor_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/processor_32.h 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,743 @@
++/*
++ * include/asm-i386/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_I386_PROCESSOR_H
++#define __ASM_I386_PROCESSOR_H
++
++#include <asm/vm86.h>
++#include <asm/math_emu.h>
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <asm/msr.h>
++#include <asm/system.h>
++#include <linux/cache.h>
++#include <linux/threads.h>
++#include <asm/percpu.h>
++#include <linux/cpumask.h>
++#include <xen/interface/physdev.h>
++
++/* flag for disabling the tsc */
++extern int tsc_disable;
++
++struct desc_struct {
++ unsigned long a,b;
++};
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; __asm__("movl $1f,%0\n1:":"=g" (pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ * Members of this structure are referenced in head.S, so think twice
++ * before touching them. [mj]
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ char wp_works_ok; /* It doesn't on 386's */
++ char hlt_works_ok; /* Problems on some 486Dx4's and old 386's */
++ char hard_math;
++ char rfu;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ unsigned long x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB - valid for CPUS which support this
++ call */
++ int x86_cache_alignment; /* In bytes */
++ char fdiv_bug;
++ char f00f_bug;
++ char coma_bug;
++ char pad0;
++ int x86_power;
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ unsigned char x86_max_cores; /* cpuid returned max cores value */
++ unsigned char apicid;
++#ifdef CONFIG_SMP
++ unsigned char booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical processor id. */
++ __u8 cpu_core_id; /* Core id */
++#endif
++} __attribute__((__aligned__(SMP_CACHE_BYTES)));
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NSC 8
++#define X86_VENDOR_NUM 9
++#define X86_VENDOR_UNKNOWN 0xff
++
++/*
++ * capabilities of CPUs
++ */
++
++extern struct cpuinfo_x86 boot_cpu_data;
++extern struct cpuinfo_x86 new_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++extern struct tss_struct doublefault_tss;
++DECLARE_PER_CPU(struct tss_struct, init_tss);
++#endif
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern int cpu_llc_id[NR_CPUS];
++extern char ignore_fpu_irq;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++#ifdef CONFIG_X86_HT
++extern void detect_ht(struct cpuinfo_x86 *c);
++#else
++static inline void detect_ht(struct cpuinfo_x86 *c) {}
++#endif
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Generic CPUID function
++ * clear %ecx since some CPUs (Cyrix MII) do not set or clear %ecx,
++ * resulting in stale register contents being returned.
++ */
++static inline void cpuid(unsigned int op, unsigned int *eax, unsigned int *ebx, unsigned int *ecx, unsigned int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c"(0));
++}
++
++/* Some CPUID calls want 'count' to be placed in ecx */
++static inline void cpuid_count(int op, int count, int *eax, int *ebx, int *ecx,
++ int *edx)
++{
++ __asm__(XEN_CPUID
++ : "=a" (*eax),
++ "=b" (*ebx),
++ "=c" (*ecx),
++ "=d" (*edx)
++ : "0" (op), "c" (count));
++}
++
++/*
++ * CPUID functions returning a single datum
++ */
++static inline unsigned int cpuid_eax(unsigned int op)
++{
++ unsigned int eax;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax)
++ : "0" (op)
++ : "bx", "cx", "dx");
++ return eax;
++}
++static inline unsigned int cpuid_ebx(unsigned int op)
++{
++ unsigned int eax, ebx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=b" (ebx)
++ : "0" (op)
++ : "cx", "dx" );
++ return ebx;
++}
++static inline unsigned int cpuid_ecx(unsigned int op)
++{
++ unsigned int eax, ecx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=c" (ecx)
++ : "0" (op)
++ : "bx", "dx" );
++ return ecx;
++}
++static inline unsigned int cpuid_edx(unsigned int op)
++{
++ unsigned int eax, edx;
++
++ __asm__(XEN_CPUID
++ : "=a" (eax), "=d" (edx)
++ : "0" (op)
++ : "bx", "cx");
++ return edx;
++}
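++
++/*
++ * Illustrative use of the accessors above, e.g. testing a feature flag
++ * from the standard CPUID leaf (EDX bit 25 of leaf 1 is SSE):
++ *
++ *	if (cpuid_edx(1) & (1 << 25))
++ *		... SSE is available ...
++ */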
++
++#define load_cr3(pgdir) write_cr3(__pa(pgdir))
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (i.e.,
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPUs that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features |= mask;
++ cr4 = read_cr4();
++ cr4 |= mask;
++ write_cr4(cr4);
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ unsigned cr4;
++ mmu_cr4_features &= ~mask;
++ cr4 = read_cr4();
++ cr4 &= ~mask;
++ write_cr4(cr4);
++}
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++
++#define CX86_PCR0 0x20
++#define CX86_GCR 0xb8
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_PCR1 0xf0
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
++/* from system description table in BIOS. Mostly for MCA use, but
++others may find it useful. */
++extern unsigned int machine_id;
++extern unsigned int machine_submodel_id;
++extern unsigned int BIOS_revision;
++extern unsigned int mca_pentium_flag;
++
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++/*
++ * User space process size: 3GB (default).
++ */
++#define TASK_SIZE (PAGE_OFFSET)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++#define INVALID_IO_BITMAP_OFFSET_LAZY 0x9000
++
++struct i387_fsave_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ long status; /* software status information */
++};
++
++struct i387_fxsave_struct {
++ unsigned short cwd;
++ unsigned short swd;
++ unsigned short twd;
++ unsigned short fop;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long mxcsr;
++ long mxcsr_mask;
++ long st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ long xmm_space[32]; /* 8*16 bytes for each XMM-reg = 128 bytes */
++ long padding[56];
++} __attribute__ ((aligned (16)));
++
++struct i387_soft_struct {
++ long cwd;
++ long swd;
++ long twd;
++ long fip;
++ long fcs;
++ long foo;
++ long fos;
++ long st_space[20]; /* 8*10 bytes for each FP-reg = 80 bytes */
++ unsigned char ftop, changed, lookahead, no_update, rm, alimit;
++ struct info *info;
++ unsigned long entry_eip;
++};
++
++union i387_union {
++ struct i387_fsave_struct fsave;
++ struct i387_fxsave_struct fxsave;
++ struct i387_soft_struct soft;
++};
++
++typedef struct {
++ unsigned long seg;
++} mm_segment_t;
++
++struct thread_struct;
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ unsigned short back_link,__blh;
++ unsigned long esp0;
++ unsigned short ss0,__ss0h;
++ unsigned long esp1;
++ unsigned short ss1,__ss1h; /* ss1 is used to cache MSR_IA32_SYSENTER_CS */
++ unsigned long esp2;
++ unsigned short ss2,__ss2h;
++ unsigned long __cr3;
++ unsigned long eip;
++ unsigned long eflags;
++ unsigned long eax,ecx,edx,ebx;
++ unsigned long esp;
++ unsigned long ebp;
++ unsigned long esi;
++ unsigned long edi;
++ unsigned short es, __esh;
++ unsigned short cs, __csh;
++ unsigned short ss, __ssh;
++ unsigned short ds, __dsh;
++ unsigned short fs, __fsh;
++ unsigned short gs, __gsh;
++ unsigned short ldt, __ldth;
++ unsigned short trace, io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++ /*
++ * Cache the current maximum and the last task that used the bitmap:
++ */
++ unsigned long io_bitmap_max;
++ struct thread_struct *io_bitmap_owner;
++ /*
++ * pads the TSS to be cacheline-aligned (size is 0x100)
++ */
++ unsigned long __cacheline_filler[35];
++ /*
++ * .. and then another 0x100 bytes for emergency kernel stack
++ */
++ unsigned long stack[64];
++} __attribute__((packed));
++#endif
++
++#define ARCH_MIN_TASKALIGN 16
++
++struct thread_struct {
++/* cached TLS descriptors. */
++ struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned long esp0;
++ unsigned long sysenter_cs;
++ unsigned long eip;
++ unsigned long esp;
++ unsigned long fs;
++ unsigned long gs;
++/* Hardware debugging registers */
++ unsigned long debugreg[8]; /* %%db0-7 debug registers */
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387;
++/* virtual 86 mode info */
++ struct vm86_struct __user * vm86_info;
++ unsigned long screen_bitmap;
++ unsigned long v86flags, v86mask, saved_esp0;
++ unsigned int saved_fs, saved_gs;
++/* IO permissions */
++ unsigned long *io_bitmap_ptr;
++ unsigned long iopl;
++/* max allowed port in the bitmap, in bytes: */
++ unsigned long io_bitmap_max;
++};
++
++#define INIT_THREAD { \
++ .vm86_info = NULL, \
++ .sysenter_cs = __KERNEL_CS, \
++ .io_bitmap_ptr = NULL, \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++/*
++ * Note that the .io_bitmap member must be extra-big. This is because
++ * the CPU will access an additional byte beyond the end of the IO
++ * permission bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit.
++ */
++#define INIT_TSS { \
++ .esp0 = sizeof(init_stack) + (long)&init_stack, \
++ .ss0 = __KERNEL_DS, \
++ .ss1 = __KERNEL_CS, \
++ .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
++ .io_bitmap = { [ 0 ... IO_BITMAP_LONGS] = ~0 }, \
++}
++
++static inline void __load_esp0(struct tss_struct *tss, struct thread_struct *thread)
++{
++ tss->esp0 = thread->esp0;
++ /* This can only happen when SEP is enabled, no need to test "SEP"arately */
++ if (unlikely(tss->ss1 != thread->sysenter_cs)) {
++ tss->ss1 = thread->sysenter_cs;
++ wrmsr(MSR_IA32_SYSENTER_CS, thread->sysenter_cs, 0);
++ }
++}
++#define load_esp0(tss, thread) \
++ __load_esp0(tss, thread)
++#else
++#define load_esp0(tss, thread) do { \
++ if (HYPERVISOR_stack_switch(__KERNEL_DS, (thread)->esp0)) \
++ BUG(); \
++} while (0)
++#endif
++
++#define start_thread(regs, new_eip, new_esp) do { \
++ __asm__("movl %0,%%fs ; movl %0,%%gs": :"r" (0)); \
++ set_fs(USER_DS); \
++ regs->xds = __USER_DS; \
++ regs->xes = __USER_DS; \
++ regs->xss = __USER_DS; \
++ regs->xcs = __USER_CS; \
++ regs->eip = new_eip; \
++ regs->esp = new_esp; \
++} while (0)
++
++/*
++ * These special macros can be used to get or set a debugging register
++ */
++#define get_debugreg(var, register) \
++ (var) = HYPERVISOR_get_debugreg((register))
++#define set_debugreg(value, register) \
++ WARN_ON(HYPERVISOR_set_debugreg((register), (value)))
++
++/*
++ * Set IOPL bits in EFLAGS from given mask
++ */
++static inline void set_iopl_mask(unsigned mask)
++{
++ struct physdev_set_iopl set_iopl;
++
++ /* Force the change at ring 0. */
++ set_iopl.iopl = (mask == 0) ? 1 : (mask >> 12) & 3;
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl));
++}
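++
++/*
++ * Note on the arithmetic above: the IOPL field occupies bits 12-13 of
++ * EFLAGS (X86_EFLAGS_IOPL == 0x00003000), so (mask >> 12) & 3 extracts
++ * the requested level; e.g. mask == 0x3000 yields iopl == 3. A zero
++ * mask is forced to iopl 1, presumably so the kernel, which runs in
++ * ring 1 under Xen, retains I/O access.
++ */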
++
++/* Forward declaration, a strange C thing */
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++extern unsigned long thread_saved_pc(struct task_struct *tsk);
++void show_trace(struct task_struct *task, struct pt_regs *regs, unsigned long *stack);
++
++unsigned long get_wchan(struct task_struct *p);
++
++#define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
++#define KSTK_TOP(info) \
++({ \
++ unsigned long *__ptr = (unsigned long *)(info); \
++ (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
++})
++
++/*
++ * The below -8 is to reserve 8 bytes on top of the ring0 stack.
++ * This is necessary to guarantee that the entire "struct pt_regs"
++ * is accessible even if the CPU hasn't stored the SS/ESP registers
++ * on the stack (interrupt gate does not save these registers
++ * when switching to the same priv ring).
++ * Therefore beware: accessing the xss/esp fields of the
++ * "struct pt_regs" is possible, but they may contain the
++ * completely wrong values.
++ */
++#define task_pt_regs(task) \
++({ \
++ struct pt_regs *__regs__; \
++ __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
++ __regs__ - 1; \
++})
++
++#define KSTK_EIP(task) (task_pt_regs(task)->eip)
++#define KSTK_ESP(task) (task_pt_regs(task)->esp)
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++#define cpu_relax() rep_nop()
++
++/* generic versions from gas */
++#define GENERIC_NOP1 ".byte 0x90\n"
++#define GENERIC_NOP2 ".byte 0x89,0xf6\n"
++#define GENERIC_NOP3 ".byte 0x8d,0x76,0x00\n"
++#define GENERIC_NOP4 ".byte 0x8d,0x74,0x26,0x00\n"
++#define GENERIC_NOP5 GENERIC_NOP1 GENERIC_NOP4
++#define GENERIC_NOP6 ".byte 0x8d,0xb6,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP7 ".byte 0x8d,0xb4,0x26,0x00,0x00,0x00,0x00\n"
++#define GENERIC_NOP8 GENERIC_NOP1 GENERIC_NOP7
++
++/* Opteron nops */
++#define K8_NOP1 GENERIC_NOP1
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++/* K7 nops */
++/* uses eax dependencies (arbitrary choice) */
++#define K7_NOP1 GENERIC_NOP1
++#define K7_NOP2 ".byte 0x8b,0xc0\n"
++#define K7_NOP3 ".byte 0x8d,0x04,0x20\n"
++#define K7_NOP4 ".byte 0x8d,0x44,0x20,0x00\n"
++#define K7_NOP5 K7_NOP4 ASM_NOP1
++#define K7_NOP6 ".byte 0x8d,0x80,0,0,0,0\n"
++#define K7_NOP7 ".byte 0x8D,0x04,0x05,0,0,0,0\n"
++#define K7_NOP8 K7_NOP7 ASM_NOP1
++
++#ifdef CONFIG_MK8
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++#elif defined(CONFIG_MK7)
++#define ASM_NOP1 K7_NOP1
++#define ASM_NOP2 K7_NOP2
++#define ASM_NOP3 K7_NOP3
++#define ASM_NOP4 K7_NOP4
++#define ASM_NOP5 K7_NOP5
++#define ASM_NOP6 K7_NOP6
++#define ASM_NOP7 K7_NOP7
++#define ASM_NOP8 K7_NOP8
++#else
++#define ASM_NOP1 GENERIC_NOP1
++#define ASM_NOP2 GENERIC_NOP2
++#define ASM_NOP3 GENERIC_NOP3
++#define ASM_NOP4 GENERIC_NOP4
++#define ASM_NOP5 GENERIC_NOP5
++#define ASM_NOP6 GENERIC_NOP6
++#define ASM_NOP7 GENERIC_NOP7
++#define ASM_NOP8 GENERIC_NOP8
++#endif
++
++#define ASM_NOP_MAX 8
++
++/* Prefetch instructions for Pentium III and AMD Athlon */
++/* It's not worth caring about 3DNow! prefetches for the K6,
++ because they are microcoded there and very slow.
++ However, we currently don't do prefetches for pre-XP Athlons;
++ that should be fixed. */
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchnta (%1)",
++ X86_FEATURE_XMM,
++ "r" (x));
++}
++
++#define ARCH_HAS_PREFETCH
++#define ARCH_HAS_PREFETCHW
++#define ARCH_HAS_SPINLOCK_PREFETCH
++
++/* 3dnow! prefetch to get an exclusive cache line. Useful for
++ spinlocks to avoid one state transition in the cache coherency protocol. */
++static inline void prefetchw(const void *x)
++{
++ alternative_input(ASM_NOP4,
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++#define spin_lock_prefetch(x) prefetchw(x)
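
A usage sketch (struct my_node and process_node() are invented for illustration): issuing prefetch() on the next node while the current one is being processed hides part of the miss latency in pointer-chasing loops, and the prefetch instruction quietly ignores faulting addresses such as NULL.

    struct my_node { struct my_node *next; int payload; };
    extern void process_node(struct my_node *n);    /* hypothetical worker */

    static void walk_list(struct my_node *n)
    {
        for (; n; n = n->next) {
            prefetch(n->next);  /* start fetching the next node early */
            process_node(n);
        }
    }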
++
++extern void select_idle_routine(const struct cpuinfo_x86 *c);
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++extern void enable_sep_cpu(void);
++extern int sysenter_setup(void);
++
++#endif /* __ASM_I386_PROCESSOR_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/segment_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/segment_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,117 @@
++#ifndef _ASM_SEGMENT_H
++#define _ASM_SEGMENT_H
++
++/*
++ * The layout of the per-CPU GDT under Linux:
++ *
++ * 0 - null
++ * 1 - reserved
++ * 2 - reserved
++ * 3 - reserved
++ *
++ * 4 - unused <==== new cacheline
++ * 5 - unused
++ *
++ * ------- start of TLS (Thread-Local Storage) segments:
++ *
++ * 6 - TLS segment #1 [ glibc's TLS segment ]
++ * 7 - TLS segment #2 [ Wine's %fs Win32 segment ]
++ * 8 - TLS segment #3
++ * 9 - reserved
++ * 10 - reserved
++ * 11 - reserved
++ *
++ * ------- start of kernel segments:
++ *
++ * 12 - kernel code segment <==== new cacheline
++ * 13 - kernel data segment
++ * 14 - default user CS
++ * 15 - default user DS
++ * 16 - TSS
++ * 17 - LDT
++ * 18 - PNPBIOS support (16->32 gate)
++ * 19 - PNPBIOS support
++ * 20 - PNPBIOS support
++ * 21 - PNPBIOS support
++ * 22 - PNPBIOS support
++ * 23 - APM BIOS support
++ * 24 - APM BIOS support
++ * 25 - APM BIOS support
++ *
++ * 26 - ESPFIX small SS
++ * 27 - unused
++ * 28 - unused
++ * 29 - unused
++ * 30 - unused
++ * 31 - TSS for double fault handler
++ */
++#define GDT_ENTRY_TLS_ENTRIES 3
++#define GDT_ENTRY_TLS_MIN 6
++#define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
++
++#define TLS_SIZE (GDT_ENTRY_TLS_ENTRIES * 8)
++
++#define GDT_ENTRY_DEFAULT_USER_CS 14
++#define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS * 8 + 3)
++
++#define GDT_ENTRY_DEFAULT_USER_DS 15
++#define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS * 8 + 3)
++
++#define GDT_ENTRY_KERNEL_BASE 12
++
++#define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
++#define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
++#define GET_KERNEL_CS() (__KERNEL_CS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
++
++#define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
++#define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
++#define GET_KERNEL_DS() (__KERNEL_DS | (xen_feature(XENFEAT_supervisor_mode_kernel)?0:1) )
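
Worked out from the definitions above (a selector is the GDT index times 8, with the requested privilege level in the low two bits):

    __USER_CS   = 14 * 8 + 3 = 0x73   /* RPL 3: user code */
    __USER_DS   = 15 * 8 + 3 = 0x7b   /* RPL 3: user data */
    __KERNEL_CS = 12 * 8     = 0x60   /* GET_KERNEL_CS() ORs in RPL 1 when the
                                         kernel does not run in supervisor mode */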
++
++#define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
++#define GDT_ENTRY_LDT (GDT_ENTRY_KERNEL_BASE + 5)
++
++#define GDT_ENTRY_PNPBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 6)
++#define GDT_ENTRY_APMBIOS_BASE (GDT_ENTRY_KERNEL_BASE + 11)
++
++#define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
++#define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
++
++#define GDT_ENTRY_DOUBLEFAULT_TSS 31
++
++/*
++ * The GDT has 32 entries
++ */
++#define GDT_ENTRIES 32
++
++#define GDT_SIZE (GDT_ENTRIES * 8)
++
++/* Simple and small GDT entries for booting only */
++
++#define GDT_ENTRY_BOOT_CS 2
++#define __BOOT_CS (GDT_ENTRY_BOOT_CS * 8)
++
++#define GDT_ENTRY_BOOT_DS (GDT_ENTRY_BOOT_CS + 1)
++#define __BOOT_DS (GDT_ENTRY_BOOT_DS * 8)
++
++/* The PnP BIOS entries in the GDT */
++#define GDT_ENTRY_PNPBIOS_CS32 (GDT_ENTRY_PNPBIOS_BASE + 0)
++#define GDT_ENTRY_PNPBIOS_CS16 (GDT_ENTRY_PNPBIOS_BASE + 1)
++#define GDT_ENTRY_PNPBIOS_DS (GDT_ENTRY_PNPBIOS_BASE + 2)
++#define GDT_ENTRY_PNPBIOS_TS1 (GDT_ENTRY_PNPBIOS_BASE + 3)
++#define GDT_ENTRY_PNPBIOS_TS2 (GDT_ENTRY_PNPBIOS_BASE + 4)
++
++/* The PnP BIOS selectors */
++#define PNP_CS32 (GDT_ENTRY_PNPBIOS_CS32 * 8) /* segment for calling fn */
++#define PNP_CS16 (GDT_ENTRY_PNPBIOS_CS16 * 8) /* code segment for BIOS */
++#define PNP_DS (GDT_ENTRY_PNPBIOS_DS * 8) /* data segment for BIOS */
++#define PNP_TS1 (GDT_ENTRY_PNPBIOS_TS1 * 8) /* transfer data segment */
++#define PNP_TS2 (GDT_ENTRY_PNPBIOS_TS2 * 8) /* another data segment */
++
++/*
++ * The interrupt descriptor table has room for 256 entries;
++ * the size of the global descriptor table depends on the
++ * number of tasks we can have.
++ */
++#define IDT_ENTRIES 256
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/smp_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/smp_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,103 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#endif
++#endif
++
++#define BAD_APICID 0xFFu
++#ifdef CONFIG_SMP
++#ifndef __ASSEMBLY__
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern int pic_mode;
++extern int smp_num_siblings;
++extern cpumask_t cpu_sibling_map[];
++extern cpumask_t cpu_core_map[];
++
++extern void (*mtrr_hook) (void);
++extern void zap_low_mappings (void);
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++
++#define MAX_APICID 256
++extern u8 x86_cpu_to_apicid[];
++
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++
++#ifdef CONFIG_HOTPLUG_CPU
++extern void cpu_exit_clear(void);
++extern void cpu_uninit(void);
++#endif
++
++/*
++ * This function is needed by all SMP systems. It must _always_ be valid
++ * from the initial startup. We map APIC_BASE very early in page_setup(),
++ * so this is correct in the x86 case.
++ */
++#define raw_smp_processor_id() (current_thread_info()->cpu)
++
++extern cpumask_t cpu_possible_map;
++#define cpu_callin_map cpu_possible_map
++
++/* We don't mark CPUs online until __cpu_up(), so we need another measure */
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#ifdef CONFIG_X86_LOCAL_APIC
++
++#ifdef APIC_DEFINITION
++extern int hard_smp_processor_id(void);
++#else
++#include <mach_apicdef.h>
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned long *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++
++#endif
++
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++#endif /* !__ASSEMBLY__ */
++
++#else /* CONFIG_SMP */
++
++#define cpu_physical_id(cpu) boot_cpu_physical_apicid
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/swiotlb_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/swiotlb_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,43 @@
++#ifndef _ASM_SWIOTLB_H
++#define _ASM_SWIOTLB_H 1
++
++/* SWIOTLB interface */
++
++extern dma_addr_t swiotlb_map_single(struct device *hwdev, void *ptr, size_t size,
++ int dir);
++extern void swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_cpu(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_single_for_device(struct device *hwdev,
++ dma_addr_t dev_addr,
++ size_t size, int dir);
++extern void swiotlb_sync_sg_for_cpu(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern void swiotlb_sync_sg_for_device(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int dir);
++extern int swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern void swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++extern int swiotlb_dma_mapping_error(dma_addr_t dma_addr);
++#ifdef CONFIG_HIGHMEM
++extern dma_addr_t swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction);
++extern void swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction);
++#endif
++extern int swiotlb_dma_supported(struct device *hwdev, u64 mask);
++extern void swiotlb_init(void);
++
++#ifdef CONFIG_SWIOTLB
++extern int swiotlb;
++#else
++#define swiotlb 0
++#endif
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/synch_bitops.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/synch_bitops.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,126 @@
++#ifndef __XEN_SYNCH_BITOPS_H__
++#define __XEN_SYNCH_BITOPS_H__
++
++/*
++ * Copyright 1992, Linus Torvalds.
++ * Heavily modified to provide guaranteed strong synchronisation
++ * when communicating with Xen or other guest OSes running on other CPUs.
++ */
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define ADDR (*(volatile long *) addr)
++
++static __inline__ void synch_set_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btsl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_clear_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btrl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ void synch_change_bit(int nr, volatile void * addr)
++{
++ __asm__ __volatile__ (
++ "lock btcl %1,%0"
++ : "+m" (ADDR) : "Ir" (nr) : "memory" );
++}
++
++static __inline__ int synch_test_and_set_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btsl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_clear_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++ __asm__ __volatile__ (
++ "lock btrl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++static __inline__ int synch_test_and_change_bit(int nr, volatile void * addr)
++{
++ int oldbit;
++
++ __asm__ __volatile__ (
++ "lock btcl %2,%1\n\tsbbl %0,%0"
++ : "=r" (oldbit), "+m" (ADDR) : "Ir" (nr) : "memory");
++ return oldbit;
++}
++
++struct __synch_xchg_dummy { unsigned long a[100]; };
++#define __synch_xg(x) ((struct __synch_xchg_dummy *)(x))
++
++#define synch_cmpxchg(ptr, old, new) \
++((__typeof__(*(ptr)))__synch_cmpxchg((ptr),\
++ (unsigned long)(old), \
++ (unsigned long)(new), \
++ sizeof(*(ptr))))
++
++static inline unsigned long __synch_cmpxchg(volatile void *ptr,
++ unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("lock; cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__("lock; cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#ifdef CONFIG_X86_64
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__("lock; cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#else
++ case 4:
++ __asm__ __volatile__("lock; cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__synch_xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++#endif
++ }
++ return old;
++}
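
A usage sketch with invented names: the usual caller-side pattern retries until the compare-and-exchange confirms that neither another CPU nor the other end of a shared ring raced in between.

    /* Illustrative: atomically claim a zero-initialised ownership word. */
    static inline int claim_slot(unsigned long *owner, unsigned long me)
    {
        do {
            if (*owner)
                return 0;       /* somebody else owns the slot */
        } while (synch_cmpxchg(owner, 0UL, me) != 0UL);
        return 1;
    }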
++
++#define synch_test_bit test_bit
++
++#define synch_cmpxchg_subword synch_cmpxchg
++
++#endif /* __XEN_SYNCH_BITOPS_H__ */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/system_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/system_32.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,488 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/cpufeature.h>
++#include <linux/bitops.h> /* for LOCK_PREFIX */
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++
++#ifdef __KERNEL__
++
++struct task_struct; /* one of the stranger aspects of C forward declarations.. */
++extern struct task_struct * FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
++
++/*
++ * Saving eflags is important. It not only switches IOPL between tasks,
++ * it also protects other tasks from NT leaking through sysenter etc.
++ */
++#define switch_to(prev,next,last) do { \
++ unsigned long esi,edi; \
++ asm volatile("pushfl\n\t" /* Save flags */ \
++ "pushl %%ebp\n\t" \
++ "movl %%esp,%0\n\t" /* save ESP */ \
++ "movl %5,%%esp\n\t" /* restore ESP */ \
++ "movl $1f,%1\n\t" /* save EIP */ \
++ "pushl %6\n\t" /* restore EIP */ \
++ "jmp __switch_to\n" \
++ "1:\t" \
++ "popl %%ebp\n\t" \
++ "popfl" \
++ :"=m" (prev->thread.esp),"=m" (prev->thread.eip), \
++ "=a" (last),"=S" (esi),"=D" (edi) \
++ :"m" (next->thread.esp),"m" (next->thread.eip), \
++ "2" (prev), "d" (next)); \
++} while (0)
++
++#define _set_base(addr,base) do { unsigned long __pr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %%dl,%2\n\t" \
++ "movb %%dh,%3" \
++ :"=&d" (__pr) \
++ :"m" (*((addr)+2)), \
++ "m" (*((addr)+4)), \
++ "m" (*((addr)+7)), \
++ "0" (base) \
++ ); } while(0)
++
++#define _set_limit(addr,limit) do { unsigned long __lr; \
++__asm__ __volatile__ ("movw %%dx,%1\n\t" \
++ "rorl $16,%%edx\n\t" \
++ "movb %2,%%dh\n\t" \
++ "andb $0xf0,%%dh\n\t" \
++ "orb %%dh,%%dl\n\t" \
++ "movb %%dl,%2" \
++ :"=&d" (__lr) \
++ :"m" (*(addr)), \
++ "m" (*((addr)+6)), \
++ "0" (limit) \
++ ); } while(0)
++
++#define set_base(ldt,base) _set_base( ((char *)&(ldt)) , (base) )
++#define set_limit(ldt,limit) _set_limit( ((char *)&(ldt)) , ((limit)-1) )
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong..
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "mov %0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "pushl $0\n\t" \
++ "popl %%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 4\n\t" \
++ ".long 1b,3b\n" \
++ ".previous" \
++ : :"rm" (value))
++
++/*
++ * Save a segment register away
++ */
++#define savesegment(seg, value) \
++ asm volatile("mov %%" #seg ",%0":"=rm" (value))
++
++#define read_cr0() ({ \
++ unsigned int __dummy; \
++ __asm__ __volatile__( \
++ "movl %%cr0,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define write_cr0(x) \
++ __asm__ __volatile__("movl %0,%%cr0": :"r" (x))
++
++#define read_cr2() (current_vcpu_info()->arch.cr2)
++#define write_cr2(x) \
++ __asm__ __volatile__("movl %0,%%cr2": :"r" (x))
++
++#define read_cr3() ({ \
++ unsigned int __dummy; \
++ __asm__ ( \
++ "movl %%cr3,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy = xen_cr3_to_pfn(__dummy); \
++ mfn_to_pfn(__dummy) << PAGE_SHIFT; \
++})
++#define write_cr3(x) ({ \
++ unsigned int __dummy = pfn_to_mfn((x) >> PAGE_SHIFT); \
++ __dummy = xen_pfn_to_cr3(__dummy); \
++ __asm__ __volatile__("movl %0,%%cr3": :"r" (__dummy)); \
++})
++#define read_cr4() ({ \
++ unsigned int __dummy; \
++ __asm__( \
++ "movl %%cr4,%0\n\t" \
++ :"=r" (__dummy)); \
++ __dummy; \
++})
++#define read_cr4_safe() ({ \
++ unsigned int __dummy; \
++ /* This could fault if %cr4 does not exist */ \
++ __asm__("1: movl %%cr4, %0 \n" \
++ "2: \n" \
++ ".section __ex_table,\"a\" \n" \
++ ".long 1b,2b \n" \
++ ".previous \n" \
++ : "=r" (__dummy): "0" (0)); \
++ __dummy; \
++})
++
++#define write_cr4(x) \
++ __asm__ __volatile__("movl %0,%%cr4": :"r" (x))
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#endif /* __KERNEL__ */
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory")
++
++static inline unsigned long get_limit(unsigned long segment)
++{
++ unsigned long __limit;
++ __asm__("lsll %1,%0"
++ :"=r" (__limit):"r" (segment));
++ return __limit+1;
++}
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++struct __xchg_dummy { unsigned long a[100]; };
++#define __xg(x) ((struct __xchg_dummy *)(x))
++
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++/*
++ * The semantics of CMPXCHG8B are a bit strange; this is why
++ * there is a loop and the loading of %%eax and %%edx has to
++ * be inside. This inlines well in most cases, the cached
++ * cost is around 38 cycles. (in the future we might want
++ * to do a SIMD/3DNow!/MMX/FPU 64-bit store here, but that
++ * might have an implicit FPU-save as a cost, so it's not
++ * clear which path to take.)
++ *
++ * cmpxchg8b must be used with the lock prefix here to allow
++ * the instruction to be executed atomically, see page 3-102
++ * of the instruction set reference 24319102.pdf. We need
++ * the reader side to see the coherent 64bit value.
++ */
++static inline void __set_64bit (unsigned long long * ptr,
++ unsigned int low, unsigned int high)
++{
++ __asm__ __volatile__ (
++ "\n1:\t"
++ "movl (%0), %%eax\n\t"
++ "movl 4(%0), %%edx\n\t"
++ "lock cmpxchg8b (%0)\n\t"
++ "jnz 1b"
++ : /* no outputs */
++ : "D"(ptr),
++ "b"(low),
++ "c"(high)
++ : "ax","dx","memory");
++}
++
++static inline void __set_64bit_constant (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,(unsigned int)(value), (unsigned int)((value)>>32ULL));
++}
++#define ll_low(x) *(((unsigned int*)&(x))+0)
++#define ll_high(x) *(((unsigned int*)&(x))+1)
++
++static inline void __set_64bit_var (unsigned long long *ptr,
++ unsigned long long value)
++{
++ __set_64bit(ptr,ll_low(value), ll_high(value));
++}
++
++#define set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit_constant(ptr, value) : \
++ __set_64bit_var(ptr, value) )
++
++#define _set_64bit(ptr,value) \
++(__builtin_constant_p(value) ? \
++ __set_64bit(ptr, (unsigned int)(value), (unsigned int)((value)>>32ULL) ) : \
++ __set_64bit(ptr, ll_low(value), ll_high(value)) )
++
++#endif
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary,
++ * but generally the primitive is invalid; *ptr is an output argument. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#ifdef CONFIG_X86_CMPXCHG
++#define __HAVE_ARCH_CMPXCHG 1
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
++#endif
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
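
The canonical caller-side pattern, sketched with invented names: read the old value, compute the new one, and retry until cmpxchg() reports that no other CPU changed the word in between.

    static inline unsigned long set_flag_atomically(unsigned long *word,
                                                    unsigned long flag)
    {
        unsigned long old, new;

        do {
            old = *word;
            new = old | flag;
        } while (cmpxchg(word, old, new) != old);
        return new;
    }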
++
++#ifndef CONFIG_X86_CMPXCHG
++/*
++ * Building a kernel capable of running on an 80386. It may be necessary to
++ * simulate the cmpxchg on the 80386 CPU. For that purpose we define
++ * a function for each of the sizes we support.
++ */
++
++extern unsigned long cmpxchg_386_u8(volatile void *, u8, u8);
++extern unsigned long cmpxchg_386_u16(volatile void *, u16, u16);
++extern unsigned long cmpxchg_386_u32(volatile void *, u32, u32);
++
++static inline unsigned long cmpxchg_386(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ switch (size) {
++ case 1:
++ return cmpxchg_386_u8(ptr, old, new);
++ case 2:
++ return cmpxchg_386_u16(ptr, old, new);
++ case 4:
++ return cmpxchg_386_u32(ptr, old, new);
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n) \
++({ \
++ __typeof__(*(ptr)) __ret; \
++ if (likely(boot_cpu_data.x86 > 3)) \
++ __ret = __cmpxchg((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ else \
++ __ret = cmpxchg_386((ptr), (unsigned long)(o), \
++ (unsigned long)(n), sizeof(*(ptr))); \
++ __ret; \
++})
++#endif
++
++#ifdef CONFIG_X86_CMPXCHG64
++
++static inline unsigned long long __cmpxchg64(volatile void *ptr, unsigned long long old,
++ unsigned long long new)
++{
++ unsigned long long prev;
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchg8b %3"
++ : "=A"(prev)
++ : "b"((unsigned long)new),
++ "c"((unsigned long)(new >> 32)),
++ "m"(*__xg(ptr)),
++ "0"(old)
++ : "memory");
++ return prev;
++}
++
++#define cmpxchg64(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg64((ptr),(unsigned long long)(o),\
++ (unsigned long long)(n)))
++
++#endif
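
The same retry idiom extends to 64-bit quantities on 32-bit x86 via cmpxchg8b; a sketch with invented names (requires CONFIG_X86_CMPXCHG64):

    static inline unsigned long long add64(unsigned long long *ctr,
                                           unsigned long long delta)
    {
        unsigned long long old, new;

        do {
            old = *ctr;
            new = old + delta;
        } while (cmpxchg64(ctr, old, new) != old);
        return new;
    }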
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ *
++ * For now, "wmb()" doesn't actually do anything, as all
++ * Intel CPUs follow what Intel calls a *Processor Order*,
++ * in which all writes are seen in the program order even
++ * outside the CPU.
++ *
++ * I expect future Intel CPUs to have a weaker ordering,
++ * but I'd also expect them to finally get their act together
++ * and add some real memory barriers if so.
++ *
++ * Some non-Intel clones support out-of-order stores. wmb() ceases to be a
++ * nop for these.
++ */
++
++
++/*
++ * Actually only lfence would be needed for mb() because all stores done
++ * by the kernel should already be ordered. But keep a full barrier for now.
++ */
++
++#define mb() alternative("lock; addl $0,0(%%esp)", "mfence", X86_FEATURE_XMM2)
++#define rmb() alternative("lock; addl $0,0(%%esp)", "lfence", X86_FEATURE_XMM2)
++
++/**
++ * read_barrier_depends - Flush all pending reads that subsequent reads
++ * depend on.
++ *
++ * No data-dependent reads from memory-like regions are ever reordered
++ * over this barrier. All reads preceding this primitive are guaranteed
++ * to access memory (but not necessarily other CPUs' caches) before any
++ * reads following this primitive that depend on the data returned by
++ * any of the preceding reads. This primitive is much lighter weight than
++ * rmb() on most CPUs, and is never heavier weight than is
++ * rmb().
++ *
++ * These ordering constraints are respected by both the local CPU
++ * and the compiler.
++ *
++ * Ordering is not guaranteed by anything other than these primitives,
++ * not even by data dependencies. See the documentation for
++ * memory_barrier() for examples and URLs to more information.
++ *
++ * For example, the following code would force ordering (the initial
++ * value of "a" is zero, "b" is one, and "p" is "&a"):
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * b = 2;
++ * memory_barrier();
++ * p = &b; q = p;
++ * read_barrier_depends();
++ * d = *q;
++ * </programlisting>
++ *
++ * because the read of "*q" depends on the read of "p" and these
++ * two reads are separated by a read_barrier_depends(). However,
++ * the following code, with the same initial values for "a" and "b":
++ *
++ * <programlisting>
++ * CPU 0 CPU 1
++ *
++ * a = 2;
++ * memory_barrier();
++ * b = 3; y = b;
++ * read_barrier_depends();
++ * x = a;
++ * </programlisting>
++ *
++ * does not enforce ordering, since there is no data dependency between
++ * the read of "a" and the read of "b". Therefore, on some CPUs, such
++ * as Alpha, "y" could be set to 3 and "x" to 0. Use rmb()
++ * in cases like this where there are no data dependencies.
++ **/
++
++#define read_barrier_depends() do { } while(0)
++
++#ifdef CONFIG_X86_OOSTORE
++/* Actually there are no OOO-store-capable CPUs for now that do SSE,
++ but allow for the possibility already. */
++#define wmb() alternative("lock; addl $0,0(%%esp)", "sfence", X86_FEATURE_XMM)
++#else
++#define wmb() __asm__ __volatile__ ("": : :"memory")
++#endif
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() read_barrier_depends()
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do { } while(0)
++#define set_mb(var, value) do { var = value; barrier(); } while (0)
++#endif
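
A classic pairing sketch ('data' and 'ready' are invented): the producer orders its stores with smp_wmb() before publishing the flag, and the consumer orders its loads with smp_rmb() after seeing it, so the data read cannot be satisfied before the flag read.

    static int data, ready;

    static void producer(void)
    {
        data = 42;
        smp_wmb();      /* publish the data before the flag */
        ready = 1;
    }

    static void consumer(void)
    {
        while (!ready)
            cpu_relax();
        smp_rmb();      /* order the flag read before the data read */
        /* data is guaranteed to read as 42 here */
    }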
++
++#include <linux/irqflags.h>
++
++/*
++ * disable hlt during certain critical i/o operations
++ */
++#define HAVE_DISABLE_HLT
++void disable_hlt(void);
++void enable_hlt(void);
++
++extern int es7000_plat;
++void cpu_idle_wait(void);
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible:
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++void default_idle(void);
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_32.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_32.h 2007-11-26 16:59:25.000000000 +0100
+@@ -0,0 +1,101 @@
++#ifndef _I386_TLBFLUSH_H
++#define _I386_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++#define __flush_tlb_global() xen_tlb_flush()
++#define __flush_tlb_all() xen_tlb_flush()
++
++extern unsigned long pgkern_mask;
++
++#define cpu_has_invlpg (boot_cpu_data.x86 > 3)
++
++#define __flush_tlb_single(addr) xen_invlpg(addr)
++
++#define __flush_tlb_one(addr) __flush_tlb_single(addr)
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct TLBs
++ * - flush_tlb_all() flushes all processes TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * ..but the i386 has somewhat limited tlb flushing capabilities,
++ * and page-granular flushes are available only on i486 and up.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++struct tlb_state
++{
++ struct mm_struct *active_mm;
++ int state;
++ char __cacheline_padding[L1_CACHE_BYTES-8];
++};
++DECLARE_PER_CPU(struct tlb_state, cpu_tlbstate);
++
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* i386 does not keep any page table caches in TLB */
++}
++
++#endif /* _I386_TLBFLUSH_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/vga.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/vga.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,20 @@
++/*
++ * Access to VGA videoram
++ *
++ * (c) 1998 Martin Mares <mj@ucw.cz>
++ */
++
++#ifndef _LINUX_ASM_VGA_H_
++#define _LINUX_ASM_VGA_H_
++
++/*
++ * On the PC, we can just recalculate addresses and then
++ * access the videoram directly without any black magic.
++ */
++
++#define VGA_MAP_MEM(x,s) (unsigned long)isa_bus_to_virt(x)
++
++#define vga_readb(x) (*(x))
++#define vga_writeb(x,y) (*(y) = (x))
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/xenoprof.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/xenoprof.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,48 @@
++/******************************************************************************
++ * asm-i386/mach-xen/asm/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++#ifndef __ASM_XENOPROF_H__
++#define __ASM_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++struct super_block;
++struct dentry;
++int xenoprof_create_files(struct super_block * sb, struct dentry * root);
++#define HAVE_XENOPROF_CREATE_FILES
++
++struct xenoprof_init;
++void xenoprof_arch_init_counter(struct xenoprof_init *init);
++void xenoprof_arch_counter(void);
++void xenoprof_arch_start(void);
++void xenoprof_arch_stop(void);
++
++struct xenoprof_arch_shared_buffer {
++ /* nothing */
++};
++struct xenoprof_shared_buffer;
++void xenoprof_arch_unmap_shared_buffer(struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_get_buffer;
++int xenoprof_arch_map_shared_buffer(struct xenoprof_get_buffer* get_buffer, struct xenoprof_shared_buffer* sbuf);
++struct xenoprof_passive;
++int xenoprof_arch_set_passive(struct xenoprof_passive* pdomain, struct xenoprof_shared_buffer* sbuf);
++
++#endif /* CONFIG_XEN */
++#endif /* __ASM_XENOPROF_H__ */
+Index: head-2008-11-25/include/asm-x86/mach-xen/irq_vectors.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/irq_vectors.h 2008-09-25 13:55:32.000000000 +0200
+@@ -0,0 +1,125 @@
++/*
++ * This file should contain #defines for all of the interrupt vector
++ * numbers used by this architecture.
++ *
++ * In addition, there are some standard defines:
++ *
++ * FIRST_EXTERNAL_VECTOR:
++ * The first free place for external interrupts
++ *
++ * SYSCALL_VECTOR:
++ * The vector through which a syscall makes the user-to-kernel
++ * transition.
++ *
++ * TIMER_IRQ:
++ * The IRQ number the timer interrupt comes in at.
++ *
++ * NR_IRQS:
++ * The total number of interrupt vectors (including all the
++ * architecture specific interrupts) needed.
++ *
++ */
++#ifndef _ASM_IRQ_VECTORS_H
++#define _ASM_IRQ_VECTORS_H
++
++/*
++ * IDT vectors usable for external interrupt sources start
++ * at 0x20:
++ */
++#define FIRST_EXTERNAL_VECTOR 0x20
++
++#define SYSCALL_VECTOR 0x80
++
++/*
++ * Vectors 0x20-0x2f are used for ISA interrupts.
++ */
++
++#if 0
++/*
++ * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
++ *
++ * some of the following vectors are 'rare', they are merged
++ * into a single vector (CALL_FUNCTION_VECTOR) to save vector space.
++ * TLB, reschedule and local APIC vectors are performance-critical.
++ *
++ * Vectors 0xf0-0xfa are free (reserved for future Linux use).
++ */
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++#define INVALIDATE_TLB_VECTOR 0xfd
++#define RESCHEDULE_VECTOR 0xfc
++#define CALL_FUNCTION_VECTOR 0xfb
++
++#define THERMAL_APIC_VECTOR 0xf0
++/*
++ * Local APIC timer IRQ vector is on a different priority level,
++ * to work around the 'lost local interrupt if more than 2 IRQ
++ * sources per level' errata.
++ */
++#define LOCAL_TIMER_VECTOR 0xef
++#endif
++
++#define SPURIOUS_APIC_VECTOR 0xff
++#define ERROR_APIC_VECTOR 0xfe
++
++/*
++ * First APIC vector available to drivers: (vectors 0x30-0xee)
++ * we start at 0x31 to spread out vectors evenly between priority
++ * levels. (0x80 is the syscall vector)
++ */
++#define FIRST_DEVICE_VECTOR 0x31
++#define FIRST_SYSTEM_VECTOR 0xef
++
++/*
++ * 16 8259A IRQs, 208 potential APIC interrupt sources.
++ * Right now the APIC is mostly only used for SMP.
++ * 256 vectors is an architectural limit. (we can have
++ * more than 256 devices theoretically, but they will
++ * have to use shared interrupts)
++ * Since vectors 0x00-0x1f are used/reserved for the CPU,
++ * the usable vector space is 0x20-0xff (224 vectors)
++ */
++
++#define RESCHEDULE_VECTOR 0
++#define CALL_FUNCTION_VECTOR 1
++#define NR_IPIS 2
++
++/*
++ * The maximum number of vectors supported by i386 processors
++ * is limited to 256. For processors other than i386, NR_VECTORS
++ * should be changed accordingly.
++ */
++#define NR_VECTORS 256
++
++#define FPU_IRQ 13
++
++#define FIRST_VM86_IRQ 3
++#define LAST_VM86_IRQ 15
++#define invalid_vm86_irq(irq) ((irq) < 3 || (irq) > 15)
++
++/*
++ * The flat IRQ space is divided into two regions:
++ * 1. A one-to-one mapping of real physical IRQs. This space is only used
++ * if we have physical device-access privilege. This region is at the
++ * start of the IRQ space so that existing device drivers do not need
++ * to be modified to translate physical IRQ numbers into our IRQ space.
++ * 2. A dynamic mapping of inter-domain and Xen-sourced virtual IRQs. These
++ * are bound using the provided bind/unbind functions.
++ */
++
++#define PIRQ_BASE 0
++#if !defined(MAX_IO_APICS)
++# define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS)
++#elif NR_CPUS < MAX_IO_APICS
++# define NR_PIRQS (NR_VECTORS + 32 * NR_CPUS)
++#else
++# define NR_PIRQS (NR_VECTORS + 32 * MAX_IO_APICS)
++#endif
++
++#define DYNIRQ_BASE (PIRQ_BASE + NR_PIRQS)
++#define NR_DYNIRQS 256
++
++#define NR_IRQS (NR_PIRQS + NR_DYNIRQS)
++#define NR_IRQ_VECTORS NR_IRQS
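
Worked through with illustrative values (NR_VECTORS = 256, NR_CPUS = 8, MAX_IO_APICS undefined):

    NR_PIRQS    = 256 + 32 * 8 = 512   /* physical IRQs occupy [0, 512)  */
    DYNIRQ_BASE = 0 + 512      = 512   /* dynamic IRQs occupy [512, 768) */
    NR_IRQS     = 512 + 256    = 768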
++
++#endif /* _ASM_IRQ_VECTORS_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/mach_traps.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/mach_traps.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,33 @@
++/*
++ * include/asm-xen/asm-i386/mach-xen/mach_traps.h
++ *
++ * Machine specific NMI handling for Xen
++ */
++#ifndef _MACH_TRAPS_H
++#define _MACH_TRAPS_H
++
++#include <linux/bitops.h>
++#include <xen/interface/nmi.h>
++
++static inline void clear_mem_error(unsigned char reason) {}
++static inline void clear_io_check_error(unsigned char reason) {}
++
++static inline unsigned char get_nmi_reason(void)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned char reason = 0;
++
++ /* construct a value which looks like it came from
++ * port 0x61.
++ */
++ if (test_bit(_XEN_NMIREASON_io_error, &s->arch.nmi_reason))
++ reason |= 0x40;
++ if (test_bit(_XEN_NMIREASON_parity_error, &s->arch.nmi_reason))
++ reason |= 0x80;
++
++ return reason;
++}
++
++static inline void reassert_nmi(void) {}
++
++#endif /* !_MACH_TRAPS_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch.h 2007-06-12 13:14:02.000000000 +0200
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++void __init machine_specific_arch_setup(void);
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/desc_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/desc_64.h 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,265 @@
++/* Written 2000 by Andi Kleen */
++#ifndef __ARCH_DESC_H
++#define __ARCH_DESC_H
++
++#include <linux/threads.h>
++#include <asm/ldt.h>
++
++#ifndef __ASSEMBLY__
++
++#include <linux/string.h>
++#include <linux/smp.h>
++
++#include <asm/segment.h>
++#include <asm/mmu.h>
++
++// 8-byte segment descriptor
++struct desc_struct {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 4, s : 1, dpl : 2, p : 1;
++ unsigned limit : 4, avl : 1, l : 1, d : 1, g : 1, base2 : 8;
++} __attribute__((packed));
++
++struct n_desc_struct {
++ unsigned int a,b;
++};
++
++enum {
++ GATE_INTERRUPT = 0xE,
++ GATE_TRAP = 0xF,
++ GATE_CALL = 0xC,
++};
++
++// 16-byte gate
++struct gate_struct {
++ u16 offset_low;
++ u16 segment;
++ unsigned ist : 3, zero0 : 5, type : 5, dpl : 2, p : 1;
++ u16 offset_middle;
++ u32 offset_high;
++ u32 zero1;
++} __attribute__((packed));
++
++#define PTR_LOW(x) ((unsigned long)(x) & 0xFFFF)
++#define PTR_MIDDLE(x) (((unsigned long)(x) >> 16) & 0xFFFF)
++#define PTR_HIGH(x) ((unsigned long)(x) >> 32)
++
++enum {
++ DESC_TSS = 0x9,
++ DESC_LDT = 0x2,
++};
++
++// LDT or TSS descriptor in the GDT. 16 bytes.
++struct ldttss_desc {
++ u16 limit0;
++ u16 base0;
++ unsigned base1 : 8, type : 5, dpl : 2, p : 1;
++ unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
++ u32 base3;
++ u32 zero1;
++} __attribute__((packed));
++
++struct desc_ptr {
++ unsigned short size;
++ unsigned long address;
++} __attribute__((packed)) ;
++
++extern struct desc_ptr idt_descr, cpu_gdt_descr[NR_CPUS];
++
++extern struct desc_struct cpu_gdt_table[GDT_ENTRIES];
++
++#define load_TR_desc() asm volatile("ltr %w0"::"r" (GDT_ENTRY_TSS*8))
++#define load_LDT_desc() asm volatile("lldt %w0"::"r" (GDT_ENTRY_LDT*8))
++
++static inline void clear_LDT(void)
++{
++ int cpu = get_cpu();
++
++ /*
++ * NB. We load the default_ldt for lcall7/27 handling on demand, as
++ * it slows down context switching. No one uses it anyway.
++ */
++ cpu = cpu; /* XXX avoid compiler warning */
++ xen_set_ldt(NULL, 0);
++ put_cpu();
++}
++
++/*
++ * This is the ldt that every process will get unless we need
++ * something other than this.
++ */
++extern struct desc_struct default_ldt[];
++#ifndef CONFIG_X86_NO_IDT
++extern struct gate_struct idt_table[];
++#endif
++extern struct desc_ptr cpu_gdt_descr[];
++
++/* the cpu gdt accessor */
++#define cpu_gdt(_cpu) ((struct desc_struct *)cpu_gdt_descr[_cpu].address)
++
++static inline void _set_gate(void *adr, unsigned type, unsigned long func, unsigned dpl, unsigned ist)
++{
++ struct gate_struct s;
++ s.offset_low = PTR_LOW(func);
++ s.segment = __KERNEL_CS;
++ s.ist = ist;
++ s.p = 1;
++ s.dpl = dpl;
++ s.zero0 = 0;
++ s.zero1 = 0;
++ s.type = type;
++ s.offset_middle = PTR_MIDDLE(func);
++ s.offset_high = PTR_HIGH(func);
++ /* does not need to be atomic because it is only done once at setup time */
++ memcpy(adr, &s, 16);
++}
++
++#ifndef CONFIG_X86_NO_IDT
++static inline void set_intr_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, 0);
++}
++
++static inline void set_intr_gate_ist(int nr, void *func, unsigned ist)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 0, ist);
++}
++
++static inline void set_system_gate(int nr, void *func)
++{
++ BUG_ON((unsigned)nr > 0xFF);
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, 0);
++}
++
++static inline void set_system_gate_ist(int nr, void *func, unsigned ist)
++{
++ _set_gate(&idt_table[nr], GATE_INTERRUPT, (unsigned long) func, 3, ist);
++}
++#endif
++
++static inline void set_tssldt_descriptor(void *ptr, unsigned long tss, unsigned type,
++ unsigned size)
++{
++ struct ldttss_desc d;
++ memset(&d,0,sizeof(d));
++ d.limit0 = size & 0xFFFF;
++ d.base0 = PTR_LOW(tss);
++ d.base1 = PTR_MIDDLE(tss) & 0xFF;
++ d.type = type;
++ d.p = 1;
++ d.limit1 = (size >> 16) & 0xF;
++ d.base2 = (PTR_MIDDLE(tss) >> 8) & 0xFF;
++ d.base3 = PTR_HIGH(tss);
++ memcpy(ptr, &d, 16);
++}
++
++#ifndef CONFIG_X86_NO_TSS
++static inline void set_tss_desc(unsigned cpu, void *addr)
++{
++ /*
++ * sizeof(unsigned long) coming from an extra "long" at the end
++ * of the iobitmap. See tss_struct definition in processor.h
++ *
++ * Why -1? The segment base+limit should point to the address of
++ * the last valid byte.
++ */
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_TSS],
++ (unsigned long)addr, DESC_TSS,
++ IO_BITMAP_OFFSET + IO_BITMAP_BYTES + sizeof(unsigned long) - 1);
++}
++#endif
++
++static inline void set_ldt_desc(unsigned cpu, void *addr, int size)
++{
++ set_tssldt_descriptor(&cpu_gdt(cpu)[GDT_ENTRY_LDT], (unsigned long)addr,
++ DESC_LDT, size * 8 - 1);
++}
++
++static inline void set_seg_base(unsigned cpu, int entry, void *base)
++{
++ struct desc_struct *d = &cpu_gdt(cpu)[entry];
++ u32 addr = (u32)(u64)base;
++ BUG_ON((u64)base >> 32);
++ d->base0 = addr & 0xffff;
++ d->base1 = (addr >> 16) & 0xff;
++ d->base2 = (addr >> 24) & 0xff;
++}
++
++#define LDT_entry_a(info) \
++ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
++/* Don't allow setting of the lm bit. It is useless anyway because
++ 64-bit system calls require __USER_CS. */
++#define LDT_entry_b(info) \
++ (((info)->base_addr & 0xff000000) | \
++ (((info)->base_addr & 0x00ff0000) >> 16) | \
++ ((info)->limit & 0xf0000) | \
++ (((info)->read_exec_only ^ 1) << 9) | \
++ ((info)->contents << 10) | \
++ (((info)->seg_not_present ^ 1) << 15) | \
++ ((info)->seg_32bit << 22) | \
++ ((info)->limit_in_pages << 23) | \
++ ((info)->useable << 20) | \
++ /* ((info)->lm << 21) | */ \
++ 0x7000)
++
++#define LDT_empty(info) (\
++ (info)->base_addr == 0 && \
++ (info)->limit == 0 && \
++ (info)->contents == 0 && \
++ (info)->read_exec_only == 1 && \
++ (info)->seg_32bit == 0 && \
++ (info)->limit_in_pages == 0 && \
++ (info)->seg_not_present == 1 && \
++ (info)->useable == 0 && \
++ (info)->lm == 0)
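
Worked through the macros above for a flat 32-bit user data segment (base_addr = 0, limit = 0xfffff, limit_in_pages = 1, seg_32bit = 1, contents = 0, read_exec_only = 0, seg_not_present = 0, useable = 0, lm = 0):

    LDT_entry_a(&info) = 0x0000ffff   /* limit 15:0, base 15:0 */
    LDT_entry_b(&info) = 0x00cff200   /* G=1, D=1, P=1, DPL=3 (from the
                                         0x7000 term), writable data type */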
++
++#if TLS_SIZE != 24
++# error update this code.
++#endif
++
++static inline void load_TLS(struct thread_struct *t, unsigned int cpu)
++{
++#if 0
++ u64 *gdt = (u64 *)(cpu_gdt(cpu) + GDT_ENTRY_TLS_MIN);
++ gdt[0] = t->tls_array[0];
++ gdt[1] = t->tls_array[1];
++ gdt[2] = t->tls_array[2];
++#endif
++#define C(i) \
++ if (HYPERVISOR_update_descriptor(virt_to_machine(&cpu_gdt(cpu)[GDT_ENTRY_TLS_MIN + i]), \
++ t->tls_array[i])) \
++ BUG();
++
++ C(0); C(1); C(2);
++#undef C
++}
++
++/*
++ * load one particular LDT into the current CPU
++ */
++static inline void load_LDT_nolock (mm_context_t *pc, int cpu)
++{
++ void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count))
++ segments = NULL;
++
++ xen_set_ldt(segments, count);
++}
++
++static inline void load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ load_LDT_nolock(pc, cpu);
++ put_cpu();
++}
++
++extern struct desc_ptr idt_descr;
++
++#endif /* !__ASSEMBLY__ */
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/dma-mapping_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,207 @@
++#ifndef _X8664_DMA_MAPPING_H
++#define _X8664_DMA_MAPPING_H 1
++
++/*
++ * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
++ * documentation.
++ */
++
++
++#include <asm/scatterlist.h>
++#include <asm/io.h>
++#include <asm/swiotlb.h>
++
++struct dma_mapping_ops {
++ int (*mapping_error)(dma_addr_t dma_addr);
++ void* (*alloc_coherent)(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++ void (*free_coherent)(struct device *dev, size_t size,
++ void *vaddr, dma_addr_t dma_handle);
++ dma_addr_t (*map_single)(struct device *hwdev, void *ptr,
++ size_t size, int direction);
++ /* like map_single, but doesn't check the device mask */
++ dma_addr_t (*map_simple)(struct device *hwdev, char *ptr,
++ size_t size, int direction);
++ void (*unmap_single)(struct device *dev, dma_addr_t addr,
++ size_t size, int direction);
++ void (*sync_single_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, size_t size,
++ int direction);
++ void (*sync_single_range_for_cpu)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_single_range_for_device)(struct device *hwdev,
++ dma_addr_t dma_handle, unsigned long offset,
++ size_t size, int direction);
++ void (*sync_sg_for_cpu)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ void (*sync_sg_for_device)(struct device *hwdev,
++ struct scatterlist *sg, int nelems,
++ int direction);
++ int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
++ int nents, int direction);
++ void (*unmap_sg)(struct device *hwdev,
++ struct scatterlist *sg, int nents,
++ int direction);
++ int (*dma_supported)(struct device *hwdev, u64 mask);
++ int is_phys;
++};
++
++extern dma_addr_t bad_dma_address;
++extern struct dma_mapping_ops* dma_ops;
++extern int iommu_merge;
++
++static inline int valid_dma_direction(int dma_direction)
++{
++ return ((dma_direction == DMA_BIDIRECTIONAL) ||
++ (dma_direction == DMA_TO_DEVICE) ||
++ (dma_direction == DMA_FROM_DEVICE));
++}
++
++#if 0
++static inline int dma_mapping_error(dma_addr_t dma_addr)
++{
++ if (dma_ops->mapping_error)
++ return dma_ops->mapping_error(dma_addr);
++
++ return (dma_addr == bad_dma_address);
++}
++
++extern void *dma_alloc_coherent(struct device *dev, size_t size,
++ dma_addr_t *dma_handle, gfp_t gfp);
++extern void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
++ dma_addr_t dma_handle);
++
++static inline dma_addr_t
++dma_map_single(struct device *hwdev, void *ptr, size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_single(hwdev, ptr, size, direction);
++}
++
++static inline void
++dma_unmap_single(struct device *dev, dma_addr_t addr,size_t size,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_single(dev, addr, size, direction);
++}
++
++#define dma_map_page(dev,page,offset,size,dir) \
++ dma_map_single((dev), page_address(page)+(offset), (size), (dir))
++
++#define dma_unmap_page dma_unmap_single
++
++static inline void
++dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_cpu)
++ dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_for_device)
++ dma_ops->sync_single_for_device(hwdev, dma_handle, size,
++ direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_cpu) {
++ dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset, size, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
++ unsigned long offset, size_t size, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_single_range_for_device)
++ dma_ops->sync_single_range_for_device(hwdev, dma_handle,
++ offset, size, direction);
++
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_cpu)
++ dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
++ flush_write_buffers();
++}
++
++static inline void
++dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ if (dma_ops->sync_sg_for_device) {
++ dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
++ }
++
++ flush_write_buffers();
++}
++
++static inline int
++dma_map_sg(struct device *hwdev, struct scatterlist *sg, int nents, int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ return dma_ops->map_sg(hwdev, sg, nents, direction);
++}
++
++static inline void
++dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
++ int direction)
++{
++ BUG_ON(!valid_dma_direction(direction));
++ dma_ops->unmap_sg(hwdev, sg, nents, direction);
++}
++
++extern int dma_supported(struct device *hwdev, u64 mask);
++
++/* same for gart, swiotlb, and nommu */
++static inline int dma_get_cache_alignment(void)
++{
++ return boot_cpu_data.x86_clflush_size;
++}
++
++#define dma_is_consistent(h) 1
++
++extern int dma_set_mask(struct device *dev, u64 mask);
++
++static inline void
++dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction dir)
++{
++ flush_write_buffers();
++}
++
++extern struct device fallback_dev;
++extern int panic_on_overflow;
++#endif
++
++#endif /* _X8664_DMA_MAPPING_H */
++
++#include <asm-i386/mach-xen/asm/dma-mapping.h>
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/fixmap_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,112 @@
++/*
++ * fixmap.h: compile-time virtual memory allocation
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file "COPYING" in the main directory of this archive
++ * for more details.
++ *
++ * Copyright (C) 1998 Ingo Molnar
++ */
++
++#ifndef _ASM_FIXMAP_H
++#define _ASM_FIXMAP_H
++
++#include <linux/kernel.h>
++#include <asm/apicdef.h>
++#include <asm/page.h>
++#include <asm/vsyscall.h>
++#include <asm/vsyscall32.h>
++#include <asm/acpi.h>
++
++/*
++ * Here we define all the compile-time 'special' virtual
++ * addresses. The point is to have a constant address at
++ * compile time, but to set the physical address only
++ * in the boot process.
++ *
++ * These 'compile-time allocated' memory buffers are
++ * fixed-size 4k pages (or larger if used with an increment
++ * higher than 1). Use set_fixmap(idx, phys) to associate
++ * physical memory with fixmap indices.
++ *
++ * TLB entries of such buffers will not be flushed across
++ * task switches.
++ */
++
++enum fixed_addresses {
++ VSYSCALL_LAST_PAGE,
++ VSYSCALL_FIRST_PAGE = VSYSCALL_LAST_PAGE + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
++ VSYSCALL_HPET,
++ FIX_HPET_BASE,
++#ifdef CONFIG_X86_LOCAL_APIC
++ FIX_APIC_BASE, /* local (CPU) APIC -- required for SMP or not */
++#endif
++#ifdef CONFIG_X86_IO_APIC
++ FIX_IO_APIC_BASE_0,
++ FIX_IO_APIC_BASE_END = FIX_IO_APIC_BASE_0 + MAX_IO_APICS-1,
++#endif
++#ifdef CONFIG_ACPI
++ FIX_ACPI_BEGIN,
++ FIX_ACPI_END = FIX_ACPI_BEGIN + FIX_ACPI_PAGES - 1,
++#endif
++ FIX_SHARED_INFO,
++#define NR_FIX_ISAMAPS 256
++ FIX_ISAMAP_END,
++ FIX_ISAMAP_BEGIN = FIX_ISAMAP_END + NR_FIX_ISAMAPS - 1,
++ __end_of_permanent_fixed_addresses,
++ /* temporary boot-time mappings, used before ioremap() is functional */
++#define NR_FIX_BTMAPS 16
++ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
++ FIX_BTMAP_BEGIN = FIX_BTMAP_END + NR_FIX_BTMAPS - 1,
++ __end_of_fixed_addresses
++};
++
++extern void __set_fixmap (enum fixed_addresses idx,
++ unsigned long phys, pgprot_t flags);
++
++#define set_fixmap(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL)
++/*
++ * Some hardware wants to get fixmapped without caching.
++ */
++#define set_fixmap_nocache(idx, phys) \
++ __set_fixmap(idx, phys, PAGE_KERNEL_NOCACHE)
++
++#define clear_fixmap(idx) \
++ __set_fixmap(idx, 0, __pgprot(0))
++
++#define FIXADDR_TOP (VSYSCALL_END-PAGE_SIZE)
++#define FIXADDR_SIZE (__end_of_fixed_addresses << PAGE_SHIFT)
++#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
++
++/* Only covers 32bit vsyscalls currently. Need another set for 64bit. */
++#define FIXADDR_USER_START ((unsigned long)VSYSCALL32_VSYSCALL)
++#define FIXADDR_USER_END (FIXADDR_USER_START + PAGE_SIZE)
++
++#define __fix_to_virt(x) (FIXADDR_TOP - ((x) << PAGE_SHIFT))
++
++extern void __this_fixmap_does_not_exist(void);
++
++/*
++ * 'index to address' translation. If anyone tries to use the idx
++ * directly without translation, we catch the bug with a NULL-dereference
++ * kernel oops. Illegal ranges of incoming indices are caught too.
++ */
++static __always_inline unsigned long fix_to_virt(const unsigned int idx)
++{
++ /*
++ * This branch gets completely eliminated after inlining,
++ * except when someone tries to use fixaddr indices in an
++ * illegal way (such as mixing up address types or using
++ * out-of-range indices).
++ *
++ * If it doesn't get removed, the linker will complain
++ * loudly with a reasonably clear error message.
++ */
++ if (idx >= __end_of_fixed_addresses)
++ __this_fixmap_does_not_exist();
++
++ return __fix_to_virt(idx);
++}
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/hypercall_64.h 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,408 @@
++/******************************************************************************
++ * hypercall.h
++ *
++ * Linux-specific hypervisor handling.
++ *
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ * 64-bit updates:
++ * Benjamin Liu <benjamin.liu@intel.com>
++ * Jun Nakajima <jun.nakajima@intel.com>
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __HYPERCALL_H__
++#define __HYPERCALL_H__
++
++#include <linux/string.h> /* memcpy() */
++#include <linux/stringify.h>
++
++#ifndef __HYPERVISOR_H__
++# error "please don't include this file directly"
++#endif
++
++#ifdef CONFIG_XEN
++#define HYPERCALL_STR(name) \
++ "call hypercall_page + ("__stringify(__HYPERVISOR_##name)" * 32)"
++#else
++#define HYPERCALL_STR(name) \
++ "mov $("__stringify(__HYPERVISOR_##name)" * 32),%%eax; "\
++ "add hypercall_stubs(%%rip),%%rax; " \
++ "call *%%rax"
++#endif
++
++#define _hypercall0(type, name) \
++({ \
++ type __res; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res) \
++ : \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall1(type, name, a1) \
++({ \
++ type __res; \
++ long __ign1; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1) \
++ : "1" ((long)(a1)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall2(type, name, a1, a2) \
++({ \
++ type __res; \
++ long __ign1, __ign2; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2) \
++ : "1" ((long)(a1)), "2" ((long)(a2)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall3(type, name, a1, a2, a3) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall4(type, name, a1, a2, a3, a4) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ register long __arg4 asm("r10") = (long)(a4); \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3), "+r" (__arg4) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
++
++#define _hypercall5(type, name, a1, a2, a3, a4, a5) \
++({ \
++ type __res; \
++ long __ign1, __ign2, __ign3; \
++ register long __arg4 asm("r10") = (long)(a4); \
++ register long __arg5 asm("r8") = (long)(a5); \
++ asm volatile ( \
++ HYPERCALL_STR(name) \
++ : "=a" (__res), "=D" (__ign1), "=S" (__ign2), \
++ "=d" (__ign3), "+r" (__arg4), "+r" (__arg5) \
++ : "1" ((long)(a1)), "2" ((long)(a2)), \
++ "3" ((long)(a3)) \
++ : "memory" ); \
++ __res; \
++})
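++
++/*
++ * The wrappers below all funnel through the macros above. The 64-bit
++ * hypercall ABI mirrors the x86-64 syscall convention: arguments go in
++ * %rdi, %rsi, %rdx, %r10 and %r8 (%r10 standing in for %rcx, which the
++ * syscall instruction clobbers), and the result comes back in %rax.
++ * As a rough illustration, a two-argument call such as
++ *
++ *	HYPERVISOR_xen_version(XENVER_version, NULL)
++ *
++ * loads the command into %rdi and the argument pointer into %rsi, then
++ * calls the per-hypercall entry at hypercall_page + (nr * 32).
++ */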
++
++static inline int __must_check
++HYPERVISOR_set_trap_table(
++ const trap_info_t *table)
++{
++ return _hypercall1(int, set_trap_table, table);
++}
++
++static inline int __must_check
++HYPERVISOR_mmu_update(
++ mmu_update_t *req, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmu_update, req, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_mmuext_op(
++ struct mmuext_op *op, unsigned int count, unsigned int *success_count,
++ domid_t domid)
++{
++ return _hypercall4(int, mmuext_op, op, count, success_count, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_set_gdt(
++ unsigned long *frame_list, unsigned int entries)
++{
++ return _hypercall2(int, set_gdt, frame_list, entries);
++}
++
++static inline int __must_check
++HYPERVISOR_stack_switch(
++ unsigned long ss, unsigned long esp)
++{
++ return _hypercall2(int, stack_switch, ss, esp);
++}
++
++static inline int __must_check
++HYPERVISOR_set_callbacks(
++ unsigned long event_address, unsigned long failsafe_address,
++ unsigned long syscall_address)
++{
++ return _hypercall3(int, set_callbacks,
++ event_address, failsafe_address, syscall_address);
++}
++
++static inline int
++HYPERVISOR_fpu_taskswitch(
++ int set)
++{
++ return _hypercall1(int, fpu_taskswitch, set);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op_compat(
++ int cmd, unsigned long arg)
++{
++ return _hypercall2(int, sched_op_compat, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_sched_op(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, sched_op, cmd, arg);
++}
++
++static inline long __must_check
++HYPERVISOR_set_timer_op(
++ u64 timeout)
++{
++ return _hypercall1(long, set_timer_op, timeout);
++}
++
++static inline int __must_check
++HYPERVISOR_platform_op(
++ struct xen_platform_op *platform_op)
++{
++ platform_op->interface_version = XENPF_INTERFACE_VERSION;
++ return _hypercall1(int, platform_op, platform_op);
++}
++
++static inline int __must_check
++HYPERVISOR_set_debugreg(
++ unsigned int reg, unsigned long value)
++{
++ return _hypercall2(int, set_debugreg, reg, value);
++}
++
++static inline unsigned long __must_check
++HYPERVISOR_get_debugreg(
++ unsigned int reg)
++{
++ return _hypercall1(unsigned long, get_debugreg, reg);
++}
++
++static inline int __must_check
++HYPERVISOR_update_descriptor(
++ unsigned long ma, unsigned long word)
++{
++ return _hypercall2(int, update_descriptor, ma, word);
++}
++
++static inline int __must_check
++HYPERVISOR_memory_op(
++ unsigned int cmd, void *arg)
++{
++ return _hypercall2(int, memory_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_multicall(
++ multicall_entry_t *call_list, unsigned int nr_calls)
++{
++ return _hypercall2(int, multicall, call_list, nr_calls);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping(
++ unsigned long va, pte_t new_val, unsigned long flags)
++{
++ return _hypercall3(int, update_va_mapping, va, new_val.pte, flags);
++}
++
++static inline int __must_check
++HYPERVISOR_event_channel_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, event_channel_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct evtchn_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, event_channel_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
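++
++/*
++ * The -ENOSYS fallback above covers hypervisors that predate the split
++ * event-channel interface: when compatibility with Xen 3.0.2 or older is
++ * configured (CONFIG_XEN_COMPAT <= 0x030002), the old multiplexed
++ * event_channel_op_compat call is retried with a single struct evtchn_op
++ * embedding both the command and its argument. The memcpy in and out
++ * keeps the caller's view of the argument structure unchanged.
++ */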
++
++static inline int __must_check
++HYPERVISOR_xen_version(
++ int cmd, void *arg)
++{
++ return _hypercall2(int, xen_version, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_console_io(
++ int cmd, unsigned int count, char *str)
++{
++ return _hypercall3(int, console_io, cmd, count, str);
++}
++
++static inline int __must_check
++HYPERVISOR_physdev_op(
++ int cmd, void *arg)
++{
++ int rc = _hypercall2(int, physdev_op, cmd, arg);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (unlikely(rc == -ENOSYS)) {
++ struct physdev_op op;
++ op.cmd = cmd;
++ memcpy(&op.u, arg, sizeof(op.u));
++ rc = _hypercall1(int, physdev_op_compat, &op);
++ memcpy(arg, &op.u, sizeof(op.u));
++ }
++#endif
++
++ return rc;
++}
++
++static inline int __must_check
++HYPERVISOR_grant_table_op(
++ unsigned int cmd, void *uop, unsigned int count)
++{
++ return _hypercall3(int, grant_table_op, cmd, uop, count);
++}
++
++static inline int __must_check
++HYPERVISOR_update_va_mapping_otherdomain(
++ unsigned long va, pte_t new_val, unsigned long flags, domid_t domid)
++{
++ return _hypercall4(int, update_va_mapping_otherdomain, va,
++ new_val.pte, flags, domid);
++}
++
++static inline int __must_check
++HYPERVISOR_vm_assist(
++ unsigned int cmd, unsigned int type)
++{
++ return _hypercall2(int, vm_assist, cmd, type);
++}
++
++static inline int __must_check
++HYPERVISOR_vcpu_op(
++ int cmd, unsigned int vcpuid, void *extra_args)
++{
++ return _hypercall3(int, vcpu_op, cmd, vcpuid, extra_args);
++}
++
++static inline int __must_check
++HYPERVISOR_set_segment_base(
++ int reg, unsigned long value)
++{
++ return _hypercall2(int, set_segment_base, reg, value);
++}
++
++static inline int __must_check
++HYPERVISOR_suspend(
++ unsigned long srec)
++{
++ struct sched_shutdown sched_shutdown = {
++ .reason = SHUTDOWN_suspend
++ };
++
++ int rc = _hypercall3(int, sched_op, SCHEDOP_shutdown,
++ &sched_shutdown, srec);
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (rc == -ENOSYS)
++ rc = _hypercall3(int, sched_op_compat, SCHEDOP_shutdown,
++ SHUTDOWN_suspend, srec);
++#endif
++
++ return rc;
++}
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++static inline int
++HYPERVISOR_nmi_op(
++ unsigned long op, void *arg)
++{
++ return _hypercall2(int, nmi_op, op, arg);
++}
++#endif
++
++#ifndef CONFIG_XEN
++static inline unsigned long __must_check
++HYPERVISOR_hvm_op(
++ int op, void *arg)
++{
++ return _hypercall2(unsigned long, hvm_op, op, arg);
++}
++#endif
++
++static inline int __must_check
++HYPERVISOR_callback_op(
++ int cmd, const void *arg)
++{
++ return _hypercall2(int, callback_op, cmd, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_xenoprof_op(
++ int op, void *arg)
++{
++ return _hypercall2(int, xenoprof_op, op, arg);
++}
++
++static inline int __must_check
++HYPERVISOR_kexec_op(
++ unsigned long op, void *args)
++{
++ return _hypercall2(int, kexec_op, op, args);
++}
++
++#endif /* __HYPERCALL_H__ */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/irqflags_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,139 @@
++/*
++ * include/asm-x86_64/irqflags.h
++ *
++ * IRQ flags handling
++ *
++ * This file gets included from lowlevel asm headers too, to provide
++ * wrapped versions of the local_irq_*() APIs, based on the
++ * raw_local_irq_*() functions from the lowlevel headers.
++ */
++#ifndef _ASM_IRQFLAGS_H
++#define _ASM_IRQFLAGS_H
++
++#ifndef __ASSEMBLY__
++/*
++ * Interrupt control:
++ */
++
++/*
++ * The use of 'barrier' in the following reflects the use of these
++ * operations as local locks: reentrancy must be prevented (e.g., by
++ * __cli()) /before/ the following critical operations are executed, and
++ * all critical operations must complete /before/ reentrancy is permitted
++ * again (e.g., by __sti()). The Alpha architecture, for example, also
++ * includes such barriers.
++ */
++
++#define __raw_local_save_flags() (current_vcpu_info()->evtchn_upcall_mask)
++
++#define raw_local_save_flags(flags) \
++ do { (flags) = __raw_local_save_flags(); } while (0)
++
++#define raw_local_irq_restore(x) \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ if ((_vcpu->evtchn_upcall_mask = (x)) == 0) { \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++ } \
++} while (0)
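++
++/*
++ * Under Xen there is no direct access to EFLAGS.IF; the per-vcpu
++ * evtchn_upcall_mask byte plays its role instead (non-zero means event
++ * delivery -- i.e., "interrupts" -- is masked). Restoring flags is thus a
++ * plain store, but when unmasking we must re-check evtchn_upcall_pending:
++ * an event may have arrived while we were masked, and with no hardware
++ * interrupt left pending it would otherwise go unnoticed until the next
++ * hypervisor upcall. force_evtchn_callback() triggers that upcall by hand.
++ */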
++
++#ifdef CONFIG_X86_VSMP
++
++/*
++ * Interrupt control for the VSMP architecture:
++ */
++
++static inline void raw_local_irq_disable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags & ~(1 << 9)) | (1 << 18));
++}
++
++static inline void raw_local_irq_enable(void)
++{
++ unsigned long flags = __raw_local_save_flags();
++
++ raw_local_irq_restore((flags | (1 << 9)) & ~(1 << 18));
++}
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return !(flags & (1<<9)) || (flags & (1 << 18));
++}
++
++#else /* CONFIG_X86_VSMP */
++
++#define raw_local_irq_disable() \
++do { \
++ current_vcpu_info()->evtchn_upcall_mask = 1; \
++ barrier(); \
++} while (0)
++
++#define raw_local_irq_enable() \
++do { \
++ vcpu_info_t *_vcpu; \
++ barrier(); \
++ _vcpu = current_vcpu_info(); \
++ _vcpu->evtchn_upcall_mask = 0; \
++ barrier(); /* unmask then check (avoid races) */ \
++ if ( unlikely(_vcpu->evtchn_upcall_pending) ) \
++ force_evtchn_callback(); \
++} while (0)
++
++static inline int raw_irqs_disabled_flags(unsigned long flags)
++{
++ return (flags != 0);
++}
++
++#endif
++
++/*
++ * For spinlocks, etc.:
++ */
++
++#define __raw_local_irq_save() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_local_irq_disable(); \
++ \
++ flags; \
++})
++
++#define raw_local_irq_save(flags) \
++ do { (flags) = __raw_local_irq_save(); } while (0)
++
++#define raw_irqs_disabled() \
++({ \
++ unsigned long flags = __raw_local_save_flags(); \
++ \
++ raw_irqs_disabled_flags(flags); \
++})
++
++/*
++ * Used in the idle loop; sti takes one instruction cycle
++ * to complete:
++ */
++void raw_safe_halt(void);
++
++/*
++ * Used when interrupts are already enabled or to
++ * shut down the processor:
++ */
++void halt(void);
++
++#else /* __ASSEMBLY__: */
++# ifdef CONFIG_TRACE_IRQFLAGS
++# define TRACE_IRQS_ON call trace_hardirqs_on_thunk
++# define TRACE_IRQS_OFF call trace_hardirqs_off_thunk
++# else
++# define TRACE_IRQS_ON
++# define TRACE_IRQS_OFF
++# endif
++#endif
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/maddr_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,161 @@
++#ifndef _X86_64_MADDR_H
++#define _X86_64_MADDR_H
++
++#include <xen/features.h>
++#include <xen/interface/xen.h>
++
++/**** MACHINE <-> PHYSICAL CONVERSION MACROS ****/
++#define INVALID_P2M_ENTRY (~0UL)
++#define FOREIGN_FRAME_BIT (1UL<<63)
++#define FOREIGN_FRAME(m) ((m) | FOREIGN_FRAME_BIT)
++
++/* Definitions for machine and pseudophysical addresses. */
++typedef unsigned long paddr_t;
++typedef unsigned long maddr_t;
++
++#ifdef CONFIG_XEN
++
++extern unsigned long *phys_to_machine_mapping;
++
++#undef machine_to_phys_mapping
++extern unsigned long *machine_to_phys_mapping;
++extern unsigned int machine_to_phys_order;
++
++static inline unsigned long pfn_to_mfn(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return pfn;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return phys_to_machine_mapping[pfn] & ~FOREIGN_FRAME_BIT;
++}
++
++static inline int phys_to_machine_mapping_valid(unsigned long pfn)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return 1;
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ return (phys_to_machine_mapping[pfn] != INVALID_P2M_ENTRY);
++}
++
++static inline unsigned long mfn_to_pfn(unsigned long mfn)
++{
++ unsigned long pfn;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return mfn;
++
++ if (unlikely((mfn >> machine_to_phys_order) != 0))
++ return end_pfn;
++
++ /* The array access can fail (e.g., device space beyond end of RAM). */
++ asm (
++ "1: movq %1,%0\n"
++ "2:\n"
++ ".section .fixup,\"ax\"\n"
++ "3: movq %2,%0\n"
++ " jmp 2b\n"
++ ".previous\n"
++ ".section __ex_table,\"a\"\n"
++ " .align 8\n"
++ " .quad 1b,3b\n"
++ ".previous"
++ : "=r" (pfn)
++ : "m" (machine_to_phys_mapping[mfn]), "m" (end_pfn) );
++
++ return pfn;
++}
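++
++/*
++ * The asm above is an exception-table fixup in open-coded form: label 1
++ * is the potentially-faulting load from the machine_to_phys table, and
++ * the __ex_table entry tells the page-fault handler to resume at the
++ * .fixup stub (label 3), which substitutes end_pfn -- an out-of-range
++ * PFN -- before jumping back. The net effect is that mfn_to_pfn()
++ * returns a !pfn_valid() value instead of oopsing on an MFN with no
++ * m2p entry.
++ */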
++
++/*
++ * We detect special mappings in one of two ways:
++ * 1. If the MFN is an I/O page then Xen will set the m2p entry
++ * to be outside our maximum possible pseudophys range.
++ * 2. If the MFN belongs to a different domain then we will certainly
++ * not have MFN in our p2m table. Conversely, if the page is ours,
++ * then we'll have p2m(m2p(MFN))==MFN.
++ * If we detect a special mapping then it doesn't have a 'struct page'.
++ * We force !pfn_valid() by returning an out-of-range pointer.
++ *
++ * NB. These checks require that, for any MFN that is not in our reservation,
++ * there is no PFN such that p2m(PFN) == MFN. Otherwise we can get confused if
++ * we are foreign-mapping the MFN, and the other domain has m2p(MFN) == PFN.
++ * Yikes! Various places must poke in INVALID_P2M_ENTRY for safety.
++ *
++ * NB2. When deliberately mapping foreign pages into the p2m table, you *must*
++ * use FOREIGN_FRAME(). This will cause pte_pfn() to choke on it, as we
++ * require. In all the cases we care about, the FOREIGN_FRAME bit is
++ * masked (e.g., pfn_to_mfn()) so behaviour there is correct.
++ */
++static inline unsigned long mfn_to_local_pfn(unsigned long mfn)
++{
++ unsigned long pfn = mfn_to_pfn(mfn);
++ if ((pfn < end_pfn)
++ && !xen_feature(XENFEAT_auto_translated_physmap)
++ && (phys_to_machine_mapping[pfn] != mfn))
++ return end_pfn; /* force !pfn_valid() */
++ return pfn;
++}
++
++static inline void set_phys_to_machine(unsigned long pfn, unsigned long mfn)
++{
++ BUG_ON(end_pfn && pfn >= end_pfn);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
++ return;
++ }
++ phys_to_machine_mapping[pfn] = mfn;
++}
++
++static inline maddr_t phys_to_machine(paddr_t phys)
++{
++ maddr_t machine = pfn_to_mfn(phys >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t machine_to_phys(maddr_t machine)
++{
++ paddr_t phys = mfn_to_pfn(machine >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PAGE_MASK);
++ return phys;
++}
++
++static inline paddr_t pte_phys_to_machine(paddr_t phys)
++{
++ maddr_t machine;
++ machine = pfn_to_mfn((phys & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ machine = (machine << PAGE_SHIFT) | (phys & ~PHYSICAL_PAGE_MASK);
++ return machine;
++}
++
++static inline paddr_t pte_machine_to_phys(maddr_t machine)
++{
++ paddr_t phys;
++ phys = mfn_to_pfn((machine & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT);
++ phys = (phys << PAGE_SHIFT) | (machine & ~PHYSICAL_PAGE_MASK);
++ return phys;
++}
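++
++/*
++ * The pte_* variants differ from phys_to_machine()/machine_to_phys()
++ * only in the mask: PHYSICAL_PAGE_MASK clips the frame number to
++ * __PHYSICAL_MASK, so the low flag bits *and* the high bits of a PTE
++ * (notably the NX bit) pass through the translation untouched, whereas
++ * the plain variants only split off the sub-page offset.
++ */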
++
++#define __pte_ma(x) ((pte_t) { (x) } )
++#define pfn_pte_ma(pfn, prot) __pte_ma((((pfn) << PAGE_SHIFT) | pgprot_val(prot)) & __supported_pte_mask)
++
++#else /* !CONFIG_XEN */
++
++#define pfn_to_mfn(pfn) (pfn)
++#define mfn_to_pfn(mfn) (mfn)
++#define mfn_to_local_pfn(mfn) (mfn)
++#define set_phys_to_machine(pfn, mfn) ((void)0)
++#define phys_to_machine_mapping_valid(pfn) (1)
++#define phys_to_machine(phys) ((maddr_t)(phys))
++#define machine_to_phys(mach) ((paddr_t)(mach))
++#define pfn_pte_ma(pfn, prot) pfn_pte(pfn, prot)
++#define __pte_ma(x) __pte(x)
++
++#endif /* !CONFIG_XEN */
++
++/* VIRT <-> MACHINE conversion */
++#define virt_to_machine(v) (phys_to_machine(__pa(v)))
++#define virt_to_mfn(v) (pfn_to_mfn(__pa(v) >> PAGE_SHIFT))
++#define mfn_to_virt(m) (__va(mfn_to_pfn(m) << PAGE_SHIFT))
++
++#endif /* _X86_64_MADDR_H */
++
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/mmu_context_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,136 @@
++#ifndef __X86_64_MMU_CONTEXT_H
++#define __X86_64_MMU_CONTEXT_H
++
++#include <asm/desc.h>
++#include <asm/atomic.h>
++#include <asm/pgalloc.h>
++#include <asm/page.h>
++#include <asm/pda.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
++
++/*
++ * possibly do the LDT unload here?
++ */
++int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
++void destroy_context(struct mm_struct *mm);
++
++static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
++{
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ if (read_pda(mmu_state) == TLBSTATE_OK)
++ write_pda(mmu_state, TLBSTATE_LAZY);
++#endif
++}
++
++#define prepare_arch_switch(next) __prepare_arch_switch()
++
++static inline void __prepare_arch_switch(void)
++{
++ /*
++ * Save away %es, %ds, %fs and %gs. Must happen before reload
++ * of cr3/ldt (i.e., not in __switch_to).
++ */
++ __asm__ __volatile__ (
++ "mov %%es,%0 ; mov %%ds,%1 ; mov %%fs,%2 ; mov %%gs,%3"
++ : "=m" (current->thread.es),
++ "=m" (current->thread.ds),
++ "=m" (current->thread.fsindex),
++ "=m" (current->thread.gsindex) );
++
++ if (current->thread.ds)
++ __asm__ __volatile__ ( "movl %0,%%ds" : : "r" (0) );
++
++ if (current->thread.es)
++ __asm__ __volatile__ ( "movl %0,%%es" : : "r" (0) );
++
++ if (current->thread.fsindex) {
++ __asm__ __volatile__ ( "movl %0,%%fs" : : "r" (0) );
++ current->thread.fs = 0;
++ }
++
++ if (current->thread.gsindex) {
++ load_gs_index(0);
++ current->thread.gs = 0;
++ }
++}
++
++extern void mm_pin(struct mm_struct *mm);
++extern void mm_unpin(struct mm_struct *mm);
++void mm_pin_all(void);
++
++static inline void load_cr3(pgd_t *pgd)
++{
++ asm volatile("movq %0,%%cr3" :: "r" (phys_to_machine(__pa(pgd))) :
++ "memory");
++}
++
++static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
++ struct task_struct *tsk)
++{
++ unsigned cpu = smp_processor_id();
++ struct mmuext_op _op[3], *op = _op;
++
++ if (likely(prev != next)) {
++ BUG_ON(!xen_feature(XENFEAT_writable_page_tables) &&
++ !next->context.pinned);
++
++ /* stop flush ipis for the previous mm */
++ cpu_clear(cpu, prev->cpu_vm_mask);
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ write_pda(mmu_state, TLBSTATE_OK);
++ write_pda(active_mm, next);
++#endif
++ cpu_set(cpu, next->cpu_vm_mask);
++
++ /* load_cr3(next->pgd) */
++ op->cmd = MMUEXT_NEW_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(next->pgd) >> PAGE_SHIFT);
++ op++;
++
++ /* xen_new_user_pt(__pa(__user_pgd(next->pgd))) */
++ op->cmd = MMUEXT_NEW_USER_BASEPTR;
++ op->arg1.mfn = pfn_to_mfn(__pa(__user_pgd(next->pgd)) >> PAGE_SHIFT);
++ op++;
++
++ if (unlikely(next->context.ldt != prev->context.ldt)) {
++ /* load_LDT_nolock(&next->context, cpu) */
++ op->cmd = MMUEXT_SET_LDT;
++ op->arg1.linear_addr = (unsigned long)next->context.ldt;
++ op->arg2.nr_ents = next->context.size;
++ op++;
++ }
++
++ BUG_ON(HYPERVISOR_mmuext_op(_op, op-_op, NULL, DOMID_SELF));
++ }
++#if defined(CONFIG_SMP) && !defined(CONFIG_XEN)
++ else {
++ write_pda(mmu_state, TLBSTATE_OK);
++ if (read_pda(active_mm) != next)
++ out_of_line_bug();
++ if (!cpu_test_and_set(cpu, next->cpu_vm_mask)) {
++ /* We were in lazy tlb mode and leave_mm disabled
++ * tlb flush IPI delivery. We must reload CR3
++ * to make sure to use no freed page tables.
++ */
++ load_cr3(next->pgd);
++ xen_new_user_pt(__pa(__user_pgd(next->pgd)));
++ load_LDT_nolock(&next->context, cpu);
++ }
++ }
++#endif
++}
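++
++/*
++ * Note the batching in the prev != next path above: the new kernel base
++ * pointer, the new user base pointer and (if needed) the LDT switch are
++ * queued into _op[] and submitted as a single HYPERVISOR_mmuext_op()
++ * call, so a context switch costs one hypercall instead of up to three.
++ */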
++
++#define deactivate_mm(tsk,mm) do { \
++ load_gs_index(0); \
++ asm volatile("movl %0,%%fs"::"r"(0)); \
++} while(0)
++
++static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
++{
++ if (!next->context.pinned)
++ mm_pin(next);
++ switch_mm(prev, next, NULL);
++}
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/page_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/page_64.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,212 @@
++#ifndef _X86_64_PAGE_H
++#define _X86_64_PAGE_H
++
++/* #include <linux/string.h> */
++#ifndef __ASSEMBLY__
++#include <linux/kernel.h>
++#include <linux/types.h>
++#include <asm/bug.h>
++#endif
++#include <xen/interface/xen.h>
++
++/*
++ * These definitions are repeated here so that we need not include
++ * pgtable.h (which in turn depends on definitions made here) while still
++ * being able to use the symbolic names below. The preprocessor will warn
++ * if the two definitions aren't identical.
++ */
++#define _PAGE_PRESENT 0x001
++#define _PAGE_IO 0x200
++
++/* PAGE_SHIFT determines the page size */
++#define PAGE_SHIFT 12
++#ifdef __ASSEMBLY__
++#define PAGE_SIZE (0x1 << PAGE_SHIFT)
++#else
++#define PAGE_SIZE (1UL << PAGE_SHIFT)
++#endif
++#define PAGE_MASK (~(PAGE_SIZE-1))
++
++/* See Documentation/x86_64/mm.txt for a description of the memory map. */
++#define __PHYSICAL_MASK_SHIFT 46
++#define __PHYSICAL_MASK ((1UL << __PHYSICAL_MASK_SHIFT) - 1)
++#define __VIRTUAL_MASK_SHIFT 48
++#define __VIRTUAL_MASK ((1UL << __VIRTUAL_MASK_SHIFT) - 1)
++
++#define PHYSICAL_PAGE_MASK (~(PAGE_SIZE-1) & __PHYSICAL_MASK)
++
++#define THREAD_ORDER 1
++#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
++#define CURRENT_MASK (~(THREAD_SIZE-1))
++
++#define EXCEPTION_STACK_ORDER 0
++#define EXCEPTION_STKSZ (PAGE_SIZE << EXCEPTION_STACK_ORDER)
++
++#define DEBUG_STACK_ORDER (EXCEPTION_STACK_ORDER + 1)
++#define DEBUG_STKSZ (PAGE_SIZE << DEBUG_STACK_ORDER)
++
++#define IRQSTACK_ORDER 2
++#define IRQSTACKSIZE (PAGE_SIZE << IRQSTACK_ORDER)
++
++#define STACKFAULT_STACK 1
++#define DOUBLEFAULT_STACK 2
++#define NMI_STACK 3
++#define DEBUG_STACK 4
++#define MCE_STACK 5
++#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
++
++#define LARGE_PAGE_MASK (~(LARGE_PAGE_SIZE-1))
++#define LARGE_PAGE_SIZE (1UL << PMD_SHIFT)
++
++#define HPAGE_SHIFT PMD_SHIFT
++#define HPAGE_SIZE ((1UL) << HPAGE_SHIFT)
++#define HPAGE_MASK (~(HPAGE_SIZE - 1))
++#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
++
++#ifdef __KERNEL__
++#ifndef __ASSEMBLY__
++
++extern unsigned long end_pfn;
++
++#include <asm/maddr.h>
++
++void clear_page(void *);
++void copy_page(void *, void *);
++
++#define clear_user_page(page, vaddr, pg) clear_page(page)
++#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
++
++#define alloc_zeroed_user_highpage(vma, vaddr) alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO, vma, vaddr)
++#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
++
++/*
++ * These are used to make use of C type-checking.
++ */
++typedef struct { unsigned long pte; } pte_t;
++typedef struct { unsigned long pmd; } pmd_t;
++typedef struct { unsigned long pud; } pud_t;
++typedef struct { unsigned long pgd; } pgd_t;
++#define PTE_MASK PHYSICAL_PAGE_MASK
++
++typedef struct { unsigned long pgprot; } pgprot_t;
++
++#define __pte_val(x) ((x).pte)
++#define pte_val(x) ((__pte_val(x) & (_PAGE_PRESENT|_PAGE_IO)) \
++ == _PAGE_PRESENT ? \
++ pte_machine_to_phys(__pte_val(x)) : \
++ __pte_val(x))
++
++#define __pmd_val(x) ((x).pmd)
++static inline unsigned long pmd_val(pmd_t x)
++{
++ unsigned long ret = __pmd_val(x);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret) ret = pte_machine_to_phys(ret) | _PAGE_PRESENT;
++#else
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++#endif
++ return ret;
++}
++
++#define __pud_val(x) ((x).pud)
++static inline unsigned long pud_val(pud_t x)
++{
++ unsigned long ret = __pud_val(x);
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++#define __pgd_val(x) ((x).pgd)
++static inline unsigned long pgd_val(pgd_t x)
++{
++ unsigned long ret = __pgd_val(x);
++ if (ret & _PAGE_PRESENT) ret = pte_machine_to_phys(ret);
++ return ret;
++}
++
++#define pgprot_val(x) ((x).pgprot)
++
++static inline pte_t __pte(unsigned long x)
++{
++ if ((x & (_PAGE_PRESENT|_PAGE_IO)) == _PAGE_PRESENT)
++ x = pte_phys_to_machine(x);
++ return ((pte_t) { (x) });
++}
++
++static inline pmd_t __pmd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pmd_t) { (x) });
++}
++
++static inline pud_t __pud(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pud_t) { (x) });
++}
++
++static inline pgd_t __pgd(unsigned long x)
++{
++ if (x & _PAGE_PRESENT) x = pte_phys_to_machine(x);
++ return ((pgd_t) { (x) });
++}
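++
++/*
++ * Constructors and accessors come in pairs here: pte_val()/__pte() and
++ * friends translate between pseudo-physical and machine frame numbers
++ * whenever the entry is present, while the __*_val() forms hand back the
++ * raw machine-format word. _PAGE_IO entries are the exception in the pte
++ * case -- they already hold machine frames with no p2m backing, so
++ * translating them would corrupt the mapping.
++ */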
++
++#define __pgprot(x) ((pgprot_t) { (x) } )
++
++#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000UL
++#define __PAGE_OFFSET 0xffff880000000000UL
++
++#else
++#define __PHYSICAL_START CONFIG_PHYSICAL_START
++#define __START_KERNEL (__START_KERNEL_map + __PHYSICAL_START)
++#define __START_KERNEL_map 0xffffffff80000000
++#define __PAGE_OFFSET 0xffff880000000000
++#endif /* !__ASSEMBLY__ */
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++#undef LOAD_OFFSET
++#define LOAD_OFFSET 0
++#endif
++
++/* to align the pointer to the (next) page boundary */
++#define PAGE_ALIGN(addr) (((addr)+PAGE_SIZE-1)&PAGE_MASK)
++
++#define KERNEL_TEXT_SIZE (40UL*1024*1024)
++#define KERNEL_TEXT_START 0xffffffff80000000UL
++
++#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
++
++/* Note: __pa(&symbol_visible_to_c) should always be replaced with __pa_symbol.
++ Otherwise you risk miscompilation. */
++#define __pa(x) (((unsigned long)(x)>=__START_KERNEL_map)?(unsigned long)(x) - (unsigned long)__START_KERNEL_map:(unsigned long)(x) - PAGE_OFFSET)
++/* __pa_symbol should be used for C visible symbols.
++ This seems to be the official gcc blessed way to do such arithmetic. */
++#define __pa_symbol(x) \
++ ({unsigned long v; \
++ asm("" : "=r" (v) : "0" (x)); \
++ __pa(v); })
++
++#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
++#define __boot_va(x) __va(x)
++#define __boot_pa(x) __pa(x)
++#ifdef CONFIG_FLATMEM
++#define pfn_valid(pfn) ((pfn) < end_pfn)
++#endif
++
++#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
++#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
++#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
++
++#define VM_DATA_DEFAULT_FLAGS \
++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0 ) | \
++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#define __HAVE_ARCH_GATE_AREA 1
++
++#include <asm-generic/memory_model.h>
++#include <asm-generic/page.h>
++
++#endif /* __KERNEL__ */
++
++#endif /* _X86_64_PAGE_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgalloc_64.h 2007-06-18 08:38:13.000000000 +0200
+@@ -0,0 +1,204 @@
++#ifndef _X86_64_PGALLOC_H
++#define _X86_64_PGALLOC_H
++
++#include <asm/fixmap.h>
++#include <asm/pda.h>
++#include <linux/threads.h>
++#include <linux/mm.h>
++#include <asm/io.h> /* for phys_to_virt and page_to_pseudophys */
++
++#include <xen/features.h>
++void make_page_readonly(void *va, unsigned int feature);
++void make_page_writable(void *va, unsigned int feature);
++void make_pages_readonly(void *va, unsigned int nr, unsigned int feature);
++void make_pages_writable(void *va, unsigned int nr, unsigned int feature);
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
++{
++ set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)));
++}
++
++static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(page_to_pfn(pte) << PAGE_SHIFT),
++ pfn_pte(page_to_pfn(pte), PAGE_KERNEL_RO), 0));
++ set_pmd(pmd, __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT)));
++ } else {
++ *(pmd) = __pmd(_PAGE_TABLE | (page_to_pfn(pte) << PAGE_SHIFT));
++ }
++}
++
++static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pmd,
++ pfn_pte(virt_to_phys(pmd)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
++ } else {
++ *(pud) = __pud(_PAGE_TABLE | __pa(pmd));
++ }
++}
++
++/*
++ * We ought to use batch mode here, but pgd_populate() won't be
++ * called frequently.
++ */
++static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
++{
++ if (unlikely((mm)->context.pinned)) {
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pud,
++ pfn_pte(virt_to_phys(pud)>>PAGE_SHIFT,
++ PAGE_KERNEL_RO), 0));
++ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
++ set_pgd(__user_pgd(pgd), __pgd(_PAGE_TABLE | __pa(pud)));
++ } else {
++ *(pgd) = __pgd(_PAGE_TABLE | __pa(pud));
++ *(__user_pgd(pgd)) = *(pgd);
++ }
++}
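++
++/*
++ * The context.pinned checks above implement Xen's pinned page-table
++ * discipline: once an mm's page tables are pinned, every page in the
++ * tree must be mapped read-only and may only be modified through
++ * validated hypercalls -- hence the update_va_mapping() to
++ * PAGE_KERNEL_RO followed by set_pmd()/set_pud()/set_pgd(). Unpinned
++ * tables are still ordinary writable memory and can be stored to
++ * directly.
++ */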
++
++extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
++extern void pte_free(struct page *pte);
++
++static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pmd_free(pmd_t *pmd)
++{
++ BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pmd));
++}
++
++static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
++{
++ struct page *pg;
++
++ pg = pte_alloc_one(mm, addr);
++ return pg ? page_address(pg) : NULL;
++}
++
++static inline void pud_free(pud_t *pud)
++{
++ BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
++ pte_free(virt_to_page(pud));
++}
++
++static inline void pgd_list_add(pgd_t *pgd)
++{
++ struct page *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ page->index = (pgoff_t)pgd_list;
++ if (pgd_list)
++ pgd_list->private = (unsigned long)&page->index;
++ pgd_list = page;
++ page->private = (unsigned long)&pgd_list;
++ spin_unlock(&pgd_lock);
++}
++
++static inline void pgd_list_del(pgd_t *pgd)
++{
++ struct page *next, **pprev, *page = virt_to_page(pgd);
++
++ spin_lock(&pgd_lock);
++ next = (struct page *)page->index;
++ pprev = (struct page **)page->private;
++ *pprev = next;
++ if (next)
++ next->private = (unsigned long)pprev;
++ spin_unlock(&pgd_lock);
++}
++
++static inline pgd_t *pgd_alloc(struct mm_struct *mm)
++{
++ /*
++ * We allocate two contiguous pages for kernel and user.
++ */
++ unsigned boundary;
++ pgd_t *pgd = (pgd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT, 1);
++ if (!pgd)
++ return NULL;
++ pgd_list_add(pgd);
++ /*
++ * Copy kernel pointers in from init.
++ * Could keep a freelist or slab cache of those because the kernel
++ * part never changes.
++ */
++ boundary = pgd_index(__PAGE_OFFSET);
++ memset(pgd, 0, boundary * sizeof(pgd_t));
++ memcpy(pgd + boundary,
++ init_level4_pgt + boundary,
++ (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
++
++ memset(__user_pgd(pgd), 0, PAGE_SIZE); /* clean up user pgd */
++ /*
++ * Set level3_user_pgt for vsyscall area
++ */
++ __user_pgd(pgd)[pgd_index(VSYSCALL_START)] =
++ __pgd(__pa_symbol(level3_user_pgt) | _PAGE_TABLE);
++ return pgd;
++}
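++
++/*
++ * The order-1 allocation above hands back the kernel pgd and its user
++ * counterpart as a contiguous pair, which is what the __user_pgd(pgd)
++ * arithmetic ((pgd) + PTRS_PER_PGD) relies on: the user variant lives
++ * exactly one page after the kernel one.
++ */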
++
++static inline void pgd_free(pgd_t *pgd)
++{
++ pte_t *ptep = virt_to_ptep(pgd);
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(pgd));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)pgd,
++ pfn_pte(virt_to_phys(pgd)>>PAGE_SHIFT, PAGE_KERNEL),
++ 0));
++ }
++
++ ptep = virt_to_ptep(__user_pgd(pgd));
++
++ if (!pte_write(*ptep)) {
++ xen_pgd_unpin(__pa(__user_pgd(pgd)));
++ BUG_ON(HYPERVISOR_update_va_mapping(
++ (unsigned long)__user_pgd(pgd),
++ pfn_pte(virt_to_phys(__user_pgd(pgd))>>PAGE_SHIFT,
++ PAGE_KERNEL),
++ 0));
++ }
++
++ pgd_list_del(pgd);
++ free_pages((unsigned long)pgd, 1);
++}
++
++static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
++{
++ pte_t *pte = (pte_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
++ if (pte)
++ make_page_readonly(pte, XENFEAT_writable_page_tables);
++
++ return pte;
++}
++
++/* Should really implement gc for free page table pages. This could be
++ done with a reference count in struct page. */
++
++static inline void pte_free_kernel(pte_t *pte)
++{
++ BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
++ make_page_writable(pte, XENFEAT_writable_page_tables);
++ free_page((unsigned long)pte);
++}
++
++#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
++#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++#define __pud_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
++
++#endif /* _X86_64_PGALLOC_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/pgtable_64.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,583 @@
++#ifndef _X86_64_PGTABLE_H
++#define _X86_64_PGTABLE_H
++
++/*
++ * This file contains the functions and defines necessary to modify and use
++ * the x86-64 page table tree.
++ */
++#include <asm/processor.h>
++#include <asm/fixmap.h>
++#include <asm/bitops.h>
++#include <linux/threads.h>
++#include <linux/sched.h>
++#include <asm/pda.h>
++#ifdef CONFIG_XEN
++#include <asm/hypervisor.h>
++
++extern pud_t level3_user_pgt[512];
++
++extern void xen_init_pt(void);
++
++extern pte_t *lookup_address(unsigned long address);
++
++#define virt_to_ptep(va) \
++({ \
++ pte_t *__ptep = lookup_address((unsigned long)(va)); \
++ BUG_ON(!__ptep || !pte_present(*__ptep)); \
++ __ptep; \
++})
++
++#define arbitrary_virt_to_machine(va) \
++ (((maddr_t)pte_mfn(*virt_to_ptep(va)) << PAGE_SHIFT) \
++ | ((unsigned long)(va) & (PAGE_SIZE - 1)))
++#endif
++
++extern pud_t level3_kernel_pgt[512];
++extern pud_t level3_physmem_pgt[512];
++extern pud_t level3_ident_pgt[512];
++extern pmd_t level2_kernel_pgt[512];
++extern pgd_t init_level4_pgt[];
++extern pgd_t boot_level4_pgt[];
++extern unsigned long __supported_pte_mask;
++
++#define swapper_pg_dir init_level4_pgt
++
++extern int nonx_setup(char *str);
++extern void paging_init(void);
++extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
++
++extern unsigned long pgkern_mask;
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas, etc.
++ */
++extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++
++/*
++ * PGDIR_SHIFT determines what a top-level page table entry can map
++ */
++#define PGDIR_SHIFT 39
++#define PTRS_PER_PGD 512
++
++/*
++ * 3rd level page
++ */
++#define PUD_SHIFT 30
++#define PTRS_PER_PUD 512
++
++/*
++ * PMD_SHIFT determines the size of the area a middle-level
++ * page table can map
++ */
++#define PMD_SHIFT 21
++#define PTRS_PER_PMD 512
++
++/*
++ * entries per page directory level
++ */
++#define PTRS_PER_PTE 512
++
++#define pte_ERROR(e) \
++ printk("%s:%d: bad pte %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pte_val(e), pte_pfn(e))
++#define pmd_ERROR(e) \
++ printk("%s:%d: bad pmd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pmd_val(e), pmd_pfn(e))
++#define pud_ERROR(e) \
++ printk("%s:%d: bad pud %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pud_val(e), (pud_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++#define pgd_ERROR(e) \
++ printk("%s:%d: bad pgd %p(%016lx pfn %010lx).\n", __FILE__, __LINE__, \
++ &(e), __pgd_val(e), (pgd_val(e) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++
++#define pgd_none(x) (!__pgd_val(x))
++#define pud_none(x) (!__pud_val(x))
++
++static inline void set_pte(pte_t *dst, pte_t val)
++{
++ *dst = val;
++}
++
++#define set_pmd(pmdptr, pmdval) xen_l2_entry_update(pmdptr, (pmdval))
++#define set_pud(pudptr, pudval) xen_l3_entry_update(pudptr, (pudval))
++#define set_pgd(pgdptr, pgdval) xen_l4_entry_update(pgdptr, (pgdval))
++
++static inline void pud_clear (pud_t * pud)
++{
++ set_pud(pud, __pud(0));
++}
++
++#define __user_pgd(pgd) ((pgd) + PTRS_PER_PGD)
++
++static inline void pgd_clear (pgd_t * pgd)
++{
++ set_pgd(pgd, __pgd(0));
++ set_pgd(__user_pgd(pgd), __pgd(0));
++}
++
++#define pud_page(pud) \
++ ((unsigned long) __va(pud_val(pud) & PHYSICAL_PAGE_MASK))
++
++#define pte_same(a, b) ((a).pte == (b).pte)
++
++#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
++
++#define PMD_SIZE (1UL << PMD_SHIFT)
++#define PMD_MASK (~(PMD_SIZE-1))
++#define PUD_SIZE (1UL << PUD_SHIFT)
++#define PUD_MASK (~(PUD_SIZE-1))
++#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
++#define PGDIR_MASK (~(PGDIR_SIZE-1))
++
++#define USER_PTRS_PER_PGD ((TASK_SIZE-1)/PGDIR_SIZE+1)
++#define FIRST_USER_ADDRESS 0
++
++#ifndef __ASSEMBLY__
++#define MAXMEM 0x3fffffffffffUL
++#define VMALLOC_START 0xffffc20000000000UL
++#define VMALLOC_END 0xffffe1ffffffffffUL
++#define MODULES_VADDR 0xffffffff88000000UL
++#define MODULES_END 0xfffffffffff00000UL
++#define MODULES_LEN (MODULES_END - MODULES_VADDR)
++
++#define _PAGE_BIT_PRESENT 0
++#define _PAGE_BIT_RW 1
++#define _PAGE_BIT_USER 2
++#define _PAGE_BIT_PWT 3
++#define _PAGE_BIT_PCD 4
++#define _PAGE_BIT_ACCESSED 5
++#define _PAGE_BIT_DIRTY 6
++#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
++#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
++#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
++
++#define _PAGE_PRESENT 0x001
++#define _PAGE_RW 0x002
++#define _PAGE_USER 0x004
++#define _PAGE_PWT 0x008
++#define _PAGE_PCD 0x010
++#define _PAGE_ACCESSED 0x020
++#define _PAGE_DIRTY 0x040
++#define _PAGE_PSE 0x080 /* 2MB page */
++#define _PAGE_FILE 0x040 /* nonlinear file mapping, saved PTE; unset:swap */
++#define _PAGE_GLOBAL 0x100 /* Global TLB entry */
++
++#define _PAGE_PROTNONE 0x080 /* If not present */
++#define _PAGE_NX (1UL<<_PAGE_BIT_NX)
++
++/* Mapped page is I/O or foreign and has no associated page struct. */
++#define _PAGE_IO 0x200
++
++#if CONFIG_XEN_COMPAT <= 0x030002
++extern unsigned int __kernel_page_user;
++#else
++#define __kernel_page_user 0
++#endif
++
++#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_DIRTY)
++#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED | _PAGE_DIRTY | __kernel_page_user)
++
++#define _PAGE_CHG_MASK (PTE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_IO)
++
++#define PAGE_NONE __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
++#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_SHARED_EXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_COPY PAGE_COPY_NOEXEC
++#define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
++#define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_EXEC \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | __kernel_page_user)
++#define __PAGE_KERNEL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_PCD | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_RO \
++ (_PAGE_PRESENT | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX | __kernel_page_user)
++#define __PAGE_KERNEL_VSYSCALL \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++#define __PAGE_KERNEL_VSYSCALL_NOCACHE \
++ (_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_PCD)
++#define __PAGE_KERNEL_LARGE \
++ (__PAGE_KERNEL | _PAGE_PSE)
++#define __PAGE_KERNEL_LARGE_EXEC \
++ (__PAGE_KERNEL_EXEC | _PAGE_PSE)
++
++/*
++ * We don't support GLOBAL pages in xenolinux64.
++ */
++#define MAKE_GLOBAL(x) __pgprot((x))
++
++#define PAGE_KERNEL MAKE_GLOBAL(__PAGE_KERNEL)
++#define PAGE_KERNEL_EXEC MAKE_GLOBAL(__PAGE_KERNEL_EXEC)
++#define PAGE_KERNEL_RO MAKE_GLOBAL(__PAGE_KERNEL_RO)
++#define PAGE_KERNEL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_NOCACHE)
++#define PAGE_KERNEL_VSYSCALL32 __pgprot(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_VSYSCALL MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL)
++#define PAGE_KERNEL_LARGE MAKE_GLOBAL(__PAGE_KERNEL_LARGE)
++#define PAGE_KERNEL_VSYSCALL_NOCACHE MAKE_GLOBAL(__PAGE_KERNEL_VSYSCALL_NOCACHE)
++
++/* xwr */
++#define __P000 PAGE_NONE
++#define __P001 PAGE_READONLY
++#define __P010 PAGE_COPY
++#define __P011 PAGE_COPY
++#define __P100 PAGE_READONLY_EXEC
++#define __P101 PAGE_READONLY_EXEC
++#define __P110 PAGE_COPY_EXEC
++#define __P111 PAGE_COPY_EXEC
++
++#define __S000 PAGE_NONE
++#define __S001 PAGE_READONLY
++#define __S010 PAGE_SHARED
++#define __S011 PAGE_SHARED
++#define __S100 PAGE_READONLY_EXEC
++#define __S101 PAGE_READONLY_EXEC
++#define __S110 PAGE_SHARED_EXEC
++#define __S111 PAGE_SHARED_EXEC
++
++static inline unsigned long pgd_bad(pgd_t pgd)
++{
++ unsigned long val = __pgd_val(pgd);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++static inline unsigned long pud_bad(pud_t pud)
++{
++ unsigned long val = __pud_val(pud);
++ val &= ~PTE_MASK;
++ val &= ~(_PAGE_USER | _PAGE_DIRTY);
++ return val & ~(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED);
++}
++
++#define set_pte_at(_mm,addr,ptep,pteval) do { \
++ if (((_mm) != current->mm && (_mm) != &init_mm) || \
++ HYPERVISOR_update_va_mapping((addr), (pteval), 0)) \
++ set_pte((ptep), (pteval)); \
++} while (0)
++
++#define pte_none(x) (!(x).pte)
++#define pte_present(x) ((x).pte & (_PAGE_PRESENT | _PAGE_PROTNONE))
++#define pte_clear(mm,addr,xp) do { set_pte_at(mm, addr, xp, __pte(0)); } while (0)
++
++#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
++
++#define __pte_mfn(_pte) (((_pte).pte & PTE_MASK) >> PAGE_SHIFT)
++#define pte_mfn(_pte) ((_pte).pte & _PAGE_PRESENT ? \
++ __pte_mfn(_pte) : pfn_to_mfn(__pte_mfn(_pte)))
++#define pte_pfn(_pte) ((_pte).pte & _PAGE_IO ? end_pfn : \
++ (_pte).pte & _PAGE_PRESENT ? \
++ mfn_to_local_pfn(__pte_mfn(_pte)) : \
++ __pte_mfn(_pte))
++
++#define pte_page(x) pfn_to_page(pte_pfn(x))
++
++static inline pte_t pfn_pte(unsigned long page_nr, pgprot_t pgprot)
++{
++ unsigned long pte = page_nr << PAGE_SHIFT;
++ pte |= pgprot_val(pgprot);
++ pte &= __supported_pte_mask;
++ return __pte(pte);
++}
++
++static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (!pte_none(pte)) {
++ if ((mm != &init_mm) ||
++ HYPERVISOR_update_va_mapping(addr, __pte(0), 0))
++ pte = __pte_ma(xchg(&ptep->pte, 0));
++ }
++ return pte;
++}
++
++static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long addr, pte_t *ptep, int full)
++{
++ if (full) {
++ pte_t pte = *ptep;
++ if (mm->context.pinned)
++ xen_l1_entry_update(ptep, __pte(0));
++ else
++ *ptep = __pte(0);
++ return pte;
++ }
++ return ptep_get_and_clear(mm, addr, ptep);
++}
++
++#define ptep_clear_flush(vma, addr, ptep) \
++({ \
++ pte_t *__ptep = (ptep); \
++ pte_t __res = *__ptep; \
++ if (!pte_none(__res) && \
++ ((vma)->vm_mm != current->mm || \
++ HYPERVISOR_update_va_mapping(addr, __pte(0), \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI))) { \
++ __ptep->pte = 0; \
++ flush_tlb_page(vma, addr); \
++ } \
++ __res; \
++})
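++
++/*
++ * The UVMF_INVLPG|UVMF_MULTI flags ask the hypervisor to invalidate the
++ * single virtual address on every vcpu named in the accompanying
++ * cpu_vm_mask bitmap, folding the TLB shootdown into the same hypercall
++ * that clears the PTE. Only when the fast path is refused (foreign mm,
++ * or a non-zero return from the hypercall) do we fall back to a direct
++ * store plus flush_tlb_page().
++ */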
++
++/*
++ * The following only work if pte_present() is true.
++ * Undefined behaviour if not.
++ */
++#define __LARGE_PTE (_PAGE_PSE|_PAGE_PRESENT)
++static inline int pte_user(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_read(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_exec(pte_t pte) { return __pte_val(pte) & _PAGE_USER; }
++static inline int pte_dirty(pte_t pte) { return __pte_val(pte) & _PAGE_DIRTY; }
++static inline int pte_young(pte_t pte) { return __pte_val(pte) & _PAGE_ACCESSED; }
++static inline int pte_write(pte_t pte) { return __pte_val(pte) & _PAGE_RW; }
++static inline int pte_file(pte_t pte) { return __pte_val(pte) & _PAGE_FILE; }
++static inline int pte_huge(pte_t pte) { return __pte_val(pte) & _PAGE_PSE; }
++
++static inline pte_t pte_rdprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_exprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_USER; return pte; }
++static inline pte_t pte_mkclean(pte_t pte) { __pte_val(pte) &= ~_PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkold(pte_t pte) { __pte_val(pte) &= ~_PAGE_ACCESSED; return pte; }
++static inline pte_t pte_wrprotect(pte_t pte) { __pte_val(pte) &= ~_PAGE_RW; return pte; }
++static inline pte_t pte_mkread(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkexec(pte_t pte) { __pte_val(pte) |= _PAGE_USER; return pte; }
++static inline pte_t pte_mkdirty(pte_t pte) { __pte_val(pte) |= _PAGE_DIRTY; return pte; }
++static inline pte_t pte_mkyoung(pte_t pte) { __pte_val(pte) |= _PAGE_ACCESSED; return pte; }
++static inline pte_t pte_mkwrite(pte_t pte) { __pte_val(pte) |= _PAGE_RW; return pte; }
++static inline pte_t pte_mkhuge(pte_t pte) { __pte_val(pte) |= _PAGE_PSE; return pte; }
++
++#define ptep_test_and_clear_dirty(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_dirty(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkclean(__pte)); \
++ __ret; \
++})
++
++#define ptep_test_and_clear_young(vma, addr, ptep) \
++({ \
++ pte_t __pte = *(ptep); \
++ int __ret = pte_young(__pte); \
++ if (__ret) \
++ set_pte_at((vma)->vm_mm, addr, ptep, pte_mkold(__pte)); \
++ __ret; \
++})
++
++static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
++{
++ pte_t pte = *ptep;
++ if (pte_write(pte))
++ set_pte_at(mm, addr, ptep, pte_wrprotect(pte));
++}
++
++/*
++ * Macro to mark a page protection value as "uncacheable".
++ */
++#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) | _PAGE_PCD | _PAGE_PWT))
++
++static inline int pmd_large(pmd_t pte)
++{
++ return (__pmd_val(pte) & __LARGE_PTE) == __LARGE_PTE;
++}
++
++/*
++ * Conversion functions: convert a page and protection to a page entry,
++ * and a page entry and page directory to the page they refer to.
++ */
++
++/*
++ * Level 4 access.
++ * Never use these in the common code.
++ */
++#define pgd_page(pgd) ((unsigned long) __va(pgd_val(pgd) & PTE_MASK))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_offset(mm, addr) ((mm)->pgd + pgd_index(addr))
++#define pgd_offset_k(address) (init_level4_pgt + pgd_index(address))
++#define pgd_present(pgd) (__pgd_val(pgd) & _PAGE_PRESENT)
++#define mk_kernel_pgd(address) __pgd((address) | _KERNPG_TABLE)
++
++/* PUD - Level3 access */
++/* to find an entry in a page-table-directory. */
++#define pud_index(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
++#define pud_offset(pgd, address) ((pud_t *) pgd_page(*(pgd)) + pud_index(address))
++#define pud_present(pud) (__pud_val(pud) & _PAGE_PRESENT)
++
++/* PMD - Level 2 access */
++#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PTE_MASK))
++#define pmd_page(pmd) (pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT))
++
++#define pmd_index(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
++#define pmd_offset(dir, address) ((pmd_t *) pud_page(*(dir)) + \
++ pmd_index(address))
++#define pmd_none(x) (!__pmd_val(x))
++#if CONFIG_XEN_COMPAT <= 0x030002
++/* pmd_present doesn't just test the _PAGE_PRESENT bit since writable
++ page tables (wr.p.t.) can temporarily clear it. */
++#define pmd_present(x) (__pmd_val(x))
++#else
++#define pmd_present(x) (__pmd_val(x) & _PAGE_PRESENT)
++#endif
++#define pmd_clear(xp) do { set_pmd(xp, __pmd(0)); } while (0)
++#define pmd_bad(x) ((__pmd_val(x) & ~(PTE_MASK | _PAGE_USER | _PAGE_PRESENT)) \
++ != (_KERNPG_TABLE & ~(_PAGE_USER | _PAGE_PRESENT)))
++#define pfn_pmd(nr,prot) (__pmd(((nr) << PAGE_SHIFT) | pgprot_val(prot)))
++#define pmd_pfn(x) ((pmd_val(x) & __PHYSICAL_MASK) >> PAGE_SHIFT)
++
++#define pte_to_pgoff(pte) ((__pte_val(pte) & PHYSICAL_PAGE_MASK) >> PAGE_SHIFT)
++#define pgoff_to_pte(off) ((pte_t) { ((off) << PAGE_SHIFT) | _PAGE_FILE })
++#define PTE_FILE_MAX_BITS __PHYSICAL_MASK_SHIFT
++
++/* PTE - Level 1 access. */
++
++/* page, protection -> pte */
++#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
++#define mk_pte_huge(entry) (__pte_val(entry) |= _PAGE_PRESENT | _PAGE_PSE)
++
++/* physical address -> PTE */
++static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
++{
++ unsigned long pteval;
++ pteval = physpage | pgprot_val(pgprot);
++ return __pte(pteval);
++}
++
++/* Change flags of a PTE */
++static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
++{
++ /*
++ * Since this might change the present bit (which controls whether
++ * a pte_t object has undergone p2m translation), we must use
++ * pte_val() on the input pte and __pte() for the return value.
++ */
++ unsigned long pteval = pte_val(pte);
++
++ pteval &= _PAGE_CHG_MASK;
++ pteval |= pgprot_val(newprot);
++ pteval &= __supported_pte_mask;
++ return __pte(pteval);
++}
++
++#define pte_index(address) \
++ (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1))
++#define pte_offset_kernel(dir, address) ((pte_t *) pmd_page_kernel(*(dir)) + \
++ pte_index(address))
++
++/* x86-64 always has all page tables mapped. */
++#define pte_offset_map(dir,address) pte_offset_kernel(dir,address)
++#define pte_offset_map_nested(dir,address) pte_offset_kernel(dir,address)
++#define pte_unmap(pte) /* NOP */
++#define pte_unmap_nested(pte) /* NOP */
++
++#define update_mmu_cache(vma,address,pte) do { } while (0)
++
++/*
++ * Rules for using ptep_establish: the pte MUST be a user pte, and
++ * must be a present->present transition.
++ */
++#define __HAVE_ARCH_PTEP_ESTABLISH
++#define ptep_establish(vma, address, ptep, pteval) \
++ do { \
++ if ( likely((vma)->vm_mm == current->mm) ) { \
++ BUG_ON(HYPERVISOR_update_va_mapping(address, \
++ pteval, \
++ (unsigned long)(vma)->vm_mm->cpu_vm_mask.bits| \
++ UVMF_INVLPG|UVMF_MULTI)); \
++ } else { \
++ xen_l1_entry_update(ptep, pteval); \
++ flush_tlb_page(vma, address); \
++ } \
++ } while (0)
++
++/* We only update the dirty/accessed state if we set
++ * the dirty bit by hand in the kernel, since the hardware
++ * will do the accessed bit for us, and we don't want to
++ * race with other CPUs that might be updating the dirty
++ * bit at the same time. */
++#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
++#define ptep_set_access_flags(vma, address, ptep, entry, dirty) \
++ do { \
++ if (dirty) \
++ ptep_establish(vma, address, ptep, entry); \
++ } while (0)
++
++/* Encode and de-code a swap entry */
++#define __swp_type(x) (((x).val >> 1) & 0x3f)
++#define __swp_offset(x) ((x).val >> 8)
++#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 1) | ((offset) << 8) })
++#define __pte_to_swp_entry(pte) ((swp_entry_t) { __pte_val(pte) })
++#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
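++
++/*
++ * Layout sketch for the encoding above: bit 0 stays clear (the entry is
++ * not _PAGE_PRESENT), bits 1..6 hold the swap type, and bits 8 and up
++ * hold the page offset, so for example:
++ *
++ *	swp_entry_t e = __swp_entry(3, 0x1234);
++ *	__swp_type(e) == 3, __swp_offset(e) == 0x1234
++ */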
++
++extern spinlock_t pgd_lock;
++extern struct page *pgd_list;
++void vmalloc_sync_all(void);
++
++#endif /* !__ASSEMBLY__ */
++
++extern int kern_addr_valid(unsigned long addr);
++
++#define DOMID_LOCAL (0xFFFFU)
++
++struct vm_area_struct;
++
++int direct_remap_pfn_range(struct vm_area_struct *vma,
++ unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int direct_kernel_remap_pfn_range(unsigned long address,
++ unsigned long mfn,
++ unsigned long size,
++ pgprot_t prot,
++ domid_t domid);
++
++int create_lookup_pte_addr(struct mm_struct *mm,
++ unsigned long address,
++ uint64_t *ptep);
++
++int touch_pte_range(struct mm_struct *mm,
++ unsigned long address,
++ unsigned long size);
++
++int xen_change_pte_range(struct mm_struct *mm, pmd_t *pmd,
++ unsigned long addr, unsigned long end, pgprot_t newprot);
++
++#define arch_change_pte_range(mm, pmd, addr, end, newprot) \
++ xen_change_pte_range(mm, pmd, addr, end, newprot)
++
++#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \
++ direct_remap_pfn_range(vma,vaddr,pfn,size,prot,DOMID_IO)
++
++#define MK_IOSPACE_PFN(space, pfn) (pfn)
++#define GET_IOSPACE(pfn) 0
++#define GET_PFN(pfn) (pfn)
++
++#define HAVE_ARCH_UNMAPPED_AREA
++
++#define pgtable_cache_init() do { } while (0)
++#define check_pgt_cache() do { } while (0)
++
++#define PAGE_AGP PAGE_KERNEL_NOCACHE
++#define HAVE_PAGE_AGP 1
++
++/* fs/proc/kcore.c */
++#define kc_vaddr_to_offset(v) ((v) & __VIRTUAL_MASK)
++#define kc_offset_to_vaddr(o) \
++ (((o) & (1UL << (__VIRTUAL_MASK_SHIFT-1))) ? ((o) | (~__VIRTUAL_MASK)) : (o))
++
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
++#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_DIRTY
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
++#define __HAVE_ARCH_PTEP_GET_AND_CLEAR_FULL
++#define __HAVE_ARCH_PTEP_CLEAR_FLUSH
++#define __HAVE_ARCH_PTEP_SET_WRPROTECT
++#define __HAVE_ARCH_PTE_SAME
++#include <asm-generic/pgtable.h>
++
++#endif /* _X86_64_PGTABLE_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/processor_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/processor_64.h 2008-03-06 08:54:32.000000000 +0100
+@@ -0,0 +1,502 @@
++/*
++ * include/asm-x86_64/processor.h
++ *
++ * Copyright (C) 1994 Linus Torvalds
++ */
++
++#ifndef __ASM_X86_64_PROCESSOR_H
++#define __ASM_X86_64_PROCESSOR_H
++
++#include <asm/segment.h>
++#include <asm/page.h>
++#include <asm/types.h>
++#include <asm/sigcontext.h>
++#include <asm/cpufeature.h>
++#include <linux/threads.h>
++#include <asm/msr.h>
++#include <asm/current.h>
++#include <asm/system.h>
++#include <asm/mmsegment.h>
++#include <asm/percpu.h>
++#include <linux/personality.h>
++#include <linux/cpumask.h>
++
++#define TF_MASK 0x00000100
++#define IF_MASK 0x00000200
++#define IOPL_MASK 0x00003000
++#define NT_MASK 0x00004000
++#define VM_MASK 0x00020000
++#define AC_MASK 0x00040000
++#define VIF_MASK 0x00080000 /* virtual interrupt flag */
++#define VIP_MASK 0x00100000 /* virtual interrupt pending */
++#define ID_MASK 0x00200000
++
++#define desc_empty(desc) \
++ (!((desc)->a | (desc)->b))
++
++#define desc_equal(desc1, desc2) \
++ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
++
++/*
++ * Default implementation of macro that returns current
++ * instruction pointer ("program counter").
++ */
++#define current_text_addr() ({ void *pc; asm volatile("leaq 1f(%%rip),%0\n1:":"=r"(pc)); pc; })
++
++/*
++ * CPU type and hardware bug flags. Kept separately for each CPU.
++ */
++
++struct cpuinfo_x86 {
++ __u8 x86; /* CPU family */
++ __u8 x86_vendor; /* CPU vendor */
++ __u8 x86_model;
++ __u8 x86_mask;
++ int cpuid_level; /* Maximum supported CPUID level, -1=no CPUID */
++ __u32 x86_capability[NCAPINTS];
++ char x86_vendor_id[16];
++ char x86_model_id[64];
++ int x86_cache_size; /* in KB */
++ int x86_clflush_size;
++ int x86_cache_alignment;
++ int x86_tlbsize; /* number of 4K pages in DTLB/ITLB combined */
++ __u8 x86_virt_bits, x86_phys_bits;
++ __u8 x86_max_cores; /* cpuid returned max cores value */
++ __u32 x86_power;
++ __u32 extended_cpuid_level; /* Max extended CPUID function supported */
++ unsigned long loops_per_jiffy;
++#ifdef CONFIG_SMP
++ cpumask_t llc_shared_map; /* cpus sharing the last level cache */
++#endif
++ __u8 apicid;
++#ifdef CONFIG_SMP
++ __u8 booted_cores; /* number of cores as seen by OS */
++ __u8 phys_proc_id; /* Physical Processor id. */
++ __u8 cpu_core_id; /* Core id. */
++#endif
++} ____cacheline_aligned;
++
++#define X86_VENDOR_INTEL 0
++#define X86_VENDOR_CYRIX 1
++#define X86_VENDOR_AMD 2
++#define X86_VENDOR_UMC 3
++#define X86_VENDOR_NEXGEN 4
++#define X86_VENDOR_CENTAUR 5
++#define X86_VENDOR_RISE 6
++#define X86_VENDOR_TRANSMETA 7
++#define X86_VENDOR_NUM 8
++#define X86_VENDOR_UNKNOWN 0xff
++
++#ifdef CONFIG_SMP
++extern struct cpuinfo_x86 cpu_data[];
++#define current_cpu_data cpu_data[smp_processor_id()]
++#else
++#define cpu_data (&boot_cpu_data)
++#define current_cpu_data boot_cpu_data
++#endif
++
++extern char ignore_irq13;
++
++extern void identify_cpu(struct cpuinfo_x86 *);
++extern void print_cpu_info(struct cpuinfo_x86 *);
++extern unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c);
++extern unsigned short num_cache_leaves;
++
++/*
++ * EFLAGS bits
++ */
++#define X86_EFLAGS_CF 0x00000001 /* Carry Flag */
++#define X86_EFLAGS_PF 0x00000004 /* Parity Flag */
++#define X86_EFLAGS_AF 0x00000010 /* Auxiliary carry Flag */
++#define X86_EFLAGS_ZF 0x00000040 /* Zero Flag */
++#define X86_EFLAGS_SF 0x00000080 /* Sign Flag */
++#define X86_EFLAGS_TF 0x00000100 /* Trap Flag */
++#define X86_EFLAGS_IF 0x00000200 /* Interrupt Flag */
++#define X86_EFLAGS_DF 0x00000400 /* Direction Flag */
++#define X86_EFLAGS_OF 0x00000800 /* Overflow Flag */
++#define X86_EFLAGS_IOPL 0x00003000 /* IOPL mask */
++#define X86_EFLAGS_NT 0x00004000 /* Nested Task */
++#define X86_EFLAGS_RF 0x00010000 /* Resume Flag */
++#define X86_EFLAGS_VM 0x00020000 /* Virtual Mode */
++#define X86_EFLAGS_AC 0x00040000 /* Alignment Check */
++#define X86_EFLAGS_VIF 0x00080000 /* Virtual Interrupt Flag */
++#define X86_EFLAGS_VIP 0x00100000 /* Virtual Interrupt Pending */
++#define X86_EFLAGS_ID 0x00200000 /* CPUID detection flag */
++
++/*
++ * Intel CPU features in CR4
++ */
++#define X86_CR4_VME 0x0001 /* enable vm86 extensions */
++#define X86_CR4_PVI 0x0002 /* virtual interrupts flag enable */
++#define X86_CR4_TSD 0x0004 /* disable time stamp at ipl 3 */
++#define X86_CR4_DE 0x0008 /* enable debugging extensions */
++#define X86_CR4_PSE 0x0010 /* enable page size extensions */
++#define X86_CR4_PAE 0x0020 /* enable physical address extensions */
++#define X86_CR4_MCE 0x0040 /* Machine check enable */
++#define X86_CR4_PGE 0x0080 /* enable global pages */
++#define X86_CR4_PCE 0x0100 /* enable performance counters at ipl 3 */
++#define X86_CR4_OSFXSR 0x0200 /* enable fast FPU save and restore */
++#define X86_CR4_OSXMMEXCPT 0x0400 /* enable unmasked SSE exceptions */
++
++/*
++ * Save the cr4 feature set we're using (ie
++ * Pentium 4MB enable and PPro Global page
++ * enable), so that any CPU's that boot up
++ * after us can get the correct flags.
++ */
++extern unsigned long mmu_cr4_features;
++
++static inline void set_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features |= mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "orq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (mask)
++ :"ax");
++}
++
++static inline void clear_in_cr4 (unsigned long mask)
++{
++ mmu_cr4_features &= ~mask;
++ __asm__("movq %%cr4,%%rax\n\t"
++ "andq %0,%%rax\n\t"
++ "movq %%rax,%%cr4\n"
++ : : "irg" (~mask)
++ :"ax");
++}
++
++
++/*
++ * User space process size. 47bits minus one guard page.
++ */
++#define TASK_SIZE64 (0x800000000000UL - 4096)
++
++/* This decides where the kernel will search for a free chunk of vm
++ * space during mmap's.
++ */
++#define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? 0xc0000000 : 0xFFFFe000)
++
++#define TASK_SIZE (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++#define TASK_SIZE_OF(child) ((test_tsk_thread_flag(child, TIF_IA32)) ? IA32_PAGE_OFFSET : TASK_SIZE64)
++
++#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE/3)
++
++/*
++ * Size of io_bitmap.
++ */
++#define IO_BITMAP_BITS 65536
++#define IO_BITMAP_BYTES (IO_BITMAP_BITS/8)
++#define IO_BITMAP_LONGS (IO_BITMAP_BYTES/sizeof(long))
++#ifndef CONFIG_X86_NO_TSS
++#define IO_BITMAP_OFFSET offsetof(struct tss_struct,io_bitmap)
++#endif
++#define INVALID_IO_BITMAP_OFFSET 0x8000
++
++struct i387_fxsave_struct {
++ u16 cwd;
++ u16 swd;
++ u16 twd;
++ u16 fop;
++ u64 rip;
++ u64 rdp;
++ u32 mxcsr;
++ u32 mxcsr_mask;
++ u32 st_space[32]; /* 8*16 bytes for each FP-reg = 128 bytes */
++ u32 xmm_space[64]; /* 16*16 bytes for each XMM-reg = 128 bytes */
++ u32 padding[24];
++} __attribute__ ((aligned (16)));
++
++union i387_union {
++ struct i387_fxsave_struct fxsave;
++};
++
++#ifndef CONFIG_X86_NO_TSS
++struct tss_struct {
++ u32 reserved1;
++ u64 rsp0;
++ u64 rsp1;
++ u64 rsp2;
++ u64 reserved2;
++ u64 ist[7];
++ u32 reserved3;
++ u32 reserved4;
++ u16 reserved5;
++ u16 io_bitmap_base;
++ /*
++ * The extra 1 is there because the CPU will access an
++ * additional byte beyond the end of the IO permission
++ * bitmap. The extra byte must be all 1 bits, and must
++ * be within the limit. Thus we have:
++ *
++ * 128 bytes, the bitmap itself, for ports 0..0x3ff
++ * 8 bytes, for an extra "long" of ~0UL
++ */
++ unsigned long io_bitmap[IO_BITMAP_LONGS + 1];
++} __attribute__((packed)) ____cacheline_aligned;
++
++DECLARE_PER_CPU(struct tss_struct,init_tss);
++#endif
++
++
++extern struct cpuinfo_x86 boot_cpu_data;
++#ifndef CONFIG_X86_NO_TSS
++/* Save the original ist values for checking stack pointers during debugging */
++struct orig_ist {
++ unsigned long ist[7];
++};
++DECLARE_PER_CPU(struct orig_ist, orig_ist);
++#endif
++
++#ifdef CONFIG_X86_VSMP
++#define ARCH_MIN_TASKALIGN (1 << INTERNODE_CACHE_SHIFT)
++#define ARCH_MIN_MMSTRUCT_ALIGN (1 << INTERNODE_CACHE_SHIFT)
++#else
++#define ARCH_MIN_TASKALIGN 16
++#define ARCH_MIN_MMSTRUCT_ALIGN 0
++#endif
++
++struct thread_struct {
++ unsigned long rsp0;
++ unsigned long rsp;
++ unsigned long userrsp; /* Copy from PDA */
++ unsigned long fs;
++ unsigned long gs;
++ unsigned short es, ds, fsindex, gsindex;
++/* Hardware debugging registers */
++ unsigned long debugreg0;
++ unsigned long debugreg1;
++ unsigned long debugreg2;
++ unsigned long debugreg3;
++ unsigned long debugreg6;
++ unsigned long debugreg7;
++/* fault info */
++ unsigned long cr2, trap_no, error_code;
++/* floating point info */
++ union i387_union i387 __attribute__((aligned(16)));
++/* IO permissions. The bitmap could be moved into the GDT, which would make
++ switching faster for a limited number of ioperm-using tasks. -AK */
++ int ioperm;
++ unsigned long *io_bitmap_ptr;
++ unsigned io_bitmap_max;
++/* cached TLS descriptors. */
++ u64 tls_array[GDT_ENTRY_TLS_ENTRIES];
++ unsigned int iopl;
++} __attribute__((aligned(16)));
++
++#define INIT_THREAD { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++
++#ifndef CONFIG_X86_NO_TSS
++#define INIT_TSS { \
++ .rsp0 = (unsigned long)&init_stack + sizeof(init_stack) \
++}
++#endif
++
++#define INIT_MMAP \
++{ &init_mm, 0, 0, NULL, PAGE_SHARED, VM_READ | VM_WRITE | VM_EXEC, 1, NULL, NULL }
++
++#define start_thread(regs,new_rip,new_rsp) do { \
++ asm volatile("movl %0,%%fs; movl %0,%%es; movl %0,%%ds": :"r" (0)); \
++ load_gs_index(0); \
++ (regs)->rip = (new_rip); \
++ (regs)->rsp = (new_rsp); \
++ write_pda(oldrsp, (new_rsp)); \
++ (regs)->cs = __USER_CS; \
++ (regs)->ss = __USER_DS; \
++ (regs)->eflags = 0x200; \
++ set_fs(USER_DS); \
++} while(0)
++
++#define get_debugreg(var, register) \
++ var = HYPERVISOR_get_debugreg(register)
++#define set_debugreg(value, register) do { \
++ if (HYPERVISOR_set_debugreg(register, value)) \
++ BUG(); \
++} while (0)
++
++struct task_struct;
++struct mm_struct;
++
++/* Free all resources held by a thread. */
++extern void release_thread(struct task_struct *);
++
++/* Prepare to copy thread state - unlazy all lazy status */
++extern void prepare_to_copy(struct task_struct *tsk);
++
++/*
++ * create a kernel thread without removing it from tasklists
++ */
++extern long kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
++
++/*
++ * Return saved PC of a blocked thread.
++ * What is this good for? It will always be the scheduler or ret_from_fork.
++ */
++#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.rsp - 8))
++
++extern unsigned long get_wchan(struct task_struct *p);
++#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.rsp0 - 1)
++#define KSTK_EIP(tsk) (task_pt_regs(tsk)->rip)
++#define KSTK_ESP(tsk) -1 /* sorry. doesn't work for syscall. */
++
++
++struct microcode_header {
++ unsigned int hdrver;
++ unsigned int rev;
++ unsigned int date;
++ unsigned int sig;
++ unsigned int cksum;
++ unsigned int ldrver;
++ unsigned int pf;
++ unsigned int datasize;
++ unsigned int totalsize;
++ unsigned int reserved[3];
++};
++
++struct microcode {
++ struct microcode_header hdr;
++ unsigned int bits[0];
++};
++
++typedef struct microcode microcode_t;
++typedef struct microcode_header microcode_header_t;
++
++/* microcode format is extended from prescott processors */
++struct extended_signature {
++ unsigned int sig;
++ unsigned int pf;
++ unsigned int cksum;
++};
++
++struct extended_sigtable {
++ unsigned int count;
++ unsigned int cksum;
++ unsigned int reserved[3];
++ struct extended_signature sigs[0];
++};
++
++
++#define ASM_NOP1 K8_NOP1
++#define ASM_NOP2 K8_NOP2
++#define ASM_NOP3 K8_NOP3
++#define ASM_NOP4 K8_NOP4
++#define ASM_NOP5 K8_NOP5
++#define ASM_NOP6 K8_NOP6
++#define ASM_NOP7 K8_NOP7
++#define ASM_NOP8 K8_NOP8
++
++/* Opteron nops */
++#define K8_NOP1 ".byte 0x90\n"
++#define K8_NOP2 ".byte 0x66,0x90\n"
++#define K8_NOP3 ".byte 0x66,0x66,0x90\n"
++#define K8_NOP4 ".byte 0x66,0x66,0x66,0x90\n"
++#define K8_NOP5 K8_NOP3 K8_NOP2
++#define K8_NOP6 K8_NOP3 K8_NOP3
++#define K8_NOP7 K8_NOP4 K8_NOP3
++#define K8_NOP8 K8_NOP4 K8_NOP4
++
++#define ASM_NOP_MAX 8
++
++/* REP NOP (PAUSE) is a good thing to insert into busy-wait loops. */
++static inline void rep_nop(void)
++{
++ __asm__ __volatile__("rep;nop": : :"memory");
++}
++
++/* Stop speculative execution */
++static inline void sync_core(void)
++{
++ int tmp;
++ asm volatile("cpuid" : "=a" (tmp) : "0" (1) : "ebx","ecx","edx","memory");
++}
++
++#define cpu_has_fpu 1
++
++#define ARCH_HAS_PREFETCH
++static inline void prefetch(void *x)
++{
++ asm volatile("prefetcht0 %0" :: "m" (*(unsigned long *)x));
++}
++
++#define ARCH_HAS_PREFETCHW 1
++static inline void prefetchw(void *x)
++{
++ alternative_input("prefetcht0 (%1)",
++ "prefetchw (%1)",
++ X86_FEATURE_3DNOW,
++ "r" (x));
++}
++
++#define ARCH_HAS_SPINLOCK_PREFETCH 1
++
++#define spin_lock_prefetch(x) prefetchw(x)
++
++#define cpu_relax() rep_nop()
++
++/*
++ * NSC/Cyrix CPU configuration register indexes
++ */
++#define CX86_CCR0 0xc0
++#define CX86_CCR1 0xc1
++#define CX86_CCR2 0xc2
++#define CX86_CCR3 0xc3
++#define CX86_CCR4 0xe8
++#define CX86_CCR5 0xe9
++#define CX86_CCR6 0xea
++#define CX86_CCR7 0xeb
++#define CX86_DIR0 0xfe
++#define CX86_DIR1 0xff
++#define CX86_ARR_BASE 0xc4
++#define CX86_RCR_BASE 0xdc
++
++/*
++ * NSC/Cyrix CPU indexed register access macros
++ */
++
++#define getCx86(reg) ({ outb((reg), 0x22); inb(0x23); })
++
++#define setCx86(reg, data) do { \
++ outb((reg), 0x22); \
++ outb((data), 0x23); \
++} while (0)
++
++static inline void serialize_cpu(void)
++{
++ __asm__ __volatile__ ("cpuid" : : : "ax", "bx", "cx", "dx");
++}
++
++static inline void __monitor(const void *eax, unsigned long ecx,
++ unsigned long edx)
++{
++ /* "monitor %eax,%ecx,%edx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc8;"
++ : :"a" (eax), "c" (ecx), "d"(edx));
++}
++
++static inline void __mwait(unsigned long eax, unsigned long ecx)
++{
++ /* "mwait %eax,%ecx;" */
++ asm volatile(
++ ".byte 0x0f,0x01,0xc9;"
++ : :"a" (eax), "c" (ecx));
++}
++
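++/*
++ * The kernel stack is THREAD_SIZE-aligned, so masking %rsp with
++ * CURRENT_MASK yields the stack base, where struct thread_info (and
++ * hence ->task) lives.
++ */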
++#define stack_current() \
++({ \
++ struct thread_info *ti; \
++ asm("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->task; \
++})
++
++#define cache_line_size() (boot_cpu_data.x86_cache_alignment)
++
++extern unsigned long boot_option_idle_override;
++/* Boot loader type from the setup header */
++extern int bootloader_type;
++
++#define HAVE_ARCH_PICK_MMAP_LAYOUT 1
++
++#endif /* __ASM_X86_64_PROCESSOR_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/smp_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/smp_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,150 @@
++#ifndef __ASM_SMP_H
++#define __ASM_SMP_H
++
++/*
++ * We need the APIC definitions automatically as part of 'smp.h'
++ */
++#ifndef __ASSEMBLY__
++#include <linux/threads.h>
++#include <linux/cpumask.h>
++#include <linux/bitops.h>
++extern int disable_apic;
++#endif
++
++#ifdef CONFIG_X86_LOCAL_APIC
++#ifndef __ASSEMBLY__
++#include <asm/fixmap.h>
++#include <asm/mpspec.h>
++#ifdef CONFIG_X86_IO_APIC
++#include <asm/io_apic.h>
++#endif
++#include <asm/apic.h>
++#include <asm/thread_info.h>
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#ifndef ASSEMBLY
++
++#include <asm/pda.h>
++
++struct pt_regs;
++
++extern cpumask_t cpu_present_mask;
++extern cpumask_t cpu_possible_map;
++extern cpumask_t cpu_online_map;
++extern cpumask_t cpu_initialized;
++
++/*
++ * Private routines/data
++ */
++
++extern void smp_alloc_memory(void);
++extern volatile unsigned long smp_invalidate_needed;
++extern int pic_mode;
++extern void lock_ipi_call_lock(void);
++extern void unlock_ipi_call_lock(void);
++extern int smp_num_siblings;
++extern void smp_send_reschedule(int cpu);
++void smp_stop_cpu(void);
++extern int smp_call_function_single(int cpuid, void (*func) (void *info),
++ void *info, int retry, int wait);
++
++extern cpumask_t cpu_sibling_map[NR_CPUS];
++extern cpumask_t cpu_core_map[NR_CPUS];
++extern u8 cpu_llc_id[NR_CPUS];
++
++#define SMP_TRAMPOLINE_BASE 0x6000
++
++/*
++ * On x86 all CPUs are mapped 1:1 to the APIC space.
++ * This simplifies scheduling and IPI sending and
++ * compresses data structures.
++ */
++
++static inline int num_booting_cpus(void)
++{
++ return cpus_weight(cpu_possible_map);
++}
++
++#define raw_smp_processor_id() read_pda(cpunumber)
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline int hard_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_ID(*(unsigned int *)(APIC_BASE+APIC_ID));
++}
++#endif
++
++extern int safe_smp_processor_id(void);
++extern int __cpu_disable(void);
++extern void __cpu_die(unsigned int cpu);
++extern void prefill_possible_map(void);
++extern unsigned num_processors;
++extern unsigned disabled_cpus;
++
++#endif /* !ASSEMBLY */
++
++#define NO_PROC_ID 0xFF /* No processor magic marker */
++
++#endif
++
++#ifndef ASSEMBLY
++/*
++ * Some low-level functions might want to know about
++ * the real APIC ID <-> CPU # mapping.
++ */
++extern u8 x86_cpu_to_apicid[NR_CPUS]; /* physical ID */
++extern u8 x86_cpu_to_log_apicid[NR_CPUS];
++extern u8 bios_cpu_apicid[];
++
++#ifdef CONFIG_X86_LOCAL_APIC
++static inline unsigned int cpu_mask_to_apicid(cpumask_t cpumask)
++{
++ return cpus_addr(cpumask)[0];
++}
++
++static inline int cpu_present_to_apicid(int mps_cpu)
++{
++ if (mps_cpu < NR_CPUS)
++ return (int)bios_cpu_apicid[mps_cpu];
++ else
++ return BAD_APICID;
++}
++#endif
++
++#endif /* !ASSEMBLY */
++
++#ifndef CONFIG_SMP
++#define stack_smp_processor_id() 0
++#define safe_smp_processor_id() 0
++#define cpu_logical_map(x) (x)
++#else
++#include <asm/thread_info.h>
++#define stack_smp_processor_id() \
++({ \
++ struct thread_info *ti; \
++ __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
++ ti->cpu; \
++})
++#endif
++
++#ifndef __ASSEMBLY__
++#ifdef CONFIG_X86_LOCAL_APIC
++static __inline int logical_smp_processor_id(void)
++{
++ /* we don't want to mark this access volatile - bad code generation */
++ return GET_APIC_LOGICAL_ID(*(unsigned long *)(APIC_BASE+APIC_LDR));
++}
++#endif
++#endif
++
++#ifdef CONFIG_SMP
++#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
++#else
++#define cpu_physical_id(cpu) boot_cpu_id
++#endif
++
++#endif
++
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/system_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/system_64.h 2007-11-26 16:59:25.000000000 +0100
+@@ -0,0 +1,256 @@
++#ifndef __ASM_SYSTEM_H
++#define __ASM_SYSTEM_H
++
++#include <linux/kernel.h>
++#include <asm/segment.h>
++#include <asm/alternative.h>
++
++#include <asm/synch_bitops.h>
++#include <asm/hypervisor.h>
++#include <xen/interface/arch-x86_64.h>
++
++#ifdef __KERNEL__
++
++#define __STR(x) #x
++#define STR(x) __STR(x)
++
++#define __SAVE(reg,offset) "movq %%" #reg ",(14-" #offset ")*8(%%rsp)\n\t"
++#define __RESTORE(reg,offset) "movq (14-" #offset ")*8(%%rsp),%%" #reg "\n\t"
++
++/* frame pointer must be last for get_wchan */
++#define SAVE_CONTEXT "pushf ; pushq %%rbp ; movq %%rsi,%%rbp\n\t"
++#define RESTORE_CONTEXT "movq %%rbp,%%rsi ; popq %%rbp ; popf\n\t"
++
++#define __EXTRA_CLOBBER \
++ ,"rcx","rbx","rdx","r8","r9","r10","r11","r12","r13","r14","r15"
++
++#define switch_to(prev,next,last) \
++ asm volatile(SAVE_CONTEXT \
++ "movq %%rsp,%P[threadrsp](%[prev])\n\t" /* save RSP */ \
++ "movq %P[threadrsp](%[next]),%%rsp\n\t" /* restore RSP */ \
++ "call __switch_to\n\t" \
++ ".globl thread_return\n" \
++ "thread_return:\n\t" \
++ "movq %%gs:%P[pda_pcurrent],%%rsi\n\t" \
++ "movq %P[thread_info](%%rsi),%%r8\n\t" \
++ LOCK_PREFIX "btr %[tif_fork],%P[ti_flags](%%r8)\n\t" \
++ "movq %%rax,%%rdi\n\t" \
++ "jc ret_from_fork\n\t" \
++ RESTORE_CONTEXT \
++ : "=a" (last) \
++ : [next] "S" (next), [prev] "D" (prev), \
++ [threadrsp] "i" (offsetof(struct task_struct, thread.rsp)), \
++ [ti_flags] "i" (offsetof(struct thread_info, flags)),\
++ [tif_fork] "i" (TIF_FORK), \
++ [thread_info] "i" (offsetof(struct task_struct, thread_info)), \
++ [pda_pcurrent] "i" (offsetof(struct x8664_pda, pcurrent)) \
++ : "memory", "cc" __EXTRA_CLOBBER)
++
++extern void load_gs_index(unsigned);
++
++/*
++ * Load a segment. Fall back on loading the zero
++ * segment if something goes wrong.
++ */
++#define loadsegment(seg,value) \
++ asm volatile("\n" \
++ "1:\t" \
++ "movl %k0,%%" #seg "\n" \
++ "2:\n" \
++ ".section .fixup,\"ax\"\n" \
++ "3:\t" \
++ "movl %1,%%" #seg "\n\t" \
++ "jmp 2b\n" \
++ ".previous\n" \
++ ".section __ex_table,\"a\"\n\t" \
++ ".align 8\n\t" \
++ ".quad 1b,3b\n" \
++ ".previous" \
++ : :"r" (value), "r" (0))
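++
++/*
++ * Illustrative use (fsindex is a hypothetical saved selector): reload a
++ * segment register that may have gone stale; the fixup path falls back
++ * to the zero selector instead of faulting:
++ *
++ *	loadsegment(fs, fsindex);
++ */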
++
++/*
++ * Clear and set 'TS' bit respectively
++ */
++#define clts() (HYPERVISOR_fpu_taskswitch(0))
++
++static inline unsigned long read_cr0(void)
++{
++ unsigned long cr0;
++ asm volatile("movq %%cr0,%0" : "=r" (cr0));
++ return cr0;
++}
++
++static inline void write_cr0(unsigned long val)
++{
++ asm volatile("movq %0,%%cr0" :: "r" (val));
++}
++
++#define read_cr3() ({ \
++ unsigned long __dummy; \
++ asm("movq %%cr3,%0" : "=r" (__dummy)); \
++ machine_to_phys(__dummy); \
++})
++
++static inline unsigned long read_cr4(void)
++{
++ unsigned long cr4;
++ asm("movq %%cr4,%0" : "=r" (cr4));
++ return cr4;
++}
++
++static inline void write_cr4(unsigned long val)
++{
++ asm volatile("movq %0,%%cr4" :: "r" (val));
++}
++
++#define stts() (HYPERVISOR_fpu_taskswitch(1))
++
++#define wbinvd() \
++ __asm__ __volatile__ ("wbinvd": : :"memory");
++
++/*
++ * On SMP systems, when the scheduler does migration-cost autodetection,
++ * it needs a way to flush as much of the CPU's caches as possible.
++ */
++static inline void sched_cacheflush(void)
++{
++ wbinvd();
++}
++
++#endif /* __KERNEL__ */
++
++#define nop() __asm__ __volatile__ ("nop")
++
++#define xchg(ptr,v) ((__typeof__(*(ptr)))__xchg((unsigned long)(v),(ptr),sizeof(*(ptr))))
++
++#define tas(ptr) (xchg((ptr),1))
++
++#define __xg(x) ((volatile long *)(x))
++
++static inline void set_64bit(volatile unsigned long *ptr, unsigned long val)
++{
++ *ptr = val;
++}
++
++#define _set_64bit set_64bit
++
++/*
++ * Note: no "lock" prefix even on SMP: xchg always implies lock anyway
++ * Note 2: xchg has a side effect, so the volatile attribute is necessary;
++ * *ptr is an output argument as well as an input. --ANK
++ */
++static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int size)
++{
++ switch (size) {
++ case 1:
++ __asm__ __volatile__("xchgb %b0,%1"
++ :"=q" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 2:
++ __asm__ __volatile__("xchgw %w0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 4:
++ __asm__ __volatile__("xchgl %k0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ case 8:
++ __asm__ __volatile__("xchgq %0,%1"
++ :"=r" (x)
++ :"m" (*__xg(ptr)), "0" (x)
++ :"memory");
++ break;
++ }
++ return x;
++}
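++
++/*
++ * Illustrative use (lock_word is hypothetical): a trivial test-and-set
++ * spin, which is what tas() above amounts to:
++ *
++ *	while (xchg(&lock_word, 1))
++ *		cpu_relax();
++ */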
++
++/*
++ * Atomic compare and exchange. Compare OLD with MEM, if identical,
++ * store NEW in MEM. Return the initial value in MEM. Success is
++ * indicated by comparing RETURN with OLD.
++ */
++
++#define __HAVE_ARCH_CMPXCHG 1
++
++static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
++ unsigned long new, int size)
++{
++ unsigned long prev;
++ switch (size) {
++ case 1:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgb %b1,%2"
++ : "=a"(prev)
++ : "q"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 2:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgw %w1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 4:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgl %k1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ case 8:
++ __asm__ __volatile__(LOCK_PREFIX "cmpxchgq %1,%2"
++ : "=a"(prev)
++ : "r"(new), "m"(*__xg(ptr)), "0"(old)
++ : "memory");
++ return prev;
++ }
++ return old;
++}
++
++#define cmpxchg(ptr,o,n)\
++ ((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
++ (unsigned long)(n),sizeof(*(ptr))))
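++
++/*
++ * Illustrative retry loop (counter is hypothetical): a lock-free
++ * increment that retries while another CPU races us:
++ *
++ *	unsigned long old, new;
++ *	do {
++ *		old = counter;
++ *		new = old + 1;
++ *	} while (cmpxchg(&counter, old, new) != old);
++ */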
++
++#ifdef CONFIG_SMP
++#define smp_mb() mb()
++#define smp_rmb() rmb()
++#define smp_wmb() wmb()
++#define smp_read_barrier_depends() do {} while(0)
++#else
++#define smp_mb() barrier()
++#define smp_rmb() barrier()
++#define smp_wmb() barrier()
++#define smp_read_barrier_depends() do {} while(0)
++#endif
++
++
++/*
++ * Force strict CPU ordering.
++ * And yes, this is required on UP too when we're talking
++ * to devices.
++ */
++#define mb() asm volatile("mfence":::"memory")
++#define rmb() asm volatile("lfence":::"memory")
++
++#ifdef CONFIG_UNORDERED_IO
++#define wmb() asm volatile("sfence" ::: "memory")
++#else
++#define wmb() asm volatile("" ::: "memory")
++#endif
++#define read_barrier_depends() do {} while(0)
++#define set_mb(var, value) do { (void) xchg(&var, value); } while (0)
++
++#define warn_if_not_ulong(x) do { unsigned long foo; (void) (&(x) == &foo); } while (0)
++
++#include <linux/irqflags.h>
++
++void cpu_idle_wait(void);
++
++extern unsigned long arch_align_stack(unsigned long sp);
++extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
++
++#endif
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/tlbflush_64.h 2007-11-26 16:59:25.000000000 +0100
+@@ -0,0 +1,103 @@
++#ifndef _X8664_TLBFLUSH_H
++#define _X8664_TLBFLUSH_H
++
++#include <linux/mm.h>
++#include <asm/processor.h>
++
++#define __flush_tlb() xen_tlb_flush()
++
++/*
++ * Global pages have to be flushed a bit differently. Not a real
++ * performance problem because this does not happen often.
++ */
++#define __flush_tlb_global() xen_tlb_flush()
++
++
++extern unsigned long pgkern_mask;
++
++#define __flush_tlb_all() __flush_tlb_global()
++
++#define __flush_tlb_one(addr) xen_invlpg((unsigned long)addr)
++
++
++/*
++ * TLB flushing:
++ *
++ * - flush_tlb() flushes the current mm struct TLBs
++ * - flush_tlb_all() flushes all processes' TLBs
++ * - flush_tlb_mm(mm) flushes the specified mm context's TLBs
++ * - flush_tlb_page(vma, vmaddr) flushes one page
++ * - flush_tlb_range(vma, start, end) flushes a range of pages
++ * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
++ * - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
++ *
++ * x86-64 can only flush individual pages or full VMs. For a range flush
++ * we always do the full VM. It might be worth testing whether, for a
++ * small range, a few INVLPGs in a row are a win.
++ */
++
++#ifndef CONFIG_SMP
++
++#define flush_tlb() __flush_tlb()
++#define flush_tlb_all() __flush_tlb_all()
++#define local_flush_tlb() __flush_tlb()
++
++static inline void flush_tlb_mm(struct mm_struct *mm)
++{
++ if (mm == current->active_mm)
++ __flush_tlb();
++}
++
++static inline void flush_tlb_page(struct vm_area_struct *vma,
++ unsigned long addr)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb_one(addr);
++}
++
++static inline void flush_tlb_range(struct vm_area_struct *vma,
++ unsigned long start, unsigned long end)
++{
++ if (vma->vm_mm == current->active_mm)
++ __flush_tlb();
++}
++
++#else
++
++#include <asm/smp.h>
++
++#define local_flush_tlb() \
++ __flush_tlb()
++
++#define flush_tlb_all xen_tlb_flush_all
++#define flush_tlb_current_task() xen_tlb_flush_mask(&current->mm->cpu_vm_mask)
++#define flush_tlb_mm(mm) xen_tlb_flush_mask(&(mm)->cpu_vm_mask)
++#define flush_tlb_page(vma, va) xen_invlpg_mask(&(vma)->vm_mm->cpu_vm_mask, va)
++
++#define flush_tlb() flush_tlb_current_task()
++
++static inline void flush_tlb_range(struct vm_area_struct * vma, unsigned long start, unsigned long end)
++{
++ flush_tlb_mm(vma->vm_mm);
++}
++
++#define TLBSTATE_OK 1
++#define TLBSTATE_LAZY 2
++
++/* Roughly an IPI every 20MB with 4k pages for freeing page table
++ ranges. Cost is about 42k of memory for each CPU. */
++#define ARCH_FREE_PTE_NR 5350
++
++#endif
++
++#define flush_tlb_kernel_range(start, end) flush_tlb_all()
++
++static inline void flush_tlb_pgtables(struct mm_struct *mm,
++ unsigned long start, unsigned long end)
++{
++ /* x86_64 does not keep any page table caches in a software TLB.
++ The CPUs do in their hardware TLBs, but they are handled
++ by the normal TLB flushing algorithms. */
++}
++
++#endif /* _X8664_TLBFLUSH_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/asm/xor_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/asm/xor_64.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,328 @@
++/*
++ * x86-64 changes / gcc fixes from Andi Kleen.
++ * Copyright 2002 Andi Kleen, SuSE Labs.
++ *
++ * This hasn't been optimized for the hammer yet, but there are likely
++ * no advantages to be gotten from x86-64 here anyway.
++ */
++
++typedef struct { unsigned long a,b; } __attribute__((aligned(16))) xmm_store_t;
++
++/* Doesn't use gcc to save the XMM registers, because there is no easy way to
++ tell it to do a clts before the register saving. */
++#define XMMS_SAVE do { \
++ preempt_disable(); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ clts(); \
++ __asm__ __volatile__ ( \
++ "movups %%xmm0,(%1) ;\n\t" \
++ "movups %%xmm1,0x10(%1) ;\n\t" \
++ "movups %%xmm2,0x20(%1) ;\n\t" \
++ "movups %%xmm3,0x30(%1) ;\n\t" \
++ : "=&r" (cr0) \
++ : "r" (xmm_save) \
++ : "memory"); \
++} while(0)
++
++#define XMMS_RESTORE do { \
++ asm volatile ( \
++ "sfence ;\n\t" \
++ "movups (%1),%%xmm0 ;\n\t" \
++ "movups 0x10(%1),%%xmm1 ;\n\t" \
++ "movups 0x20(%1),%%xmm2 ;\n\t" \
++ "movups 0x30(%1),%%xmm3 ;\n\t" \
++ : \
++ : "r" (cr0), "r" (xmm_save) \
++ : "memory"); \
++ if (!(current_thread_info()->status & TS_USEDFPU)) \
++ stts(); \
++ preempt_enable(); \
++} while(0)
++
++#define OFFS(x) "16*("#x")"
++#define PF_OFFS(x) "256+16*("#x")"
++#define PF0(x) " prefetchnta "PF_OFFS(x)"(%[p1]) ;\n"
++#define LD(x,y) " movaps "OFFS(x)"(%[p1]), %%xmm"#y" ;\n"
++#define ST(x,y) " movaps %%xmm"#y", "OFFS(x)"(%[p1]) ;\n"
++#define PF1(x) " prefetchnta "PF_OFFS(x)"(%[p2]) ;\n"
++#define PF2(x) " prefetchnta "PF_OFFS(x)"(%[p3]) ;\n"
++#define PF3(x) " prefetchnta "PF_OFFS(x)"(%[p4]) ;\n"
++#define PF4(x) " prefetchnta "PF_OFFS(x)"(%[p5]) ;\n"
++#define PF5(x) " prefetchnta "PF_OFFS(x)"(%[p6]) ;\n"
++#define XO1(x,y) " xorps "OFFS(x)"(%[p2]), %%xmm"#y" ;\n"
++#define XO2(x,y) " xorps "OFFS(x)"(%[p3]), %%xmm"#y" ;\n"
++#define XO3(x,y) " xorps "OFFS(x)"(%[p4]), %%xmm"#y" ;\n"
++#define XO4(x,y) " xorps "OFFS(x)"(%[p5]), %%xmm"#y" ;\n"
++#define XO5(x,y) " xorps "OFFS(x)"(%[p6]), %%xmm"#y" ;\n"
++
++
++static void
++xor_sse_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
++{
++ unsigned int lines = bytes >> 8;
++ unsigned long cr0;
++ xmm_store_t xmm_save[4];
++
++ XMMS_SAVE;
++
++ asm volatile (
++#undef BLOCK
++#define BLOCK(i) \
++ LD(i,0) \
++ LD(i+1,1) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [p1] "+r" (p1), [p2] "+r" (p2), [cnt] "+r" (lines)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_3(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+r" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3)
++ : [inc] "r" (256UL)
++ : "memory");
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_4(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4)
++ : [inc] "r" (256UL)
++ : "memory" );
++
++ XMMS_RESTORE;
++}
++
++static void
++xor_sse_5(unsigned long bytes, unsigned long *p1, unsigned long *p2,
++ unsigned long *p3, unsigned long *p4, unsigned long *p5)
++{
++ unsigned int lines = bytes >> 8;
++ xmm_store_t xmm_save[4];
++ unsigned long cr0;
++
++ XMMS_SAVE;
++
++ __asm__ __volatile__ (
++#undef BLOCK
++#define BLOCK(i) \
++ PF1(i) \
++ PF1(i+2) \
++ LD(i,0) \
++ LD(i+1,1) \
++ LD(i+2,2) \
++ LD(i+3,3) \
++ PF2(i) \
++ PF2(i+2) \
++ XO1(i,0) \
++ XO1(i+1,1) \
++ XO1(i+2,2) \
++ XO1(i+3,3) \
++ PF3(i) \
++ PF3(i+2) \
++ XO2(i,0) \
++ XO2(i+1,1) \
++ XO2(i+2,2) \
++ XO2(i+3,3) \
++ PF4(i) \
++ PF4(i+2) \
++ PF0(i+4) \
++ PF0(i+6) \
++ XO3(i,0) \
++ XO3(i+1,1) \
++ XO3(i+2,2) \
++ XO3(i+3,3) \
++ XO4(i,0) \
++ XO4(i+1,1) \
++ XO4(i+2,2) \
++ XO4(i+3,3) \
++ ST(i,0) \
++ ST(i+1,1) \
++ ST(i+2,2) \
++ ST(i+3,3) \
++
++
++ PF0(0)
++ PF0(2)
++
++ " .align 32 ;\n"
++ " 1: ;\n"
++
++ BLOCK(0)
++ BLOCK(4)
++ BLOCK(8)
++ BLOCK(12)
++
++ " addq %[inc], %[p1] ;\n"
++ " addq %[inc], %[p2] ;\n"
++ " addq %[inc], %[p3] ;\n"
++ " addq %[inc], %[p4] ;\n"
++ " addq %[inc], %[p5] ;\n"
++ " decl %[cnt] ; jnz 1b"
++ : [cnt] "+c" (lines),
++ [p1] "+r" (p1), [p2] "+r" (p2), [p3] "+r" (p3), [p4] "+r" (p4),
++ [p5] "+r" (p5)
++ : [inc] "r" (256UL)
++ : "memory");
++
++ XMMS_RESTORE;
++}
++
++static struct xor_block_template xor_block_sse = {
++ .name = "generic_sse",
++ .do_2 = xor_sse_2,
++ .do_3 = xor_sse_3,
++ .do_4 = xor_sse_4,
++ .do_5 = xor_sse_5,
++};
++
++#undef XOR_TRY_TEMPLATES
++#define XOR_TRY_TEMPLATES \
++ do { \
++ xor_speed(&xor_block_sse); \
++ } while (0)
++
++/* We force the use of the SSE xor block because it can write around L2.
++ We may also be able to load into L1 only, depending on how the CPU
++ deals with a load to a line that is being prefetched. */
++#define XOR_SELECT_TEMPLATE(FASTEST) (&xor_block_sse)
+Index: head-2008-11-25/include/asm-x86/mach-xen/mach_time.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/mach_time.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,111 @@
++/*
++ * include/asm-i386/mach-default/mach_time.h
++ *
++ * Machine specific set RTC function for generic.
++ * Split out from time.c by Osamu Tomita <tomita@cinet.co.jp>
++ */
++#ifndef _MACH_TIME_H
++#define _MACH_TIME_H
++
++#include <asm-i386/mc146818rtc.h>
++
++/* tolerances for timing the set_rtc_mmss() call around the 500 ms mark */
++/* used in arch/i386/time.c::do_timer_interrupt() */
++#define USEC_AFTER 500000
++#define USEC_BEFORE 500000
++
++/*
++ * In order to set the CMOS clock precisely, set_rtc_mmss has to be
++ * called 500 ms after the second nowtime has started, because when
++ * nowtime is written into the registers of the CMOS clock, it will
++ * jump to the next second precisely 500 ms later. Check the Motorola
++ * MC146818A or Dallas DS12887 data sheet for details.
++ *
++ * BUG: This routine does not handle hour overflow properly; it just
++ * sets the minutes. Usually you'll only notice that after reboot!
++ */
++static inline int mach_set_rtc_mmss(unsigned long nowtime)
++{
++ int retval = 0;
++ int real_seconds, real_minutes, cmos_minutes;
++ unsigned char save_control, save_freq_select;
++
++ save_control = CMOS_READ(RTC_CONTROL); /* tell the clock it's being set */
++ CMOS_WRITE((save_control|RTC_SET), RTC_CONTROL);
++
++ save_freq_select = CMOS_READ(RTC_FREQ_SELECT); /* stop and reset prescaler */
++ CMOS_WRITE((save_freq_select|RTC_DIV_RESET2), RTC_FREQ_SELECT);
++
++ cmos_minutes = CMOS_READ(RTC_MINUTES);
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD)
++ BCD_TO_BIN(cmos_minutes);
++
++ /*
++ * since we're only adjusting minutes and seconds,
++ * don't interfere with hour overflow. This avoids
++ * messing with unknown time zones but requires your
++ * RTC not to be off by more than 15 minutes
++ */
++ real_seconds = nowtime % 60;
++ real_minutes = nowtime / 60;
++ if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
++ real_minutes += 30; /* correct for half hour time zone */
++ real_minutes %= 60;
++
++ if (abs(real_minutes - cmos_minutes) < 30) {
++ if (!(save_control & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BIN_TO_BCD(real_seconds);
++ BIN_TO_BCD(real_minutes);
++ }
++ CMOS_WRITE(real_seconds,RTC_SECONDS);
++ CMOS_WRITE(real_minutes,RTC_MINUTES);
++ } else {
++ printk(KERN_WARNING
++ "set_rtc_mmss: can't update from %d to %d\n",
++ cmos_minutes, real_minutes);
++ retval = -1;
++ }
++
++ /* The following flags have to be released exactly in this order,
++ * otherwise the DS12887 (popular MC146818A clone with integrated
++ * battery and quartz) will not reset the oscillator and will not
++ * update precisely 500 ms later. You won't find this mentioned in
++ * the Dallas Semiconductor data sheets, but who believes data
++ * sheets anyway ... -- Markus Kuhn
++ */
++ CMOS_WRITE(save_control, RTC_CONTROL);
++ CMOS_WRITE(save_freq_select, RTC_FREQ_SELECT);
++
++ return retval;
++}
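++
++/*
++ * For reference (schematic, after the i386 timer code): callers gate the
++ * update on being within half a tick of the 500 ms mark:
++ *
++ *	if ((xtime.tv_nsec / 1000) >= USEC_AFTER - TICK_SIZE / 2 &&
++ *	    (xtime.tv_nsec / 1000) <= USEC_BEFORE + TICK_SIZE / 2)
++ *		set_rtc_mmss(xtime.tv_sec);
++ */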
++
++static inline unsigned long mach_get_cmos_time(void)
++{
++ unsigned int year, mon, day, hour, min, sec;
++
++ do {
++ sec = CMOS_READ(RTC_SECONDS);
++ min = CMOS_READ(RTC_MINUTES);
++ hour = CMOS_READ(RTC_HOURS);
++ day = CMOS_READ(RTC_DAY_OF_MONTH);
++ mon = CMOS_READ(RTC_MONTH);
++ year = CMOS_READ(RTC_YEAR);
++ } while (sec != CMOS_READ(RTC_SECONDS));
++
++ if (!(CMOS_READ(RTC_CONTROL) & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
++ BCD_TO_BIN(sec);
++ BCD_TO_BIN(min);
++ BCD_TO_BIN(hour);
++ BCD_TO_BIN(day);
++ BCD_TO_BIN(mon);
++ BCD_TO_BIN(year);
++ }
++
++ year += 1900;
++ if (year < 1970)
++ year += 100;
++
++ return mktime(year, mon, day, hour, min, sec);
++}
++
++#endif /* !_MACH_TIME_H */
+Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch_post.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch_post.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,63 @@
++/**
++ * machine_specific_* - Hooks for machine specific setup.
++ *
++ * Description:
++ * This is included late in kernel/setup.c so that it can make
++ * use of all of the static functions.
++ **/
++
++#include <xen/interface/callback.h>
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void nmi(void);
++
++static void __init machine_specific_arch_setup(void)
++{
++ int ret;
++ static struct callback_register __initdata event = {
++ .type = CALLBACKTYPE_event,
++ .address = (unsigned long) hypervisor_callback,
++ };
++ static struct callback_register __initdata failsafe = {
++ .type = CALLBACKTYPE_failsafe,
++ .address = (unsigned long)failsafe_callback,
++ };
++ static struct callback_register __initdata syscall = {
++ .type = CALLBACKTYPE_syscall,
++ .address = (unsigned long)system_call,
++ };
++#ifdef CONFIG_X86_LOCAL_APIC
++ static struct callback_register __initdata nmi_cb = {
++ .type = CALLBACKTYPE_nmi,
++ .address = (unsigned long)nmi,
++ };
++#endif
++
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &event);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &failsafe);
++ if (ret == 0)
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &syscall);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS)
++ ret = HYPERVISOR_set_callbacks(
++ event.address,
++ failsafe.address,
++ syscall.address);
++#endif
++ BUG_ON(ret);
++
++#ifdef CONFIG_X86_LOCAL_APIC
++ ret = HYPERVISOR_callback_op(CALLBACKOP_register, &nmi_cb);
++#if CONFIG_XEN_COMPAT <= 0x030002
++ if (ret == -ENOSYS) {
++ static struct xennmi_callback __initdata cb = {
++ .handler_address = (unsigned long)nmi
++ };
++
++ HYPERVISOR_nmi_op(XENNMI_register_callback, &cb);
++ }
++#endif
++#endif
++}
+Index: head-2008-11-25/include/asm-x86/mach-xen/setup_arch_pre.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/asm-x86/mach-xen/setup_arch_pre.h 2007-06-12 13:14:13.000000000 +0200
+@@ -0,0 +1,5 @@
++/* Hook to call BIOS initialisation function */
++
++#define ARCH_SETUP machine_specific_arch_setup();
++
++static void __init machine_specific_arch_setup(void);
+Index: head-2008-11-25/include/xen/blkif.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/blkif.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,123 @@
++/*
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this software and associated documentation files (the "Software"), to
++ * deal in the Software without restriction, including without limitation the
++ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
++ * sell copies of the Software, and to permit persons to whom the Software is
++ * furnished to do so, subject to the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++ * DEALINGS IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_BLKIF_H__
++#define __XEN_BLKIF_H__
++
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/protocols.h>
++
++/* Not a real protocol. Used to generate ring structs which contain
++ * the elements common to all protocols only. This way we get a
++ * compiler-checkable way to use common struct elements, so we can
++ * avoid using switch(protocol) in a number of places. */
++struct blkif_common_request {
++ char dummy;
++};
++struct blkif_common_response {
++ char dummy;
++};
++
++/* i386 protocol version */
++#pragma pack(push, 4)
++struct blkif_x86_32_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t id; /* private guest value, echoed in resp */
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_32_response {
++ uint64_t id; /* copied from request */
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_32_request blkif_x86_32_request_t;
++typedef struct blkif_x86_32_response blkif_x86_32_response_t;
++#pragma pack(pop)
++
++/* x86_64 protocol version */
++struct blkif_x86_64_request {
++ uint8_t operation; /* BLKIF_OP_??? */
++ uint8_t nr_segments; /* number of segments */
++ blkif_vdev_t handle; /* only for read/write requests */
++ uint64_t __attribute__((__aligned__(8))) id;
++ blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
++ struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++struct blkif_x86_64_response {
++ uint64_t __attribute__((__aligned__(8))) id;
++ uint8_t operation; /* copied from request */
++ int16_t status; /* BLKIF_RSP_??? */
++};
++typedef struct blkif_x86_64_request blkif_x86_64_request_t;
++typedef struct blkif_x86_64_response blkif_x86_64_response_t;
++
++DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
++DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
++DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
++
++union blkif_back_rings {
++ blkif_back_ring_t native;
++ blkif_common_back_ring_t common;
++ blkif_x86_32_back_ring_t x86_32;
++ blkif_x86_64_back_ring_t x86_64;
++};
++typedef union blkif_back_rings blkif_back_rings_t;
++
++enum blkif_protocol {
++ BLKIF_PROTOCOL_NATIVE = 1,
++ BLKIF_PROTOCOL_X86_32 = 2,
++ BLKIF_PROTOCOL_X86_64 = 3,
++};
++
++static inline void blkif_get_x86_32_req(blkif_request_t *dst, blkif_x86_32_request_t *src)
++{
++ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
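++ /*
++  * The request lives in a shared ring: barrier() plus the clamp below
++  * ensure the copy loop uses the nr_segments value snapshotted into
++  * dst, not a fresh read that the frontend could change under us.
++  */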
++ barrier();
++ if (n > dst->nr_segments)
++ n = dst->nr_segments;
++ for (i = 0; i < n; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++static inline void blkif_get_x86_64_req(blkif_request_t *dst, blkif_x86_64_request_t *src)
++{
++ int i, n = BLKIF_MAX_SEGMENTS_PER_REQUEST;
++ dst->operation = src->operation;
++ dst->nr_segments = src->nr_segments;
++ dst->handle = src->handle;
++ dst->id = src->id;
++ dst->sector_number = src->sector_number;
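++ /* Same shared-ring snapshot/clamp protection as in the 32-bit copy. */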
++ barrier();
++ if (n > dst->nr_segments)
++ n = dst->nr_segments;
++ for (i = 0; i < n; i++)
++ dst->seg[i] = src->seg[i];
++}
++
++#endif /* __XEN_BLKIF_H__ */
+Index: head-2008-11-25/include/xen/compat_ioctl.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/compat_ioctl.h 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1,45 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License as
++ * published by the Free Software Foundation; either version 2 of the
++ * License, or (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
++ *
++ * Copyright IBM Corp. 2007
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ * Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#ifndef __LINUX_XEN_COMPAT_H__
++#define __LINUX_XEN_COMPAT_H__
++
++#include <linux/compat.h>
++
++extern int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg);
++struct privcmd_mmap_32 {
++ int num;
++ domid_t dom;
++ compat_uptr_t entry;
++};
++
++struct privcmd_mmapbatch_32 {
++ int num; /* number of pages to populate */
++ domid_t dom; /* target domain */
++ __u64 addr; /* virtual address */
++ compat_uptr_t arr; /* array of mfns - top nibble set on err */
++};
++#define IOCTL_PRIVCMD_MMAP_32 \
++ _IOC(_IOC_NONE, 'P', 2, sizeof(struct privcmd_mmap_32))
++#define IOCTL_PRIVCMD_MMAPBATCH_32 \
++ _IOC(_IOC_NONE, 'P', 3, sizeof(struct privcmd_mmapbatch_32))
++
++#endif /* __LINUX_XEN_COMPAT_H__ */
+Index: head-2008-11-25/include/xen/cpu_hotplug.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/cpu_hotplug.h 2007-08-16 18:07:01.000000000 +0200
+@@ -0,0 +1,41 @@
++#ifndef __XEN_CPU_HOTPLUG_H__
++#define __XEN_CPU_HOTPLUG_H__
++
++#include <linux/kernel.h>
++#include <linux/cpumask.h>
++
++#if defined(CONFIG_X86) && defined(CONFIG_SMP)
++extern cpumask_t cpu_initialized_map;
++#endif
++
++#if defined(CONFIG_HOTPLUG_CPU)
++
++int cpu_up_check(unsigned int cpu);
++void init_xenbus_allowed_cpumask(void);
++int smp_suspend(void);
++void smp_resume(void);
++
++void cpu_bringup(void);
++
++#else /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#define cpu_up_check(cpu) (0)
++#define init_xenbus_allowed_cpumask() ((void)0)
++
++static inline int smp_suspend(void)
++{
++ if (num_online_cpus() > 1) {
++ printk(KERN_WARNING "Can't suspend SMP guests "
++ "without CONFIG_HOTPLUG_CPU\n");
++ return -EOPNOTSUPP;
++ }
++ return 0;
++}
++
++static inline void smp_resume(void)
++{
++}
++
++#endif /* !defined(CONFIG_HOTPLUG_CPU) */
++
++#endif /* __XEN_CPU_HOTPLUG_H__ */
+Index: head-2008-11-25/include/xen/driver_util.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/driver_util.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,14 @@
++
++#ifndef __ASM_XEN_DRIVER_UTIL_H__
++#define __ASM_XEN_DRIVER_UTIL_H__
++
++#include <linux/vmalloc.h>
++#include <linux/device.h>
++
++/* Allocate/destroy a 'vmalloc' VM area. */
++extern struct vm_struct *alloc_vm_area(unsigned long size);
++extern void free_vm_area(struct vm_struct *area);
++
++extern struct class *get_xen_class(void);
++
++#endif /* __ASM_XEN_DRIVER_UTIL_H__ */
+Index: head-2008-11-25/include/xen/evtchn.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/evtchn.h 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,160 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Communication via Xen event channels.
++ * Also definitions for the device that demuxes notifications to userspace.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_EVTCHN_H__
++#define __ASM_EVTCHN_H__
++
++#include <linux/interrupt.h>
++#include <asm/hypervisor.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/interface/event_channel.h>
++#include <linux/smp.h>
++
++/*
++ * LOW-LEVEL DEFINITIONS
++ */
++
++/*
++ * Dynamically bind an event source to an IRQ-like callback handler.
++ * On some platforms this may not be implemented via the Linux IRQ subsystem.
++ * The IRQ argument passed to the callback handler is the same as returned
++ * from the bind call. It may not correspond to a Linux IRQ number.
++ * Returns IRQ or negative errno.
++ */
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id);
++
++/*
++ * Common unbind function for all event sources. Takes IRQ to unbind from.
++ * Automatically closes the underlying event channel (except for bindings
++ * made with bind_caller_port_to_irqhandler()).
++ */
++void unbind_from_irqhandler(unsigned int irq, void *dev_id);
++
++void irq_resume(void);
++
++/* Entry point for notifications into Linux subsystems. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs);
++
++/* Entry point for notifications into the userland character device. */
++void evtchn_device_upcall(int port);
++
++/* Mark a PIRQ as unavailable for dynamic allocation. */
++void evtchn_register_pirq(int irq);
++/* Map a Xen-supplied PIRQ to a dynamically allocated one. */
++int evtchn_map_pirq(int irq, int xen_pirq);
++/* Look up a Xen-supplied PIRQ for a dynamically allocated one. */
++int evtchn_get_xen_pirq(int irq);
++
++void mask_evtchn(int port);
++void disable_all_local_evtchn(void);
++void unmask_evtchn(int port);
++
++#ifdef CONFIG_SMP
++void rebind_evtchn_to_cpu(int port, unsigned int cpu);
++#else
++#define rebind_evtchn_to_cpu(port, cpu) ((void)0)
++#endif
++
++static inline int test_and_set_evtchn_mask(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ return synch_test_and_set_bit(port, s->evtchn_mask);
++}
++
++static inline void clear_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_clear_bit(port, s->evtchn_pending);
++}
++
++static inline void notify_remote_via_evtchn(int port)
++{
++ struct evtchn_send send = { .port = port };
++ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_send, &send));
++}
++
++/*
++ * Use these to access the event channel underlying the IRQ handle returned
++ * by bind_*_to_irqhandler().
++ */
++void notify_remote_via_irq(int irq);
++int irq_to_evtchn_port(int irq);
++
++#define PIRQ_SET_MAPPING 0x0
++#define PIRQ_CLEAR_MAPPING 0x1
++#define PIRQ_GET_MAPPING 0x3
++int pirq_mapstatus(int pirq, int action);
++int set_pirq_hw_action(int pirq, int (*action)(int pirq, int action));
++int clear_pirq_hw_action(int pirq);
++
++#define PIRQ_STARTUP 1
++#define PIRQ_SHUTDOWN 2
++#define PIRQ_ENABLE 3
++#define PIRQ_DISABLE 4
++#define PIRQ_END 5
++#define PIRQ_ACK 6
++
++#endif /* __ASM_EVTCHN_H__ */
+Index: head-2008-11-25/include/xen/firmware.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/firmware.h 2007-07-02 08:16:19.000000000 +0200
+@@ -0,0 +1,10 @@
++#ifndef __XEN_FIRMWARE_H__
++#define __XEN_FIRMWARE_H__
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void copy_edd(void);
++#endif
++
++void copy_edid(void);
++
++#endif /* __XEN_FIRMWARE_H__ */
+Index: head-2008-11-25/include/xen/gnttab.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/gnttab.h 2008-11-04 11:13:10.000000000 +0100
+@@ -0,0 +1,164 @@
++/******************************************************************************
++ * gnttab.h
++ *
++ * Two sets of functionality:
++ * 1. Granting foreign access to our memory reservation.
++ * 2. Accessing others' memory reservations via grant references.
++ * (i.e., mechanisms for both sender and recipient of grant references)
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __ASM_GNTTAB_H__
++#define __ASM_GNTTAB_H__
++
++#include <asm/hypervisor.h>
++#include <asm/maddr.h> /* maddr_t */
++#include <linux/mm.h>
++#include <xen/interface/grant_table.h>
++#include <xen/features.h>
++
++struct gnttab_free_callback {
++ struct gnttab_free_callback *next;
++ void (*fn)(void *);
++ void *arg;
++ u16 count;
++ u8 queued;
++};
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int flags);
++
++/*
++ * End access through the given grant reference, iff the grant entry is no
++ * longer in use. Return 1 if the grant entry was freed, 0 if it is still in
++ * use.
++ */
++int gnttab_end_foreign_access_ref(grant_ref_t ref);
++
++/*
++ * Eventually end access through the given grant reference, and once that
++ * access has been ended, free the given page too. Access will be ended
++ * immediately iff the grant entry is not in use, otherwise it will happen
++ * some time later. page may be 0, in which case no freeing will occur.
++ */
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page);
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref);
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref);
++
++int gnttab_query_foreign_access(grant_ref_t ref);
++
++/*
++ * operations on reserved batches of grant references
++ */
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *pprivate_head);
++
++void gnttab_free_grant_reference(grant_ref_t ref);
++
++void gnttab_free_grant_references(grant_ref_t head);
++
++int gnttab_empty_grant_references(const grant_ref_t *pprivate_head);
++
++int gnttab_claim_grant_reference(grant_ref_t *pprivate_head);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release);
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count);
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int flags);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t, domid_t domid,
++ unsigned long pfn);
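++
++/*
++ * Illustrative sketch (hypothetical ring setup, not lifted from a real
++ * frontend): reserve a batch of references, claim one, grant the peer
++ * access to a frame, then undo everything.
++ *
++ *	grant_ref_t head;
++ *	int ref;
++ *
++ *	if (gnttab_alloc_grant_references(16, &head) < 0)
++ *		return -ENOSPC;
++ *	ref = gnttab_claim_grant_reference(&head);
++ *	if (ref >= 0) {
++ *		gnttab_grant_foreign_access_ref(ref, otherend_id,
++ *		                                frame, 0 /* read/write */);
++ *		...
++ *		if (gnttab_end_foreign_access_ref(ref))
++ *			gnttab_release_grant_reference(&head, ref);
++ *	}
++ *	gnttab_free_grant_references(head);
++ */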
++
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep);
++void __gnttab_dma_map_page(struct page *page);
++static inline void __gnttab_dma_unmap_page(struct page *page)
++{
++}
++
++void gnttab_reset_grant_page(struct page *page);
++
++int gnttab_suspend(void);
++int gnttab_resume(void);
++
++void *arch_gnttab_alloc_shared(unsigned long *frames);
++
++static inline void
++gnttab_set_map_op(struct gnttab_map_grant_ref *map, maddr_t addr,
++ uint32_t flags, grant_ref_t ref, domid_t domid)
++{
++ if (flags & GNTMAP_contains_pte)
++ map->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ map->host_addr = __pa(addr);
++ else
++ map->host_addr = addr;
++
++ map->flags = flags;
++ map->ref = ref;
++ map->dom = domid;
++}
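++
++/*
++ * Illustrative sketch: filling in a map operation for a kernel virtual
++ * address and issuing the hypercall (ref and domid would come from the
++ * frontend). On success op.status is GNTST_okay and op.handle is kept
++ * for the matching gnttab_set_unmap_op().
++ *
++ *	struct gnttab_map_grant_ref op;
++ *
++ *	gnttab_set_map_op(&op, (maddr_t)(unsigned long)vaddr,
++ *	                  GNTMAP_host_map, ref, domid);
++ *	if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ *		BUG();
++ */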
++
++static inline void
++gnttab_set_unmap_op(struct gnttab_unmap_grant_ref *unmap, maddr_t addr,
++ uint32_t flags, grant_handle_t handle)
++{
++ if (flags & GNTMAP_contains_pte)
++ unmap->host_addr = addr;
++ else if (xen_feature(XENFEAT_auto_translated_physmap))
++ unmap->host_addr = __pa(addr);
++ else
++ unmap->host_addr = addr;
++
++ unmap->handle = handle;
++ unmap->dev_bus_addr = 0;
++}
++
++static inline void
++gnttab_set_replace_op(struct gnttab_unmap_and_replace *unmap, maddr_t addr,
++ maddr_t new_addr, grant_handle_t handle)
++{
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unmap->host_addr = __pa(addr);
++ unmap->new_addr = __pa(new_addr);
++ } else {
++ unmap->host_addr = addr;
++ unmap->new_addr = new_addr;
++ }
++
++ unmap->handle = handle;
++}
++
++#endif /* __ASM_GNTTAB_H__ */
+Index: head-2008-11-25/include/xen/hvm.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/hvm.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,23 @@
++/* Simple wrappers around HVM functions */
++#ifndef XEN_HVM_H__
++#define XEN_HVM_H__
++
++#include <xen/interface/hvm/params.h>
++
++static inline unsigned long hvm_get_parameter(int idx)
++{
++ struct xen_hvm_param xhv;
++ int r;
++
++ xhv.domid = DOMID_SELF;
++ xhv.index = idx;
++ r = HYPERVISOR_hvm_op(HVMOP_get_param, &xhv);
++ if (r < 0) {
++ printk(KERN_ERR "cannot get hvm parameter %d: %d.\n",
++ idx, r);
++ return 0;
++ }
++ return xhv.value;
++}
++
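++/*
++ * Example: fetching this domain's xenstore event channel.
++ * HVM_PARAM_STORE_EVTCHN comes from xen/interface/hvm/params.h.
++ *
++ *	unsigned long evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
++ */
++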
++#endif /* XEN_HVM_H__ */
+Index: head-2008-11-25/include/xen/hypercall.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/hypercall.h 2008-01-28 12:24:19.000000000 +0100
+@@ -0,0 +1,30 @@
++#ifndef __XEN_HYPERCALL_H__
++#define __XEN_HYPERCALL_H__
++
++#include <asm/hypercall.h>
++
++static inline int __must_check
++HYPERVISOR_multicall_check(
++ multicall_entry_t *call_list, unsigned int nr_calls,
++ const unsigned long *rc_list)
++{
++ int rc = HYPERVISOR_multicall(call_list, nr_calls);
++
++ if (unlikely(rc < 0))
++ return rc;
++ BUG_ON(rc);
++ BUG_ON((int)nr_calls < 0);
++
++ for ( ; nr_calls > 0; --nr_calls, ++call_list)
++ if (unlikely(call_list->result != (rc_list ? *rc_list++ : 0)))
++ return nr_calls;
++
++ return 0;
++}
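++
++/*
++ * Illustrative use (mcl[] is assumed to have been populated by the
++ * caller): a return of 0 means every entry produced the expected
++ * result; a positive return counts the failing entry and those after
++ * it that were left unverified.
++ *
++ *	if (HYPERVISOR_multicall_check(mcl, nr, NULL))
++ *		BUG();
++ */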
++
++/* A construct to ignore the return value of hypercall wrappers in a few
++ * exceptional cases (simply casting the function result to void doesn't
++ * avoid the compiler warning): */
++#define VOID(expr) ((void)((expr)?:0))
++
++#endif /* __XEN_HYPERCALL_H__ */
+Index: head-2008-11-25/include/xen/hypervisor_sysfs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/hypervisor_sysfs.h 2007-06-22 09:08:06.000000000 +0200
+@@ -0,0 +1,30 @@
++/*
++ * Copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#ifndef _HYP_SYSFS_H_
++#define _HYP_SYSFS_H_
++
++#include <linux/kobject.h>
++#include <linux/sysfs.h>
++
++#define HYPERVISOR_ATTR_RO(_name) \
++static struct hyp_sysfs_attr _name##_attr = __ATTR_RO(_name)
++
++#define HYPERVISOR_ATTR_RW(_name) \
++static struct hyp_sysfs_attr _name##_attr = \
++ __ATTR(_name, 0644, _name##_show, _name##_store)
++
++struct hyp_sysfs_attr {
++ struct attribute attr;
++ ssize_t (*show)(struct hyp_sysfs_attr *, char *);
++ ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
++ void *hyp_attr_data;
++};
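++
++/*
++ * Illustrative sketch: the macros above expect show/store routines
++ * named <attr>_show/<attr>_store, e.g. for a read-only "uuid" node:
++ *
++ *	static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
++ *	{
++ *		return sprintf(buffer, "%s\n", "...");
++ *	}
++ *	HYPERVISOR_ATTR_RO(uuid);
++ */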
++
++#endif /* _HYP_SYSFS_H_ */
+Index: head-2008-11-25/include/xen/pcifront.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/pcifront.h 2007-06-18 08:38:13.000000000 +0200
+@@ -0,0 +1,83 @@
++/*
++ * PCI Frontend - arch-dependent declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_ASM_PCIFRONT_H__
++#define __XEN_ASM_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++
++#ifdef __KERNEL__
++
++#ifndef __ia64__
++
++struct pcifront_device;
++struct pci_bus;
++
++struct pcifront_sd {
++ int domain;
++ struct pcifront_device *pdev;
++};
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return sd->pdev;
++}
++
++static inline void pcifront_init_sd(struct pcifront_sd *sd,
++ unsigned int domain, unsigned int bus,
++ struct pcifront_device *pdev)
++{
++ sd->domain = domain;
++ sd->pdev = pdev;
++}
++
++#if defined(CONFIG_PCI_DOMAINS)
++static inline int pci_domain_nr(struct pci_bus *bus)
++{
++ struct pcifront_sd *sd = bus->sysdata;
++ return sd->domain;
++}
++static inline int pci_proc_domain(struct pci_bus *bus)
++{
++ return pci_domain_nr(bus);
++}
++#endif /* CONFIG_PCI_DOMAINS */
++
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++ struct pcifront_sd *sd)
++{
++}
++
++#else /* __ia64__ */
++
++#include <linux/acpi.h>
++#include <asm/pci.h>
++#define pcifront_sd pci_controller
++
++extern void xen_add_resource(struct pci_controller *, unsigned int,
++ unsigned int, struct acpi_resource *);
++extern void xen_pcibios_setup_root_windows(struct pci_bus *,
++ struct pci_controller *);
++
++static inline struct pcifront_device *
++pcifront_get_pdev(struct pcifront_sd *sd)
++{
++ return (struct pcifront_device *)sd->platform_data;
++}
++
++static inline void pcifront_setup_root_resources(struct pci_bus *bus,
++ struct pcifront_sd *sd)
++{
++ xen_pcibios_setup_root_windows(bus, sd);
++}
++
++#endif /* __ia64__ */
++
++extern struct rw_semaphore pci_bus_sem;
++
++#endif /* __KERNEL__ */
++
++#endif /* __XEN_ASM_PCIFRONT_H__ */
+Index: head-2008-11-25/include/xen/public/evtchn.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/public/evtchn.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,88 @@
++/******************************************************************************
++ * evtchn.h
++ *
++ * Interface to /dev/xen/evtchn.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_EVTCHN_H__
++#define __LINUX_PUBLIC_EVTCHN_H__
++
++/*
++ * Bind a fresh port to VIRQ @virq.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_VIRQ \
++ _IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
++struct ioctl_evtchn_bind_virq {
++ unsigned int virq;
++};
++
++/*
++ * Bind a fresh port to remote <@remote_domain, @remote_port>.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_INTERDOMAIN \
++ _IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
++struct ioctl_evtchn_bind_interdomain {
++ unsigned int remote_domain, remote_port;
++};
++
++/*
++ * Allocate a fresh port for binding to @remote_domain.
++ * Return allocated port.
++ */
++#define IOCTL_EVTCHN_BIND_UNBOUND_PORT \
++ _IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
++struct ioctl_evtchn_bind_unbound_port {
++ unsigned int remote_domain;
++};
++
++/*
++ * Unbind previously allocated @port.
++ */
++#define IOCTL_EVTCHN_UNBIND \
++ _IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
++struct ioctl_evtchn_unbind {
++ unsigned int port;
++};
++
++/*
++ * Send event notification to previously allocated @port.
++ */
++#define IOCTL_EVTCHN_NOTIFY \
++ _IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
++struct ioctl_evtchn_notify {
++ unsigned int port;
++};
++
++/* Clear and reinitialise the event buffer. Clear error condition. */
++#define IOCTL_EVTCHN_RESET \
++ _IOC(_IOC_NONE, 'E', 5, 0)
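++
++/*
++ * Illustrative userspace flow (error handling omitted; the device node
++ * is assumed to exist):
++ *
++ *	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
++ *	int fd = open("/dev/xen/evtchn", O_RDWR);
++ *	int port = ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
++ *
++ * Pending ports are subsequently delivered through read()s on the fd.
++ */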
++
++#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
+Index: head-2008-11-25/include/xen/public/gntdev.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/public/gntdev.h 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,119 @@
++/******************************************************************************
++ * gntdev.h
++ *
++ * Interface to /dev/xen/gntdev.
++ *
++ * Copyright (c) 2007, D G Murray
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_GNTDEV_H__
++#define __LINUX_PUBLIC_GNTDEV_H__
++
++struct ioctl_gntdev_grant_ref {
++ /* The domain ID of the grant to be mapped. */
++ uint32_t domid;
++ /* The grant reference of the grant to be mapped. */
++ uint32_t ref;
++};
++
++/*
++ * Inserts the grant references into the mapping table of an instance
++ * of gntdev. N.B. This does not perform the mapping, which is deferred
++ * until mmap() is called with @index as the offset.
++ */
++#define IOCTL_GNTDEV_MAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 0, sizeof(struct ioctl_gntdev_map_grant_ref))
++struct ioctl_gntdev_map_grant_ref {
++ /* IN parameters */
++ /* The number of grants to be mapped. */
++ uint32_t count;
++ uint32_t pad;
++ /* OUT parameters */
++ /* The offset to be used on a subsequent call to mmap(). */
++ uint64_t index;
++ /* Variable IN parameter. */
++ /* Array of grant references, of size @count. */
++ struct ioctl_gntdev_grant_ref refs[1];
++};
++
++/*
++ * Removes the grant references from the mapping table of an instance
++ * of gntdev. N.B. munmap() must be called on the relevant virtual address(es)
++ * before this ioctl is called, or an error will result.
++ */
++#define IOCTL_GNTDEV_UNMAP_GRANT_REF \
++_IOC(_IOC_NONE, 'G', 1, sizeof(struct ioctl_gntdev_unmap_grant_ref))
++struct ioctl_gntdev_unmap_grant_ref {
++ /* IN parameters */
++ /* The offset returned by the corresponding map operation. */
++ uint64_t index;
++ /* The number of pages to be unmapped. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++/*
++ * Returns the offset in the driver's address space that corresponds
++ * to @vaddr. This can be used to perform a munmap(), followed by an
++ * UNMAP_GRANT_REF ioctl, where no state about the offset is retained by
++ * the caller. The number of pages that were allocated at the same time as
++ * @vaddr is returned in @count.
++ *
++ * N.B. Where more than one page has been mapped into a contiguous range, the
++ * supplied @vaddr must correspond to the start of the range; otherwise
++ * an error will result. It is only possible to munmap() the entire
++ * contiguously-allocated range at once, and not any subrange thereof.
++ */
++#define IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR \
++_IOC(_IOC_NONE, 'G', 2, sizeof(struct ioctl_gntdev_get_offset_for_vaddr))
++struct ioctl_gntdev_get_offset_for_vaddr {
++ /* IN parameters */
++ /* The virtual address of the first mapped page in a range. */
++ uint64_t vaddr;
++ /* OUT parameters */
++ /* The offset that was used in the initial mmap() operation. */
++ uint64_t offset;
++ /* The number of pages mapped in the VM area that begins at @vaddr. */
++ uint32_t count;
++ uint32_t pad;
++};
++
++/*
++ * Sets the maximum number of grants that may be mapped at once by this gntdev
++ * instance.
++ *
++ * N.B. This must be called before any other ioctl is performed on the device.
++ */
++#define IOCTL_GNTDEV_SET_MAX_GRANTS \
++_IOC(_IOC_NONE, 'G', 3, sizeof(struct ioctl_gntdev_set_max_grants))
++struct ioctl_gntdev_set_max_grants {
++ /* IN parameter */
++ /* The maximum number of grants that may be mapped at once. */
++ uint32_t count;
++};
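++
++/*
++ * Illustrative userspace flow (error handling omitted; @domid and @ref
++ * identify a grant offered by another domain):
++ *
++ *	struct ioctl_gntdev_map_grant_ref map = { .count = 1 };
++ *
++ *	map.refs[0].domid = domid;
++ *	map.refs[0].ref = ref;
++ *	ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &map);
++ *	addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED,
++ *	            fd, map.index);
++ */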
++
++#endif /* __LINUX_PUBLIC_GNTDEV_H__ */
+Index: head-2008-11-25/include/xen/public/privcmd.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/public/privcmd.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,79 @@
++/******************************************************************************
++ * privcmd.h
++ *
++ * Interface to /proc/xen/privcmd.
++ *
++ * Copyright (c) 2003-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __LINUX_PUBLIC_PRIVCMD_H__
++#define __LINUX_PUBLIC_PRIVCMD_H__
++
++#include <linux/types.h>
++
++#ifndef __user
++#define __user
++#endif
++
++typedef struct privcmd_hypercall
++{
++ __u64 op;
++ __u64 arg[5];
++} privcmd_hypercall_t;
++
++typedef struct privcmd_mmap_entry {
++ __u64 va;
++ __u64 mfn;
++ __u64 npages;
++} privcmd_mmap_entry_t;
++
++typedef struct privcmd_mmap {
++ int num;
++ domid_t dom; /* target domain */
++ privcmd_mmap_entry_t __user *entry;
++} privcmd_mmap_t;
++
++typedef struct privcmd_mmapbatch {
++ int num; /* number of pages to populate */
++ domid_t dom; /* target domain */
++ __u64 addr; /* virtual address */
++ xen_pfn_t __user *arr; /* array of mfns - top nibble set on err */
++} privcmd_mmapbatch_t;
++
++/*
++ * @cmd: IOCTL_PRIVCMD_HYPERCALL
++ * @arg: &privcmd_hypercall_t
++ * Return: Value returned from execution of the specified hypercall.
++ */
++#define IOCTL_PRIVCMD_HYPERCALL \
++ _IOC(_IOC_NONE, 'P', 0, sizeof(privcmd_hypercall_t))
++#define IOCTL_PRIVCMD_MMAP \
++ _IOC(_IOC_NONE, 'P', 2, sizeof(privcmd_mmap_t))
++#define IOCTL_PRIVCMD_MMAPBATCH \
++ _IOC(_IOC_NONE, 'P', 3, sizeof(privcmd_mmapbatch_t))
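++
++/*
++ * Illustrative sketch (management tools normally reach this through
++ * libxc rather than open-coding it):
++ *
++ *	privcmd_hypercall_t call = { .op = __HYPERVISOR_xen_version };
++ *	ret = ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &call);
++ */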
++
++#endif /* __LINUX_PUBLIC_PRIVCMD_H__ */
+Index: head-2008-11-25/include/xen/xen_proc.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/xen_proc.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,12 @@
++
++#ifndef __ASM_XEN_PROC_H__
++#define __ASM_XEN_PROC_H__
++
++#include <linux/proc_fs.h>
++
++extern struct proc_dir_entry *create_xen_proc_entry(
++ const char *name, mode_t mode);
++extern void remove_xen_proc_entry(
++ const char *name);
++
++#endif /* __ASM_XEN_PROC_H__ */
+Index: head-2008-11-25/include/xen/xencons.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/xencons.h 2007-10-15 09:39:38.000000000 +0200
+@@ -0,0 +1,17 @@
++#ifndef __ASM_XENCONS_H__
++#define __ASM_XENCONS_H__
++
++struct dom0_vga_console_info;
++void dom0_init_screen_info(const struct dom0_vga_console_info *, size_t);
++
++void xencons_force_flush(void);
++void xencons_resume(void);
++
++/* Interrupt work hooks. Receive data, or kick data out. */
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs);
++void xencons_tx(void);
++
++int xencons_ring_init(void);
++int xencons_ring_send(const char *data, unsigned len);
++
++#endif /* __ASM_XENCONS_H__ */
+Index: head-2008-11-25/include/xen/xenoprof.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/include/xen/xenoprof.h 2007-06-12 13:14:19.000000000 +0200
+@@ -0,0 +1,42 @@
++/******************************************************************************
++ * xen/xenoprof.h
++ *
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ */
++
++#ifndef __XEN_XENOPROF_H__
++#define __XEN_XENOPROF_H__
++#ifdef CONFIG_XEN
++
++#include <asm/xenoprof.h>
++
++struct oprofile_operations;
++int xenoprofile_init(struct oprofile_operations * ops);
++void xenoprofile_exit(void);
++
++struct xenoprof_shared_buffer {
++ char *buffer;
++ struct xenoprof_arch_shared_buffer arch;
++};
++#else
++#define xenoprofile_init(ops) (-ENOSYS)
++#define xenoprofile_exit() do { } while (0)
++
++#endif /* CONFIG_XEN */
++#endif /* __XEN_XENOPROF_H__ */
+Index: head-2008-11-25/lib/swiotlb-xen.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/lib/swiotlb-xen.c 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,739 @@
++/*
++ * Dynamic DMA mapping support.
++ *
++ * This implementation is a fallback for platforms that do not support
++ * I/O TLBs (aka DMA address translation hardware).
++ * Copyright (C) 2000 Asit Mallick <Asit.K.Mallick@intel.com>
++ * Copyright (C) 2000 Goutham Rao <goutham.rao@intel.com>
++ * Copyright (C) 2000, 2003 Hewlett-Packard Co
++ * David Mosberger-Tang <davidm@hpl.hp.com>
++ * Copyright (C) 2005 Keir Fraser <keir@xensource.com>
++ */
++
++#include <linux/cache.h>
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/string.h>
++#include <linux/types.h>
++#include <linux/ctype.h>
++#include <linux/init.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <asm/io.h>
++#include <asm/pci.h>
++#include <asm/dma.h>
++#include <asm/uaccess.h>
++#include <xen/gnttab.h>
++#include <xen/interface/memory.h>
++#include <asm-i386/mach-xen/asm/gnttab_dma.h>
++
++int swiotlb;
++EXPORT_SYMBOL(swiotlb);
++
++#define OFFSET(val,align) ((unsigned long)((val) & ((align) - 1)))
++
++/*
++ * Maximum allowable number of contiguous slabs to map,
++ * must be a power of 2. What is the appropriate value?
++ * The complexity of {map,unmap}_single is linearly dependent on this value.
++ */
++#define IO_TLB_SEGSIZE 128
++
++/*
++ * log of the size of each IO TLB slab. The number of slabs is command line
++ * controllable.
++ */
++#define IO_TLB_SHIFT 11
++
++int swiotlb_force;
++
++static char *iotlb_virt_start;
++static unsigned long iotlb_nslabs;
++
++/*
++ * Used to do a quick range check in swiotlb_unmap_single and
++ * swiotlb_sync_single_*, to see if the memory was in fact allocated by this
++ * API.
++ */
++static unsigned long iotlb_pfn_start, iotlb_pfn_end;
++
++/* Does the given dma address reside within the swiotlb aperture? */
++static inline int in_swiotlb_aperture(dma_addr_t dev_addr)
++{
++ unsigned long pfn = mfn_to_local_pfn(dev_addr >> PAGE_SHIFT);
++ return (pfn_valid(pfn)
++ && (pfn >= iotlb_pfn_start)
++ && (pfn < iotlb_pfn_end));
++}
++
++/*
++ * When the IOMMU overflows we return a fallback buffer. This sets the size.
++ */
++static unsigned long io_tlb_overflow = 32*1024;
++
++void *io_tlb_overflow_buffer;
++
++/*
++ * This is a free list describing the number of free entries available from
++ * each index
++ */
++static unsigned int *io_tlb_list;
++static unsigned int io_tlb_index;
++
++/*
++ * We need to save away the original address corresponding to a mapped entry
++ * for the sync operations.
++ */
++static struct phys_addr {
++ struct page *page;
++ unsigned int offset;
++} *io_tlb_orig_addr;
++
++/*
++ * Protect the above data structures in the map and unmap calls
++ */
++static DEFINE_SPINLOCK(io_tlb_lock);
++
++static unsigned int dma_bits;
++static unsigned int __initdata max_dma_bits = 32;
++static int __init
++setup_dma_bits(char *str)
++{
++ max_dma_bits = simple_strtoul(str, NULL, 0);
++ return 0;
++}
++__setup("dma_bits=", setup_dma_bits);
++
++static int __init
++setup_io_tlb_npages(char *str)
++{
++ /* Unlike ia64, the size is the aperture in megabytes, not in 'slabs'! */
++ if (isdigit(*str)) {
++ iotlb_nslabs = simple_strtoul(str, &str, 0) <<
++ (20 - IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ }
++ if (*str == ',')
++ ++str;
++ /*
++ * NB. 'force' enables the swiotlb, but doesn't force its use for
++ * every DMA like it does on native Linux. 'off' forcibly disables
++ * use of the swiotlb.
++ */
++ if (!strcmp(str, "force"))
++ swiotlb_force = 1;
++ else if (!strcmp(str, "off"))
++ swiotlb_force = -1;
++ return 1;
++}
++__setup("swiotlb=", setup_io_tlb_npages);
++/* make io_tlb_overflow tunable too? */
++
++/*
++ * Statically reserve bounce buffer space and initialize bounce buffer data
++ * structures for the software IO TLB used to implement the PCI DMA API.
++ */
++void
++swiotlb_init_with_default_size (size_t default_size)
++{
++ unsigned long i, bytes;
++ int rc;
++
++ if (!iotlb_nslabs) {
++ iotlb_nslabs = (default_size >> IO_TLB_SHIFT);
++ iotlb_nslabs = ALIGN(iotlb_nslabs, IO_TLB_SEGSIZE);
++ }
++
++ bytes = iotlb_nslabs * (1UL << IO_TLB_SHIFT);
++
++ /*
++ * Get IO TLB memory from the low pages
++ */
++ iotlb_virt_start = alloc_bootmem_low_pages(bytes);
++ if (!iotlb_virt_start)
++ panic("Cannot allocate SWIOTLB buffer!\n");
++
++ dma_bits = get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT) + PAGE_SHIFT;
++ for (i = 0; i < iotlb_nslabs; i += IO_TLB_SEGSIZE) {
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)iotlb_virt_start + (i << IO_TLB_SHIFT),
++ get_order(IO_TLB_SEGSIZE << IO_TLB_SHIFT),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc) {
++ if (i == 0)
++ panic("No suitable physical memory available for SWIOTLB buffer!\n"
++ "Use dom0_mem Xen boot parameter to reserve\n"
++ "some DMA memory (e.g., dom0_mem=-128M).\n");
++ iotlb_nslabs = i;
++ i <<= IO_TLB_SHIFT;
++ free_bootmem(__pa(iotlb_virt_start + i), bytes - i);
++ bytes = i;
++ for (dma_bits = 0; i > 0; i -= IO_TLB_SEGSIZE << IO_TLB_SHIFT) {
++ unsigned int bits = fls64(virt_to_bus(iotlb_virt_start + i - 1));
++
++ if (bits > dma_bits)
++ dma_bits = bits;
++ }
++ break;
++ }
++ }
++
++ /*
++ * Allocate and initialize the free list array. This array is used
++ * to find contiguous free memory regions of size up to IO_TLB_SEGSIZE.
++ */
++ io_tlb_list = alloc_bootmem(iotlb_nslabs * sizeof(int));
++ for (i = 0; i < iotlb_nslabs; i++)
++ io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
++ io_tlb_index = 0;
++ io_tlb_orig_addr = alloc_bootmem(
++ iotlb_nslabs * sizeof(*io_tlb_orig_addr));
++
++ /*
++ * Get the overflow emergency buffer
++ */
++ io_tlb_overflow_buffer = alloc_bootmem_low(io_tlb_overflow);
++ if (!io_tlb_overflow_buffer)
++ panic("Cannot allocate SWIOTLB overflow buffer!\n");
++
++ do {
++ rc = xen_create_contiguous_region(
++ (unsigned long)io_tlb_overflow_buffer,
++ get_order(io_tlb_overflow),
++ dma_bits);
++ } while (rc && dma_bits++ < max_dma_bits);
++ if (rc)
++ panic("No suitable physical memory available for SWIOTLB overflow buffer!\n");
++
++ iotlb_pfn_start = __pa(iotlb_virt_start) >> PAGE_SHIFT;
++ iotlb_pfn_end = iotlb_pfn_start + (bytes >> PAGE_SHIFT);
++
++ printk(KERN_INFO "Software IO TLB enabled:\n"
++ " Aperture: %lu megabytes\n"
++ " Kernel range: %p - %p\n"
++ " Address size: %u bits\n",
++ bytes >> 20,
++ iotlb_virt_start, iotlb_virt_start + bytes,
++ dma_bits);
++}
++
++void
++swiotlb_init(void)
++{
++ long ram_end;
++ size_t defsz = 64 * (1 << 20); /* 64MB default size */
++
++ if (swiotlb_force == 1) {
++ swiotlb = 1;
++ } else if ((swiotlb_force != -1) &&
++ is_running_on_xen() &&
++ is_initial_xendomain()) {
++ /* Domain 0 always has a swiotlb. */
++ ram_end = HYPERVISOR_memory_op(XENMEM_maximum_ram_page, NULL);
++ if (ram_end <= 0x7ffff)
++ defsz = 2 * (1 << 20); /* 2MB on systems with <2GB RAM. */
++ swiotlb = 1;
++ }
++
++ if (swiotlb)
++ swiotlb_init_with_default_size(defsz);
++ else
++ printk(KERN_INFO "Software IO TLB disabled\n");
++}
++
++/*
++ * We use __copy_to_user_inatomic to transfer to the host buffer because the
++ * buffer may be mapped read-only (e.g, in blkback driver) but lower-level
++ * drivers map the buffer for DMA_BIDIRECTIONAL access. This causes an
++ * unnecessary copy from the aperture to the host buffer, and a page fault.
++ */
++static void
++__sync_single(struct phys_addr buffer, char *dma_addr, size_t size, int dir)
++{
++ if (PageHighMem(buffer.page)) {
++ size_t len, bytes;
++ char *dev, *host, *kmp;
++ len = size;
++ while (len != 0) {
++ unsigned long flags;
++
++ if (((bytes = len) + buffer.offset) > PAGE_SIZE)
++ bytes = PAGE_SIZE - buffer.offset;
++ local_irq_save(flags); /* protects KM_BOUNCE_READ */
++ kmp = kmap_atomic(buffer.page, KM_BOUNCE_READ);
++ dev = dma_addr + size - len;
++ host = kmp + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dev, bytes))
++ /* inaccessible */;
++ } else
++ memcpy(dev, host, bytes);
++ kunmap_atomic(kmp, KM_BOUNCE_READ);
++ local_irq_restore(flags);
++ len -= bytes;
++ buffer.page++;
++ buffer.offset = 0;
++ }
++ } else {
++ char *host = (char *)phys_to_virt(
++ page_to_pseudophys(buffer.page)) + buffer.offset;
++ if (dir == DMA_FROM_DEVICE) {
++ if (__copy_to_user_inatomic(host, dma_addr, size))
++ /* inaccessible */;
++ } else if (dir == DMA_TO_DEVICE)
++ memcpy(dma_addr, host, size);
++ }
++}
++
++/*
++ * Allocates bounce buffer and returns its kernel virtual address.
++ */
++static void *
++map_single(struct device *hwdev, struct phys_addr buffer, size_t size, int dir)
++{
++ unsigned long flags;
++ char *dma_addr;
++ unsigned int nslots, stride, index, wrap;
++ struct phys_addr slot_buf;
++ int i;
++
++ /*
++ * For mappings greater than a page, we limit the stride (and
++ * hence alignment) to a page size.
++ */
++ nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ if (size > PAGE_SIZE)
++ stride = (1 << (PAGE_SHIFT - IO_TLB_SHIFT));
++ else
++ stride = 1;
++
++ BUG_ON(!nslots);
++
++ /*
++ * Find a suitable number of IO TLB entries that will fit this
++ * request and allocate a buffer from that IO TLB pool.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ wrap = index = ALIGN(io_tlb_index, stride);
++
++ if (index >= iotlb_nslabs)
++ wrap = index = 0;
++
++ do {
++ /*
++ * If we find a slot that indicates we have 'nslots'
++ * number of contiguous buffers, we allocate the
++ * buffers from that slot and mark the entries as '0'
++ * indicating unavailable.
++ */
++ if (io_tlb_list[index] >= nslots) {
++ int count = 0;
++
++ for (i = index; i < (int)(index + nslots); i++)
++ io_tlb_list[i] = 0;
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ dma_addr = iotlb_virt_start +
++ (index << IO_TLB_SHIFT);
++
++ /*
++ * Update the indices to avoid searching in
++ * the next round.
++ */
++ io_tlb_index =
++ ((index + nslots) < iotlb_nslabs
++ ? (index + nslots) : 0);
++
++ goto found;
++ }
++ index += stride;
++ if (index >= iotlb_nslabs)
++ index = 0;
++ } while (index != wrap);
++
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++ return NULL;
++ }
++ found:
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++
++ /*
++ * Save away the mapping from the original address to the DMA address.
++ * This is needed when we sync the memory. Then we sync the buffer if
++ * needed.
++ */
++ slot_buf = buffer;
++ for (i = 0; i < nslots; i++) {
++ slot_buf.page += slot_buf.offset >> PAGE_SHIFT;
++ slot_buf.offset &= PAGE_SIZE - 1;
++ io_tlb_orig_addr[index+i] = slot_buf;
++ slot_buf.offset += 1 << IO_TLB_SHIFT;
++ }
++ if ((dir == DMA_TO_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_TO_DEVICE);
++
++ return dma_addr;
++}
++
++static struct phys_addr dma_addr_to_phys_addr(char *dma_addr)
++{
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = io_tlb_orig_addr[index];
++ buffer.offset += (long)dma_addr & ((1 << IO_TLB_SHIFT) - 1);
++ buffer.page += buffer.offset >> PAGE_SHIFT;
++ buffer.offset &= PAGE_SIZE - 1;
++ return buffer;
++}
++
++/*
++ * dma_addr is the kernel virtual address of the bounce buffer to unmap.
++ */
++static void
++unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ unsigned long flags;
++ int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
++ int index = (dma_addr - iotlb_virt_start) >> IO_TLB_SHIFT;
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++
++ /*
++ * First, sync the memory before unmapping the entry
++ */
++ if ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))
++ __sync_single(buffer, dma_addr, size, DMA_FROM_DEVICE);
++
++ /*
++ * Return the buffer to the free list by setting the corresponding
++ * entries to indicate the number of contiguous entries available.
++ * While returning the entries to the free list, we merge the entries
++ * with slots below and above the pool being returned.
++ */
++ spin_lock_irqsave(&io_tlb_lock, flags);
++ {
++ count = ((index + nslots) < ALIGN(index + 1, IO_TLB_SEGSIZE) ?
++ io_tlb_list[index + nslots] : 0);
++ /*
++ * Step 1: return the slots to the free list, merging the
++ * slots with the succeeding slots
++ */
++ for (i = index + nslots - 1; i >= index; i--)
++ io_tlb_list[i] = ++count;
++ /*
++ * Step 2: merge the returned slots with the preceding slots,
++ * if available (non-zero)
++ */
++ for (i = index - 1;
++ (OFFSET(i, IO_TLB_SEGSIZE) !=
++ IO_TLB_SEGSIZE -1) && io_tlb_list[i];
++ i--)
++ io_tlb_list[i] = ++count;
++ }
++ spin_unlock_irqrestore(&io_tlb_lock, flags);
++}
++
++static void
++sync_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
++{
++ struct phys_addr buffer = dma_addr_to_phys_addr(dma_addr);
++ BUG_ON((dir != DMA_FROM_DEVICE) && (dir != DMA_TO_DEVICE));
++ __sync_single(buffer, dma_addr, size, dir);
++}
++
++static void
++swiotlb_full(struct device *dev, size_t size, int dir, int do_panic)
++{
++ /*
++ * Ran out of IOMMU space for this operation. This is very bad.
++ * Unfortunately drivers cannot handle this operation properly
++ * unless they check for pci_dma_mapping_error (most don't).
++ * When the mapping is small enough, return a static buffer to limit
++ * the damage, or panic when the transfer is too big.
++ */
++ printk(KERN_ERR "PCI-DMA: Out of SW-IOMMU space for %lu bytes at "
++ "device %s\n", (unsigned long)size, dev ? dev->bus_id : "?");
++
++ if (size > io_tlb_overflow && do_panic) {
++ if (dir == PCI_DMA_FROMDEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Memory would be corrupted\n");
++ if (dir == PCI_DMA_TODEVICE || dir == PCI_DMA_BIDIRECTIONAL)
++ panic("PCI-DMA: Random memory would be DMAed\n");
++ }
++}
++
++/*
++ * Map a single buffer of the indicated size for DMA in streaming mode. The
++ * PCI address to use is returned.
++ *
++ * Once the device is given the dma address, the device owns this memory until
++ * either swiotlb_unmap_single or swiotlb_dma_sync_single is performed.
++ */
++dma_addr_t
++swiotlb_map_single(struct device *hwdev, void *ptr, size_t size, int dir)
++{
++ dma_addr_t dev_addr = gnttab_dma_map_page(virt_to_page(ptr)) +
++ offset_in_page(ptr);
++ void *map;
++ struct phys_addr buffer;
++
++ BUG_ON(dir == DMA_NONE);
++
++ /*
++ * If the pointer passed in happens to be in the device's DMA window,
++ * we can safely return the device addr and not worry about bounce
++ * buffering it.
++ */
++ if (!range_straddles_page_boundary(__pa(ptr), size) &&
++ !address_needs_mapping(hwdev, dev_addr))
++ return dev_addr;
++
++ /*
++ * Oh well, have to allocate and map a bounce buffer.
++ */
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = virt_to_page(ptr);
++ buffer.offset = (unsigned long)ptr & ~PAGE_MASK;
++ map = map_single(hwdev, buffer, size, dir);
++ if (!map) {
++ swiotlb_full(hwdev, size, dir, 1);
++ map = io_tlb_overflow_buffer;
++ }
++
++ dev_addr = virt_to_bus(map);
++ return dev_addr;
++}
++
++/*
++ * Unmap a single streaming mode DMA translation. The dma_addr and size must
++ * match what was provided for in a previous swiotlb_map_single call. All
++ * other usages are undefined.
++ *
++ * After this call, reads by the cpu to the buffer are guaranteed to see
++ * whatever the device wrote there.
++ */
++void
++swiotlb_unmap_single(struct device *hwdev, dma_addr_t dev_addr, size_t size,
++ int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ unmap_single(hwdev, bus_to_virt(dev_addr), size, dir);
++ else
++ gnttab_dma_unmap_page(dev_addr);
++}
++
++/*
++ * Make physical memory consistent for a single streaming mode DMA translation
++ * after a transfer.
++ *
++ * If you perform a swiotlb_map_single() but wish to interrogate the buffer
++ * using the cpu, yet do not wish to tear down the PCI dma mapping, you must
++ * call this function before doing so. At the next point you give the PCI dma
++ * address back to the card, you must first perform a
++ * swiotlb_dma_sync_for_device, and then the device again owns the buffer
++ */
++void
++swiotlb_sync_single_for_cpu(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++void
++swiotlb_sync_single_for_device(struct device *hwdev, dma_addr_t dev_addr,
++ size_t size, int dir)
++{
++ BUG_ON(dir == DMA_NONE);
++ if (in_swiotlb_aperture(dev_addr))
++ sync_single(hwdev, bus_to_virt(dev_addr), size, dir);
++}
++
++/*
++ * Map a set of buffers described by scatterlist in streaming mode for DMA.
++ * This is the scatter-gather version of the above swiotlb_map_single
++ * interface. Here the scatter gather list elements are each tagged with the
++ * appropriate dma address and length. They are obtained via
++ * sg_dma_{address,length}(SG).
++ *
++ * NOTE: An implementation may be able to use a smaller number of
++ * DMA address/length pairs than there are SG table elements.
++ * (for example via virtual mapping capabilities)
++ * The routine returns the number of addr/length pairs actually
++ * used, at most nents.
++ *
++ * Device ownership issues as mentioned above for swiotlb_map_single are the
++ * same here.
++ */
++int
++swiotlb_map_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++) {
++ dev_addr = gnttab_dma_map_page(sg->page) + sg->offset;
++
++ if (range_straddles_page_boundary(page_to_pseudophys(sg->page)
++ + sg->offset, sg->length)
++ || address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = sg->page;
++ buffer.offset = sg->offset;
++ map = map_single(hwdev, buffer, sg->length, dir);
++ if (!map) {
++ /* Don't panic here, we expect map_sg users
++ to do proper error handling. */
++ swiotlb_full(hwdev, sg->length, dir, 0);
++ swiotlb_unmap_sg(hwdev, sg - i, i, dir);
++ sg[0].dma_length = 0;
++ return 0;
++ }
++ sg->dma_address = (dma_addr_t)virt_to_bus(map);
++ } else
++ sg->dma_address = dev_addr;
++ sg->dma_length = sg->length;
++ }
++ return nelems;
++}
++
++/*
++ * Unmap a set of streaming mode DMA translations. Again, cpu read rules
++ * concerning calls here are the same as for swiotlb_unmap_single() above.
++ */
++void
++swiotlb_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nelems,
++ int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ unmap_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++ else
++ gnttab_dma_unmap_page(sg->dma_address);
++}
++
++/*
++ * Make physical memory consistent for a set of streaming mode DMA translations
++ * after a transfer.
++ *
++ * The same as swiotlb_sync_single_* but for a scatter-gather list, same rules
++ * and usage.
++ */
++void
++swiotlb_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++void
++swiotlb_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
++ int nelems, int dir)
++{
++ int i;
++
++ BUG_ON(dir == DMA_NONE);
++
++ for (i = 0; i < nelems; i++, sg++)
++ if (in_swiotlb_aperture(sg->dma_address))
++ sync_single(hwdev,
++ (void *)bus_to_virt(sg->dma_address),
++ sg->dma_length, dir);
++}
++
++#ifdef CONFIG_HIGHMEM
++
++dma_addr_t
++swiotlb_map_page(struct device *hwdev, struct page *page,
++ unsigned long offset, size_t size,
++ enum dma_data_direction direction)
++{
++ struct phys_addr buffer;
++ dma_addr_t dev_addr;
++ char *map;
++
++ dev_addr = gnttab_dma_map_page(page) + offset;
++ if (address_needs_mapping(hwdev, dev_addr)) {
++ gnttab_dma_unmap_page(dev_addr);
++ buffer.page = page;
++ buffer.offset = offset;
++ map = map_single(hwdev, buffer, size, direction);
++ if (!map) {
++ swiotlb_full(hwdev, size, direction, 1);
++ map = io_tlb_overflow_buffer;
++ }
++ dev_addr = (dma_addr_t)virt_to_bus(map);
++ }
++
++ return dev_addr;
++}
++
++void
++swiotlb_unmap_page(struct device *hwdev, dma_addr_t dma_address,
++ size_t size, enum dma_data_direction direction)
++{
++ BUG_ON(direction == DMA_NONE);
++ if (in_swiotlb_aperture(dma_address))
++ unmap_single(hwdev, bus_to_virt(dma_address), size, direction);
++ else
++ gnttab_dma_unmap_page(dma_address);
++}
++
++#endif
++
++int
++swiotlb_dma_mapping_error(dma_addr_t dma_addr)
++{
++ return (dma_addr == virt_to_bus(io_tlb_overflow_buffer));
++}
++
++/*
++ * Return whether the given PCI device DMA address mask can be supported
++ * properly. For example, if your device can only drive the low 24-bits
++ * during PCI bus mastering, then you would pass 0x00ffffff as the mask to
++ * this function.
++ */
++int
++swiotlb_dma_supported (struct device *hwdev, u64 mask)
++{
++ return (mask >= ((1UL << dma_bits) - 1));
++}
++
++EXPORT_SYMBOL(swiotlb_init);
++EXPORT_SYMBOL(swiotlb_map_single);
++EXPORT_SYMBOL(swiotlb_unmap_single);
++EXPORT_SYMBOL(swiotlb_map_sg);
++EXPORT_SYMBOL(swiotlb_unmap_sg);
++EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_single_for_device);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_cpu);
++EXPORT_SYMBOL(swiotlb_sync_sg_for_device);
++EXPORT_SYMBOL(swiotlb_dma_mapping_error);
++EXPORT_SYMBOL(swiotlb_dma_supported);
+Index: head-2008-11-25/scripts/Makefile.xen.awk
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/scripts/Makefile.xen.awk 2007-08-06 15:10:49.000000000 +0200
+@@ -0,0 +1,34 @@
++BEGIN {
++ is_rule = 0
++}
++
++/^[[:space:]]*#/ {
++ next
++}
++
++/^[[:space:]]*$/ {
++ if (is_rule)
++ print("")
++ is_rule = 0
++ next
++}
++
++/:[[:space:]]*%\.[cS][[:space:]]/ {
++ line = gensub(/%.([cS])/, "%-xen.\\1", "g", $0)
++ line = gensub(/(single-used-m)/, "xen-\\1", "g", line)
++ print line
++ is_rule = 1
++ next
++}
++
++/^[^\t]/ {
++ if (is_rule)
++ print("")
++ is_rule = 0
++ next
++}
++
++is_rule {
++ print $0
++ next
++}
--- /dev/null
+Subject: xen3 xen-drivers
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+Index: head-2008-11-25/drivers/xen/balloon/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/balloon/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := balloon.o sysfs.o
+Index: head-2008-11-25/drivers/xen/balloon/balloon.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/balloon/balloon.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,724 @@
++/******************************************************************************
++ * balloon.c
++ *
++ * Xen balloon driver - enables returning/claiming memory to/from Xen.
++ *
++ * Copyright (c) 2003, B Dragovic
++ * Copyright (c) 2003-2004, M Williamson, K Fraser
++ * Copyright (c) 2005 Dan M. Smith, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/smp_lock.h>
++#include <linux/pagemap.h>
++#include <linux/bootmem.h>
++#include <linux/highmem.h>
++#include <linux/vmalloc.h>
++#include <linux/mutex.h>
++#include <xen/xen_proc.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++#include <asm/maddr.h>
++#include <asm/page.h>
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <linux/highmem.h>
++#include <linux/list.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#ifdef CONFIG_PROC_FS
++static struct proc_dir_entry *balloon_pde;
++#endif
++
++static DEFINE_MUTEX(balloon_mutex);
++
++/*
++ * Protects atomic reservation decrease/increase against concurrent increases.
++ * Also protects non-atomic updates of current_pages and driver_pages, and
++ * balloon lists.
++ */
++DEFINE_SPINLOCK(balloon_lock);
++
++struct balloon_stats balloon_stats;
++
++/* We increase/decrease in batches which fit in a page */
++static unsigned long frame_list[PAGE_SIZE / sizeof(unsigned long)];
++
++/* VM /proc information for memory */
++extern unsigned long totalram_pages;
++
++#ifndef MODULE
++extern unsigned long totalhigh_pages;
++#define inc_totalhigh_pages() (totalhigh_pages++)
++#define dec_totalhigh_pages() (totalhigh_pages--)
++#else
++#define inc_totalhigh_pages() ((void)0)
++#define dec_totalhigh_pages() ((void)0)
++#endif
++
++/* List of ballooned pages, threaded through the mem_map array. */
++static LIST_HEAD(ballooned_pages);
++
++/* Main work function, always executed in process context. */
++static void balloon_process(void *unused);
++static DECLARE_WORK(balloon_worker, balloon_process, NULL);
++static struct timer_list balloon_timer;
++
++/* When ballooning out (allocating memory to return to Xen) we don't really
++ want the kernel to try too hard since that can trigger the oom killer. */
++#define GFP_BALLOON \
++ (GFP_HIGHUSER|__GFP_NOWARN|__GFP_NORETRY|__GFP_NOMEMALLOC|__GFP_COLD)
++
++#define PAGE_TO_LIST(p) (&(p)->lru)
++#define LIST_TO_PAGE(l) list_entry((l), struct page, lru)
++#define UNLIST_PAGE(p) \
++ do { \
++ list_del(PAGE_TO_LIST(p)); \
++ PAGE_TO_LIST(p)->next = NULL; \
++ PAGE_TO_LIST(p)->prev = NULL; \
++ } while(0)
++
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_mem: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_mem: " fmt, ##args)
++
++/* balloon_append: add the given page to the balloon. */
++static void balloon_append(struct page *page)
++{
++ /* Lowmem is re-populated first, so highmem pages go at list tail. */
++ if (PageHighMem(page)) {
++ list_add_tail(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_high++;
++ dec_totalhigh_pages();
++ } else {
++ list_add(PAGE_TO_LIST(page), &ballooned_pages);
++ bs.balloon_low++;
++ }
++}
++
++/* balloon_retrieve: rescue a page from the balloon, if it is not empty. */
++static struct page *balloon_retrieve(void)
++{
++ struct page *page;
++
++ if (list_empty(&ballooned_pages))
++ return NULL;
++
++ page = LIST_TO_PAGE(ballooned_pages.next);
++ UNLIST_PAGE(page);
++
++ if (PageHighMem(page)) {
++ bs.balloon_high--;
++ inc_totalhigh_pages();
++ }
++ else
++ bs.balloon_low--;
++
++ return page;
++}
++
++static struct page *balloon_first_page(void)
++{
++ if (list_empty(&ballooned_pages))
++ return NULL;
++ return LIST_TO_PAGE(ballooned_pages.next);
++}
++
++static struct page *balloon_next_page(struct page *page)
++{
++ struct list_head *next = PAGE_TO_LIST(page)->next;
++ if (next == &ballooned_pages)
++ return NULL;
++ return LIST_TO_PAGE(next);
++}
++
++static inline void balloon_free_page(struct page *page)
++{
++#ifndef MODULE
++ if (put_page_testzero(page))
++ free_cold_page(page);
++#else
++ /* free_cold_page() is not being exported. */
++ __free_page(page);
++#endif
++}
++
++static void balloon_alarm(unsigned long unused)
++{
++ schedule_work(&balloon_worker);
++}
++
++static unsigned long current_target(void)
++{
++ unsigned long target = min(bs.target_pages, bs.hard_limit);
++ if (target > (bs.current_pages + bs.balloon_low + bs.balloon_high))
++ target = bs.current_pages + bs.balloon_low + bs.balloon_high;
++ return target;
++}
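++
++/*
++ * E.g. (illustrative numbers): with current_pages == 100000 and
++ * balloon_low + balloon_high == 20000, a target of 150000 pages is
++ * clamped to 120000: the domain cannot grow beyond what it owns plus
++ * what it previously ballooned out.
++ */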
++
++static unsigned long minimum_target(void)
++{
++#ifndef CONFIG_XEN
++#define max_pfn num_physpages
++#endif
++ unsigned long min_pages, curr_pages = current_target();
++
++#define MB2PAGES(mb) ((mb) << (20 - PAGE_SHIFT))
++	/* Simple continuous piecewise linear function:
++ * max MiB -> min MiB gradient
++ * 0 0
++ * 16 16
++ * 32 24
++ * 128 72 (1/2)
++ * 512 168 (1/4)
++ * 2048 360 (1/8)
++ * 8192 552 (1/32)
++ * 32768 1320
++ * 131072 4392
++ */
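++	/*
++	 * Worked example (illustrative): a 1024 MiB domain has
++	 * max_pfn == MB2PAGES(1024), so min_pages comes to
++	 * MB2PAGES(104) + MB2PAGES(1024)/8 == MB2PAGES(232),
++	 * i.e. the balloon never shrinks the domain below 232 MiB.
++	 */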
++ if (max_pfn < MB2PAGES(128))
++ min_pages = MB2PAGES(8) + (max_pfn >> 1);
++ else if (max_pfn < MB2PAGES(512))
++ min_pages = MB2PAGES(40) + (max_pfn >> 2);
++ else if (max_pfn < MB2PAGES(2048))
++ min_pages = MB2PAGES(104) + (max_pfn >> 3);
++ else
++ min_pages = MB2PAGES(296) + (max_pfn >> 5);
++#undef MB2PAGES
++
++ /* Don't enforce growth */
++ return min(min_pages, curr_pages);
++#ifndef CONFIG_XEN
++#undef max_pfn
++#endif
++}
++
++static int increase_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ long rc;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ balloon_lock(flags);
++
++ page = balloon_first_page();
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page == NULL);
++		frame_list[i] = page_to_pfn(page);
++ page = balloon_next_page(page);
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ rc = HYPERVISOR_memory_op(
++ XENMEM_populate_physmap, &reservation);
++ if (rc < nr_pages) {
++ if (rc > 0) {
++ int ret;
++
++ /* We hit the Xen hard limit: reprobe. */
++ reservation.nr_extents = rc;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ BUG_ON(ret != rc);
++ }
++ if (rc >= 0)
++ bs.hard_limit = (bs.current_pages + rc -
++ bs.driver_pages);
++ goto out;
++ }
++
++ for (i = 0; i < nr_pages; i++) {
++ page = balloon_retrieve();
++ BUG_ON(page == NULL);
++
++ pfn = page_to_pfn(page);
++ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
++ phys_to_machine_mapping_valid(pfn));
++
++ set_phys_to_machine(pfn, frame_list[i]);
++
++#ifdef CONFIG_XEN
++ /* Link back into the page tables if not highmem. */
++ if (pfn < max_low_pfn) {
++ int ret;
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)__va(pfn << PAGE_SHIFT),
++ pfn_pte_ma(frame_list[i], PAGE_KERNEL),
++ 0);
++ BUG_ON(ret);
++ }
++#endif
++
++ /* Relinquish the page back to the allocator. */
++ ClearPageReserved(page);
++ init_page_count(page);
++ balloon_free_page(page);
++ }
++
++ bs.current_pages += nr_pages;
++ totalram_pages = bs.current_pages;
++
++ out:
++ balloon_unlock(flags);
++
++ return 0;
++}
++
++static int decrease_reservation(unsigned long nr_pages)
++{
++ unsigned long pfn, i, flags;
++ struct page *page;
++ void *v;
++ int need_sleep = 0;
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .address_bits = 0,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++
++ if (nr_pages > ARRAY_SIZE(frame_list))
++ nr_pages = ARRAY_SIZE(frame_list);
++
++ for (i = 0; i < nr_pages; i++) {
++ if ((page = alloc_page(GFP_BALLOON)) == NULL) {
++ nr_pages = i;
++ need_sleep = 1;
++ break;
++ }
++
++ pfn = page_to_pfn(page);
++ frame_list[i] = pfn_to_mfn(pfn);
++
++ if (!PageHighMem(page)) {
++ v = phys_to_virt(pfn << PAGE_SHIFT);
++ scrub_pages(v, 1);
++#ifdef CONFIG_XEN
++ ret = HYPERVISOR_update_va_mapping(
++ (unsigned long)v, __pte_ma(0), 0);
++ BUG_ON(ret);
++#endif
++ }
++#ifdef CONFIG_XEN_SCRUB_PAGES
++ else {
++ v = kmap(page);
++ scrub_pages(v, 1);
++ kunmap(page);
++ }
++#endif
++ }
++
++#ifdef CONFIG_XEN
++ /* Ensure that ballooned highmem pages don't have kmaps. */
++ kmap_flush_unused();
++ flush_tlb_all();
++#endif
++
++ balloon_lock(flags);
++
++ /* No more mappings: invalidate P2M and add to balloon. */
++ for (i = 0; i < nr_pages; i++) {
++ pfn = mfn_to_pfn(frame_list[i]);
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ balloon_append(pfn_to_page(pfn));
++ }
++
++ set_xen_guest_handle(reservation.extent_start, frame_list);
++ reservation.nr_extents = nr_pages;
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != nr_pages);
++
++ bs.current_pages -= nr_pages;
++ totalram_pages = bs.current_pages;
++
++ balloon_unlock(flags);
++
++ return need_sleep;
++}
++
++/*
++ * We avoid multiple worker processes conflicting via the balloon mutex.
++ * We may of course race updates of the target counts (which are protected
++ * by the balloon lock), or with changes to the Xen hard limit, but we will
++ * recover from these in time.
++ */
++static void balloon_process(void *unused)
++{
++ int need_sleep = 0;
++ long credit;
++
++ mutex_lock(&balloon_mutex);
++
++ do {
++ credit = current_target() - bs.current_pages;
++ if (credit > 0)
++ need_sleep = (increase_reservation(credit) != 0);
++ if (credit < 0)
++ need_sleep = (decrease_reservation(-credit) != 0);
++
++#ifndef CONFIG_PREEMPT
++ if (need_resched())
++ schedule();
++#endif
++ } while ((credit != 0) && !need_sleep);
++
++ /* Schedule more work if there is some still to be done. */
++ if (current_target() != bs.current_pages)
++ mod_timer(&balloon_timer, jiffies + HZ);
++
++ mutex_unlock(&balloon_mutex);
++}
++
++/* Resets the Xen limit, sets new target, and kicks off processing. */
++void balloon_set_new_target(unsigned long target)
++{
++	/* No lock needed: these are not read-modify-write updates. */
++ bs.hard_limit = ~0UL;
++ bs.target_pages = max(target, minimum_target());
++ schedule_work(&balloon_worker);
++}
++
++static struct xenbus_watch target_watch =
++{
++ .node = "memory/target"
++};
++
++/* React to a change in the target key */
++static void watch_target(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ unsigned long long new_target;
++ int err;
++
++ err = xenbus_scanf(XBT_NIL, "memory", "target", "%llu", &new_target);
++ if (err != 1) {
++ /* This is ok (for domain0 at least) - so just return */
++ return;
++ }
++
++ /* The given memory/target value is in KiB, so it needs converting to
++ * pages. PAGE_SHIFT converts bytes to pages, hence PAGE_SHIFT - 10.
++ */
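++	/* E.g. (illustrative): a target of 524288 KiB (512 MiB) with
++	 * 4 KiB pages (PAGE_SHIFT == 12) becomes 524288 >> 2 == 131072
++	 * pages. */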
++ balloon_set_new_target(new_target >> (PAGE_SHIFT - 10));
++}
++
++static int balloon_init_watcher(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ int err;
++
++ err = register_xenbus_watch(&target_watch);
++ if (err)
++ printk(KERN_ERR "Failed to set balloon watcher\n");
++
++ return NOTIFY_DONE;
++}
++
++#ifdef CONFIG_PROC_FS
++static int balloon_write(struct file *file, const char __user *buffer,
++ unsigned long count, void *data)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++	if (count >= sizeof(memstring))
++		return -EFBIG;	/* too long */
++
++	if (copy_from_user(memstring, buffer, count))
++		return -EFAULT;
++	memstring[count] = '\0';
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
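++
++/*
++ * Illustrative usage (assuming the default /proc/xen mount):
++ *   echo 512M > /proc/xen/balloon
++ * memparse() understands the usual K/M/G suffixes, so this requests a
++ * 512 MiB target allocation.
++ */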
++
++static int balloon_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len;
++
++ len = sprintf(
++ page,
++ "Current allocation: %8lu kB\n"
++ "Requested target: %8lu kB\n"
++ "Low-mem balloon: %8lu kB\n"
++ "High-mem balloon: %8lu kB\n"
++ "Driver pages: %8lu kB\n"
++ "Xen hard limit: ",
++ PAGES2KB(bs.current_pages), PAGES2KB(bs.target_pages),
++ PAGES2KB(bs.balloon_low), PAGES2KB(bs.balloon_high),
++ PAGES2KB(bs.driver_pages));
++
++ if (bs.hard_limit != ~0UL)
++ len += sprintf(page + len, "%8lu kB\n",
++ PAGES2KB(bs.hard_limit));
++ else
++ len += sprintf(page + len, " ??? kB\n");
++
++ *eof = 1;
++ return len;
++}
++#endif
++
++static struct notifier_block xenstore_notifier;
++
++static int __init balloon_init(void)
++{
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ unsigned long pfn;
++ struct page *page;
++#endif
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ IPRINTK("Initialising balloon driver.\n");
++
++#ifdef CONFIG_XEN
++ bs.current_pages = min(xen_start_info->nr_pages, max_pfn);
++ totalram_pages = bs.current_pages;
++#else
++ bs.current_pages = totalram_pages;
++#endif
++ bs.target_pages = bs.current_pages;
++ bs.balloon_low = 0;
++ bs.balloon_high = 0;
++ bs.driver_pages = 0UL;
++ bs.hard_limit = ~0UL;
++
++ init_timer(&balloon_timer);
++ balloon_timer.data = 0;
++ balloon_timer.function = balloon_alarm;
++
++#ifdef CONFIG_PROC_FS
++ if ((balloon_pde = create_xen_proc_entry("balloon", 0644)) == NULL) {
++ WPRINTK("Unable to create /proc/xen/balloon.\n");
++		return -ENOMEM;
++ }
++
++ balloon_pde->read_proc = balloon_read;
++ balloon_pde->write_proc = balloon_write;
++#endif
++ balloon_sysfs_init();
++
++#if defined(CONFIG_X86) && defined(CONFIG_XEN)
++ /* Initialise the balloon with excess memory space. */
++ for (pfn = xen_start_info->nr_pages; pfn < max_pfn; pfn++) {
++ page = pfn_to_page(pfn);
++ if (!PageReserved(page))
++ balloon_append(page);
++ }
++#endif
++
++ target_watch.callback = watch_target;
++ xenstore_notifier.notifier_call = balloon_init_watcher;
++
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(balloon_init);
++
++static void __exit balloon_exit(void)
++{
++ /* XXX - release balloon here */
++ return;
++}
++
++module_exit(balloon_exit);
++
++void balloon_update_driver_allowance(long delta)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ bs.driver_pages += delta;
++ balloon_unlock(flags);
++}
++
++#ifdef CONFIG_XEN
++static int dealloc_pte_fn(
++ pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ unsigned long mfn = pte_mfn(*pte);
++ int ret;
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &mfn);
++ set_pte_at(&init_mm, addr, pte, __pte_ma(0));
++ set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
++ BUG_ON(ret != 1);
++ return 0;
++}
++#endif
++
++struct page **alloc_empty_pages_and_pagevec(int nr_pages)
++{
++ unsigned long flags;
++ void *v;
++ struct page *page, **pagevec;
++ int i, ret;
++
++	pagevec = kmalloc(sizeof(pagevec[0]) * nr_pages, GFP_KERNEL);
++ if (pagevec == NULL)
++ return NULL;
++
++ for (i = 0; i < nr_pages; i++) {
++ page = pagevec[i] = alloc_page(GFP_KERNEL|__GFP_COLD);
++ if (page == NULL)
++ goto err;
++
++ v = page_address(page);
++ scrub_pages(v, 1);
++
++ balloon_lock(flags);
++
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ unsigned long gmfn = page_to_pfn(page);
++ struct xen_memory_reservation reservation = {
++ .nr_extents = 1,
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ set_xen_guest_handle(reservation.extent_start, &gmfn);
++ ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation);
++ if (ret == 1)
++ ret = 0; /* success */
++ } else {
++#ifdef CONFIG_XEN
++ ret = apply_to_page_range(&init_mm, (unsigned long)v,
++ PAGE_SIZE, dealloc_pte_fn,
++ NULL);
++#else
++ /* Cannot handle non-auto translate mode. */
++ ret = 1;
++#endif
++ }
++
++ if (ret != 0) {
++ balloon_unlock(flags);
++ balloon_free_page(page);
++ goto err;
++ }
++
++ totalram_pages = --bs.current_pages;
++
++ balloon_unlock(flags);
++ }
++
++ out:
++ schedule_work(&balloon_worker);
++#ifdef CONFIG_XEN
++ flush_tlb_all();
++#endif
++ return pagevec;
++
++ err:
++ balloon_lock(flags);
++ while (--i >= 0)
++ balloon_append(pagevec[i]);
++ balloon_unlock(flags);
++ kfree(pagevec);
++ pagevec = NULL;
++ goto out;
++}
++
++void free_empty_pages_and_pagevec(struct page **pagevec, int nr_pages)
++{
++ unsigned long flags;
++ int i;
++
++ if (pagevec == NULL)
++ return;
++
++ balloon_lock(flags);
++ for (i = 0; i < nr_pages; i++) {
++ BUG_ON(page_count(pagevec[i]) != 1);
++ balloon_append(pagevec[i]);
++ }
++ balloon_unlock(flags);
++
++ kfree(pagevec);
++
++ schedule_work(&balloon_worker);
++}
++
++void balloon_release_driver_page(struct page *page)
++{
++ unsigned long flags;
++
++ balloon_lock(flags);
++ balloon_append(page);
++ bs.driver_pages--;
++ balloon_unlock(flags);
++
++ schedule_work(&balloon_worker);
++}
++
++EXPORT_SYMBOL_GPL(balloon_update_driver_allowance);
++EXPORT_SYMBOL_GPL(alloc_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(free_empty_pages_and_pagevec);
++EXPORT_SYMBOL_GPL(balloon_release_driver_page);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/balloon/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/balloon/common.h 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,58 @@
++/******************************************************************************
++ * balloon/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_BALLOON_COMMON_H__
++#define __XEN_BALLOON_COMMON_H__
++
++#define PAGES2KB(_p) ((_p)<<(PAGE_SHIFT-10))
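++/* E.g. (illustrative): with 4 KiB pages PAGES2KB(p) == p * 4, so a
++ * balloon of 25600 pages reads as 102400 kB. */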
++
++struct balloon_stats {
++ /* We aim for 'current allocation' == 'target allocation'. */
++ unsigned long current_pages;
++ unsigned long target_pages;
++ /* We may hit the hard limit in Xen. If we do then we remember it. */
++ unsigned long hard_limit;
++ /*
++ * Drivers may alter the memory reservation independently, but they
++ * must inform the balloon driver so we avoid hitting the hard limit.
++ */
++ unsigned long driver_pages;
++ /* Number of pages in high- and low-memory balloons. */
++ unsigned long balloon_low;
++ unsigned long balloon_high;
++};
++
++extern struct balloon_stats balloon_stats;
++#define bs balloon_stats
++
++int balloon_sysfs_init(void);
++void balloon_sysfs_exit(void);
++
++void balloon_set_new_target(unsigned long target);
++
++#endif /* __XEN_BALLOON_COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/balloon/sysfs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/balloon/sysfs.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,170 @@
++/******************************************************************************
++ * balloon/sysfs.c
++ *
++ * Xen balloon driver - sysfs interfaces.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/capability.h>
++#include <linux/errno.h>
++#include <linux/stat.h>
++#include <linux/string.h>
++#include <linux/sysdev.h>
++#include "common.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BALLOON_CLASS_NAME "xen_memory"
++
++#define BALLOON_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct sys_device *dev, \
++ char *buf) \
++ { \
++ return sprintf(buf, format, ##args); \
++ } \
++ static SYSDEV_ATTR(name, S_IRUGO, show_##name, NULL)
++
++BALLOON_SHOW(current_kb, "%lu\n", PAGES2KB(bs.current_pages));
++BALLOON_SHOW(low_kb, "%lu\n", PAGES2KB(bs.balloon_low));
++BALLOON_SHOW(high_kb, "%lu\n", PAGES2KB(bs.balloon_high));
++BALLOON_SHOW(hard_limit_kb,
++ (bs.hard_limit!=~0UL) ? "%lu\n" : "???\n",
++ (bs.hard_limit!=~0UL) ? PAGES2KB(bs.hard_limit) : 0);
++BALLOON_SHOW(driver_kb, "%lu\n", PAGES2KB(bs.driver_pages));
++
++static ssize_t show_target_kb(struct sys_device *dev, char *buf)
++{
++ return sprintf(buf, "%lu\n", PAGES2KB(bs.target_pages));
++}
++
++static ssize_t store_target_kb(struct sys_device *dev,
++ const char *buf,
++ size_t count)
++{
++ char memstring[64], *endchar;
++ unsigned long long target_bytes;
++
++ if (!capable(CAP_SYS_ADMIN))
++ return -EPERM;
++
++ if (count <= 1)
++ return -EBADMSG; /* runt */
++	if (count >= sizeof(memstring))
++		return -EFBIG;	/* too long */
++	memcpy(memstring, buf, count);
++	memstring[count] = '\0';
++
++ target_bytes = memparse(memstring, &endchar);
++ balloon_set_new_target(target_bytes >> PAGE_SHIFT);
++
++ return count;
++}
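++
++/*
++ * Illustrative usage (sysdev class "xen_memory", id 0, so the node is
++ * expected under /sys/devices/system/xen_memory/xen_memory0/):
++ *   echo 524288 > .../target_kb
++ * requests a 512 MiB target, mirroring the /proc/xen/balloon interface.
++ */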
++
++static SYSDEV_ATTR(target_kb, S_IRUGO | S_IWUSR,
++ show_target_kb, store_target_kb);
++
++static struct sysdev_attribute *balloon_attrs[] = {
++ &attr_target_kb,
++};
++
++static struct attribute *balloon_info_attrs[] = {
++ &attr_current_kb.attr,
++ &attr_low_kb.attr,
++ &attr_high_kb.attr,
++ &attr_hard_limit_kb.attr,
++ &attr_driver_kb.attr,
++ NULL
++};
++
++static struct attribute_group balloon_info_group = {
++ .name = "info",
++ .attrs = balloon_info_attrs,
++};
++
++static struct sysdev_class balloon_sysdev_class = {
++ set_kset_name(BALLOON_CLASS_NAME),
++};
++
++static struct sys_device balloon_sysdev;
++
++static int register_balloon(struct sys_device *sysdev)
++{
++ int i, error;
++
++ error = sysdev_class_register(&balloon_sysdev_class);
++ if (error)
++ return error;
++
++ sysdev->id = 0;
++ sysdev->cls = &balloon_sysdev_class;
++
++ error = sysdev_register(sysdev);
++ if (error) {
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++ }
++
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++) {
++ error = sysdev_create_file(sysdev, balloon_attrs[i]);
++ if (error)
++ goto fail;
++ }
++
++ error = sysfs_create_group(&sysdev->kobj, &balloon_info_group);
++ if (error)
++ goto fail;
++
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++ return error;
++}
++
++static void unregister_balloon(struct sys_device *sysdev)
++{
++ int i;
++
++ sysfs_remove_group(&sysdev->kobj, &balloon_info_group);
++ for (i = 0; i < ARRAY_SIZE(balloon_attrs); i++)
++ sysdev_remove_file(sysdev, balloon_attrs[i]);
++ sysdev_unregister(sysdev);
++ sysdev_class_unregister(&balloon_sysdev_class);
++}
++
++int balloon_sysfs_init(void)
++{
++ return register_balloon(&balloon_sysdev);
++}
++
++void balloon_sysfs_exit(void)
++{
++ unregister_balloon(&balloon_sysdev);
++}
+Index: head-2008-11-25/drivers/xen/blkback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,3 @@
++obj-$(CONFIG_XEN_BLKDEV_BACKEND) := blkbk.o
++
++blkbk-y := blkback.o xenbus.o interface.o vbd.o
+Index: head-2008-11-25/drivers/xen/blkback/blkback.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/blkback.c 2008-11-10 11:44:21.000000000 +0100
+@@ -0,0 +1,656 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/main.c
++ *
++ * Back-end of the driver for virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * arch/xen/drivers/blkif/frontend
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Copyright (c) 2005, Christopher Clark
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++
++/*
++ * These are rather arbitrary. They are fairly large because adjacent requests
++ * pulled from a communication ring are quite likely to end up being part of
++ * the same scatter/gather request at the disc.
++ *
++ * ** TRY INCREASING 'blkif_reqs' IF WRITE SPEEDS SEEM TOO LOW **
++ *
++ * This will increase the chances of being able to write whole tracks.
++ * 64 should be enough to keep us competitive with Linux.
++ */
++static int blkif_reqs = 64;
++module_param_named(reqs, blkif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of blkback requests to allocate");
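++/* Illustrative override (the module is built as blkbk.o, per the
++ * Makefile above): modprobe blkbk reqs=128 */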
++
++/* Run-time switchable: /sys/module/blkbk/parameters/ */
++static int log_stats = 0;
++static int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each bio that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++} pending_req_t;
++
++static pending_req_t *pending_reqs;
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
++
++static inline int vaddr_pagenr(pending_req_t *req, int seg)
++{
++ return (req - pending_reqs) * BLKIF_MAX_SEGMENTS_PER_REQUEST + seg;
++}
++
++static inline unsigned long vaddr(pending_req_t *req, int seg)
++{
++ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
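++
++/*
++ * Illustrative sizing: with the default blkif_reqs == 64 and
++ * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11, vaddr_pagenr() indexes a pool
++ * of 64 * 11 == 704 pre-allocated pages (set up in blkif_init() below).
++ */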
++
++#define pending_handle(_req, _seg) \
++ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
++
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void unplug_queue(blkif_t *blkif)
++{
++ if (blkif->plug == NULL)
++ return;
++ if (blkif->plug->unplug_fn)
++ blkif->plug->unplug_fn(blkif->plug);
++ blk_put_queue(blkif->plug);
++ blkif->plug = NULL;
++}
++
++static void plug_queue(blkif_t *blkif, struct block_device *bdev)
++{
++ request_queue_t *q = bdev_get_queue(bdev);
++
++ if (q == blkif->plug)
++ return;
++ unplug_queue(blkif);
++ blk_get_queue(q);
++ blkif->plug = q;
++}
++
++static void fast_flush_area(pending_req_t *req)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int i, invcount = 0;
++ grant_handle_t handle;
++ int ret;
++
++ for (i = 0; i < req->nr_pages; i++) {
++ handle = pending_handle(req, i);
++ if (handle == BLKBACK_INVALID_HANDLE)
++ continue;
++ gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
++ GNTMAP_host_map, handle);
++ pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
++ invcount++;
++ }
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d | br %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req, blkif->st_br_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++ blkif->st_rd_req = 0;
++ blkif->st_wr_req = 0;
++ blkif->st_oo_req = 0;
++}
++
++int blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++ unplug_queue(blkif);
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called as bh->b_end_io()
++ */
++
++static void __end_block_io_op(pending_req_t *pending_req, int error)
++{
++ /* An error fails the entire request. */
++ if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
++ (error == -EOPNOTSUPP)) {
++ DPRINTK("blkback: write barrier op failed, not supported\n");
++ blkback_barrier(XBT_NIL, pending_req->blkif->be, 0);
++ pending_req->status = BLKIF_RSP_EOPNOTSUPP;
++ } else if (error) {
++ DPRINTK("Buffer not up-to-date at end of operation, "
++ "error=%d\n", error);
++ pending_req->status = BLKIF_RSP_ERROR;
++ }
++
++ if (atomic_dec_and_test(&pending_req->pendcnt)) {
++ fast_flush_area(pending_req);
++ make_response(pending_req->blkif, pending_req->id,
++ pending_req->operation, pending_req->status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++}
++
++static int end_block_io_op(struct bio *bio, unsigned int done, int error)
++{
++ if (bio->bi_size != 0)
++ return 1;
++ __end_block_io_op(bio->bi_private, error);
++ bio_put(bio);
++ return error;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
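++	/* (Pairs with the wmb() the frontend issues before it advances
++	 * req_prod; see RING_PUSH_REQUESTS() in the shared ring macros.) */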
++
++ while (rc != rp) {
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
++ break;
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ if (kthread_should_stop()) {
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ /* Apply all sanity checks to /private copy/ of request. */
++ barrier();
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ blkif->st_br_req++;
++ /* fall through */
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++ default:
++ /* A good sign something is wrong: sleep for a while to
++ * avoid excessive CPU consumption by a bad guest. */
++ msleep(1);
++ DPRINTK("error: unknown block io operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++ }
++
++ return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ struct phys_req preq;
++ struct {
++ unsigned long buf; unsigned int nsec;
++ } seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++ unsigned int nseg;
++ struct bio *bio = NULL;
++ int ret, i;
++ int operation;
++
++ switch (req->operation) {
++ case BLKIF_OP_READ:
++ operation = READ;
++ break;
++ case BLKIF_OP_WRITE:
++ operation = WRITE;
++ break;
++ case BLKIF_OP_WRITE_BARRIER:
++ operation = WRITE_BARRIER;
++ break;
++ default:
++ operation = 0; /* make gcc happy */
++ BUG();
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if (unlikely(nseg == 0 && operation != WRITE_BARRIER) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) {
++ DPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ preq.dev = req->handle;
++ preq.sector_number = req->sector_number;
++ preq.nr_sects = 0;
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = req->operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++
++ for (i = 0; i < nseg; i++) {
++ uint32_t flags;
++
++ seg[i].nsec = req->seg[i].last_sect -
++ req->seg[i].first_sect + 1;
++
++ if ((req->seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
++ (req->seg[i].last_sect < req->seg[i].first_sect))
++ goto fail_response;
++ preq.nr_sects += seg[i].nsec;
++
++ flags = GNTMAP_host_map;
++ if (operation != READ)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++ req->seg[i].gref, blkif->domid);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
++ BUG_ON(ret);
++
++ for (i = 0; i < nseg; i++) {
++ if (unlikely(map[i].status != 0)) {
++ DPRINTK("invalid buffer -- could not remap it\n");
++ map[i].handle = BLKBACK_INVALID_HANDLE;
++ ret |= 1;
++ }
++
++ pending_handle(pending_req, i) = map[i].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(vaddr(
++ pending_req, i)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++ seg[i].buf = map[i].dev_bus_addr |
++ (req->seg[i].first_sect << 9);
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (vbd_translate(&preq, blkif, operation) != 0) {
++ DPRINTK("access denied: %s of [%llu,%llu] on dev=%04x\n",
++ operation == READ ? "read" : "write",
++ preq.sector_number,
++ preq.sector_number + preq.nr_sects, preq.dev);
++ goto fail_flush;
++ }
++
++ plug_queue(blkif, preq.bdev);
++ atomic_set(&pending_req->pendcnt, 1);
++ blkif_get(blkif);
++
++ for (i = 0; i < nseg; i++) {
++ if (((int)preq.sector_number|(int)seg[i].nsec) &
++ ((bdev_hardsect_size(preq.bdev) >> 9) - 1)) {
++ DPRINTK("Misaligned I/O request from domain %d",
++ blkif->domid);
++ goto fail_put_bio;
++ }
++
++ while ((bio == NULL) ||
++ (bio_add_page(bio,
++ virt_to_page(vaddr(pending_req, i)),
++ seg[i].nsec << 9,
++ seg[i].buf & ~PAGE_MASK) == 0)) {
++ if (bio) {
++ atomic_inc(&pending_req->pendcnt);
++ submit_bio(operation, bio);
++ }
++
++ bio = bio_alloc(GFP_KERNEL, nseg-i);
++ if (unlikely(bio == NULL))
++ goto fail_put_bio;
++
++ bio->bi_bdev = preq.bdev;
++ bio->bi_private = pending_req;
++ bio->bi_end_io = end_block_io_op;
++ bio->bi_sector = preq.sector_number;
++ }
++
++ preq.sector_number += seg[i].nsec;
++ }
++
++ if (!bio) {
++ BUG_ON(operation != WRITE_BARRIER);
++ bio = bio_alloc(GFP_KERNEL, 0);
++ if (unlikely(bio == NULL))
++ goto fail_put_bio;
++
++ bio->bi_bdev = preq.bdev;
++ bio->bi_private = pending_req;
++ bio->bi_end_io = end_block_io_op;
++ bio->bi_sector = -1;
++ }
++
++ submit_bio(operation, bio);
++
++ if (operation == READ)
++ blkif->st_rd_sect += preq.nr_sects;
++ else if (operation == WRITE || operation == WRITE_BARRIER)
++ blkif->st_wr_sect += preq.nr_sects;
++
++ return;
++
++ fail_flush:
++ fast_flush_area(pending_req);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ msleep(1); /* back off a bit */
++ return;
++
++ fail_put_bio:
++ __end_block_io_op(pending_req, -EINVAL);
++ if (bio)
++ bio_put(bio);
++ unplug_queue(blkif);
++ msleep(1); /* back off a bit */
++ return;
++}
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++ int i, mmap_pages;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ mmap_pages = blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST;
++
++ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
++ blkif_reqs, GFP_KERNEL);
++ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++ mmap_pages, GFP_KERNEL);
++ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs || !pending_grant_handles || !pending_pages)
++ goto out_of_memory;
++
++ for (i = 0; i < mmap_pages; i++)
++ pending_grant_handles[i] = BLKBACK_INVALID_HANDLE;
++
++ blkif_interface_init();
++
++	memset(pending_reqs, 0, sizeof(pending_reqs[0]) * blkif_reqs);
++ INIT_LIST_HEAD(&pending_free);
++
++ for (i = 0; i < blkif_reqs; i++)
++ list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++ blkif_xenbus_init();
++
++ return 0;
++
++ out_of_memory:
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++	printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/blkback/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/common.h 2008-05-08 14:02:04.000000000 +0200
+@@ -0,0 +1,139 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct vbd {
++ blkif_vdev_t handle; /* what the domain refers to this vbd as */
++ unsigned char readonly; /* Non-zero -> read-only */
++ unsigned char type; /* VDISK_xxx */
++ u32 pdevice; /* phys device that this vbd maps to */
++ struct block_device *bdev;
++};
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* The VBD attached to this interface. */
++ struct vbd vbd;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_br_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++} blkif_t;
++
++blkif_t *blkif_alloc(domid_t domid);
++void blkif_disconnect(blkif_t *blkif);
++void blkif_free(blkif_t *blkif);
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
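++
++/*
++ * Illustrative lifetime: blkif_schedule() holds a reference for the
++ * kthread's lifetime and each in-flight request takes one more;
++ * blkif_disconnect() drops its own reference and sleeps on
++ * waiting_to_free until the count reaches zero.
++ */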
++
++/* Create a vbd. */
++int vbd_create(blkif_t *blkif, blkif_vdev_t vdevice, unsigned major,
++ unsigned minor, int readonly, int cdrom);
++void vbd_free(struct vbd *vbd);
++
++unsigned long long vbd_size(struct vbd *vbd);
++unsigned int vbd_info(struct vbd *vbd);
++unsigned long vbd_secsize(struct vbd *vbd);
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation);
++
++void blkif_interface_init(void);
++
++void blkif_xenbus_init(void);
++
++irqreturn_t blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int blkif_schedule(void *arg);
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/blkback/interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/interface.c 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * arch/xen/drivers/blkif/backend/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *blkif_alloc(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++		DPRINTK("Grant table operation failure!\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int blkif_map(blkif_t *blkif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++	/* Already connected? */
++ if (blkif->irq)
++ return 0;
++
++	if ((blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, blkif_be_int, 0, "blkif-backend", blkif);
++	if (err < 0) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void blkif_disconnect(blkif_t *blkif)
++{
++ if (blkif->xenblkd) {
++ kthread_stop(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ }
++
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++ atomic_inc(&blkif->refcnt);
++
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void blkif_free(blkif_t *blkif)
++{
++ if (!atomic_dec_and_test(&blkif->refcnt))
++ BUG();
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blkif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+Index: head-2008-11-25/drivers/xen/blkback/vbd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/vbd.c 2008-05-08 14:02:04.000000000 +0200
+@@ -0,0 +1,118 @@
++/******************************************************************************
++ * blkback/vbd.c
++ *
++ * Routines for managing virtual block devices (VBDs).
++ *
++ * Copyright (c) 2003-2005, Keir Fraser & Steve Hand
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++
++#define vbd_sz(_v) ((_v)->bdev->bd_part ? \
++ (_v)->bdev->bd_part->nr_sects : (_v)->bdev->bd_disk->capacity)
++
++unsigned long long vbd_size(struct vbd *vbd)
++{
++ return vbd_sz(vbd);
++}
++
++unsigned int vbd_info(struct vbd *vbd)
++{
++ return vbd->type | (vbd->readonly?VDISK_READONLY:0);
++}
++
++unsigned long vbd_secsize(struct vbd *vbd)
++{
++ return bdev_hardsect_size(vbd->bdev);
++}
++
++int vbd_create(blkif_t *blkif, blkif_vdev_t handle, unsigned major,
++ unsigned minor, int readonly, int cdrom)
++{
++ struct vbd *vbd;
++ struct block_device *bdev;
++
++ vbd = &blkif->vbd;
++ vbd->handle = handle;
++ vbd->readonly = readonly;
++ vbd->type = 0;
++
++ vbd->pdevice = MKDEV(major, minor);
++
++ bdev = open_by_devnum(vbd->pdevice,
++ vbd->readonly ? FMODE_READ : FMODE_WRITE);
++
++ if (IS_ERR(bdev)) {
++		DPRINTK("vbd_create: device %08x could not be opened.\n",
++ vbd->pdevice);
++ return -ENOENT;
++ }
++
++ vbd->bdev = bdev;
++
++ if (vbd->bdev->bd_disk == NULL) {
++		DPRINTK("vbd_create: device %08x doesn't exist.\n",
++ vbd->pdevice);
++ vbd_free(vbd);
++ return -ENOENT;
++ }
++
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_CD || cdrom)
++ vbd->type |= VDISK_CDROM;
++ if (vbd->bdev->bd_disk->flags & GENHD_FL_REMOVABLE)
++ vbd->type |= VDISK_REMOVABLE;
++
++ DPRINTK("Successful creation of handle=%04x (dom=%u)\n",
++ handle, blkif->domid);
++ return 0;
++}
++
++void vbd_free(struct vbd *vbd)
++{
++ if (vbd->bdev)
++ blkdev_put(vbd->bdev);
++ vbd->bdev = NULL;
++}
++
++int vbd_translate(struct phys_req *req, blkif_t *blkif, int operation)
++{
++ struct vbd *vbd = &blkif->vbd;
++ int rc = -EACCES;
++
++ if ((operation != READ) && vbd->readonly)
++ goto out;
++
++ if (unlikely((req->sector_number + req->nr_sects) > vbd_sz(vbd)))
++ goto out;
++
++ req->dev = vbd->pdevice;
++ req->bdev = vbd->bdev;
++ rc = 0;
++
++ out:
++ return rc;
++}
+Index: head-2008-11-25/drivers/xen/blkback/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkback/xenbus.c 2008-05-08 14:02:04.000000000 +0200
+@@ -0,0 +1,541 @@
++/* Xenbus code for blkif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include "common.h"
++
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ pr_debug("blkback/xenbus (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ unsigned major;
++ unsigned minor;
++ char *mode;
++};
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static void backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++
++static int blkback_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blkback.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++static void update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if (!blkif->irq || !blkif->vbd.bdev)
++ return;
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blkback_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blkback dev name");
++ return;
++ }
++
++ blkif->xenblkd = kthread_run(blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_error(blkif->be->dev, err, "start xenblkd");
++ }
++}
++
++
++/****************************************************************
++ * sysfs interface for VBD I/O requests
++ */
++
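++/*
++ * Each VBD_SHOW() use below expands into a read-only sysfs attribute
++ * whose show routine formats one statistic from the backend's blkif.
++ */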
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(br_req, "%d\n", be->blkif->st_br_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *vbdstat_attrs[] = {
++ &dev_attr_oo_req.attr,
++ &dev_attr_rd_req.attr,
++ &dev_attr_wr_req.attr,
++ &dev_attr_br_req.attr,
++ &dev_attr_rd_sect.attr,
++ &dev_attr_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group vbdstat_group = {
++ .name = "statistics",
++ .attrs = vbdstat_attrs,
++};
++
++VBD_SHOW(physical_device, "%x:%x\n", be->major, be->minor);
++VBD_SHOW(mode, "%s\n", be->mode);
++
++int xenvbd_sysfs_addif(struct xenbus_device *dev)
++{
++ int error;
++
++ error = device_create_file(&dev->dev, &dev_attr_physical_device);
++ if (error)
++ goto fail1;
++
++ error = device_create_file(&dev->dev, &dev_attr_mode);
++ if (error)
++ goto fail2;
++
++ error = sysfs_create_group(&dev->dev.kobj, &vbdstat_group);
++ if (error)
++ goto fail3;
++
++ return 0;
++
++fail3: sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++fail2: device_remove_file(&dev->dev, &dev_attr_mode);
++fail1: device_remove_file(&dev->dev, &dev_attr_physical_device);
++ return error;
++}
++
++void xenvbd_sysfs_delif(struct xenbus_device *dev)
++{
++ sysfs_remove_group(&dev->dev.kobj, &vbdstat_group);
++ device_remove_file(&dev->dev, &dev_attr_mode);
++ device_remove_file(&dev->dev, &dev_attr_physical_device);
++}
++
++static int blkback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("");
++
++ if (be->major || be->minor)
++ xenvbd_sysfs_delif(dev);
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++
++ if (be->blkif) {
++ blkif_disconnect(be->blkif);
++ vbd_free(&be->blkif->vbd);
++ blkif_free(be->blkif);
++ be->blkif = NULL;
++ }
++
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++int blkback_barrier(struct xenbus_transaction xbt,
++ struct backend_info *be, int state)
++{
++ struct xenbus_device *dev = be->dev;
++ int err;
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
++ "%d", state);
++ if (err)
++ xenbus_dev_fatal(dev, err, "writing feature-barrier");
++
++ return err;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures, and watch the store waiting for the hotplug scripts to tell us
++ * the device's physical major and minor numbers. Switch to InitWait.
++ */
++static int blkback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ be->blkif = blkif_alloc(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename, "physical-device",
++ &be->backend_watch, backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ return 0;
++
++fail:
++ DPRINTK("failed");
++ blkback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the hotplug scripts have placed the physical-device
++ * node. Read it and the mode node, and create a vbd. If the frontend is
++ * ready, connect.
++ */
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned major;
++ unsigned minor;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++ int cdrom = 0;
++ char *device_type;
++
++ DPRINTK("");
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "physical-device", "%x:%x",
++ &major, &minor);
++ if (XENBUS_EXIST_ERR(err)) {
++ /* Since this watch will fire once immediately after it is
++ registered, we expect this. Ignore it, and wait for the
++ hotplug scripts. */
++ return;
++ }
++ if (err != 2) {
++ xenbus_dev_fatal(dev, err, "reading physical-device");
++ return;
++ }
++
++ if ((be->major || be->minor) &&
++ ((be->major != major) || (be->minor != minor))) {
++ printk(KERN_WARNING
++ "blkback: changing physical device (from %x:%x to "
++ "%x:%x) not supported.\n", be->major, be->minor,
++ major, minor);
++ return;
++ }
++
++ be->mode = xenbus_read(XBT_NIL, dev->nodename, "mode", NULL);
++ if (IS_ERR(be->mode)) {
++ err = PTR_ERR(be->mode);
++ be->mode = NULL;
++ xenbus_dev_fatal(dev, err, "reading mode");
++ return;
++ }
++
++ device_type = xenbus_read(XBT_NIL, dev->otherend, "device-type", NULL);
++ if (!IS_ERR(device_type)) {
++ cdrom = strcmp(device_type, "cdrom") == 0;
++ kfree(device_type);
++ }
++
++ if (be->major == 0 && be->minor == 0) {
++ /* Front end dir is a number, which is used as the handle. */
++
++ char *p = strrchr(dev->otherend, '/') + 1;
++ long handle = simple_strtoul(p, NULL, 0);
++
++ be->major = major;
++ be->minor = minor;
++
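++ /* The vbd is read-only unless the hotplug-written mode contains 'w'. */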
++ err = vbd_create(be->blkif, handle, major, minor,
++ (NULL == strchr(be->mode, 'w')), cdrom);
++ if (err) {
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating vbd structure");
++ return;
++ }
++
++ err = xenvbd_sysfs_addif(dev);
++ if (err) {
++ vbd_free(&be->blkif->vbd);
++ be->major = be->minor = 0;
++ xenbus_dev_fatal(dev, err, "creating sysfs entries");
++ return;
++ }
++
++ /* We're potentially connected now */
++ update_blkif_status(be->blkif);
++ }
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in
++ close succession and we miss the intermediate value
++ of frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ blkif_disconnect(be->blkif);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/**
++ * Write the physical details regarding the block device to the store, and
++ * switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ DPRINTK("%s", dev->otherend);
++
++ /* Supply the information about the device the frontend needs */
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ return;
++ }
++
++ err = blkback_barrier(xbt, be, 1);
++ if (err)
++ goto abort;
++
++ err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
++ vbd_size(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sectors",
++ dev->nodename);
++ goto abort;
++ }
++
++ /* FIXME: use a typename instead */
++ err = xenbus_printf(xbt, dev->nodename, "info", "%u",
++ vbd_info(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/info",
++ dev->nodename);
++ goto abort;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "sector-size", "%lu",
++ vbd_secsize(&be->blkif->vbd));
++ if (err) {
++ xenbus_dev_fatal(dev, err, "writing %s/sector-size",
++ dev->nodename);
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(dev, err, "ending transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(dev, err, "switching to Connected state",
++ dev->nodename);
++
++ return;
++ abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64] = "";
++ int err;
++
++ DPRINTK("%s", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
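++ /*
++ * Negotiate the ring ABI: assume the native protocol unless the
++ * frontend's optional "protocol" node names the x86_32 or x86_64
++ * variant.
++ */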
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++ xenbus_dev_fatal(dev, err, "unknown fe protocol %s", protocol);
++ return -1;
++ }
++ printk(KERN_INFO
++ "blkback: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blkback_ids[] = {
++ { "vbd" },
++ { "" }
++};
++
++
++static struct xenbus_driver blkback = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkback_ids,
++ .probe = blkback_probe,
++ .remove = blkback_remove,
++ .otherend_changed = frontend_changed
++};
++
++
++void blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blkback);
++}
+Index: head-2008-11-25/drivers/xen/blkfront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkfront/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,5 @@
++
++obj-$(CONFIG_XEN_BLKDEV_FRONTEND) := xenblk.o
++
++xenblk-objs := blkfront.o vbd.o
++
+Index: head-2008-11-25/drivers/xen/blkfront/blkfront.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkfront/blkfront.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,936 @@
++/******************************************************************************
++ * blkfront.c
++ *
++ * XenLinux virtual block-device driver.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004, Christian Limpach
++ * Copyright (c) 2004, Andrew Warfield
++ * Copyright (c) 2005, Christopher Clark
++ * Copyright (c) 2005, XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include "block.h"
++#include <linux/cdrom.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <scsi/scsi.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <asm/maddr.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_STATE_DISCONNECTED 0
++#define BLKIF_STATE_CONNECTED 1
++#define BLKIF_STATE_SUSPENDED 2
++
++#define MAXIMUM_OUTSTANDING_BLOCK_REQS \
++ (BLKIF_MAX_SEGMENTS_PER_REQUEST * BLK_RING_SIZE)
++#define GRANT_INVALID_REF 0
++
++static void connect(struct blkfront_info *);
++static void blkfront_closing(struct xenbus_device *);
++static int blkfront_remove(struct xenbus_device *);
++static int talk_to_backend(struct xenbus_device *, struct blkfront_info *);
++static int setup_blkring(struct xenbus_device *, struct blkfront_info *);
++
++static void kick_pending_request_queues(struct blkfront_info *);
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++static void blkif_restart_queue(void *arg);
++static void blkif_recover(struct blkfront_info *);
++static void blkif_completion(struct blk_shadow *);
++static void blkif_free(struct blkfront_info *, int);
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffer for communication with the backend, and
++ * inform the backend of the appropriate details for those. Switch to
++ * Initialised state.
++ */
++static int blkfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err, vdevice, i;
++ struct blkfront_info *info;
++
++ /* FIXME: Use dynamic device id if this is not set. */
++ err = xenbus_scanf(XBT_NIL, dev->nodename,
++ "virtual-device", "%i", &vdevice);
++ if (err != 1) {
++ /* go looking in the extended area instead */
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "virtual-device-ext",
++ "%i", &vdevice);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading virtual-device");
++ return err;
++ }
++ }
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++
++ info->xbdev = dev;
++ info->vdevice = vdevice;
++ info->connected = BLKIF_STATE_DISCONNECTED;
++ INIT_WORK(&info->work, blkif_restart_queue, (void *)info);
++
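++ /* Chain shadow entries into a free list via req.id; the tail gets
++ a sentinel id. */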
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Front end dir is a number, which is used as the id. */
++ info->handle = simple_strtoul(strrchr(dev->nodename,'/')+1, NULL, 0);
++ dev->dev.driver_data = info;
++
++ err = talk_to_backend(dev, info);
++ if (err) {
++ kfree(info);
++ dev->dev.driver_data = NULL;
++ return err;
++ }
++
++ return 0;
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our blkif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int blkfront_resume(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("blkfront_resume: %s\n", dev->nodename);
++
++ blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
++
++ err = talk_to_backend(dev, info);
++ if (info->connected == BLKIF_STATE_SUSPENDED && !err)
++ blkif_recover(info);
++
++ return err;
++}
++
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ const char *message = NULL;
++ struct xenbus_transaction xbt;
++ int err;
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_blkring(dev, info);
++ if (err)
++ goto out;
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_blkring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename,
++ "ring-ref","%u", info->ring_ref);
++ if (err) {
++ message = "writing ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (err) {
++ message = "writing protocol";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_blkring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ if (message)
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_blkring:
++ blkif_free(info, 0);
++ out:
++ return err;
++}
++
++
++static int setup_blkring(struct xenbus_device *dev,
++ struct blkfront_info *info)
++{
++ blkif_sring_t *sring;
++ int err;
++
++ info->ring_ref = GRANT_INVALID_REF;
++
++ sring = (blkif_sring_t *)__get_free_page(GFP_NOIO | __GFP_HIGH);
++ if (!sring) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating shared ring");
++ return -ENOMEM;
++ }
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(info->ring.sring));
++ if (err < 0) {
++ free_page((unsigned long)sring);
++ info->ring.sring = NULL;
++ goto fail;
++ }
++ info->ring_ref = err;
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, blkif_int, SA_SAMPLE_RANDOM, "blkif", info);
++ if (err <= 0) {
++ xenbus_dev_fatal(dev, err,
++ "bind_listening_port_to_irqhandler");
++ goto fail;
++ }
++ info->irq = err;
++
++ return 0;
++fail:
++ blkif_free(info, 0);
++ return err;
++}
++
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ struct block_device *bd;
++
++ DPRINTK("blkfront:backend_changed.\n");
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateConnected:
++ connect(info);
++ break;
++
++ case XenbusStateClosing:
++ bd = bdget(info->dev);
++ if (bd == NULL) {
++ xenbus_dev_fatal(dev, -ENODEV, "bdget failed");
++ break;
++ }
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ down(&bd->bd_sem);
++#else
++ mutex_lock(&bd->bd_mutex);
++#endif
++ if (info->users > 0)
++ xenbus_dev_error(dev, -EBUSY,
++ "Device in use; refusing to close");
++ else
++ blkfront_closing(dev);
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
++ up(&bd->bd_sem);
++#else
++ mutex_unlock(&bd->bd_mutex);
++#endif
++ bdput(bd);
++ break;
++ }
++}
++
++
++/* ** Connection ** */
++
++
++/*
++ * Invoked when the backend is finally 'ready' (and has produced the
++ * details about the physical device - #sectors, size, etc.).
++ */
++static void connect(struct blkfront_info *info)
++{
++ unsigned long long sectors;
++ unsigned long sector_size;
++ unsigned int binfo;
++ int err;
++
++ if ((info->connected == BLKIF_STATE_CONNECTED) ||
++ (info->connected == BLKIF_STATE_SUSPENDED) )
++ return;
++
++ DPRINTK("blkfront.c:connect:%s.\n", info->xbdev->otherend);
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "sectors", "%Lu", §ors,
++ "info", "%u", &binfo,
++ "sector-size", "%lu", §or_size,
++ NULL);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err,
++ "reading backend fields at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
++ "feature-barrier", "%lu", &info->feature_barrier,
++ NULL);
++ if (err)
++ info->feature_barrier = 0;
++
++ err = xlvbd_add(sectors, info->vdevice, binfo, sector_size, info);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err, "xlvbd_add at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ err = xlvbd_sysfs_addif(info);
++ if (err) {
++ xenbus_dev_fatal(info->xbdev, err, "xlvbd_sysfs_addif at %s",
++ info->xbdev->otherend);
++ return;
++ }
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ /* Kick pending requests. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = BLKIF_STATE_CONNECTED;
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++
++ add_disk(info->gd);
++
++ info->is_ready = 1;
++}
++
++/**
++ * Handle the change of state of the backend to Closing. We must delete our
++ * device-layer structures now, to ensure that writes are flushed through to
++ * the backend. Once this is done, we can switch to Closed in
++ * acknowledgement.
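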
++ */
++static void blkfront_closing(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++ unsigned long flags;
++
++ DPRINTK("blkfront_closing: %s removed\n", dev->nodename);
++
++ if (info->rq == NULL)
++ goto out;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++ /* No more blkif_request(). */
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ xlvbd_sysfs_delif(info);
++
++ xlvbd_del(info);
++
++ out:
++ xenbus_frontend_closed(dev);
++}
++
++
++static int blkfront_remove(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("blkfront_remove: %s removed\n", dev->nodename);
++
++ blkif_free(info, 0);
++
++ kfree(info);
++
++ return 0;
++}
++
++
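++/*
++ * The shadow free list threads through the req.id field of unused
++ * entries; info->shadow_free holds the index of the list head.
++ */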
++static inline int GET_ID_FROM_FREELIST(
++ struct blkfront_info *info)
++{
++ unsigned long free = info->shadow_free;
++ BUG_ON(free > BLK_RING_SIZE);
++ info->shadow_free = info->shadow[free].req.id;
++ info->shadow[free].req.id = 0x0fffffee; /* debug */
++ return free;
++}
++
++static inline void ADD_ID_TO_FREELIST(
++ struct blkfront_info *info, unsigned long id)
++{
++ info->shadow[id].req.id = info->shadow_free;
++ info->shadow[id].request = 0;
++ info->shadow_free = id;
++}
++
++static inline void flush_requests(struct blkfront_info *info)
++{
++ int notify;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&info->ring, notify);
++
++ if (notify)
++ notify_remote_via_irq(info->irq);
++}
++
++static void kick_pending_request_queues(struct blkfront_info *info)
++{
++ if (!RING_FULL(&info->ring)) {
++ /* Re-enable calldowns. */
++ blk_start_queue(info->rq);
++ /* Kick things off immediately. */
++ do_blkif_request(info->rq);
++ }
++}
++
++static void blkif_restart_queue(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ spin_lock_irq(&blkif_io_lock);
++ if (info->connected == BLKIF_STATE_CONNECTED)
++ kick_pending_request_queues(info);
++ spin_unlock_irq(&blkif_io_lock);
++}
++
++static void blkif_restart_queue_callback(void *arg)
++{
++ struct blkfront_info *info = (struct blkfront_info *)arg;
++ schedule_work(&info->work);
++}
++
++int blkif_open(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users++;
++ return 0;
++}
++
++
++int blkif_release(struct inode *inode, struct file *filep)
++{
++ struct blkfront_info *info = inode->i_bdev->bd_disk->private_data;
++ info->users--;
++ if (info->users == 0) {
++ /* Check whether we have been instructed to close. We will
++ have ignored this request initially, as the device was
++ still mounted. */
++ struct xenbus_device * dev = info->xbdev;
++ enum xenbus_state state = xenbus_read_driver_state(dev->otherend);
++
++ if (state == XenbusStateClosing && info->is_ready)
++ blkfront_closing(dev);
++ }
++ return 0;
++}
++
++
++int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument)
++{
++ int i;
++
++ DPRINTK_IOCTL("command: 0x%x, argument: 0x%lx, dev: 0x%04x\n",
++ command, (long)argument, inode->i_rdev);
++
++ switch (command) {
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16)
++ case HDIO_GETGEO: {
++ struct block_device *bd = inode->i_bdev;
++ struct hd_geometry geo;
++ int ret;
++
++ if (!argument)
++ return -EINVAL;
++
++ geo.start = get_start_sect(bd);
++ ret = blkif_getgeo(bd, &geo);
++ if (ret)
++ return ret;
++
++ if (copy_to_user((struct hd_geometry __user *)argument, &geo,
++ sizeof(geo)))
++ return -EFAULT;
++
++ return 0;
++ }
++#endif
++ case CDROMMULTISESSION:
++ DPRINTK("FIXME: support multisession CDs later\n");
++ for (i = 0; i < sizeof(struct cdrom_multisession); i++)
++ if (put_user(0, (char __user *)(argument + i)))
++ return -EFAULT;
++ return 0;
++
++ case CDROM_GET_CAPABILITY: {
++ struct blkfront_info *info =
++ inode->i_bdev->bd_disk->private_data;
++ struct gendisk *gd = info->gd;
++ if (gd->flags & GENHD_FL_CD)
++ return 0;
++ return -EINVAL;
++ }
++ default:
++ /*printk(KERN_ALERT "ioctl %08x not supported by Xen blkdev\n",
++ command);*/
++ return -EINVAL; /* same return as native Linux */
++ }
++
++ return 0;
++}
++
++
++int blkif_getgeo(struct block_device *bd, struct hd_geometry *hg)
++{
++ /* We don't have real geometry info, but let's at least return
++ values consistent with the size of the device */
++ sector_t nsect = get_capacity(bd->bd_disk);
++ sector_t cylinders = nsect;
++
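++ /* 255 heads and 63 sectors per track is the conventional fiction. */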
++ hg->heads = 0xff;
++ hg->sectors = 0x3f;
++ sector_div(cylinders, hg->heads * hg->sectors);
++ hg->cylinders = cylinders;
++ if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
++ hg->cylinders = 0xffff;
++ return 0;
++}
++
++
++/*
++ * blkif_queue_request
++ *
++ * request block io
++ *
++ * id: for guest use only.
++ * operation: BLKIF_OP_{READ,WRITE,PROBE}
++ * buffer: buffer to read/write into. this should be a
++ * virtual address in the guest os.
++ */
++static int blkif_queue_request(struct request *req)
++{
++ struct blkfront_info *info = req->rq_disk->private_data;
++ unsigned long buffer_mfn;
++ blkif_request_t *ring_req;
++ struct bio *bio;
++ struct bio_vec *bvec;
++ int idx;
++ unsigned long id;
++ unsigned int fsect, lsect;
++ int ref;
++ grant_ref_t gref_head;
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED))
++ return 1;
++
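++ /*
++ * Reserve the worst-case number of grant references up front; if
++ * none are left, ask gnttab to call us back once enough are freed
++ * and let the caller requeue this request.
++ */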
++ if (gnttab_alloc_grant_references(
++ BLKIF_MAX_SEGMENTS_PER_REQUEST, &gref_head) < 0) {
++ gnttab_request_free_callback(
++ &info->callback,
++ blkif_restart_queue_callback,
++ info,
++ BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ return 1;
++ }
++
++ /* Fill out a communications ring structure. */
++ ring_req = RING_GET_REQUEST(&info->ring, info->ring.req_prod_pvt);
++ id = GET_ID_FROM_FREELIST(info);
++ info->shadow[id].request = (unsigned long)req;
++
++ ring_req->id = id;
++ ring_req->sector_number = (blkif_sector_t)req->sector;
++ ring_req->handle = info->handle;
++
++ ring_req->operation = rq_data_dir(req) ?
++ BLKIF_OP_WRITE : BLKIF_OP_READ;
++ if (blk_barrier_rq(req))
++ ring_req->operation = BLKIF_OP_WRITE_BARRIER;
++
++ ring_req->nr_segments = 0;
++ rq_for_each_bio (bio, req) {
++ bio_for_each_segment (bvec, bio, idx) {
++ BUG_ON(ring_req->nr_segments
++ == BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ buffer_mfn = page_to_phys(bvec->bv_page) >> PAGE_SHIFT;
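++ /* first and last 512-byte sectors of this segment in its page */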
++ fsect = bvec->bv_offset >> 9;
++ lsect = fsect + (bvec->bv_len >> 9) - 1;
++ /* install a grant reference. */
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(
++ ref,
++ info->xbdev->otherend_id,
++ buffer_mfn,
++ rq_data_dir(req) ? GTF_readonly : 0 );
++
++ info->shadow[id].frame[ring_req->nr_segments] =
++ mfn_to_pfn(buffer_mfn);
++
++ ring_req->seg[ring_req->nr_segments] =
++ (struct blkif_request_segment) {
++ .gref = ref,
++ .first_sect = fsect,
++ .last_sect = lsect };
++
++ ring_req->nr_segments++;
++ }
++ }
++
++ info->ring.req_prod_pvt++;
++
++ /* Keep a private copy so we can reissue requests when recovering. */
++ info->shadow[id].req = *ring_req;
++
++ gnttab_free_grant_references(gref_head);
++
++ return 0;
++}
++
++/*
++ * do_blkif_request
++ * read a block; request is in a request queue
++ */
++void do_blkif_request(request_queue_t *rq)
++{
++ struct blkfront_info *info = NULL;
++ struct request *req;
++ int queued;
++
++ DPRINTK("Entered do_blkif_request\n");
++
++ queued = 0;
++
++ while ((req = elv_next_request(rq)) != NULL) {
++ info = req->rq_disk->private_data;
++ if (!blk_fs_request(req)) {
++ end_request(req, 0);
++ continue;
++ }
++
++ if (RING_FULL(&info->ring))
++ goto wait;
++
++ DPRINTK("do_blk_req %p: cmd %p, sec %llx, "
++ "(%u/%li) buffer:%p [%s]\n",
++ req, req->cmd, (long long)req->sector,
++ req->current_nr_sectors,
++ req->nr_sectors, req->buffer,
++ rq_data_dir(req) ? "write" : "read");
++
++
++ blkdev_dequeue_request(req);
++ if (blkif_queue_request(req)) {
++ blk_requeue_request(rq, req);
++ wait:
++ /* Avoid pointless unplugs. */
++ blk_stop_queue(rq);
++ break;
++ }
++
++ queued++;
++ }
++
++ if (queued != 0)
++ flush_requests(info);
++}
++
++
++static irqreturn_t blkif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct request *req;
++ blkif_response_t *bret;
++ RING_IDX i, rp;
++ unsigned long flags;
++ struct blkfront_info *info = (struct blkfront_info *)dev_id;
++ int uptodate;
++
++ spin_lock_irqsave(&blkif_io_lock, flags);
++
++ if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++ return IRQ_HANDLED;
++ }
++
++ again:
++ rp = info->ring.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ for (i = info->ring.rsp_cons; i != rp; i++) {
++ unsigned long id;
++ int ret;
++
++ bret = RING_GET_RESPONSE(&info->ring, i);
++ id = bret->id;
++ req = (struct request *)info->shadow[id].request;
++
++ blkif_completion(&info->shadow[id]);
++
++ ADD_ID_TO_FREELIST(info, id);
++
++ uptodate = (bret->status == BLKIF_RSP_OKAY);
++ switch (bret->operation) {
++ case BLKIF_OP_WRITE_BARRIER:
++ if (unlikely(bret->status == BLKIF_RSP_EOPNOTSUPP)) {
++ printk("blkfront: %s: write barrier op failed\n",
++ info->gd->disk_name);
++ uptodate = -EOPNOTSUPP;
++ info->feature_barrier = 0;
++ xlvbd_barrier(info);
++ }
++ /* fall through */
++ case BLKIF_OP_READ:
++ case BLKIF_OP_WRITE:
++ if (unlikely(bret->status != BLKIF_RSP_OKAY))
++ DPRINTK("Bad return from blkdev data "
++ "request: %x\n", bret->status);
++
++ ret = end_that_request_first(req, uptodate,
++ req->hard_nr_sectors);
++ BUG_ON(ret);
++ end_that_request_last(req, uptodate);
++ break;
++ default:
++ BUG();
++ }
++ }
++
++ info->ring.rsp_cons = i;
++
++ if (i != info->ring.req_prod_pvt) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++ if (more_to_do)
++ goto again;
++ } else
++ info->ring.sring->rsp_event = i + 1;
++
++ kick_pending_request_queues(info);
++
++ spin_unlock_irqrestore(&blkif_io_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static void blkif_free(struct blkfront_info *info, int suspend)
++{
++ /* Prevent new requests being issued until we fix things up. */
++ spin_lock_irq(&blkif_io_lock);
++ info->connected = suspend ?
++ BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
++ /* No more blkif_request(). */
++ if (info->rq)
++ blk_stop_queue(info->rq);
++ /* No more gnttab callback work. */
++ gnttab_cancel_free_callback(&info->callback);
++ spin_unlock_irq(&blkif_io_lock);
++
++ /* Flush gnttab callback work. Must be done with no locks held. */
++ flush_scheduled_work();
++
++ /* Free resources associated with old device channel. */
++ if (info->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(info->ring_ref,
++ (unsigned long)info->ring.sring);
++ info->ring_ref = GRANT_INVALID_REF;
++ info->ring.sring = NULL;
++ }
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++}
++
++static void blkif_completion(struct blk_shadow *s)
++{
++ int i;
++ for (i = 0; i < s->req.nr_segments; i++)
++ gnttab_end_foreign_access(s->req.seg[i].gref, 0UL);
++}
++
++static void blkif_recover(struct blkfront_info *info)
++{
++ int i;
++ blkif_request_t *req;
++ struct blk_shadow *copy;
++ int j;
++
++ /* Stage 1: Make a safe copy of the shadow state. */
++ copy = kmalloc(sizeof(info->shadow), GFP_NOIO | __GFP_NOFAIL | __GFP_HIGH);
++ memcpy(copy, info->shadow, sizeof(info->shadow));
++
++ /* Stage 2: Set up free list. */
++ memset(&info->shadow, 0, sizeof(info->shadow));
++ for (i = 0; i < BLK_RING_SIZE; i++)
++ info->shadow[i].req.id = i+1;
++ info->shadow_free = info->ring.req_prod_pvt;
++ info->shadow[BLK_RING_SIZE-1].req.id = 0x0fffffff;
++
++ /* Stage 3: Find pending requests and requeue them. */
++ for (i = 0; i < BLK_RING_SIZE; i++) {
++ /* Not in use? */
++ if (copy[i].request == 0)
++ continue;
++
++ /* Grab a request slot and copy shadow state into it. */
++ req = RING_GET_REQUEST(
++ &info->ring, info->ring.req_prod_pvt);
++ *req = copy[i].req;
++
++ /* We get a new request id, and must reset the shadow state. */
++ req->id = GET_ID_FROM_FREELIST(info);
++ memcpy(&info->shadow[req->id], ©[i], sizeof(copy[i]));
++
++ /* Rewrite any grant references invalidated by susp/resume. */
++ for (j = 0; j < req->nr_segments; j++)
++ gnttab_grant_foreign_access_ref(
++ req->seg[j].gref,
++ info->xbdev->otherend_id,
++ pfn_to_mfn(info->shadow[req->id].frame[j]),
++ rq_data_dir((struct request *)
++ info->shadow[req->id].request) ?
++ GTF_readonly : 0);
++ info->shadow[req->id].req = *req;
++
++ info->ring.req_prod_pvt++;
++ }
++
++ kfree(copy);
++
++ (void)xenbus_switch_state(info->xbdev, XenbusStateConnected);
++
++ spin_lock_irq(&blkif_io_lock);
++
++ /* Now safe for us to use the shared ring */
++ info->connected = BLKIF_STATE_CONNECTED;
++
++ /* Send off requeued requests */
++ flush_requests(info);
++
++ /* Kick any other new requests queued since we resumed */
++ kick_pending_request_queues(info);
++
++ spin_unlock_irq(&blkif_io_lock);
++}
++
++int blkfront_is_ready(struct xenbus_device *dev)
++{
++ struct blkfront_info *info = dev->dev.driver_data;
++
++ return info->is_ready;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blkfront_ids[] = {
++ { "vbd" },
++ { "" }
++};
++MODULE_ALIAS("xen:vbd");
++
++static struct xenbus_driver blkfront = {
++ .name = "vbd",
++ .owner = THIS_MODULE,
++ .ids = blkfront_ids,
++ .probe = blkfront_probe,
++ .remove = blkfront_remove,
++ .resume = blkfront_resume,
++ .otherend_changed = backend_changed,
++ .is_ready = blkfront_is_ready,
++};
++
++
++static int __init xlblk_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&blkfront);
++}
++module_init(xlblk_init);
++
++
++static void __exit xlblk_exit(void)
++{
++ return xenbus_unregister_driver(&blkfront);
++}
++module_exit(xlblk_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/blkfront/block.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkfront/block.h 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,158 @@
++/******************************************************************************
++ * block.h
++ *
++ * Shared definitions between all levels of XenLinux Virtual block devices.
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_BLOCK_H__
++#define __XEN_DRIVERS_BLOCK_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/hdreg.h>
++#include <linux/blkdev.h>
++#include <linux/major.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/blkif.h>
++#include <xen/interface/io/ring.h>
++#include <asm/io.h>
++#include <asm/atomic.h>
++#include <asm/uaccess.h>
++
++#define DPRINTK(_f, _a...) pr_debug(_f, ## _a)
++
++#if 0
++#define DPRINTK_IOCTL(_f, _a...) printk(KERN_ALERT _f, ## _a)
++#else
++#define DPRINTK_IOCTL(_f, _a...) ((void)0)
++#endif
++
++struct xlbd_type_info
++{
++ int partn_shift;
++ int disks_per_major;
++ char *devname;
++ char *diskname;
++};
++
++struct xlbd_major_info
++{
++ int major;
++ int index;
++ int usage;
++ struct xlbd_type_info *type;
++};
++
++struct blk_shadow {
++ blkif_request_t req;
++ unsigned long request;
++ unsigned long frame[BLKIF_MAX_SEGMENTS_PER_REQUEST];
++};
++
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++
++/*
++ * We have one of these per vbd, whether ide, scsi or 'other'. They
++ * hang in private_data off the gendisk structure. We may end up
++ * putting all kinds of interesting stuff here :-)
++ */
++struct blkfront_info
++{
++ struct xenbus_device *xbdev;
++ dev_t dev;
++ struct gendisk *gd;
++ int vdevice;
++ blkif_vdev_t handle;
++ int connected;
++ int ring_ref;
++ blkif_front_ring_t ring;
++ unsigned int irq;
++ struct xlbd_major_info *mi;
++ request_queue_t *rq;
++ struct work_struct work;
++ struct gnttab_free_callback callback;
++ struct blk_shadow shadow[BLK_RING_SIZE];
++ unsigned long shadow_free;
++ int feature_barrier;
++ int is_ready;
++
++ /**
++ * The number of people holding this device open. We won't allow a
++ * hot-unplug unless this is 0.
++ */
++ int users;
++};
++
++extern spinlock_t blkif_io_lock;
++
++extern int blkif_open(struct inode *inode, struct file *filep);
++extern int blkif_release(struct inode *inode, struct file *filep);
++extern int blkif_ioctl(struct inode *inode, struct file *filep,
++ unsigned command, unsigned long argument);
++extern int blkif_getgeo(struct block_device *, struct hd_geometry *);
++extern int blkif_check(dev_t dev);
++extern int blkif_revalidate(dev_t dev);
++extern void do_blkif_request (request_queue_t *rq);
++
++/* Virtual block-device subsystem. */
++/* Note that xlvbd_add doesn't call add_disk for you: you're expected
++ to call add_disk on info->gd once the disk is properly connected
++ up. */
++int xlvbd_add(blkif_sector_t capacity, int device,
++ u16 vdisk_info, u16 sector_size, struct blkfront_info *info);
++void xlvbd_del(struct blkfront_info *info);
++int xlvbd_barrier(struct blkfront_info *info);
++
++#ifdef CONFIG_SYSFS
++int xlvbd_sysfs_addif(struct blkfront_info *info);
++void xlvbd_sysfs_delif(struct blkfront_info *info);
++#else
++static inline int xlvbd_sysfs_addif(struct blkfront_info *info)
++{
++ return 0;
++}
++
++static inline void xlvbd_sysfs_delif(struct blkfront_info *info)
++{
++ ;
++}
++#endif
++
++#endif /* __XEN_DRIVERS_BLOCK_H__ */
+Index: head-2008-11-25/drivers/xen/blkfront/vbd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blkfront/vbd.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,460 @@
++/******************************************************************************
++ * vbd.c
++ *
++ * XenLinux virtual block-device driver (xvd).
++ *
++ * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
++ * Modifications by Mark A. Williamson are (c) Intel Research Cambridge
++ * Copyright (c) 2004-2005, Christian Limpach
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "block.h"
++#include <linux/blkdev.h>
++#include <linux/list.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++#define BLKIF_MAJOR(dev) ((dev)>>8)
++#define BLKIF_MINOR(dev) ((dev) & 0xff)
++
++#define EXT_SHIFT 28
++#define EXTENDED (1<<EXT_SHIFT)
++#define VDEV_IS_EXTENDED(dev) ((dev)&(EXTENDED))
++#define BLKIF_MINOR_EXT(dev) ((dev)&(~EXTENDED))
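++/* An extended vdevice sets bit 28 and keeps the minor in the low 28 bits. */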
++
++/*
++ * For convenience we distinguish between ide, scsi and 'other' (i.e.,
++ * potentially combinations of the two) in the naming scheme and in a few other
++ * places.
++ */
++
++#define NUM_IDE_MAJORS 10
++#define NUM_SCSI_MAJORS 17
++#define NUM_VBD_MAJORS 2
++
++static struct xlbd_type_info xlbd_ide_type = {
++ .partn_shift = 6,
++ .disks_per_major = 2,
++ .devname = "ide",
++ .diskname = "hd",
++};
++
++static struct xlbd_type_info xlbd_scsi_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "sd",
++ .diskname = "sd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type = {
++ .partn_shift = 4,
++ .disks_per_major = 16,
++ .devname = "xvd",
++ .diskname = "xvd",
++};
++
++static struct xlbd_type_info xlbd_vbd_type_ext = {
++ .partn_shift = 8,
++ .disks_per_major = 256,
++ .devname = "xvd",
++ .diskname = "xvd",
++};
++
++static struct xlbd_major_info *major_info[NUM_IDE_MAJORS + NUM_SCSI_MAJORS +
++ NUM_VBD_MAJORS];
++
++#define XLBD_MAJOR_IDE_START 0
++#define XLBD_MAJOR_SCSI_START (NUM_IDE_MAJORS)
++#define XLBD_MAJOR_VBD_START (NUM_IDE_MAJORS + NUM_SCSI_MAJORS)
++
++#define XLBD_MAJOR_IDE_RANGE XLBD_MAJOR_IDE_START ... XLBD_MAJOR_SCSI_START - 1
++#define XLBD_MAJOR_SCSI_RANGE XLBD_MAJOR_SCSI_START ... XLBD_MAJOR_VBD_START - 1
++#define XLBD_MAJOR_VBD_RANGE XLBD_MAJOR_VBD_START ... XLBD_MAJOR_VBD_START + NUM_VBD_MAJORS - 1
++
++static struct block_device_operations xlvbd_block_fops =
++{
++ .owner = THIS_MODULE,
++ .open = blkif_open,
++ .release = blkif_release,
++ .ioctl = blkif_ioctl,
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++ .getgeo = blkif_getgeo
++#endif
++};
++
++DEFINE_SPINLOCK(blkif_io_lock);
++
++static struct xlbd_major_info *
++xlbd_alloc_major_info(int major, int minor, int index)
++{
++ struct xlbd_major_info *ptr;
++ int do_register;
++
++ ptr = kzalloc(sizeof(struct xlbd_major_info), GFP_KERNEL);
++ if (ptr == NULL)
++ return NULL;
++
++ ptr->major = major;
++ do_register = 1;
++
++ switch (index) {
++ case XLBD_MAJOR_IDE_RANGE:
++ ptr->type = &xlbd_ide_type;
++ ptr->index = index - XLBD_MAJOR_IDE_START;
++ break;
++ case XLBD_MAJOR_SCSI_RANGE:
++ ptr->type = &xlbd_scsi_type;
++ ptr->index = index - XLBD_MAJOR_SCSI_START;
++ break;
++ case XLBD_MAJOR_VBD_RANGE:
++ ptr->index = 0;
++ if ((index - XLBD_MAJOR_VBD_START) == 0)
++ ptr->type = &xlbd_vbd_type;
++ else
++ ptr->type = &xlbd_vbd_type_ext;
++
++ /*
++ * if someone already registered block major 202,
++ * don't try to register it again
++ */
++ if (major_info[XLBD_MAJOR_VBD_START] != NULL)
++ do_register = 0;
++ break;
++ }
++
++ if (do_register) {
++ if (register_blkdev(ptr->major, ptr->type->devname)) {
++ kfree(ptr);
++ return NULL;
++ }
++
++ printk("xen-vbd: registered block device major %i\n", ptr->major);
++ }
++
++ major_info[index] = ptr;
++ return ptr;
++}
++
++static struct xlbd_major_info *
++xlbd_get_major_info(int major, int minor, int vdevice)
++{
++ struct xlbd_major_info *mi;
++ int index;
++
++ switch (major) {
++ case IDE0_MAJOR: index = 0; break;
++ case IDE1_MAJOR: index = 1; break;
++ case IDE2_MAJOR: index = 2; break;
++ case IDE3_MAJOR: index = 3; break;
++ case IDE4_MAJOR: index = 4; break;
++ case IDE5_MAJOR: index = 5; break;
++ case IDE6_MAJOR: index = 6; break;
++ case IDE7_MAJOR: index = 7; break;
++ case IDE8_MAJOR: index = 8; break;
++ case IDE9_MAJOR: index = 9; break;
++ case SCSI_DISK0_MAJOR: index = 10; break;
++ case SCSI_DISK1_MAJOR ... SCSI_DISK7_MAJOR:
++ index = 11 + major - SCSI_DISK1_MAJOR;
++ break;
++ case SCSI_DISK8_MAJOR ... SCSI_DISK15_MAJOR:
++ index = 18 + major - SCSI_DISK8_MAJOR;
++ break;
++ case SCSI_CDROM_MAJOR: index = 26; break;
++ default:
++ if (!VDEV_IS_EXTENDED(vdevice))
++ index = 27;
++ else
++ index = 28;
++ break;
++ }
++
++ mi = ((major_info[index] != NULL) ? major_info[index] :
++ xlbd_alloc_major_info(major, minor, index));
++ if (mi)
++ mi->usage++;
++ return mi;
++}
++
++static void
++xlbd_put_major_info(struct xlbd_major_info *mi)
++{
++ mi->usage--;
++ /* XXX: release major if 0 */
++}
++
++static int
++xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
++{
++ request_queue_t *rq;
++
++ rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
++ if (rq == NULL)
++ return -1;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
++ elevator_init(rq, "noop");
++#else
++ elevator_init(rq, &elevator_noop);
++#endif
++
++ /* Hard sector size and max sectors impersonate the equiv. hardware. */
++ blk_queue_hardsect_size(rq, sector_size);
++ blk_queue_max_sectors(rq, 512);
++
++ /* Each segment in a request is up to an aligned page in size. */
++ blk_queue_segment_boundary(rq, PAGE_SIZE - 1);
++ blk_queue_max_segment_size(rq, PAGE_SIZE);
++
++ /* Ensure a merged request will fit in a single I/O ring slot. */
++ blk_queue_max_phys_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++ blk_queue_max_hw_segments(rq, BLKIF_MAX_SEGMENTS_PER_REQUEST);
++
++ /* Make sure buffer addresses are sector-aligned. */
++ blk_queue_dma_alignment(rq, 511);
++
++ /* Make sure we don't use bounce buffers. */
++ blk_queue_bounce_limit(rq, BLK_BOUNCE_ANY);
++
++ gd->queue = rq;
++
++ return 0;
++}
++
++static int
++xlvbd_alloc_gendisk(int major, int minor, blkif_sector_t capacity, int vdevice,
++ u16 vdisk_info, u16 sector_size,
++ struct blkfront_info *info)
++{
++ struct gendisk *gd;
++ struct xlbd_major_info *mi;
++ int nr_minors = 1;
++ int err = -ENODEV;
++ unsigned int offset;
++
++ BUG_ON(info->gd != NULL);
++ BUG_ON(info->mi != NULL);
++ BUG_ON(info->rq != NULL);
++
++ mi = xlbd_get_major_info(major, minor, vdevice);
++ if (mi == NULL)
++ goto out;
++ info->mi = mi;
++
++ if ((minor & ((1 << mi->type->partn_shift) - 1)) == 0)
++ nr_minors = 1 << mi->type->partn_shift;
++
++ gd = alloc_disk(nr_minors);
++ if (gd == NULL)
++ goto out;
++
++ offset = mi->index * mi->type->disks_per_major +
++ (minor >> mi->type->partn_shift);
++ if (nr_minors > 1) {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c",
++ mi->type->diskname, 'a' + offset );
++ }
++ else {
++ sprintf(gd->disk_name, "%s%c%c",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26) );
++ }
++ }
++ else {
++ if (offset < 26) {
++ sprintf(gd->disk_name, "%s%c%d",
++ mi->type->diskname,
++ 'a' + offset,
++ minor & ((1 << mi->type->partn_shift) - 1));
++ }
++ else {
++ sprintf(gd->disk_name, "%s%c%c%d",
++ mi->type->diskname,
++ 'a' + ((offset/26)-1), 'a' + (offset%26),
++ minor & ((1 << mi->type->partn_shift) - 1));
++ }
++ }
++
++ gd->major = mi->major;
++ gd->first_minor = minor;
++ gd->fops = &xlvbd_block_fops;
++ gd->private_data = info;
++ gd->driverfs_dev = &(info->xbdev->dev);
++ set_capacity(gd, capacity);
++
++ if (xlvbd_init_blk_queue(gd, sector_size)) {
++ del_gendisk(gd);
++ goto out;
++ }
++
++ info->rq = gd->queue;
++ info->gd = gd;
++
++ if (info->feature_barrier)
++ xlvbd_barrier(info);
++
++ if (vdisk_info & VDISK_READONLY)
++ set_disk_ro(gd, 1);
++
++ if (vdisk_info & VDISK_REMOVABLE)
++ gd->flags |= GENHD_FL_REMOVABLE;
++
++ if (vdisk_info & VDISK_CDROM)
++ gd->flags |= GENHD_FL_CD;
++
++ return 0;
++
++ out:
++ if (mi)
++ xlbd_put_major_info(mi);
++ info->mi = NULL;
++ return err;
++}
++
++int
++xlvbd_add(blkif_sector_t capacity, int vdevice, u16 vdisk_info,
++ u16 sector_size, struct blkfront_info *info)
++{
++ struct block_device *bd;
++ int err = 0;
++ int major, minor;
++
++ if ((vdevice>>EXT_SHIFT) > 1) {
++ /* this is above the extended range; something is wrong */
++ printk(KERN_WARNING "blkfront: vdevice 0x%x is above the extended range; ignoring\n", vdevice);
++ return -ENODEV;
++ }
++
++ if (!VDEV_IS_EXTENDED(vdevice)) {
++ major = BLKIF_MAJOR(vdevice);
++ minor = BLKIF_MINOR(vdevice);
++ }
++ else {
++ major = 202;
++ minor = BLKIF_MINOR_EXT(vdevice);
++ }
++
++ info->dev = MKDEV(major, minor);
++ bd = bdget(info->dev);
++ if (bd == NULL)
++ return -ENODEV;
++
++ err = xlvbd_alloc_gendisk(major, minor, capacity, vdevice, vdisk_info,
++ sector_size, info);
++
++ bdput(bd);
++ return err;
++}
++
++void
++xlvbd_del(struct blkfront_info *info)
++{
++ if (info->mi == NULL)
++ return;
++
++ BUG_ON(info->gd == NULL);
++ del_gendisk(info->gd);
++ put_disk(info->gd);
++ info->gd = NULL;
++
++ xlbd_put_major_info(info->mi);
++ info->mi = NULL;
++
++ BUG_ON(info->rq == NULL);
++ blk_cleanup_queue(info->rq);
++ info->rq = NULL;
++}
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,16)
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ int err;
++
++ err = blk_queue_ordered(info->rq,
++ info->feature_barrier ? QUEUE_ORDERED_DRAIN : QUEUE_ORDERED_NONE, NULL);
++ if (err)
++ return err;
++ printk(KERN_INFO "blkfront: %s: barriers %s\n",
++ info->gd->disk_name, info->feature_barrier ? "enabled" : "disabled");
++ return 0;
++}
++#else
++int
++xlvbd_barrier(struct blkfront_info *info)
++{
++ printk(KERN_INFO "blkfront: %s: barriers disabled\n", info->gd->disk_name);
++ return -ENOSYS;
++}
++#endif
++
++#ifdef CONFIG_SYSFS
++static ssize_t show_media(struct device *dev,
++ struct device_attribute *attr, char *buf)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct blkfront_info *info = xendev->dev.driver_data;
++
++ if (info->gd->flags & GENHD_FL_CD)
++ return sprintf(buf, "cdrom\n");
++ return sprintf(buf, "disk\n");
++}
++
++static struct device_attribute xlvbd_attrs[] = {
++ __ATTR(media, S_IRUGO, show_media, NULL),
++};
++
++int xlvbd_sysfs_addif(struct blkfront_info *info)
++{
++ int i;
++ int error = 0;
++
++ for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++) {
++ error = device_create_file(info->gd->driverfs_dev,
++ &xlvbd_attrs[i]);
++ if (error)
++ goto fail;
++ }
++ return 0;
++
++fail:
++ while (--i >= 0)
++ device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
++ return error;
++}
++
++void xlvbd_sysfs_delif(struct blkfront_info *info)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(xlvbd_attrs); i++)
++ device_remove_file(info->gd->driverfs_dev, &xlvbd_attrs[i]);
++}
++
++#endif /* CONFIG_SYSFS */
+Index: head-2008-11-25/drivers/xen/blktap/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blktap/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,5 @@
++LINUXINCLUDE += -I../xen/include/public/io
++
++obj-$(CONFIG_XEN_BLKDEV_TAP) := xenblktap.o
++
++xenblktap-y := xenbus.o interface.o blktap.o
+Index: head-2008-11-25/drivers/xen/blktap/blktap.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blktap/blktap.c 2008-11-10 11:44:21.000000000 +0100
+@@ -0,0 +1,1704 @@
++/******************************************************************************
++ * drivers/xen/blktap/blktap.c
++ *
++ * Back-end driver for user level virtual block devices. This portion of the
++ * driver exports a 'unified' block-device interface that can be accessed
++ * by any operating system that implements a compatible front end. Requests
++ * are remapped to a user-space memory region.
++ *
++ * Based on the blkback driver code.
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Clean ups and fix ups:
++ * Copyright (c) 2006, Steven Rostedt - Red Hat, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <asm/hypervisor.h>
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/driver_util.h>
++#include <linux/kernel.h>
++#include <linux/fs.h>
++#include <linux/mm.h>
++#include <linux/errno.h>
++#include <linux/major.h>
++#include <linux/gfp.h>
++#include <linux/poll.h>
++#include <linux/delay.h>
++#include <asm/tlbflush.h>
++
++#define MAX_TAP_DEV 256 /*the maximum number of tapdisk ring devices */
++#define MAX_DEV_NAME 100 /*the max tapdisk ring device name e.g. blktap0 */
++
++/*
++ * The maximum number of requests that can be outstanding at any time
++ * is determined by
++ *
++ * [mmap_alloc * MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST]
++ *
++ * where mmap_alloc < MAX_DYNAMIC_MEM.
++ *
++ * TODO:
++ * mmap_alloc is initialised to 2 and should be adjustable on the fly via
++ * sysfs.
++ */
++#define BLK_RING_SIZE __RING_SIZE((blkif_sring_t *)0, PAGE_SIZE)
++#define MAX_DYNAMIC_MEM BLK_RING_SIZE
++#define MAX_PENDING_REQS BLK_RING_SIZE
++#define MMAP_PAGES (MAX_PENDING_REQS * BLKIF_MAX_SEGMENTS_PER_REQUEST)
++#define MMAP_VADDR(_start, _req,_seg) \
++ (_start + \
++ ((_req) * BLKIF_MAX_SEGMENTS_PER_REQUEST * PAGE_SIZE) + \
++ ((_seg) * PAGE_SIZE))
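++/*
++ * Worked example (editorial note, assuming 4 KiB pages and the standard
++ * blkif ring layout): __RING_SIZE rounds the entry count down to a power
++ * of two, giving BLK_RING_SIZE == 32 slots per ring page. With
++ * BLKIF_MAX_SEGMENTS_PER_REQUEST == 11 each allocation batch therefore
++ * maps MMAP_PAGES == 32 * 11 == 352 data pages, and segment 2 of request
++ * 5 lives at _start + (5 * 11 + 2) * PAGE_SIZE per MMAP_VADDR above.
++ */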
++static int blkif_reqs = MAX_PENDING_REQS;
++static int mmap_pages = MMAP_PAGES;
++
++#define RING_PAGES 1 /* BLKTAP - immediately before the mmap area, we
++ * have a bunch of pages reserved for shared
++ * memory rings.
++ */
++
++/*Data struct handed back to userspace for tapdisk device to VBD mapping*/
++typedef struct domid_translate {
++ unsigned short domid;
++ unsigned short busid;
++} domid_translate_t ;
++
++typedef struct domid_translate_ext {
++ unsigned short domid;
++ u32 busid;
++} domid_translate_ext_t ;
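++/*
++ * Editorial note: the legacy BLKTAP_IOCTL_NEWINTF ioctl below reinterprets
++ * its argument word as a domid_translate_t, so on a little-endian build an
++ * arg of 0x00050001 would decode as domid 1, busid 5 (hypothetical values);
++ * BLKTAP_IOCTL_NEWINTF_EXT instead copies a domid_translate_ext_t from
++ * userspace so that 32-bit bus ids fit.
++ */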
++
++/*Data struct associated with each of the tapdisk devices*/
++typedef struct tap_blkif {
++ struct vm_area_struct *vma; /*Shared memory area */
++ unsigned long rings_vstart; /*Kernel memory mapping */
++ unsigned long user_vstart; /*User memory mapping */
++ unsigned long dev_inuse; /*One process opens device at a time. */
++ unsigned long dev_pending; /*In process of being opened */
++ unsigned long ring_ok; /*make this ring->state */
++ blkif_front_ring_t ufe_ring; /*Rings up to user space. */
++ wait_queue_head_t wait; /*for poll */
++ unsigned long mode; /*current switching mode */
++ int minor; /*Minor number for tapdisk device */
++ pid_t pid; /*tapdisk process id */
++ enum { RUNNING, CLEANSHUTDOWN } status; /*Detect a clean userspace
++ shutdown */
++ unsigned long *idx_map; /*Record the user ring id to kern
++ [req id, idx] tuple */
++ blkif_t *blkif; /*Associate blkif with tapdev */
++ struct domid_translate_ext trans; /*Translation from domid to bus. */
++} tap_blkif_t;
++
++static struct tap_blkif *tapfds[MAX_TAP_DEV];
++static int blktap_next_minor;
++
++module_param(blkif_reqs, int, 0);
++/* Run-time switchable: /sys/module/blktap/parameters/ */
++static unsigned int log_stats = 0;
++static unsigned int debug_lvl = 0;
++module_param(log_stats, int, 0644);
++module_param(debug_lvl, int, 0644);
++
++/*
++ * Each outstanding request that we've passed to the lower device layers has a
++ * 'pending_req' allocated to it. Each buffer_head that completes decrements
++ * the pendcnt towards zero. When it hits zero, the specified domain has a
++ * response queued for it, with the saved 'id' passed back.
++ */
++typedef struct {
++ blkif_t *blkif;
++ u64 id;
++ unsigned short mem_idx;
++ int nr_pages;
++ atomic_t pendcnt;
++ unsigned short operation;
++ int status;
++ struct list_head free_list;
++ int inuse;
++} pending_req_t;
++
++static pending_req_t *pending_reqs[MAX_PENDING_REQS];
++static struct list_head pending_free;
++static DEFINE_SPINLOCK(pending_free_lock);
++static DECLARE_WAIT_QUEUE_HEAD (pending_free_wq);
++static int alloc_pending_reqs;
++
++typedef unsigned int PEND_RING_IDX;
++
++static inline int MASK_PEND_IDX(int i)
++{
++	return i & (MAX_PENDING_REQS-1);
++}
++
++static inline unsigned int RTN_PEND_IDX(pending_req_t *req, int idx)
++{
++	return req - pending_reqs[idx];
++}
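++/*
++ * Editorial example of the helpers above, assuming MAX_PENDING_REQS == 32:
++ * MASK_PEND_IDX(40) == 8, and RTN_PEND_IDX() inverts the allocation within
++ * a batch, e.g. RTN_PEND_IDX(&pending_reqs[2][5], 2) == 5.
++ */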
++
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
++
++#define BLKBACK_INVALID_HANDLE (~0)
++
++static struct page **foreign_pages[MAX_DYNAMIC_MEM];
++static inline unsigned long idx_to_kaddr(
++ unsigned int mmap_idx, unsigned int req_idx, unsigned int sg_idx)
++{
++ unsigned int arr_idx = req_idx*BLKIF_MAX_SEGMENTS_PER_REQUEST + sg_idx;
++ unsigned long pfn = page_to_pfn(foreign_pages[mmap_idx][arr_idx]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
++
++static unsigned short mmap_alloc = 0;
++static unsigned short mmap_lock = 0;
++static unsigned short mmap_inuse = 0;
++
++/******************************************************************
++ * GRANT HANDLES
++ */
++
++/* When using grant tables to map a frame for device access then the
++ * handle returned must be used to unmap the frame. This is needed to
++ * drop the ref count on the frame.
++ */
++struct grant_handle_pair
++{
++ grant_handle_t kernel;
++ grant_handle_t user;
++};
++#define INVALID_GRANT_HANDLE 0xFFFF
++
++static struct grant_handle_pair
++ pending_grant_handles[MAX_DYNAMIC_MEM][MMAP_PAGES];
++#define pending_handle(_id, _idx, _i) \
++ (pending_grant_handles[_id][((_idx) * BLKIF_MAX_SEGMENTS_PER_REQUEST) \
++ + (_i)])
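++/*
++ * Editorial example: pending_handle(id, 3, 4) selects entry
++ * 3 * BLKIF_MAX_SEGMENTS_PER_REQUEST + 4 == 37 of batch id, i.e. the
++ * kernel/user grant-handle pair for segment 4 of in-flight request 3.
++ */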
++
++
++static int blktap_read_ufe_ring(tap_blkif_t *info); /*local prototypes*/
++
++#define BLKTAP_MINOR 0 /*/dev/xen/blktap has a dynamic major */
++#define BLKTAP_DEV_DIR "/dev/xen"
++
++static int blktap_major;
++
++/* blktap IOCTLs: */
++#define BLKTAP_IOCTL_KICK_FE 1
++#define BLKTAP_IOCTL_KICK_BE 2 /* currently unused */
++#define BLKTAP_IOCTL_SETMODE 3
++#define BLKTAP_IOCTL_SENDPID 4
++#define BLKTAP_IOCTL_NEWINTF 5
++#define BLKTAP_IOCTL_MINOR 6
++#define BLKTAP_IOCTL_MAJOR 7
++#define BLKTAP_QUERY_ALLOC_REQS 8
++#define BLKTAP_IOCTL_FREEINTF 9
++#define BLKTAP_IOCTL_NEWINTF_EXT 50
++#define BLKTAP_IOCTL_PRINT_IDXS 100
++
++/* blktap switching modes: (Set with BLKTAP_IOCTL_SETMODE) */
++#define BLKTAP_MODE_PASSTHROUGH 0x00000000 /* default */
++#define BLKTAP_MODE_INTERCEPT_FE 0x00000001
++#define BLKTAP_MODE_INTERCEPT_BE 0x00000002 /* unimp. */
++
++#define BLKTAP_MODE_INTERPOSE \
++ (BLKTAP_MODE_INTERCEPT_FE | BLKTAP_MODE_INTERCEPT_BE)
++
++
++static inline int BLKTAP_MODE_VALID(unsigned long arg)
++{
++ return ((arg == BLKTAP_MODE_PASSTHROUGH ) ||
++ (arg == BLKTAP_MODE_INTERCEPT_FE) ||
++ (arg == BLKTAP_MODE_INTERPOSE ));
++}
++
++/* Requests passing through the tap to userspace are re-assigned an ID.
++ * We must record a mapping between the BE [IDX,ID] tuple and the userspace
++ * ring ID.
++ */
++
++static inline unsigned long MAKE_ID(domid_t fe_dom, PEND_RING_IDX idx)
++{
++ return ((fe_dom << 16) | MASK_PEND_IDX(idx));
++}
++
++static inline PEND_RING_IDX ID_TO_IDX(unsigned long id)
++{
++ return (PEND_RING_IDX)(id & 0x0000ffff);
++}
++
++static inline int ID_TO_MIDX(unsigned long id)
++{
++ return (int)(id >> 16);
++}
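++/*
++ * Editorial example of the encoding, assuming MAX_PENDING_REQS == 32:
++ * MAKE_ID(2, 40) == (2 << 16) | (40 & 31) == 0x00020008, from which
++ * ID_TO_MIDX() recovers batch 2 and ID_TO_IDX() recovers index 8. Note
++ * that despite the fe_dom parameter name, the caller in
++ * dispatch_rw_block_io() passes the mmap batch index, not a domid.
++ */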
++
++#define INVALID_REQ 0xdead0000
++
++/*TODO: Convert to a free list*/
++static inline int GET_NEXT_REQ(unsigned long *idx_map)
++{
++ int i;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ if (idx_map[i] == INVALID_REQ)
++ return i;
++
++ return INVALID_REQ;
++}
++
++static inline int OFFSET_TO_USR_IDX(int offset)
++{
++ return offset / BLKIF_MAX_SEGMENTS_PER_REQUEST;
++}
++
++static inline int OFFSET_TO_SEG(int offset)
++{
++ return offset % BLKIF_MAX_SEGMENTS_PER_REQUEST;
++}
++
++
++#define BLKTAP_INVALID_HANDLE(_g) \
++ (((_g->kernel) == INVALID_GRANT_HANDLE) && \
++ ((_g->user) == INVALID_GRANT_HANDLE))
++
++#define BLKTAP_INVALIDATE_HANDLE(_g) do { \
++ (_g)->kernel = INVALID_GRANT_HANDLE; (_g)->user = INVALID_GRANT_HANDLE; \
++ } while(0)
++
++
++/******************************************************************
++ * BLKTAP VM OPS
++ */
++
++static struct page *blktap_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ /*
++ * if the page has not been mapped in by the driver then return
++ * NOPAGE_SIGBUS to the domain.
++ */
++
++ return NOPAGE_SIGBUS;
++}
++
++static pte_t blktap_clear_pte(struct vm_area_struct *vma,
++ unsigned long uvaddr,
++ pte_t *ptep, int is_fullmm)
++{
++ pte_t copy;
++ tap_blkif_t *info;
++ int offset, seg, usr_idx, pending_idx, mmap_idx;
++ unsigned long uvstart = vma->vm_start + (RING_PAGES << PAGE_SHIFT);
++ unsigned long kvaddr;
++ struct page **map;
++ struct page *pg;
++ struct grant_handle_pair *khandle;
++ struct gnttab_unmap_grant_ref unmap[2];
++ int count = 0;
++
++ /*
++ * If the address is before the start of the grant mapped region or
++ * if vm_file is NULL (meaning mmap failed and we have nothing to do)
++ */
++ if (uvaddr < uvstart || vma->vm_file == NULL)
++ return ptep_get_and_clear_full(vma->vm_mm, uvaddr,
++ ptep, is_fullmm);
++
++ info = vma->vm_file->private_data;
++ map = vma->vm_private_data;
++
++ /* TODO Should these be changed to if statements? */
++ BUG_ON(!info);
++ BUG_ON(!info->idx_map);
++ BUG_ON(!map);
++
++ offset = (int) ((uvaddr - uvstart) >> PAGE_SHIFT);
++ usr_idx = OFFSET_TO_USR_IDX(offset);
++ seg = OFFSET_TO_SEG(offset);
++
++ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, seg);
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ClearPageReserved(pg);
++ map[offset + RING_PAGES] = NULL;
++
++ khandle = &pending_handle(mmap_idx, pending_idx, seg);
++
++ if (khandle->kernel != INVALID_GRANT_HANDLE) {
++ gnttab_set_unmap_op(&unmap[count], kvaddr,
++ GNTMAP_host_map, khandle->kernel);
++ count++;
++
++ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++ INVALID_P2M_ENTRY);
++ }
++
++ if (khandle->user != INVALID_GRANT_HANDLE) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++
++ copy = *ptep;
++ gnttab_set_unmap_op(&unmap[count], virt_to_machine(ptep),
++ GNTMAP_host_map
++ | GNTMAP_application_map
++ | GNTMAP_contains_pte,
++ khandle->user);
++ count++;
++ } else {
++ BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap));
++
++ /* USING SHADOW PAGE TABLES. */
++ copy = ptep_get_and_clear_full(vma->vm_mm, uvaddr, ptep,
++ is_fullmm);
++ }
++
++ if (count) {
++ BLKTAP_INVALIDATE_HANDLE(khandle);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++ unmap, count))
++ BUG();
++ }
++
++ return copy;
++}
++
++struct vm_operations_struct blktap_vm_ops = {
++	.nopage = blktap_nopage,
++	.zap_pte = blktap_clear_pte,
++};
++
++/******************************************************************
++ * BLKTAP FILE OPS
++ */
++
++/*Function Declarations*/
++static tap_blkif_t *get_next_free_dev(void);
++static int blktap_open(struct inode *inode, struct file *filp);
++static int blktap_release(struct inode *inode, struct file *filp);
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma);
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg);
++static unsigned int blktap_poll(struct file *file, poll_table *wait);
++
++static const struct file_operations blktap_fops = {
++ .owner = THIS_MODULE,
++ .poll = blktap_poll,
++ .ioctl = blktap_ioctl,
++ .open = blktap_open,
++ .release = blktap_release,
++ .mmap = blktap_mmap,
++};
++
++
++static tap_blkif_t *get_next_free_dev(void)
++{
++ struct class *class;
++ tap_blkif_t *info;
++ int minor;
++
++ /*
++ * This is called only from the ioctl, which
++ * means we should always have interrupts enabled.
++ */
++ BUG_ON(irqs_disabled());
++
++ spin_lock_irq(&pending_free_lock);
++
++ /* tapfds[0] is always NULL */
++
++	for (minor = 1; minor < blktap_next_minor; minor++) {
++		info = tapfds[minor];
++		/* we could have failed a previous attempt, leaving this
++		 * slot NULL; only mark an existing info as pending. */
++		if (!info ||
++		    ((info->dev_inuse == 0) &&
++		     (info->dev_pending == 0))) {
++			if (info)
++				info->dev_pending = 1;
++			goto found;
++		}
++	}
++ info = NULL;
++ minor = -1;
++
++ /*
++ * We didn't find free device. If we can still allocate
++ * more, then we grab the next device minor that is
++ * available. This is done while we are still under
++ * the protection of the pending_free_lock.
++ */
++ if (blktap_next_minor < MAX_TAP_DEV)
++ minor = blktap_next_minor++;
++found:
++ spin_unlock_irq(&pending_free_lock);
++
++ if (!info && minor > 0) {
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (unlikely(!info)) {
++ /*
++ * If we failed here, try to put back
++ * the next minor number. But if one
++ * was just taken, then we just lose this
++ * minor. We can try to allocate this
++ * minor again later.
++ */
++ spin_lock_irq(&pending_free_lock);
++ if (blktap_next_minor == minor+1)
++ blktap_next_minor--;
++ spin_unlock_irq(&pending_free_lock);
++ goto out;
++ }
++
++ info->minor = minor;
++ /*
++ * Make sure that we have a minor before others can
++ * see us.
++ */
++ wmb();
++ tapfds[minor] = info;
++
++ if ((class = get_xen_class()) != NULL)
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, minor), NULL,
++ "blktap%d", minor);
++ }
++
++out:
++ return info;
++}
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif)
++{
++ tap_blkif_t *info;
++ int i;
++
++ for (i = 1; i < blktap_next_minor; i++) {
++ info = tapfds[i];
++ if ( info &&
++ (info->trans.domid == domid) &&
++ (info->trans.busid == xenbus_id) ) {
++ info->blkif = blkif;
++ info->status = RUNNING;
++ return i;
++ }
++ }
++ return -1;
++}
++
++void signal_tapdisk(int idx)
++{
++ tap_blkif_t *info;
++ struct task_struct *ptask;
++
++ /*
++ * if the userland tools set things up wrong, this could be negative;
++ * just don't try to signal in this case
++ */
++	if (idx < 0 || idx >= MAX_TAP_DEV)
++		return;
++
++	info = tapfds[idx];
++	if (!info)
++		return;
++
++ if (info->pid > 0) {
++ ptask = find_task_by_pid(info->pid);
++ if (ptask)
++ info->status = CLEANSHUTDOWN;
++ }
++ info->blkif = NULL;
++
++ return;
++}
++
++static int blktap_open(struct inode *inode, struct file *filp)
++{
++ blkif_sring_t *sring;
++ int idx = iminor(inode) - BLKTAP_MINOR;
++ tap_blkif_t *info;
++ int i;
++
++ /* ctrl device, treat differently */
++ if (!idx)
++ return 0;
++
++	/* Validate the minor before indexing into tapfds[]. */
++	if (idx < 0 || idx >= MAX_TAP_DEV) {
++		WPRINTK("Unable to open device /dev/xen/blktap%d\n", idx);
++		return -ENODEV;
++	}
++
++	info = tapfds[idx];
++	if (!info) {
++		WPRINTK("Unable to open device /dev/xen/blktap%d\n", idx);
++		return -ENODEV;
++	}
++
++ DPRINTK("Opening device /dev/xen/blktap%d\n",idx);
++
++ /*Only one process can access device at a time*/
++ if (test_and_set_bit(0, &info->dev_inuse))
++ return -EBUSY;
++
++ info->dev_pending = 0;
++
++	/* Allocate the fe ring. */
++	sring = (blkif_sring_t *)get_zeroed_page(GFP_KERNEL);
++	if (sring == NULL) {
++		clear_bit(0, &info->dev_inuse);
++		return -ENOMEM;
++	}
++
++ SetPageReserved(virt_to_page(sring));
++
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ufe_ring, sring, PAGE_SIZE);
++
++ filp->private_data = info;
++ info->vma = NULL;
++
++ info->idx_map = kmalloc(sizeof(unsigned long) * MAX_PENDING_REQS,
++ GFP_KERNEL);
++
++ if (info->idx_map == NULL)
++ goto fail_nomem;
++
++ if (idx > 0) {
++ init_waitqueue_head(&info->wait);
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ info->idx_map[i] = INVALID_REQ;
++ }
++
++ DPRINTK("Tap open: device /dev/xen/blktap%d\n",idx);
++ return 0;
++
++ fail_nomem:
++	/* Unwind: don't leak the ring page or the device-inuse claim. */
++	ClearPageReserved(virt_to_page(sring));
++	free_page((unsigned long)sring);
++	clear_bit(0, &info->dev_inuse);
++	return -ENOMEM;
++}
++
++static int blktap_release(struct inode *inode, struct file *filp)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* check for control device */
++ if (!info)
++ return 0;
++
++ info->dev_inuse = 0;
++ DPRINTK("Freeing device [/dev/xen/blktap%d]\n",info->minor);
++
++ /* Free the ring page. */
++ ClearPageReserved(virt_to_page(info->ufe_ring.sring));
++ free_page((unsigned long) info->ufe_ring.sring);
++
++ /* Clear any active mappings and free foreign map table */
++ if (info->vma) {
++ struct mm_struct *mm = info->vma->vm_mm;
++
++ down_write(&mm->mmap_sem);
++ zap_page_range(
++ info->vma, info->vma->vm_start,
++ info->vma->vm_end - info->vma->vm_start, NULL);
++ up_write(&mm->mmap_sem);
++
++ kfree(info->vma->vm_private_data);
++
++ info->vma = NULL;
++ }
++
++ if (info->idx_map) {
++ kfree(info->idx_map);
++ info->idx_map = NULL;
++ }
++
++ if ( (info->status != CLEANSHUTDOWN) && (info->blkif != NULL) ) {
++ if (info->blkif->xenblkd != NULL) {
++ kthread_stop(info->blkif->xenblkd);
++ info->blkif->xenblkd = NULL;
++ }
++ info->status = CLEANSHUTDOWN;
++ }
++
++ return 0;
++}
++
++
++/* Note on mmap:
++ * We need to map pages to user space in a way that will allow the block
++ * subsystem set up direct IO to them. This couldn't be done before, because
++ * there isn't really a sane way to translate a user virtual address down to a
++ * physical address when the page belongs to another domain.
++ *
++ * My first approach was to map the page in to kernel memory, add an entry
++ * for it in the physical frame list (using alloc_lomem_region as in blkback)
++ * and then attempt to map that page up to user space. This is disallowed
++ * by xen though, which realizes that we don't really own the machine frame
++ * underlying the physical page.
++ *
++ * The new approach is to provide explicit support for this in xen linux.
++ * The VMA now has a flag, VM_FOREIGN, to indicate that it contains pages
++ * mapped from other vms. vma->vm_private_data is set up as a mapping
++ * from pages to actual page structs. There is a new clause in get_user_pages
++ * that does the right thing for this sort of mapping.
++ */
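++/*
++ * Editorial sketch of the resulting address-space layout (derived from the
++ * macros defined earlier, not part of the original comment):
++ *
++ *	vma->vm_start                       RING_PAGES shared-ring page(s)
++ *	info->user_vstart                   first grant-mapped data page,
++ *	    = vm_start + (RING_PAGES << PAGE_SHIFT)
++ *	MMAP_VADDR(user_vstart, req, seg)   data page for request req,
++ *	                                    segment seg
++ */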
++static int blktap_mmap(struct file *filp, struct vm_area_struct *vma)
++{
++ int size;
++ struct page **map;
++ int i;
++ tap_blkif_t *info = filp->private_data;
++ int ret;
++
++ if (info == NULL) {
++ WPRINTK("blktap: mmap, retrieving idx failed\n");
++ return -ENOMEM;
++ }
++
++ vma->vm_flags |= VM_RESERVED;
++ vma->vm_ops = &blktap_vm_ops;
++
++ size = vma->vm_end - vma->vm_start;
++ if (size != ((mmap_pages + RING_PAGES) << PAGE_SHIFT)) {
++ WPRINTK("you _must_ map exactly %d pages!\n",
++ mmap_pages + RING_PAGES);
++ return -EAGAIN;
++ }
++
++ size >>= PAGE_SHIFT;
++ info->rings_vstart = vma->vm_start;
++ info->user_vstart = info->rings_vstart + (RING_PAGES << PAGE_SHIFT);
++
++ /* Map the ring pages to the start of the region and reserve it. */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ ret = vm_insert_page(vma, vma->vm_start,
++ virt_to_page(info->ufe_ring.sring));
++ else
++ ret = remap_pfn_range(vma, vma->vm_start,
++ __pa(info->ufe_ring.sring) >> PAGE_SHIFT,
++ PAGE_SIZE, vma->vm_page_prot);
++ if (ret) {
++ WPRINTK("Mapping user ring failed!\n");
++ goto fail;
++ }
++
++ /* Mark this VM as containing foreign pages, and set up mappings. */
++ map = kzalloc(((vma->vm_end - vma->vm_start) >> PAGE_SHIFT)
++ * sizeof(struct page *),
++ GFP_KERNEL);
++ if (map == NULL) {
++ WPRINTK("Couldn't alloc VM_FOREIGN map.\n");
++ goto fail;
++ }
++
++ for (i = 0; i < ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT); i++)
++ map[i] = NULL;
++
++ vma->vm_private_data = map;
++ vma->vm_flags |= VM_FOREIGN;
++ vma->vm_flags |= VM_DONTCOPY;
++
++#ifdef CONFIG_X86
++ vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
++
++ info->vma = vma;
++ info->ring_ok = 1;
++ return 0;
++ fail:
++ /* Clear any active mappings. */
++ zap_page_range(vma, vma->vm_start,
++ vma->vm_end - vma->vm_start, NULL);
++
++ return -ENOMEM;
++}
++
++
++static int blktap_ioctl(struct inode *inode, struct file *filp,
++ unsigned int cmd, unsigned long arg)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ switch(cmd) {
++ case BLKTAP_IOCTL_KICK_FE:
++ {
++ /* There are fe messages to process. */
++ return blktap_read_ufe_ring(info);
++ }
++ case BLKTAP_IOCTL_SETMODE:
++ {
++ if (info) {
++ if (BLKTAP_MODE_VALID(arg)) {
++ info->mode = arg;
++ /* XXX: may need to flush rings here. */
++ DPRINTK("blktap: set mode to %lx\n",
++ arg);
++ return 0;
++ }
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_PRINT_IDXS:
++ {
++ if (info) {
++			printk(KERN_INFO "User Rings:\n-----------\n");
++			printk(KERN_INFO "UF: rsp_cons: %2d, req_prod_pvt: %2d "
++			       "| req_prod: %2d, rsp_prod: %2d\n",
++			       info->ufe_ring.rsp_cons,
++			       info->ufe_ring.req_prod_pvt,
++			       info->ufe_ring.sring->req_prod,
++			       info->ufe_ring.sring->rsp_prod);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_SENDPID:
++ {
++ if (info) {
++ info->pid = (pid_t)arg;
++ DPRINTK("blktap: pid received %d\n",
++ info->pid);
++ }
++ return 0;
++ }
++ case BLKTAP_IOCTL_NEWINTF:
++ {
++ uint64_t val = (uint64_t)arg;
++ domid_translate_t *tr = (domid_translate_t *)&val;
++
++ DPRINTK("NEWINTF Req for domid %d and bus id %d\n",
++ tr->domid, tr->busid);
++ info = get_next_free_dev();
++ if (!info) {
++ WPRINTK("Error initialising /dev/xen/blktap - "
++ "No more devices\n");
++ return -1;
++ }
++ info->trans.domid = tr->domid;
++ info->trans.busid = tr->busid;
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_NEWINTF_EXT:
++ {
++ void __user *udata = (void __user *) arg;
++ domid_translate_ext_t tr;
++
++ if (copy_from_user(&tr, udata, sizeof(domid_translate_ext_t)))
++ return -EFAULT;
++
++ DPRINTK("NEWINTF_EXT Req for domid %d and bus id %d\n",
++ tr.domid, tr.busid);
++ info = get_next_free_dev();
++ if (!info) {
++ WPRINTK("Error initialising /dev/xen/blktap - "
++ "No more devices\n");
++ return -1;
++ }
++ info->trans.domid = tr.domid;
++ info->trans.busid = tr.busid;
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_FREEINTF:
++ {
++ unsigned long dev = arg;
++ unsigned long flags;
++
++		if (dev >= MAX_TAP_DEV)
++			return 0; /* should this be an error? */
++
++		info = tapfds[dev];
++		if (!info)
++			return 0; /* should this be an error? */
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (info->dev_pending)
++ info->dev_pending = 0;
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return 0;
++ }
++ case BLKTAP_IOCTL_MINOR:
++ {
++ unsigned long dev = arg;
++
++		if (dev >= MAX_TAP_DEV)
++			return -EINVAL;
++
++		info = tapfds[dev];
++		if (!info)
++			return -EINVAL;
++
++ return info->minor;
++ }
++ case BLKTAP_IOCTL_MAJOR:
++ return blktap_major;
++
++ case BLKTAP_QUERY_ALLOC_REQS:
++ {
++ WPRINTK("BLKTAP_QUERY_ALLOC_REQS ioctl: %d/%d\n",
++ alloc_pending_reqs, blkif_reqs);
++		/* Scale before dividing so integer division doesn't
++		 * truncate the percentage to 0 or 100. */
++		return (alloc_pending_reqs * 100) / blkif_reqs;
++ }
++ }
++ return -ENOIOCTLCMD;
++}
++
++static unsigned int blktap_poll(struct file *filp, poll_table *wait)
++{
++ tap_blkif_t *info = filp->private_data;
++
++ /* do not work on the control device */
++ if (!info)
++ return 0;
++
++ poll_wait(filp, &info->wait, wait);
++ if (info->ufe_ring.req_prod_pvt != info->ufe_ring.sring->req_prod) {
++ RING_PUSH_REQUESTS(&info->ufe_ring);
++ return POLLIN | POLLRDNORM;
++ }
++ return 0;
++}
++
++void blktap_kick_user(int idx)
++{
++ tap_blkif_t *info;
++
++	if (idx < 0 || idx >= MAX_TAP_DEV)
++		return;
++
++	info = tapfds[idx];
++	if (!info)
++		return;
++
++ wake_up_interruptible(&info->wait);
++
++ return;
++}
++
++static int do_block_io_op(blkif_t *blkif);
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req);
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st);
++
++/******************************************************************
++ * misc small helpers
++ */
++static int req_increase(void)
++{
++ int i, j;
++
++ if (mmap_alloc >= MAX_PENDING_REQS || mmap_lock)
++ return -EINVAL;
++
++ pending_reqs[mmap_alloc] = kzalloc(sizeof(pending_req_t)
++ * blkif_reqs, GFP_KERNEL);
++ foreign_pages[mmap_alloc] = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs[mmap_alloc] || !foreign_pages[mmap_alloc])
++ goto out_of_memory;
++
++ DPRINTK("%s: reqs=%d, pages=%d\n",
++ __FUNCTION__, blkif_reqs, mmap_pages);
++
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ list_add_tail(&pending_reqs[mmap_alloc][i].free_list,
++ &pending_free);
++ pending_reqs[mmap_alloc][i].mem_idx = mmap_alloc;
++ for (j = 0; j < BLKIF_MAX_SEGMENTS_PER_REQUEST; j++)
++ BLKTAP_INVALIDATE_HANDLE(&pending_handle(mmap_alloc,
++ i, j));
++ }
++
++ mmap_alloc++;
++ DPRINTK("# MMAPs increased to %d\n",mmap_alloc);
++ return 0;
++
++ out_of_memory:
++	free_empty_pages_and_pagevec(foreign_pages[mmap_alloc], mmap_pages);
++	foreign_pages[mmap_alloc] = NULL;
++	kfree(pending_reqs[mmap_alloc]);
++	pending_reqs[mmap_alloc] = NULL;
++ WPRINTK("%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++static void mmap_req_del(int mmap)
++{
++ BUG_ON(!spin_is_locked(&pending_free_lock));
++
++ kfree(pending_reqs[mmap]);
++ pending_reqs[mmap] = NULL;
++
++	free_empty_pages_and_pagevec(foreign_pages[mmap], mmap_pages);
++ foreign_pages[mmap] = NULL;
++
++ mmap_lock = 0;
++ DPRINTK("# MMAPs decreased to %d\n",mmap_alloc);
++ mmap_alloc--;
++}
++
++static pending_req_t* alloc_req(void)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++
++ if (req) {
++ req->inuse = 1;
++ alloc_pending_reqs++;
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ return req;
++}
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++
++ alloc_pending_reqs--;
++ req->inuse = 0;
++ if (mmap_lock && (req->mem_idx == mmap_alloc-1)) {
++ mmap_inuse--;
++		if (mmap_inuse == 0)
++			mmap_req_del(mmap_alloc-1);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return;
++ }
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++static void fast_flush_area(pending_req_t *req, int k_idx, int u_idx,
++ int tapidx)
++{
++ struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int i, invcount = 0, locked = 0;
++ struct grant_handle_pair *khandle;
++ uint64_t ptep;
++ int ret, mmap_idx;
++ unsigned long kvaddr, uvaddr;
++ tap_blkif_t *info;
++ struct mm_struct *mm;
++
++	if (tapidx < 0 || tapidx >= MAX_TAP_DEV)
++		info = NULL;
++	else
++		info = tapfds[tapidx];
++
++	if (!info) {
++		WPRINTK("fast_flush: Couldn't get info!\n");
++		return;
++	}
++
++ mm = info->vma ? info->vma->vm_mm : NULL;
++
++ if (info->vma != NULL &&
++ xen_feature(XENFEAT_auto_translated_physmap)) {
++ down_write(&mm->mmap_sem);
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++
++ mmap_idx = req->mem_idx;
++
++ for (i = 0; i < req->nr_pages; i++) {
++ kvaddr = idx_to_kaddr(mmap_idx, k_idx, i);
++ uvaddr = MMAP_VADDR(info->user_vstart, u_idx, i);
++
++ khandle = &pending_handle(mmap_idx, k_idx, i);
++
++ if (khandle->kernel != INVALID_GRANT_HANDLE) {
++ gnttab_set_unmap_op(&unmap[invcount],
++ idx_to_kaddr(mmap_idx, k_idx, i),
++ GNTMAP_host_map, khandle->kernel);
++ invcount++;
++
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(mmap_idx, k_idx, i))
++ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++ }
++
++ if (khandle->user != INVALID_GRANT_HANDLE) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ if (!locked++)
++ down_write(&mm->mmap_sem);
++ if (create_lookup_pte_addr(
++ mm,
++ MMAP_VADDR(info->user_vstart, u_idx, i),
++ &ptep) !=0) {
++ up_write(&mm->mmap_sem);
++ WPRINTK("Couldn't get a pte addr!\n");
++ return;
++ }
++
++ gnttab_set_unmap_op(&unmap[invcount], ptep,
++ GNTMAP_host_map
++ | GNTMAP_application_map
++ | GNTMAP_contains_pte,
++ khandle->user);
++ invcount++;
++ }
++
++ BLKTAP_INVALIDATE_HANDLE(khandle);
++ }
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(ret);
++
++ if (info->vma != NULL &&
++ !xen_feature(XENFEAT_auto_translated_physmap)) {
++ if (!locked++)
++ down_write(&mm->mmap_sem);
++ zap_page_range(info->vma,
++ MMAP_VADDR(info->user_vstart, u_idx, 0),
++ req->nr_pages << PAGE_SHIFT, NULL);
++ }
++
++ if (locked)
++ up_write(&mm->mmap_sem);
++}
++
++/******************************************************************
++ * SCHEDULER FUNCTIONS
++ */
++
++static void print_stats(blkif_t *blkif)
++{
++ printk(KERN_DEBUG "%s: oo %3d | rd %4d | wr %4d\n",
++ current->comm, blkif->st_oo_req,
++ blkif->st_rd_req, blkif->st_wr_req);
++ blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
++ blkif->st_rd_req = 0;
++ blkif->st_wr_req = 0;
++ blkif->st_oo_req = 0;
++}
++
++int tap_blkif_schedule(void *arg)
++{
++ blkif_t *blkif = arg;
++
++ blkif_get(blkif);
++
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: started\n", current->comm);
++
++ while (!kthread_should_stop()) {
++ if (try_to_freeze())
++ continue;
++
++ wait_event_interruptible(
++ blkif->wq,
++ blkif->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ blkif->waiting_reqs = 0;
++ smp_mb(); /* clear flag *before* checking for work */
++
++ if (do_block_io_op(blkif))
++ blkif->waiting_reqs = 1;
++
++ if (log_stats && time_after(jiffies, blkif->st_print))
++ print_stats(blkif);
++ }
++
++ if (log_stats)
++ print_stats(blkif);
++ if (debug_lvl)
++ printk(KERN_DEBUG "%s: exiting\n", current->comm);
++
++ blkif->xenblkd = NULL;
++ blkif_put(blkif);
++
++ return 0;
++}
++
++/******************************************************************
++ * COMPLETION CALLBACK -- Called by user level ioctl()
++ */
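++/*
++ * Editorial sketch of the (hypothetical) userspace side that drives this
++ * callback; descriptor names and sizes are illustrative only:
++ *
++ *	int fd = open("/dev/xen/blktap5", O_RDWR);
++ *	void *rings = mmap(NULL, (RING_PAGES + MMAP_PAGES) * page_size,
++ *			   PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
++ *	// poll(fd), consume requests, write responses into the ring ...
++ *	ioctl(fd, BLKTAP_IOCTL_KICK_FE, 0);	// drained by the function below
++ */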
++
++static int blktap_read_ufe_ring(tap_blkif_t *info)
++{
++ /* This is called to read responses from the UFE ring. */
++ RING_IDX i, j, rp;
++ blkif_response_t *resp;
++ blkif_t *blkif=NULL;
++ int pending_idx, usr_idx, mmap_idx;
++ pending_req_t *pending_req;
++
++ if (!info)
++ return 0;
++
++ /* We currently only forward packets in INTERCEPT_FE mode. */
++ if (!(info->mode & BLKTAP_MODE_INTERCEPT_FE))
++ return 0;
++
++ /* for each outstanding message on the UFEring */
++ rp = info->ufe_ring.sring->rsp_prod;
++ rmb();
++
++ for (i = info->ufe_ring.rsp_cons; i != rp; i++) {
++ blkif_response_t res;
++ resp = RING_GET_RESPONSE(&info->ufe_ring, i);
++ memcpy(&res, resp, sizeof(res));
++ mb(); /* rsp_cons read by RING_FULL() in do_block_io_op(). */
++ ++info->ufe_ring.rsp_cons;
++
++ /*retrieve [usr_idx] to [mmap_idx,pending_idx] mapping*/
++ usr_idx = (int)res.id;
++ pending_idx = MASK_PEND_IDX(ID_TO_IDX(info->idx_map[usr_idx]));
++ mmap_idx = ID_TO_MIDX(info->idx_map[usr_idx]);
++
++ if ( (mmap_idx >= mmap_alloc) ||
++ (ID_TO_IDX(info->idx_map[usr_idx]) >= MAX_PENDING_REQS) )
++			WPRINTK("Incorrect req map "
++ "[%d], internal map [%d,%d (%d)]\n",
++ usr_idx, mmap_idx,
++ ID_TO_IDX(info->idx_map[usr_idx]),
++ MASK_PEND_IDX(
++ ID_TO_IDX(info->idx_map[usr_idx])));
++
++ pending_req = &pending_reqs[mmap_idx][pending_idx];
++ blkif = pending_req->blkif;
++
++ for (j = 0; j < pending_req->nr_pages; j++) {
++
++ unsigned long kvaddr, uvaddr;
++ struct page **map = info->vma->vm_private_data;
++ struct page *pg;
++ int offset;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, j);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, j);
++
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ClearPageReserved(pg);
++ offset = (uvaddr - info->vma->vm_start)
++ >> PAGE_SHIFT;
++ map[offset] = NULL;
++ }
++ fast_flush_area(pending_req, pending_idx, usr_idx, info->minor);
++ info->idx_map[usr_idx] = INVALID_REQ;
++ make_response(blkif, pending_req->id, res.operation,
++ res.status);
++ blkif_put(pending_req->blkif);
++ free_req(pending_req);
++ }
++
++ return 0;
++}
++
++
++/******************************************************************************
++ * NOTIFICATION FROM GUEST OS.
++ */
++
++static void blkif_notify_work(blkif_t *blkif)
++{
++ blkif->waiting_reqs = 1;
++ wake_up(&blkif->wq);
++}
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ blkif_notify_work(dev_id);
++ return IRQ_HANDLED;
++}
++
++
++
++/******************************************************************
++ * DOWNWARD CALLS -- These interface with the block-device layer proper.
++ */
++static int print_dbug = 1;
++static int do_block_io_op(blkif_t *blkif)
++{
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ blkif_request_t req;
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int more_to_do = 0;
++ tap_blkif_t *info;
++
++ rc = blk_rings->common.req_cons;
++ rp = blk_rings->common.sring->req_prod;
++ rmb(); /* Ensure we see queued requests up to 'rp'. */
++
++ /*Check blkif has corresponding UE ring*/
++ if (blkif->dev_num < 0) {
++ /*oops*/
++ if (print_dbug) {
++ WPRINTK("Corresponding UE "
++ "ring does not exist!\n");
++ print_dbug = 0; /*We only print this message once*/
++ }
++ return 0;
++ }
++
++	if (blkif->dev_num >= MAX_TAP_DEV)
++		info = NULL;
++	else
++		info = tapfds[blkif->dev_num];
++
++	if (!info || !info->dev_inuse) {
++ if (print_dbug) {
++ WPRINTK("Can't get UE info!\n");
++ print_dbug = 0;
++ }
++ return 0;
++ }
++
++ while (rc != rp) {
++
++ if (RING_FULL(&info->ufe_ring)) {
++ WPRINTK("RING_FULL! More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc)) {
++ WPRINTK("RING_REQUEST_CONS_OVERFLOW!"
++ " More to do\n");
++ more_to_do = 1;
++ break;
++ }
++
++ pending_req = alloc_req();
++ if (NULL == pending_req) {
++ blkif->st_oo_req++;
++ more_to_do = 1;
++ break;
++ }
++
++ if (kthread_should_stop()) {
++ more_to_do = 1;
++ break;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc),
++ sizeof(req));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.req_cons = ++rc; /* before make_response() */
++
++ /* Apply all sanity checks to /private copy/ of request. */
++ barrier();
++
++ switch (req.operation) {
++ case BLKIF_OP_READ:
++ blkif->st_rd_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ case BLKIF_OP_WRITE:
++ blkif->st_wr_req++;
++ dispatch_rw_block_io(blkif, &req, pending_req);
++ break;
++
++ default:
++ /* A good sign something is wrong: sleep for a while to
++ * avoid excessive CPU consumption by a bad guest. */
++ msleep(1);
++ WPRINTK("unknown operation [%d]\n",
++ req.operation);
++ make_response(blkif, req.id, req.operation,
++ BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ break;
++ }
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++ }
++
++ blktap_kick_user(blkif->dev_num);
++
++ return more_to_do;
++}
++
++static void dispatch_rw_block_io(blkif_t *blkif,
++ blkif_request_t *req,
++ pending_req_t *pending_req)
++{
++ int op, operation = (req->operation == BLKIF_OP_WRITE) ? WRITE : READ;
++ struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST*2];
++ unsigned int nseg;
++ int ret, i, nr_sects = 0;
++ tap_blkif_t *info;
++ blkif_request_t *target;
++ int pending_idx = RTN_PEND_IDX(pending_req,pending_req->mem_idx);
++ int usr_idx;
++ uint16_t mmap_idx = pending_req->mem_idx;
++ struct mm_struct *mm;
++
++	if (blkif->dev_num < 0 || blkif->dev_num >= MAX_TAP_DEV)
++ goto fail_response;
++
++ info = tapfds[blkif->dev_num];
++ if (info == NULL)
++ goto fail_response;
++
++ /* Check we have space on user ring - should never fail. */
++ usr_idx = GET_NEXT_REQ(info->idx_map);
++ if (usr_idx == INVALID_REQ) {
++ BUG();
++ goto fail_response;
++ }
++
++ /* Check that number of segments is sane. */
++ nseg = req->nr_segments;
++ if ( unlikely(nseg == 0) ||
++ unlikely(nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST) ) {
++ WPRINTK("Bad number of segments in request (%d)\n", nseg);
++ goto fail_response;
++ }
++
++ /* Make sure userspace is ready. */
++ if (!info->ring_ok) {
++ WPRINTK("blktap: ring not ready for requests!\n");
++ goto fail_response;
++ }
++
++ if (RING_FULL(&info->ufe_ring)) {
++ WPRINTK("blktap: fe_ring is full, can't add "
++ "IO Request will be dropped. %d %d\n",
++ RING_SIZE(&info->ufe_ring),
++ RING_SIZE(&blkif->blk_rings.common));
++ goto fail_response;
++ }
++
++ pending_req->blkif = blkif;
++ pending_req->id = req->id;
++ pending_req->operation = operation;
++ pending_req->status = BLKIF_RSP_OKAY;
++ pending_req->nr_pages = nseg;
++ op = 0;
++ mm = info->vma->vm_mm;
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ down_write(&mm->mmap_sem);
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ uint64_t ptep;
++ uint32_t flags;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ flags = GNTMAP_host_map;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], kvaddr, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Now map it to user. */
++ ret = create_lookup_pte_addr(mm, uvaddr, &ptep);
++ if (ret) {
++ up_write(&mm->mmap_sem);
++ WPRINTK("Couldn't get a pte addr!\n");
++ goto fail_flush;
++ }
++
++ flags = GNTMAP_host_map | GNTMAP_application_map
++ | GNTMAP_contains_pte;
++ if (operation == WRITE)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[op], ptep, flags,
++ req->seg[i].gref, blkif->domid);
++ op++;
++ }
++
++ nr_sects += (req->seg[i].last_sect -
++ req->seg[i].first_sect + 1);
++ }
++
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, op);
++ BUG_ON(ret);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ up_write(&mm->mmap_sem);
++
++ for (i = 0; i < (nseg*2); i+=2) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i/2);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i/2);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ if (unlikely(map[i+1].status != 0)) {
++ WPRINTK("invalid user buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i+1].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i/2).kernel
++ = map[i].handle;
++ pending_handle(mmap_idx, pending_idx, i/2).user
++ = map[i+1].handle;
++
++ if (ret)
++ continue;
++
++ set_phys_to_machine(__pa(kvaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr
++ >> PAGE_SHIFT));
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ } else {
++ for (i = 0; i < nseg; i++) {
++ unsigned long uvaddr;
++ unsigned long kvaddr;
++ unsigned long offset;
++ struct page *pg;
++
++ uvaddr = MMAP_VADDR(info->user_vstart, usr_idx, i);
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++
++ if (unlikely(map[i].status != 0)) {
++ WPRINTK("invalid kernel buffer -- "
++ "could not remap it\n");
++ ret |= 1;
++ map[i].handle = INVALID_GRANT_HANDLE;
++ }
++
++ pending_handle(mmap_idx, pending_idx, i).kernel
++ = map[i].handle;
++
++ if (ret)
++ continue;
++
++ offset = (uvaddr - info->vma->vm_start) >> PAGE_SHIFT;
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ ((struct page **)info->vma->vm_private_data)[offset] =
++ pg;
++ }
++ }
++
++ if (ret)
++ goto fail_flush;
++
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ down_write(&mm->mmap_sem);
++ /* Mark mapped pages as reserved: */
++ for (i = 0; i < req->nr_segments; i++) {
++ unsigned long kvaddr;
++ struct page *pg;
++
++ kvaddr = idx_to_kaddr(mmap_idx, pending_idx, i);
++ pg = pfn_to_page(__pa(kvaddr) >> PAGE_SHIFT);
++ SetPageReserved(pg);
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ ret = vm_insert_page(info->vma,
++ MMAP_VADDR(info->user_vstart,
++ usr_idx, i), pg);
++ if (ret) {
++ up_write(&mm->mmap_sem);
++ goto fail_flush;
++ }
++ }
++ }
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ up_write(&mm->mmap_sem);
++
++ /*record [mmap_idx,pending_idx] to [usr_idx] mapping*/
++ info->idx_map[usr_idx] = MAKE_ID(mmap_idx, pending_idx);
++
++ blkif_get(blkif);
++ /* Finally, write the request message to the user ring. */
++ target = RING_GET_REQUEST(&info->ufe_ring,
++ info->ufe_ring.req_prod_pvt);
++ memcpy(target, req, sizeof(*req));
++ target->id = usr_idx;
++ wmb(); /* blktap_poll() reads req_prod_pvt asynchronously */
++ info->ufe_ring.req_prod_pvt++;
++
++ if (operation == READ)
++ blkif->st_rd_sect += nr_sects;
++ else if (operation == WRITE)
++ blkif->st_wr_sect += nr_sects;
++
++ return;
++
++ fail_flush:
++ WPRINTK("Reached Fail_flush\n");
++ fast_flush_area(pending_req, pending_idx, usr_idx, blkif->dev_num);
++ fail_response:
++ make_response(blkif, req->id, req->operation, BLKIF_RSP_ERROR);
++ free_req(pending_req);
++ msleep(1); /* back off a bit */
++}
++
++
++
++/******************************************************************
++ * MISCELLANEOUS SETUP / TEARDOWN / DEBUGGING
++ */
++
++
++static void make_response(blkif_t *blkif, u64 id,
++ unsigned short op, int st)
++{
++ blkif_response_t resp;
++ unsigned long flags;
++ blkif_back_rings_t *blk_rings = &blkif->blk_rings;
++ int more_to_do = 0;
++ int notify;
++
++ resp.id = id;
++ resp.operation = op;
++ resp.status = st;
++
++ spin_lock_irqsave(&blkif->blk_ring_lock, flags);
++ /* Place on the response ring for the relevant domain. */
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ memcpy(RING_GET_RESPONSE(&blk_rings->native,
++ blk_rings->native.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_32:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_32,
++ blk_rings->x86_32.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ case BLKIF_PROTOCOL_X86_64:
++ memcpy(RING_GET_RESPONSE(&blk_rings->x86_64,
++ blk_rings->x86_64.rsp_prod_pvt),
++ &resp, sizeof(resp));
++ break;
++ default:
++ BUG();
++ }
++ blk_rings->common.rsp_prod_pvt++;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
++
++ if (blk_rings->common.rsp_prod_pvt == blk_rings->common.req_cons) {
++ /*
++ * Tail check for pending requests. Allows frontend to avoid
++ * notifications if requests are already in flight (lower
++ * overheads and promotes batching).
++ */
++ RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&blk_rings->common)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
++ if (more_to_do)
++ blkif_notify_work(blkif);
++ if (notify)
++ notify_remote_via_irq(blkif->irq);
++}
++
++static int __init blkif_init(void)
++{
++ int i, ret;
++ struct class *class;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ INIT_LIST_HEAD(&pending_free);
++	for (i = 0; i < 2; i++) {
++ ret = req_increase();
++ if (ret)
++ break;
++ }
++ if (i == 0)
++ return ret;
++
++ tap_blkif_interface_init();
++
++ alloc_pending_reqs = 0;
++
++ tap_blkif_xenbus_init();
++
++ /* Dynamically allocate a major for this device */
++ ret = register_chrdev(0, "blktap", &blktap_fops);
++
++ if (ret < 0) {
++ WPRINTK("Couldn't register /dev/xen/blktap\n");
++		return ret;
++ }
++
++ blktap_major = ret;
++
++ /* tapfds[0] is always NULL */
++ blktap_next_minor++;
++
++	DPRINTK("Created misc_dev [/dev/xen/blktap0]\n");
++
++ /* Make sure the xen class exists */
++ if ((class = get_xen_class()) != NULL) {
++ /*
++ * This will allow udev to create the blktap ctrl device.
++ * We only want to create blktap0 first. We don't want
++ * to flood the sysfs system with needless blktap devices.
++ * We only create the device when a request of a new device is
++ * made.
++ */
++ class_device_create(class, NULL,
++ MKDEV(blktap_major, 0), NULL,
++ "blktap0");
++ } else {
++ /* this is bad, but not fatal */
++ WPRINTK("blktap: sysfs xen_class not created\n");
++ }
++
++ DPRINTK("Blktap device successfully created\n");
++
++ return 0;
++}
++
++module_init(blkif_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/blktap/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blktap/common.h 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,122 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __BLKIF__BACKEND__COMMON_H__
++#define __BLKIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/blkdev.h>
++#include <linux/vmalloc.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/blkif.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++
++#define DPRINTK(_f, _a...) pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++#define WPRINTK(fmt, args...) printk(KERN_WARNING "blk_tap: " fmt, ##args)
++
++struct backend_info;
++
++typedef struct blkif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++ /* Comms information. */
++ enum blkif_protocol blk_protocol;
++ blkif_back_rings_t blk_rings;
++ struct vm_struct *blk_ring_area;
++ /* Back pointer to the backend_info. */
++ struct backend_info *be;
++ /* Private fields. */
++ spinlock_t blk_ring_lock;
++ atomic_t refcnt;
++
++ wait_queue_head_t wq;
++ struct task_struct *xenblkd;
++ unsigned int waiting_reqs;
++ request_queue_t *plug;
++
++ /* statistics */
++ unsigned long st_print;
++ int st_rd_req;
++ int st_wr_req;
++ int st_oo_req;
++ int st_rd_sect;
++ int st_wr_sect;
++
++ wait_queue_head_t waiting_to_free;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++
++ int dev_num;
++ uint64_t sectors;
++} blkif_t;
++
++blkif_t *tap_alloc_blkif(domid_t domid);
++void tap_blkif_free(blkif_t *blkif);
++void tap_blkif_kmem_cache_free(blkif_t *blkif);
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn);
++void tap_blkif_unmap(blkif_t *blkif);
++
++#define blkif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define blkif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
++
++
++struct phys_req {
++ unsigned short dev;
++ unsigned short nr_sects;
++ struct block_device *bdev;
++ blkif_sector_t sector_number;
++};
++
++void tap_blkif_interface_init(void);
++
++void tap_blkif_xenbus_init(void);
++
++irqreturn_t tap_blkif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++int tap_blkif_schedule(void *arg);
++
++int dom_to_devid(domid_t domid, int xenbus_id, blkif_t *blkif);
++void signal_tapdisk(int idx);
++
++#endif /* __BLKIF__BACKEND__COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/blktap/interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blktap/interface.c 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,181 @@
++/******************************************************************************
++ * drivers/xen/blktap/interface.c
++ *
++ * Block-device interface management.
++ *
++ * Copyright (c) 2004, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++static kmem_cache_t *blkif_cachep;
++
++blkif_t *tap_alloc_blkif(domid_t domid)
++{
++ blkif_t *blkif;
++
++ blkif = kmem_cache_alloc(blkif_cachep, GFP_KERNEL);
++ if (!blkif)
++ return ERR_PTR(-ENOMEM);
++
++ memset(blkif, 0, sizeof(*blkif));
++ blkif->domid = domid;
++ spin_lock_init(&blkif->blk_ring_lock);
++ atomic_set(&blkif->refcnt, 1);
++ init_waitqueue_head(&blkif->wq);
++ blkif->st_print = jiffies;
++ init_waitqueue_head(&blkif->waiting_to_free);
++
++ return blkif;
++}
++
++static int map_frontend_page(blkif_t *blkif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, shared_page, blkif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ blkif->shmem_ref = shared_page;
++ blkif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(blkif_t *blkif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)blkif->blk_ring_area->addr,
++ GNTMAP_host_map, blkif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tap_blkif_map(blkif_t *blkif, unsigned long shared_page,
++ unsigned int evtchn)
++{
++ int err;
++
++ /* Already connected through? */
++ if (blkif->irq)
++ return 0;
++
++ if ( (blkif->blk_ring_area = alloc_vm_area(PAGE_SIZE)) == NULL )
++ return -ENOMEM;
++
++ err = map_frontend_page(blkif, shared_page);
++ if (err) {
++ free_vm_area(blkif->blk_ring_area);
++ return err;
++ }
++
++ switch (blkif->blk_protocol) {
++ case BLKIF_PROTOCOL_NATIVE:
++ {
++ blkif_sring_t *sring;
++ sring = (blkif_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.native, sring, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_32:
++ {
++ blkif_x86_32_sring_t *sring_x86_32;
++ sring_x86_32 = (blkif_x86_32_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_32, sring_x86_32, PAGE_SIZE);
++ break;
++ }
++ case BLKIF_PROTOCOL_X86_64:
++ {
++ blkif_x86_64_sring_t *sring_x86_64;
++ sring_x86_64 = (blkif_x86_64_sring_t *)blkif->blk_ring_area->addr;
++ BACK_RING_INIT(&blkif->blk_rings.x86_64, sring_x86_64, PAGE_SIZE);
++ break;
++ }
++ default:
++ BUG();
++ }
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ blkif->domid, evtchn, tap_blkif_be_int,
++ 0, "blkif-backend", blkif);
++ if (err < 0) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ return err;
++ }
++ blkif->irq = err;
++
++ return 0;
++}
++
++void tap_blkif_unmap(blkif_t *blkif)
++{
++ if (blkif->irq) {
++ unbind_from_irqhandler(blkif->irq, blkif);
++ blkif->irq = 0;
++ }
++ if (blkif->blk_rings.common.sring) {
++ unmap_frontend_page(blkif);
++ free_vm_area(blkif->blk_ring_area);
++ blkif->blk_rings.common.sring = NULL;
++ }
++}
++
++void tap_blkif_free(blkif_t *blkif)
++{
++ atomic_dec(&blkif->refcnt);
++ wait_event(blkif->waiting_to_free, atomic_read(&blkif->refcnt) == 0);
++ atomic_inc(&blkif->refcnt);
++
++ tap_blkif_unmap(blkif);
++}
++
++void tap_blkif_kmem_cache_free(blkif_t *blkif)
++{
++ if (!atomic_dec_and_test(&blkif->refcnt))
++ BUG();
++ kmem_cache_free(blkif_cachep, blkif);
++}
++
++void __init tap_blkif_interface_init(void)
++{
++ blkif_cachep = kmem_cache_create("blktapif_cache", sizeof(blkif_t),
++ 0, 0, NULL, NULL);
++}
+Index: head-2008-11-25/drivers/xen/blktap/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/blktap/xenbus.c 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,479 @@
++/* drivers/xen/blktap/xenbus.c
++ *
++ * Xenbus code for blktap
++ *
++ * Copyright (c) 2004-2005, Andrew Warfield and Julian Chesterfield
++ *
++ * Based on the blkback xenbus code:
++ *
++ * Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ * Copyright (C) 2005 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ blkif_t *blkif;
++ struct xenbus_watch backend_watch;
++ int xenbus_id;
++ int group_added;
++};
++
++
++static void connect(struct backend_info *);
++static int connect_ring(struct backend_info *);
++static int blktap_remove(struct xenbus_device *dev);
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id);
++static void tap_backend_changed(struct xenbus_watch *, const char **,
++ unsigned int);
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
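++/*
++ * Return the offset of occurrence number (len + 1) of 'c' in 'str',
++ * the length of 'str' if it contains exactly 'len' occurrences, or
++ * -ERANGE if it contains fewer.
++ */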
++static int strsep_len(const char *str, char c, unsigned int len)
++{
++ unsigned int i;
++
++ for (i = 0; str[i]; i++)
++ if (str[i] == c) {
++ if (len == 0)
++ return i;
++ len--;
++ }
++ return (len == 0) ? i : -ERANGE;
++}
++
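++/*
++ * Parse the trailing number from a xenstore node name of the form
++ * <x>/<y>/<z>/<id> (typically backend/tap/<frontend-domid>/<handle>).
++ */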
++static long get_id(const char *str)
++{
++ int len, end;
++ const char *ptr;
++ char *tptr, num[10];
++
++ len = strsep_len(str, '/', 2);
++ end = strlen(str);
++ if ((len < 0) || (end < 0))
++ return -1;
++
++ ptr = str + len + 1;
++ strncpy(num, ptr, end - len);
++ tptr = num + (end - (len + 1));
++ *tptr = '\0';
++ DPRINTK("get_id called for %s (%s)\n", str, num);
++
++ return simple_strtol(num, NULL, 10);
++}
++
++static int blktap_name(blkif_t *blkif, char *buf)
++{
++ char *devpath, *devname;
++ struct xenbus_device *dev = blkif->be->dev;
++
++ devpath = xenbus_read(XBT_NIL, dev->nodename, "dev", NULL);
++ if (IS_ERR(devpath))
++ return PTR_ERR(devpath);
++
++ if ((devname = strstr(devpath, "/dev/")) != NULL)
++ devname += strlen("/dev/");
++ else
++ devname = devpath;
++
++ snprintf(buf, TASK_COMM_LEN, "blktap.%d.%s", blkif->domid, devname);
++ kfree(devpath);
++
++ return 0;
++}
++
++/****************************************************************
++ * sysfs interface for I/O requests of blktap device
++ */
++
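++/*
++ * VBD_SHOW(name, fmt, args...) expands to a show_##name() handler plus
++ * a read-only DEVICE_ATTR; 'be' used in 'args' refers to the
++ * backend_info recovered from the device's driver_data.
++ */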
++#define VBD_SHOW(name, format, args...) \
++ static ssize_t show_##name(struct device *_dev, \
++ struct device_attribute *attr, \
++ char *buf) \
++ { \
++ struct xenbus_device *dev = to_xenbus_device(_dev); \
++ struct backend_info *be = dev->dev.driver_data; \
++ \
++ return sprintf(buf, format, ##args); \
++ } \
++ static DEVICE_ATTR(name, S_IRUGO, show_##name, NULL)
++
++VBD_SHOW(oo_req, "%d\n", be->blkif->st_oo_req);
++VBD_SHOW(rd_req, "%d\n", be->blkif->st_rd_req);
++VBD_SHOW(wr_req, "%d\n", be->blkif->st_wr_req);
++VBD_SHOW(rd_sect, "%d\n", be->blkif->st_rd_sect);
++VBD_SHOW(wr_sect, "%d\n", be->blkif->st_wr_sect);
++
++static struct attribute *tapstat_attrs[] = {
++ &dev_attr_oo_req.attr,
++ &dev_attr_rd_req.attr,
++ &dev_attr_wr_req.attr,
++ &dev_attr_rd_sect.attr,
++ &dev_attr_wr_sect.attr,
++ NULL
++};
++
++static struct attribute_group tapstat_group = {
++ .name = "statistics",
++ .attrs = tapstat_attrs,
++};
++
++int xentap_sysfs_addif(struct xenbus_device *dev)
++{
++ int err;
++ struct backend_info *be = dev->dev.driver_data;
++ err = sysfs_create_group(&dev->dev.kobj, &tapstat_group);
++ if (!err)
++ be->group_added = 1;
++ return err;
++}
++
++void xentap_sysfs_delif(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ sysfs_remove_group(&dev->dev.kobj, &tapstat_group);
++ be->group_added = 0;
++}
++
++static int blktap_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->group_added)
++ xentap_sysfs_delif(be->dev);
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->blkif) {
++ if (be->blkif->xenblkd)
++ kthread_stop(be->blkif->xenblkd);
++ signal_tapdisk(be->blkif->dev_num);
++ tap_blkif_free(be->blkif);
++ tap_blkif_kmem_cache_free(be->blkif);
++ be->blkif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static void tap_update_blkif_status(blkif_t *blkif)
++{
++ int err;
++ char name[TASK_COMM_LEN];
++
++ /* Not ready to connect? */
++ if (!blkif->irq || !blkif->sectors)
++ return;
++
++ /* Already connected? */
++ if (blkif->be->dev->state == XenbusStateConnected)
++ return;
++
++ /* Attempt to connect: exit if we fail to. */
++ connect(blkif->be);
++ if (blkif->be->dev->state != XenbusStateConnected)
++ return;
++
++ err = blktap_name(blkif, name);
++ if (err) {
++ xenbus_dev_error(blkif->be->dev, err, "get blktap dev name");
++ return;
++ }
++
++ if (!blkif->be->group_added) {
++ err = xentap_sysfs_addif(blkif->be->dev);
++ if (err) {
++ xenbus_dev_fatal(blkif->be->dev, err,
++ "creating sysfs entries");
++ return;
++ }
++ }
++
++ blkif->xenblkd = kthread_run(tap_blkif_schedule, blkif, name);
++ if (IS_ERR(blkif->xenblkd)) {
++ err = PTR_ERR(blkif->xenblkd);
++ blkif->xenblkd = NULL;
++ xenbus_dev_fatal(blkif->be->dev, err, "start xenblkd");
++ WPRINTK("Error starting thread\n");
++ }
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate
++ * the basic structures, and watch the store waiting for the
++ * user-space program to tell us the physical device info. Switch to
++ * InitWait.
++ */
++static int blktap_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++ be->xenbus_id = get_id(dev->nodename);
++
++ be->blkif = tap_alloc_blkif(dev->otherend_id);
++ if (IS_ERR(be->blkif)) {
++ err = PTR_ERR(be->blkif);
++ be->blkif = NULL;
++ xenbus_dev_fatal(dev, err, "creating block interface");
++ goto fail;
++ }
++
++ /* setup back pointer */
++ be->blkif->be = be;
++ be->blkif->sectors = 0;
++
++ /* Set a watch on the disk info, waiting for userspace to update details */
++ err = xenbus_watch_path2(dev, dev->nodename, "info",
++ &be->backend_watch, tap_backend_changed);
++ if (err)
++ goto fail;
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++ return 0;
++
++fail:
++ DPRINTK("blktap probe failed\n");
++ blktap_remove(dev);
++ return err;
++}
++
++
++/**
++ * Callback received when the user space code has placed the device
++ * information in xenstore.
++ */
++static void tap_backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ unsigned long info;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++ /*
++ * Check to see whether the userspace code has opened the image and
++ * written the sector and disk info to xenstore.
++ */
++ err = xenbus_gather(XBT_NIL, dev->nodename, "info", "%lu", &info,
++ NULL);
++ if (XENBUS_EXIST_ERR(err))
++ return;
++ if (err) {
++ xenbus_dev_error(dev, err, "getting info");
++ return;
++ }
++
++ DPRINTK("Userspace update on disk info, %lu\n",info);
++
++ err = xenbus_gather(XBT_NIL, dev->nodename, "sectors", "%llu",
++ &be->blkif->sectors, NULL);
++
++ /* Associate tap dev with domid*/
++ be->blkif->dev_num = dom_to_devid(be->blkif->domid, be->xenbus_id,
++ be->blkif);
++ DPRINTK("Thread started for domid [%d], connecting disk\n",
++ be->blkif->dev_num);
++
++ tap_update_blkif_status(be->blkif);
++}
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void tap_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ DPRINTK("\n");
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ /* Ensure we connect even when two watches fire in close
++ succession and we miss the intermediate value of
++ frontend_state. */
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ err = connect_ring(be);
++ if (err)
++ break;
++ tap_update_blkif_status(be->blkif);
++ break;
++
++ case XenbusStateClosing:
++ if (be->blkif->xenblkd) {
++ kthread_stop(be->blkif->xenblkd);
++ be->blkif->xenblkd = NULL;
++ }
++ tap_blkif_free(be->blkif);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++/**
++ * Switch to Connected state.
++ */
++static void connect(struct backend_info *be)
++{
++ int err;
++
++ struct xenbus_device *dev = be->dev;
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(dev, err, "switching to Connected state",
++ dev->nodename);
++
++ return;
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ char protocol[64];
++ int err;
++
++ DPRINTK("%s\n", dev->otherend);
++
++ err = xenbus_gather(XBT_NIL, dev->otherend, "ring-ref", "%lu",
++ &ring_ref, "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ err = xenbus_gather(XBT_NIL, dev->otherend, "protocol",
++ "%63s", protocol, NULL);
++ if (err)
++ strcpy(protocol, "unspecified, assuming native");
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_NATIVE))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_NATIVE;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_32))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_32;
++ else if (0 == strcmp(protocol, XEN_IO_PROTO_ABI_X86_64))
++ be->blkif->blk_protocol = BLKIF_PROTOCOL_X86_64;
++ else {
++ xenbus_dev_fatal(dev, -EINVAL, "unknown fe protocol %s",
++ protocol);
++ return -EINVAL;
++ }
++ printk(KERN_INFO
++ "blktap: ring-ref %ld, event-channel %d, protocol %d (%s)\n",
++ ring_ref, evtchn, be->blkif->blk_protocol, protocol);
++
++ /* Map the shared frame, irq etc. */
++ err = tap_blkif_map(be->blkif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "mapping ring-ref %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id blktap_ids[] = {
++ { "tap" },
++ { "" }
++};
++
++
++static struct xenbus_driver blktap = {
++ .name = "tap",
++ .owner = THIS_MODULE,
++ .ids = blktap_ids,
++ .probe = blktap_probe,
++ .remove = blktap_remove,
++ .otherend_changed = tap_frontend_changed
++};
++
++
++void tap_blkif_xenbus_init(void)
++{
++ xenbus_register_backend(&blktap);
++}
+Index: head-2008-11-25/drivers/xen/char/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/char/Makefile 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1 @@
++obj-$(CONFIG_XEN_DEVMEM) := mem.o
+Index: head-2008-11-25/drivers/xen/char/mem.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/char/mem.c 2007-08-06 15:10:49.000000000 +0200
+@@ -0,0 +1,190 @@
++/*
++ * Originally from linux/drivers/char/mem.c
++ *
++ * Copyright (C) 1991, 1992 Linus Torvalds
++ *
++ * Added devfs support.
++ * Jan-11-1998, C. Scott Ananian <cananian@alumni.princeton.edu>
++ * Shared /dev/zero mmapping support, Feb 2000, Kanoj Sarcar <kanoj@sgi.com>
++ */
++
++#include <linux/mm.h>
++#include <linux/miscdevice.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/mman.h>
++#include <linux/random.h>
++#include <linux/init.h>
++#include <linux/raw.h>
++#include <linux/tty.h>
++#include <linux/capability.h>
++#include <linux/smp_lock.h>
++#include <linux/ptrace.h>
++#include <linux/device.h>
++#include <asm/pgalloc.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <asm/hypervisor.h>
++
++static inline int uncached_access(struct file *file)
++{
++ if (file->f_flags & O_SYNC)
++ return 1;
++ /* Xen sets correct MTRR type on non-RAM for us. */
++ return 0;
++}
++
++/*
++ * This function reads the *physical* memory. The f_pos points directly to the
++ * memory location.
++ */
++static ssize_t read_mem(struct file * file, char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t read = 0, sz;
++ void __iomem *v;
++
++ while (count > 0) {
++ /*
++ * Handle first page in case it's not aligned
++ */
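++ /* (-p & (PAGE_SIZE - 1)) is the distance from p to the next page
++ boundary, or zero if p is already page-aligned. */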
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = ioremap(p, sz);
++ if (IS_ERR(v) || v == NULL) {
++ /*
++ * Some programs (e.g., dmidecode) groove off into
++ * weird RAM areas where no tables can possibly exist
++ * (because Xen will have stomped on them!). These
++ * programs get rather upset if we let them know that
++ * Xen failed their access, so we fake out a read of
++ * all zeroes.
++ */
++ if (clear_user(buf, count))
++ return -EFAULT;
++ read += count;
++ break;
++ }
++
++ ignored = copy_to_user(buf, v, sz);
++ iounmap(v);
++ if (ignored)
++ return -EFAULT;
++ buf += sz;
++ p += sz;
++ count -= sz;
++ read += sz;
++ }
++
++ *ppos += read;
++ return read;
++}
++
++static ssize_t write_mem(struct file * file, const char __user * buf,
++ size_t count, loff_t *ppos)
++{
++ unsigned long p = *ppos, ignored;
++ ssize_t written = 0, sz;
++ void __iomem *v;
++
++ while (count > 0) {
++ /*
++ * Handle first page in case it's not aligned
++ */
++ if (-p & (PAGE_SIZE - 1))
++ sz = -p & (PAGE_SIZE - 1);
++ else
++ sz = PAGE_SIZE;
++
++ sz = min_t(unsigned long, sz, count);
++
++ v = ioremap(p, sz);
++ if (v == NULL)
++ break;
++ if (IS_ERR(v)) {
++ if (written == 0)
++ return PTR_ERR(v);
++ break;
++ }
++
++ ignored = copy_from_user(v, buf, sz);
++ iounmap(v);
++ if (ignored) {
++ written += sz - ignored;
++ if (written)
++ break;
++ return -EFAULT;
++ }
++ buf += sz;
++ p += sz;
++ count -= sz;
++ written += sz;
++ }
++
++ *ppos += written;
++ return written;
++}
++
++#ifndef ARCH_HAS_DEV_MEM_MMAP_MEM
++static int xen_mmap_mem(struct file * file, struct vm_area_struct * vma)
++{
++ size_t size = vma->vm_end - vma->vm_start;
++
++ if (uncached_access(file))
++ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
++
++ /* We want to return the real error code, not EAGAIN. */
++ return direct_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
++ size, vma->vm_page_prot, DOMID_IO);
++}
++#endif
++
++/*
++ * The memory devices use the full 32/64 bits of the offset, and so we cannot
++ * check against negative addresses: they are ok. The return value is weird,
++ * though, in that case (0).
++ *
++ * also note that seeking relative to the "end of file" isn't supported:
++ * it has no meaning, so it returns -EINVAL.
++ */
++static loff_t memory_lseek(struct file * file, loff_t offset, int orig)
++{
++ loff_t ret;
++
++ mutex_lock(&file->f_dentry->d_inode->i_mutex);
++ switch (orig) {
++ case 0:
++ file->f_pos = offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ case 1:
++ file->f_pos += offset;
++ ret = file->f_pos;
++ force_successful_syscall_return();
++ break;
++ default:
++ ret = -EINVAL;
++ }
++ mutex_unlock(&file->f_dentry->d_inode->i_mutex);
++ return ret;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++const struct file_operations mem_fops = {
++ .llseek = memory_lseek,
++ .read = read_mem,
++ .write = write_mem,
++ .mmap = xen_mmap_mem,
++ .open = open_mem,
++};
+Index: head-2008-11-25/drivers/xen/console/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/console/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := console.o xencons_ring.o
+Index: head-2008-11-25/drivers/xen/console/console.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/console/console.c 2007-10-15 09:39:38.000000000 +0200
+@@ -0,0 +1,731 @@
++/******************************************************************************
++ * console.c
++ *
++ * Virtual console driver.
++ *
++ * Copyright (c) 2002-2004, K A Fraser.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/init.h>
++#include <linux/console.h>
++#include <linux/bootmem.h>
++#include <linux/sysrq.h>
++#include <linux/screen_info.h>
++#include <linux/vt.h>
++#include <asm/io.h>
++#include <asm/irq.h>
++#include <asm/uaccess.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/event_channel.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/xencons.h>
++
++/*
++ * Modes:
++ * 'xencons=off' [XC_OFF]: Console is disabled.
++ * 'xencons=tty' [XC_TTY]: Console attached to '/dev/tty[0-9]+'.
++ * 'xencons=ttyS' [XC_SERIAL]: Console attached to '/dev/ttyS[0-9]+'.
++ * 'xencons=xvc' [XC_XVC]: Console attached to '/dev/xvc0'.
++ * default: XC_XVC
++ *
++ * NB. In mode XC_TTY, we create dummy consoles for tty2-63. This suppresses
++ * warnings from standard distro startup scripts.
++ */
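++/*
++ * A trailing number selects the device instance, e.g. 'xencons=ttyS1'
++ * attaches the console to /dev/ttyS1; with no number, each mode falls
++ * back to its default (see xen_console_init() below).
++ */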
++static enum {
++ XC_OFF, XC_TTY, XC_SERIAL, XC_XVC
++} xc_mode = XC_XVC;
++static int xc_num = -1;
++
++/* /dev/xvc0 device number allocated by lanana.org. */
++#define XEN_XVC_MAJOR 204
++#define XEN_XVC_MINOR 191
++
++#ifdef CONFIG_MAGIC_SYSRQ
++static unsigned long sysrq_requested;
++extern int sysrq_enabled;
++#endif
++
++static int __init xencons_setup(char *str)
++{
++ char *q;
++ int n;
++ extern int console_use_vt;
++
++ console_use_vt = 1;
++ if (!strncmp(str, "ttyS", 4)) {
++ xc_mode = XC_SERIAL;
++ str += 4;
++ } else if (!strncmp(str, "tty", 3)) {
++ xc_mode = XC_TTY;
++ str += 3;
++ console_use_vt = 0;
++ } else if (!strncmp(str, "xvc", 3)) {
++ xc_mode = XC_XVC;
++ str += 3;
++ } else if (!strncmp(str, "off", 3)) {
++ xc_mode = XC_OFF;
++ str += 3;
++ }
++
++ n = simple_strtol(str, &q, 10);
++ if (q != str)
++ xc_num = n;
++
++ return 1;
++}
++__setup("xencons=", xencons_setup);
++
++/* The kernel and user-land drivers share a common transmit buffer. */
++static unsigned int wbuf_size = 4096;
++#define WBUF_MASK(_i) ((_i)&(wbuf_size-1))
++static char *wbuf;
++static unsigned int wc, wp; /* write_cons, write_prod */
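++/*
++ * wc and wp are free-running indices; WBUF_MASK() reduces them modulo
++ * the power-of-two wbuf_size, so (wp - wc) is the number of buffered
++ * characters even after the indices wrap.
++ */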
++
++static int __init xencons_bufsz_setup(char *str)
++{
++ unsigned int goal;
++ goal = simple_strtoul(str, NULL, 0);
++ if (goal) {
++ goal = roundup_pow_of_two(goal);
++ if (wbuf_size < goal)
++ wbuf_size = goal;
++ }
++ return 1;
++}
++__setup("xencons_bufsz=", xencons_bufsz_setup);
++
++/* This lock protects accesses to the common transmit buffer. */
++static DEFINE_SPINLOCK(xencons_lock);
++
++/* Common transmit-kick routine. */
++static void __xencons_tx_flush(void);
++
++static struct tty_driver *xencons_driver;
++
++/******************** Kernel console driver ********************************/
++
++static void kcons_write(struct console *c, const char *s, unsigned int count)
++{
++ int i = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
++ while (i < count) {
++ for (; i < count; i++) {
++ if ((wp - wc) >= (wbuf_size - 1))
++ break;
++ if ((wbuf[WBUF_MASK(wp++)] = s[i]) == '\n')
++ wbuf[WBUF_MASK(wp++)] = '\r';
++ }
++
++ __xencons_tx_flush();
++ }
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void kcons_write_dom0(struct console *c, const char *s,
++ unsigned int count)
++{
++ while (count > 0) {
++ int rc;
++ rc = HYPERVISOR_console_io(CONSOLEIO_write, count, (char *)s);
++ if (rc <= 0)
++ break;
++ count -= rc;
++ s += rc;
++ }
++}
++
++static struct tty_driver *kcons_device(struct console *c, int *index)
++{
++ *index = 0;
++ return xencons_driver;
++}
++
++static struct console kcons_info = {
++ .device = kcons_device,
++ .flags = CON_PRINTBUFFER | CON_ENABLED,
++ .index = -1,
++};
++
++static int __init xen_console_init(void)
++{
++ if (!is_running_on_xen())
++ goto out;
++
++ if (is_initial_xendomain()) {
++ kcons_info.write = kcons_write_dom0;
++ } else {
++ if (!xen_start_info->console.domU.evtchn)
++ goto out;
++ kcons_info.write = kcons_write;
++ }
++
++ switch (xc_mode) {
++ case XC_XVC:
++ strcpy(kcons_info.name, "xvc");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_SERIAL:
++ strcpy(kcons_info.name, "ttyS");
++ if (xc_num == -1)
++ xc_num = 0;
++ break;
++
++ case XC_TTY:
++ strcpy(kcons_info.name, "tty");
++ if (xc_num == -1)
++ xc_num = 1;
++ break;
++
++ default:
++ goto out;
++ }
++
++ wbuf = alloc_bootmem(wbuf_size);
++
++ register_console(&kcons_info);
++
++ out:
++ return 0;
++}
++console_initcall(xen_console_init);
++
++/*** Useful function for console debugging -- goes straight to Xen. ***/
++asmlinkage int xprintk(const char *fmt, ...)
++{
++ va_list args;
++ int printk_len;
++ static char printk_buf[1024];
++
++ /* Emit the output into the temporary buffer */
++ va_start(args, fmt);
++ printk_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);
++ va_end(args);
++
++ /* Send the processed output directly to Xen. */
++ kcons_write_dom0(NULL, printk_buf, printk_len);
++
++ return 0;
++}
++
++/*** Forcibly flush console data before dying. ***/
++void xencons_force_flush(void)
++{
++ int sz;
++
++ /* Emergency console is synchronous, so there's nothing to flush. */
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return;
++
++ /* Spin until console data is flushed through to the daemon. */
++ while (wc != wp) {
++ int sent = 0;
++ if ((sz = wp - wc) == 0)
++ continue;
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent > 0)
++ wc += sent;
++ }
++}
++
++
++void __init dom0_init_screen_info(const struct dom0_vga_console_info *info, size_t size)
++{
++ /* This is drawn from a dump from vgacon:startup in
++ * standard Linux. */
++ screen_info.orig_video_mode = 3;
++ screen_info.orig_video_isVGA = 1;
++ screen_info.orig_video_lines = 25;
++ screen_info.orig_video_cols = 80;
++ screen_info.orig_video_ega_bx = 3;
++ screen_info.orig_video_points = 16;
++ screen_info.orig_y = screen_info.orig_video_lines - 1;
++
++ switch (info->video_type) {
++ case XEN_VGATYPE_TEXT_MODE_3:
++ if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3)
++ + sizeof(info->u.text_mode_3))
++ break;
++ screen_info.orig_video_lines = info->u.text_mode_3.rows;
++ screen_info.orig_video_cols = info->u.text_mode_3.columns;
++ screen_info.orig_x = info->u.text_mode_3.cursor_x;
++ screen_info.orig_y = info->u.text_mode_3.cursor_y;
++ screen_info.orig_video_points =
++ info->u.text_mode_3.font_height;
++ break;
++
++ case XEN_VGATYPE_VESA_LFB:
++ if (size < offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.gbl_caps))
++ break;
++ screen_info.orig_video_isVGA = VIDEO_TYPE_VLFB;
++ screen_info.lfb_width = info->u.vesa_lfb.width;
++ screen_info.lfb_height = info->u.vesa_lfb.height;
++ screen_info.lfb_depth = info->u.vesa_lfb.bits_per_pixel;
++ screen_info.lfb_base = info->u.vesa_lfb.lfb_base;
++ screen_info.lfb_size = info->u.vesa_lfb.lfb_size;
++ screen_info.lfb_linelength = info->u.vesa_lfb.bytes_per_line;
++ screen_info.red_size = info->u.vesa_lfb.red_size;
++ screen_info.red_pos = info->u.vesa_lfb.red_pos;
++ screen_info.green_size = info->u.vesa_lfb.green_size;
++ screen_info.green_pos = info->u.vesa_lfb.green_pos;
++ screen_info.blue_size = info->u.vesa_lfb.blue_size;
++ screen_info.blue_pos = info->u.vesa_lfb.blue_pos;
++ screen_info.rsvd_size = info->u.vesa_lfb.rsvd_size;
++ screen_info.rsvd_pos = info->u.vesa_lfb.rsvd_pos;
++ if (size >= offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.gbl_caps)
++ + sizeof(info->u.vesa_lfb.gbl_caps))
++ screen_info.capabilities = info->u.vesa_lfb.gbl_caps;
++ if (size >= offsetof(struct dom0_vga_console_info,
++ u.vesa_lfb.mode_attrs)
++ + sizeof(info->u.vesa_lfb.mode_attrs))
++ screen_info.vesa_attributes = info->u.vesa_lfb.mode_attrs;
++ break;
++ }
++}
++
++
++/******************** User-space console driver (/dev/console) ************/
++
++#define DRV(_d) (_d)
++#define DUMMY_TTY(_tty) ((xc_mode == XC_TTY) && \
++ ((_tty)->index != (xc_num - 1)))
++
++static struct termios *xencons_termios[MAX_NR_CONSOLES];
++static struct termios *xencons_termios_locked[MAX_NR_CONSOLES];
++static struct tty_struct *xencons_tty;
++static int xencons_priv_irq;
++static char x_char;
++
++void xencons_rx(char *buf, unsigned len, struct pt_regs *regs)
++{
++ int i;
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ if (xencons_tty == NULL)
++ goto out;
++
++ for (i = 0; i < len; i++) {
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_enabled) {
++ if (buf[i] == '\x0f') { /* ^O */
++ if (!sysrq_requested) {
++ sysrq_requested = jiffies;
++ continue; /* don't print sysrq key */
++ }
++ sysrq_requested = 0;
++ } else if (sysrq_requested) {
++ unsigned long sysrq_timeout =
++ sysrq_requested + HZ*2;
++ sysrq_requested = 0;
++ if (time_before(jiffies, sysrq_timeout)) {
++ spin_unlock_irqrestore(
++ &xencons_lock, flags);
++ handle_sysrq(
++ buf[i], regs, xencons_tty);
++ spin_lock_irqsave(
++ &xencons_lock, flags);
++ continue;
++ }
++ }
++ }
++#endif
++ tty_insert_flip_char(xencons_tty, buf[i], 0);
++ }
++ tty_flip_buffer_push(xencons_tty);
++
++ out:
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void __xencons_tx_flush(void)
++{
++ int sent, sz, work_done = 0;
++
++ if (x_char) {
++ if (is_initial_xendomain())
++ kcons_write_dom0(NULL, &x_char, 1);
++ else
++ while (x_char)
++ if (xencons_ring_send(&x_char, 1) == 1)
++ break;
++ x_char = 0;
++ work_done = 1;
++ }
++
++ while (wc != wp) {
++ sz = wp - wc;
++ if (sz > (wbuf_size - WBUF_MASK(wc)))
++ sz = wbuf_size - WBUF_MASK(wc);
++ if (is_initial_xendomain()) {
++ kcons_write_dom0(NULL, &wbuf[WBUF_MASK(wc)], sz);
++ wc += sz;
++ } else {
++ sent = xencons_ring_send(&wbuf[WBUF_MASK(wc)], sz);
++ if (sent == 0)
++ break;
++ wc += sent;
++ }
++ work_done = 1;
++ }
++
++ if (work_done && (xencons_tty != NULL)) {
++ wake_up_interruptible(&xencons_tty->write_wait);
++ if ((xencons_tty->flags & (1 << TTY_DO_WRITE_WAKEUP)) &&
++ (xencons_tty->ldisc.write_wakeup != NULL))
++ (xencons_tty->ldisc.write_wakeup)(xencons_tty);
++ }
++}
++
++void xencons_tx(void)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++/* Privileged receive callback and transmit kicker. */
++static irqreturn_t xencons_priv_interrupt(int irq, void *dev_id,
++ struct pt_regs *regs)
++{
++ static char rbuf[16];
++ int l;
++
++ while ((l = HYPERVISOR_console_io(CONSOLEIO_read, 16, rbuf)) > 0)
++ xencons_rx(rbuf, l, regs);
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++static int xencons_write_room(struct tty_struct *tty)
++{
++ return wbuf_size - (wp - wc);
++}
++
++static int xencons_chars_in_buffer(struct tty_struct *tty)
++{
++ return wp - wc;
++}
++
++static void xencons_send_xchar(struct tty_struct *tty, char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ x_char = ch;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_throttle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty))
++ xencons_send_xchar(tty, STOP_CHAR(tty));
++}
++
++static void xencons_unthrottle(struct tty_struct *tty)
++{
++ if (DUMMY_TTY(tty))
++ return;
++
++ if (I_IXOFF(tty)) {
++ if (x_char != 0)
++ x_char = 0;
++ else
++ xencons_send_xchar(tty, START_CHAR(tty));
++ }
++}
++
++static void xencons_flush_buffer(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ wc = wp = 0;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static inline int __xencons_put_char(int ch)
++{
++ char _ch = (char)ch;
++ if ((wp - wc) == wbuf_size)
++ return 0;
++ wbuf[WBUF_MASK(wp++)] = _ch;
++ return 1;
++}
++
++static int xencons_write(
++ struct tty_struct *tty,
++ const unsigned char *buf,
++ int count)
++{
++ int i;
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return count;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++
++ for (i = 0; i < count; i++)
++ if (!__xencons_put_char(buf[i]))
++ break;
++
++ if (i != 0)
++ __xencons_tx_flush();
++
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return i;
++}
++
++static void xencons_put_char(struct tty_struct *tty, u_char ch)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ (void)__xencons_put_char(ch);
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_flush_chars(struct tty_struct *tty)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static void xencons_wait_until_sent(struct tty_struct *tty, int timeout)
++{
++ unsigned long orig_jiffies = jiffies;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ while (DRV(tty->driver)->chars_in_buffer(tty)) {
++ set_current_state(TASK_INTERRUPTIBLE);
++ schedule_timeout(1);
++ if (signal_pending(current))
++ break;
++ if (timeout && time_after(jiffies, orig_jiffies + timeout))
++ break;
++ }
++
++ set_current_state(TASK_RUNNING);
++}
++
++static int xencons_open(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return 0;
++
++ spin_lock_irqsave(&xencons_lock, flags);
++ tty->driver_data = NULL;
++ if (xencons_tty == NULL)
++ xencons_tty = tty;
++ __xencons_tx_flush();
++ spin_unlock_irqrestore(&xencons_lock, flags);
++
++ return 0;
++}
++
++static void xencons_close(struct tty_struct *tty, struct file *filp)
++{
++ unsigned long flags;
++
++ if (DUMMY_TTY(tty))
++ return;
++
++ mutex_lock(&tty_mutex);
++
++ if (tty->count != 1) {
++ mutex_unlock(&tty_mutex);
++ return;
++ }
++
++ /* Prevent other threads from re-opening this tty. */
++ set_bit(TTY_CLOSING, &tty->flags);
++ mutex_unlock(&tty_mutex);
++
++ tty->closing = 1;
++ tty_wait_until_sent(tty, 0);
++ if (DRV(tty->driver)->flush_buffer != NULL)
++ DRV(tty->driver)->flush_buffer(tty);
++ if (tty->ldisc.flush_buffer != NULL)
++ tty->ldisc.flush_buffer(tty);
++ tty->closing = 0;
++ spin_lock_irqsave(&xencons_lock, flags);
++ xencons_tty = NULL;
++ spin_unlock_irqrestore(&xencons_lock, flags);
++}
++
++static struct tty_operations xencons_ops = {
++ .open = xencons_open,
++ .close = xencons_close,
++ .write = xencons_write,
++ .write_room = xencons_write_room,
++ .put_char = xencons_put_char,
++ .flush_chars = xencons_flush_chars,
++ .chars_in_buffer = xencons_chars_in_buffer,
++ .send_xchar = xencons_send_xchar,
++ .flush_buffer = xencons_flush_buffer,
++ .throttle = xencons_throttle,
++ .unthrottle = xencons_unthrottle,
++ .wait_until_sent = xencons_wait_until_sent,
++};
++
++static int __init xencons_init(void)
++{
++ int rc;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ if (xc_mode == XC_OFF)
++ return 0;
++
++ if (!is_initial_xendomain()) {
++ rc = xencons_ring_init();
++ if (rc)
++ return rc;
++ }
++
++ xencons_driver = alloc_tty_driver((xc_mode == XC_TTY) ?
++ MAX_NR_CONSOLES : 1);
++ if (xencons_driver == NULL)
++ return -ENOMEM;
++
++ DRV(xencons_driver)->name = "xencons";
++ DRV(xencons_driver)->major = TTY_MAJOR;
++ DRV(xencons_driver)->type = TTY_DRIVER_TYPE_SERIAL;
++ DRV(xencons_driver)->subtype = SERIAL_TYPE_NORMAL;
++ DRV(xencons_driver)->init_termios = tty_std_termios;
++ DRV(xencons_driver)->flags =
++ TTY_DRIVER_REAL_RAW |
++ TTY_DRIVER_RESET_TERMIOS;
++ DRV(xencons_driver)->termios = xencons_termios;
++ DRV(xencons_driver)->termios_locked = xencons_termios_locked;
++
++ switch (xc_mode) {
++ case XC_XVC:
++ DRV(xencons_driver)->name = "xvc";
++ DRV(xencons_driver)->major = XEN_XVC_MAJOR;
++ DRV(xencons_driver)->minor_start = XEN_XVC_MINOR;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ case XC_SERIAL:
++ DRV(xencons_driver)->name = "ttyS";
++ DRV(xencons_driver)->minor_start = 64 + xc_num;
++ DRV(xencons_driver)->name_base = xc_num;
++ break;
++ default:
++ DRV(xencons_driver)->name = "tty";
++ DRV(xencons_driver)->minor_start = 1;
++ DRV(xencons_driver)->name_base = 1;
++ break;
++ }
++
++ tty_set_operations(xencons_driver, &xencons_ops);
++
++ if ((rc = tty_register_driver(DRV(xencons_driver))) != 0) {
++ printk("WARNING: Failed to register Xen virtual "
++ "console driver as '%s%d'\n",
++ DRV(xencons_driver)->name,
++ DRV(xencons_driver)->name_base);
++ put_tty_driver(xencons_driver);
++ xencons_driver = NULL;
++ return rc;
++ }
++
++ if (is_initial_xendomain()) {
++ xencons_priv_irq = bind_virq_to_irqhandler(
++ VIRQ_CONSOLE,
++ 0,
++ xencons_priv_interrupt,
++ 0,
++ "console",
++ NULL);
++ BUG_ON(xencons_priv_irq < 0);
++ }
++
++ printk("Xen virtual console successfully installed as %s%d\n",
++ DRV(xencons_driver)->name, xc_num);
++
++ return 0;
++}
++
++module_init(xencons_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/console/xencons_ring.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/console/xencons_ring.c 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,143 @@
++/*
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/errno.h>
++#include <linux/signal.h>
++#include <linux/sched.h>
++#include <linux/interrupt.h>
++#include <linux/tty.h>
++#include <linux/tty_flip.h>
++#include <linux/serial.h>
++#include <linux/major.h>
++#include <linux/ptrace.h>
++#include <linux/ioport.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/xencons.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/err.h>
++#include <xen/interface/io/console.h>
++
++static int xencons_irq;
++
++static inline struct xencons_interface *xencons_interface(void)
++{
++ return mfn_to_virt(xen_start_info->console.domU.mfn);
++}
++
++static inline void notify_daemon(void)
++{
++ /* Use evtchn: this is called early, before irq is set up. */
++ notify_remote_via_evtchn(xen_start_info->console.domU.evtchn);
++}
++
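++/*
++ * Copy up to 'len' bytes into the shared output ring and kick the
++ * console daemon. The indices are free-running; MASK_XENCONS_IDX()
++ * reduces them modulo the ring size. Returns the number of bytes
++ * actually queued.
++ */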
++int xencons_ring_send(const char *data, unsigned len)
++{
++ int sent = 0;
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->out_cons;
++ prod = intf->out_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->out));
++
++ while ((sent < len) && ((prod - cons) < sizeof(intf->out)))
++ intf->out[MASK_XENCONS_IDX(prod++, intf->out)] = data[sent++];
++
++ wmb();
++ intf->out_prod = prod;
++
++ notify_daemon();
++
++ return sent;
++}
++
++static irqreturn_t handle_input(int irq, void *unused, struct pt_regs *regs)
++{
++ struct xencons_interface *intf = xencons_interface();
++ XENCONS_RING_IDX cons, prod;
++
++ cons = intf->in_cons;
++ prod = intf->in_prod;
++ mb();
++ BUG_ON((prod - cons) > sizeof(intf->in));
++
++ while (cons != prod) {
++ xencons_rx(intf->in + MASK_XENCONS_IDX(cons, intf->in), 1, regs);
++ cons++;
++ }
++
++ mb();
++ intf->in_cons = cons;
++
++ notify_daemon();
++
++ xencons_tx();
++
++ return IRQ_HANDLED;
++}
++
++int xencons_ring_init(void)
++{
++ int irq;
++
++ if (xencons_irq)
++ unbind_from_irqhandler(xencons_irq, NULL);
++ xencons_irq = 0;
++
++ if (!is_running_on_xen() ||
++ is_initial_xendomain() ||
++ !xen_start_info->console.domU.evtchn)
++ return -ENODEV;
++
++ irq = bind_caller_port_to_irqhandler(
++ xen_start_info->console.domU.evtchn,
++ handle_input, 0, "xencons", NULL);
++ if (irq < 0) {
++ printk(KERN_ERR "XEN console request irq failed %i\n", irq);
++ return irq;
++ }
++
++ xencons_irq = irq;
++
++ /* In case we have in-flight data after save/restore... */
++ notify_daemon();
++
++ return 0;
++}
++
++void xencons_resume(void)
++{
++ (void)xencons_ring_init();
++}
+Index: head-2008-11-25/drivers/xen/core/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/Makefile 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,14 @@
++#
++# Makefile for the linux kernel.
++#
++
++obj-y := evtchn.o gnttab.o features.o reboot.o machine_reboot.o firmware.o
++
++obj-$(CONFIG_PCI) += pci.o
++obj-$(CONFIG_PROC_FS) += xen_proc.o
++obj-$(CONFIG_SYS_HYPERVISOR) += hypervisor_sysfs.o
++obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
++obj-$(CONFIG_XEN_SYSFS) += xen_sysfs.o
++obj-$(CONFIG_XEN_SMPBOOT) += smpboot.o
++obj-$(CONFIG_KEXEC) += machine_kexec.o
++obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
+Index: head-2008-11-25/drivers/xen/core/cpu_hotplug.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/cpu_hotplug.c 2008-01-21 11:15:26.000000000 +0100
+@@ -0,0 +1,173 @@
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++/*
++ * Set of CPUs that remote admin software will allow us to bring online.
++ * Notified to us via xenbus.
++ */
++static cpumask_t xenbus_allowed_cpumask;
++
++/* Set of CPUs that local admin will allow us to bring online. */
++static cpumask_t local_allowed_cpumask = CPU_MASK_ALL;
++
++static int local_cpu_hotplug_request(void)
++{
++ /*
++ * We assume a CPU hotplug request comes from local admin if it is made
++ * via a userspace process (i.e., one with a real mm_struct).
++ */
++ return (current->mm != NULL);
++}
++
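++/*
++ * Bring a VCPU online or offline according to the "availability" node
++ * (cpu/<n>/availability = "online" | "offline") written by the
++ * toolstack.
++ */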
++static void vcpu_hotplug(unsigned int cpu)
++{
++ int err;
++ char dir[32], state[32];
++
++ if ((cpu >= NR_CPUS) || !cpu_possible(cpu))
++ return;
++
++ sprintf(dir, "cpu/%u", cpu);
++ err = xenbus_scanf(XBT_NIL, dir, "availability", "%s", state);
++ if (err != 1) {
++ printk(KERN_ERR "XENBUS: Unable to read cpu state\n");
++ return;
++ }
++
++ if (strcmp(state, "online") == 0) {
++ cpu_set(cpu, xenbus_allowed_cpumask);
++ (void)cpu_up(cpu);
++ } else if (strcmp(state, "offline") == 0) {
++ cpu_clear(cpu, xenbus_allowed_cpumask);
++ (void)cpu_down(cpu);
++ } else {
++ printk(KERN_ERR "XENBUS: unknown state(%s) on CPU%d\n",
++ state, cpu);
++ }
++}
++
++static void handle_vcpu_hotplug_event(
++ struct xenbus_watch *watch, const char **vec, unsigned int len)
++{
++ unsigned int cpu;
++ char *cpustr;
++ const char *node = vec[XS_WATCH_PATH];
++
++ if ((cpustr = strstr(node, "cpu/")) != NULL) {
++ sscanf(cpustr, "cpu/%u", &cpu);
++ vcpu_hotplug(cpu);
++ }
++}
++
++static int smpboot_cpu_notify(struct notifier_block *notifier,
++ unsigned long action, void *hcpu)
++{
++ unsigned int cpu = (long)hcpu;
++
++ /*
++ * We do this in a callback notifier rather than __cpu_disable()
++ * because local_cpu_hotplug_request() does not work in the latter
++ * as it's always executed from within a stopmachine kthread.
++ */
++ if ((action == CPU_DOWN_PREPARE) && local_cpu_hotplug_request())
++ cpu_clear(cpu, local_allowed_cpumask);
++
++ return NOTIFY_OK;
++}
++
++static int setup_cpu_watcher(struct notifier_block *notifier,
++ unsigned long event, void *data)
++{
++ unsigned int i;
++
++ static struct xenbus_watch cpu_watch = {
++ .node = "cpu",
++ .callback = handle_vcpu_hotplug_event,
++ .flags = XBWF_new_thread };
++ (void)register_xenbus_watch(&cpu_watch);
++
++ if (!is_initial_xendomain()) {
++ for_each_possible_cpu(i)
++ vcpu_hotplug(i);
++ printk(KERN_INFO "Brought up %ld CPUs\n",
++ (long)num_online_cpus());
++ }
++
++ return NOTIFY_DONE;
++}
++
++static int __init setup_vcpu_hotplug_event(void)
++{
++ static struct notifier_block hotplug_cpu = {
++ .notifier_call = smpboot_cpu_notify };
++ static struct notifier_block xsn_cpu = {
++ .notifier_call = setup_cpu_watcher };
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ register_cpu_notifier(&hotplug_cpu);
++ register_xenstore_notifier(&xsn_cpu);
++
++ return 0;
++}
++
++arch_initcall(setup_vcpu_hotplug_event);
++
++int smp_suspend(void)
++{
++ unsigned int cpu;
++ int err;
++
++ for_each_online_cpu(cpu) {
++ if (cpu == 0)
++ continue;
++ err = cpu_down(cpu);
++ if (err) {
++ printk(KERN_CRIT "Failed to take all CPUs "
++ "down: %d.\n", err);
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++ return err;
++ }
++ }
++
++ return 0;
++}
++
++void smp_resume(void)
++{
++ unsigned int cpu;
++
++ for_each_possible_cpu(cpu)
++ vcpu_hotplug(cpu);
++}
++
++int cpu_up_check(unsigned int cpu)
++{
++ int rc = 0;
++
++ if (local_cpu_hotplug_request()) {
++ cpu_set(cpu, local_allowed_cpumask);
++ if (!cpu_isset(cpu, xenbus_allowed_cpumask)) {
++ printk("%s: attempt to bring up CPU %u disallowed by "
++ "remote admin.\n", __FUNCTION__, cpu);
++ rc = -EBUSY;
++ }
++ } else if (!cpu_isset(cpu, local_allowed_cpumask) ||
++ !cpu_isset(cpu, xenbus_allowed_cpumask)) {
++ rc = -EBUSY;
++ }
++
++ return rc;
++}
++
++void init_xenbus_allowed_cpumask(void)
++{
++ xenbus_allowed_cpumask = cpu_present_map;
++}
+Index: head-2008-11-25/drivers/xen/core/evtchn.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/evtchn.c 2008-11-10 11:44:21.000000000 +0100
+@@ -0,0 +1,1140 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Communication via Xen event channels.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/irq.h>
++#include <linux/interrupt.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/version.h>
++#include <asm/atomic.h>
++#include <asm/system.h>
++#include <asm/ptrace.h>
++#include <asm/synch_bitops.h>
++#include <xen/evtchn.h>
++#include <xen/interface/event_channel.h>
++#include <xen/interface/physdev.h>
++#include <asm/hypervisor.h>
++#include <linux/mc146818rtc.h> /* RTC_IRQ */
++
++/*
++ * This lock protects updates to the following mapping and reference-count
++ * arrays. The lock does not need to be acquired to read the mapping tables.
++ */
++static DEFINE_SPINLOCK(irq_mapping_update_lock);
++
++/* IRQ <-> event-channel mappings. */
++static int evtchn_to_irq[NR_EVENT_CHANNELS] = {
++ [0 ... NR_EVENT_CHANNELS-1] = -1 };
++
++/* Packed IRQ information: binding type, sub-type index, and event channel. */
++static u32 irq_info[NR_IRQS];
++
++/* Binding types. */
++enum {
++ IRQT_UNBOUND,
++ IRQT_PIRQ,
++ IRQT_VIRQ,
++ IRQT_IPI,
++ IRQT_LOCAL_PORT,
++ IRQT_CALLER_PORT,
++ _IRQT_COUNT
++};
++
++#define _IRQT_BITS 4
++#define _EVTCHN_BITS 12
++#define _INDEX_BITS (32 - _IRQT_BITS - _EVTCHN_BITS)
++
++/* Constructor for packed IRQ information. */
++static inline u32 mk_irq_info(u32 type, u32 index, u32 evtchn)
++{
++ BUILD_BUG_ON(_IRQT_COUNT > (1U << _IRQT_BITS));
++
++ BUILD_BUG_ON(NR_PIRQS > (1U << _INDEX_BITS));
++ BUILD_BUG_ON(NR_VIRQS > (1U << _INDEX_BITS));
++ BUILD_BUG_ON(NR_IPIS > (1U << _INDEX_BITS));
++ BUG_ON(index >> _INDEX_BITS);
++
++ BUILD_BUG_ON(NR_EVENT_CHANNELS > (1U << _EVTCHN_BITS));
++
++ return ((type << (32 - _IRQT_BITS)) | (index << _EVTCHN_BITS) | evtchn);
++}
++
++/* Convenient shorthand for packed representation of an unbound IRQ. */
++#define IRQ_UNBOUND mk_irq_info(IRQT_UNBOUND, 0, 0)
++
++/*
++ * Accessors for packed IRQ information.
++ */
++
++static inline unsigned int evtchn_from_irq(int irq)
++{
++ return irq_info[irq] & ((1U << _EVTCHN_BITS) - 1);
++}
++
++static inline unsigned int index_from_irq(int irq)
++{
++ return (irq_info[irq] >> _EVTCHN_BITS) & ((1U << _INDEX_BITS) - 1);
++}
++
++static inline unsigned int type_from_irq(int irq)
++{
++ return irq_info[irq] >> (32 - _IRQT_BITS);
++}
++
++/* IRQ <-> VIRQ mapping. */
++DEFINE_PER_CPU(int, virq_to_irq[NR_VIRQS]) = {[0 ... NR_VIRQS-1] = -1};
++
++/* IRQ <-> IPI mapping. */
++#ifndef NR_IPIS
++#define NR_IPIS 1
++#endif
++DEFINE_PER_CPU(int, ipi_to_irq[NR_IPIS]) = {[0 ... NR_IPIS-1] = -1};
++
++/* Reference counts for bindings to IRQs. */
++static int irq_bindcount[NR_IRQS];
++
++/* Bitmap indicating which PIRQs require Xen to be notified on unmask. */
++static DECLARE_BITMAP(pirq_needs_eoi, NR_PIRQS);
++
++#ifdef CONFIG_SMP
++
++static u8 cpu_evtchn[NR_EVENT_CHANNELS];
++static unsigned long cpu_evtchn_mask[NR_CPUS][NR_EVENT_CHANNELS/BITS_PER_LONG];
++
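++/* Events that are pending, not masked, and bound to this CPU. */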
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] &
++ cpu_evtchn_mask[cpu][idx] &
++ ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ int irq = evtchn_to_irq[chn];
++
++ BUG_ON(!test_bit(chn, s->evtchn_mask));
++
++ if (irq != -1)
++ set_native_irq_info(irq, cpumask_of_cpu(cpu));
++
++ clear_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu_evtchn[chn]]);
++ set_bit(chn, (unsigned long *)cpu_evtchn_mask[cpu]);
++ cpu_evtchn[chn] = cpu;
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++ int i;
++
++ /* By default all event channels notify CPU#0. */
++ for (i = 0; i < NR_IRQS; i++)
++ set_native_irq_info(i, cpumask_of_cpu(0));
++
++ memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
++ memset(cpu_evtchn_mask[0], ~0, sizeof(cpu_evtchn_mask[0]));
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return cpu_evtchn[evtchn];
++}
++
++#else
++
++static inline unsigned long active_evtchns(unsigned int cpu, shared_info_t *sh,
++ unsigned int idx)
++{
++ return (sh->evtchn_pending[idx] & ~sh->evtchn_mask[idx]);
++}
++
++static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
++{
++}
++
++static void init_evtchn_cpu_bindings(void)
++{
++}
++
++static inline unsigned int cpu_from_evtchn(unsigned int evtchn)
++{
++ return 0;
++}
++
++#endif
++
++/* Upcall to generic IRQ layer. */
++#ifdef CONFIG_X86
++extern fastcall unsigned int do_IRQ(struct pt_regs *regs);
++void __init xen_init_IRQ(void);
++void __init init_IRQ(void)
++{
++ irq_ctx_init(0);
++ xen_init_IRQ();
++}
++#if defined (__i386__)
++static inline void exit_idle(void) {}
++#define IRQ_REG orig_eax
++#elif defined (__x86_64__)
++#include <asm/idle.h>
++#define IRQ_REG orig_rax
++#endif
++#define do_IRQ(irq, regs) do { \
++ (regs)->IRQ_REG = ~(irq); \
++ do_IRQ((regs)); \
++} while (0)
++#endif
++
++/* Xen will never allocate port zero for any purpose. */
++#define VALID_EVTCHN(chn) ((chn) != 0)
++
++/*
++ * Force a proper event-channel callback from Xen after clearing the
++ * callback mask. We do this in a very simple manner, by making a call
++ * down into Xen. The pending flag will be checked by Xen on return.
++ */
++void force_evtchn_callback(void)
++{
++ VOID(HYPERVISOR_xen_version(0, NULL));
++}
++/* Not a GPL symbol: used in ubiquitous macros, so too restrictive. */
++EXPORT_SYMBOL(force_evtchn_callback);
++
++static DEFINE_PER_CPU(unsigned int, upcall_count) = { 0 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l1i) = { BITS_PER_LONG - 1 };
++static DEFINE_PER_CPU(unsigned int, last_processed_l2i) = { BITS_PER_LONG - 1 };
++
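++/*
++ * Pending events form a two-level bitmap: evtchn_pending_sel selects
++ * words of evtchn_pending[]. The scan below resumes one bit past the
++ * position reached on the previous upcall so that low-numbered ports
++ * cannot starve high-numbered ones.
++ */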
++/* NB. Interrupts are disabled on entry. */
++asmlinkage void evtchn_do_upcall(struct pt_regs *regs)
++{
++ unsigned long l1, l2;
++ unsigned long masked_l1, masked_l2;
++ unsigned int l1i, l2i, port, count;
++ int irq;
++ unsigned int cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ exit_idle();
++ irq_enter();
++
++ do {
++ /* Avoid a callback storm when we reenable delivery. */
++ vcpu_info->evtchn_upcall_pending = 0;
++
++ /* Nested invocations bail immediately. */
++ if (unlikely(per_cpu(upcall_count, cpu)++))
++ break;
++
++#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
++ /* Clear master flag /before/ clearing selector flag. */
++ wmb();
++#endif
++ l1 = xchg(&vcpu_info->evtchn_pending_sel, 0);
++
++ l1i = per_cpu(last_processed_l1i, cpu);
++ l2i = per_cpu(last_processed_l2i, cpu);
++
++ while (l1 != 0) {
++
++ l1i = (l1i + 1) % BITS_PER_LONG;
++ masked_l1 = l1 & ((~0UL) << l1i);
++
++ /* If we masked out all events, wrap around to the beginning. */
++ if (masked_l1 == 0) {
++ l1i = BITS_PER_LONG - 1;
++ l2i = BITS_PER_LONG - 1;
++ continue;
++ }
++ l1i = __ffs(masked_l1);
++
++ do {
++ l2 = active_evtchns(cpu, s, l1i);
++
++ l2i = (l2i + 1) % BITS_PER_LONG;
++ masked_l2 = l2 & ((~0UL) << l2i);
++
++ /* If we masked out all events, move on. */
++ if (masked_l2 == 0) {
++ l2i = BITS_PER_LONG - 1;
++ break;
++ }
++
++ l2i = __ffs(masked_l2);
++
++ /* process port */
++ port = (l1i * BITS_PER_LONG) + l2i;
++ if ((irq = evtchn_to_irq[port]) != -1)
++ do_IRQ(irq, regs);
++ else
++ evtchn_device_upcall(port);
++
++ /* if this is the final port processed, we'll pick up here+1 next time */
++ per_cpu(last_processed_l1i, cpu) = l1i;
++ per_cpu(last_processed_l2i, cpu) = l2i;
++
++ } while (l2i != BITS_PER_LONG - 1);
++
++ l2 = active_evtchns(cpu, s, l1i);
++ /* We handled all ports, so we can clear the selector bit. */
++ if (l2 == 0)
++ l1 &= ~(1UL << l1i);
++
++ }
++
++ /* If there were nested callbacks then we have more to do. */
++ count = per_cpu(upcall_count, cpu);
++ per_cpu(upcall_count, cpu) = 0;
++ } while (unlikely(count != 1));
++
++ irq_exit();
++}
++
++static int find_unbound_irq(void)
++{
++ static int warned;
++ int irq;
++
++ for (irq = DYNIRQ_BASE; irq < (DYNIRQ_BASE + NR_DYNIRQS); irq++)
++ if (irq_bindcount[irq] == 0)
++ return irq;
++
++ if (!warned) {
++ warned = 1;
++ printk(KERN_WARNING "No available IRQ to bind to: "
++ "increase NR_DYNIRQS.\n");
++ }
++
++ return -ENOSPC;
++}
++
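++/*
++ * The bind_*_to_irq() helpers below map an event-channel port onto a
++ * dynamic IRQ, allocating an IRQ on first use and reference-counting
++ * repeated bindings under irq_mapping_update_lock.
++ */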
++static int bind_caller_port_to_irq(unsigned int caller_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = evtchn_to_irq[caller_port]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ evtchn_to_irq[caller_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_CALLER_PORT, 0, caller_port);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_local_port_to_irq(unsigned int local_port)
++{
++ int irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ BUG_ON(evtchn_to_irq[local_port] != -1);
++
++ if ((irq = find_unbound_irq()) < 0) {
++ struct evtchn_close close = { .port = local_port };
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++ goto out;
++ }
++
++ evtchn_to_irq[local_port] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_listening_port_to_irq(unsigned int remote_domain)
++{
++ struct evtchn_alloc_unbound alloc_unbound;
++ int err;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = remote_domain;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++
++ return err ? : bind_local_port_to_irq(alloc_unbound.port);
++}
++
++static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
++ unsigned int remote_port)
++{
++ struct evtchn_bind_interdomain bind_interdomain;
++ int err;
++
++ bind_interdomain.remote_dom = remote_domain;
++ bind_interdomain.remote_port = remote_port;
++
++ err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++
++ return err ? : bind_local_port_to_irq(bind_interdomain.local_port);
++}
++
++static int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++
++ per_cpu(virq_to_irq, cpu)[virq] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int evtchn, irq;
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1) {
++ if ((irq = find_unbound_irq()) < 0)
++ goto out;
++
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++
++ per_cpu(ipi_to_irq, cpu)[ipi] = irq;
++
++ bind_evtchn_to_cpu(evtchn, cpu);
++ }
++
++ irq_bindcount[irq]++;
++
++ out:
++ spin_unlock(&irq_mapping_update_lock);
++ return irq;
++}
++
++static void unbind_from_irq(unsigned int irq)
++{
++ struct evtchn_close close;
++ unsigned int cpu;
++ int evtchn = evtchn_from_irq(irq);
++
++ spin_lock(&irq_mapping_update_lock);
++
++ if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
++ close.port = evtchn;
++ if ((type_from_irq(irq) != IRQT_CALLER_PORT) &&
++ HYPERVISOR_event_channel_op(EVTCHNOP_close, &close))
++ BUG();
++
++ switch (type_from_irq(irq)) {
++ case IRQT_VIRQ:
++ per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ case IRQT_IPI:
++ per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
++ [index_from_irq(irq)] = -1;
++ break;
++ default:
++ break;
++ }
++
++ /* Closed ports are implicitly re-bound to VCPU0. */
++ bind_evtchn_to_cpu(evtchn, 0);
++
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = IRQ_UNBOUND;
++
++ /* Zap stats across IRQ changes of use. */
++ for_each_possible_cpu(cpu)
++ kstat_cpu(cpu).irqs[irq] = 0;
++ }
++
++ spin_unlock(&irq_mapping_update_lock);
++}
++
++int bind_caller_port_to_irqhandler(
++ unsigned int caller_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_caller_port_to_irq(caller_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_caller_port_to_irqhandler);
++
++int bind_listening_port_to_irqhandler(
++ unsigned int remote_domain,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_listening_port_to_irq(remote_domain);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_listening_port_to_irqhandler);
++
++int bind_interdomain_evtchn_to_irqhandler(
++ unsigned int remote_domain,
++ unsigned int remote_port,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);
++
++int bind_virq_to_irqhandler(
++ unsigned int virq,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_virq_to_irq(virq, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
++
++int bind_ipi_to_irqhandler(
++ unsigned int ipi,
++ unsigned int cpu,
++ irqreturn_t (*handler)(int, void *, struct pt_regs *),
++ unsigned long irqflags,
++ const char *devname,
++ void *dev_id)
++{
++ int irq, retval;
++
++ irq = bind_ipi_to_irq(ipi, cpu);
++ if (irq < 0)
++ return irq;
++
++ retval = request_irq(irq, handler, irqflags, devname, dev_id);
++ if (retval != 0) {
++ unbind_from_irq(irq);
++ return retval;
++ }
++
++ return irq;
++}
++EXPORT_SYMBOL_GPL(bind_ipi_to_irqhandler);
++
++void unbind_from_irqhandler(unsigned int irq, void *dev_id)
++{
++ free_irq(irq, dev_id);
++ unbind_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
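++
++/*
++ * Minimal usage sketch for the bind_*_to_irqhandler() family above;
++ * the "demo" names are hypothetical and not part of this interface:
++ *
++ *	static irqreturn_t demo_interrupt(int irq, void *dev_id,
++ *	                                  struct pt_regs *regs)
++ *	{
++ *		return IRQ_HANDLED;
++ *	}
++ *
++ *	int irq = bind_virq_to_irqhandler(VIRQ_DEBUG, 0, demo_interrupt,
++ *	                                  0, "demo", NULL);
++ *	if (irq < 0)
++ *		return irq;
++ *	...
++ *	unbind_from_irqhandler(irq, NULL);
++ *
++ * Each successful bind takes a reference on the IRQ; unbinding drops
++ * the reference and closes the event channel when it reaches zero.
++ */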
++
++#ifdef CONFIG_SMP
++void rebind_evtchn_to_cpu(int port, unsigned int cpu)
++{
++ struct evtchn_bind_vcpu ebv = { .port = port, .vcpu = cpu };
++ int masked;
++
++ masked = test_and_set_evtchn_mask(port);
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &ebv) == 0)
++ bind_evtchn_to_cpu(port, cpu);
++ if (!masked)
++ unmask_evtchn(port);
++}
++
++static void rebind_irq_to_cpu(unsigned int irq, unsigned int tcpu)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ rebind_evtchn_to_cpu(evtchn, tcpu);
++}
++
++static void set_affinity_irq(unsigned int irq, cpumask_t dest)
++{
++ unsigned tcpu = first_cpu(dest);
++ rebind_irq_to_cpu(irq, tcpu);
++}
++#endif
++
++int resend_irq_on_evtchn(unsigned int irq)
++{
++ int masked, evtchn = evtchn_from_irq(irq);
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ if (!VALID_EVTCHN(evtchn))
++ return 1;
++
++ masked = test_and_set_evtchn_mask(evtchn);
++ synch_set_bit(evtchn, s->evtchn_pending);
++ if (!masked)
++ unmask_evtchn(evtchn);
++
++ return 1;
++}
++
++/*
++ * Interface to generic handling in irq.c
++ */
++
++static unsigned int startup_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++ return 0;
++}
++
++static void shutdown_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void enable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ unmask_evtchn(evtchn);
++}
++
++static void disable_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ mask_evtchn(evtchn);
++}
++
++static void ack_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_dynirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn) && !(irq_desc[irq].status & IRQ_DISABLED))
++ unmask_evtchn(evtchn);
++}
++
++static struct hw_interrupt_type dynirq_type = {
++ .typename = "Dynamic-irq",
++ .startup = startup_dynirq,
++ .shutdown = shutdown_dynirq,
++ .enable = enable_dynirq,
++ .disable = disable_dynirq,
++ .ack = ack_dynirq,
++ .end = end_dynirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++static inline void pirq_unmask_notify(int irq)
++{
++ struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };
++ if (unlikely(test_bit(irq - PIRQ_BASE, pirq_needs_eoi)))
++ VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
++}
++
++static inline void pirq_query_unmask(int irq)
++{
++ struct physdev_irq_status_query irq_status;
++ irq_status.irq = evtchn_get_xen_pirq(irq);
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++ irq_status.flags = 0;
++ clear_bit(irq - PIRQ_BASE, pirq_needs_eoi);
++ if (irq_status.flags & XENIRQSTAT_needs_eoi)
++ set_bit(irq - PIRQ_BASE, pirq_needs_eoi);
++}
++
++/*
++ * On startup, if there is no action associated with the IRQ then we are
++ * probing. In this case we should not share with others as it will confuse us.
++ */
++#define probing_irq(_irq) (irq_desc[(_irq)].action == NULL)
++
++static unsigned int startup_pirq(unsigned int irq)
++{
++ struct evtchn_bind_pirq bind_pirq;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ goto out;
++
++ bind_pirq.pirq = evtchn_get_xen_pirq(irq);
++ /* NB. We are happy to share unless we are probing. */
++ bind_pirq.flags = probing_irq(irq) ? 0 : BIND_PIRQ__WILL_SHARE;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq) != 0) {
++ if (!probing_irq(irq))
++ printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
++ irq);
++ return 0;
++ }
++ evtchn = bind_pirq.port;
++
++ pirq_query_unmask(irq);
++
++ evtchn_to_irq[evtchn] = irq;
++ bind_evtchn_to_cpu(evtchn, 0);
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ, bind_pirq.pirq, evtchn);
++
++ out:
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq);
++
++ return 0;
++}
++
++static void shutdown_pirq(unsigned int irq)
++{
++ struct evtchn_close close;
++ int evtchn = evtchn_from_irq(irq);
++
++ if (!VALID_EVTCHN(evtchn))
++ return;
++
++ mask_evtchn(evtchn);
++
++ close.port = evtchn;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
++ BUG();
++
++ bind_evtchn_to_cpu(evtchn, 0);
++ evtchn_to_irq[evtchn] = -1;
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ, index_from_irq(irq), 0);
++}
++
++static void enable_pirq(unsigned int irq)
++{
++ startup_pirq(irq);
++}
++
++static void disable_pirq(unsigned int irq)
++{
++}
++
++static void ack_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ move_native_irq(irq);
++
++ if (VALID_EVTCHN(evtchn)) {
++ mask_evtchn(evtchn);
++ clear_evtchn(evtchn);
++ }
++}
++
++static void end_pirq(unsigned int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if ((irq_desc[irq].status & (IRQ_DISABLED|IRQ_PENDING)) ==
++ (IRQ_DISABLED|IRQ_PENDING)) {
++ shutdown_pirq(irq);
++ } else if (VALID_EVTCHN(evtchn)) {
++ unmask_evtchn(evtchn);
++ pirq_unmask_notify(irq);
++ }
++}
++
++static struct hw_interrupt_type pirq_type = {
++ .typename = "Phys-irq",
++ .startup = startup_pirq,
++ .shutdown = shutdown_pirq,
++ .enable = enable_pirq,
++ .disable = disable_pirq,
++ .ack = ack_pirq,
++ .end = end_pirq,
++#ifdef CONFIG_SMP
++ .set_affinity = set_affinity_irq,
++#endif
++ .retrigger = resend_irq_on_evtchn,
++};
++
++int irq_ignore_unhandled(unsigned int irq)
++{
++ struct physdev_irq_status_query irq_status = { .irq = irq };
++
++ if (!is_running_on_xen())
++ return 0;
++
++ if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
++ return 0;
++ return !!(irq_status.flags & XENIRQSTAT_shared);
++}
++
++void notify_remote_via_irq(int irq)
++{
++ int evtchn = evtchn_from_irq(irq);
++
++ if (VALID_EVTCHN(evtchn))
++ notify_remote_via_evtchn(evtchn);
++}
++EXPORT_SYMBOL_GPL(notify_remote_via_irq);
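++
++/*
++ * Typical pairing (sketch): a producer writes a request into a shared
++ * ring, issues a memory barrier, and then kicks the peer via the IRQ
++ * it bound earlier, e.g. notify_remote_via_irq(info->irq), where
++ * 'info' is a hypothetical per-device structure holding the IRQ.
++ */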
++
++int irq_to_evtchn_port(int irq)
++{
++ return evtchn_from_irq(irq);
++}
++EXPORT_SYMBOL_GPL(irq_to_evtchn_port);
++
++void mask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ synch_set_bit(port, s->evtchn_mask);
++}
++EXPORT_SYMBOL_GPL(mask_evtchn);
++
++void unmask_evtchn(int port)
++{
++ shared_info_t *s = HYPERVISOR_shared_info;
++ unsigned int cpu = smp_processor_id();
++ vcpu_info_t *vcpu_info = &s->vcpu_info[cpu];
++
++ BUG_ON(!irqs_disabled());
++
++ /* Slow path (hypercall) if this is a non-local port. */
++ if (unlikely(cpu != cpu_from_evtchn(port))) {
++ struct evtchn_unmask unmask = { .port = port };
++ VOID(HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask));
++ return;
++ }
++
++ synch_clear_bit(port, s->evtchn_mask);
++
++ /* Did we miss an interrupt 'edge'? Re-fire if so. */
++ if (synch_test_bit(port, s->evtchn_pending) &&
++ !synch_test_and_set_bit(port / BITS_PER_LONG,
++ &vcpu_info->evtchn_pending_sel))
++ vcpu_info->evtchn_upcall_pending = 1;
++}
++EXPORT_SYMBOL_GPL(unmask_evtchn);
++
++void disable_all_local_evtchn(void)
++{
++ unsigned i, cpu = smp_processor_id();
++ shared_info_t *s = HYPERVISOR_shared_info;
++
++ for (i = 0; i < NR_EVENT_CHANNELS; ++i)
++ if (cpu_from_evtchn(i) == cpu)
++ synch_set_bit(i, &s->evtchn_mask[0]);
++}
++
++static void restore_cpu_virqs(unsigned int cpu)
++{
++ struct evtchn_bind_virq bind_virq;
++ int virq, irq, evtchn;
++
++ for (virq = 0; virq < NR_VIRQS; virq++) {
++ if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_VIRQ, virq, 0));
++
++ /* Get a new binding from Xen. */
++ bind_virq.virq = virq;
++ bind_virq.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq) != 0)
++ BUG();
++ evtchn = bind_virq.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++ }
++}
++
++static void restore_cpu_ipis(unsigned int cpu)
++{
++ struct evtchn_bind_ipi bind_ipi;
++ int ipi, irq, evtchn;
++
++ for (ipi = 0; ipi < NR_IPIS; ipi++) {
++ if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
++ continue;
++
++ BUG_ON(irq_info[irq] != mk_irq_info(IRQT_IPI, ipi, 0));
++
++ /* Get a new binding from Xen. */
++ bind_ipi.vcpu = cpu;
++ if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
++ &bind_ipi) != 0)
++ BUG();
++ evtchn = bind_ipi.port;
++
++ /* Record the new mapping. */
++ evtchn_to_irq[evtchn] = irq;
++ irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);
++ bind_evtchn_to_cpu(evtchn, cpu);
++
++ /* Ready for use. */
++ unmask_evtchn(evtchn);
++ }
++}
++
++void irq_resume(void)
++{
++ unsigned int cpu, irq, evtchn;
++
++ init_evtchn_cpu_bindings();
++
++ /* New event-channel space is not 'live' yet. */
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ mask_evtchn(evtchn);
++
++ /* Check that no PIRQs are still bound. */
++ for (irq = PIRQ_BASE; irq < (PIRQ_BASE + NR_PIRQS); irq++)
++ BUG_ON(irq_info[irq] != IRQ_UNBOUND);
++
++ /* No IRQ <-> event-channel mappings. */
++ for (irq = 0; irq < NR_IRQS; irq++)
++ irq_info[irq] &= ~((1U << _EVTCHN_BITS) - 1);
++ for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
++ evtchn_to_irq[evtchn] = -1;
++
++ for_each_possible_cpu(cpu) {
++ restore_cpu_virqs(cpu);
++ restore_cpu_ipis(cpu);
++ }
++}
++
++#if defined(CONFIG_X86_IO_APIC)
++#define identity_mapped_irq(irq) (!IO_APIC_IRQ((irq) - PIRQ_BASE))
++#elif defined(CONFIG_X86)
++#define identity_mapped_irq(irq) (((irq) - PIRQ_BASE) < 16)
++#else
++#define identity_mapped_irq(irq) (1)
++#endif
++
++void evtchn_register_pirq(int irq)
++{
++ BUG_ON(irq < PIRQ_BASE || irq - PIRQ_BASE >= NR_PIRQS);
++ if (identity_mapped_irq(irq))
++ return;
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ, irq, 0);
++ irq_desc[irq].chip = &pirq_type;
++}
++
++int evtchn_map_pirq(int irq, int xen_pirq)
++{
++ if (irq < 0) {
++ static DEFINE_SPINLOCK(irq_alloc_lock);
++
++ irq = PIRQ_BASE + NR_PIRQS - 1;
++ spin_lock(&irq_alloc_lock);
++ do {
++ if (identity_mapped_irq(irq))
++ continue;
++ if (!index_from_irq(irq)) {
++ BUG_ON(type_from_irq(irq) != IRQT_UNBOUND);
++ irq_info[irq] = mk_irq_info(IRQT_PIRQ,
++ xen_pirq, 0);
++ break;
++ }
++ } while (--irq >= PIRQ_BASE);
++ spin_unlock(&irq_alloc_lock);
++ if (irq < PIRQ_BASE)
++ return -ENOSPC;
++ irq_desc[irq].chip = &pirq_type;
++ } else if (!xen_pirq) {
++ if (unlikely(type_from_irq(irq) != IRQT_PIRQ))
++ return -EINVAL;
++ irq_desc[irq].chip = &no_irq_type;
++ irq_info[irq] = IRQ_UNBOUND;
++ return 0;
++ } else if (type_from_irq(irq) != IRQT_PIRQ
++ || index_from_irq(irq) != xen_pirq) {
++ printk(KERN_ERR "IRQ#%d is already mapped to %d:%u - "
++ "cannot map to PIRQ#%u\n",
++ irq, type_from_irq(irq), index_from_irq(irq), xen_pirq);
++ return -EINVAL;
++ }
++ return index_from_irq(irq) ? irq : -EINVAL;
++}
++
++int evtchn_get_xen_pirq(int irq)
++{
++ if (identity_mapped_irq(irq))
++ return irq;
++ BUG_ON(type_from_irq(irq) != IRQT_PIRQ);
++ return index_from_irq(irq);
++}
++
++void __init xen_init_IRQ(void)
++{
++ unsigned int i;
++
++ init_evtchn_cpu_bindings();
++
++ /* No event channels are 'live' right now. */
++ for (i = 0; i < NR_EVENT_CHANNELS; i++)
++ mask_evtchn(i);
++
++ /* No IRQ -> event-channel mappings. */
++ for (i = 0; i < NR_IRQS; i++)
++ irq_info[i] = IRQ_UNBOUND;
++
++ /* Dynamic IRQ space is currently unbound. Zero the refcnts. */
++ for (i = DYNIRQ_BASE; i < (DYNIRQ_BASE + NR_DYNIRQS); i++) {
++ irq_bindcount[i] = 0;
++
++ irq_desc[i].status = IRQ_DISABLED|IRQ_NOPROBE;
++ irq_desc[i].action = NULL;
++ irq_desc[i].depth = 1;
++ irq_desc[i].chip = &dynirq_type;
++ }
++
++ /* Phys IRQ space is statically bound (1:1 mapping). Nail refcnts. */
++ for (i = PIRQ_BASE; i < (PIRQ_BASE + NR_PIRQS); i++) {
++ irq_bindcount[i] = 1;
++
++ if (!identity_mapped_irq(i))
++ continue;
++
++#ifdef RTC_IRQ
++ /* If not domain 0, force our RTC driver to fail its probe. */
++ if (i - PIRQ_BASE == RTC_IRQ && !is_initial_xendomain())
++ continue;
++#endif
++
++ irq_desc[i].status = IRQ_DISABLED;
++ irq_desc[i].action = NULL;
++ irq_desc[i].depth = 1;
++ irq_desc[i].chip = &pirq_type;
++ }
++}
+Index: head-2008-11-25/drivers/xen/core/features.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/features.c 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,34 @@
++/******************************************************************************
++ * features.c
++ *
++ * Xen feature flags.
++ *
++ * Copyright (c) 2006, Ian Campbell, XenSource Inc.
++ */
++#include <linux/types.h>
++#include <linux/cache.h>
++#include <linux/module.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++u8 xen_features[XENFEAT_NR_SUBMAPS * 32] __read_mostly;
++/* Not a GPL-only symbol: it is used in ubiquitous macros, for which
++ * GPL-only would be too restrictive. */
++EXPORT_SYMBOL(xen_features);
++
++void setup_xen_features(void)
++{
++ xen_feature_info_t fi;
++ int i, j;
++
++ for (i = 0; i < XENFEAT_NR_SUBMAPS; i++) {
++ fi.submap_idx = i;
++ if (HYPERVISOR_xen_version(XENVER_get_features, &fi) < 0)
++ break;
++ for (j = 0; j < 32; j++)
++ xen_features[i * 32 + j] = !!(fi.submap & (1 << j));
++ }
++}
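++
++/*
++ * Callers test the cached map through the xen_feature() accessor from
++ * <xen/features.h> once setup_xen_features() has run, for example:
++ *
++ *	if (xen_feature(XENFEAT_auto_translated_physmap))
++ *		return -EOPNOTSUPP;
++ */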
+Index: head-2008-11-25/drivers/xen/core/firmware.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/firmware.c 2007-06-22 09:08:06.000000000 +0200
+@@ -0,0 +1,74 @@
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/init.h>
++#include <linux/edd.h>
++#include <video/edid.h>
++#include <xen/interface/platform.h>
++#include <asm/hypervisor.h>
++
++#if defined(CONFIG_EDD) || defined(CONFIG_EDD_MODULE)
++void __init copy_edd(void)
++{
++ int ret;
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++
++ op.u.firmware_info.type = XEN_FW_DISK_INFO;
++ for (op.u.firmware_info.index = 0;
++ edd.edd_info_nr < EDDMAXNR;
++ op.u.firmware_info.index++) {
++ struct edd_info *info = edd.edd_info + edd.edd_info_nr;
++
++ info->params.length = sizeof(info->params);
++ set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params,
++ &info->params);
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++
++#define C(x) info->x = op.u.firmware_info.u.disk_info.x
++ C(device);
++ C(version);
++ C(interface_support);
++ C(legacy_max_cylinder);
++ C(legacy_max_head);
++ C(legacy_sectors_per_track);
++#undef C
++
++ edd.edd_info_nr++;
++ }
++
++ op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE;
++ for (op.u.firmware_info.index = 0;
++ edd.mbr_signature_nr < EDD_MBR_SIG_MAX;
++ op.u.firmware_info.index++) {
++ ret = HYPERVISOR_platform_op(&op);
++ if (ret)
++ break;
++ edd.mbr_signature[edd.mbr_signature_nr++] =
++ op.u.firmware_info.u.disk_mbr_signature.mbr_signature;
++ }
++}
++#endif
++
++void __init copy_edid(void)
++{
++#if defined(CONFIG_FIRMWARE_EDID) && defined(CONFIG_X86)
++ struct xen_platform_op op;
++
++ if (!is_initial_xendomain())
++ return;
++
++ op.cmd = XENPF_firmware_info;
++ op.u.firmware_info.index = 0;
++ op.u.firmware_info.type = XEN_FW_VBEDDC_INFO;
++ set_xen_guest_handle(op.u.firmware_info.u.vbeddc_info.edid,
++ edid_info.dummy);
++ if (HYPERVISOR_platform_op(&op) != 0)
++ memset(edid_info.dummy, 0x13, sizeof(edid_info.dummy));
++#endif
++}
+Index: head-2008-11-25/drivers/xen/core/gnttab.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/gnttab.c 2008-11-04 11:13:10.000000000 +0100
+@@ -0,0 +1,772 @@
++/******************************************************************************
++ * gnttab.c
++ *
++ * Granting foreign access to our memory reservation.
++ *
++ * Copyright (c) 2005-2006, Christopher Clark
++ * Copyright (c) 2004-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/seqlock.h>
++#include <xen/interface/xen.h>
++#include <xen/gnttab.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/synch_bitops.h>
++#include <asm/io.h>
++#include <xen/interface/memory.h>
++#include <xen/driver_util.h>
++#include <asm/gnttab_dma.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/* External tools reserve first few grant table entries. */
++#define NR_RESERVED_ENTRIES 8
++#define GNTTAB_LIST_END 0xffffffff
++#define ENTRIES_PER_GRANT_FRAME (PAGE_SIZE / sizeof(grant_entry_t))
++
++static grant_ref_t **gnttab_list;
++static unsigned int nr_grant_frames;
++static unsigned int boot_max_nr_grant_frames;
++static int gnttab_free_count;
++static grant_ref_t gnttab_free_head;
++static DEFINE_SPINLOCK(gnttab_list_lock);
++
++static struct grant_entry *shared;
++
++static struct gnttab_free_callback *gnttab_free_callback_list;
++
++static int gnttab_expand(unsigned int req_entries);
++
++#define RPP (PAGE_SIZE / sizeof(grant_ref_t))
++#define gnttab_entry(entry) (gnttab_list[(entry) / RPP][(entry) % RPP])
++
++#define nr_freelist_frames(grant_frames) \
++ (((grant_frames) * ENTRIES_PER_GRANT_FRAME + RPP - 1) / RPP)
++
++static int get_free_entries(int count)
++{
++ unsigned long flags;
++ int ref, rc;
++ grant_ref_t head;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++
++ if ((gnttab_free_count < count) &&
++ ((rc = gnttab_expand(count - gnttab_free_count)) < 0)) {
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++ return rc;
++ }
++
++ ref = head = gnttab_free_head;
++ gnttab_free_count -= count;
++ while (count-- > 1)
++ head = gnttab_entry(head);
++ gnttab_free_head = gnttab_entry(head);
++ gnttab_entry(head) = GNTTAB_LIST_END;
++
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++
++ return ref;
++}
++
++#define get_free_entry() get_free_entries(1)
++
++static void do_free_callbacks(void)
++{
++ struct gnttab_free_callback *callback, *next;
++
++ callback = gnttab_free_callback_list;
++ gnttab_free_callback_list = NULL;
++
++ while (callback != NULL) {
++ next = callback->next;
++ if (gnttab_free_count >= callback->count) {
++ callback->next = NULL;
++ callback->queued = 0;
++ callback->fn(callback->arg);
++ } else {
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ }
++ callback = next;
++ }
++}
++
++static inline void check_free_callbacks(void)
++{
++ if (unlikely(gnttab_free_callback_list))
++ do_free_callbacks();
++}
++
++static void put_free_entry(grant_ref_t ref)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = ref;
++ gnttab_free_count++;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++
++/*
++ * Public grant-issuing interface functions
++ */
++
++int gnttab_grant_foreign_access(domid_t domid, unsigned long frame,
++ int flags)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++ shared[ref].flags = GTF_permit_access | flags;
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access);
++
++void gnttab_grant_foreign_access_ref(grant_ref_t ref, domid_t domid,
++ unsigned long frame, int flags)
++{
++ shared[ref].frame = frame;
++ shared[ref].domid = domid;
++ wmb();
++ BUG_ON(flags & (GTF_accept_transfer | GTF_reading | GTF_writing));
++ shared[ref].flags = GTF_permit_access | flags;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_access_ref);
++
++
++int gnttab_query_foreign_access(grant_ref_t ref)
++{
++ u16 nflags;
++
++ nflags = shared[ref].flags;
++
++ return (nflags & (GTF_reading|GTF_writing));
++}
++EXPORT_SYMBOL_GPL(gnttab_query_foreign_access);
++
++int gnttab_end_foreign_access_ref(grant_ref_t ref)
++{
++ u16 flags, nflags;
++
++ nflags = shared[ref].flags;
++ do {
++ if ((flags = nflags) & (GTF_reading|GTF_writing)) {
++ printk(KERN_DEBUG "WARNING: g.e. still in use!\n");
++ return 0;
++ }
++ } while ((nflags = synch_cmpxchg_subword(&shared[ref].flags, flags, 0)) !=
++ flags);
++
++ return 1;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
++
++void gnttab_end_foreign_access(grant_ref_t ref, unsigned long page)
++{
++ if (gnttab_end_foreign_access_ref(ref)) {
++ put_free_entry(ref);
++ if (page != 0)
++ free_page(page);
++ } else {
++ /* XXX This needs to be fixed so that the ref and page are
++ placed on a list to be freed up later. */
++ printk(KERN_DEBUG
++ "WARNING: leaking g.e. and page still in use!\n");
++ }
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
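++
++/*
++ * Grant lifecycle sketch (hypothetical caller; 'otherend_id' would
++ * come from xenbus):
++ *
++ *	void *page = (void *)__get_free_page(GFP_KERNEL);
++ *	int ref = gnttab_grant_foreign_access(otherend_id,
++ *	                                      virt_to_mfn(page), 0);
++ *	if (ref < 0)
++ *		return ref;	// -ENOSPC: no free entries
++ *	... advertise 'ref' to the peer, let it map or copy the page ...
++ *	gnttab_end_foreign_access(ref, (unsigned long)page);
++ *
++ * Passing the page address to gnttab_end_foreign_access() frees the
++ * page provided the remote side no longer holds it; otherwise the
++ * grant and page are leaked with a warning, as noted above.
++ */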
++
++int gnttab_grant_foreign_transfer(domid_t domid, unsigned long pfn)
++{
++ int ref;
++
++ if (unlikely((ref = get_free_entry()) < 0))
++ return -ENOSPC;
++ gnttab_grant_foreign_transfer_ref(ref, domid, pfn);
++
++ return ref;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer);
++
++void gnttab_grant_foreign_transfer_ref(grant_ref_t ref, domid_t domid,
++ unsigned long pfn)
++{
++ shared[ref].frame = pfn;
++ shared[ref].domid = domid;
++ wmb();
++ shared[ref].flags = GTF_accept_transfer;
++}
++EXPORT_SYMBOL_GPL(gnttab_grant_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer_ref(grant_ref_t ref)
++{
++ unsigned long frame;
++ u16 flags;
++
++ /*
++ * If the transfer has not yet started, try to reclaim the grant
++ * reference and return failure (== 0).
++ */
++ while (!((flags = shared[ref].flags) & GTF_transfer_committed)) {
++ if (synch_cmpxchg_subword(&shared[ref].flags, flags, 0) == flags)
++ return 0;
++ cpu_relax();
++ }
++
++ /* If a transfer is in progress then wait until it is completed. */
++ while (!(flags & GTF_transfer_completed)) {
++ flags = shared[ref].flags;
++ cpu_relax();
++ }
++
++ /* Read the frame number /after/ reading completion status. */
++ rmb();
++ frame = shared[ref].frame;
++ BUG_ON(frame == 0);
++
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer_ref);
++
++unsigned long gnttab_end_foreign_transfer(grant_ref_t ref)
++{
++ unsigned long frame = gnttab_end_foreign_transfer_ref(ref);
++ put_free_entry(ref);
++ return frame;
++}
++EXPORT_SYMBOL_GPL(gnttab_end_foreign_transfer);
++
++void gnttab_free_grant_reference(grant_ref_t ref)
++{
++ put_free_entry(ref);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_reference);
++
++void gnttab_free_grant_references(grant_ref_t head)
++{
++ grant_ref_t ref;
++ unsigned long flags;
++ int count = 1;
++ if (head == GNTTAB_LIST_END)
++ return;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ ref = head;
++ while (gnttab_entry(ref) != GNTTAB_LIST_END) {
++ ref = gnttab_entry(ref);
++ count++;
++ }
++ gnttab_entry(ref) = gnttab_free_head;
++ gnttab_free_head = head;
++ gnttab_free_count += count;
++ check_free_callbacks();
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_free_grant_references);
++
++int gnttab_alloc_grant_references(u16 count, grant_ref_t *head)
++{
++ int h = get_free_entries(count);
++
++ if (h < 0)
++ return -ENOSPC;
++
++ *head = h;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(gnttab_alloc_grant_references);
++
++int gnttab_empty_grant_references(const grant_ref_t *private_head)
++{
++ return (*private_head == GNTTAB_LIST_END);
++}
++EXPORT_SYMBOL_GPL(gnttab_empty_grant_references);
++
++int gnttab_claim_grant_reference(grant_ref_t *private_head)
++{
++ grant_ref_t g = *private_head;
++ if (unlikely(g == GNTTAB_LIST_END))
++ return -ENOSPC;
++ *private_head = gnttab_entry(g);
++ return g;
++}
++EXPORT_SYMBOL_GPL(gnttab_claim_grant_reference);
++
++void gnttab_release_grant_reference(grant_ref_t *private_head,
++ grant_ref_t release)
++{
++ gnttab_entry(release) = *private_head;
++ *private_head = release;
++}
++EXPORT_SYMBOL_GPL(gnttab_release_grant_reference);
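++
++/*
++ * Drivers needing many references at once typically reserve a batch
++ * up front and hand entries out from a private list (sketch;
++ * RING_SIZE, otherend_id and mfn are driver-specific placeholders):
++ *
++ *	grant_ref_t head;
++ *	int ref;
++ *
++ *	if (gnttab_alloc_grant_references(RING_SIZE, &head) < 0)
++ *		...fail or defer...
++ *	ref = gnttab_claim_grant_reference(&head);
++ *	if (ref >= 0)
++ *		gnttab_grant_foreign_access_ref(ref, otherend_id, mfn, 0);
++ *	...
++ *	gnttab_free_grant_references(head);
++ */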
++
++void gnttab_request_free_callback(struct gnttab_free_callback *callback,
++ void (*fn)(void *), void *arg, u16 count)
++{
++ unsigned long flags;
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ if (callback->queued)
++ goto out;
++ callback->fn = fn;
++ callback->arg = arg;
++ callback->count = count;
++ callback->queued = 1;
++ callback->next = gnttab_free_callback_list;
++ gnttab_free_callback_list = callback;
++ check_free_callbacks();
++out:
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_request_free_callback);
++
++void gnttab_cancel_free_callback(struct gnttab_free_callback *callback)
++{
++ struct gnttab_free_callback **pcb;
++ unsigned long flags;
++
++ spin_lock_irqsave(&gnttab_list_lock, flags);
++ for (pcb = &gnttab_free_callback_list; *pcb; pcb = &(*pcb)->next) {
++ if (*pcb == callback) {
++ *pcb = callback->next;
++ callback->queued = 0;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&gnttab_list_lock, flags);
++}
++EXPORT_SYMBOL_GPL(gnttab_cancel_free_callback);
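++
++/*
++ * When an allocation fails with -ENOSPC, a driver may queue a callback
++ * to run once enough references have been returned (sketch; the demo
++ * names and NR_NEEDED are hypothetical):
++ *
++ *	static struct gnttab_free_callback demo_cb;
++ *
++ *	static void demo_have_refs(void *arg)
++ *	{
++ *		...retry the allocation...
++ *	}
++ *
++ *	gnttab_request_free_callback(&demo_cb, demo_have_refs,
++ *	                             NULL, NR_NEEDED);
++ *
++ * A queued callback fires from check_free_callbacks() as soon as
++ * gnttab_free_count reaches the requested count.
++ */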
++
++static int grow_gnttab_list(unsigned int more_frames)
++{
++ unsigned int new_nr_grant_frames, extra_entries, i;
++ unsigned int nr_glist_frames, new_nr_glist_frames;
++
++ new_nr_grant_frames = nr_grant_frames + more_frames;
++ extra_entries = more_frames * ENTRIES_PER_GRANT_FRAME;
++
++ nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++ new_nr_glist_frames = nr_freelist_frames(new_nr_grant_frames);
++ for (i = nr_glist_frames; i < new_nr_glist_frames; i++) {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_ATOMIC);
++ if (!gnttab_list[i])
++ goto grow_nomem;
++ }
++
++ for (i = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++ i < ENTRIES_PER_GRANT_FRAME * new_nr_grant_frames - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(i) = gnttab_free_head;
++ gnttab_free_head = ENTRIES_PER_GRANT_FRAME * nr_grant_frames;
++ gnttab_free_count += extra_entries;
++
++ nr_grant_frames = new_nr_grant_frames;
++
++ check_free_callbacks();
++
++ return 0;
++
++grow_nomem:
++ for ( ; i >= nr_glist_frames; i--)
++ free_page((unsigned long) gnttab_list[i]);
++ return -ENOMEM;
++}
++
++static unsigned int __max_nr_grant_frames(void)
++{
++ struct gnttab_query_size query;
++ int rc;
++
++ query.dom = DOMID_SELF;
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_query_size, &query, 1);
++ if ((rc < 0) || (query.status != GNTST_okay))
++ return 4; /* Legacy max supported number of frames */
++
++ return query.max_nr_frames;
++}
++
++static inline unsigned int max_nr_grant_frames(void)
++{
++ unsigned int xen_max = __max_nr_grant_frames();
++
++ if (xen_max > boot_max_nr_grant_frames)
++ return boot_max_nr_grant_frames;
++ return xen_max;
++}
++
++#ifdef CONFIG_XEN
++
++static DEFINE_SEQLOCK(gnttab_dma_lock);
++
++#ifdef CONFIG_X86
++static int map_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ unsigned long **frames = (unsigned long **)data;
++
++ set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
++ (*frames)++;
++ return 0;
++}
++
++static int unmap_pte_fn(pte_t *pte, struct page *pmd_page,
++ unsigned long addr, void *data)
++{
++ set_pte_at(&init_mm, addr, pte, __pte(0));
++ return 0;
++}
++
++void *arch_gnttab_alloc_shared(unsigned long *frames)
++{
++ struct vm_struct *area;
++ area = alloc_vm_area(PAGE_SIZE * max_nr_grant_frames());
++ BUG_ON(area == NULL);
++ return area->addr;
++}
++#endif /* CONFIG_X86 */
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct gnttab_setup_table setup;
++ unsigned long *frames;
++ unsigned int nr_gframes = end_idx + 1;
++ int rc;
++
++ frames = kmalloc(nr_gframes * sizeof(unsigned long), GFP_ATOMIC);
++ if (!frames)
++ return -ENOMEM;
++
++ setup.dom = DOMID_SELF;
++ setup.nr_frames = nr_gframes;
++ set_xen_guest_handle(setup.frame_list, frames);
++
++ rc = HYPERVISOR_grant_table_op(GNTTABOP_setup_table, &setup, 1);
++ if (rc == -ENOSYS) {
++ kfree(frames);
++ return -ENOSYS;
++ }
++
++ BUG_ON(rc || setup.status);
++
++ if (shared == NULL)
++ shared = arch_gnttab_alloc_shared(frames);
++
++#ifdef CONFIG_X86
++ rc = apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_gframes,
++ map_pte_fn, &frames);
++ BUG_ON(rc);
++ frames -= nr_gframes; /* adjust after map_pte_fn() */
++#endif /* CONFIG_X86 */
++
++ kfree(frames);
++
++ return 0;
++}
++
++static void gnttab_page_free(struct page *page)
++{
++ ClearPageForeign(page);
++ gnttab_reset_grant_page(page);
++ put_page(page);
++}
++
++/*
++ * Copy a foreign granted page to local memory.
++ *
++ * Must not be called with IRQs off; this should only be used on the
++ * slow path.
++ */
++int gnttab_copy_grant_page(grant_ref_t ref, struct page **pagep)
++{
++ struct gnttab_unmap_and_replace unmap;
++ mmu_update_t mmu;
++ struct page *page;
++ struct page *new_page;
++ void *new_addr;
++ void *addr;
++ paddr_t pfn;
++ maddr_t mfn;
++ maddr_t new_mfn;
++ int err;
++
++ page = *pagep;
++ if (!get_page_unless_zero(page))
++ return -ENOENT;
++
++ err = -ENOMEM;
++ new_page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!new_page)
++ goto out;
++
++ new_addr = page_address(new_page);
++ addr = page_address(page);
++ memcpy(new_addr, addr, PAGE_SIZE);
++
++ pfn = page_to_pfn(page);
++ mfn = pfn_to_mfn(pfn);
++ new_mfn = virt_to_mfn(new_addr);
++
++ write_seqlock(&gnttab_dma_lock);
++
++ /* Make seq visible before checking page_mapped. */
++ smp_mb();
++
++ /* Has the page been DMA-mapped? */
++ if (unlikely(page_mapped(page))) {
++ write_sequnlock(&gnttab_dma_lock);
++ put_page(new_page);
++ err = -EBUSY;
++ goto out;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap))
++ set_phys_to_machine(pfn, new_mfn);
++
++ gnttab_set_replace_op(&unmap, (unsigned long)addr,
++ (unsigned long)new_addr, ref);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ &unmap, 1);
++ BUG_ON(err);
++ BUG_ON(unmap.status);
++
++ write_sequnlock(&gnttab_dma_lock);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
++
++ mmu.ptr = (new_mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
++ mmu.val = pfn;
++ err = HYPERVISOR_mmu_update(&mmu, 1, NULL, DOMID_SELF);
++ BUG_ON(err);
++ }
++
++ new_page->mapping = page->mapping;
++ new_page->index = page->index;
++ set_bit(PG_foreign, &new_page->flags);
++ *pagep = new_page;
++
++ SetPageForeign(page, gnttab_page_free);
++ page->mapping = NULL;
++
++out:
++ put_page(page);
++ return err;
++}
++EXPORT_SYMBOL_GPL(gnttab_copy_grant_page);
++
++void gnttab_reset_grant_page(struct page *page)
++{
++ init_page_count(page);
++ reset_page_mapcount(page);
++}
++EXPORT_SYMBOL_GPL(gnttab_reset_grant_page);
++
++/*
++ * Keep track of foreign pages marked as PageForeign so that we don't
++ * return them to the remote domain prematurely.
++ *
++ * PageForeign pages are pinned down by increasing their mapcount.
++ *
++ * All other pages are simply returned as is.
++ */
++void __gnttab_dma_map_page(struct page *page)
++{
++ unsigned int seq;
++
++ if (!is_running_on_xen() || !PageForeign(page))
++ return;
++
++ do {
++ seq = read_seqbegin(&gnttab_dma_lock);
++
++ if (gnttab_dma_local_pfn(page))
++ break;
++
++ atomic_set(&page->_mapcount, 0);
++
++ /* Make _mapcount visible before read_seqretry. */
++ smp_mb();
++ } while (unlikely(read_seqretry(&gnttab_dma_lock, seq)));
++}
++
++int gnttab_resume(void)
++{
++ if (max_nr_grant_frames() < nr_grant_frames)
++ return -ENOSYS;
++ return gnttab_map(0, nr_grant_frames - 1);
++}
++
++int gnttab_suspend(void)
++{
++#ifdef CONFIG_X86
++ apply_to_page_range(&init_mm, (unsigned long)shared,
++ PAGE_SIZE * nr_grant_frames,
++ unmap_pte_fn, NULL);
++#endif
++ return 0;
++}
++
++#else /* !CONFIG_XEN */
++
++#include <platform-pci.h>
++
++static unsigned long resume_frames;
++
++static int gnttab_map(unsigned int start_idx, unsigned int end_idx)
++{
++ struct xen_add_to_physmap xatp;
++ unsigned int i = end_idx;
++
++ /* Loop backwards, so that the first hypercall has the largest index,
++ * ensuring that the table will grow only once.
++ */
++ do {
++ xatp.domid = DOMID_SELF;
++ xatp.idx = i;
++ xatp.space = XENMAPSPACE_grant_table;
++ xatp.gpfn = (resume_frames >> PAGE_SHIFT) + i;
++ if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
++ BUG();
++ } while (i-- > start_idx);
++
++ return 0;
++}
++
++int gnttab_resume(void)
++{
++ unsigned int max_nr_gframes, nr_gframes;
++
++ nr_gframes = nr_grant_frames;
++ max_nr_gframes = max_nr_grant_frames();
++ if (max_nr_gframes < nr_gframes)
++ return -ENOSYS;
++
++ if (!resume_frames) {
++ resume_frames = alloc_xen_mmio(PAGE_SIZE * max_nr_gframes);
++ shared = ioremap(resume_frames, PAGE_SIZE * max_nr_gframes);
++ if (shared == NULL) {
++ printk("error to ioremap gnttab share frames\n");
++ return -1;
++ }
++ }
++
++ gnttab_map(0, nr_gframes - 1);
++
++ return 0;
++}
++
++#endif /* !CONFIG_XEN */
++
++static int gnttab_expand(unsigned int req_entries)
++{
++ int rc;
++ unsigned int cur, extra;
++
++ cur = nr_grant_frames;
++ extra = ((req_entries + (ENTRIES_PER_GRANT_FRAME-1)) /
++ ENTRIES_PER_GRANT_FRAME);
++ if (cur + extra > max_nr_grant_frames())
++ return -ENOSPC;
++
++ if ((rc = gnttab_map(cur, cur + extra - 1)) == 0)
++ rc = grow_gnttab_list(extra);
++
++ return rc;
++}
++
++int __devinit gnttab_init(void)
++{
++ int i;
++ unsigned int max_nr_glist_frames, nr_glist_frames;
++ unsigned int nr_init_grefs;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ nr_grant_frames = 1;
++ boot_max_nr_grant_frames = __max_nr_grant_frames();
++
++ /* Determine the maximum number of frames required for the
++ * grant reference free list on the current hypervisor.
++ */
++ max_nr_glist_frames = nr_freelist_frames(boot_max_nr_grant_frames);
++
++ gnttab_list = kmalloc(max_nr_glist_frames * sizeof(grant_ref_t *),
++ GFP_KERNEL);
++ if (gnttab_list == NULL)
++ return -ENOMEM;
++
++ nr_glist_frames = nr_freelist_frames(nr_grant_frames);
++ for (i = 0; i < nr_glist_frames; i++) {
++ gnttab_list[i] = (grant_ref_t *)__get_free_page(GFP_KERNEL);
++ if (gnttab_list[i] == NULL)
++ goto ini_nomem;
++ }
++
++ if (gnttab_resume() < 0)
++ return -ENODEV;
++
++ nr_init_grefs = nr_grant_frames * ENTRIES_PER_GRANT_FRAME;
++
++ for (i = NR_RESERVED_ENTRIES; i < nr_init_grefs - 1; i++)
++ gnttab_entry(i) = i + 1;
++
++ gnttab_entry(nr_init_grefs - 1) = GNTTAB_LIST_END;
++ gnttab_free_count = nr_init_grefs - NR_RESERVED_ENTRIES;
++ gnttab_free_head = NR_RESERVED_ENTRIES;
++
++ return 0;
++
++ ini_nomem:
++ for (i--; i >= 0; i--)
++ free_page((unsigned long)gnttab_list[i]);
++ kfree(gnttab_list);
++ return -ENOMEM;
++}
++
++#ifdef CONFIG_XEN
++core_initcall(gnttab_init);
++#endif
+Index: head-2008-11-25/drivers/xen/core/hypervisor_sysfs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/hypervisor_sysfs.c 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1,57 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/kobject.h>
++#include <xen/hypervisor_sysfs.h>
++#include <asm/hypervisor.h>
++
++static ssize_t hyp_sysfs_show(struct kobject *kobj,
++ struct attribute *attr,
++ char *buffer)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->show)
++ return hyp_attr->show(hyp_attr, buffer);
++ return 0;
++}
++
++static ssize_t hyp_sysfs_store(struct kobject *kobj,
++ struct attribute *attr,
++ const char *buffer,
++ size_t len)
++{
++ struct hyp_sysfs_attr *hyp_attr;
++ hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
++ if (hyp_attr->store)
++ return hyp_attr->store(hyp_attr, buffer, len);
++ return 0;
++}
++
++static struct sysfs_ops hyp_sysfs_ops = {
++ .show = hyp_sysfs_show,
++ .store = hyp_sysfs_store,
++};
++
++static struct kobj_type hyp_sysfs_kobj_type = {
++ .sysfs_ops = &hyp_sysfs_ops,
++};
++
++static int __init hypervisor_subsys_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ hypervisor_subsys.kset.kobj.ktype = &hyp_sysfs_kobj_type;
++ return 0;
++}
++
++device_initcall(hypervisor_subsys_init);
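++
++/*
++ * Attribute providers fill in a struct hyp_sysfs_attr (declared in
++ * <xen/hypervisor_sysfs.h>) so the show/store dispatch above can
++ * route sysfs accesses to them.  Sketch, assuming the header's usual
++ * __ATTR_RO()-style helper:
++ *
++ *	static ssize_t demo_show(struct hyp_sysfs_attr *attr, char *buf)
++ *	{
++ *		return sprintf(buf, "demo\n");
++ *	}
++ *	HYPERVISOR_ATTR_RO(demo);
++ */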
+Index: head-2008-11-25/drivers/xen/core/machine_kexec.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/machine_kexec.c 2008-10-13 13:43:45.000000000 +0200
+@@ -0,0 +1,222 @@
++/*
++ * drivers/xen/core/machine_kexec.c
++ * handle transition of Linux booting another kernel
++ */
++
++#include <linux/kexec.h>
++#include <xen/interface/kexec.h>
++#include <linux/mm.h>
++#include <linux/bootmem.h>
++
++extern void machine_kexec_setup_load_arg(xen_kexec_image_t *xki,
++ struct kimage *image);
++extern int machine_kexec_setup_resources(struct resource *hypervisor,
++ struct resource *phys_cpus,
++ int nr_phys_cpus);
++extern void machine_kexec_register_resources(struct resource *res);
++
++static int __initdata xen_max_nr_phys_cpus;
++static struct resource xen_hypervisor_res;
++static struct resource *xen_phys_cpus;
++
++size_t vmcoreinfo_size_xen;
++unsigned long paddr_vmcoreinfo_xen;
++
++void __init xen_machine_kexec_setup_resources(void)
++{
++ xen_kexec_range_t range;
++ struct resource *res;
++ int k = 0;
++ int rc;
++
++ if (!is_initial_xendomain())
++ return;
++
++ /* determine maximum number of physical cpus */
++
++ while (1) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ break;
++
++ k++;
++ }
++
++ if (k == 0)
++ return;
++
++ xen_max_nr_phys_cpus = k;
++
++ /* allocate xen_phys_cpus */
++
++ xen_phys_cpus = alloc_bootmem_low(k * sizeof(struct resource));
++ BUG_ON(xen_phys_cpus == NULL);
++
++ /* fill in xen_phys_cpus with per-cpu crash note information */
++
++ for (k = 0; k < xen_max_nr_phys_cpus; k++) {
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CPU;
++ range.nr = k;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ res = xen_phys_cpus + k;
++
++ memset(res, 0, sizeof(*res));
++ res->name = "Crash note";
++ res->start = range.start;
++ res->end = range.start + range.size - 1;
++ res->flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++ }
++
++ /* fill in xen_hypervisor_res with hypervisor machine address range */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_XEN;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ xen_hypervisor_res.name = "Hypervisor code and data";
++ xen_hypervisor_res.start = range.start;
++ xen_hypervisor_res.end = range.start + range.size - 1;
++ xen_hypervisor_res.flags = IORESOURCE_BUSY | IORESOURCE_MEM;
++
++ /* fill in crashk_res if range is reserved by hypervisor */
++
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_CRASH;
++
++ if (HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range))
++ goto err;
++
++ if (range.size) {
++ crashk_res.start = range.start;
++ crashk_res.end = range.start + range.size - 1;
++ }
++
++ /* get physical address of vmcoreinfo */
++ memset(&range, 0, sizeof(range));
++ range.range = KEXEC_RANGE_MA_VMCOREINFO;
++
++ rc = HYPERVISOR_kexec_op(KEXEC_CMD_kexec_get_range, &range);
++
++ if (rc == 0) {
++ /* Hypercall succeeded */
++ vmcoreinfo_size_xen = range.size;
++ paddr_vmcoreinfo_xen = range.start;
++
++ } else {
++ /* Hypercall failed.
++ * Reset the globals to indicate that the sysfs file
++ * should not be created.
++ */
++ vmcoreinfo_size_xen = 0;
++ paddr_vmcoreinfo_xen = 0;
++
++ /* The KEXEC_CMD_kexec_get_range hypercall did not implement
++ * KEXEC_RANGE_MA_VMCOREINFO until Xen 3.3.
++ * Do not bail out if it fails for this reason.
++ */
++ if (rc != -EINVAL)
++ return;
++ }
++
++ if (machine_kexec_setup_resources(&xen_hypervisor_res, xen_phys_cpus,
++ xen_max_nr_phys_cpus))
++ goto err;
++
++ return;
++
++ err:
++ /*
++ * It isn't possible to free xen_phys_cpus this early in the
++ * boot. Failure at this stage is unexpected and the amount of
++ * memory is small, so we tolerate the potential leak.
++ */
++ xen_max_nr_phys_cpus = 0;
++ return;
++}
++
++void __init xen_machine_kexec_register_resources(struct resource *res)
++{
++ request_resource(res, &xen_hypervisor_res);
++ machine_kexec_register_resources(res);
++}
++
++static void setup_load_arg(xen_kexec_image_t *xki, struct kimage *image)
++{
++ machine_kexec_setup_load_arg(xki, image);
++
++ xki->indirection_page = image->head;
++ xki->start_address = image->start;
++}
++
++/*
++ * Load the image into Xen so that Xen can kdump itself.
++ * This might have been done in prepare, but prepare
++ * is currently called too early. It might make sense
++ * to move prepare, but for now, just add an extra hook.
++ */
++int xen_machine_kexec_load(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ setup_load_arg(&xkl.image, image);
++ return HYPERVISOR_kexec_op(KEXEC_CMD_kexec_load, &xkl);
++}
++
++/*
++ * Unload the image that was stored by machine_kexec_load().
++ * This might have been done in machine_kexec_cleanup(), but it
++ * is called too late, and it's possible Xen could try to kdump
++ * using resources that have been freed.
++ */
++void xen_machine_kexec_unload(struct kimage *image)
++{
++ xen_kexec_load_t xkl;
++
++ memset(&xkl, 0, sizeof(xkl));
++ xkl.type = image->type;
++ WARN_ON(HYPERVISOR_kexec_op(KEXEC_CMD_kexec_unload, &xkl));
++}
++
++/*
++ * Do not allocate memory (or fail in any way) in machine_kexec().
++ * We are past the point of no return, committed to rebooting now.
++ *
++ * This has the hypervisor move to the preferred reboot CPU,
++ * stop all CPUs and kexec. That is, it combines machine_shutdown()
++ * and machine_kexec() in Linux kexec terms.
++ */
++NORET_TYPE void machine_kexec(struct kimage *image)
++{
++ xen_kexec_exec_t xke;
++
++ memset(&xke, 0, sizeof(xke));
++ xke.type = image->type;
++ VOID(HYPERVISOR_kexec_op(KEXEC_CMD_kexec, &xke));
++ panic("KEXEC_CMD_kexec hypercall should not return\n");
++}
++
++void machine_shutdown(void)
++{
++ /* do nothing */
++}
++
++/*
++ * Local variables:
++ * c-file-style: "linux"
++ * indent-tabs-mode: t
++ * c-indent-level: 8
++ * c-basic-offset: 8
++ * tab-width: 8
++ * End:
++ */
+Index: head-2008-11-25/drivers/xen/core/machine_reboot.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/machine_reboot.c 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,247 @@
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <linux/stringify.h>
++#include <linux/stop_machine.h>
++#include <asm/irq.h>
++#include <asm/mmu_context.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <linux/cpu.h>
++#include <xen/gnttab.h>
++#include <xen/xencons.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/interface/vcpu.h>
++
++#if defined(__i386__) || defined(__x86_64__)
++
++/*
++ * Power off function, if any
++ */
++void (*pm_power_off)(void);
++EXPORT_SYMBOL(pm_power_off);
++
++void machine_emergency_restart(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ HYPERVISOR_shutdown(SHUTDOWN_reboot);
++}
++
++void machine_restart(char * __unused)
++{
++ machine_emergency_restart();
++}
++
++void machine_halt(void)
++{
++ machine_power_off();
++}
++
++void machine_power_off(void)
++{
++ /* We really want to get pending console data out before we die. */
++ xencons_force_flush();
++ if (pm_power_off)
++ pm_power_off();
++ HYPERVISOR_shutdown(SHUTDOWN_poweroff);
++}
++
++int reboot_thru_bios = 0; /* for dmi_scan.c */
++EXPORT_SYMBOL(machine_restart);
++EXPORT_SYMBOL(machine_halt);
++EXPORT_SYMBOL(machine_power_off);
++
++static void pre_suspend(void)
++{
++ HYPERVISOR_shared_info = (shared_info_t *)empty_zero_page;
++ WARN_ON(HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ __pte_ma(0), 0));
++
++ xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ mfn_to_pfn(xen_start_info->console.domU.mfn);
++}
++
++static void post_suspend(int suspend_cancelled)
++{
++ int i, j, k, fpp;
++ unsigned long shinfo_mfn;
++ extern unsigned long max_pfn;
++ extern unsigned long *pfn_to_mfn_frame_list_list;
++ extern unsigned long *pfn_to_mfn_frame_list[];
++
++ if (suspend_cancelled) {
++ xen_start_info->store_mfn =
++ pfn_to_mfn(xen_start_info->store_mfn);
++ xen_start_info->console.domU.mfn =
++ pfn_to_mfn(xen_start_info->console.domU.mfn);
++ } else {
++#ifdef CONFIG_SMP
++ cpu_initialized_map = cpu_online_map;
++#endif
++ }
++
++ shinfo_mfn = xen_start_info->shared_info >> PAGE_SHIFT;
++ if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_SHARED_INFO),
++ pfn_pte_ma(shinfo_mfn, PAGE_KERNEL),
++ 0))
++ BUG();
++ HYPERVISOR_shared_info = (shared_info_t *)fix_to_virt(FIX_SHARED_INFO);
++
++ memset(empty_zero_page, 0, PAGE_SIZE);
++
++ fpp = PAGE_SIZE/sizeof(unsigned long);
++ for (i = 0, j = 0, k = -1; i < max_pfn; i += fpp, j++) {
++ if ((j % fpp) == 0) {
++ k++;
++ pfn_to_mfn_frame_list_list[k] =
++ virt_to_mfn(pfn_to_mfn_frame_list[k]);
++ j = 0;
++ }
++ pfn_to_mfn_frame_list[k][j] =
++ virt_to_mfn(&phys_to_machine_mapping[i]);
++ }
++ HYPERVISOR_shared_info->arch.max_pfn = max_pfn;
++ HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
++ virt_to_mfn(pfn_to_mfn_frame_list_list);
++}
++
++#else /* !(defined(__i386__) || defined(__x86_64__)) */
++
++#ifndef HAVE_XEN_PRE_SUSPEND
++#define xen_pre_suspend() ((void)0)
++#endif
++
++#ifndef HAVE_XEN_POST_SUSPEND
++#define xen_post_suspend(x) ((void)0)
++#endif
++
++#define switch_idle_mm() ((void)0)
++#define mm_pin_all() ((void)0)
++#define pre_suspend() xen_pre_suspend()
++#define post_suspend(x) xen_post_suspend(x)
++
++#endif
++
++struct suspend {
++ int fast_suspend;
++ void (*resume_notifier)(int);
++};
++
++static int take_machine_down(void *_suspend)
++{
++ struct suspend *suspend = _suspend;
++ int suspend_cancelled, err;
++ extern void time_resume(void);
++
++ if (suspend->fast_suspend) {
++ BUG_ON(!irqs_disabled());
++ } else {
++ BUG_ON(irqs_disabled());
++
++ for (;;) {
++ err = smp_suspend();
++ if (err)
++ return err;
++
++ xenbus_suspend();
++ preempt_disable();
++
++ if (num_online_cpus() == 1)
++ break;
++
++ preempt_enable();
++ xenbus_suspend_cancel();
++ }
++
++ local_irq_disable();
++ }
++
++ mm_pin_all();
++ gnttab_suspend();
++ pre_suspend();
++
++ /*
++ * This hypercall returns 1 if suspend was cancelled or the domain was
++ * merely checkpointed, and 0 if it is resuming in a new domain.
++ */
++ suspend_cancelled = HYPERVISOR_suspend(virt_to_mfn(xen_start_info));
++
++ suspend->resume_notifier(suspend_cancelled);
++ post_suspend(suspend_cancelled);
++ gnttab_resume();
++ if (!suspend_cancelled) {
++ irq_resume();
++#ifdef __x86_64__
++ /*
++ * Older versions of Xen do not save/restore the user %cr3.
++ * We do it here just in case, but there's no need if we are
++ * in fast-suspend mode as that implies a new enough Xen.
++ */
++ if (!suspend->fast_suspend)
++ xen_new_user_pt(__pa(__user_pgd(
++ current->active_mm->pgd)));
++#endif
++ }
++ time_resume();
++
++ if (!suspend->fast_suspend)
++ local_irq_enable();
++
++ return suspend_cancelled;
++}
++
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int))
++{
++ int err, suspend_cancelled;
++ struct suspend suspend;
++
++ BUG_ON(smp_processor_id() != 0);
++ BUG_ON(in_interrupt());
++
++#if defined(__i386__) || defined(__x86_64__)
++ if (xen_feature(XENFEAT_auto_translated_physmap)) {
++ printk(KERN_WARNING "Cannot suspend in "
++ "auto_translated_physmap mode.\n");
++ return -EOPNOTSUPP;
++ }
++#endif
++
++ /* If we are definitely UP then 'slow mode' is actually faster. */
++ if (num_possible_cpus() == 1)
++ fast_suspend = 0;
++
++ suspend.fast_suspend = fast_suspend;
++ suspend.resume_notifier = resume_notifier;
++
++ if (fast_suspend) {
++ xenbus_suspend();
++ err = stop_machine_run(take_machine_down, &suspend, 0);
++ if (err < 0)
++ xenbus_suspend_cancel();
++ } else {
++ err = take_machine_down(&suspend);
++ }
++
++ if (err < 0)
++ return err;
++
++ suspend_cancelled = err;
++ if (!suspend_cancelled) {
++ xencons_resume();
++ xenbus_resume();
++ } else {
++ xenbus_suspend_cancel();
++ }
++
++ if (!fast_suspend)
++ smp_resume();
++
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/core/pci.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/pci.c 2008-11-10 11:44:21.000000000 +0100
+@@ -0,0 +1,59 @@
++/*
++ * vim:shiftwidth=8:noexpandtab
++ */
++
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <xen/interface/physdev.h>
++
++static int (*pci_bus_probe)(struct device *dev);
++static int (*pci_bus_remove)(struct device *dev);
++
++static int pci_bus_probe_wrapper(struct device *dev)
++{
++ int r;
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct physdev_manage_pci manage_pci;
++ manage_pci.bus = pci_dev->bus->number;
++ manage_pci.devfn = pci_dev->devfn;
++
++ r = HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_add, &manage_pci);
++ if (r && r != -ENOSYS)
++ return r;
++
++ r = pci_bus_probe(dev);
++ return r;
++}
++
++static int pci_bus_remove_wrapper(struct device *dev)
++{
++ int r;
++ struct pci_dev *pci_dev = to_pci_dev(dev);
++ struct physdev_manage_pci manage_pci;
++ manage_pci.bus = pci_dev->bus->number;
++ manage_pci.devfn = pci_dev->devfn;
++
++ r = pci_bus_remove(dev);
++ /* dev and pci_dev are no longer valid!! */
++
++ WARN_ON(HYPERVISOR_physdev_op(PHYSDEVOP_manage_pci_remove,
++ &manage_pci));
++ return r;
++}
++
++static int __init hook_pci_bus(void)
++{
++ if (!is_running_on_xen() || !is_initial_xendomain())
++ return 0;
++
++ pci_bus_probe = pci_bus_type.probe;
++ pci_bus_type.probe = pci_bus_probe_wrapper;
++
++ pci_bus_remove = pci_bus_type.remove;
++ pci_bus_type.remove = pci_bus_remove_wrapper;
++
++ return 0;
++}
++
++core_initcall(hook_pci_bus);
+Index: head-2008-11-25/drivers/xen/core/reboot.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/reboot.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,335 @@
++#define __KERNEL_SYSCALLS__
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/unistd.h>
++#include <linux/module.h>
++#include <linux/reboot.h>
++#include <linux/sysrq.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include <linux/kmod.h>
++#include <linux/slab.h>
++#include <linux/workqueue.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++MODULE_LICENSE("Dual BSD/GPL");
++
++#define SHUTDOWN_INVALID -1
++#define SHUTDOWN_POWEROFF 0
++#define SHUTDOWN_SUSPEND 2
++#define SHUTDOWN_RESUMING 3
++#define SHUTDOWN_HALT 4
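++
++/*
++ * Note that value 1 is unused: "reboot" requests never enter this state
++ * machine; shutdown_handler() acts on them directly via ctrl_alt_del().
++ */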
++
++/* Ignore multiple shutdown requests. */
++static int shutting_down = SHUTDOWN_INVALID;
++
++/* Was last suspend request cancelled? */
++static int suspend_cancelled;
++
++/* Can we leave APs online when we suspend? */
++static int fast_suspend;
++
++static void __shutdown_handler(void *unused);
++static DECLARE_WORK(shutdown_work, __shutdown_handler, NULL);
++
++static int setup_suspend_evtchn(void);
++
++int __xen_suspend(int fast_suspend, void (*resume_notifier)(int));
++
++static int shutdown_process(void *__unused)
++{
++ static char *envp[] = { "HOME=/", "TERM=linux",
++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
++ static char *poweroff_argv[] = { "/sbin/poweroff", NULL };
++
++ extern asmlinkage long sys_reboot(int magic1, int magic2,
++ unsigned int cmd, void *arg);
++
++ if ((shutting_down == SHUTDOWN_POWEROFF) ||
++ (shutting_down == SHUTDOWN_HALT)) {
++ if (call_usermodehelper("/sbin/poweroff", poweroff_argv,
++ envp, 0) < 0) {
++#ifdef CONFIG_XEN
++ sys_reboot(LINUX_REBOOT_MAGIC1,
++ LINUX_REBOOT_MAGIC2,
++ LINUX_REBOOT_CMD_POWER_OFF,
++ NULL);
++#endif /* CONFIG_XEN */
++ }
++ }
++
++ shutting_down = SHUTDOWN_INVALID; /* could try again */
++
++ return 0;
++}
++
++static void xen_resume_notifier(int _suspend_cancelled)
++{
++ int old_state = xchg(&shutting_down, SHUTDOWN_RESUMING);
++ BUG_ON(old_state != SHUTDOWN_SUSPEND);
++ suspend_cancelled = _suspend_cancelled;
++}
++
++static int xen_suspend(void *__unused)
++{
++ int err, old_state;
++
++ daemonize("suspend");
++ err = set_cpus_allowed(current, cpumask_of_cpu(0));
++ if (err) {
++ printk(KERN_ERR "Xen suspend can't run on CPU0 (%d)\n", err);
++ goto fail;
++ }
++
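++	/*
++	 * Keep suspending while new suspend requests arrive during resume:
++	 * switch_shutdown_state() may move us from SHUTDOWN_RESUMING back
++	 * to SHUTDOWN_SUSPEND before the cmpxchg() below can retire the
++	 * state to SHUTDOWN_INVALID.
++	 */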
++ do {
++ err = __xen_suspend(fast_suspend, xen_resume_notifier);
++ if (err) {
++ printk(KERN_ERR "Xen suspend failed (%d)\n", err);
++ goto fail;
++ }
++ if (!suspend_cancelled)
++ setup_suspend_evtchn();
++ old_state = cmpxchg(
++ &shutting_down, SHUTDOWN_RESUMING, SHUTDOWN_INVALID);
++ } while (old_state == SHUTDOWN_SUSPEND);
++
++ switch (old_state) {
++ case SHUTDOWN_INVALID:
++ case SHUTDOWN_SUSPEND:
++ BUG();
++ case SHUTDOWN_RESUMING:
++ break;
++ default:
++ schedule_work(&shutdown_work);
++ break;
++ }
++
++ return 0;
++
++ fail:
++ old_state = xchg(&shutting_down, SHUTDOWN_INVALID);
++ BUG_ON(old_state != SHUTDOWN_SUSPEND);
++ return 0;
++}
++
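++/*
++ * Lock-free transition of shutting_down: retry the cmpxchg() until we
++ * either install new_state or observe a state we may not override.
++ * Only SHUTDOWN_INVALID and SHUTDOWN_RESUMING can be replaced; any
++ * other active state wins and the request is dropped.
++ */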
++static void switch_shutdown_state(int new_state)
++{
++ int prev_state, old_state = SHUTDOWN_INVALID;
++
++	/* We only drive shutting_down into an active state. */
++ if (new_state == SHUTDOWN_INVALID)
++ return;
++
++ do {
++ /* We drop this transition if already in an active state. */
++ if ((old_state != SHUTDOWN_INVALID) &&
++ (old_state != SHUTDOWN_RESUMING))
++ return;
++ /* Attempt to transition. */
++ prev_state = old_state;
++ old_state = cmpxchg(&shutting_down, old_state, new_state);
++ } while (old_state != prev_state);
++
++ /* Either we kick off the work, or we leave it to xen_suspend(). */
++ if (old_state == SHUTDOWN_INVALID)
++ schedule_work(&shutdown_work);
++ else
++ BUG_ON(old_state != SHUTDOWN_RESUMING);
++}
++
++static void __shutdown_handler(void *unused)
++{
++ int err;
++
++ err = kernel_thread((shutting_down == SHUTDOWN_SUSPEND) ?
++ xen_suspend : shutdown_process,
++ NULL, CLONE_FS | CLONE_FILES);
++
++ if (err < 0) {
++ printk(KERN_WARNING "Error creating shutdown process (%d): "
++ "retrying...\n", -err);
++ schedule_delayed_work(&shutdown_work, HZ/2);
++ }
++}
++
++static void shutdown_handler(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ extern void ctrl_alt_del(void);
++ char *str;
++ struct xenbus_transaction xbt;
++ int err, new_state = SHUTDOWN_INVALID;
++
++ if ((shutting_down != SHUTDOWN_INVALID) &&
++ (shutting_down != SHUTDOWN_RESUMING))
++ return;
++
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++
++ str = (char *)xenbus_read(xbt, "control", "shutdown", NULL);
++ /* Ignore read errors and empty reads. */
++ if (XENBUS_IS_ERR_READ(str)) {
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ xenbus_write(xbt, "control", "shutdown", "");
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN) {
++ kfree(str);
++ goto again;
++ }
++
++ if (strcmp(str, "poweroff") == 0)
++ new_state = SHUTDOWN_POWEROFF;
++ else if (strcmp(str, "reboot") == 0)
++ ctrl_alt_del();
++ else if (strcmp(str, "suspend") == 0)
++ new_state = SHUTDOWN_SUSPEND;
++ else if (strcmp(str, "halt") == 0)
++ new_state = SHUTDOWN_HALT;
++ else
++ printk("Ignoring shutdown request: %s\n", str);
++
++ switch_shutdown_state(new_state);
++
++ kfree(str);
++}
++
++static void sysrq_handler(struct xenbus_watch *watch, const char **vec,
++ unsigned int len)
++{
++ char sysrq_key = '\0';
++ struct xenbus_transaction xbt;
++ int err;
++
++ again:
++ err = xenbus_transaction_start(&xbt);
++ if (err)
++ return;
++ if (!xenbus_scanf(xbt, "control", "sysrq", "%c", &sysrq_key)) {
++ printk(KERN_ERR "Unable to read sysrq code in "
++ "control/sysrq\n");
++ xenbus_transaction_end(xbt, 1);
++ return;
++ }
++
++ if (sysrq_key != '\0')
++ xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++
++#ifdef CONFIG_MAGIC_SYSRQ
++ if (sysrq_key != '\0')
++ handle_sysrq(sysrq_key, NULL, NULL);
++#endif
++}
++
++static struct xenbus_watch shutdown_watch = {
++ .node = "control/shutdown",
++ .callback = shutdown_handler
++};
++
++static struct xenbus_watch sysrq_watch = {
++ .node = "control/sysrq",
++ .callback = sysrq_handler
++};
++
++static irqreturn_t suspend_int(int irq, void* dev_id, struct pt_regs *ptregs)
++{
++ switch_shutdown_state(SHUTDOWN_SUSPEND);
++ return IRQ_HANDLED;
++}
++
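++/*
++ * (Re)bind the suspend event channel and advertise its port in xenstore
++ * under device/suspend/event-channel.  The old binding is useless after
++ * resuming in a new domain, so xen_suspend() runs this again after
++ * every suspend that was not cancelled.
++ */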
++static int setup_suspend_evtchn(void)
++{
++ static int irq;
++ int port;
++ char portstr[16];
++
++ if (irq > 0)
++ unbind_from_irqhandler(irq, NULL);
++
++ irq = bind_listening_port_to_irqhandler(0, suspend_int, 0, "suspend",
++ NULL);
++ if (irq <= 0)
++ return -1;
++
++ port = irq_to_evtchn_port(irq);
++ printk(KERN_INFO "suspend: event channel %d\n", port);
++ sprintf(portstr, "%d", port);
++ xenbus_write(XBT_NIL, "device/suspend", "event-channel", portstr);
++
++ return 0;
++}
++
++static int setup_shutdown_watcher(void)
++{
++ int err;
++
++ xenbus_scanf(XBT_NIL, "control",
++ "platform-feature-multiprocessor-suspend",
++ "%d", &fast_suspend);
++
++ err = register_xenbus_watch(&shutdown_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set shutdown watcher\n");
++ return err;
++ }
++
++ err = register_xenbus_watch(&sysrq_watch);
++ if (err) {
++ printk(KERN_ERR "Failed to set sysrq watcher\n");
++ return err;
++ }
++
++ /* suspend event channel */
++ err = setup_suspend_evtchn();
++ if (err) {
++ printk(KERN_ERR "Failed to register suspend event channel\n");
++ return err;
++ }
++
++ return 0;
++}
++
++#ifdef CONFIG_XEN
++
++static int shutdown_event(struct notifier_block *notifier,
++ unsigned long event,
++ void *data)
++{
++ setup_shutdown_watcher();
++ return NOTIFY_DONE;
++}
++
++static int __init setup_shutdown_event(void)
++{
++ static struct notifier_block xenstore_notifier = {
++ .notifier_call = shutdown_event
++ };
++ register_xenstore_notifier(&xenstore_notifier);
++
++ return 0;
++}
++
++subsys_initcall(setup_shutdown_event);
++
++#else /* !defined(CONFIG_XEN) */
++
++int xen_reboot_init(void)
++{
++ return setup_shutdown_watcher();
++}
++
++#endif /* !defined(CONFIG_XEN) */
+Index: head-2008-11-25/drivers/xen/core/smpboot.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/smpboot.c 2008-03-06 08:54:32.000000000 +0100
+@@ -0,0 +1,464 @@
++/*
++ * Xen SMP booting functions
++ *
++ * See arch/i386/kernel/smpboot.c for copyright and credits for derived
++ * portions of this file.
++ */
++
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/kernel_stat.h>
++#include <linux/smp_lock.h>
++#include <linux/irq.h>
++#include <linux/bootmem.h>
++#include <linux/notifier.h>
++#include <linux/cpu.h>
++#include <linux/percpu.h>
++#include <asm/desc.h>
++#include <asm/arch_hooks.h>
++#include <asm/pgalloc.h>
++#include <xen/evtchn.h>
++#include <xen/interface/vcpu.h>
++#include <xen/cpu_hotplug.h>
++#include <xen/xenbus.h>
++
++extern irqreturn_t smp_reschedule_interrupt(int, void *, struct pt_regs *);
++extern irqreturn_t smp_call_function_interrupt(int, void *, struct pt_regs *);
++
++extern int local_setup_timer(unsigned int cpu);
++extern void local_teardown_timer(unsigned int cpu);
++
++extern void hypervisor_callback(void);
++extern void failsafe_callback(void);
++extern void system_call(void);
++extern void smp_trap_init(trap_info_t *);
++
++/* Number of siblings per CPU package */
++int smp_num_siblings = 1;
++
++cpumask_t cpu_online_map;
++EXPORT_SYMBOL(cpu_online_map);
++cpumask_t cpu_possible_map;
++EXPORT_SYMBOL(cpu_possible_map);
++cpumask_t cpu_initialized_map;
++
++struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_data);
++
++#ifdef CONFIG_HOTPLUG_CPU
++DEFINE_PER_CPU(int, cpu_state) = { 0 };
++#endif
++
++static DEFINE_PER_CPU(int, resched_irq);
++static DEFINE_PER_CPU(int, callfunc_irq);
++static char resched_name[NR_CPUS][15];
++static char callfunc_name[NR_CPUS][15];
++
++u8 cpu_2_logical_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = BAD_APICID };
++
++cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
++cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
++EXPORT_SYMBOL(cpu_core_map);
++
++#if defined(__i386__)
++u8 x86_cpu_to_apicid[NR_CPUS] = { [0 ... NR_CPUS-1] = 0xff };
++EXPORT_SYMBOL(x86_cpu_to_apicid);
++#elif !defined(CONFIG_X86_IO_APIC)
++unsigned int maxcpus = NR_CPUS;
++#endif
++
++void __init prefill_possible_map(void)
++{
++ int i, rc;
++
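++	/*
++	 * If any CPU besides the boot CPU is already marked possible,
++	 * the map has been filled in elsewhere; leave it alone.
++	 */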
++ for_each_possible_cpu(i)
++ if (i != smp_processor_id())
++ return;
++
++ for (i = 0; i < NR_CPUS; i++) {
++ rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
++ if (rc >= 0)
++ cpu_set(i, cpu_possible_map);
++ }
++}
++
++void __init smp_alloc_memory(void)
++{
++}
++
++static inline void
++set_cpu_sibling_map(unsigned int cpu)
++{
++ cpu_data[cpu].phys_proc_id = cpu;
++ cpu_data[cpu].cpu_core_id = 0;
++
++ cpu_sibling_map[cpu] = cpumask_of_cpu(cpu);
++ cpu_core_map[cpu] = cpumask_of_cpu(cpu);
++
++ cpu_data[cpu].booted_cores = 1;
++}
++
++static void
++remove_siblinginfo(unsigned int cpu)
++{
++ cpu_data[cpu].phys_proc_id = BAD_APICID;
++ cpu_data[cpu].cpu_core_id = BAD_APICID;
++
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++
++ cpu_data[cpu].booted_cores = 0;
++}
++
++static int __cpuinit xen_smp_intr_init(unsigned int cpu)
++{
++ int rc;
++
++ per_cpu(resched_irq, cpu) = per_cpu(callfunc_irq, cpu) = -1;
++
++ sprintf(resched_name[cpu], "resched%u", cpu);
++ rc = bind_ipi_to_irqhandler(RESCHEDULE_VECTOR,
++ cpu,
++ smp_reschedule_interrupt,
++ SA_INTERRUPT,
++ resched_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(resched_irq, cpu) = rc;
++
++ sprintf(callfunc_name[cpu], "callfunc%u", cpu);
++ rc = bind_ipi_to_irqhandler(CALL_FUNCTION_VECTOR,
++ cpu,
++ smp_call_function_interrupt,
++ SA_INTERRUPT,
++ callfunc_name[cpu],
++ NULL);
++ if (rc < 0)
++ goto fail;
++ per_cpu(callfunc_irq, cpu) = rc;
++
++ if ((cpu != 0) && ((rc = local_setup_timer(cpu)) != 0))
++ goto fail;
++
++ return 0;
++
++ fail:
++ if (per_cpu(resched_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ if (per_cpu(callfunc_irq, cpu) >= 0)
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++ return rc;
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++static void xen_smp_intr_exit(unsigned int cpu)
++{
++ if (cpu != 0)
++ local_teardown_timer(cpu);
++
++ unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
++ unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
++}
++#endif
++
++void __cpuinit cpu_bringup(void)
++{
++ cpu_init();
++ identify_cpu(cpu_data + smp_processor_id());
++ touch_softlockup_watchdog();
++ preempt_disable();
++ local_irq_enable();
++}
++
++static void __cpuinit cpu_bringup_and_idle(void)
++{
++ cpu_bringup();
++ cpu_idle();
++}
++
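++/*
++ * Build the initial register state for a secondary VCPU: entry point
++ * cpu_bringup_and_idle(), kernel stack taken from the idle task, and
++ * the per-CPU GDT frame set up by smp_prepare_cpus().  The finished
++ * context is registered with Xen via VCPUOP_initialise.
++ */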
++static void __cpuinit cpu_initialize_context(unsigned int cpu)
++{
++	/*
++	 * vcpu_guest_context_t is too large to allocate on the stack.
++	 * Hence we allocate statically and protect it with a lock.
++	 */
++ static vcpu_guest_context_t ctxt;
++ static DEFINE_SPINLOCK(ctxt_lock);
++
++ struct task_struct *idle = idle_task(cpu);
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ struct Xgt_desc_struct *gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++
++ if (cpu_test_and_set(cpu, cpu_initialized_map))
++ return;
++
++ spin_lock(&ctxt_lock);
++
++ memset(&ctxt, 0, sizeof(ctxt));
++
++ ctxt.flags = VGCF_IN_KERNEL;
++ ctxt.user_regs.ds = __USER_DS;
++ ctxt.user_regs.es = __USER_DS;
++ ctxt.user_regs.fs = 0;
++ ctxt.user_regs.gs = 0;
++ ctxt.user_regs.ss = __KERNEL_DS;
++ ctxt.user_regs.eip = (unsigned long)cpu_bringup_and_idle;
++ ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000; /* IOPL_RING1 */
++
++ memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
++
++ smp_trap_init(ctxt.trap_ctxt);
++
++ ctxt.ldt_ents = 0;
++
++ ctxt.gdt_frames[0] = virt_to_mfn(gdt_descr->address);
++ ctxt.gdt_ents = gdt_descr->size / 8;
++
++#ifdef __i386__
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.esp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.esp0;
++
++ ctxt.event_callback_cs = __KERNEL_CS;
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_cs = __KERNEL_CS;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
++#else /* __x86_64__ */
++ ctxt.user_regs.cs = __KERNEL_CS;
++ ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
++
++ ctxt.kernel_ss = __KERNEL_DS;
++ ctxt.kernel_sp = idle->thread.rsp0;
++
++ ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
++ ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
++ ctxt.syscall_callback_eip = (unsigned long)system_call;
++
++ ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));
++
++ ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
++#endif
++
++ if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt))
++ BUG();
++
++ spin_unlock(&ctxt_lock);
++}
++
++void __init smp_prepare_cpus(unsigned int max_cpus)
++{
++ unsigned int cpu;
++ struct task_struct *idle;
++ int apicid, acpiid;
++ struct vcpu_get_physid cpu_id;
++#ifdef __x86_64__
++ struct desc_ptr *gdt_descr;
++#else
++ struct Xgt_desc_struct *gdt_descr;
++#endif
++
++ apicid = 0;
++ if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, 0, &cpu_id) == 0) {
++ apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++ acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++ if (acpiid != 0xff)
++ x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++ }
++ boot_cpu_data.apicid = apicid;
++ cpu_data[0] = boot_cpu_data;
++
++ cpu_2_logical_apicid[0] = apicid;
++ x86_cpu_to_apicid[0] = apicid;
++
++ current_thread_info()->cpu = 0;
++
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ cpus_clear(cpu_sibling_map[cpu]);
++ cpus_clear(cpu_core_map[cpu]);
++ }
++
++ set_cpu_sibling_map(0);
++
++ if (xen_smp_intr_init(0))
++ BUG();
++
++ cpu_initialized_map = cpumask_of_cpu(0);
++
++ /* Restrict the possible_map according to max_cpus. */
++ while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
++ for (cpu = NR_CPUS-1; !cpu_isset(cpu, cpu_possible_map); cpu--)
++ continue;
++ cpu_clear(cpu, cpu_possible_map);
++ }
++
++ for_each_possible_cpu (cpu) {
++ if (cpu == 0)
++ continue;
++
++#ifdef __x86_64__
++ gdt_descr = &cpu_gdt_descr[cpu];
++#else
++ gdt_descr = &per_cpu(cpu_gdt_descr, cpu);
++#endif
++ gdt_descr->address = get_zeroed_page(GFP_KERNEL);
++ if (unlikely(!gdt_descr->address)) {
++ printk(KERN_CRIT "CPU%d failed to allocate GDT\n",
++ cpu);
++ continue;
++ }
++ gdt_descr->size = GDT_SIZE;
++ memcpy((void *)gdt_descr->address, cpu_gdt_table, GDT_SIZE);
++ make_page_readonly(
++ (void *)gdt_descr->address,
++ XENFEAT_writable_descriptor_tables);
++
++ apicid = cpu;
++ if (HYPERVISOR_vcpu_op(VCPUOP_get_physid, cpu, &cpu_id) == 0) {
++ apicid = xen_vcpu_physid_to_x86_apicid(cpu_id.phys_id);
++ acpiid = xen_vcpu_physid_to_x86_acpiid(cpu_id.phys_id);
++#ifdef CONFIG_ACPI
++ if (acpiid != 0xff)
++ x86_acpiid_to_apicid[acpiid] = apicid;
++#endif
++ }
++ cpu_data[cpu] = boot_cpu_data;
++ cpu_data[cpu].apicid = apicid;
++
++ cpu_2_logical_apicid[cpu] = apicid;
++ x86_cpu_to_apicid[cpu] = apicid;
++
++ idle = fork_idle(cpu);
++ if (IS_ERR(idle))
++ panic("failed fork for CPU %d", cpu);
++
++#ifdef __x86_64__
++ cpu_pda(cpu)->pcurrent = idle;
++ cpu_pda(cpu)->cpunumber = cpu;
++ clear_ti_thread_flag(idle->thread_info, TIF_FORK);
++#endif
++
++ irq_ctx_init(cpu);
++
++#ifdef CONFIG_HOTPLUG_CPU
++ if (is_initial_xendomain())
++ cpu_set(cpu, cpu_present_map);
++#else
++ cpu_set(cpu, cpu_present_map);
++#endif
++ }
++
++ init_xenbus_allowed_cpumask();
++
++#ifdef CONFIG_X86_IO_APIC
++ /*
++ * Here we can be sure that there is an IO-APIC in the system. Let's
++ * go and set it up:
++ */
++ if (!skip_ioapic_setup && nr_ioapics)
++ setup_IO_APIC();
++#endif
++}
++
++void __devinit smp_prepare_boot_cpu(void)
++{
++ prefill_possible_map();
++}
++
++#ifdef CONFIG_HOTPLUG_CPU
++
++/*
++ * Initialize cpu_present_map late to skip SMP boot code in init/main.c.
++ * But do it early enough to catch critical for_each_present_cpu() loops
++ * in i386-specific code.
++ */
++static int __init initialize_cpu_present_map(void)
++{
++ cpu_present_map = cpu_possible_map;
++ return 0;
++}
++core_initcall(initialize_cpu_present_map);
++
++int __cpu_disable(void)
++{
++ cpumask_t map = cpu_online_map;
++ unsigned int cpu = smp_processor_id();
++
++ if (cpu == 0)
++ return -EBUSY;
++
++ remove_siblinginfo(cpu);
++
++ cpu_clear(cpu, map);
++ fixup_irqs(map);
++ cpu_clear(cpu, cpu_online_map);
++
++ return 0;
++}
++
++void __cpu_die(unsigned int cpu)
++{
++ while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
++ current->state = TASK_UNINTERRUPTIBLE;
++ schedule_timeout(HZ/10);
++ }
++
++ xen_smp_intr_exit(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(0);
++}
++
++#endif /* CONFIG_HOTPLUG_CPU */
++
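++/*
++ * Bring a secondary CPU online: register its initial context with Xen,
++ * set up its sibling map and interrupt bindings, mark it online, and
++ * finally start it running with VCPUOP_up.
++ */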
++int __cpuinit __cpu_up(unsigned int cpu)
++{
++ int rc;
++
++ rc = cpu_up_check(cpu);
++ if (rc)
++ return rc;
++
++ cpu_initialize_context(cpu);
++
++ if (num_online_cpus() == 1)
++ alternatives_smp_switch(1);
++
++ /* This must be done before setting cpu_online_map */
++ set_cpu_sibling_map(cpu);
++ wmb();
++
++ rc = xen_smp_intr_init(cpu);
++ if (rc) {
++ remove_siblinginfo(cpu);
++ return rc;
++ }
++
++ cpu_set(cpu, cpu_online_map);
++
++ rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
++ BUG_ON(rc);
++
++ return 0;
++}
++
++void __init smp_cpus_done(unsigned int max_cpus)
++{
++}
++
++#ifndef CONFIG_X86_LOCAL_APIC
++int setup_profiling_timer(unsigned int multiplier)
++{
++ return -EINVAL;
++}
++#endif
+Index: head-2008-11-25/drivers/xen/core/xen_proc.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/xen_proc.c 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,23 @@
++
++#include <linux/module.h>
++#include <linux/proc_fs.h>
++#include <xen/xen_proc.h>
++
++static struct proc_dir_entry *xen_base;
++
++struct proc_dir_entry *create_xen_proc_entry(const char *name, mode_t mode)
++{
++ if ( xen_base == NULL )
++ if ( (xen_base = proc_mkdir("xen", &proc_root)) == NULL )
++ panic("Couldn't create /proc/xen");
++ return create_proc_entry(name, mode, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(create_xen_proc_entry);
++
++void remove_xen_proc_entry(const char *name)
++{
++ remove_proc_entry(name, xen_base);
++}
++
++EXPORT_SYMBOL_GPL(remove_xen_proc_entry);
+Index: head-2008-11-25/drivers/xen/core/xen_sysfs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/xen_sysfs.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,427 @@
++/*
++ * copyright (c) 2006 IBM Corporation
++ * Authored by: Mike D. Day <ncmike@us.ibm.com>
++ *
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License version 2 as
++ * published by the Free Software Foundation.
++ */
++
++#include <linux/err.h>
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <asm/hypervisor.h>
++#include <xen/features.h>
++#include <xen/hypervisor_sysfs.h>
++#include <xen/xenbus.h>
++#include <xen/interface/kexec.h>
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR("Mike D. Day <ncmike@us.ibm.com>");
++
++static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return sprintf(buffer, "xen\n");
++}
++
++HYPERVISOR_ATTR_RO(type);
++
++static int __init xen_sysfs_type_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++static void xen_sysfs_type_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &type_attr.attr);
++}
++
++/* xen version attributes */
++static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version >> 16);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(major);
++
++static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int version = HYPERVISOR_xen_version(XENVER_version, NULL);
++ if (version)
++ return sprintf(buffer, "%d\n", version & 0xff);
++ return -ENODEV;
++}
++
++HYPERVISOR_ATTR_RO(minor);
++
++static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *extra;
++
++ extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
++ if (extra) {
++ ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", extra);
++ kfree(extra);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(extra);
++
++static struct attribute *version_attrs[] = {
++ &major_attr.attr,
++ &minor_attr.attr,
++ &extra_attr.attr,
++ NULL
++};
++
++static struct attribute_group version_group = {
++ .name = "version",
++ .attrs = version_attrs,
++};
++
++static int __init xen_sysfs_version_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &version_group);
++}
++
++static void xen_sysfs_version_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj, &version_group);
++}
++
++/* UUID */
++
++static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ char *vm, *val;
++ int ret;
++ extern int xenstored_ready;
++
++ if (!xenstored_ready)
++ return -EBUSY;
++
++ vm = xenbus_read(XBT_NIL, "vm", "", NULL);
++ if (IS_ERR(vm))
++ return PTR_ERR(vm);
++ val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
++ kfree(vm);
++ if (IS_ERR(val))
++ return PTR_ERR(val);
++ ret = sprintf(buffer, "%s\n", val);
++ kfree(val);
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(uuid);
++
++static int __init xen_sysfs_uuid_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++static void xen_sysfs_uuid_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &uuid_attr.attr);
++}
++
++/* xen compilation attributes */
++
++static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compiler);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiler);
++
++static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_by);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compiled_by);
++
++static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_compile_info *info;
++
++ info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
++ if (info) {
++ ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", info->compile_date);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(compile_date);
++
++static struct attribute *xen_compile_attrs[] = {
++ &compiler_attr.attr,
++ &compiled_by_attr.attr,
++ &compile_date_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_compilation_group = {
++ .name = "compilation",
++ .attrs = xen_compile_attrs,
++};
++
++static int __init xen_compilation_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++static void xen_compilation_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_compilation_group);
++}
++
++/* xen properties info */
++
++static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *caps;
++
++ caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
++ if (caps) {
++ ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", caps);
++ kfree(caps);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(capabilities);
++
++static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ char *cset;
++
++ cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
++ if (cset) {
++ ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
++ if (!ret)
++ ret = sprintf(buffer, "%s\n", cset);
++ kfree(cset);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(changeset);
++
++static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_platform_parameters *parms;
++
++ parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
++ if (parms) {
++ ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
++ parms);
++ if (!ret)
++ ret = sprintf(buffer, "%lx\n", parms->virt_start);
++ kfree(parms);
++ }
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(virtual_start);
++
++static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ int ret;
++
++ ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
++ if (ret > 0)
++ ret = sprintf(buffer, "%x\n", ret);
++
++ return ret;
++}
++
++HYPERVISOR_ATTR_RO(pagesize);
++
++/* eventually there will be several more features to export */
++static ssize_t xen_feature_show(int index, char *buffer)
++{
++ int ret = -ENOMEM;
++ struct xen_feature_info *info;
++
++ info = kmalloc(sizeof(struct xen_feature_info), GFP_KERNEL);
++ if (info) {
++ info->submap_idx = index;
++ ret = HYPERVISOR_xen_version(XENVER_get_features, info);
++ if (!ret)
++ ret = sprintf(buffer, "%d\n", info->submap);
++ kfree(info);
++ }
++
++ return ret;
++}
++
++static ssize_t writable_pt_show(struct hyp_sysfs_attr *attr, char *buffer)
++{
++ return xen_feature_show(XENFEAT_writable_page_tables, buffer);
++}
++
++HYPERVISOR_ATTR_RO(writable_pt);
++
++static struct attribute *xen_properties_attrs[] = {
++ &capabilities_attr.attr,
++ &changeset_attr.attr,
++ &virtual_start_attr.attr,
++ &pagesize_attr.attr,
++ &writable_pt_attr.attr,
++ NULL
++};
++
++static struct attribute_group xen_properties_group = {
++ .name = "properties",
++ .attrs = xen_properties_attrs,
++};
++
++static int __init xen_properties_init(void)
++{
++ return sysfs_create_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
++static void xen_properties_destroy(void)
++{
++ sysfs_remove_group(&hypervisor_subsys.kset.kobj,
++ &xen_properties_group);
++}
++
++#ifdef CONFIG_KEXEC
++
++extern size_t vmcoreinfo_size_xen;
++extern unsigned long paddr_vmcoreinfo_xen;
++
++static ssize_t vmcoreinfo_show(struct hyp_sysfs_attr *attr, char *page)
++{
++ return sprintf(page, "%lx %zx\n",
++ paddr_vmcoreinfo_xen, vmcoreinfo_size_xen);
++}
++
++HYPERVISOR_ATTR_RO(vmcoreinfo);
++
++static int __init xen_sysfs_vmcoreinfo_init(void)
++{
++ return sysfs_create_file(&hypervisor_subsys.kset.kobj,
++ &vmcoreinfo_attr.attr);
++}
++
++static void xen_sysfs_vmcoreinfo_destroy(void)
++{
++ sysfs_remove_file(&hypervisor_subsys.kset.kobj, &vmcoreinfo_attr.attr);
++}
++
++#endif
++
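++/*
++ * The error unwind mirrors the init order: a failure jumps to the label
++ * that destroys the most recently created group and then falls through
++ * the remaining destructors in reverse order.
++ */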
++static int __init hyper_sysfs_init(void)
++{
++ int ret;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ ret = xen_sysfs_type_init();
++ if (ret)
++ goto out;
++ ret = xen_sysfs_version_init();
++ if (ret)
++ goto version_out;
++ ret = xen_compilation_init();
++ if (ret)
++ goto comp_out;
++ ret = xen_sysfs_uuid_init();
++ if (ret)
++ goto uuid_out;
++ ret = xen_properties_init();
++ if (ret)
++ goto prop_out;
++#ifdef CONFIG_KEXEC
++ if (vmcoreinfo_size_xen != 0) {
++ ret = xen_sysfs_vmcoreinfo_init();
++ if (ret)
++ goto vmcoreinfo_out;
++ }
++#endif
++
++ goto out;
++
++#ifdef CONFIG_KEXEC
++vmcoreinfo_out:
++#endif
++ xen_properties_destroy();
++prop_out:
++ xen_sysfs_uuid_destroy();
++uuid_out:
++ xen_compilation_destroy();
++comp_out:
++ xen_sysfs_version_destroy();
++version_out:
++ xen_sysfs_type_destroy();
++out:
++ return ret;
++}
++
++static void __exit hyper_sysfs_exit(void)
++{
++#ifdef CONFIG_KEXEC
++ if (vmcoreinfo_size_xen != 0)
++ xen_sysfs_vmcoreinfo_destroy();
++#endif
++ xen_properties_destroy();
++ xen_compilation_destroy();
++ xen_sysfs_uuid_destroy();
++ xen_sysfs_version_destroy();
++ xen_sysfs_type_destroy();
++
++}
++
++module_init(hyper_sysfs_init);
++module_exit(hyper_sysfs_exit);
+Index: head-2008-11-25/drivers/xen/core/xencomm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/core/xencomm.c 2007-11-12 08:41:05.000000000 +0100
+@@ -0,0 +1,229 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Hollis Blanchard <hollisb@us.ibm.com>
++ */
++
++#include <linux/gfp.h>
++#include <linux/mm.h>
++#include <asm/page.h>
++#include <xen/xencomm.h>
++#include <xen/interface/xen.h>
++#ifdef __ia64__
++#include <asm/xen/xencomm.h> /* for is_kern_addr() */
++#endif
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xencomm_init(struct xencomm_desc *desc,
++ void *buffer, unsigned long bytes)
++{
++ unsigned long recorded = 0;
++ int i = 0;
++
++ while ((recorded < bytes) && (i < desc->nr_addrs)) {
++ unsigned long vaddr = (unsigned long)buffer + recorded;
++ unsigned long paddr;
++ int offset;
++ int chunksz;
++
++ offset = vaddr % PAGE_SIZE; /* handle partial pages */
++ chunksz = min(PAGE_SIZE - offset, bytes - recorded);
++
++ paddr = xencomm_vtop(vaddr);
++ if (paddr == ~0UL) {
++ printk("%s: couldn't translate vaddr %lx\n",
++ __func__, vaddr);
++ return -EINVAL;
++ }
++
++ desc->address[i++] = paddr;
++ recorded += chunksz;
++ }
++
++ if (recorded < bytes) {
++ printk("%s: could only translate %ld of %ld bytes\n",
++ __func__, recorded, bytes);
++ return -ENOSPC;
++ }
++
++ /* mark remaining addresses invalid (just for safety) */
++ while (i < desc->nr_addrs)
++ desc->address[i++] = XENCOMM_INVALID;
++
++ desc->magic = XENCOMM_MAGIC;
++
++ return 0;
++}
++
++static struct xencomm_desc *xencomm_alloc(gfp_t gfp_mask,
++ void *buffer, unsigned long bytes)
++{
++ struct xencomm_desc *desc;
++ unsigned long buffer_ulong = (unsigned long)buffer;
++ unsigned long start = buffer_ulong & PAGE_MASK;
++ unsigned long end = (buffer_ulong + bytes) | ~PAGE_MASK;
++ unsigned long nr_addrs = (end - start + 1) >> PAGE_SHIFT;
++ unsigned long size = sizeof(*desc) +
++ sizeof(desc->address[0]) * nr_addrs;
++
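++	/*
++	 * nr_addrs counts every page the buffer touches.  For example, a
++	 * 100-byte buffer starting 50 bytes before a page boundary spans
++	 * two pages: start and end round outward to page limits, giving
++	 * nr_addrs == 2.
++	 */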
++ /*
++	 * The slab allocator returns a pointer aligned to at least
++	 * sizeof(void *).  When sizeof(*desc) > sizeof(void *), struct
++	 * xencomm_desc might cross a page boundary.
++ */
++ if (sizeof(*desc) > sizeof(void*)) {
++ unsigned long order = get_order(size);
++ desc = (struct xencomm_desc *)__get_free_pages(gfp_mask,
++ order);
++ if (desc == NULL)
++ return NULL;
++
++ desc->nr_addrs =
++ ((PAGE_SIZE << order) - sizeof(struct xencomm_desc)) /
++ sizeof(*desc->address);
++ } else {
++ desc = kmalloc(size, gfp_mask);
++ if (desc == NULL)
++ return NULL;
++
++ desc->nr_addrs = nr_addrs;
++ }
++ return desc;
++}
++
++void xencomm_free(struct xencomm_handle *desc)
++{
++ if (desc && !((ulong)desc & XENCOMM_INLINE_FLAG)) {
++ struct xencomm_desc *desc__ = (struct xencomm_desc*)desc;
++ if (sizeof(*desc__) > sizeof(void*)) {
++ unsigned long size = sizeof(*desc__) +
++ sizeof(desc__->address[0]) * desc__->nr_addrs;
++ unsigned long order = get_order(size);
++ free_pages((unsigned long)__va(desc), order);
++ } else
++ kfree(__va(desc));
++ }
++}
++
++static int xencomm_create(void *buffer, unsigned long bytes, struct xencomm_desc **ret, gfp_t gfp_mask)
++{
++ struct xencomm_desc *desc;
++ int rc;
++
++ pr_debug("%s: %p[%ld]\n", __func__, buffer, bytes);
++
++ if (bytes == 0) {
++ /* don't create a descriptor; Xen recognizes NULL. */
++ BUG_ON(buffer != NULL);
++ *ret = NULL;
++ return 0;
++ }
++
++ BUG_ON(buffer == NULL); /* 'bytes' is non-zero */
++
++ desc = xencomm_alloc(gfp_mask, buffer, bytes);
++ if (!desc) {
++ printk("%s failure\n", "xencomm_alloc");
++ return -ENOMEM;
++ }
++
++ rc = xencomm_init(desc, buffer, bytes);
++ if (rc) {
++ printk("%s failure: %d\n", "xencomm_init", rc);
++ xencomm_free((struct xencomm_handle *)__pa(desc));
++ return rc;
++ }
++
++ *ret = desc;
++ return 0;
++}
++
++/*
++ * A buffer is physically contiguous if it lies in the kernel direct
++ * mapping, i.e. outside the vmalloc region.
++ */
++static int is_phys_contiguous(unsigned long addr)
++{
++ if (!is_kernel_addr(addr))
++ return 0;
++
++ return (addr < VMALLOC_START) || (addr >= VMALLOC_END);
++}
++
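++/*
++ * For physically contiguous buffers no descriptor is needed: the
++ * buffer's physical address is passed to Xen directly, tagged with
++ * XENCOMM_INLINE_FLAG so the hypervisor can tell an inline handle
++ * from a descriptor handle.
++ */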
++static struct xencomm_handle *xencomm_create_inline(void *ptr)
++{
++ unsigned long paddr;
++
++ BUG_ON(!is_phys_contiguous((unsigned long)ptr));
++
++ paddr = (unsigned long)xencomm_pa(ptr);
++ BUG_ON(paddr & XENCOMM_INLINE_FLAG);
++ return (struct xencomm_handle *)(paddr | XENCOMM_INLINE_FLAG);
++}
++
++/* "mini" routine, for stack-based communications: */
++static int xencomm_create_mini(void *buffer,
++ unsigned long bytes, struct xencomm_mini *xc_desc,
++ struct xencomm_desc **ret)
++{
++ int rc = 0;
++ struct xencomm_desc *desc;
++ BUG_ON(((unsigned long)xc_desc) % sizeof(*xc_desc) != 0);
++
++ desc = (void *)xc_desc;
++
++ desc->nr_addrs = XENCOMM_MINI_ADDRS;
++
++ if (!(rc = xencomm_init(desc, buffer, bytes)))
++ *ret = desc;
++
++ return rc;
++}
++
++struct xencomm_handle *xencomm_map(void *ptr, unsigned long bytes)
++{
++ int rc;
++ struct xencomm_desc *desc;
++
++ if (is_phys_contiguous((unsigned long)ptr))
++ return xencomm_create_inline(ptr);
++
++ rc = xencomm_create(ptr, bytes, &desc, GFP_KERNEL);
++
++ if (rc || desc == NULL)
++ return NULL;
++
++ return xencomm_pa(desc);
++}
++
++struct xencomm_handle *__xencomm_map_no_alloc(void *ptr, unsigned long bytes,
++ struct xencomm_mini *xc_desc)
++{
++ int rc;
++ struct xencomm_desc *desc = NULL;
++
++ if (is_phys_contiguous((unsigned long)ptr))
++ return xencomm_create_inline(ptr);
++
++ rc = xencomm_create_mini(ptr, bytes, xc_desc,
++ &desc);
++
++ if (rc)
++ return NULL;
++
++ return xencomm_pa(desc);
++}
+Index: head-2008-11-25/drivers/xen/evtchn/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/evtchn/Makefile 2007-06-12 13:13:44.000000000 +0200
+@@ -0,0 +1,2 @@
++
++obj-y := evtchn.o
+Index: head-2008-11-25/drivers/xen/evtchn/evtchn.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/evtchn/evtchn.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,560 @@
++/******************************************************************************
++ * evtchn.c
++ *
++ * Driver for receiving and demuxing event-channel signals.
++ *
++ * Copyright (c) 2004-2005, K A Fraser
++ * Multi-process extensions Copyright (c) 2004, Steven Smith
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/fs.h>
++#include <linux/errno.h>
++#include <linux/miscdevice.h>
++#include <linux/major.h>
++#include <linux/proc_fs.h>
++#include <linux/stat.h>
++#include <linux/poll.h>
++#include <linux/irq.h>
++#include <linux/init.h>
++#include <linux/gfp.h>
++#include <linux/mutex.h>
++#include <linux/cpu.h>
++#include <xen/evtchn.h>
++#include <xen/public/evtchn.h>
++
++struct per_user_data {
++ /* Notification ring, accessed via /dev/xen/evtchn. */
++#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
++#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
++ evtchn_port_t *ring;
++ unsigned int ring_cons, ring_prod, ring_overflow;
++ struct mutex ring_cons_mutex; /* protect against concurrent readers */
++
++ /* Processes wait on this queue when ring is empty. */
++ wait_queue_head_t evtchn_wait;
++ struct fasync_struct *evtchn_async_queue;
++
++ int bind_cpu;
++ int nr_event_wrong_delivery;
++};
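++
++/*
++ * ring_cons and ring_prod are free-running counters; EVTCHN_RING_MASK()
++ * reduces them to ring indices (EVTCHN_RING_SIZE is a power of two).
++ * cons == prod means the ring is empty, which is why the upcall below
++ * only wakes readers on the empty -> non-empty transition.
++ */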
++
++/* Who's bound to each port? */
++static struct per_user_data *port_user[NR_EVENT_CHANNELS];
++static spinlock_t port_user_lock;
++
++void evtchn_device_upcall(int port)
++{
++ struct per_user_data *u;
++
++ spin_lock(&port_user_lock);
++
++ mask_evtchn(port);
++ clear_evtchn(port);
++
++ if ((u = port_user[port]) != NULL) {
++ if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
++ u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
++ wmb(); /* Ensure ring contents visible */
++ if (u->ring_cons == u->ring_prod++) {
++ wake_up_interruptible(&u->evtchn_wait);
++ kill_fasync(&u->evtchn_async_queue,
++ SIGIO, POLL_IN);
++ }
++ } else {
++ u->ring_overflow = 1;
++ }
++ }
++
++ spin_unlock(&port_user_lock);
++}
++
++static void evtchn_check_wrong_delivery(struct per_user_data *u)
++{
++ evtchn_port_t port;
++ unsigned int current_cpu = smp_processor_id();
++
++ /* Delivered to correct CPU? All is good. */
++ if (u->bind_cpu == current_cpu) {
++ u->nr_event_wrong_delivery = 0;
++ return;
++ }
++
++ /* Tolerate up to 100 consecutive misdeliveries. */
++ if (++u->nr_event_wrong_delivery < 100)
++ return;
++
++ spin_lock_irq(&port_user_lock);
++
++ for (port = 0; port < NR_EVENT_CHANNELS; port++)
++ if (port_user[port] == u)
++ rebind_evtchn_to_cpu(port, current_cpu);
++
++ u->bind_cpu = current_cpu;
++ u->nr_event_wrong_delivery = 0;
++
++ spin_unlock_irq(&port_user_lock);
++}
++
++static ssize_t evtchn_read(struct file *file, char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc;
++ unsigned int c, p, bytes1 = 0, bytes2 = 0;
++ struct per_user_data *u = file->private_data;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ if (count == 0)
++ return 0;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ for (;;) {
++ mutex_lock(&u->ring_cons_mutex);
++
++ rc = -EFBIG;
++ if (u->ring_overflow)
++ goto unlock_out;
++
++ if ((c = u->ring_cons) != (p = u->ring_prod))
++ break;
++
++ mutex_unlock(&u->ring_cons_mutex);
++
++ if (file->f_flags & O_NONBLOCK)
++ return -EAGAIN;
++
++ rc = wait_event_interruptible(
++ u->evtchn_wait, u->ring_cons != u->ring_prod);
++ if (rc)
++ return rc;
++ }
++
++ /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
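++	/*
++	 * Since ring_prod - ring_cons never exceeds EVTCHN_RING_SIZE, c
++	 * and p are in different wraps exactly when they differ in the
++	 * EVTCHN_RING_SIZE bit.  E.g. with a size of 1024, c = 1022 and
++	 * p = 1025 gives chunk one as the two ports at the end of the
++	 * ring and chunk two as the single port at its start.
++	 */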
++ if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
++ bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
++ sizeof(evtchn_port_t);
++ bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
++ } else {
++ bytes1 = (p - c) * sizeof(evtchn_port_t);
++ bytes2 = 0;
++ }
++
++ /* Truncate chunks according to caller's maximum byte count. */
++ if (bytes1 > count) {
++ bytes1 = count;
++ bytes2 = 0;
++ } else if ((bytes1 + bytes2) > count) {
++ bytes2 = count - bytes1;
++ }
++
++ rc = -EFAULT;
++ rmb(); /* Ensure that we see the port before we copy it. */
++ if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
++ ((bytes2 != 0) &&
++ copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
++ goto unlock_out;
++
++ evtchn_check_wrong_delivery(u);
++
++ u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
++ rc = bytes1 + bytes2;
++
++ unlock_out:
++ mutex_unlock(&u->ring_cons_mutex);
++ return rc;
++}
++
++static ssize_t evtchn_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ int rc, i;
++ evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ struct per_user_data *u = file->private_data;
++
++ if (kbuf == NULL)
++ return -ENOMEM;
++
++ /* Whole number of ports. */
++ count &= ~(sizeof(evtchn_port_t)-1);
++
++ rc = 0;
++ if (count == 0)
++ goto out;
++
++ if (count > PAGE_SIZE)
++ count = PAGE_SIZE;
++
++ rc = -EFAULT;
++ if (copy_from_user(kbuf, buf, count) != 0)
++ goto out;
++
++ spin_lock_irq(&port_user_lock);
++ for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
++ if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
++ unmask_evtchn(kbuf[i]);
++ spin_unlock_irq(&port_user_lock);
++
++ rc = count;
++
++ out:
++ free_page((unsigned long)kbuf);
++ return rc;
++}
++
++static unsigned int next_bind_cpu(cpumask_t map)
++{
++ static unsigned int bind_cpu;
++ bind_cpu = next_cpu(bind_cpu, map);
++ if (bind_cpu >= NR_CPUS)
++ bind_cpu = first_cpu(map);
++ return bind_cpu;
++}
++
++static void evtchn_bind_to_user(struct per_user_data *u, int port)
++{
++ spin_lock_irq(&port_user_lock);
++
++ BUG_ON(port_user[port] != NULL);
++ port_user[port] = u;
++
++ if (u->bind_cpu == -1)
++ u->bind_cpu = next_bind_cpu(cpu_online_map);
++
++ rebind_evtchn_to_cpu(port, u->bind_cpu);
++
++ unmask_evtchn(port);
++
++ spin_unlock_irq(&port_user_lock);
++}
++
++static long evtchn_ioctl(struct file *file,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc;
++ struct per_user_data *u = file->private_data;
++ void __user *uarg = (void __user *) arg;
++
++ switch (cmd) {
++ case IOCTL_EVTCHN_BIND_VIRQ: {
++ struct ioctl_evtchn_bind_virq bind;
++ struct evtchn_bind_virq bind_virq;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_virq.virq = bind.virq;
++ bind_virq.vcpu = 0;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
++ &bind_virq);
++ if (rc != 0)
++ break;
++
++ rc = bind_virq.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
++ struct ioctl_evtchn_bind_interdomain bind;
++ struct evtchn_bind_interdomain bind_interdomain;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ bind_interdomain.remote_dom = bind.remote_domain;
++ bind_interdomain.remote_port = bind.remote_port;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
++ &bind_interdomain);
++ if (rc != 0)
++ break;
++
++ rc = bind_interdomain.local_port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
++ struct ioctl_evtchn_bind_unbound_port bind;
++ struct evtchn_alloc_unbound alloc_unbound;
++
++ rc = -EFAULT;
++ if (copy_from_user(&bind, uarg, sizeof(bind)))
++ break;
++
++ alloc_unbound.dom = DOMID_SELF;
++ alloc_unbound.remote_dom = bind.remote_domain;
++ rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
++ &alloc_unbound);
++ if (rc != 0)
++ break;
++
++ rc = alloc_unbound.port;
++ evtchn_bind_to_user(u, rc);
++ break;
++ }
++
++ case IOCTL_EVTCHN_UNBIND: {
++ struct ioctl_evtchn_unbind unbind;
++ struct evtchn_close close;
++ int ret;
++
++ rc = -EFAULT;
++ if (copy_from_user(&unbind, uarg, sizeof(unbind)))
++ break;
++
++ rc = -EINVAL;
++ if (unbind.port >= NR_EVENT_CHANNELS)
++ break;
++
++ spin_lock_irq(&port_user_lock);
++
++ rc = -ENOTCONN;
++ if (port_user[unbind.port] != u) {
++ spin_unlock_irq(&port_user_lock);
++ break;
++ }
++
++ port_user[unbind.port] = NULL;
++ mask_evtchn(unbind.port);
++ rebind_evtchn_to_cpu(unbind.port, 0);
++
++ spin_unlock_irq(&port_user_lock);
++
++ close.port = unbind.port;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++
++ rc = 0;
++ break;
++ }
++
++ case IOCTL_EVTCHN_NOTIFY: {
++ struct ioctl_evtchn_notify notify;
++
++ rc = -EFAULT;
++ if (copy_from_user(¬ify, uarg, sizeof(notify)))
++ break;
++
++ if (notify.port >= NR_EVENT_CHANNELS) {
++ rc = -EINVAL;
++ } else if (port_user[notify.port] != u) {
++ rc = -ENOTCONN;
++ } else {
++ notify_remote_via_evtchn(notify.port);
++ rc = 0;
++ }
++ break;
++ }
++
++ case IOCTL_EVTCHN_RESET: {
++ /* Initialise the ring to empty. Clear errors. */
++ mutex_lock(&u->ring_cons_mutex);
++ spin_lock_irq(&port_user_lock);
++ u->ring_cons = u->ring_prod = u->ring_overflow = 0;
++ spin_unlock_irq(&port_user_lock);
++ mutex_unlock(&u->ring_cons_mutex);
++ rc = 0;
++ break;
++ }
++
++ default:
++ rc = -ENOSYS;
++ break;
++ }
++
++ return rc;
++}
++
++static unsigned int evtchn_poll(struct file *file, poll_table *wait)
++{
++ unsigned int mask = POLLOUT | POLLWRNORM;
++ struct per_user_data *u = file->private_data;
++
++ poll_wait(file, &u->evtchn_wait, wait);
++ if (u->ring_cons != u->ring_prod)
++ mask |= POLLIN | POLLRDNORM;
++ if (u->ring_overflow)
++ mask = POLLERR;
++ return mask;
++}
++
++static int evtchn_fasync(int fd, struct file *filp, int on)
++{
++ struct per_user_data *u = filp->private_data;
++ return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
++}
++
++static int evtchn_open(struct inode *inode, struct file *filp)
++{
++ struct per_user_data *u;
++
++ if ((u = kmalloc(sizeof(*u), GFP_KERNEL)) == NULL)
++ return -ENOMEM;
++
++ memset(u, 0, sizeof(*u));
++ init_waitqueue_head(&u->evtchn_wait);
++
++ u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
++ if (u->ring == NULL) {
++ kfree(u);
++ return -ENOMEM;
++ }
++
++ mutex_init(&u->ring_cons_mutex);
++
++ filp->private_data = u;
++
++ u->bind_cpu = -1;
++
++ return 0;
++}
++
++static int evtchn_release(struct inode *inode, struct file *filp)
++{
++ int i;
++ struct per_user_data *u = filp->private_data;
++ struct evtchn_close close;
++
++ spin_lock_irq(&port_user_lock);
++
++ free_page((unsigned long)u->ring);
++
++ for (i = 0; i < NR_EVENT_CHANNELS; i++) {
++ int ret;
++ if (port_user[i] != u)
++ continue;
++
++ port_user[i] = NULL;
++ mask_evtchn(i);
++ rebind_evtchn_to_cpu(i, 0);
++
++ close.port = i;
++ ret = HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
++ BUG_ON(ret);
++ }
++
++ spin_unlock_irq(&port_user_lock);
++
++ kfree(u);
++
++ return 0;
++}
++
++static const struct file_operations evtchn_fops = {
++ .owner = THIS_MODULE,
++ .read = evtchn_read,
++ .write = evtchn_write,
++ .unlocked_ioctl = evtchn_ioctl,
++ .poll = evtchn_poll,
++ .fasync = evtchn_fasync,
++ .open = evtchn_open,
++ .release = evtchn_release,
++};
++
++static struct miscdevice evtchn_miscdev = {
++ .minor = MISC_DYNAMIC_MINOR,
++ .name = "evtchn",
++ .fops = &evtchn_fops,
++};
++
++static int __cpuinit evtchn_cpu_notify(struct notifier_block *nfb,
++ unsigned long action, void *hcpu)
++{
++ int hotcpu = (unsigned long)hcpu;
++ cpumask_t map = cpu_online_map;
++ int port, newcpu;
++ struct per_user_data *u;
++
++ switch (action) {
++ case CPU_DOWN_PREPARE:
++ cpu_clear(hotcpu, map);
++ spin_lock_irq(&port_user_lock);
++ for (port = 0; port < NR_EVENT_CHANNELS; port++) {
++ if ((u = port_user[port]) != NULL &&
++ u->bind_cpu == hotcpu &&
++ (newcpu = next_bind_cpu(map)) < NR_CPUS) {
++ rebind_evtchn_to_cpu(port, newcpu);
++ u->bind_cpu = newcpu;
++ }
++ }
++ spin_unlock_irq(&port_user_lock);
++ break;
++ default:
++ return NOTIFY_DONE;
++ }
++ return NOTIFY_OK;
++}
++
++static struct notifier_block __cpuinitdata evtchn_cpu_nfb = {
++ .notifier_call = evtchn_cpu_notify
++};
++
++static int __init evtchn_init(void)
++{
++ int err;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ spin_lock_init(&port_user_lock);
++ memset(port_user, 0, sizeof(port_user));
++
++ /* Create '/dev/misc/evtchn'. */
++ err = misc_register(&evtchn_miscdev);
++ if (err != 0) {
++ printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
++ return err;
++ }
++
++ register_cpu_notifier(&evtchn_cpu_nfb);
++
++ printk("Event-channel device installed.\n");
++
++ return 0;
++}
++
++static void __exit evtchn_cleanup(void)
++{
++ misc_deregister(&evtchn_miscdev);
++ unregister_cpu_notifier(&evtchn_cpu_nfb);
++}
++
++module_init(evtchn_init);
++module_exit(evtchn_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/fbfront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/fbfront/Makefile 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,2 @@
++obj-$(CONFIG_XEN_FRAMEBUFFER) := xenfb.o
++obj-$(CONFIG_XEN_KEYBOARD) += xenkbd.o
+Index: head-2008-11-25/drivers/xen/fbfront/xenfb.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/fbfront/xenfb.c 2008-11-25 12:22:34.000000000 +0100
+@@ -0,0 +1,887 @@
++/*
++ * linux/drivers/video/xenfb.c -- Xen para-virtual frame buffer device
++ *
++ * Copyright (C) 2005-2006 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/video/q40fb.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables when they become capable of dealing with the
++ * frame buffer.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/fb.h>
++#include <linux/module.h>
++#include <linux/vmalloc.h>
++#include <linux/mm.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/protocols.h>
++#include <xen/xenbus.h>
++#include <linux/kthread.h>
++
++struct xenfb_mapping
++{
++ struct list_head link;
++ struct vm_area_struct *vma;
++ atomic_t map_refs;
++ int faults;
++ struct xenfb_info *info;
++};
++
++struct xenfb_info
++{
++ struct task_struct *kthread;
++ wait_queue_head_t wq;
++
++ unsigned char *fb;
++ struct fb_info *fb_info;
++ struct timer_list refresh;
++ int dirty;
++ int x1, y1, x2, y2; /* dirty rectangle,
++ protected by dirty_lock */
++ spinlock_t dirty_lock;
++ struct mutex mm_lock;
++ int nr_pages;
++ struct page **pages;
++ struct list_head mappings; /* protected by mm_lock */
++
++ int irq;
++ struct xenfb_page *page;
++ unsigned long *mfns;
++ int update_wanted; /* XENFB_TYPE_UPDATE wanted */
++ int feature_resize; /* Backend has resize feature */
++ struct xenfb_resize resize;
++ int resize_dpy;
++ spinlock_t resize_lock;
++
++ struct xenbus_device *xbdev;
++};
++
++/*
++ * There are three locks:
++ * spinlock resize_lock protecting resize_dpy and resize
++ * spinlock dirty_lock protecting the dirty rectangle
++ * mutex mm_lock protecting mappings.
++ *
++ * How the dirty and mapping locks work together
++ *
++ * The problem is that dirty rectangle and mappings aren't
++ * independent: the dirty rectangle must cover all faulted pages in
++ * mappings. We need to prove that our locking maintains this
++ * invariant.
++ *
++ * There are several kinds of critical regions:
++ *
++ * 1. Holding only dirty_lock: xenfb_refresh(). May run in
++ * interrupts. Extends the dirty rectangle. Trivially preserves
++ * invariant.
++ *
++ * 2. Holding only mm_lock: xenfb_mmap() and xenfb_vm_close(). Touch
++ * only mappings. The former creates unfaulted pages. Preserves
++ * invariant. The latter removes pages. Preserves invariant.
++ *
++ * 3. Holding both locks: xenfb_vm_nopage(). Extends the dirty
++ * rectangle and updates mappings consistently. Preserves
++ * invariant.
++ *
++ * 4. The ugliest one: xenfb_update_screen(). Clear the dirty
++ * rectangle and update mappings consistently.
++ *
++ * We can't simply hold both locks, because zap_page_range() cannot
++ * be called with a spinlock held.
++ *
++ * Therefore, we first clear the dirty rectangle with both locks
++ * held. Then we unlock dirty_lock and update the mappings.
++ * Critical regions that hold only dirty_lock may interfere with
++ * that. This can only be region 1: xenfb_refresh(). But that
++ * just extends the dirty rectangle, which can't harm the
++ * invariant.
++ *
++ * But FIXME: the invariant is too weak. It misses that the fault
++ * record in mappings must be consistent with the mapping of pages in
++ * the associated address space! do_no_page() updates the PTE after
++ * xenfb_vm_nopage() returns, i.e. outside the critical region. This
++ * allows the following race:
++ *
++ * X writes to some address in the Xen frame buffer
++ * Fault - call do_no_page()
++ * call xenfb_vm_nopage()
++ * grab mm_lock
++ * map->faults++;
++ * release mm_lock
++ * return back to do_no_page()
++ * (preempted, or SMP)
++ * Xen worker thread runs.
++ * grab mm_lock
++ * look at mappings
++ * find this mapping, zaps its pages (but page not in pte yet)
++ * clear map->faults
++ * releases mm_lock
++ * (back to X process)
++ * put page in X's pte
++ *
++ * Oh well, we won't pick up the writes to this page anytime soon.
++ */
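++
++/*
++ * Lock-order sketch, as followed by xenfb_update_screen() and
++ * xenfb_vm_nopage() below: whenever both locks are needed, mm_lock
++ * is taken first and dirty_lock is nested inside it:
++ *
++ * mutex_lock(&info->mm_lock);
++ * spin_lock_irqsave(&info->dirty_lock, flags);
++ * ... read or clear the dirty rectangle ...
++ * spin_unlock_irqrestore(&info->dirty_lock, flags);
++ * ... zap_page_range(), with only mm_lock held ...
++ * mutex_unlock(&info->mm_lock);
++ */
++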
++#define MB_ (1024*1024)
++#define XENFB_DEFAULT_FB_LEN (XENFB_WIDTH * XENFB_HEIGHT * XENFB_DEPTH / 8)
++
++enum {KPARAM_MEM, KPARAM_WIDTH, KPARAM_HEIGHT, KPARAM_CNT};
++static int video[KPARAM_CNT] = {2, XENFB_WIDTH, XENFB_HEIGHT};
++module_param_array(video, int, NULL, 0);
++MODULE_PARM_DESC(video,
++ "Size of video memory in MB and width,height in pixels, default = (2,800,600)");
++
++static int xenfb_fps = 20;
++
++static int xenfb_remove(struct xenbus_device *);
++static void xenfb_init_shared_page(struct xenfb_info *, struct fb_info *);
++static int xenfb_connect_backend(struct xenbus_device *, struct xenfb_info *);
++static void xenfb_disconnect_backend(struct xenfb_info *);
++
++static void xenfb_send_event(struct xenfb_info *info,
++ union xenfb_out_event *event)
++{
++ __u32 prod;
++
++ prod = info->page->out_prod;
++ /* caller ensures !xenfb_queue_full() */
++ mb(); /* ensure ring space available */
++ XENFB_OUT_RING_REF(info->page, prod) = *event;
++ wmb(); /* ensure ring contents visible */
++ info->page->out_prod = prod + 1;
++
++ notify_remote_via_irq(info->irq);
++}
++
++static void xenfb_do_update(struct xenfb_info *info,
++ int x, int y, int w, int h)
++{
++ union xenfb_out_event event;
++
++ memset(&event, 0, sizeof(event));
++ event.type = XENFB_TYPE_UPDATE;
++ event.update.x = x;
++ event.update.y = y;
++ event.update.width = w;
++ event.update.height = h;
++
++ /* caller ensures !xenfb_queue_full() */
++ xenfb_send_event(info, &event);
++}
++
++static void xenfb_do_resize(struct xenfb_info *info)
++{
++ union xenfb_out_event event;
++
++ memset(&event, 0, sizeof(event));
++ event.resize = info->resize;
++
++ /* caller ensures !xenfb_queue_full() */
++ xenfb_send_event(info, &event);
++}
++
++static int xenfb_queue_full(struct xenfb_info *info)
++{
++ __u32 cons, prod;
++
++ prod = info->page->out_prod;
++ cons = info->page->out_cons;
++ return prod - cons == XENFB_OUT_RING_LEN;
++}
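++
++/*
++ * Note the ring indices are free-running __u32 counters, so
++ * prod - cons is the number of unconsumed events even across
++ * wraparound.  For example, prod = 1 and cons = 0xffffffff give
++ * prod - cons = 2; the ring is full exactly when the difference
++ * reaches XENFB_OUT_RING_LEN.
++ */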
++
++static void xenfb_update_screen(struct xenfb_info *info)
++{
++ unsigned long flags;
++ int y1, y2, x1, x2;
++ struct xenfb_mapping *map;
++
++ if (!info->update_wanted)
++ return;
++ if (xenfb_queue_full(info))
++ return;
++
++ mutex_lock(&info->mm_lock);
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ y1 = info->y1;
++ y2 = info->y2;
++ x1 = info->x1;
++ x2 = info->x2;
++ info->x1 = info->y1 = INT_MAX;
++ info->x2 = info->y2 = 0;
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++
++ list_for_each_entry(map, &info->mappings, link) {
++ if (!map->faults)
++ continue;
++ zap_page_range(map->vma, map->vma->vm_start,
++ map->vma->vm_end - map->vma->vm_start, NULL);
++ map->faults = 0;
++ }
++
++ mutex_unlock(&info->mm_lock);
++
++ if (x2 < x1 || y2 < y1) {
++ printk(KERN_WARNING "xenfb_update_screen bogus rect %d %d %d %d\n",
++ x1, x2, y1, y2);
++ WARN_ON(1);
++ }
++ xenfb_do_update(info, x1, y1, x2 - x1, y2 - y1);
++}
++
++static void xenfb_handle_resize_dpy(struct xenfb_info *info)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->resize_lock, flags);
++ if (info->resize_dpy) {
++ if (!xenfb_queue_full(info)) {
++ info->resize_dpy = 0;
++ xenfb_do_resize(info);
++ }
++ }
++ spin_unlock_irqrestore(&info->resize_lock, flags);
++}
++
++static int xenfb_thread(void *data)
++{
++ struct xenfb_info *info = data;
++
++ while (!kthread_should_stop()) {
++ xenfb_handle_resize_dpy(info);
++ if (info->dirty) {
++ info->dirty = 0;
++ xenfb_update_screen(info);
++ }
++ wait_event_interruptible(info->wq,
++ kthread_should_stop() || info->dirty);
++ try_to_freeze();
++ }
++ return 0;
++}
++
++static int xenfb_setcolreg(unsigned regno, unsigned red, unsigned green,
++ unsigned blue, unsigned transp,
++ struct fb_info *info)
++{
++ u32 v;
++
++ if (regno >= info->cmap.len)
++ return 1;
++
++ red >>= (16 - info->var.red.length);
++ green >>= (16 - info->var.green.length);
++ blue >>= (16 - info->var.blue.length);
++
++ v = (red << info->var.red.offset) |
++ (green << info->var.green.offset) |
++ (blue << info->var.blue.offset);
++
++ /* FIXME is this sane? check against xxxfb_setcolreg()! */
++ switch (info->var.bits_per_pixel) {
++ case 16:
++ case 24:
++ case 32:
++ ((u32 *)info->pseudo_palette)[regno] = v;
++ break;
++ }
++
++ return 0;
++}
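++
++/*
++ * Worked example of the packing above, for the 32 bpp layout set up
++ * in xenfb_probe() (red at offset 16, green at 8, blue at 0, each
++ * 8 bits long): a full-intensity red of 0xffff is shifted right by
++ * 16 - 8 = 8 bits to 0xff, so v = 0xff << 16 = 0x00ff0000 lands in
++ * the pseudo palette.
++ */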
++
++static void xenfb_timer(unsigned long data)
++{
++ struct xenfb_info *info = (struct xenfb_info *)data;
++ wake_up(&info->wq);
++}
++
++static void __xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ int y2, x2;
++
++ y2 = y1 + h;
++ x2 = x1 + w;
++
++ if (info->y1 > y1)
++ info->y1 = y1;
++ if (info->y2 < y2)
++ info->y2 = y2;
++ if (info->x1 > x1)
++ info->x1 = x1;
++ if (info->x2 < x2)
++ info->x2 = x2;
++ info->dirty = 1;
++
++ if (timer_pending(&info->refresh))
++ return;
++
++ mod_timer(&info->refresh, jiffies + HZ/xenfb_fps);
++}
++
++static void xenfb_refresh(struct xenfb_info *info,
++ int x1, int y1, int w, int h)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ __xenfb_refresh(info, x1, y1, w, h);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++}
++
++static void xenfb_fillrect(struct fb_info *p, const struct fb_fillrect *rect)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_fillrect(p, rect);
++ xenfb_refresh(info, rect->dx, rect->dy, rect->width, rect->height);
++}
++
++static void xenfb_imageblit(struct fb_info *p, const struct fb_image *image)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_imageblit(p, image);
++ xenfb_refresh(info, image->dx, image->dy, image->width, image->height);
++}
++
++static void xenfb_copyarea(struct fb_info *p, const struct fb_copyarea *area)
++{
++ struct xenfb_info *info = p->par;
++
++ cfb_copyarea(p, area);
++ xenfb_refresh(info, area->dx, area->dy, area->width, area->height);
++}
++
++static void xenfb_vm_open(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ atomic_inc(&map->map_refs);
++}
++
++static void xenfb_vm_close(struct vm_area_struct *vma)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++
++ mutex_lock(&info->mm_lock);
++ if (atomic_dec_and_test(&map->map_refs)) {
++ list_del(&map->link);
++ kfree(map);
++ }
++ mutex_unlock(&info->mm_lock);
++}
++
++static struct page *xenfb_vm_nopage(struct vm_area_struct *vma,
++ unsigned long vaddr, int *type)
++{
++ struct xenfb_mapping *map = vma->vm_private_data;
++ struct xenfb_info *info = map->info;
++ int pgnr = (vaddr - vma->vm_start) >> PAGE_SHIFT;
++ unsigned long flags;
++ struct page *page;
++ int y1, y2;
++
++ if (pgnr >= info->nr_pages)
++ return NOPAGE_SIGBUS;
++
++ mutex_lock(&info->mm_lock);
++ spin_lock_irqsave(&info->dirty_lock, flags);
++ page = info->pages[pgnr];
++ get_page(page);
++ map->faults++;
++
++ y1 = pgnr * PAGE_SIZE / info->fb_info->fix.line_length;
++ y2 = (pgnr * PAGE_SIZE + PAGE_SIZE - 1) / info->fb_info->fix.line_length;
++ if (y2 > info->fb_info->var.yres)
++ y2 = info->fb_info->var.yres;
++ __xenfb_refresh(info, 0, y1, info->fb_info->var.xres, y2 - y1);
++ spin_unlock_irqrestore(&info->dirty_lock, flags);
++ mutex_unlock(&info->mm_lock);
++
++ if (type)
++ *type = VM_FAULT_MINOR;
++
++ return page;
++}
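++
++/*
++ * The row arithmetic above converts the faulted page back into
++ * screen lines.  For example, at the default 800x600, 32 bpp,
++ * line_length is 800 * 4 = 3200 bytes, so page 0 (bytes 0..4095)
++ * spans row 0 and part of row 1, and the full width of that row
++ * range is marked dirty.
++ */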
++
++static struct vm_operations_struct xenfb_vm_ops = {
++ .open = xenfb_vm_open,
++ .close = xenfb_vm_close,
++ .nopage = xenfb_vm_nopage,
++};
++
++static int xenfb_mmap(struct fb_info *fb_info, struct vm_area_struct *vma)
++{
++ struct xenfb_info *info = fb_info->par;
++ struct xenfb_mapping *map;
++ int map_pages;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ return -EINVAL;
++ if (!(vma->vm_flags & VM_SHARED))
++ return -EINVAL;
++ if (vma->vm_pgoff != 0)
++ return -EINVAL;
++
++ map_pages = (vma->vm_end - vma->vm_start + PAGE_SIZE-1) >> PAGE_SHIFT;
++ if (map_pages > info->nr_pages)
++ return -EINVAL;
++
++ map = kzalloc(sizeof(*map), GFP_KERNEL);
++ if (map == NULL)
++ return -ENOMEM;
++
++ map->vma = vma;
++ map->faults = 0;
++ map->info = info;
++ atomic_set(&map->map_refs, 1);
++
++ mutex_lock(&info->mm_lock);
++ list_add(&map->link, &info->mappings);
++ mutex_unlock(&info->mm_lock);
++
++ vma->vm_ops = &xenfb_vm_ops;
++ vma->vm_flags |= (VM_DONTEXPAND | VM_RESERVED);
++ vma->vm_private_data = map;
++
++ return 0;
++}
++
++static int
++xenfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
++{
++ struct xenfb_info *xenfb_info;
++ int required_mem_len;
++
++ xenfb_info = info->par;
++
++ if (!xenfb_info->feature_resize) {
++ if (var->xres == video[KPARAM_WIDTH] &&
++ var->yres == video[KPARAM_HEIGHT] &&
++ var->bits_per_pixel == xenfb_info->page->depth) {
++ return 0;
++ }
++ return -EINVAL;
++ }
++
++ /* Can't resize past initial width and height */
++ if (var->xres > video[KPARAM_WIDTH] || var->yres > video[KPARAM_HEIGHT])
++ return -EINVAL;
++
++ required_mem_len = var->xres * var->yres * (xenfb_info->page->depth / 8);
++ if (var->bits_per_pixel == xenfb_info->page->depth &&
++ var->xres <= info->fix.line_length / (XENFB_DEPTH / 8) &&
++ required_mem_len <= info->fix.smem_len) {
++ var->xres_virtual = var->xres;
++ var->yres_virtual = var->yres;
++ return 0;
++ }
++ return -EINVAL;
++}
++
++static int xenfb_set_par(struct fb_info *info)
++{
++ struct xenfb_info *xenfb_info;
++ unsigned long flags;
++
++ xenfb_info = info->par;
++
++ spin_lock_irqsave(&xenfb_info->resize_lock, flags);
++ xenfb_info->resize.type = XENFB_TYPE_RESIZE;
++ xenfb_info->resize.width = info->var.xres;
++ xenfb_info->resize.height = info->var.yres;
++ xenfb_info->resize.stride = info->fix.line_length;
++ xenfb_info->resize.depth = info->var.bits_per_pixel;
++ xenfb_info->resize.offset = 0;
++ xenfb_info->resize_dpy = 1;
++ spin_unlock_irqrestore(&xenfb_info->resize_lock, flags);
++ return 0;
++}
++
++static struct fb_ops xenfb_fb_ops = {
++ .owner = THIS_MODULE,
++ .fb_setcolreg = xenfb_setcolreg,
++ .fb_fillrect = xenfb_fillrect,
++ .fb_copyarea = xenfb_copyarea,
++ .fb_imageblit = xenfb_imageblit,
++ .fb_mmap = xenfb_mmap,
++ .fb_check_var = xenfb_check_var,
++ .fb_set_par = xenfb_set_par,
++};
++
++static irqreturn_t xenfb_event_handler(int rq, void *dev_id,
++ struct pt_regs *regs)
++{
++ /*
++ * No incoming events are recognized; simply ignore them all.
++ * If you need to recognize some, see xenkbd's input_handler()
++ * for how to do that.
++ */
++ struct xenfb_info *info = dev_id;
++ struct xenfb_page *page = info->page;
++
++ if (page->in_cons != page->in_prod) {
++ info->page->in_cons = info->page->in_prod;
++ notify_remote_via_irq(info->irq);
++ }
++ return IRQ_HANDLED;
++}
++
++static unsigned long vmalloc_to_mfn(void *address)
++{
++ return pfn_to_mfn(vmalloc_to_pfn(address));
++}
++
++static int __devinit xenfb_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ struct xenfb_info *info;
++ struct fb_info *fb_info;
++ int fb_size;
++ int val;
++ int ret;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (info == NULL) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++
++ /* Limit kernel param videoram amount to what is in xenstore */
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "videoram", "%d", &val) == 1) {
++ if (val < video[KPARAM_MEM])
++ video[KPARAM_MEM] = val;
++ }
++
++ /* If requested res does not fit in available memory, use default */
++ fb_size = video[KPARAM_MEM] * MB_;
++ if (video[KPARAM_WIDTH] * video[KPARAM_HEIGHT] * XENFB_DEPTH/8 > fb_size) {
++ video[KPARAM_WIDTH] = XENFB_WIDTH;
++ video[KPARAM_HEIGHT] = XENFB_HEIGHT;
++ fb_size = XENFB_DEFAULT_FB_LEN;
++ }
++
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ info->irq = -1;
++ info->x1 = info->y1 = INT_MAX;
++ spin_lock_init(&info->dirty_lock);
++ spin_lock_init(&info->resize_lock);
++ mutex_init(&info->mm_lock);
++ init_waitqueue_head(&info->wq);
++ init_timer(&info->refresh);
++ info->refresh.function = xenfb_timer;
++ info->refresh.data = (unsigned long)info;
++ INIT_LIST_HEAD(&info->mappings);
++
++ info->fb = vmalloc(fb_size);
++ if (info->fb == NULL)
++ goto error_nomem;
++ memset(info->fb, 0, fb_size);
++
++ info->nr_pages = (fb_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ info->pages = kmalloc(sizeof(struct page *) * info->nr_pages,
++ GFP_KERNEL);
++ if (info->pages == NULL)
++ goto error_nomem;
++
++ info->mfns = vmalloc(sizeof(unsigned long) * info->nr_pages);
++ if (!info->mfns)
++ goto error_nomem;
++
++ /* set up shared page */
++ info->page = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
++ if (!info->page)
++ goto error_nomem;
++
++ fb_info = framebuffer_alloc(sizeof(u32) * 256, NULL);
++ /* see fishy hackery below */
++ if (fb_info == NULL)
++ goto error_nomem;
++
++ /* FIXME fishy hackery */
++ fb_info->pseudo_palette = fb_info->par;
++ fb_info->par = info;
++ /* /FIXME */
++ fb_info->screen_base = info->fb;
++
++ fb_info->fbops = &xenfb_fb_ops;
++ fb_info->var.xres_virtual = fb_info->var.xres = video[KPARAM_WIDTH];
++ fb_info->var.yres_virtual = fb_info->var.yres = video[KPARAM_HEIGHT];
++ fb_info->var.bits_per_pixel = XENFB_DEPTH;
++
++ fb_info->var.red = (struct fb_bitfield){16, 8, 0};
++ fb_info->var.green = (struct fb_bitfield){8, 8, 0};
++ fb_info->var.blue = (struct fb_bitfield){0, 8, 0};
++
++ fb_info->var.activate = FB_ACTIVATE_NOW;
++ fb_info->var.height = -1;
++ fb_info->var.width = -1;
++ fb_info->var.vmode = FB_VMODE_NONINTERLACED;
++
++ fb_info->fix.visual = FB_VISUAL_TRUECOLOR;
++ fb_info->fix.line_length = fb_info->var.xres * (XENFB_DEPTH / 8);
++ fb_info->fix.smem_start = 0;
++ fb_info->fix.smem_len = fb_size;
++ strcpy(fb_info->fix.id, "xen");
++ fb_info->fix.type = FB_TYPE_PACKED_PIXELS;
++ fb_info->fix.accel = FB_ACCEL_NONE;
++
++ fb_info->flags = FBINFO_FLAG_DEFAULT;
++
++ ret = fb_alloc_cmap(&fb_info->cmap, 256, 0);
++ if (ret < 0) {
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "fb_alloc_cmap");
++ goto error;
++ }
++
++ xenfb_init_shared_page(info, fb_info);
++
++ ret = register_framebuffer(fb_info);
++ if (ret) {
++ fb_dealloc_cmap(&fb_info->cmap); /* info->fb_info is not set yet */
++ framebuffer_release(fb_info);
++ xenbus_dev_fatal(dev, ret, "register_framebuffer");
++ goto error;
++ }
++ info->fb_info = fb_info;
++
++ ret = xenfb_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ /* FIXME should this be delayed until backend XenbusStateConnected? */
++ info->kthread = kthread_run(xenfb_thread, info, "xenfb thread");
++ if (IS_ERR(info->kthread)) {
++ ret = PTR_ERR(info->kthread);
++ info->kthread = NULL;
++ xenbus_dev_fatal(dev, ret, "running xenfb thread");
++ goto error;
++ }
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenfb_remove(dev);
++ return ret;
++}
++
++static int xenfb_resume(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ xenfb_disconnect_backend(info);
++ xenfb_init_shared_page(info, info->fb_info);
++ return xenfb_connect_backend(dev, info);
++}
++
++static int xenfb_remove(struct xenbus_device *dev)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++
++ del_timer(&info->refresh);
++ if (info->kthread)
++ kthread_stop(info->kthread);
++ xenfb_disconnect_backend(info);
++ if (info->fb_info) {
++ unregister_framebuffer(info->fb_info);
++ fb_dealloc_cmap(&info->fb_info->cmap);
++ framebuffer_release(info->fb_info);
++ }
++ free_page((unsigned long)info->page);
++ vfree(info->mfns);
++ kfree(info->pages);
++ vfree(info->fb);
++ kfree(info);
++
++ return 0;
++}
++
++static void xenfb_init_shared_page(struct xenfb_info *info,
++ struct fb_info *fb_info)
++{
++ int i;
++ int epd = PAGE_SIZE / sizeof(info->mfns[0]);
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->pages[i] = vmalloc_to_page(info->fb + i * PAGE_SIZE);
++
++ for (i = 0; i < info->nr_pages; i++)
++ info->mfns[i] = vmalloc_to_mfn(info->fb + i * PAGE_SIZE);
++
++ for (i = 0; i * epd < info->nr_pages; i++)
++ info->page->pd[i] = vmalloc_to_mfn(&info->mfns[i * epd]);
++
++ info->page->width = fb_info->var.xres;
++ info->page->height = fb_info->var.yres;
++ info->page->depth = fb_info->var.bits_per_pixel;
++ info->page->line_length = fb_info->fix.line_length;
++ info->page->mem_length = fb_info->fix.smem_len;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++}
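++
++/*
++ * The shared page thus holds a two-level directory of the frame
++ * buffer: mfns[] records the MFN of every frame buffer page, and
++ * pd[] records the MFNs of the pages backing mfns[] itself.  On a
++ * 64-bit kernel with 4 KiB pages, epd is 4096 / 8 = 512, so one
++ * pd[] entry covers 512 * 4 KiB = 2 MiB of frame buffer; the 2 MB
++ * default needs just one entry.
++ */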
++
++static int xenfb_connect_backend(struct xenbus_device *dev,
++ struct xenfb_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, xenfb_event_handler, 0, "xenfb", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "protocol", "%s",
++ XEN_IO_PROTO_ABI_NATIVE);
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "feature-update", "1");
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
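++
++/*
++ * A failed xenbus_transaction_end() with -EAGAIN means another
++ * writer raced with us; the whole transaction is simply replayed
++ * from "again:".  Any other error aborts the connect.
++ */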
++
++static void xenfb_disconnect_backend(struct xenfb_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenfb_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenfb_info *info = dev->dev.driver_data;
++ int val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "request-update", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ info->update_wanted = 1;
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-resize", "%d", &val) < 0)
++ val = 0;
++ info->feature_resize = val;
++ break;
++
++ case XenbusStateClosing:
++ /* FIXME: is this safe in any dev->state? */
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static const struct xenbus_device_id xenfb_ids[] = {
++ { "vfb" },
++ { "" }
++};
++MODULE_ALIAS("xen:vfb");
++
++static struct xenbus_driver xenfb_driver = {
++ .name = "vfb",
++ .owner = THIS_MODULE,
++ .ids = xenfb_ids,
++ .probe = xenfb_probe,
++ .remove = xenfb_remove,
++ .resume = xenfb_resume,
++ .otherend_changed = xenfb_backend_changed,
++};
++
++static int __init xenfb_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenfb_driver);
++}
++
++static void __exit xenfb_cleanup(void)
++{
++ xenbus_unregister_driver(&xenfb_driver);
++}
++
++module_init(xenfb_init);
++module_exit(xenfb_cleanup);
++
++MODULE_LICENSE("GPL");
+Index: head-2008-11-25/drivers/xen/fbfront/xenkbd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/fbfront/xenkbd.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,354 @@
++/*
++ * linux/drivers/input/keyboard/xenkbd.c -- Xen para-virtual input device
++ *
++ * Copyright (C) 2005 Anthony Liguori <aliguori@us.ibm.com>
++ * Copyright (C) 2006 Red Hat, Inc., Markus Armbruster <armbru@redhat.com>
++ *
++ * Based on linux/drivers/input/mouse/sermouse.c
++ *
++ * This file is subject to the terms and conditions of the GNU General Public
++ * License. See the file COPYING in the main directory of this archive for
++ * more details.
++ */
++
++/*
++ * TODO:
++ *
++ * Switch to grant tables together with xenfb.c.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/module.h>
++#include <linux/input.h>
++#include <asm/hypervisor.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/fbif.h>
++#include <xen/interface/io/kbdif.h>
++#include <xen/xenbus.h>
++
++struct xenkbd_info
++{
++ struct input_dev *kbd;
++ struct input_dev *ptr;
++ struct xenkbd_page *page;
++ int irq;
++ struct xenbus_device *xbdev;
++ char phys[32];
++};
++
++static int xenkbd_remove(struct xenbus_device *);
++static int xenkbd_connect_backend(struct xenbus_device *, struct xenkbd_info *);
++static void xenkbd_disconnect_backend(struct xenkbd_info *);
++
++/*
++ * Note: if you need to send out events, see xenfb_do_update() for how
++ * to do that.
++ */
++
++static irqreturn_t input_handler(int rq, void *dev_id, struct pt_regs *regs)
++{
++ struct xenkbd_info *info = dev_id;
++ struct xenkbd_page *page = info->page;
++ __u32 cons, prod;
++
++ prod = page->in_prod;
++ if (prod == page->in_cons)
++ return IRQ_HANDLED;
++ rmb(); /* ensure we see ring contents up to prod */
++ for (cons = page->in_cons; cons != prod; cons++) {
++ union xenkbd_in_event *event;
++ struct input_dev *dev;
++ event = &XENKBD_IN_RING_REF(page, cons);
++
++ dev = info->ptr;
++ switch (event->type) {
++ case XENKBD_TYPE_MOTION:
++ if (event->motion.rel_z)
++ input_report_rel(dev, REL_WHEEL,
++ -event->motion.rel_z);
++ input_report_rel(dev, REL_X, event->motion.rel_x);
++ input_report_rel(dev, REL_Y, event->motion.rel_y);
++ break;
++ case XENKBD_TYPE_KEY:
++ dev = NULL;
++ if (test_bit(event->key.keycode, info->kbd->keybit))
++ dev = info->kbd;
++ if (test_bit(event->key.keycode, info->ptr->keybit))
++ dev = info->ptr;
++ if (dev)
++ input_report_key(dev, event->key.keycode,
++ event->key.pressed);
++ else
++ printk(KERN_WARNING "xenkbd: unhandled keycode 0x%x\n",
++ event->key.keycode);
++ break;
++ case XENKBD_TYPE_POS:
++ if (event->pos.rel_z)
++ input_report_rel(dev, REL_WHEEL,
++ -event->pos.rel_z);
++ input_report_abs(dev, ABS_X, event->pos.abs_x);
++ input_report_abs(dev, ABS_Y, event->pos.abs_y);
++ break;
++ }
++ if (dev)
++ input_sync(dev);
++ }
++ mb(); /* ensure we got ring contents */
++ page->in_cons = cons;
++ notify_remote_via_irq(info->irq);
++
++ return IRQ_HANDLED;
++}
++
++static int __devinit xenkbd_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int ret, i;
++ struct xenkbd_info *info;
++ struct input_dev *kbd, *ptr;
++
++ info = kzalloc(sizeof(*info), GFP_KERNEL);
++ if (!info) {
++ xenbus_dev_fatal(dev, -ENOMEM, "allocating info structure");
++ return -ENOMEM;
++ }
++ dev->dev.driver_data = info;
++ info->xbdev = dev;
++ snprintf(info->phys, sizeof(info->phys), "xenbus/%s", dev->nodename);
++
++ info->page = (void *)get_zeroed_page(GFP_KERNEL);
++ if (!info->page)
++ goto error_nomem;
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++
++ /* keyboard */
++ kbd = input_allocate_device();
++ if (!kbd)
++ goto error_nomem;
++ kbd->name = "Xen Virtual Keyboard";
++ kbd->phys = info->phys;
++ kbd->id.bustype = BUS_PCI;
++ kbd->id.vendor = 0x5853;
++ kbd->id.product = 0xffff;
++ kbd->evbit[0] = BIT(EV_KEY);
++ for (i = KEY_ESC; i < KEY_UNKNOWN; i++)
++ set_bit(i, kbd->keybit);
++ for (i = KEY_OK; i < KEY_MAX; i++)
++ set_bit(i, kbd->keybit);
++
++ ret = input_register_device(kbd);
++ if (ret) {
++ input_free_device(kbd);
++ xenbus_dev_fatal(dev, ret, "input_register_device(kbd)");
++ goto error;
++ }
++ info->kbd = kbd;
++
++ /* pointing device */
++ ptr = input_allocate_device();
++ if (!ptr)
++ goto error_nomem;
++ ptr->name = "Xen Virtual Pointer";
++ ptr->phys = info->phys;
++ ptr->id.bustype = BUS_PCI;
++ ptr->id.vendor = 0x5853;
++ ptr->id.product = 0xfffe;
++ ptr->evbit[0] = BIT(EV_KEY) | BIT(EV_REL) | BIT(EV_ABS);
++ for (i = BTN_LEFT; i <= BTN_TASK; i++)
++ set_bit(i, ptr->keybit);
++ ptr->relbit[0] = BIT(REL_X) | BIT(REL_Y) | BIT(REL_WHEEL);
++ input_set_abs_params(ptr, ABS_X, 0, XENFB_WIDTH, 0, 0);
++ input_set_abs_params(ptr, ABS_Y, 0, XENFB_HEIGHT, 0, 0);
++
++ ret = input_register_device(ptr);
++ if (ret) {
++ input_free_device(ptr);
++ xenbus_dev_fatal(dev, ret, "input_register_device(ptr)");
++ goto error;
++ }
++ info->ptr = ptr;
++
++ ret = xenkbd_connect_backend(dev, info);
++ if (ret < 0)
++ goto error;
++
++ return 0;
++
++ error_nomem:
++ ret = -ENOMEM;
++ xenbus_dev_fatal(dev, ret, "allocating device memory");
++ error:
++ xenkbd_remove(dev);
++ return ret;
++}
++
++static int xenkbd_resume(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ info->page->in_cons = info->page->in_prod = 0;
++ info->page->out_cons = info->page->out_prod = 0;
++ return xenkbd_connect_backend(dev, info);
++}
++
++static int xenkbd_remove(struct xenbus_device *dev)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++
++ xenkbd_disconnect_backend(info);
++ if (info->kbd)
++ input_unregister_device(info->kbd);
++ if (info->ptr)
++ input_unregister_device(info->ptr);
++ free_page((unsigned long)info->page);
++ kfree(info);
++ return 0;
++}
++
++static int xenkbd_connect_backend(struct xenbus_device *dev,
++ struct xenkbd_info *info)
++{
++ int ret;
++ struct xenbus_transaction xbt;
++
++ ret = bind_listening_port_to_irqhandler(
++ dev->otherend_id, input_handler, 0, "xenkbd", info);
++ if (ret < 0) {
++ xenbus_dev_fatal(dev, ret,
++ "bind_listening_port_to_irqhandler");
++ return ret;
++ }
++ info->irq = ret;
++
++ again:
++ ret = xenbus_transaction_start(&xbt);
++ if (ret) {
++ xenbus_dev_fatal(dev, ret, "starting transaction");
++ return ret;
++ }
++ ret = xenbus_printf(xbt, dev->nodename, "page-ref", "%lu",
++ virt_to_mfn(info->page));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (ret)
++ goto error_xenbus;
++ ret = xenbus_transaction_end(xbt, 0);
++ if (ret) {
++ if (ret == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, ret, "completing transaction");
++ return ret;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++ return 0;
++
++ error_xenbus:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, ret, "writing xenstore");
++ return ret;
++}
++
++static void xenkbd_disconnect_backend(struct xenkbd_info *info)
++{
++ if (info->irq >= 0)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = -1;
++}
++
++static void xenkbd_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct xenkbd_info *info = dev->dev.driver_data;
++ int ret, val;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ InitWait:
++ ret = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "feature-abs-pointer", "%d", &val);
++ if (ret < 0)
++ val = 0;
++ if (val) {
++ ret = xenbus_printf(XBT_NIL, info->xbdev->nodename,
++ "request-abs-pointer", "1");
++ if (ret)
++ printk(KERN_WARNING
++ "xenkbd: can't request abs-pointer\n");
++ }
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateConnected:
++ /*
++ * Work around xenbus race condition: If backend goes
++ * through InitWait to Connected fast enough, we can
++ * get Connected twice here.
++ */
++ if (dev->state != XenbusStateConnected)
++ goto InitWait; /* no InitWait seen yet, fudge it */
++
++ /* Set input abs params to match backend screen res */
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "width", "%d", &val) > 0 )
++ input_set_abs_params(info->ptr, ABS_X, 0, val, 0, 0);
++
++ if (xenbus_scanf(XBT_NIL, info->xbdev->otherend,
++ "height", "%d", &val) > 0 )
++ input_set_abs_params(info->ptr, ABS_Y, 0, val, 0, 0);
++
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++static const struct xenbus_device_id xenkbd_ids[] = {
++ { "vkbd" },
++ { "" }
++};
++MODULE_ALIAS("xen:vkbd");
++
++static struct xenbus_driver xenkbd_driver = {
++ .name = "vkbd",
++ .owner = THIS_MODULE,
++ .ids = xenkbd_ids,
++ .probe = xenkbd_probe,
++ .remove = xenkbd_remove,
++ .resume = xenkbd_resume,
++ .otherend_changed = xenkbd_backend_changed,
++};
++
++static int __init xenkbd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* Nothing to do if running in dom0. */
++ if (is_initial_xendomain())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenkbd_driver);
++}
++
++static void __exit xenkbd_cleanup(void)
++{
++ xenbus_unregister_driver(&xenkbd_driver);
++}
++
++module_init(xenkbd_init);
++module_exit(xenkbd_cleanup);
++
++MODULE_LICENSE("GPL");
+Index: head-2008-11-25/drivers/xen/gntdev/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/gntdev/Makefile 2008-01-07 13:19:18.000000000 +0100
+@@ -0,0 +1 @@
++obj-$(CONFIG_XEN_GRANT_DEV) := gntdev.o
+Index: head-2008-11-25/drivers/xen/gntdev/gntdev.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/gntdev/gntdev.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,1074 @@
++/******************************************************************************
++ * gntdev.c
++ *
++ * Device for accessing (in user-space) pages that have been granted by other
++ * domains.
++ *
++ * Copyright (c) 2006-2007, D G Murray.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ */
++
++#include <asm/atomic.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/init.h>
++#include <linux/fs.h>
++#include <linux/device.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <asm/uaccess.h>
++#include <asm/io.h>
++#include <xen/gnttab.h>
++#include <asm/hypervisor.h>
++#include <xen/balloon.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++
++#include <linux/types.h>
++#include <xen/public/gntdev.h>
++
++
++#define DRIVER_AUTHOR "Derek G. Murray <Derek.Murray@cl.cam.ac.uk>"
++#define DRIVER_DESC "User-space granted page access driver"
++
++MODULE_LICENSE("GPL");
++MODULE_AUTHOR(DRIVER_AUTHOR);
++MODULE_DESCRIPTION(DRIVER_DESC);
++
++#define MAX_GRANTS_LIMIT 1024
++#define DEFAULT_MAX_GRANTS 128
++
++/* A slot can be in one of three states:
++ *
++ * 0. GNTDEV_SLOT_INVALID:
++ * This slot is not associated with a grant reference, and is therefore free
++ * to be overwritten by a new grant reference.
++ *
++ * 1. GNTDEV_SLOT_NOT_YET_MAPPED:
++ * This slot is associated with a grant reference (via the
++ * IOCTL_GNTDEV_MAP_GRANT_REF ioctl), but it has not yet been mmap()-ed.
++ *
++ * 2. GNTDEV_SLOT_MAPPED:
++ * This slot is associated with a grant reference, and has been mmap()-ed.
++ */
++typedef enum gntdev_slot_state {
++ GNTDEV_SLOT_INVALID = 0,
++ GNTDEV_SLOT_NOT_YET_MAPPED,
++ GNTDEV_SLOT_MAPPED
++} gntdev_slot_state_t;
++
++#define GNTDEV_INVALID_HANDLE -1
++#define GNTDEV_FREE_LIST_INVALID -1
++/* Each opened instance of gntdev is associated with a list of grants,
++ * represented by an array of elements of the following type,
++ * gntdev_grant_info_t.
++ */
++typedef struct gntdev_grant_info {
++ gntdev_slot_state_t state;
++ union {
++ uint32_t free_list_index;
++ struct {
++ domid_t domid;
++ grant_ref_t ref;
++ grant_handle_t kernel_handle;
++ grant_handle_t user_handle;
++ uint64_t dev_bus_addr;
++ } valid;
++ } u;
++} gntdev_grant_info_t;
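++
++/*
++ * The union is discriminated by state: u.free_list_index is only
++ * meaningful while a slot is GNTDEV_SLOT_INVALID (i.e. sitting on
++ * the free list), and u.valid only once a grant reference has been
++ * attached.  Code that moves a slot between states must rewrite the
++ * union accordingly, as add_grant_reference() and the unmap ioctl
++ * below do.
++ */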
++
++/* Private data structure, which is stored in the file pointer for files
++ * associated with this device.
++ */
++typedef struct gntdev_file_private_data {
++
++ /* Array of grant information. */
++ gntdev_grant_info_t *grants;
++ uint32_t grants_size;
++
++ /* Read/write semaphore used to protect the grants array. */
++ struct rw_semaphore grants_sem;
++
++ /* An array of indices of free slots in the grants array.
++ * N.B. An entry in this list may temporarily have the value
++ * GNTDEV_FREE_LIST_INVALID if the corresponding slot has been removed
++ * from the list by the contiguous allocator, but the list has not yet
++ * been compressed. However, this is not visible across invocations of
++ * the device.
++ */
++ int32_t *free_list;
++
++ /* The number of free slots in the grants array. */
++ uint32_t free_list_size;
++
++ /* Read/write semaphore used to protect the free list. */
++ struct rw_semaphore free_list_sem;
++
++ /* Index of the next slot after the most recent contiguous allocation,
++ * for use in a next-fit allocator.
++ */
++ uint32_t next_fit_index;
++
++ /* Used to map grants into the kernel, before mapping them into user
++ * space.
++ */
++ struct page **foreign_pages;
++
++} gntdev_file_private_data_t;
++
++/* Module lifecycle operations. */
++static int __init gntdev_init(void);
++static void __exit gntdev_exit(void);
++
++module_init(gntdev_init);
++module_exit(gntdev_exit);
++
++/* File operations. */
++static int gntdev_open(struct inode *inode, struct file *flip);
++static int gntdev_release(struct inode *inode, struct file *flip);
++static int gntdev_mmap(struct file *flip, struct vm_area_struct *vma);
++static long gntdev_ioctl(struct file *flip,
++ unsigned int cmd, unsigned long arg);
++
++static const struct file_operations gntdev_fops = {
++ .owner = THIS_MODULE,
++ .open = gntdev_open,
++ .release = gntdev_release,
++ .mmap = gntdev_mmap,
++ .unlocked_ioctl = gntdev_ioctl
++};
++
++/* VM operations. */
++static void gntdev_vma_close(struct vm_area_struct *vma);
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm);
++
++static struct vm_operations_struct gntdev_vmops = {
++ .close = gntdev_vma_close,
++ .zap_pte = gntdev_clear_pte
++};
++
++/* Global variables. */
++
++/* The driver major number, for use when unregistering the driver. */
++static int gntdev_major;
++
++#define GNTDEV_NAME "gntdev"
++
++/* Memory mapping functions
++ * ------------------------
++ *
++ * Every granted page is mapped into both kernel and user space, and the two
++ * following functions return the respective virtual addresses of these pages.
++ *
++ * When shadow paging is disabled, the granted page is mapped directly into
++ * user space; when it is enabled, it is mapped into the kernel and remapped
++ * into user space using vm_insert_page() (see gntdev_mmap(), below).
++ */
++
++/* Returns the virtual address (in user space) of the @page_index'th page
++ * in the given VM area.
++ */
++static inline unsigned long get_user_vaddr(struct vm_area_struct *vma,
++ int page_index)
++{
++ return (unsigned long) vma->vm_start + (page_index << PAGE_SHIFT);
++}
++
++/* Returns the virtual address (in kernel space) of the @slot_index'th page
++ * mapped by the gntdev instance that owns the given private data struct.
++ */
++static inline unsigned long get_kernel_vaddr(gntdev_file_private_data_t *priv,
++ int slot_index)
++{
++ unsigned long pfn;
++ void *kaddr;
++ pfn = page_to_pfn(priv->foreign_pages[slot_index]);
++ kaddr = pfn_to_kaddr(pfn);
++ return (unsigned long) kaddr;
++}
++
++/* Helper functions. */
++
++/* Adds information about a grant reference to the list of grants in the file's
++ * private data structure. Returns non-zero on failure. On success, sets the
++ * value of *offset to the offset that should be mmap()-ed in order to map the
++ * grant reference.
++ */
++static int add_grant_reference(struct file *flip,
++ struct ioctl_gntdev_grant_ref *op,
++ uint64_t *offset)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ uint32_t slot_index;
++
++ if (unlikely(private_data->free_list_size == 0)) {
++ return -ENOMEM;
++ }
++
++ slot_index = private_data->free_list[--private_data->free_list_size];
++ private_data->free_list[private_data->free_list_size]
++ = GNTDEV_FREE_LIST_INVALID;
++
++ /* Copy the grant information into file's private data. */
++ private_data->grants[slot_index].state = GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[slot_index].u.valid.domid = op->domid;
++ private_data->grants[slot_index].u.valid.ref = op->ref;
++
++ /* The offset is calculated as the index of the chosen entry in the
++ * file's private data's array of grant information. This is then
++ * shifted to give an offset into the virtual "file address space".
++ */
++ *offset = slot_index << PAGE_SHIFT;
++
++ return 0;
++}
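++
++/*
++ * Illustrative use of the offset computed above, from user space (a
++ * minimal sketch for a single grant; fd is the open gntdev device,
++ * remote_domid and gref are placeholders for the peer's values):
++ *
++ * struct ioctl_gntdev_map_grant_ref op;
++ * op.count = 1;
++ * op.refs[0].domid = remote_domid;
++ * op.refs[0].ref = gref;
++ * ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
++ * addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
++ * MAP_SHARED, fd, op.index);
++ */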
++
++/* Adds the @count grant references to the contiguous range in the slot array
++ * beginning at @first_slot. It is assumed that @first_slot was returned by a
++ * previous invocation of find_contiguous_free_range(), during the same
++ * invocation of the driver.
++ */
++static int add_grant_references(struct file *flip,
++ int count,
++ struct ioctl_gntdev_grant_ref *ops,
++ uint32_t first_slot)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i;
++
++ for (i = 0; i < count; ++i) {
++
++ /* First, mark the slot's entry in the free list as invalid. */
++ int free_list_index =
++ private_data->grants[first_slot+i].u.free_list_index;
++ private_data->free_list[free_list_index] =
++ GNTDEV_FREE_LIST_INVALID;
++
++ /* Now, update the slot. */
++ private_data->grants[first_slot+i].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++ private_data->grants[first_slot+i].u.valid.domid =
++ ops[i].domid;
++ private_data->grants[first_slot+i].u.valid.ref = ops[i].ref;
++ }
++
++ return 0;
++}
++
++/* Scans through the free list for @flip, removing entries that are marked as
++ * GNTDEV_SLOT_INVALID. This will reduce the recorded size of the free list to
++ * the number of valid entries.
++ */
++static void compress_free_list(struct file *flip)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++ int i, j = 0, old_size, slot_index;
++
++ old_size = private_data->free_list_size;
++ for (i = 0; i < old_size; ++i) {
++ if (private_data->free_list[i] != GNTDEV_FREE_LIST_INVALID) {
++ if (i > j) {
++ slot_index = private_data->free_list[i];
++ private_data->free_list[j] = slot_index;
++ private_data->grants[slot_index].u
++ .free_list_index = j;
++ private_data->free_list[i]
++ = GNTDEV_FREE_LIST_INVALID;
++ }
++ ++j;
++ } else {
++ --private_data->free_list_size;
++ }
++ }
++}
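++
++/*
++ * Example: free_list = { 3, INVALID, 7 } with free_list_size = 3
++ * compresses to { 3, 7 } with free_list_size = 2, and slot 7's
++ * recorded u.free_list_index is updated from 2 to 1.
++ */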
++
++/* Searches the grant array in the private data of @flip for a range of
++ * @num_slots contiguous slots in the GNTDEV_SLOT_INVALID state.
++ *
++ * Returns the index of the first slot if a range is found, otherwise -ENOMEM.
++ */
++static int find_contiguous_free_range(struct file *flip,
++ uint32_t num_slots)
++{
++ gntdev_file_private_data_t *private_data
++ = (gntdev_file_private_data_t *) flip->private_data;
++
++ int i;
++ int start_index = private_data->next_fit_index;
++ int range_start = 0, range_length;
++
++ if (private_data->free_list_size < num_slots) {
++ return -ENOMEM;
++ }
++
++ /* First search from the start_index to the end of the array. */
++ range_length = 0;
++ for (i = start_index; i < private_data->grants_size; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ /* Now search from the start of the array to the start_index. */
++ range_length = 0;
++ for (i = 0; i < start_index; ++i) {
++ if (private_data->grants[i].state == GNTDEV_SLOT_INVALID) {
++ if (range_length == 0) {
++ range_start = i;
++ }
++ ++range_length;
++ if (range_length == num_slots) {
++ return range_start;
++ }
++ }
++ }
++
++ return -ENOMEM;
++}
++
++static int init_private_data(gntdev_file_private_data_t *priv,
++ uint32_t max_grants)
++{
++ int i;
++
++ /* Allocate space for the kernel-mapping of granted pages. */
++ priv->foreign_pages =
++ alloc_empty_pages_and_pagevec(max_grants);
++ if (!priv->foreign_pages)
++ goto nomem_out;
++
++ /* Allocate the grant list and free-list. */
++ priv->grants = kmalloc(max_grants * sizeof(gntdev_grant_info_t),
++ GFP_KERNEL);
++ if (!priv->grants)
++ goto nomem_out2;
++ priv->free_list = kmalloc(max_grants * sizeof(int32_t), GFP_KERNEL);
++ if (!priv->free_list)
++ goto nomem_out3;
++
++ /* Initialise the free-list, which contains all slots at first. */
++ for (i = 0; i < max_grants; ++i) {
++ priv->free_list[max_grants - i - 1] = i;
++ priv->grants[i].state = GNTDEV_SLOT_INVALID;
++ priv->grants[i].u.free_list_index = max_grants - i - 1;
++ }
++ priv->grants_size = max_grants;
++ priv->free_list_size = max_grants;
++ priv->next_fit_index = 0;
++
++ return 0;
++
++nomem_out3:
++ kfree(priv->grants);
++nomem_out2:
++ free_empty_pages_and_pagevec(priv->foreign_pages, max_grants);
++nomem_out:
++ return -ENOMEM;
++
++}
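++
++/*
++ * The free list is filled in reverse: with max_grants = 4 it reads
++ * { 3, 2, 1, 0 }, and since add_grant_reference() pops from the
++ * tail (--free_list_size), slot 0 is handed out first.
++ */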
++
++/* Interface functions. */
++
++/* Initialises the driver. Called when the module is loaded. */
++static int __init gntdev_init(void)
++{
++ struct class *class;
++ struct class_device *device;
++
++ if (!is_running_on_xen()) {
++ printk(KERN_ERR "You must be running Xen to use gntdev\n");
++ return -ENODEV;
++ }
++
++ gntdev_major = register_chrdev(0, GNTDEV_NAME, &gntdev_fops);
++ if (gntdev_major < 0) {
++ printk(KERN_ERR "Could not register gntdev device\n");
++ return gntdev_major;
++ }
++
++ /* Note that if the sysfs code fails, we will still initialise the
++ * device, and output the major number so that the device can be
++ * created manually using mknod.
++ */
++ if ((class = get_xen_class()) == NULL) {
++ printk(KERN_ERR "Error setting up xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ device = class_device_create(class, NULL, MKDEV(gntdev_major, 0),
++ NULL, GNTDEV_NAME);
++ if (IS_ERR(device)) {
++ printk(KERN_ERR "Error creating gntdev device in xen_class\n");
++ printk(KERN_ERR "gntdev created with major number = %d\n",
++ gntdev_major);
++ return 0;
++ }
++
++ return 0;
++}
++
++/* Cleans up and unregisters the driver. Called when the driver is unloaded.
++ */
++static void __exit gntdev_exit(void)
++{
++ struct class *class;
++ if ((class = get_xen_class()) != NULL)
++ class_device_destroy(class, MKDEV(gntdev_major, 0));
++ unregister_chrdev(gntdev_major, GNTDEV_NAME);
++}
++
++/* Called when the device is opened. */
++static int gntdev_open(struct inode *inode, struct file *flip)
++{
++ gntdev_file_private_data_t *private_data;
++
++ try_module_get(THIS_MODULE);
++
++ /* Allocate space for the per-instance private data. */
++ private_data = kmalloc(sizeof(*private_data), GFP_KERNEL);
++ if (!private_data)
++ goto nomem_out;
++
++ /* These will be lazily initialised by init_private_data. */
++ private_data->grants = NULL;
++ private_data->free_list = NULL;
++ private_data->foreign_pages = NULL;
++
++ init_rwsem(&private_data->grants_sem);
++ init_rwsem(&private_data->free_list_sem);
++
++ flip->private_data = private_data;
++
++ return 0;
++
++nomem_out:
++ return -ENOMEM;
++}
++
++/* Called when the device is closed.
++ */
++static int gntdev_release(struct inode *inode, struct file *flip)
++{
++ if (flip->private_data) {
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++ if (private_data->foreign_pages)
++ free_empty_pages_and_pagevec
++ (private_data->foreign_pages,
++ private_data->grants_size);
++ kfree(private_data->grants);
++ kfree(private_data->free_list);
++ kfree(private_data);
++ }
++ module_put(THIS_MODULE);
++ return 0;
++}
++
++/* Called when an attempt is made to mmap() the device. The private data from
++ * @flip contains the list of grant references that can be mapped. The vm_pgoff
++ * field of @vma contains the index into that list that refers to the grant
++ * reference that will be mapped. Only mappings that are a multiple of
++ * PAGE_SIZE are handled.
++ */
++static int gntdev_mmap (struct file *flip, struct vm_area_struct *vma)
++{
++ struct gnttab_map_grant_ref op;
++ unsigned long slot_index = vma->vm_pgoff;
++ unsigned long kernel_vaddr, user_vaddr;
++ uint32_t size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ uint64_t ptep;
++ int ret;
++ int flags;
++ int i;
++ struct page *page;
++ gntdev_file_private_data_t *private_data = flip->private_data;
++
++ if (unlikely(!private_data)) {
++ printk(KERN_ERR "File's private data is NULL.\n");
++ return -EINVAL;
++ }
++
++ /* Test to make sure that the grants array has been initialised. */
++ down_read(&private_data->grants_sem);
++ if (unlikely(!private_data->grants)) {
++ up_read(&private_data->grants_sem);
++ printk(KERN_ERR "Attempted to mmap before ioctl.\n");
++ return -EINVAL;
++ }
++ up_read(&private_data->grants_sem);
++
++ if (unlikely(size == 0 ||
++ (size + slot_index) > private_data->grants_size)) {
++ printk(KERN_ERR "Invalid number of pages or offset "
++ "(num_pages = %u, first_slot = %lu).\n",
++ size, slot_index);
++ return -ENXIO;
++ }
++
++ if ((vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_SHARED)) {
++ printk(KERN_ERR "Writable mappings must be shared.\n");
++ return -EINVAL;
++ }
++
++ /* Slots must be in the NOT_YET_MAPPED state. */
++ down_write(&private_data->grants_sem);
++ for (i = 0; i < size; ++i) {
++ if (private_data->grants[slot_index + i].state !=
++ GNTDEV_SLOT_NOT_YET_MAPPED) {
++ printk(KERN_ERR "Slot (index = %ld) is in the wrong "
++ "state (%d).\n", slot_index + i,
++ private_data->grants[slot_index + i].state);
++ up_write(&private_data->grants_sem);
++ return -EINVAL;
++ }
++ }
++
++ /* Install the hook for unmapping. */
++ vma->vm_ops = &gntdev_vmops;
++
++ /* The VM area contains pages from another VM. */
++ vma->vm_flags |= VM_FOREIGN;
++ vma->vm_private_data = kzalloc(size * sizeof(struct page *),
++ GFP_KERNEL);
++ if (vma->vm_private_data == NULL) {
++ printk(KERN_ERR "Couldn't allocate mapping structure for VM "
++ "area.\n");
++ return -ENOMEM;
++ }
++
++ /* This flag prevents Bad PTE errors when the memory is unmapped. */
++ vma->vm_flags |= VM_RESERVED;
++
++ /* This flag prevents this VM area being copied on a fork(). A better
++ * behaviour might be to explicitly carry out the appropriate mappings
++ * on fork(), but I don't know if there's a hook for this.
++ */
++ vma->vm_flags |= VM_DONTCOPY;
++
++#ifdef CONFIG_X86
++ /* This flag ensures that the page tables are not unpinned before the
++ * VM area is unmapped. Therefore Xen still recognises the PTE as
++ * belonging to an L1 pagetable, and the grant unmap operation will
++ * succeed, even if the process does not exit cleanly.
++ */
++ vma->vm_mm->context.has_foreign_mappings = 1;
++#endif
++
++ for (i = 0; i < size; ++i) {
++
++ flags = GNTMAP_host_map;
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ kernel_vaddr = get_kernel_vaddr(private_data, slot_index + i);
++ user_vaddr = get_user_vaddr(vma, i);
++ page = pfn_to_page(__pa(kernel_vaddr) >> PAGE_SHIFT);
++
++ gnttab_set_map_op(&op, kernel_vaddr, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant reference "
++ "into the kernel (%d). domid = %d; ref = %d\n",
++ op.status,
++ private_data->grants[slot_index+i]
++ .u.valid.domid,
++ private_data->grants[slot_index+i]
++ .u.valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Store a reference to the page that will be mapped into user
++ * space.
++ */
++ ((struct page **) vma->vm_private_data)[i] = page;
++
++ /* Mark mapped page as reserved. */
++ SetPageReserved(page);
++
++ /* Record the grant handle, for use in the unmap operation. */
++ private_data->grants[slot_index+i].u.valid.kernel_handle =
++ op.handle;
++ private_data->grants[slot_index+i].u.valid.dev_bus_addr =
++ op.dev_bus_addr;
++
++ private_data->grants[slot_index+i].state = GNTDEV_SLOT_MAPPED;
++ private_data->grants[slot_index+i].u.valid.user_handle =
++ GNTDEV_INVALID_HANDLE;
++
++ /* Now perform the mapping to user space. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++
++ /* NOT USING SHADOW PAGE TABLES. */
++ /* In this case, we map the grant(s) straight into user
++ * space.
++ */
++
++ /* Get the machine address of the PTE for the user
++ * page.
++ */
++ if ((ret = create_lookup_pte_addr(vma->vm_mm,
++ vma->vm_start
++ + (i << PAGE_SHIFT),
++ &ptep)))
++ {
++ printk(KERN_ERR "Error obtaining PTE pointer "
++ "(%d).\n", ret);
++ goto undo_map_out;
++ }
++
++ /* Configure the map operation. */
++
++ /* The reference is to be used by host CPUs. */
++ flags = GNTMAP_host_map;
++
++ /* Specifies a user space mapping. */
++ flags |= GNTMAP_application_map;
++
++ /* The map request contains the machine address of the
++ * PTE to update.
++ */
++ flags |= GNTMAP_contains_pte;
++
++ if (!(vma->vm_flags & VM_WRITE))
++ flags |= GNTMAP_readonly;
++
++ gnttab_set_map_op(&op, ptep, flags,
++ private_data->grants[slot_index+i]
++ .u.valid.ref,
++ private_data->grants[slot_index+i]
++ .u.valid.domid);
++
++ /* Carry out the mapping of the grant reference. */
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status) {
++ printk(KERN_ERR "Error mapping the grant "
++ "reference into user space (%d). domid "
++ "= %d; ref = %d\n", op.status,
++ private_data->grants[slot_index+i].u
++ .valid.domid,
++ private_data->grants[slot_index+i].u
++ .valid.ref);
++ goto undo_map_out;
++ }
++
++ /* Record the grant handle, for use in the unmap
++ * operation.
++ */
++ private_data->grants[slot_index+i].u.
++ valid.user_handle = op.handle;
++
++ /* Update p2m structure with the new mapping. */
++ set_phys_to_machine(__pa(kernel_vaddr) >> PAGE_SHIFT,
++ FOREIGN_FRAME(private_data->
++ grants[slot_index+i]
++ .u.valid.dev_bus_addr
++ >> PAGE_SHIFT));
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ /* In this case, we simply insert the page into the VM
++ * area. */
++ ret = vm_insert_page(vma, user_vaddr, page);
++ }
++
++ }
++
++ up_write(&private_data->grants_sem);
++ return 0;
++
++undo_map_out:
++ /* If we have a mapping failure, the unmapping will be taken care of
++ * by do_mmap_pgoff(), which will eventually call gntdev_clear_pte().
++ * All we need to do here is free the vma_private_data.
++ */
++ kfree(vma->vm_private_data);
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ vma->vm_private_data = private_data;
++
++ up_write(&private_data->grants_sem);
++
++ return -ENOMEM;
++}
++
++static pte_t gntdev_clear_pte(struct vm_area_struct *vma, unsigned long addr,
++ pte_t *ptep, int is_fullmm)
++{
++ int slot_index, ret;
++ pte_t copy;
++ struct gnttab_unmap_grant_ref op;
++ gntdev_file_private_data_t *private_data;
++
++ /* THIS IS VERY UNPLEASANT: do_mmap_pgoff() will set the vma->vm_file
++ * to NULL on failure. However, we need this in gntdev_clear_pte() to
++ * unmap the grants. Therefore, we smuggle a reference to the file's
++ * private data in the VM area's private data pointer.
++ */
++ if (vma->vm_file) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_file->private_data;
++ } else if (vma->vm_private_data) {
++ private_data = (gntdev_file_private_data_t *)
++ vma->vm_private_data;
++ } else {
++ private_data = NULL; /* gcc warning */
++ BUG();
++ }
++
++ /* Copy the existing value of the PTE for returning. */
++ copy = *ptep;
++
++ /* Calculate the grant relating to this PTE. */
++ slot_index = vma->vm_pgoff + ((addr - vma->vm_start) >> PAGE_SHIFT);
++
++ /* Only unmap grants if the slot has been mapped; this may be
++ * called from a failing mmap().
++ */
++ if (private_data->grants[slot_index].state == GNTDEV_SLOT_MAPPED) {
++
++ /* First, we clear the user space mapping, if it has been made.
++ */
++ if (private_data->grants[slot_index].u.valid.user_handle !=
++ GNTDEV_INVALID_HANDLE &&
++ !xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* NOT USING SHADOW PAGE TABLES. */
++ gnttab_set_unmap_op(&op, virt_to_machine(ptep),
++ GNTMAP_contains_pte,
++ private_data->grants[slot_index]
++ .u.valid.user_handle);
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk(KERN_WARNING
++ "User unmap grant status = %d\n",
++ op.status);
++ } else {
++ /* USING SHADOW PAGE TABLES. */
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ /* Finally, we unmap the grant from kernel space. */
++ gnttab_set_unmap_op(&op,
++ get_kernel_vaddr(private_data, slot_index),
++ GNTMAP_host_map,
++ private_data->grants[slot_index].u.valid
++ .kernel_handle);
++ ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref,
++ &op, 1);
++ BUG_ON(ret);
++ if (op.status)
++ printk(KERN_WARNING
++ "Kernel unmap grant status = %d\n",
++ op.status);
++
++
++ /* Return slot to the not-yet-mapped state, so that it may be
++ * mapped again, or removed by a subsequent ioctl.
++ */
++ private_data->grants[slot_index].state =
++ GNTDEV_SLOT_NOT_YET_MAPPED;
++
++ /* Invalidate the physical to machine mapping for this page. */
++ set_phys_to_machine(__pa(get_kernel_vaddr(private_data,
++ slot_index))
++ >> PAGE_SHIFT, INVALID_P2M_ENTRY);
++
++ } else {
++ pte_clear_full(vma->vm_mm, addr, ptep, is_fullmm);
++ }
++
++ return copy;
++}
++
++/* "Destructor" for a VM area.
++ */
++static void gntdev_vma_close(struct vm_area_struct *vma)
++{
++ kfree(vma->vm_private_data);
++}
++
++/* Called when an ioctl is made on the device.
++ */
++static long gntdev_ioctl(struct file *flip,
++ unsigned int cmd, unsigned long arg)
++{
++ int rc = 0;
++ gntdev_file_private_data_t *private_data =
++ (gntdev_file_private_data_t *) flip->private_data;
++
++ /* On the first invocation, we will lazily initialise the grant array
++ * and free-list.
++ */
++ if (unlikely(!private_data->grants)
++ && likely(cmd != IOCTL_GNTDEV_SET_MAX_GRANTS)) {
++ down_write(&private_data->grants_sem);
++
++ if (unlikely(private_data->grants)) {
++ up_write(&private_data->grants_sem);
++ goto private_data_initialised;
++ }
++
++ /* Just use the default. Setting to a non-default is handled
++ * in the ioctl switch.
++ */
++ rc = init_private_data(private_data, DEFAULT_MAX_GRANTS);
++
++ up_write(&private_data->grants_sem);
++
++ if (rc) {
++ printk(KERN_ERR "Initialising gntdev private data "
++ "failed.\n");
++ return rc;
++ }
++ }
++
++private_data_initialised:
++ switch (cmd) {
++ case IOCTL_GNTDEV_MAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_map_grant_ref op;
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op, (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto map_out;
++ }
++ if (unlikely(op.count <= 0)) {
++ rc = -EINVAL;
++ goto map_out;
++ }
++
++ if (op.count == 1) {
++ if ((rc = add_grant_reference(flip, &op.refs[0],
++ &op.index)) < 0) {
++ printk(KERN_ERR "Adding grant reference "
++ "failed (%d).\n", rc);
++ goto map_out;
++ }
++ } else {
++ struct ioctl_gntdev_grant_ref *refs, *u;
++ refs = kmalloc(op.count * sizeof(*refs), GFP_KERNEL);
++ if (!refs) {
++ rc = -ENOMEM;
++ goto map_out;
++ }
++ u = ((struct ioctl_gntdev_map_grant_ref *)arg)->refs;
++ if ((rc = copy_from_user(refs,
++ (void __user *)u,
++ sizeof(*refs) * op.count))) {
++ printk(KERN_ERR "Copying refs from user failed"
++ " (%d).\n", rc);
++ rc = -EINVAL;
++ goto map_out;
++ }
++ if ((rc = find_contiguous_free_range(flip, op.count))
++ < 0) {
++ printk(KERN_ERR "Finding contiguous range "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ op.index = rc << PAGE_SHIFT;
++ if ((rc = add_grant_references(flip, op.count,
++ refs, rc))) {
++ printk(KERN_ERR "Adding grant references "
++ "failed (%d).\n", rc);
++ kfree(refs);
++ goto map_out;
++ }
++ compress_free_list(flip);
++ kfree(refs);
++ }
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ printk(KERN_ERR "Copying result back to user failed "
++ "(%d)\n", rc);
++ rc = -EFAULT;
++ goto map_out;
++ }
++ map_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
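++ /* Hypothetical userspace sketch (illustration only, not part of
++ * this patch): map one grant, then mmap() the slot it was given.
++ *
++ * struct ioctl_gntdev_map_grant_ref op = { .count = 1 };
++ * op.refs[0].domid = remote_domid; (granting domain's ID)
++ * op.refs[0].ref = gref; (grant ref made by that domain)
++ * ioctl(fd, IOCTL_GNTDEV_MAP_GRANT_REF, &op);
++ * addr = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE,
++ * MAP_SHARED, fd, op.index);
++ *
++ * 'fd' is an open handle on the gntdev device node; 'remote_domid'
++ * and 'gref' are assumed to come from the granting domain.
++ */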
++ case IOCTL_GNTDEV_UNMAP_GRANT_REF:
++ {
++ struct ioctl_gntdev_unmap_grant_ref op;
++ int i, start_index;
++
++ down_write(&private_data->grants_sem);
++ down_write(&private_data->free_list_sem);
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto unmap_out;
++ }
++
++ start_index = op.index >> PAGE_SHIFT;
++
++ /* First, check that all pages are in the NOT_YET_MAPPED
++ * state.
++ */
++ for (i = 0; i < op.count; ++i) {
++ if (unlikely
++ (private_data->grants[start_index + i].state
++ != GNTDEV_SLOT_NOT_YET_MAPPED)) {
++ if (private_data->grants[start_index + i].state
++ == GNTDEV_SLOT_INVALID) {
++ printk(KERN_ERR
++ "Tried to remove an invalid "
++ "grant at offset 0x%x.\n",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EINVAL;
++ } else {
++ printk(KERN_ERR
++ "Tried to remove a grant which "
++ "is currently mmap()-ed at "
++ "offset 0x%x.\n",
++ (start_index + i)
++ << PAGE_SHIFT);
++ rc = -EBUSY;
++ }
++ goto unmap_out;
++ }
++ }
++
++ /* Unmap pages and add them to the free list.
++ */
++ for (i = 0; i < op.count; ++i) {
++ private_data->grants[start_index+i].state =
++ GNTDEV_SLOT_INVALID;
++ private_data->grants[start_index+i].u.free_list_index =
++ private_data->free_list_size;
++ private_data->free_list[private_data->free_list_size] =
++ start_index + i;
++ ++private_data->free_list_size;
++ }
++
++ unmap_out:
++ up_write(&private_data->grants_sem);
++ up_write(&private_data->free_list_sem);
++ return rc;
++ }
++ case IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR:
++ {
++ struct ioctl_gntdev_get_offset_for_vaddr op;
++ struct vm_area_struct *vma;
++ unsigned long vaddr;
++
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ vaddr = (unsigned long)op.vaddr;
++
++ down_read(¤t->mm->mmap_sem);
++ vma = find_vma(current->mm, vaddr);
++ if (vma == NULL) {
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if ((!vma->vm_ops) || (vma->vm_ops != &gntdev_vmops)) {
++ printk(KERN_ERR "The vaddr specified does not belong "
++ "to a gntdev instance: %#lx\n", vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ if (vma->vm_start != vaddr) {
++ printk(KERN_ERR "The vaddr specified in an "
++ "IOCTL_GNTDEV_GET_OFFSET_FOR_VADDR must be at "
++ "the start of the VM area. vma->vm_start = "
++ "%#lx; vaddr = %#lx\n",
++ vma->vm_start, vaddr);
++ rc = -EFAULT;
++ goto get_offset_unlock_out;
++ }
++ op.offset = vma->vm_pgoff << PAGE_SHIFT;
++ op.count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
++ up_read(¤t->mm->mmap_sem);
++ if ((rc = copy_to_user((void __user *) arg,
++ &op,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto get_offset_out;
++ }
++ goto get_offset_out;
++ get_offset_unlock_out:
++ up_read(¤t->mm->mmap_sem);
++ get_offset_out:
++ return rc;
++ }
++ case IOCTL_GNTDEV_SET_MAX_GRANTS:
++ {
++ struct ioctl_gntdev_set_max_grants op;
++ if ((rc = copy_from_user(&op,
++ (void __user *) arg,
++ sizeof(op)))) {
++ rc = -EFAULT;
++ goto set_max_out;
++ }
++ down_write(&private_data->grants_sem);
++ if (private_data->grants) {
++ rc = -EBUSY;
++ goto set_max_unlock_out;
++ }
++ if (op.count > MAX_GRANTS_LIMIT) {
++ rc = -EINVAL;
++ goto set_max_unlock_out;
++ }
++ rc = init_private_data(private_data, op.count);
++ set_max_unlock_out:
++ up_write(&private_data->grants_sem);
++ set_max_out:
++ return rc;
++ }
++ default:
++ return -ENOIOCTLCMD;
++ }
++
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/netback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/Makefile 2007-07-12 08:54:23.000000000 +0200
+@@ -0,0 +1,5 @@
++obj-$(CONFIG_XEN_NETDEV_BACKEND) := netbk.o
++obj-$(CONFIG_XEN_NETDEV_LOOPBACK) += netloop.o
++
++netbk-y := netback.o xenbus.o interface.o accel.o
++netloop-y := loopback.o
+Index: head-2008-11-25/drivers/xen/netback/accel.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/accel.c 2008-01-07 13:19:18.000000000 +0100
+@@ -0,0 +1,269 @@
++/******************************************************************************
++ * drivers/xen/netback/accel.c
++ *
++ * Interface between backend virtual network device and accelerated plugin.
++ *
++ * Copyright (C) 2007 Solarflare Communications, Inc
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/list.h>
++#include <asm/atomic.h>
++#include <xen/xenbus.h>
++#include <linux/mutex.h>
++
++#include "common.h"
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/accel (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++/*
++ * A list of available netback accelerator plugin modules (each list
++ * entry is of type struct netback_accelerator)
++ */
++static struct list_head accelerators_list;
++/* Lock used to protect access to accelerators_list */
++DEFINE_MUTEX(accelerators_mutex);
++
++/*
++ * Compare a backend to an accelerator, and decide if they are
++ * compatible (i.e. if the accelerator should be used by the
++ * backend)
++ */
++static int match_accelerator(struct xenbus_device *xendev,
++ struct backend_info *be,
++ struct netback_accelerator *accelerator)
++{
++ int rc = 0;
++ char *eth_name = xenbus_read(XBT_NIL, xendev->nodename, "accel", NULL);
++
++ if (IS_ERR(eth_name)) {
++ /* Probably means not present */
++ DPRINTK("%s: no match due to xenbus_read accel error %d\n",
++ __FUNCTION__, PTR_ERR(eth_name));
++ return 0;
++ } else {
++ if (!strcmp(eth_name, accelerator->eth_name))
++ rc = 1;
++ kfree(eth_name);
++ return rc;
++ }
++}
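++
++/* Illustrative xenstore layout assumed by match_accelerator() above
++ * (example value, not part of this patch):
++ *
++ * <xendev->nodename>/accel = "eth2"
++ *
++ * An accelerator registered with eth_name "eth2" then matches this
++ * backend; if the "accel" key is absent, no accelerator is used.
++ */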
++
++
++static void do_probe(struct backend_info *be,
++ struct netback_accelerator *accelerator,
++ struct xenbus_device *xendev)
++{
++ be->accelerator = accelerator;
++ atomic_inc(&be->accelerator->use_count);
++ if (be->accelerator->hooks->probe(xendev) != 0) {
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++}
++
++
++/*
++ * Notify suitable backends that a new accelerator is available and
++ * connected. This will also notify the accelerator plugin module
++ * that it is being used for a device through the probe hook.
++ */
++static int netback_accelerator_probe_backend(struct device *dev, void *arg)
++{
++ struct netback_accelerator *accelerator =
++ (struct netback_accelerator *)arg;
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++
++ if (!strcmp("vif", xendev->devicetype)) {
++ struct backend_info *be = xendev->dev.driver_data;
++
++ if (match_accelerator(xendev, be, accelerator) &&
++ try_module_get(accelerator->hooks->owner)) {
++ do_probe(be, accelerator, xendev);
++ }
++ }
++ return 0;
++}
++
++
++/*
++ * Notify suitable backends that an accelerator is unavailable.
++ */
++static int netback_accelerator_remove_backend(struct device *dev, void *arg)
++{
++ struct xenbus_device *xendev = to_xenbus_device(dev);
++ struct netback_accelerator *accelerator =
++ (struct netback_accelerator *)arg;
++
++ if (!strcmp("vif", xendev->devicetype)) {
++ struct backend_info *be = xendev->dev.driver_data;
++
++ if (be->accelerator == accelerator) {
++ be->accelerator->hooks->remove(xendev);
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++ }
++ return 0;
++}
++
++
++
++/*
++ * Entry point for a netback accelerator plugin module. Called to
++ * advertise its presence, and connect to any suitable backends.
++ */
++int netback_connect_accelerator(unsigned version, int id, const char *eth_name,
++ struct netback_accel_hooks *hooks)
++{
++ struct netback_accelerator *new_accelerator;
++ unsigned eth_name_len;
++
++ if (version != NETBACK_ACCEL_VERSION) {
++ if (version > NETBACK_ACCEL_VERSION) {
++ /* The caller has a higher version number; leave it
++ up to them to decide whether to continue.
++ They can call again with a lower number if
++ they're happy to be compatible with us. */
++ return NETBACK_ACCEL_VERSION;
++ } else {
++ /* We have a more recent version than the caller.
++ Reject for now; we may be able to offer
++ backward compatibility in the future. */
++ return -EPROTO;
++ }
++ }
++
++ new_accelerator =
++ kmalloc(sizeof(struct netback_accelerator), GFP_KERNEL);
++ if (!new_accelerator) {
++ DPRINTK("%s: failed to allocate memory for accelerator\n",
++ __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ new_accelerator->id = id;
++
++ eth_name_len = strlen(eth_name)+1;
++ new_accelerator->eth_name = kmalloc(eth_name_len, GFP_KERNEL);
++ if (!new_accelerator->eth_name) {
++ DPRINTK("%s: failed to allocate memory for eth_name string\n",
++ __FUNCTION__);
++ kfree(new_accelerator);
++ return -ENOMEM;
++ }
++ strlcpy(new_accelerator->eth_name, eth_name, eth_name_len);
++
++ new_accelerator->hooks = hooks;
++
++ atomic_set(&new_accelerator->use_count, 0);
++
++ mutex_lock(&accelerators_mutex);
++ list_add(&new_accelerator->link, &accelerators_list);
++
++ /* tell existing backends about new plugin */
++ xenbus_for_each_backend(new_accelerator,
++ netback_accelerator_probe_backend);
++
++ mutex_unlock(&accelerators_mutex);
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(netback_connect_accelerator);
++
++
++/*
++ * Disconnect an accelerator plugin module that has previously been
++ * connected.
++ */
++void netback_disconnect_accelerator(int id, const char *eth_name)
++{
++ struct netback_accelerator *accelerator, *next;
++
++ mutex_lock(&accelerators_mutex);
++ list_for_each_entry_safe(accelerator, next, &accelerators_list, link) {
++ if (!strcmp(eth_name, accelerator->eth_name)) {
++ xenbus_for_each_backend
++ (accelerator, netback_accelerator_remove_backend);
++ BUG_ON(atomic_read(&accelerator->use_count) != 0);
++ list_del(&accelerator->link);
++ kfree(accelerator->eth_name);
++ kfree(accelerator);
++ break;
++ }
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++EXPORT_SYMBOL_GPL(netback_disconnect_accelerator);
++
++
++void netback_probe_accelerators(struct backend_info *be,
++ struct xenbus_device *dev)
++{
++ struct netback_accelerator *accelerator;
++
++ /*
++ * Check list of accelerators to see if any is suitable, and
++ * use it if it is.
++ */
++ mutex_lock(&accelerators_mutex);
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(dev, be, accelerator) &&
++ try_module_get(accelerator->hooks->owner)) {
++ do_probe(be, accelerator, dev);
++ break;
++ }
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++
++
++void netback_remove_accelerators(struct backend_info *be,
++ struct xenbus_device *dev)
++{
++ mutex_lock(&accelerators_mutex);
++ /* Notify the accelerator (if any) of this device's removal */
++ if (be->accelerator != NULL) {
++ be->accelerator->hooks->remove(dev);
++ atomic_dec(&be->accelerator->use_count);
++ module_put(be->accelerator->hooks->owner);
++ be->accelerator = NULL;
++ }
++ mutex_unlock(&accelerators_mutex);
++}
++
++
++void netif_accel_init(void)
++{
++ INIT_LIST_HEAD(&accelerators_list);
++}
+Index: head-2008-11-25/drivers/xen/netback/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/common.h 2008-01-07 13:19:18.000000000 +0100
+@@ -0,0 +1,217 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/common.h
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __NETIF__BACKEND__COMMON_H__
++#define __NETIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/wait.h>
++#include <xen/evtchn.h>
++#include <xen/interface/io/netif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "xen_net: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "xen_net: " fmt, ##args)
++
++typedef struct netif_st {
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ u8 fe_dev_addr[6];
++
++ /* Physical parameters of the comms window. */
++ grant_handle_t tx_shmem_handle;
++ grant_ref_t tx_shmem_ref;
++ grant_handle_t rx_shmem_handle;
++ grant_ref_t rx_shmem_ref;
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ netif_tx_back_ring_t tx;
++ netif_rx_back_ring_t rx;
++ struct vm_struct *tx_comms_area;
++ struct vm_struct *rx_comms_area;
++
++ /* Set of features that can be turned on in dev->features. */
++ int features;
++
++ /* Internal feature information. */
++ u8 can_queue:1; /* can queue packets for receiver? */
++ u8 copying_receiver:1; /* copy packets to receiver? */
++
++ /* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
++ RING_IDX rx_req_cons_peek;
++
++ /* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
++ unsigned long credit_bytes;
++ unsigned long credit_usec;
++ unsigned long remaining_credit;
++ struct timer_list credit_timeout;
++
++ /* Enforce draining of the transmit queue. */
++ struct timer_list tx_queue_timeout;
++
++ /* Miscellaneous private stuff. */
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++ struct net_device *dev;
++ struct net_device_stats stats;
++
++ unsigned int carrier;
++
++ wait_queue_head_t waiting_to_free;
++} netif_t;
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss; also the etherbridge
++ * can be rather lazy in activating its port).
++ */
++#define netback_carrier_on(netif) ((netif)->carrier = 1)
++#define netback_carrier_off(netif) ((netif)->carrier = 0)
++#define netback_carrier_ok(netif) ((netif)->carrier)
++
++enum {
++ NETBK_DONT_COPY_SKB,
++ NETBK_DELAYED_COPY_SKB,
++ NETBK_ALWAYS_COPY_SKB,
++};
++
++extern int netbk_copy_skb_mode;
++
++/* Function pointers into netback accelerator plugin modules */
++struct netback_accel_hooks {
++ struct module *owner;
++ int (*probe)(struct xenbus_device *dev);
++ int (*remove)(struct xenbus_device *dev);
++};
++
++/* Structure to track the state of a netback accelerator plugin */
++struct netback_accelerator {
++ struct list_head link;
++ int id;
++ char *eth_name;
++ atomic_t use_count;
++ struct netback_accel_hooks *hooks;
++};
++
++struct backend_info {
++ struct xenbus_device *dev;
++ netif_t *netif;
++ enum xenbus_state frontend_state;
++
++ /* State relating to the netback accelerator */
++ void *netback_accel_priv;
++ /* The accelerator that this backend is currently using */
++ struct netback_accelerator *accelerator;
++};
++
++#define NETBACK_ACCEL_VERSION 0x00010001
++
++/*
++ * Connect an accelerator plugin module to netback. Returns zero on
++ * success, < 0 on error, > 0 (with highest version number supported)
++ * if version mismatch.
++ */
++extern int netback_connect_accelerator(unsigned version,
++ int id, const char *eth_name,
++ struct netback_accel_hooks *hooks);
++/* Disconnect a previously connected accelerator plugin module */
++extern void netback_disconnect_accelerator(int id, const char *eth_name);
++
++
++extern
++void netback_probe_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netback_remove_accelerators(struct backend_info *be,
++ struct xenbus_device *dev);
++extern
++void netif_accel_init(void);
++
++
++#define NET_TX_RING_SIZE __RING_SIZE((netif_tx_sring_t *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((netif_rx_sring_t *)0, PAGE_SIZE)
++
++void netif_disconnect(netif_t *netif);
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle);
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn);
++
++#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define netif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ wake_up(&(_b)->waiting_to_free); \
++ } while (0)
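++
++/* Typical usage (sketch): netif_be_start_xmit() takes a reference while
++ * a packet from this interface is in flight; the matching netif_put()
++ * when the response is made lets the final reference drop wake
++ * netif_disconnect() via waiting_to_free.
++ */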
++
++void netif_xenbus_init(void);
++
++#define netif_schedulable(netif) \
++ (netif_running((netif)->dev) && netback_carrier_ok(netif))
++
++void netif_schedule_work(netif_t *netif);
++void netif_deschedule_work(netif_t *netif);
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
++struct net_device_stats *netif_be_get_stats(struct net_device *dev);
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++static inline int netbk_can_queue(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->can_queue;
++}
++
++static inline int netbk_can_sg(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return netif->features & NETIF_F_SG;
++}
++
++#endif /* __NETIF__BACKEND__COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/netback/interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/interface.c 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,336 @@
++/******************************************************************************
++ * arch/xen/drivers/netif/backend/interface.c
++ *
++ * Network-device interface management.
++ *
++ * Copyright (c) 2004-2005, Keir Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <linux/ethtool.h>
++#include <linux/rtnetlink.h>
++
++/*
++ * Module parameter 'queue_length':
++ *
++ * Enables queuing in the network stack when a client has run out of receive
++ * descriptors. Although this feature can improve receive bandwidth by avoiding
++ * packet loss, it can also result in packets sitting in the 'tx_queue' for
++ * unbounded time. This is bad if those packets hold onto foreign resources.
++ * For example, consider a packet that holds onto resources belonging to the
++ * guest for which it is queued (e.g., packet received on vif1.0, destined for
++ * vif1.1 which is not activated in the guest): in this situation the guest
++ * will never be destroyed, unless vif1.1 is taken down. To avoid this, we
++ * run a timer (tx_queue_timeout) to drain the queue when the interface is
++ * blocked.
++ */
++static unsigned long netbk_queue_length = 32;
++module_param_named(queue_length, netbk_queue_length, ulong, 0);
++
++static void __netif_up(netif_t *netif)
++{
++ enable_irq(netif->irq);
++ netif_schedule_work(netif);
++}
++
++static void __netif_down(netif_t *netif)
++{
++ disable_irq(netif->irq);
++ netif_deschedule_work(netif);
++}
++
++static int net_open(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif)) {
++ __netif_up(netif);
++ netif_start_queue(dev);
++ }
++ return 0;
++}
++
++static int net_close(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ if (netback_carrier_ok(netif))
++ __netif_down(netif);
++ netif_stop_queue(dev);
++ return 0;
++}
++
++static int netbk_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = netbk_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int netbk_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_SG))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int netbk_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ netif_t *netif = netdev_priv(dev);
++
++ if (!(netif->features & NETIF_F_TSO))
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = netbk_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = netbk_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++netif_t *netif_alloc(domid_t domid, unsigned int handle)
++{
++ int err = 0;
++ struct net_device *dev;
++ netif_t *netif;
++ char name[IFNAMSIZ] = {};
++
++ snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
++ dev = alloc_netdev(sizeof(netif_t), name, ether_setup);
++ if (dev == NULL) {
++ DPRINTK("Could not create netif: out of memory\n");
++ return ERR_PTR(-ENOMEM);
++ }
++
++ netif = netdev_priv(dev);
++ memset(netif, 0, sizeof(*netif));
++ netif->domid = domid;
++ netif->handle = handle;
++ atomic_set(&netif->refcnt, 1);
++ init_waitqueue_head(&netif->waiting_to_free);
++ netif->dev = dev;
++
++ netback_carrier_off(netif);
++
++ netif->credit_bytes = netif->remaining_credit = ~0UL;
++ netif->credit_usec = 0UL;
++ init_timer(&netif->credit_timeout);
++ /* Initialize 'expires' now: it's used to track the credit window. */
++ netif->credit_timeout.expires = jiffies;
++
++ init_timer(&netif->tx_queue_timeout);
++
++ dev->hard_start_xmit = netif_be_start_xmit;
++ dev->get_stats = netif_be_get_stats;
++ dev->open = net_open;
++ dev->stop = net_close;
++ dev->change_mtu = netbk_change_mtu;
++ dev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ dev->tx_queue_len = netbk_queue_length;
++
++ /*
++ * Initialise a dummy MAC address. We choose the numerically
++ * largest non-broadcast address to prevent the address getting
++ * stolen by an Ethernet bridge for STP purposes.
++ * (FE:FF:FF:FF:FF:FF)
++ */
++ memset(dev->dev_addr, 0xFF, ETH_ALEN);
++ dev->dev_addr[0] &= ~0x01;
++
++ rtnl_lock();
++ err = register_netdevice(dev);
++ rtnl_unlock();
++ if (err) {
++ DPRINTK("Could not register new net device %s: err=%d\n",
++ dev->name, err);
++ free_netdev(dev);
++ return ERR_PTR(err);
++ }
++
++ DPRINTK("Successfully created netif\n");
++ return netif;
++}
++
++static int map_frontend_pages(
++ netif_t *netif, grant_ref_t tx_ring_ref, grant_ref_t rx_ring_ref)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, tx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping tx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->tx_shmem_ref = tx_ring_ref;
++ netif->tx_shmem_handle = op.handle;
++
++ gnttab_set_map_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, rx_ring_ref, netif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Gnttab failure mapping rx_ring_ref!\n");
++ return op.status;
++ }
++
++ netif->rx_shmem_ref = rx_ring_ref;
++ netif->rx_shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_pages(netif_t *netif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->tx_comms_area->addr,
++ GNTMAP_host_map, netif->tx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ gnttab_set_unmap_op(&op, (unsigned long)netif->rx_comms_area->addr,
++ GNTMAP_host_map, netif->rx_shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int netif_map(netif_t *netif, unsigned long tx_ring_ref,
++ unsigned long rx_ring_ref, unsigned int evtchn)
++{
++ int err = -ENOMEM;
++ netif_tx_sring_t *txs;
++ netif_rx_sring_t *rxs;
++
++ /* Already connected through? */
++ if (netif->irq)
++ return 0;
++
++ netif->tx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->tx_comms_area == NULL)
++ return -ENOMEM;
++ netif->rx_comms_area = alloc_vm_area(PAGE_SIZE);
++ if (netif->rx_comms_area == NULL)
++ goto err_rx;
++
++ err = map_frontend_pages(netif, tx_ring_ref, rx_ring_ref);
++ if (err)
++ goto err_map;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ netif->domid, evtchn, netif_be_int, 0,
++ netif->dev->name, netif);
++ if (err < 0)
++ goto err_hypervisor;
++ netif->irq = err;
++ disable_irq(netif->irq);
++
++ txs = (netif_tx_sring_t *)netif->tx_comms_area->addr;
++ BACK_RING_INIT(&netif->tx, txs, PAGE_SIZE);
++
++ rxs = (netif_rx_sring_t *)netif->rx_comms_area->addr;
++ BACK_RING_INIT(&netif->rx, rxs, PAGE_SIZE);
++
++ netif->rx_req_cons_peek = 0;
++
++ netif_get(netif);
++
++ rtnl_lock();
++ netback_carrier_on(netif);
++ if (netif_running(netif->dev))
++ __netif_up(netif);
++ rtnl_unlock();
++
++ return 0;
++err_hypervisor:
++ unmap_frontend_pages(netif);
++err_map:
++ free_vm_area(netif->rx_comms_area);
++err_rx:
++ free_vm_area(netif->tx_comms_area);
++ return err;
++}
++
++void netif_disconnect(netif_t *netif)
++{
++ if (netback_carrier_ok(netif)) {
++ rtnl_lock();
++ netback_carrier_off(netif);
++ netif_carrier_off(netif->dev); /* discard queued packets */
++ if (netif_running(netif->dev))
++ __netif_down(netif);
++ rtnl_unlock();
++ netif_put(netif);
++ }
++
++ atomic_dec(&netif->refcnt);
++ wait_event(netif->waiting_to_free, atomic_read(&netif->refcnt) == 0);
++
++ del_timer_sync(&netif->credit_timeout);
++ del_timer_sync(&netif->tx_queue_timeout);
++
++ if (netif->irq)
++ unbind_from_irqhandler(netif->irq, netif);
++
++ unregister_netdev(netif->dev);
++
++ if (netif->tx.sring) {
++ unmap_frontend_pages(netif);
++ free_vm_area(netif->tx_comms_area);
++ free_vm_area(netif->rx_comms_area);
++ }
++
++ free_netdev(netif->dev);
++}
+Index: head-2008-11-25/drivers/xen/netback/loopback.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/loopback.c 2007-08-06 15:10:49.000000000 +0200
+@@ -0,0 +1,324 @@
++/******************************************************************************
++ * netback/loopback.c
++ *
++ * A two-interface loopback device to emulate a local netfront-netback
++ * connection. This ensures that local packet delivery looks identical
++ * to inter-domain delivery. Most importantly, packets delivered locally
++ * originating from other domains will get *copied* when they traverse this
++ * driver. This prevents unbounded delays in socket-buffer queues from
++ * causing the netback driver to "seize up".
++ *
++ * This driver creates a symmetric pair of loopback interfaces with names
++ * vif0.0 and veth0. The intention is that 'vif0.0' is bound to an Ethernet
++ * bridge, just like a proper netback interface, while a local IP interface
++ * is configured on 'veth0'.
++ *
++ * As with a real netback interface, vif0.0 is configured with a suitable
++ * dummy MAC address. No default is provided for veth0: a reasonable strategy
++ * is to transfer eth0's MAC address to veth0, and give eth0 a dummy address
++ * (to avoid confusing the Etherbridge).
++ *
++ * Copyright (c) 2005 K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/ethtool.h>
++#include <net/dst.h>
++#include <net/xfrm.h> /* secpath_reset() */
++#include <asm/hypervisor.h> /* is_initial_xendomain() */
++
++static int nloopbacks = -1;
++module_param(nloopbacks, int, 0);
++MODULE_PARM_DESC(nloopbacks, "Number of netback-loopback devices to create");
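++
++/* Example (illustrative): "modprobe netloop nloopbacks=2" creates the
++ * interface pairs vif0.0/veth0 and vif0.1/veth1. The default of -1
++ * auto-selects four pairs in the initial xen domain and none elsewhere
++ * (see loopback_init() below).
++ */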
++
++struct net_private {
++ struct net_device *loopback_dev;
++ struct net_device_stats stats;
++};
++
++static int loopback_open(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ memset(&np->stats, 0, sizeof(np->stats));
++ netif_start_queue(dev);
++ return 0;
++}
++
++static int loopback_close(struct net_device *dev)
++{
++ netif_stop_queue(dev);
++ return 0;
++}
++
++#ifdef CONFIG_X86
++static int is_foreign(unsigned long pfn)
++{
++ /* NB. Play it safe for auto-translation mode. */
++ return (xen_feature(XENFEAT_auto_translated_physmap) ||
++ (phys_to_machine_mapping[pfn] & FOREIGN_FRAME_BIT));
++}
++#else
++/* How to detect a foreign mapping? Play it safe. */
++#define is_foreign(pfn) (1)
++#endif
++
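++/* Replace any foreign-owned frag pages in the skb with freshly allocated
++ * local copies, so the packet can sit in a queue indefinitely without
++ * pinning another domain's memory. Returns 1 on success and 0 on
++ * allocation failure, in which case the caller drops the packet.
++ */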
++static int skb_remove_foreign_references(struct sk_buff *skb)
++{
++ struct page *page;
++ unsigned long pfn;
++ int i, off;
++ char *vaddr;
++
++ BUG_ON(skb_shinfo(skb)->frag_list);
++
++ if (skb_cloned(skb) &&
++ unlikely(pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
++ return 0;
++
++ for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[i].page);
++ if (!is_foreign(pfn))
++ continue;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!page))
++ return 0;
++
++ vaddr = kmap_skb_frag(&skb_shinfo(skb)->frags[i]);
++ off = skb_shinfo(skb)->frags[i].page_offset;
++ memcpy(page_address(page) + off,
++ vaddr + off,
++ skb_shinfo(skb)->frags[i].size);
++ kunmap_skb_frag(vaddr);
++
++ put_page(skb_shinfo(skb)->frags[i].page);
++ skb_shinfo(skb)->frags[i].page = page;
++ }
++
++ return 1;
++}
++
++static int loopback_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ if (!skb_remove_foreign_references(skb)) {
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++ }
++
++ dst_release(skb->dst);
++ skb->dst = NULL;
++
++ skb_orphan(skb);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++
++ /* Switch to loopback context. */
++ dev = np->loopback_dev;
++ np = netdev_priv(dev);
++
++ np->stats.rx_bytes += skb->len;
++ np->stats.rx_packets++;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Defer checksum calculation. */
++ skb->proto_csum_blank = 1;
++ /* Must be a local packet: assert its integrity. */
++ skb->proto_data_valid = 1;
++ }
++
++ skb->ip_summed = skb->proto_data_valid ?
++ CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
++
++ skb->pkt_type = PACKET_HOST; /* overridden by eth_type_trans() */
++ skb->protocol = eth_type_trans(skb, dev);
++ skb->dev = dev;
++ dev->last_rx = jiffies;
++
++ /* Flush netfilter context: rx'ed skbuffs not expected to have any. */
++ nf_reset(skb);
++ secpath_reset(skb);
++
++ netif_rx(skb);
++
++ return 0;
++}
++
++static struct net_device_stats *loopback_get_stats(struct net_device *dev)
++{
++ struct net_private *np = netdev_priv(dev);
++ return &np->stats;
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = ethtool_op_set_sg,
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = ethtool_op_set_tso,
++ .get_link = ethtool_op_get_link,
++};
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void loopback_set_multicast_list(struct net_device *dev)
++{
++}
++
++static void loopback_construct(struct net_device *dev, struct net_device *lo)
++{
++ struct net_private *np = netdev_priv(dev);
++
++ np->loopback_dev = lo;
++
++ dev->open = loopback_open;
++ dev->stop = loopback_close;
++ dev->hard_start_xmit = loopback_start_xmit;
++ dev->get_stats = loopback_get_stats;
++ dev->set_multicast_list = loopback_set_multicast_list;
++ dev->change_mtu = NULL; /* allow arbitrary mtu */
++
++ dev->tx_queue_len = 0;
++
++ dev->features = (NETIF_F_HIGHDMA |
++ NETIF_F_LLTX |
++ NETIF_F_TSO |
++ NETIF_F_SG |
++ NETIF_F_IP_CSUM);
++
++ SET_ETHTOOL_OPS(dev, &network_ethtool_ops);
++
++ /*
++ * We do not set a jumbo MTU on the interface. Otherwise the network
++ * stack will try to send large packets that will get dropped by the
++ * Ethernet bridge (unless the physical Ethernet interface is
++ * configured to transfer jumbo packets). If a larger MTU is desired
++ * then the system administrator can specify it using the 'ifconfig'
++ * command.
++ */
++ /*dev->mtu = 16*1024;*/
++}
++
++static int __init make_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++ int err = -ENOMEM;
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev1)
++ return err;
++
++ sprintf(dev_name, "veth%d", i);
++ dev2 = alloc_netdev(sizeof(struct net_private), dev_name, ether_setup);
++ if (!dev2)
++ goto fail_netdev2;
++
++ loopback_construct(dev1, dev2);
++ loopback_construct(dev2, dev1);
++
++ /*
++ * Initialise a dummy MAC address for the 'dummy backend' interface. We
++ * choose the numerically largest non-broadcast address to prevent the
++ * address getting stolen by an Ethernet bridge for STP purposes.
++ */
++ memset(dev1->dev_addr, 0xFF, ETH_ALEN);
++ dev1->dev_addr[0] &= ~0x01;
++
++ if ((err = register_netdev(dev1)) != 0)
++ goto fail;
++
++ if ((err = register_netdev(dev2)) != 0) {
++ unregister_netdev(dev1);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(dev2);
++ fail_netdev2:
++ free_netdev(dev1);
++ return err;
++}
++
++static void __exit clean_loopback(int i)
++{
++ struct net_device *dev1, *dev2;
++ char dev_name[IFNAMSIZ];
++
++ sprintf(dev_name, "vif0.%d", i);
++ dev1 = dev_get_by_name(dev_name);
++ sprintf(dev_name, "veth%d", i);
++ dev2 = dev_get_by_name(dev_name);
++ if (dev1 && dev2) {
++ unregister_netdev(dev2);
++ unregister_netdev(dev1);
++ free_netdev(dev2);
++ free_netdev(dev1);
++ }
++}
++
++static int __init loopback_init(void)
++{
++ int i, err = 0;
++
++ if (nloopbacks == -1)
++ nloopbacks = is_initial_xendomain() ? 4 : 0;
++
++ for (i = 0; i < nloopbacks; i++)
++ if ((err = make_loopback(i)) != 0)
++ break;
++
++ return err;
++}
++
++module_init(loopback_init);
++
++static void __exit loopback_exit(void)
++{
++ int i;
++
++ for (i = nloopbacks; i-- > 0; )
++ clean_loopback(i);
++}
++
++module_exit(loopback_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/netback/netback.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/netback.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,1614 @@
++/******************************************************************************
++ * drivers/xen/netback/netback.c
++ *
++ * Back-end of the driver for virtual network devices. This portion of the
++ * driver exports a 'unified' network-device interface that can be accessed
++ * by any operating system that implements a compatible front end. A
++ * reference front-end implementation can be found in:
++ * drivers/xen/netfront/netfront.c
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/interface/memory.h>
++
++/*define NETBE_DEBUG_INTERRUPT*/
++
++/* extra field used in struct page */
++#define netif_page_index(pg) (*(long *)&(pg)->mapping)
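++
++/* These pages are driver-owned (never in the page cache), so the
++ * otherwise-unused mapping field can be overloaded to record a page's
++ * index into mmap_pages[] / pending_tx_info[].
++ */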
++
++struct netbk_rx_meta {
++ skb_frag_t frag;
++ int id;
++ u8 copy:1;
++};
++
++struct netbk_tx_pending_inuse {
++ struct list_head list;
++ unsigned long alloc_time;
++};
++
++static void netif_idx_release(u16 pending_idx);
++static void netif_page_release(struct page *page);
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st);
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags);
++
++static void net_tx_action(unsigned long unused);
++static DECLARE_TASKLET(net_tx_tasklet, net_tx_action, 0);
++
++static void net_rx_action(unsigned long unused);
++static DECLARE_TASKLET(net_rx_tasklet, net_rx_action, 0);
++
++static struct timer_list net_timer;
++static struct timer_list netbk_tx_pending_timer;
++
++#define MAX_PENDING_REQS 256
++
++static struct sk_buff_head rx_queue;
++
++static struct page **mmap_pages;
++static inline unsigned long idx_to_pfn(unsigned int idx)
++{
++ return page_to_pfn(mmap_pages[idx]);
++}
++
++static inline unsigned long idx_to_kaddr(unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(idx_to_pfn(idx));
++}
++
++#define PKT_PROT_LEN 64
++
++static struct pending_tx_info {
++ netif_tx_request_t req;
++ netif_t *netif;
++} pending_tx_info[MAX_PENDING_REQS];
++static u16 pending_ring[MAX_PENDING_REQS];
++typedef unsigned int PEND_RING_IDX;
++#define MASK_PEND_IDX(_i) ((_i)&(MAX_PENDING_REQS-1))
++static PEND_RING_IDX pending_prod, pending_cons;
++#define NR_PENDING_REQS (MAX_PENDING_REQS - pending_prod + pending_cons)
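++
++/* Worked example (illustrative, assuming the free ring starts full so
++ * that pending_prod - pending_cons == MAX_PENDING_REQS): the indices run
++ * freely and MASK_PEND_IDX() reduces them modulo MAX_PENDING_REQS. If
++ * 250 free slot indices remain (pending_prod - pending_cons == 250),
++ * NR_PENDING_REQS evaluates to 256 - 250 == 6 requests in flight.
++ */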
++
++/* Freed TX SKBs get batched on this ring before return to pending_ring. */
++static u16 dealloc_ring[MAX_PENDING_REQS];
++static PEND_RING_IDX dealloc_prod, dealloc_cons;
++
++/* Doubly-linked list of in-use pending entries. */
++static struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
++static LIST_HEAD(pending_inuse_head);
++
++static struct sk_buff_head tx_queue;
++
++static grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
++static gnttab_unmap_grant_ref_t tx_unmap_ops[MAX_PENDING_REQS];
++static gnttab_map_grant_ref_t tx_map_ops[MAX_PENDING_REQS];
++
++static struct list_head net_schedule_list;
++static spinlock_t net_schedule_list_lock;
++
++#define MAX_MFN_ALLOC 64
++static unsigned long mfn_list[MAX_MFN_ALLOC];
++static unsigned int alloc_index = 0;
++
++/* Setting this allows the safe use of this driver without netloop. */
++static int MODPARM_copy_skb = 1;
++module_param_named(copy_skb, MODPARM_copy_skb, bool, 0);
++MODULE_PARM_DESC(copy_skb, "Copy data received from netfront without netloop");
++
++int netbk_copy_skb_mode;
++
++static inline unsigned long alloc_mfn(void)
++{
++ BUG_ON(alloc_index == 0);
++ return mfn_list[--alloc_index];
++}
++
++static int check_mfn(int nr)
++{
++ struct xen_memory_reservation reservation = {
++ .extent_order = 0,
++ .domid = DOMID_SELF
++ };
++ int rc;
++
++ if (likely(alloc_index >= nr))
++ return 0;
++
++ set_xen_guest_handle(reservation.extent_start, mfn_list + alloc_index);
++ reservation.nr_extents = MAX_MFN_ALLOC - alloc_index;
++ rc = HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
++ if (likely(rc > 0))
++ alloc_index += rc;
++
++ return alloc_index >= nr ? 0 : -ENOMEM;
++}
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ if ((NR_PENDING_REQS < (MAX_PENDING_REQS/2)) &&
++ !list_empty(&net_schedule_list))
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static struct sk_buff *netbk_copy_skb(struct sk_buff *skb)
++{
++ struct skb_shared_info *ninfo;
++ struct sk_buff *nskb;
++ unsigned long offset;
++ int ret;
++ int len;
++ int headlen;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++
++ nskb = alloc_skb(SKB_MAX_HEAD(0), GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!nskb))
++ goto err;
++
++ skb_reserve(nskb, 16 + NET_IP_ALIGN);
++ headlen = nskb->end - nskb->data;
++ if (headlen > skb_headlen(skb))
++ headlen = skb_headlen(skb);
++ ret = skb_copy_bits(skb, 0, __skb_put(nskb, headlen), headlen);
++ BUG_ON(ret);
++
++ ninfo = skb_shinfo(nskb);
++ ninfo->gso_size = skb_shinfo(skb)->gso_size;
++ ninfo->gso_type = skb_shinfo(skb)->gso_type;
++
++ offset = headlen;
++ len = skb->len - headlen;
++
++ nskb->len = skb->len;
++ nskb->data_len = len;
++ nskb->truesize += len;
++
++ while (len) {
++ struct page *page;
++ int copy;
++ int zero;
++
++ if (unlikely(ninfo->nr_frags >= MAX_SKB_FRAGS)) {
++ dump_stack();
++ goto err_free;
++ }
++
++ copy = len >= PAGE_SIZE ? PAGE_SIZE : len;
++ zero = len >= PAGE_SIZE ? 0 : __GFP_ZERO;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN | zero);
++ if (unlikely(!page))
++ goto err_free;
++
++ ret = skb_copy_bits(skb, offset, page_address(page), copy);
++ BUG_ON(ret);
++
++ ninfo->frags[ninfo->nr_frags].page = page;
++ ninfo->frags[ninfo->nr_frags].page_offset = 0;
++ ninfo->frags[ninfo->nr_frags].size = copy;
++ ninfo->nr_frags++;
++
++ offset += copy;
++ len -= copy;
++ }
++
++ offset = nskb->data - skb->data;
++
++ nskb->h.raw = skb->h.raw + offset;
++ nskb->nh.raw = skb->nh.raw + offset;
++ nskb->mac.raw = skb->mac.raw + offset;
++
++ return nskb;
++
++ err_free:
++ kfree_skb(nskb);
++ err:
++ return NULL;
++}
++
++static inline int netbk_max_required_rx_slots(netif_t *netif)
++{
++ if (netif->features & (NETIF_F_SG|NETIF_F_TSO))
++ return MAX_SKB_FRAGS + 2; /* header + extra_info + frags */
++ return 1; /* all in one */
++}
++
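++/* Both tests below guard against overcommitting the shared RX ring
++ * (illustrative numbers): with req_prod == 100 and rx_req_cons_peek ==
++ * 98, only two unconsumed requests remain, so a packet that may need
++ * MAX_SKB_FRAGS + 2 slots must wait. The second test likewise bounds
++ * peek by the responses produced so far plus the ring size.
++ */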
++static inline int netbk_queue_full(netif_t *netif)
++{
++ RING_IDX peek = netif->rx_req_cons_peek;
++ RING_IDX needed = netbk_max_required_rx_slots(netif);
++
++ return ((netif->rx.sring->req_prod - peek) < needed) ||
++ ((netif->rx.rsp_prod_pvt + NET_RX_RING_SIZE - peek) < needed);
++}
++
++static void tx_queue_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ if (netif_schedulable(netif))
++ netif_wake_queue(netif->dev);
++}
++
++int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++
++ BUG_ON(skb->dev != dev);
++
++ /* Drop the packet if the target domain has no receive buffers. */
++ if (unlikely(!netif_schedulable(netif) || netbk_queue_full(netif)))
++ goto drop;
++
++ /*
++ * Copy the packet here if it's destined for a flipping interface
++ * but isn't flippable (e.g. extra references to data).
++ * XXX For now we also copy skbuffs whose head crosses a page
++ * boundary, because netbk_gop_skb can't handle them.
++ */
++ if (!netif->copying_receiver ||
++ ((skb_headlen(skb) + offset_in_page(skb->data)) >= PAGE_SIZE)) {
++ struct sk_buff *nskb = netbk_copy_skb(skb);
++ if (unlikely(nskb == NULL))
++ goto drop;
++ /* Copy only the header fields we use in this driver. */
++ nskb->dev = skb->dev;
++ nskb->ip_summed = skb->ip_summed;
++ nskb->proto_data_valid = skb->proto_data_valid;
++ dev_kfree_skb(skb);
++ skb = nskb;
++ }
++
++ netif->rx_req_cons_peek += skb_shinfo(skb)->nr_frags + 1 +
++ !!skb_shinfo(skb)->gso_size;
++ netif_get(netif);
++
++ if (netbk_can_queue(dev) && netbk_queue_full(netif)) {
++ netif->rx.sring->req_event = netif->rx_req_cons_peek +
++ netbk_max_required_rx_slots(netif);
++ mb(); /* request notification /then/ check & stop the queue */
++ if (netbk_queue_full(netif)) {
++ netif_stop_queue(dev);
++ /*
++ * Schedule 500ms timeout to restart the queue, thus
++ * ensuring that an inactive queue will be drained.
++ * Packets will immediately be dropped until more
++ * receive buffers become available (see
++ * netbk_queue_full() check above).
++ */
++ netif->tx_queue_timeout.data = (unsigned long)netif;
++ netif->tx_queue_timeout.function = tx_queue_callback;
++ __mod_timer(&netif->tx_queue_timeout, jiffies + HZ/2);
++ }
++ }
++
++ skb_queue_tail(&rx_queue, skb);
++ tasklet_schedule(&net_rx_tasklet);
++
++ return 0;
++
++ drop:
++ netif->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++#if 0
++static void xen_network_done_notify(void)
++{
++ static struct net_device *eth0_dev = NULL;
++ if (unlikely(eth0_dev == NULL))
++ eth0_dev = __dev_get_by_name("eth0");
++ netif_rx_schedule(eth0_dev);
++}
++/*
++ * Add following to poll() function in NAPI driver (Tigon3 is example):
++ * if ( xen_network_done() )
++ * tg3_enable_ints(tp);
++ */
++int xen_network_done(void)
++{
++ return skb_queue_empty(&rx_queue);
++}
++#endif
++
++struct netrx_pending_operations {
++ unsigned trans_prod, trans_cons;
++ unsigned mmu_prod, mmu_mcl;
++ unsigned mcl_prod, mcl_cons;
++ unsigned copy_prod, copy_cons;
++ unsigned meta_prod, meta_cons;
++ mmu_update_t *mmu;
++ gnttab_transfer_t *trans;
++ gnttab_copy_t *copy;
++ multicall_entry_t *mcl;
++ struct netbk_rx_meta *meta;
++};
++
++/* Set up the grant operations for this fragment. If it's a flipping
++ interface, we also set up the unmap request from here. */
++static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
++ int i, struct netrx_pending_operations *npo,
++ struct page *page, unsigned long size,
++ unsigned long offset)
++{
++ mmu_update_t *mmu;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_gop;
++ multicall_entry_t *mcl;
++ netif_rx_request_t *req;
++ unsigned long old_mfn, new_mfn;
++
++ old_mfn = virt_to_mfn(page_address(page));
++
++ req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
++ if (netif->copying_receiver) {
++ /* The fragment needs to be copied rather than
++ flipped. */
++ meta->copy = 1;
++ copy_gop = npo->copy + npo->copy_prod++;
++ copy_gop->flags = GNTCOPY_dest_gref;
++ if (PageForeign(page)) {
++ struct pending_tx_info *src_pend =
++ &pending_tx_info[netif_page_index(page)];
++ copy_gop->source.domid = src_pend->netif->domid;
++ copy_gop->source.u.ref = src_pend->req.gref;
++ copy_gop->flags |= GNTCOPY_source_gref;
++ } else {
++ copy_gop->source.domid = DOMID_SELF;
++ copy_gop->source.u.gmfn = old_mfn;
++ }
++ copy_gop->source.offset = offset;
++ copy_gop->dest.domid = netif->domid;
++ copy_gop->dest.offset = 0;
++ copy_gop->dest.u.ref = req->gref;
++ copy_gop->len = size;
++ } else {
++ meta->copy = 0;
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ new_mfn = alloc_mfn();
++
++ /*
++ * Set the new P2M table entry before
++ * reassigning the old data page. Heed the
++ * comment in pgtable-2level.h:pte_page(). :-)
++ */
++ set_phys_to_machine(page_to_pfn(page), new_mfn);
++
++ mcl = npo->mcl + npo->mcl_prod++;
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)page_address(page),
++ pfn_pte_ma(new_mfn, PAGE_KERNEL),
++ 0);
++
++ mmu = npo->mmu + npo->mmu_prod++;
++ mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
++ MMU_MACHPHYS_UPDATE;
++ mmu->val = page_to_pfn(page);
++ }
++
++ gop = npo->trans + npo->trans_prod++;
++ gop->mfn = old_mfn;
++ gop->domid = netif->domid;
++ gop->ref = req->gref;
++ }
++ return req->id;
++}
++
++static void netbk_gop_skb(struct sk_buff *skb,
++ struct netrx_pending_operations *npo)
++{
++ netif_t *netif = netdev_priv(skb->dev);
++ int nr_frags = skb_shinfo(skb)->nr_frags;
++ int i;
++ int extra;
++ struct netbk_rx_meta *head_meta, *meta;
++
++ head_meta = npo->meta + npo->meta_prod++;
++ head_meta->frag.page_offset = skb_shinfo(skb)->gso_type;
++ head_meta->frag.size = skb_shinfo(skb)->gso_size;
++ extra = !!head_meta->frag.size + 1;
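++
++ /* The head meta entry does not describe a real fragment: its frag
++ * field is (ab)used to stash the GSO type/size for later response
++ * generation (outside this hunk), and 'extra' counts the ring slots
++ * the head consumes: one, plus one more when a GSO extra-info slot
++ * is needed.
++ */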
++
++ for (i = 0; i < nr_frags; i++) {
++ meta = npo->meta + npo->meta_prod++;
++ meta->frag = skb_shinfo(skb)->frags[i];
++ meta->id = netbk_gop_frag(netif, meta, i + extra, npo,
++ meta->frag.page,
++ meta->frag.size,
++ meta->frag.page_offset);
++ }
++
++ /*
++ * This must occur at the end to ensure that we don't trash skb_shinfo
++ * until we're done. We know that the head doesn't cross a page
++ * boundary because such packets get copied in netif_be_start_xmit.
++ */
++ head_meta->id = netbk_gop_frag(netif, head_meta, 0, npo,
++ virt_to_page(skb->data),
++ skb_headlen(skb),
++ offset_in_page(skb->data));
++
++ netif->rx.req_cons += nr_frags + extra;
++}
++
++static inline void netbk_free_pages(int nr_frags, struct netbk_rx_meta *meta)
++{
++ int i;
++
++ for (i = 0; i < nr_frags; i++)
++ put_page(meta[i].frag.page);
++}
++
++/* This is a twin to netbk_gop_skb. Assume that netbk_gop_skb was
++ used to set up the operations on the top of
++ netrx_pending_operations, which have since been done. Check that
++ they didn't give any errors and advance over them. */
++static int netbk_check_gop(int nr_frags, domid_t domid,
++ struct netrx_pending_operations *npo)
++{
++ multicall_entry_t *mcl;
++ gnttab_transfer_t *gop;
++ gnttab_copy_t *copy_op;
++ int status = NETIF_RSP_OKAY;
++ int i;
++
++ for (i = 0; i <= nr_frags; i++) {
++ if (npo->meta[npo->meta_cons + i].copy) {
++ copy_op = npo->copy + npo->copy_cons++;
++ if (copy_op->status != GNTST_okay) {
++ DPRINTK("Bad status %d from copy to DOM%d.\n",
++ copy_op->status, domid);
++ status = NETIF_RSP_ERROR;
++ }
++ } else {
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = npo->mcl + npo->mcl_cons++;
++ /* The update_va_mapping() must not fail. */
++ BUG_ON(mcl->result != 0);
++ }
++
++ gop = npo->trans + npo->trans_cons++;
++ /* Check the reassignment error code. */
++ if (gop->status != 0) {
++ DPRINTK("Bad status %d from grant transfer to DOM%u\n",
++ gop->status, domid);
++ /*
++ * Page no longer belongs to us unless
++ * GNTST_bad_page, but that should be
++ * a fatal error anyway.
++ */
++ BUG_ON(gop->status == GNTST_bad_page);
++ status = NETIF_RSP_ERROR;
++ }
++ }
++ }
++
++ return status;
++}
++
++static void netbk_add_frag_responses(netif_t *netif, int status,
++ struct netbk_rx_meta *meta, int nr_frags)
++{
++ int i;
++ unsigned long offset;
++
++ for (i = 0; i < nr_frags; i++) {
++ int id = meta[i].id;
++ int flags = (i == nr_frags - 1) ? 0 : NETRXF_more_data;
++
++ if (meta[i].copy)
++ offset = 0;
++ else
++ offset = meta[i].frag.page_offset;
++ make_rx_response(netif, id, status, offset,
++ meta[i].frag.size, flags);
++ }
++}
++
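++/*
++ * RX tasklet: drain rx_queue into a single batch.  For each skb we queue
++ * the required grant copy/transfer ops (and, for flipping receivers, the
++ * p2m and va-mapping updates), issue them all in one multicall, then walk
++ * the batch again to push responses to each frontend, notifying each
++ * event channel at most once per batch (the rx_notify/notify_list dedup).
++ */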
++static void net_rx_action(unsigned long unused)
++{
++ netif_t *netif = NULL;
++ s8 status;
++ u16 id, irq, flags;
++ netif_rx_response_t *resp;
++ multicall_entry_t *mcl;
++ struct sk_buff_head rxq;
++ struct sk_buff *skb;
++ int notify_nr = 0;
++ int ret;
++ int nr_frags;
++ int count;
++ unsigned long offset;
++
++ /*
++ * Putting hundreds of bytes on the stack is considered rude.
++ * Static works because a tasklet can only be on one CPU at any time.
++ */
++ static multicall_entry_t rx_mcl[NET_RX_RING_SIZE+3];
++ static mmu_update_t rx_mmu[NET_RX_RING_SIZE];
++ static gnttab_transfer_t grant_trans_op[NET_RX_RING_SIZE];
++ static gnttab_copy_t grant_copy_op[NET_RX_RING_SIZE];
++ static unsigned char rx_notify[NR_IRQS];
++ static u16 notify_list[NET_RX_RING_SIZE];
++ static struct netbk_rx_meta meta[NET_RX_RING_SIZE];
++
++	struct netrx_pending_operations npo = {
++		.mmu   = rx_mmu,
++		.trans = grant_trans_op,
++		.copy  = grant_copy_op,
++		.mcl   = rx_mcl,
++		.meta  = meta,
++	};
++
++ skb_queue_head_init(&rxq);
++
++ count = 0;
++
++ while ((skb = skb_dequeue(&rx_queue)) != NULL) {
++ nr_frags = skb_shinfo(skb)->nr_frags;
++ *(int *)skb->cb = nr_frags;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap) &&
++ !((netif_t *)netdev_priv(skb->dev))->copying_receiver &&
++ check_mfn(nr_frags + 1)) {
++ /* Memory squeeze? Back off for an arbitrary while. */
++			if (net_ratelimit())
++				WPRINTK("Memory squeeze in netback driver.\n");
++ mod_timer(&net_timer, jiffies + HZ);
++ skb_queue_head(&rx_queue, skb);
++ break;
++ }
++
++ netbk_gop_skb(skb, &npo);
++
++ count += nr_frags + 1;
++
++ __skb_queue_tail(&rxq, skb);
++
++ /* Filled the batch queue? */
++ if (count + MAX_SKB_FRAGS >= NET_RX_RING_SIZE)
++ break;
++ }
++
++ BUG_ON(npo.meta_prod > ARRAY_SIZE(meta));
++
++ npo.mmu_mcl = npo.mcl_prod;
++ if (npo.mcl_prod) {
++ BUG_ON(xen_feature(XENFEAT_auto_translated_physmap));
++ BUG_ON(npo.mmu_prod > ARRAY_SIZE(rx_mmu));
++ mcl = npo.mcl + npo.mcl_prod++;
++
++ BUG_ON(mcl[-1].op != __HYPERVISOR_update_va_mapping);
++ mcl[-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
++
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)rx_mmu;
++ mcl->args[1] = npo.mmu_prod;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ }
++
++ if (npo.trans_prod) {
++ BUG_ON(npo.trans_prod > ARRAY_SIZE(grant_trans_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_transfer;
++ mcl->args[1] = (unsigned long)grant_trans_op;
++ mcl->args[2] = npo.trans_prod;
++ }
++
++ if (npo.copy_prod) {
++ BUG_ON(npo.copy_prod > ARRAY_SIZE(grant_copy_op));
++ mcl = npo.mcl + npo.mcl_prod++;
++ mcl->op = __HYPERVISOR_grant_table_op;
++ mcl->args[0] = GNTTABOP_copy;
++ mcl->args[1] = (unsigned long)grant_copy_op;
++ mcl->args[2] = npo.copy_prod;
++ }
++
++ /* Nothing to do? */
++ if (!npo.mcl_prod)
++ return;
++
++ BUG_ON(npo.mcl_prod > ARRAY_SIZE(rx_mcl));
++
++ ret = HYPERVISOR_multicall(npo.mcl, npo.mcl_prod);
++ BUG_ON(ret != 0);
++ /* The mmu_machphys_update() must not fail. */
++ BUG_ON(npo.mmu_mcl && npo.mcl[npo.mmu_mcl].result != 0);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ nr_frags = *(int *)skb->cb;
++
++ netif = netdev_priv(skb->dev);
++		/*
++		 * We can't rely on skb_release_data() to release the
++		 * pages used by fragments for us, since it tries to
++		 * touch the pages in the fraglist.  If we're in
++		 * flipping mode, that doesn't work.  In copying mode,
++		 * we still have access to all of the pages, and so
++		 * it's safe to let release_data deal with it.
++		 * (Freeing the fragments here is safe since we copy
++		 * non-linear skbs destined for flipping interfaces.)
++		 */
++ if (!netif->copying_receiver) {
++ atomic_set(&(skb_shinfo(skb)->dataref), 1);
++ skb_shinfo(skb)->frag_list = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ netbk_free_pages(nr_frags, meta + npo.meta_cons + 1);
++ }
++
++ netif->stats.tx_bytes += skb->len;
++ netif->stats.tx_packets++;
++
++ status = netbk_check_gop(nr_frags, netif->domid, &npo);
++
++ id = meta[npo.meta_cons].id;
++ flags = nr_frags ? NETRXF_more_data : 0;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ flags |= NETRXF_csum_blank | NETRXF_data_validated;
++ else if (skb->proto_data_valid) /* remote but checksummed? */
++ flags |= NETRXF_data_validated;
++
++ if (meta[npo.meta_cons].copy)
++ offset = 0;
++ else
++ offset = offset_in_page(skb->data);
++ resp = make_rx_response(netif, id, status, offset,
++ skb_headlen(skb), flags);
++
++ if (meta[npo.meta_cons].frag.size) {
++ struct netif_extra_info *gso =
++ (struct netif_extra_info *)
++ RING_GET_RESPONSE(&netif->rx,
++ netif->rx.rsp_prod_pvt++);
++
++ resp->flags |= NETRXF_extra_info;
++
++ gso->u.gso.size = meta[npo.meta_cons].frag.size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ }
++
++ netbk_add_frag_responses(netif, status,
++ meta + npo.meta_cons + 1,
++ nr_frags);
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->rx, ret);
++ irq = netif->irq;
++ if (ret && !rx_notify[irq]) {
++ rx_notify[irq] = 1;
++ notify_list[notify_nr++] = irq;
++ }
++
++ if (netif_queue_stopped(netif->dev) &&
++ netif_schedulable(netif) &&
++ !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ netif_put(netif);
++ dev_kfree_skb(skb);
++ npo.meta_cons += nr_frags + 1;
++ }
++
++ while (notify_nr != 0) {
++ irq = notify_list[--notify_nr];
++ rx_notify[irq] = 0;
++ notify_remote_via_irq(irq);
++ }
++
++ /* More work to do? */
++ if (!skb_queue_empty(&rx_queue) && !timer_pending(&net_timer))
++ tasklet_schedule(&net_rx_tasklet);
++#if 0
++ else
++ xen_network_done_notify();
++#endif
++}
++
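++/*
++ * Timer callback, armed by net_rx_action() when a memory squeeze forces
++ * it to back off: simply re-run the RX tasklet.
++ */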
++static void net_alarm(unsigned long unused)
++{
++ tasklet_schedule(&net_rx_tasklet);
++}
++
++static void netbk_tx_pending_timeout(unsigned long unused)
++{
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++struct net_device_stats *netif_be_get_stats(struct net_device *dev)
++{
++ netif_t *netif = netdev_priv(dev);
++ return &netif->stats;
++}
++
++static int __on_net_schedule_list(netif_t *netif)
++{
++ return netif->list.next != NULL;
++}
++
++static void remove_from_net_schedule_list(netif_t *netif)
++{
++ spin_lock_irq(&net_schedule_list_lock);
++ if (likely(__on_net_schedule_list(netif))) {
++ list_del(&netif->list);
++ netif->list.next = NULL;
++ netif_put(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++static void add_to_net_schedule_list_tail(netif_t *netif)
++{
++ if (__on_net_schedule_list(netif))
++ return;
++
++ spin_lock_irq(&net_schedule_list_lock);
++ if (!__on_net_schedule_list(netif) &&
++ likely(netif_schedulable(netif))) {
++ list_add_tail(&netif->list, &net_schedule_list);
++ netif_get(netif);
++ }
++ spin_unlock_irq(&net_schedule_list_lock);
++}
++
++/*
++ * Note on CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER:
++ * If this driver is pipelining transmit requests then we can be very
++ * aggressive in avoiding new-packet notifications -- the frontend only
++ * needs to send a notification if there are no outstanding unreceived
++ * responses.
++ * If we may be buffering transmit requests for any reason then we must be
++ * rather more conservative and treat this as the final check for pending
++ * work.
++ */
++void netif_schedule_work(netif_t *netif)
++{
++ int more_to_do;
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ more_to_do = RING_HAS_UNCONSUMED_REQUESTS(&netif->tx);
++#else
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++#endif
++
++ if (more_to_do) {
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++ }
++}
++
++void netif_deschedule_work(netif_t *netif)
++{
++ remove_from_net_schedule_list(netif);
++}
++
++
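++/*
++ * Replenish a vif's transmit credit.  Illustrative example (assumed
++ * numbers): with credit_bytes = 65536, remaining_credit = 1000 and a
++ * 100000-byte head-of-line request, max_burst = max(min(100000, 131072),
++ * 65536) = 100000 and max_credit = 1000 + 65536 = 66536, so the vif ends
++ * up with min(66536, 100000) = 66536 bytes of credit.
++ */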
++static void tx_add_credit(netif_t *netif)
++{
++ unsigned long max_burst, max_credit;
++
++ /*
++ * Allow a burst big enough to transmit a jumbo packet of up to 128kB.
++ * Otherwise the interface can seize up due to insufficient credit.
++ */
++ max_burst = RING_GET_REQUEST(&netif->tx, netif->tx.req_cons)->size;
++ max_burst = min(max_burst, 131072UL);
++ max_burst = max(max_burst, netif->credit_bytes);
++
++ /* Take care that adding a new chunk of credit doesn't wrap to zero. */
++ max_credit = netif->remaining_credit + netif->credit_bytes;
++ if (max_credit < netif->remaining_credit)
++ max_credit = ULONG_MAX; /* wrapped: clamp to ULONG_MAX */
++
++ netif->remaining_credit = min(max_credit, max_burst);
++}
++
++static void tx_credit_callback(unsigned long data)
++{
++ netif_t *netif = (netif_t *)data;
++ tx_add_credit(netif);
++ netif_schedule_work(netif);
++}
++
++static inline int copy_pending_req(PEND_RING_IDX pending_idx)
++{
++ return gnttab_copy_grant_page(grant_tx_handle[pending_idx],
++ &mmap_pages[pending_idx]);
++}
++
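++/*
++ * Consume the dealloc ring filled by netif_idx_release(): batch-unmap the
++ * finished grants in a single hypercall, and in delayed-copy mode also
++ * copy out any page that has stayed in use for more than HZ/2 so its
++ * grant can be unmapped and returned.  Finally send the NETIF_RSP_OKAY
++ * responses and recycle the pending-ring slots.
++ */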
++static inline void net_tx_action_dealloc(void)
++{
++ struct netbk_tx_pending_inuse *inuse, *n;
++ gnttab_unmap_grant_ref_t *gop;
++ u16 pending_idx;
++ PEND_RING_IDX dc, dp;
++ netif_t *netif;
++ int ret;
++ LIST_HEAD(list);
++
++ dc = dealloc_cons;
++ gop = tx_unmap_ops;
++
++ /*
++ * Free up any grants we have finished using
++ */
++ do {
++ dp = dealloc_prod;
++
++ /* Ensure we see all indices enqueued by netif_idx_release(). */
++ smp_rmb();
++
++ while (dc != dp) {
++ unsigned long pfn;
++
++ pending_idx = dealloc_ring[MASK_PEND_IDX(dc++)];
++ list_move_tail(&pending_inuse[pending_idx].list, &list);
++
++ pfn = idx_to_pfn(pending_idx);
++ /* Already unmapped? */
++ if (!phys_to_machine_mapping_valid(pfn))
++ continue;
++
++ gnttab_set_unmap_op(gop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map,
++ grant_tx_handle[pending_idx]);
++ gop++;
++ }
++
++ if (netbk_copy_skb_mode != NETBK_DELAYED_COPY_SKB ||
++ list_empty(&pending_inuse_head))
++ break;
++
++ /* Copy any entries that have been pending for too long. */
++ list_for_each_entry_safe(inuse, n, &pending_inuse_head, list) {
++ if (time_after(inuse->alloc_time + HZ / 2, jiffies))
++ break;
++
++ switch (copy_pending_req(inuse - pending_inuse)) {
++ case 0:
++ list_move_tail(&inuse->list, &list);
++ continue;
++ case -EBUSY:
++ list_del_init(&inuse->list);
++ continue;
++ case -ENOENT:
++ continue;
++ }
++
++ break;
++ }
++ } while (dp != dealloc_prod);
++
++ dealloc_cons = dc;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, tx_unmap_ops, gop - tx_unmap_ops);
++ BUG_ON(ret);
++
++ list_for_each_entry_safe(inuse, n, &list, list) {
++ pending_idx = inuse - pending_inuse;
++
++ netif = pending_tx_info[pending_idx].netif;
++
++ make_tx_response(netif, &pending_tx_info[pending_idx].req,
++ NETIF_RSP_OKAY);
++
++ /* Ready for next use. */
++ gnttab_reset_grant_page(mmap_pages[pending_idx]);
++
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++
++ netif_put(netif);
++
++ list_del_init(&inuse->list);
++ }
++}
++
++static void netbk_tx_err(netif_t *netif, netif_tx_request_t *txp, RING_IDX end)
++{
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ if (cons >= end)
++ break;
++ txp = RING_GET_REQUEST(&netif->tx, cons++);
++ } while (1);
++ netif->tx.req_cons = cons;
++ netif_schedule_work(netif);
++ netif_put(netif);
++}
++
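++/*
++ * Count and copy out the chained fragment requests that follow @first.
++ * Returns the fragment count, or a negated count on error so that the
++ * caller can still advance req_cons past every request it has consumed
++ * (note the netbk_tx_err(netif, &txreq, i - ret) call site below).
++ */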
++static int netbk_count_requests(netif_t *netif, netif_tx_request_t *first,
++ netif_tx_request_t *txp, int work_to_do)
++{
++ RING_IDX cons = netif->tx.req_cons;
++ int frags = 0;
++
++ if (!(first->flags & NETTXF_more_data))
++ return 0;
++
++ do {
++ if (frags >= work_to_do) {
++ DPRINTK("Need more frags\n");
++ return -frags;
++ }
++
++ if (unlikely(frags >= MAX_SKB_FRAGS)) {
++ DPRINTK("Too many frags\n");
++ return -frags;
++ }
++
++ memcpy(txp, RING_GET_REQUEST(&netif->tx, cons + frags),
++ sizeof(*txp));
++ if (txp->size > first->size) {
++			DPRINTK("Frag is bigger than frame.\n");
++ return -frags;
++ }
++
++ first->size -= txp->size;
++ frags++;
++
++ if (unlikely((txp->offset + txp->size) > PAGE_SIZE)) {
++ DPRINTK("txp->offset: %x, size: %u\n",
++ txp->offset, txp->size);
++ return -frags;
++ }
++ } while ((txp++)->flags & NETTXF_more_data);
++
++ return frags;
++}
++
++static gnttab_map_grant_ref_t *netbk_get_requests(netif_t *netif,
++ struct sk_buff *skb,
++ netif_tx_request_t *txp,
++ gnttab_map_grant_ref_t *mop)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ skb_frag_t *frags = shinfo->frags;
++ unsigned long pending_idx = *((u16 *)skb->data);
++ int i, start;
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < shinfo->nr_frags; i++, txp++) {
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons++)];
++
++ gnttab_set_map_op(mop++, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txp->gref, netif->domid);
++
++ memcpy(&pending_tx_info[pending_idx].req, txp, sizeof(*txp));
++ netif_get(netif);
++ pending_tx_info[pending_idx].netif = netif;
++ frags[i].page = (void *)pending_idx;
++ }
++
++ return mop;
++}
++
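++/*
++ * Check the status of the grant-map ops for one skb.  The invariant
++ * maintained below: on the first failure the header and all previously
++ * mapped fragments are released, and once err is set every later
++ * successfully-mapped fragment is released as well, so nothing stays
++ * mapped for an skb that is going to be dropped.
++ */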
++static int netbk_tx_check_mop(struct sk_buff *skb,
++ gnttab_map_grant_ref_t **mopp)
++{
++ gnttab_map_grant_ref_t *mop = *mopp;
++ int pending_idx = *((u16 *)skb->data);
++ netif_t *netif = pending_tx_info[pending_idx].netif;
++ netif_tx_request_t *txp;
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i, err, start;
++
++ /* Check status of header. */
++ err = mop->status;
++ if (unlikely(err)) {
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++ } else {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr >> PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ }
++
++ /* Skip first skb fragment if it is on same page as header fragment. */
++ start = ((unsigned long)shinfo->frags[0].page == pending_idx);
++
++ for (i = start; i < nr_frags; i++) {
++ int j, newerr;
++
++ pending_idx = (unsigned long)shinfo->frags[i].page;
++
++ /* Check error status: if okay then remember grant handle. */
++ newerr = (++mop)->status;
++ if (likely(!newerr)) {
++ set_phys_to_machine(
++ __pa(idx_to_kaddr(pending_idx))>>PAGE_SHIFT,
++ FOREIGN_FRAME(mop->dev_bus_addr>>PAGE_SHIFT));
++ grant_tx_handle[pending_idx] = mop->handle;
++ /* Had a previous error? Invalidate this fragment. */
++ if (unlikely(err))
++ netif_idx_release(pending_idx);
++ continue;
++ }
++
++ /* Error on this fragment: respond to client with an error. */
++ txp = &pending_tx_info[pending_idx].req;
++ make_tx_response(netif, txp, NETIF_RSP_ERROR);
++ pending_ring[MASK_PEND_IDX(pending_prod++)] = pending_idx;
++ netif_put(netif);
++
++ /* Not the first error? Preceding frags already invalidated. */
++ if (err)
++ continue;
++
++ /* First error: invalidate header and preceding fragments. */
++ pending_idx = *((u16 *)skb->data);
++ netif_idx_release(pending_idx);
++ for (j = start; j < i; j++) {
++			pending_idx = (unsigned long)shinfo->frags[j].page;
++ netif_idx_release(pending_idx);
++ }
++
++ /* Remember the error: invalidate all subsequent fragments. */
++ err = newerr;
++ }
++
++ *mopp = mop + 1;
++ return err;
++}
++
++static void netbk_fill_frags(struct sk_buff *skb)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ int i;
++
++ for (i = 0; i < nr_frags; i++) {
++ skb_frag_t *frag = shinfo->frags + i;
++ netif_tx_request_t *txp;
++ unsigned long pending_idx;
++
++ pending_idx = (unsigned long)frag->page;
++
++ pending_inuse[pending_idx].alloc_time = jiffies;
++ list_add_tail(&pending_inuse[pending_idx].list,
++ &pending_inuse_head);
++
++ txp = &pending_tx_info[pending_idx].req;
++ frag->page = virt_to_page(idx_to_kaddr(pending_idx));
++ frag->size = txp->size;
++ frag->page_offset = txp->offset;
++
++ skb->len += txp->size;
++ skb->data_len += txp->size;
++ skb->truesize += txp->size;
++ }
++}
++
++int netbk_get_extras(netif_t *netif, struct netif_extra_info *extras,
++ int work_to_do)
++{
++ struct netif_extra_info extra;
++ RING_IDX cons = netif->tx.req_cons;
++
++ do {
++ if (unlikely(work_to_do-- <= 0)) {
++ DPRINTK("Missing extra info\n");
++ return -EBADR;
++ }
++
++ memcpy(&extra, RING_GET_REQUEST(&netif->tx, cons),
++ sizeof(extra));
++ if (unlikely(!extra.type ||
++ extra.type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ netif->tx.req_cons = ++cons;
++ DPRINTK("Invalid extra type: %d\n", extra.type);
++ return -EINVAL;
++ }
++
++ memcpy(&extras[extra.type - 1], &extra, sizeof(extra));
++ netif->tx.req_cons = ++cons;
++ } while (extra.flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ return work_to_do;
++}
++
++static int netbk_set_skb_gso(struct sk_buff *skb, struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ DPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ DPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++}
++
++/*
++ * Tasklet run after the frontend has queued transmit requests: first reap
++ * finished grants (net_tx_action_dealloc), then dequeue interfaces from
++ * the schedule list, apply credit-based rate limiting, map the guests'
++ * frames via the grant table and inject the resulting skbs into the
++ * network stack.
++ */
++static void net_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ struct sk_buff *skb;
++ netif_t *netif;
++ netif_tx_request_t txreq;
++ netif_tx_request_t txfrags[MAX_SKB_FRAGS];
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++ u16 pending_idx;
++ RING_IDX i;
++ gnttab_map_grant_ref_t *mop;
++ unsigned int data_len;
++ int ret, work_to_do;
++
++ if (dealloc_cons != dealloc_prod)
++ net_tx_action_dealloc();
++
++ mop = tx_map_ops;
++ while (((NR_PENDING_REQS + MAX_SKB_FRAGS) < MAX_PENDING_REQS) &&
++ !list_empty(&net_schedule_list)) {
++ /* Get a netif from the list with work to do. */
++ ent = net_schedule_list.next;
++ netif = list_entry(ent, netif_t, list);
++ netif_get(netif);
++ remove_from_net_schedule_list(netif);
++
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, work_to_do);
++ if (!work_to_do) {
++ netif_put(netif);
++ continue;
++ }
++
++ i = netif->tx.req_cons;
++ rmb(); /* Ensure that we see the request before we copy it. */
++ memcpy(&txreq, RING_GET_REQUEST(&netif->tx, i), sizeof(txreq));
++
++ /* Credit-based scheduling. */
++ if (txreq.size > netif->remaining_credit) {
++ unsigned long now = jiffies;
++ unsigned long next_credit =
++ netif->credit_timeout.expires +
++ msecs_to_jiffies(netif->credit_usec / 1000);
++
++ /* Timer could already be pending in rare cases. */
++ if (timer_pending(&netif->credit_timeout)) {
++ netif_put(netif);
++ continue;
++ }
++
++ /* Passed the point where we can replenish credit? */
++ if (time_after_eq(now, next_credit)) {
++ netif->credit_timeout.expires = now;
++ tx_add_credit(netif);
++ }
++
++ /* Still too big to send right now? Set a callback. */
++ if (txreq.size > netif->remaining_credit) {
++ netif->credit_timeout.data =
++ (unsigned long)netif;
++ netif->credit_timeout.function =
++ tx_credit_callback;
++ __mod_timer(&netif->credit_timeout,
++ next_credit);
++ netif_put(netif);
++ continue;
++ }
++ }
++ netif->remaining_credit -= txreq.size;
++
++ work_to_do--;
++ netif->tx.req_cons = ++i;
++
++ memset(extras, 0, sizeof(extras));
++ if (txreq.flags & NETTXF_extra_info) {
++ work_to_do = netbk_get_extras(netif, extras,
++ work_to_do);
++ i = netif->tx.req_cons;
++ if (unlikely(work_to_do < 0)) {
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ ret = netbk_count_requests(netif, &txreq, txfrags, work_to_do);
++ if (unlikely(ret < 0)) {
++ netbk_tx_err(netif, &txreq, i - ret);
++ continue;
++ }
++ i += ret;
++
++ if (unlikely(txreq.size < ETH_HLEN)) {
++ DPRINTK("Bad packet size: %d\n", txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++		/*
++		 * The request must not cross a page boundary, as the
++		 * payload must not be fragmented.
++		 */
++ if (unlikely((txreq.offset + txreq.size) > PAGE_SIZE)) {
++ DPRINTK("txreq.offset: %x, size: %u, end: %lu\n",
++ txreq.offset, txreq.size,
++ (txreq.offset &~PAGE_MASK) + txreq.size);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++
++ pending_idx = pending_ring[MASK_PEND_IDX(pending_cons)];
++
++ data_len = (txreq.size > PKT_PROT_LEN &&
++ ret < MAX_SKB_FRAGS) ?
++ PKT_PROT_LEN : txreq.size;
++
++ skb = alloc_skb(data_len + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(skb == NULL)) {
++			DPRINTK("Can't allocate a skb in net_tx_action.\n");
++ netbk_tx_err(netif, &txreq, i);
++ break;
++ }
++
++ /* Packets passed to netif_rx() must have some headroom. */
++ skb_reserve(skb, 16 + NET_IP_ALIGN);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (netbk_set_skb_gso(skb, gso)) {
++ kfree_skb(skb);
++ netbk_tx_err(netif, &txreq, i);
++ continue;
++ }
++ }
++
++ gnttab_set_map_op(mop, idx_to_kaddr(pending_idx),
++ GNTMAP_host_map | GNTMAP_readonly,
++ txreq.gref, netif->domid);
++ mop++;
++
++ memcpy(&pending_tx_info[pending_idx].req,
++ &txreq, sizeof(txreq));
++ pending_tx_info[pending_idx].netif = netif;
++ *((u16 *)skb->data) = pending_idx;
++
++ __skb_put(skb, data_len);
++
++ skb_shinfo(skb)->nr_frags = ret;
++ if (data_len < txreq.size) {
++ skb_shinfo(skb)->nr_frags++;
++ skb_shinfo(skb)->frags[0].page =
++ (void *)(unsigned long)pending_idx;
++ } else {
++ /* Discriminate from any valid pending_idx value. */
++ skb_shinfo(skb)->frags[0].page = (void *)~0UL;
++ }
++
++ __skb_queue_tail(&tx_queue, skb);
++
++ pending_cons++;
++
++ mop = netbk_get_requests(netif, skb, txfrags, mop);
++
++ netif->tx.req_cons = i;
++ netif_schedule_work(netif);
++
++ if ((mop - tx_map_ops) >= ARRAY_SIZE(tx_map_ops))
++ break;
++ }
++
++ if (mop == tx_map_ops)
++ return;
++
++ ret = HYPERVISOR_grant_table_op(
++ GNTTABOP_map_grant_ref, tx_map_ops, mop - tx_map_ops);
++ BUG_ON(ret);
++
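++	/*
++	 * Second pass: the single batched hypercall above has mapped (or
++	 * failed to map) every queued request.  Walk the queued skbs in
++	 * the same order, checking each map op's status, completing the
++	 * linear area and fragments, and handing good skbs to netif_rx().
++	 */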
++ mop = tx_map_ops;
++ while ((skb = __skb_dequeue(&tx_queue)) != NULL) {
++ netif_tx_request_t *txp;
++
++ pending_idx = *((u16 *)skb->data);
++ netif = pending_tx_info[pending_idx].netif;
++ txp = &pending_tx_info[pending_idx].req;
++
++ /* Check the remap error code. */
++ if (unlikely(netbk_tx_check_mop(skb, &mop))) {
++ DPRINTK("netback grant failed.\n");
++ skb_shinfo(skb)->nr_frags = 0;
++ kfree_skb(skb);
++ continue;
++ }
++
++ data_len = skb->len;
++ memcpy(skb->data,
++ (void *)(idx_to_kaddr(pending_idx)|txp->offset),
++ data_len);
++ if (data_len < txp->size) {
++ /* Append the packet payload as a fragment. */
++ txp->offset += data_len;
++ txp->size -= data_len;
++ } else {
++ /* Schedule a response immediately. */
++ netif_idx_release(pending_idx);
++ }
++
++ /*
++ * Old frontends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (txp->flags & (NETTXF_data_validated|NETTXF_csum_blank)) {
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ skb->proto_data_valid = 1;
++ } else {
++ skb->ip_summed = CHECKSUM_NONE;
++ skb->proto_data_valid = 0;
++ }
++ skb->proto_csum_blank = !!(txp->flags & NETTXF_csum_blank);
++
++ netbk_fill_frags(skb);
++
++ skb->dev = netif->dev;
++ skb->protocol = eth_type_trans(skb, skb->dev);
++
++ netif->stats.rx_bytes += skb->len;
++ netif->stats.rx_packets++;
++
++ if (unlikely(netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB) &&
++ unlikely(skb_linearize(skb))) {
++ DPRINTK("Can't linearize skb in net_tx_action.\n");
++ kfree_skb(skb);
++ continue;
++ }
++
++ netif_rx(skb);
++ netif->dev->last_rx = jiffies;
++ }
++
++ if (netbk_copy_skb_mode == NETBK_DELAYED_COPY_SKB &&
++ !list_empty(&pending_inuse_head)) {
++ struct netbk_tx_pending_inuse *oldest;
++
++ oldest = list_entry(pending_inuse_head.next,
++ struct netbk_tx_pending_inuse, list);
++ mod_timer(&netbk_tx_pending_timer, oldest->alloc_time + HZ);
++ }
++}
++
++static void netif_idx_release(u16 pending_idx)
++{
++ static DEFINE_SPINLOCK(_lock);
++ unsigned long flags;
++
++ spin_lock_irqsave(&_lock, flags);
++ dealloc_ring[MASK_PEND_IDX(dealloc_prod)] = pending_idx;
++ /* Sync with net_tx_action_dealloc: insert idx /then/ incr producer. */
++ smp_wmb();
++ dealloc_prod++;
++ spin_unlock_irqrestore(&_lock, flags);
++
++ tasklet_schedule(&net_tx_tasklet);
++}
++
++static void netif_page_release(struct page *page)
++{
++ netif_idx_release(netif_page_index(page));
++}
++
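++/*
++ * Event-channel interrupt: the frontend has produced new tx requests
++ * and/or consumed rx responses, so schedule tx work and wake the device's
++ * transmit queue if the frontend's rx ring now has room.
++ */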
++irqreturn_t netif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ netif_t *netif = dev_id;
++
++ add_to_net_schedule_list_tail(netif);
++ maybe_schedule_tx_action();
++
++ if (netif_schedulable(netif) && !netbk_queue_full(netif))
++ netif_wake_queue(netif->dev);
++
++ return IRQ_HANDLED;
++}
++
++static void make_tx_response(netif_t *netif,
++ netif_tx_request_t *txp,
++ s8 st)
++{
++ RING_IDX i = netif->tx.rsp_prod_pvt;
++ netif_tx_response_t *resp;
++ int notify;
++
++ resp = RING_GET_RESPONSE(&netif->tx, i);
++ resp->id = txp->id;
++ resp->status = st;
++
++ if (txp->flags & NETTXF_extra_info)
++ RING_GET_RESPONSE(&netif->tx, ++i)->status = NETIF_RSP_NULL;
++
++ netif->tx.rsp_prod_pvt = ++i;
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&netif->tx, notify);
++ if (notify)
++ notify_remote_via_irq(netif->irq);
++
++#ifdef CONFIG_XEN_NETDEV_PIPELINED_TRANSMITTER
++ if (i == netif->tx.req_cons) {
++ int more_to_do;
++ RING_FINAL_CHECK_FOR_REQUESTS(&netif->tx, more_to_do);
++ if (more_to_do)
++ add_to_net_schedule_list_tail(netif);
++ }
++#endif
++}
++
++static netif_rx_response_t *make_rx_response(netif_t *netif,
++ u16 id,
++ s8 st,
++ u16 offset,
++ u16 size,
++ u16 flags)
++{
++ RING_IDX i = netif->rx.rsp_prod_pvt;
++ netif_rx_response_t *resp;
++
++ resp = RING_GET_RESPONSE(&netif->rx, i);
++ resp->offset = offset;
++ resp->flags = flags;
++ resp->id = id;
++ resp->status = (s16)size;
++ if (st < 0)
++ resp->status = (s16)st;
++
++ netif->rx.rsp_prod_pvt = ++i;
++
++ return resp;
++}
++
++#ifdef NETBE_DEBUG_INTERRUPT
++static irqreturn_t netif_be_dbg(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct list_head *ent;
++ netif_t *netif;
++ int i = 0;
++
++ printk(KERN_ALERT "netif_schedule_list:\n");
++ spin_lock_irq(&net_schedule_list_lock);
++
++ list_for_each (ent, &net_schedule_list) {
++ netif = list_entry(ent, netif_t, list);
++ printk(KERN_ALERT " %d: private(rx_req_cons=%08x "
++ "rx_resp_prod=%08x\n",
++ i, netif->rx.req_cons, netif->rx.rsp_prod_pvt);
++ printk(KERN_ALERT " tx_req_cons=%08x tx_resp_prod=%08x)\n",
++ netif->tx.req_cons, netif->tx.rsp_prod_pvt);
++ printk(KERN_ALERT " shared(rx_req_prod=%08x "
++ "rx_resp_prod=%08x\n",
++ netif->rx.sring->req_prod, netif->rx.sring->rsp_prod);
++ printk(KERN_ALERT " rx_event=%08x tx_req_prod=%08x\n",
++ netif->rx.sring->rsp_event, netif->tx.sring->req_prod);
++ printk(KERN_ALERT " tx_resp_prod=%08x, tx_event=%08x)\n",
++ netif->tx.sring->rsp_prod, netif->tx.sring->rsp_event);
++ i++;
++ }
++
++ spin_unlock_irq(&net_schedule_list_lock);
++ printk(KERN_ALERT " ** End of netif_schedule_list **\n");
++
++ return IRQ_HANDLED;
++}
++#endif
++
++static int __init netback_init(void)
++{
++ int i;
++ struct page *page;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ /* We can increase reservation by this much in net_rx_action(). */
++ balloon_update_driver_allowance(NET_RX_RING_SIZE);
++
++ skb_queue_head_init(&rx_queue);
++ skb_queue_head_init(&tx_queue);
++
++ init_timer(&net_timer);
++ net_timer.data = 0;
++ net_timer.function = net_alarm;
++
++ init_timer(&netbk_tx_pending_timer);
++ netbk_tx_pending_timer.data = 0;
++ netbk_tx_pending_timer.function = netbk_tx_pending_timeout;
++
++ mmap_pages = alloc_empty_pages_and_pagevec(MAX_PENDING_REQS);
++ if (mmap_pages == NULL) {
++		printk(KERN_ERR "%s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < MAX_PENDING_REQS; i++) {
++ page = mmap_pages[i];
++ SetPageForeign(page, netif_page_release);
++ netif_page_index(page) = i;
++ INIT_LIST_HEAD(&pending_inuse[i].list);
++ }
++
++ pending_cons = 0;
++ pending_prod = MAX_PENDING_REQS;
++ for (i = 0; i < MAX_PENDING_REQS; i++)
++ pending_ring[i] = i;
++
++ spin_lock_init(&net_schedule_list_lock);
++ INIT_LIST_HEAD(&net_schedule_list);
++
++ netbk_copy_skb_mode = NETBK_DONT_COPY_SKB;
++ if (MODPARM_copy_skb) {
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_and_replace,
++ NULL, 0))
++ netbk_copy_skb_mode = NETBK_ALWAYS_COPY_SKB;
++ else
++ netbk_copy_skb_mode = NETBK_DELAYED_COPY_SKB;
++ }
++
++ netif_accel_init();
++
++ netif_xenbus_init();
++
++#ifdef NETBE_DEBUG_INTERRUPT
++ (void)bind_virq_to_irqhandler(VIRQ_DEBUG,
++ 0,
++ netif_be_dbg,
++ SA_SHIRQ,
++ "net-be-dbg",
++ &netif_be_dbg);
++#endif
++
++ return 0;
++}
++
++module_init(netback_init);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/netback/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netback/xenbus.c 2008-09-01 12:07:31.000000000 +0200
+@@ -0,0 +1,454 @@
++/* Xenbus code for netif backend
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++ Copyright (C) 2005 XenSource Ltd
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++#if 0
++#undef DPRINTK
++#define DPRINTK(fmt, args...) \
++ printk("netback/xenbus (%s:%d) " fmt ".\n", __FUNCTION__, __LINE__, ##args)
++#endif
++
++
++static int connect_rings(struct backend_info *);
++static void connect(struct backend_info *);
++static void backend_create_netif(struct backend_info *be);
++
++static int netback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ netback_remove_accelerators(be, dev);
++
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and switch to InitWait.
++ */
++static int netback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++ int sg;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ sg = 1;
++ if (netbk_copy_skb_mode == NETBK_ALWAYS_COPY_SKB)
++ sg = 0;
++
++ do {
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto fail;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", sg);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4",
++ "%d", sg);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++
++ /* We support rx-copy path. */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-copy", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-copy";
++ goto abort_transaction;
++ }
++
++ /*
++ * We don't support rx-flip path (except old guests who don't
++ * grok this feature flag).
++ */
++ err = xenbus_printf(xbt, dev->nodename,
++ "feature-rx-flip", "%d", 0);
++ if (err) {
++ message = "writing feature-rx-flip";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ } while (err == -EAGAIN);
++
++ if (err) {
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto fail;
++ }
++
++ netback_probe_accelerators(be, dev);
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ /* This kicks hotplug scripts, so do it immediately. */
++ backend_create_netif(be);
++
++ return 0;
++
++abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++fail:
++ DPRINTK("failed");
++ netback_remove(dev);
++ return err;
++}
++
++
++/**
++ * Handle the creation of the hotplug script environment. We add the script
++ * and vif variables to the environment, for the benefit of the vif-* hotplug
++ * scripts.
++ */
++static int netback_uevent(struct xenbus_device *xdev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct backend_info *be = xdev->dev.driver_data;
++ netif_t *netif = be->netif;
++ int i = 0, length = 0;
++ char *val;
++
++ DPRINTK("netback_uevent");
++
++ val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
++ if (IS_ERR(val)) {
++ int err = PTR_ERR(val);
++ xenbus_dev_fatal(xdev, err, "reading script");
++ return err;
++	} else {
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size,
++ &length, "script=%s", val);
++ kfree(val);
++ }
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "vif=%s", netif->dev->name);
++
++ envp[i] = NULL;
++
++ return 0;
++}
++
++
++static void backend_create_netif(struct backend_info *be)
++{
++ int err;
++ long handle;
++ struct xenbus_device *dev = be->dev;
++
++ if (be->netif != NULL)
++ return;
++
++ err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading handle");
++ return;
++ }
++
++ be->netif = netif_alloc(dev->otherend_id, handle);
++ if (IS_ERR(be->netif)) {
++ err = PTR_ERR(be->netif);
++ be->netif = NULL;
++ xenbus_dev_fatal(dev, err, "creating interface");
++ return;
++ }
++
++ kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
++}
++
++
++/**
++ * Callback received when the frontend's state changes.
++ */
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ DPRINTK("%s", xenbus_strstate(frontend_state));
++
++ be->frontend_state = frontend_state;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (dev->state == XenbusStateClosed) {
++ printk(KERN_INFO "%s: %s: prepare for reconnect\n",
++ __FUNCTION__, dev->nodename);
++ xenbus_switch_state(dev, XenbusStateInitWait);
++ }
++ break;
++
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ if (dev->state == XenbusStateConnected)
++ break;
++ backend_create_netif(be);
++ if (be->netif)
++ connect(be);
++ break;
++
++ case XenbusStateClosing:
++ if (be->netif) {
++ kobject_uevent(&dev->dev.kobj, KOBJ_OFFLINE);
++ netif_disconnect(be->netif);
++ be->netif = NULL;
++ }
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
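++/*
++ * Parse this vif's xenstore "rate" node, formatted "<bytes>,<usec>"
++ * (e.g. "10000,50000" would allow 10000 bytes every 50ms).  Defaults to
++ * unlimited bandwidth if the node is absent or malformed.
++ */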
++static void xen_net_read_rate(struct xenbus_device *dev,
++ unsigned long *bytes, unsigned long *usec)
++{
++ char *s, *e;
++ unsigned long b, u;
++ char *ratestr;
++
++ /* Default to unlimited bandwidth. */
++ *bytes = ~0UL;
++ *usec = 0;
++
++ ratestr = xenbus_read(XBT_NIL, dev->nodename, "rate", NULL);
++ if (IS_ERR(ratestr))
++ return;
++
++ s = ratestr;
++ b = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != ','))
++ goto fail;
++
++ s = e + 1;
++ u = simple_strtoul(s, &e, 10);
++ if ((s == e) || (*e != '\0'))
++ goto fail;
++
++ *bytes = b;
++ *usec = u;
++
++ kfree(ratestr);
++ return;
++
++ fail:
++ WPRINTK("Failed to parse network rate limit. Traffic unlimited.\n");
++ kfree(ratestr);
++}
++
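++/* Parse the colon-separated xenstore "mac" node ("aa:bb:cc:dd:ee:ff"). */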
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++static void connect(struct backend_info *be)
++{
++ int err;
++ struct xenbus_device *dev = be->dev;
++
++ err = connect_rings(be);
++ if (err)
++ return;
++
++ err = xen_net_read_mac(dev, be->netif->fe_dev_addr);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
++ return;
++ }
++
++ xen_net_read_rate(dev, &be->netif->credit_bytes,
++ &be->netif->credit_usec);
++ be->netif->remaining_credit = be->netif->credit_bytes;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ netif_wake_queue(be->netif->dev);
++}
++
++
++static int connect_rings(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long tx_ring_ref, rx_ring_ref;
++ unsigned int evtchn, rx_copy;
++ int err;
++ int val;
++
++ DPRINTK("");
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "tx-ring-ref", "%lu", &tx_ring_ref,
++ "rx-ring-ref", "%lu", &rx_ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ err = xenbus_scanf(XBT_NIL, dev->otherend, "request-rx-copy", "%u",
++ &rx_copy);
++ if (err == -ENOENT) {
++ err = 0;
++ rx_copy = 0;
++ }
++ if (err < 0) {
++ xenbus_dev_fatal(dev, err, "reading %s/request-rx-copy",
++ dev->otherend);
++ return err;
++ }
++ be->netif->copying_receiver = !!rx_copy;
++
++ if (be->netif->dev->tx_queue_len != 0) {
++ if (xenbus_scanf(XBT_NIL, dev->otherend,
++ "feature-rx-notify", "%d", &val) < 0)
++ val = 0;
++ if (val)
++ be->netif->can_queue = 1;
++ else
++ /* Must be non-zero for pfifo_fast to work. */
++ be->netif->dev->tx_queue_len = 1;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_SG;
++ be->netif->dev->features |= NETIF_F_SG;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-gso-tcpv4", "%d",
++ &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features |= NETIF_F_TSO;
++ be->netif->dev->features |= NETIF_F_TSO;
++ }
++
++ if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-no-csum-offload",
++ "%d", &val) < 0)
++ val = 0;
++ if (val) {
++ be->netif->features &= ~NETIF_F_IP_CSUM;
++ be->netif->dev->features &= ~NETIF_F_IP_CSUM;
++ }
++
++ /* Map the shared frame, irq etc. */
++ err = netif_map(be->netif, tx_ring_ref, rx_ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_fatal(dev, err,
++ "mapping shared-frames %lu/%lu port %u",
++ tx_ring_ref, rx_ring_ref, evtchn);
++ return err;
++ }
++ return 0;
++}
++
++
++/* ** Driver Registration ** */
++
++
++static const struct xenbus_device_id netback_ids[] = {
++ { "vif" },
++ { "" }
++};
++
++
++static struct xenbus_driver netback = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netback_ids,
++ .probe = netback_probe,
++ .remove = netback_remove,
++ .uevent = netback_uevent,
++ .otherend_changed = frontend_changed,
++};
++
++
++void netif_xenbus_init(void)
++{
++ xenbus_register_backend(&netback);
++}
+Index: head-2008-11-25/drivers/xen/netfront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netfront/Makefile 2007-07-12 08:54:23.000000000 +0200
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_NETDEV_FRONTEND) := xennet.o
++
++xennet-objs := netfront.o accel.o
+Index: head-2008-11-25/drivers/xen/netfront/accel.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netfront/accel.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,824 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++
++#include "netfront.h"
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("netfront/accel (%s:%d) " fmt, \
++ __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "netfront/accel: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "netfront/accel: " fmt, ##args)
++
++static int netfront_remove_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev);
++static int netfront_load_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev,
++ const char *frontend);
++
++/*
++ * List of all netfront accelerator plugin modules available. Each
++ * list entry is of type struct netfront_accelerator.
++ */
++static struct list_head accelerators_list;
++
++/* Lock to protect access to accelerators_list */
++static spinlock_t accelerators_lock;
++
++/* Workqueue to process acceleration configuration changes */
++struct workqueue_struct *accel_watch_workqueue;
++
++/* Mutex to prevent concurrent loads and suspends, etc. */
++DEFINE_MUTEX(accelerator_mutex);
++
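++/*
++ * Locking hierarchy as used below: accelerator_mutex serialises plugin
++ * load/remove/suspend; accelerators_lock protects accelerators_list
++ * itself; each accelerator's vif_states_lock protects its vif_states
++ * list and the per-vif hooks pointers.
++ */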
++void netif_init_accel(void)
++{
++ INIT_LIST_HEAD(&accelerators_list);
++ spin_lock_init(&accelerators_lock);
++
++ accel_watch_workqueue = create_workqueue("net_accel");
++}
++
++void netif_exit_accel(void)
++{
++ struct netfront_accelerator *accelerator, *tmp;
++ unsigned long flags;
++
++ flush_workqueue(accel_watch_workqueue);
++ destroy_workqueue(accel_watch_workqueue);
++
++ spin_lock_irqsave(&accelerators_lock, flags);
++
++ list_for_each_entry_safe(accelerator, tmp, &accelerators_list, link) {
++ BUG_ON(!list_empty(&accelerator->vif_states));
++
++ list_del(&accelerator->link);
++ kfree(accelerator->frontend);
++ kfree(accelerator);
++ }
++
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++}
++
++
++/*
++ * Watch the configured accelerator and change plugin if it's modified
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++static void accel_watch_work(struct work_struct *context)
++#else
++static void accel_watch_work(void *context)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ struct netfront_accel_vif_state *vif_state =
++ container_of(context, struct netfront_accel_vif_state,
++ accel_work);
++#else
++ struct netfront_accel_vif_state *vif_state =
++ (struct netfront_accel_vif_state *)context;
++#endif
++ struct netfront_info *np = vif_state->np;
++ char *accel_frontend;
++ int accel_len, rc = -1;
++
++ mutex_lock(&accelerator_mutex);
++
++ accel_frontend = xenbus_read(XBT_NIL, np->xbdev->otherend,
++ "accel-frontend", &accel_len);
++ if (IS_ERR(accel_frontend)) {
++ accel_frontend = NULL;
++ netfront_remove_accelerator(np, np->xbdev);
++ } else {
++ /* If this is the first time, request the accelerator,
++ otherwise only request one if it has changed */
++ if (vif_state->accel_frontend == NULL) {
++ rc = netfront_load_accelerator(np, np->xbdev,
++ accel_frontend);
++ } else {
++ if (strncmp(vif_state->accel_frontend, accel_frontend,
++ accel_len)) {
++ netfront_remove_accelerator(np, np->xbdev);
++ rc = netfront_load_accelerator(np, np->xbdev,
++ accel_frontend);
++ }
++ }
++ }
++
++ /* Get rid of previous state and replace with the new name */
++ if (vif_state->accel_frontend != NULL)
++ kfree(vif_state->accel_frontend);
++ vif_state->accel_frontend = accel_frontend;
++
++ mutex_unlock(&accelerator_mutex);
++
++ if (rc == 0) {
++ DPRINTK("requesting module %s\n", accel_frontend);
++ request_module("%s", accel_frontend);
++ /*
++ * Module should now call netfront_accelerator_loaded() once
++ * it's up and running, and we can continue from there
++ */
++ }
++}
++
++
++static void accel_watch_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct netfront_accel_vif_state *vif_state =
++ container_of(watch, struct netfront_accel_vif_state,
++ accel_watch);
++ queue_work(accel_watch_workqueue, &vif_state->accel_work);
++}
++
++
++void netfront_accelerator_add_watch(struct netfront_info *np)
++{
++ int err;
++
++ /* Check we're not trying to overwrite an existing watch */
++ BUG_ON(np->accel_vif_state.accel_watch.node != NULL);
++
++ /* Get a watch on the accelerator plugin */
++ err = xenbus_watch_path2(np->xbdev, np->xbdev->otherend,
++ "accel-frontend",
++ &np->accel_vif_state.accel_watch,
++ accel_watch_changed);
++ if (err) {
++ DPRINTK("%s: Failed to register accel watch: %d\n",
++ __FUNCTION__, err);
++ np->accel_vif_state.accel_watch.node = NULL;
++ }
++}
++
++
++static void netfront_accelerator_remove_watch(struct netfront_info *np)
++{
++ struct netfront_accel_vif_state *vif_state = &np->accel_vif_state;
++
++ /* Get rid of watch on accelerator plugin */
++ if (vif_state->accel_watch.node != NULL) {
++ unregister_xenbus_watch(&vif_state->accel_watch);
++ kfree(vif_state->accel_watch.node);
++ vif_state->accel_watch.node = NULL;
++
++ flush_workqueue(accel_watch_workqueue);
++
++ /* Clean up any state left from watch */
++ if (vif_state->accel_frontend != NULL) {
++ kfree(vif_state->accel_frontend);
++ vif_state->accel_frontend = NULL;
++ }
++ }
++}
++
++
++/*
++ * Initialise the accel_vif_state field in the netfront state
++ */
++void init_accelerator_vif(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ np->accelerator = NULL;
++
++ /* It's assumed that these things don't change */
++ np->accel_vif_state.np = np;
++ np->accel_vif_state.dev = dev;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work);
++#else
++ INIT_WORK(&np->accel_vif_state.accel_work, accel_watch_work,
++ &np->accel_vif_state);
++#endif
++}
++
++
++/*
++ * Compare a frontend description string against an accelerator to see
++ * if they match. Would ultimately be nice to replace the string with
++ * a unique numeric identifier for each accelerator.
++ */
++static int match_accelerator(const char *frontend,
++ struct netfront_accelerator *accelerator)
++{
++ return strcmp(frontend, accelerator->frontend) == 0;
++}
++
++
++/*
++ * Add a frontend vif to the list of vifs that is using a netfront
++ * accelerator plugin module.
++ */
++static void add_accelerator_vif(struct netfront_accelerator *accelerator,
++ struct netfront_info *np)
++{
++ unsigned long flags;
++
++ /* Need lock to write list */
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++ if (np->accelerator == NULL) {
++ np->accelerator = accelerator;
++
++ list_add(&np->accel_vif_state.link, &accelerator->vif_states);
++ } else {
++ /*
++ * May get here legitimately if suspend_cancel is
++ * called, but in that case configuration should not
++ * have changed
++ */
++ BUG_ON(np->accelerator != accelerator);
++ }
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++}
++
++
++/*
++ * Initialise the state to track an accelerator plugin module.
++ */
++static int init_accelerator(const char *frontend,
++ struct netfront_accelerator **result,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accelerator *accelerator =
++ kmalloc(sizeof(struct netfront_accelerator), GFP_KERNEL);
++ unsigned long flags;
++ int frontend_len;
++
++ if (!accelerator) {
++ DPRINTK("no memory for accelerator\n");
++ return -ENOMEM;
++ }
++
++ frontend_len = strlen(frontend) + 1;
++ accelerator->frontend = kmalloc(frontend_len, GFP_KERNEL);
++ if (!accelerator->frontend) {
++ DPRINTK("no memory for accelerator\n");
++ kfree(accelerator);
++ return -ENOMEM;
++ }
++ strlcpy(accelerator->frontend, frontend, frontend_len);
++
++ INIT_LIST_HEAD(&accelerator->vif_states);
++ spin_lock_init(&accelerator->vif_states_lock);
++
++ accelerator->hooks = hooks;
++
++ spin_lock_irqsave(&accelerators_lock, flags);
++ list_add(&accelerator->link, &accelerators_list);
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++
++ *result = accelerator;
++
++ return 0;
++}
++
++
++/*
++ * Modify the hooks stored in the per-vif state to match that in the
++ * netfront accelerator's state.
++ */
++static void
++accelerator_set_vif_state_hooks(struct netfront_accel_vif_state *vif_state)
++{
++ /* This function must be called with the vif_states_lock held */
++
++	DPRINTK("%p\n", vif_state);
++
++ /* Make sure there are no data path operations going on */
++ netif_poll_disable(vif_state->np->netdev);
++ netif_tx_lock_bh(vif_state->np->netdev);
++
++ vif_state->hooks = vif_state->np->accelerator->hooks;
++
++ netif_tx_unlock_bh(vif_state->np->netdev);
++ netif_poll_enable(vif_state->np->netdev);
++}
++
++
++static void accelerator_probe_new_vif(struct netfront_info *np,
++ struct xenbus_device *dev,
++ struct netfront_accelerator *accelerator)
++{
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++
++ DPRINTK("\n");
++
++ /* Include this frontend device on the accelerator's list */
++ add_accelerator_vif(accelerator, np);
++
++ hooks = accelerator->hooks;
++
++ if (hooks) {
++ if (hooks->new_device(np->netdev, dev) == 0) {
++ spin_lock_irqsave
++ (&accelerator->vif_states_lock, flags);
++
++ accelerator_set_vif_state_hooks(&np->accel_vif_state);
++
++ spin_unlock_irqrestore
++ (&accelerator->vif_states_lock, flags);
++ }
++ }
++}
++
++
++/*
++ * Request that a particular netfront accelerator plugin is loaded.
++ * Usually called as a result of the vif configuration specifying
++ * which one to use. Must be called with accelerator_mutex held
++ */
++static int netfront_load_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev,
++ const char *frontend)
++{
++ struct netfront_accelerator *accelerator;
++ int rc = 0;
++
++ DPRINTK(" %s\n", frontend);
++
++ /*
++ * Look at list of loaded accelerators to see if the requested
++ * one is already there
++ */
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ accelerator_probe_new_vif(np, dev, accelerator);
++ return 0;
++ }
++ }
++
++ /* Couldn't find it, so create a new one and load the module */
++	rc = init_accelerator(frontend, &accelerator, NULL);
++	if (rc < 0)
++		return rc;
++
++ /* Include this frontend device on the accelerator's list */
++ add_accelerator_vif(accelerator, np);
++
++ return rc;
++}
++
++
++/*
++ * Go through all the netfront vifs and see if they have requested
++ * this accelerator. Notify the accelerator plugin of the relevant
++ * device if so. Called when an accelerator plugin module is first
++ * loaded and connects to netfront.
++ */
++static void
++accelerator_probe_vifs(struct netfront_accelerator *accelerator,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accel_vif_state *vif_state, *tmp;
++ unsigned long flags;
++
++ DPRINTK("%p\n", accelerator);
++
++ /*
++ * Store the hooks for future calls to probe a new device, and
++ * to wire into the vif_state once the accelerator plugin is
++ * ready to accelerate each vif
++ */
++ BUG_ON(hooks == NULL);
++ accelerator->hooks = hooks;
++
++ /*
++ * currently hold accelerator_mutex, so don't need
++ * vif_states_lock to read the list
++ */
++ list_for_each_entry_safe(vif_state, tmp, &accelerator->vif_states,
++ link) {
++ struct netfront_info *np = vif_state->np;
++
++ if (hooks->new_device(np->netdev, vif_state->dev) == 0) {
++ spin_lock_irqsave
++ (&accelerator->vif_states_lock, flags);
++
++ accelerator_set_vif_state_hooks(vif_state);
++
++ spin_unlock_irqrestore
++ (&accelerator->vif_states_lock, flags);
++ }
++ }
++}
++
++
++/*
++ * Called by a netfront accelerator plugin module when it has loaded.
++ * Returns 0 on success, NETFRONT_ACCEL_VERSION if the caller's version
++ * is newer than ours (the caller may retry with a lower version), or
++ * -EPROTO if the caller's version is older than we support.
++ */
++int netfront_accelerator_loaded(int version, const char *frontend,
++ struct netfront_accel_hooks *hooks)
++{
++ struct netfront_accelerator *accelerator;
++
++ if (is_initial_xendomain())
++ return -EINVAL;
++
++ if (version != NETFRONT_ACCEL_VERSION) {
++ if (version > NETFRONT_ACCEL_VERSION) {
++ /* Caller has higher version number, leave it
++ up to them to decide whether to continue.
++ They can re-call with a lower number if
++ they're happy to be compatible with us */
++ return NETFRONT_ACCEL_VERSION;
++ } else {
++ /* We have a more recent version than caller.
++ Currently reject, but may in future be able
++ to be backwardly compatible */
++ return -EPROTO;
++ }
++ }
++
++ mutex_lock(&accelerator_mutex);
++
++ /*
++ * Look through list of accelerators to see if it has already
++ * been requested
++ */
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ accelerator_probe_vifs(accelerator, hooks);
++ goto out;
++ }
++ }
++
++ /*
++ * If it wasn't in the list, add it now so that when it is
++ * requested the caller will find it
++ */
++ DPRINTK("Couldn't find matching accelerator (%s)\n",
++ frontend);
++
++ init_accelerator(frontend, &accelerator, hooks);
++
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(netfront_accelerator_loaded);
++
++
++/*
++ * Remove the hooks from a single vif state.
++ */
++static void
++accelerator_remove_single_hook(struct netfront_accelerator *accelerator,
++ struct netfront_accel_vif_state *vif_state)
++{
++ /* Make sure there are no data path operations going on */
++ netif_poll_disable(vif_state->np->netdev);
++ netif_tx_lock_bh(vif_state->np->netdev);
++
++ /*
++ * Remove the hooks, but leave the vif_state on the
++ * accelerator's list as that signifies this vif is
++ * interested in using that accelerator if it becomes
++ * available again
++ */
++ vif_state->hooks = NULL;
++
++ netif_tx_unlock_bh(vif_state->np->netdev);
++ netif_poll_enable(vif_state->np->netdev);
++}
++
++
++/*
++ * Safely remove the accelerator function hooks from a netfront state.
++ */
++static void accelerator_remove_hooks(struct netfront_accelerator *accelerator)
++{
++ struct netfront_accel_hooks *hooks;
++ struct netfront_accel_vif_state *vif_state, *tmp;
++ unsigned long flags;
++
++ /* Mutex is held so don't need vif_states_lock to iterate list */
++ list_for_each_entry_safe(vif_state, tmp,
++ &accelerator->vif_states,
++ link) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++		if (vif_state->hooks) {
++ hooks = vif_state->hooks;
++
++ /* Last chance to get statistics from the accelerator */
++ hooks->get_stats(vif_state->np->netdev,
++ &vif_state->np->stats);
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ flags);
++
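++			/*
++			 * The spinlock is dropped before removing the hooks:
++			 * that path disables polling (which may sleep) and
++			 * takes the netdev tx lock, while accelerator_mutex
++			 * keeps the list stable meanwhile.
++			 */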
++ accelerator_remove_single_hook(accelerator, vif_state);
++
++ accelerator->hooks->remove(vif_state->dev);
++ } else {
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ flags);
++ }
++ }
++
++ accelerator->hooks = NULL;
++}
++
++
++/*
++ * Called by a netfront accelerator when it is unloaded. This safely
++ * removes the hooks into the plugin and blocks until all devices have
++ * finished using it, so on return it is safe to unload.
++ */
++void netfront_accelerator_stop(const char *frontend)
++{
++ struct netfront_accelerator *accelerator;
++ unsigned long flags;
++
++ mutex_lock(&accelerator_mutex);
++ spin_lock_irqsave(&accelerators_lock, flags);
++
++ list_for_each_entry(accelerator, &accelerators_list, link) {
++ if (match_accelerator(frontend, accelerator)) {
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++
++ accelerator_remove_hooks(accelerator);
++
++ goto out;
++ }
++ }
++ spin_unlock_irqrestore(&accelerators_lock, flags);
++ out:
++ mutex_unlock(&accelerator_mutex);
++}
++EXPORT_SYMBOL_GPL(netfront_accelerator_stop);
++
++
++/* Helper for call_remove and do_suspend */
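++/*
++ * Expects vif_states_lock to be held on entry; it is dropped around
++ * the plugin callbacks and re-acquired before returning, using the
++ * caller's saved irq flags.
++ */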
++static int do_remove(struct netfront_info *np, struct xenbus_device *dev,
++ unsigned long *lock_flags)
++{
++ struct netfront_accelerator *accelerator = np->accelerator;
++ struct netfront_accel_hooks *hooks;
++ int rc = 0;
++
++ if (np->accel_vif_state.hooks) {
++ hooks = np->accel_vif_state.hooks;
++
++ /* Last chance to get statistics from the accelerator */
++ hooks->get_stats(np->netdev, &np->stats);
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock,
++ *lock_flags);
++
++ /*
++		 * Try to do the opposite of accelerator_probe_new_vif
++ * to ensure there's no state pointing back at the
++ * netdev
++ */
++ accelerator_remove_single_hook(accelerator,
++ &np->accel_vif_state);
++
++ rc = accelerator->hooks->remove(dev);
++
++ spin_lock_irqsave(&accelerator->vif_states_lock, *lock_flags);
++ }
++
++ return rc;
++}
++
++
++static int netfront_remove_accelerator(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_vif_state *tmp_vif_state;
++ unsigned long flags;
++ int rc = 0;
++
++ /* Check that we've got a device that was accelerated */
++ if (np->accelerator == NULL)
++ return rc;
++
++ accelerator = np->accelerator;
++
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++
++ list_for_each_entry(tmp_vif_state, &accelerator->vif_states,
++ link) {
++ if (tmp_vif_state == &np->accel_vif_state) {
++ list_del(&np->accel_vif_state.link);
++ break;
++ }
++ }
++
++ rc = do_remove(np, dev, &flags);
++
++ np->accelerator = NULL;
++
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++
++ return rc;
++}
++
++
++int netfront_accelerator_call_remove(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ int rc;
++ netfront_accelerator_remove_watch(np);
++ mutex_lock(&accelerator_mutex);
++ rc = netfront_remove_accelerator(np, dev);
++ mutex_unlock(&accelerator_mutex);
++ return rc;
++}
++
++
++int netfront_accelerator_suspend(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ unsigned long flags;
++ int rc = 0;
++
++ netfront_accelerator_remove_watch(np);
++
++ mutex_lock(&accelerator_mutex);
++
++ /* Check that we've got a device that was accelerated */
++ if (np->accelerator == NULL)
++ goto out;
++
++ /*
++ * Call the remove accelerator hook, but leave the vif_state
++ * on the accelerator's list in case there is a suspend_cancel.
++ */
++ spin_lock_irqsave(&np->accelerator->vif_states_lock, flags);
++
++ rc = do_remove(np, dev, &flags);
++
++ spin_unlock_irqrestore(&np->accelerator->vif_states_lock, flags);
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return rc;
++}
++
++
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ /*
++ * Setting the watch will cause it to fire and probe the
++ * accelerator, so no need to call accelerator_probe_new_vif()
++ * directly here
++ */
++ if (dev->state == XenbusStateConnected)
++ netfront_accelerator_add_watch(np);
++ return 0;
++}
++
++
++void netfront_accelerator_resume(struct netfront_info *np,
++ struct xenbus_device *dev)
++{
++ struct netfront_accel_vif_state *accel_vif_state = NULL;
++ spinlock_t *vif_states_lock;
++ unsigned long flags;
++
++ mutex_lock(&accelerator_mutex);
++
++ /* Check that we've got a device that was accelerated */
++	if (np->accelerator == NULL)
++ goto out;
++
++ /* Find the vif_state from the accelerator's list */
++ list_for_each_entry(accel_vif_state, &np->accelerator->vif_states,
++ link) {
++ if (accel_vif_state->dev == dev) {
++ BUG_ON(accel_vif_state != &np->accel_vif_state);
++
++ vif_states_lock = &np->accelerator->vif_states_lock;
++ spin_lock_irqsave(vif_states_lock, flags);
++
++ /*
++ * Remove it from the accelerator's list so
++ * state is consistent for probing new vifs
++ * when they get connected
++ */
++ list_del(&accel_vif_state->link);
++ np->accelerator = NULL;
++
++ spin_unlock_irqrestore(vif_states_lock, flags);
++
++ break;
++ }
++ }
++
++ out:
++ mutex_unlock(&accelerator_mutex);
++ return;
++}
++
++
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++ struct netfront_info *np)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ int rc = 1;
++ unsigned long flags;
++
++ accelerator = np->accelerator;
++
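++	/*
++	 * Re-check the hooks and np->accelerator under vif_states_lock in
++	 * case the plugin is being unloaded concurrently.
++	 */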
++ /* Call the check_ready accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ rc = np->accel_vif_state.hooks->check_ready(dev);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++
++ return rc;
++}
++
++
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++ struct net_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++
++ accelerator = np->accelerator;
++
++	/* Call the stop_napi_irq accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator != NULL) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ np->accel_vif_state.hooks->stop_napi_irq(dev);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++}
++
++
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++ struct net_device *dev)
++{
++ struct netfront_accelerator *accelerator;
++ struct netfront_accel_hooks *hooks;
++ unsigned long flags;
++ int rc = 0;
++
++ accelerator = np->accelerator;
++
++ /* Call the get_stats accelerator hook. */
++ if (np->accel_vif_state.hooks && accelerator != NULL) {
++ spin_lock_irqsave(&accelerator->vif_states_lock, flags);
++ hooks = np->accel_vif_state.hooks;
++ if (hooks && np->accelerator == accelerator)
++ rc = np->accel_vif_state.hooks->get_stats(dev,
++ &np->stats);
++ spin_unlock_irqrestore(&accelerator->vif_states_lock, flags);
++ }
++ return rc;
++}
++
+Index: head-2008-11-25/drivers/xen/netfront/netfront.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netfront/netfront.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,2240 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/netdevice.h>
++#include <linux/inetdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/skbuff.h>
++#include <linux/init.h>
++#include <linux/bitops.h>
++#include <linux/ethtool.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++#include <linux/io.h>
++#include <linux/moduleparam.h>
++#include <net/sock.h>
++#include <net/pkt_sched.h>
++#include <net/arp.h>
++#include <net/route.h>
++#include <asm/uaccess.h>
++#include <xen/evtchn.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/netif.h>
++#include <xen/interface/memory.h>
++#include <xen/balloon.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/uaccess.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++struct netfront_cb {
++ struct page *page;
++ unsigned offset;
++};
++
++#define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
++
++#include "netfront.h"
++
++/*
++ * Mutually-exclusive module options to select receive data path:
++ * rx_copy : Packets are copied by network backend into local memory
++ * rx_flip : Page containing packet data is transferred to our ownership
++ * For fully-virtualised guests there is no option - copying must be used.
++ * For paravirtualised guests, flipping is the default.
++ */
++#ifdef CONFIG_XEN
++static int MODPARM_rx_copy = 0;
++module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
++MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
++static int MODPARM_rx_flip = 0;
++module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
++MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
++#else
++static const int MODPARM_rx_copy = 1;
++static const int MODPARM_rx_flip = 0;
++#endif
++
++#define RX_COPY_THRESHOLD 256
++
++/* If we don't have GSO, fake things up so that we never try to use it. */
++#if defined(NETIF_F_GSO)
++#define HAVE_GSO 1
++#define HAVE_TSO 1 /* TSO is a subset of GSO */
++#define HAVE_CSUM_OFFLOAD 1
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all GSO bits except ROBUST. */
++ dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
++ dev->features |= NETIF_F_GSO_ROBUST;
++}
++#elif defined(NETIF_F_TSO)
++#define HAVE_GSO 0
++#define HAVE_TSO 1
++
++/* Some older kernels cannot cope with incorrect checksums,
++ * particularly in netfilter. I'm not sure there is 100% correlation
++ * with the presence of NETIF_F_TSO but it appears to be a good first
++ * approximation.
++ */
++#define HAVE_CSUM_OFFLOAD 0
++
++#define gso_size tso_size
++#define gso_segs tso_segs
++static inline void dev_disable_gso_features(struct net_device *dev)
++{
++ /* Turn off all TSO bits. */
++ dev->features &= ~NETIF_F_TSO;
++}
++static inline int skb_is_gso(const struct sk_buff *skb)
++{
++ return skb_shinfo(skb)->tso_size;
++}
++static inline int skb_gso_ok(struct sk_buff *skb, int features)
++{
++ return (features & NETIF_F_TSO);
++}
++
++static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
++{
++ return skb_is_gso(skb) &&
++ (!skb_gso_ok(skb, dev->features) ||
++ unlikely(skb->ip_summed != CHECKSUM_HW));
++}
++#else
++#define HAVE_GSO 0
++#define HAVE_TSO 0
++#define HAVE_CSUM_OFFLOAD 0
++#define netif_needs_gso(dev, skb) 0
++#define dev_disable_gso_features(dev) ((void)0)
++#define ethtool_op_set_tso(dev, data) (-ENOSYS)
++#endif
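++
++/*
++ * With the fall-back definitions above, the rest of this file can use
++ * the GSO/TSO helpers unconditionally, whatever the kernel version.
++ */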
++
++#define GRANT_INVALID_REF 0
++
++struct netfront_rx_info {
++ struct netif_rx_response rx;
++ struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
++};
++
++/*
++ * Implement our own carrier flag: the network stack's version causes delays
++ * when the carrier is re-enabled (in particular, dev_activate() may not
++ * immediately be called, which can cause packet loss).
++ */
++#define netfront_carrier_on(netif) ((netif)->carrier = 1)
++#define netfront_carrier_off(netif) ((netif)->carrier = 0)
++#define netfront_carrier_ok(netif) ((netif)->carrier)
++
++/*
++ * Access macros for acquiring/freeing slots in tx_skbs[].
++ */
++
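++/*
++ * The free list is threaded through tx_skbs[] itself: entry 0 holds
++ * the index of the first free slot (cast to a pointer), and each free
++ * slot stores the index of the next. Real skb pointers are always
++ * >= PAGE_OFFSET, so the two kinds of entry can be told apart (see
++ * the recovery comments in network_connect()).
++ */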
++static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
++{
++ list[id] = list[0];
++ list[0] = (void *)(unsigned long)id;
++}
++
++static inline unsigned short get_id_from_freelist(struct sk_buff **list)
++{
++ unsigned int id = (unsigned int)(unsigned long)list[0];
++ list[0] = list[id];
++ return id;
++}
++
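++/* NET_RX_RING_SIZE is a power of two, so the mask yields the ring slot. */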
++static inline int xennet_rxidx(RING_IDX idx)
++{
++ return idx & (NET_RX_RING_SIZE - 1);
++}
++
++static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ struct sk_buff *skb = np->rx_skbs[i];
++ np->rx_skbs[i] = NULL;
++ return skb;
++}
++
++static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
++ RING_IDX ri)
++{
++ int i = xennet_rxidx(ri);
++ grant_ref_t ref = np->grant_rx_ref[i];
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ return ref;
++}
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("netfront (%s:%d) " fmt, \
++ __FUNCTION__, __LINE__, ##args)
++#define IPRINTK(fmt, args...) \
++ printk(KERN_INFO "netfront: " fmt, ##args)
++#define WPRINTK(fmt, args...) \
++ printk(KERN_WARNING "netfront: " fmt, ##args)
++
++static int setup_device(struct xenbus_device *, struct netfront_info *);
++static struct net_device *create_netdev(struct xenbus_device *);
++
++static void end_access(int, void *);
++static void netif_disconnect_backend(struct netfront_info *);
++
++static int network_connect(struct net_device *);
++static void network_tx_buf_gc(struct net_device *);
++static void network_alloc_rx_buffers(struct net_device *);
++static void send_fake_arp(struct net_device *);
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs);
++
++#ifdef CONFIG_SYSFS
++static int xennet_sysfs_addif(struct net_device *netdev);
++static void xennet_sysfs_delif(struct net_device *netdev);
++#else /* !CONFIG_SYSFS */
++#define xennet_sysfs_addif(dev) (0)
++#define xennet_sysfs_delif(dev) do { } while(0)
++#endif
++
++static inline int xennet_can_sg(struct net_device *dev)
++{
++ return dev->features & NETIF_F_SG;
++}
++
++/**
++ * Entry point to this code when a new device is created. Allocate the basic
++ * structures and the ring buffers for communication with the backend, and
++ * inform the backend of the appropriate details for those.
++ */
++static int __devinit netfront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct net_device *netdev;
++ struct netfront_info *info;
++
++ netdev = create_netdev(dev);
++ if (IS_ERR(netdev)) {
++ err = PTR_ERR(netdev);
++ xenbus_dev_fatal(dev, err, "creating netdev");
++ return err;
++ }
++
++ info = netdev_priv(netdev);
++ dev->dev.driver_data = info;
++
++ err = register_netdev(info->netdev);
++ if (err) {
++ printk(KERN_WARNING "%s: register_netdev err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ err = xennet_sysfs_addif(info->netdev);
++ if (err) {
++ unregister_netdev(info->netdev);
++ printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ return 0;
++
++ fail:
++ free_netdev(netdev);
++ dev->dev.driver_data = NULL;
++ return err;
++}
++
++static int __devexit netfront_remove(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netfront_accelerator_call_remove(info, dev);
++
++ netif_disconnect_backend(info);
++
++ del_timer_sync(&info->rx_refill_timer);
++
++ xennet_sysfs_delif(info->netdev);
++
++ unregister_netdev(info->netdev);
++
++ free_netdev(info->netdev);
++
++ return 0;
++}
++
++
++static int netfront_suspend(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++ return netfront_accelerator_suspend(info, dev);
++}
++
++
++static int netfront_suspend_cancel(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++ return netfront_accelerator_suspend_cancel(info, dev);
++}
++
++
++/**
++ * We are reconnecting to the backend, due to a suspend/resume, or a backend
++ * driver restart. We tear down our netif structure and recreate it, but
++ * leave the device-layer structures intact so that this is transparent to the
++ * rest of the kernel.
++ */
++static int netfront_resume(struct xenbus_device *dev)
++{
++ struct netfront_info *info = dev->dev.driver_data;
++
++ DPRINTK("%s\n", dev->nodename);
++
++ netfront_accelerator_resume(info, dev);
++
++ netif_disconnect_backend(info);
++ return 0;
++}
++
++static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++
++/* Common code used when first setting up, and when resuming. */
++static int talk_to_backend(struct xenbus_device *dev,
++ struct netfront_info *info)
++{
++ const char *message;
++ struct xenbus_transaction xbt;
++ int err;
++
++ /* Read mac only in the first setup. */
++ if (!is_valid_ether_addr(info->mac)) {
++ err = xen_net_read_mac(dev, info->mac);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "parsing %s/mac",
++ dev->nodename);
++ goto out;
++ }
++ }
++
++ /* Create shared ring, alloc event channel. */
++ err = setup_device(dev, info);
++ if (err)
++ goto out;
++
++ /* This will load an accelerator if one is configured when the
++ * watch fires */
++ netfront_accelerator_add_watch(info);
++
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "starting transaction");
++ goto destroy_ring;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
++ info->tx_ring_ref);
++ if (err) {
++ message = "writing tx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
++ info->rx_ring_ref);
++ if (err) {
++ message = "writing rx ring-ref";
++ goto abort_transaction;
++ }
++ err = xenbus_printf(xbt, dev->nodename,
++ "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++ if (err) {
++ message = "writing event-channel";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
++ info->copying_receiver);
++ if (err) {
++ message = "writing request-rx-copy";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
++ if (err) {
++ message = "writing feature-rx-notify";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
++ "%d", !HAVE_CSUM_OFFLOAD);
++ if (err) {
++ message = "writing feature-no-csum-offload";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
++ if (err) {
++ message = "writing feature-sg";
++ goto abort_transaction;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d",
++ HAVE_TSO);
++ if (err) {
++ message = "writing feature-gso-tcpv4";
++ goto abort_transaction;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto destroy_ring;
++ }
++
++ return 0;
++
++ abort_transaction:
++ xenbus_transaction_end(xbt, 1);
++ xenbus_dev_fatal(dev, err, "%s", message);
++ destroy_ring:
++ netfront_accelerator_call_remove(info, dev);
++ netif_disconnect_backend(info);
++ out:
++ return err;
++}
++
++static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
++{
++ struct netif_tx_sring *txs;
++ struct netif_rx_sring *rxs;
++ int err;
++ struct net_device *netdev = info->netdev;
++
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->rx.sring = NULL;
++ info->tx.sring = NULL;
++ info->irq = 0;
++
++ txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++ if (!txs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating tx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(txs);
++ FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(txs));
++ if (err < 0) {
++ free_page((unsigned long)txs);
++ goto fail;
++ }
++ info->tx_ring_ref = err;
++
++ rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
++ if (!rxs) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err, "allocating rx ring page");
++ goto fail;
++ }
++ SHARED_RING_INIT(rxs);
++ FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
++ if (err < 0) {
++ free_page((unsigned long)rxs);
++ goto fail;
++ }
++ info->rx_ring_ref = err;
++
++ memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, netif_int, SA_SAMPLE_RANDOM, netdev->name,
++ netdev);
++ if (err < 0)
++ goto fail;
++ info->irq = err;
++
++ return 0;
++
++ fail:
++ return err;
++}
++
++/**
++ * Callback received when the backend's state changes.
++ */
++static void backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct netfront_info *np = dev->dev.driver_data;
++ struct net_device *netdev = np->netdev;
++
++ DPRINTK("%s\n", xenbus_strstate(backend_state));
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ case XenbusStateConnected:
++ case XenbusStateReconfiguring:
++ case XenbusStateReconfigured:
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitWait:
++ if (dev->state != XenbusStateInitialising)
++ break;
++ if (network_connect(netdev) != 0)
++ break;
++ xenbus_switch_state(dev, XenbusStateConnected);
++ send_fake_arp(netdev);
++ break;
++
++ case XenbusStateClosing:
++ xenbus_frontend_closed(dev);
++ break;
++ }
++}
++
++/* Send a packet on a net device to encourage switches to learn the
++ * MAC. We send a gratuitous ARP reply.
++ *
++ * @param dev device
++ */
++static void send_fake_arp(struct net_device *dev)
++{
++#ifdef CONFIG_INET
++ struct sk_buff *skb;
++ u32 src_ip, dst_ip;
++
++ dst_ip = INADDR_BROADCAST;
++ src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
++
++ /* No IP? Then nothing to do. */
++ if (src_ip == 0)
++ return;
++
++ skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
++ dst_ip, dev, src_ip,
++ /*dst_hw*/ NULL, /*src_hw*/ NULL,
++ /*target_hw*/ dev->dev_addr);
++ if (skb == NULL)
++ return;
++
++ dev_queue_xmit(skb);
++#endif
++}
++
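++/*
++ * Leave slack in the TX ring for a maximally-fragmented skb:
++ * MAX_SKB_FRAGS fragment slots plus a couple more for the linear
++ * header and extra-info requests.
++ */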
++static inline int netfront_tx_slot_available(struct netfront_info *np)
++{
++ return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
++ (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
++}
++
++
++static inline void network_maybe_wake_tx(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ if (unlikely(netif_queue_stopped(dev)) &&
++ netfront_tx_slot_available(np) &&
++ likely(netif_running(dev)) &&
++ netfront_check_accelerator_queue_ready(dev, np))
++ netif_wake_queue(dev);
++}
++
++
++int netfront_check_queue_ready(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ return unlikely(netif_queue_stopped(dev)) &&
++ netfront_tx_slot_available(np) &&
++ likely(netif_running(dev));
++}
++EXPORT_SYMBOL(netfront_check_queue_ready);
++
++
++static int network_open(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ memset(&np->stats, 0, sizeof(np->stats));
++
++ spin_lock_bh(&np->rx_lock);
++ if (netfront_carrier_ok(np)) {
++ network_alloc_rx_buffers(dev);
++ np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
++		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++ }
++ }
++ spin_unlock_bh(&np->rx_lock);
++
++ network_maybe_wake_tx(dev);
++
++ return 0;
++}
++
++static void network_tx_buf_gc(struct net_device *dev)
++{
++ RING_IDX cons, prod;
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++
++ BUG_ON(!netfront_carrier_ok(np));
++
++ do {
++ prod = np->tx.sring->rsp_prod;
++ rmb(); /* Ensure we see responses up to 'rp'. */
++
++ for (cons = np->tx.rsp_cons; cons != prod; cons++) {
++ struct netif_tx_response *txrsp;
++
++ txrsp = RING_GET_RESPONSE(&np->tx, cons);
++ if (txrsp->status == NETIF_RSP_NULL)
++ continue;
++
++ id = txrsp->id;
++ skb = np->tx_skbs[id];
++ if (unlikely(gnttab_query_foreign_access(
++ np->grant_tx_ref[id]) != 0)) {
++ printk(KERN_ALERT "network_tx_buf_gc: warning "
++ "-- grant still in use by backend "
++ "domain.\n");
++ BUG();
++ }
++ gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[id]);
++ np->grant_tx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, id);
++ dev_kfree_skb_irq(skb);
++ }
++
++ np->tx.rsp_cons = prod;
++
++ /*
++ * Set a new event, then check for race with update of tx_cons.
++ * Note that it is essential to schedule a callback, no matter
++ * how few buffers are pending. Even if there is space in the
++ * transmit ring, higher layers may be blocked because too much
++ * data is outstanding: in such cases notification from Xen is
++ * likely to be the only kick that we'll get.
++ */
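++		/* I.e. ask for an event about half-way to the request producer. */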
++ np->tx.sring->rsp_event =
++ prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
++ mb();
++ } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
++
++ network_maybe_wake_tx(dev);
++}
++
++static void rx_refill_timeout(unsigned long data)
++{
++ struct net_device *dev = (struct net_device *)data;
++ struct netfront_info *np = netdev_priv(dev);
++
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++}
++
++static void network_alloc_rx_buffers(struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct page *page;
++ int i, batch_target, notify;
++ RING_IDX req_prod = np->rx.req_prod_pvt;
++ struct xen_memory_reservation reservation;
++ grant_ref_t ref;
++ unsigned long pfn;
++ void *vaddr;
++ int nr_flips;
++ netif_rx_request_t *req;
++
++ if (unlikely(!netfront_carrier_ok(np)))
++ return;
++
++ /*
++ * Allocate skbuffs greedily, even though we batch updates to the
++ * receive ring. This creates a less bursty demand on the memory
++ * allocator, so should reduce the chance of failed allocation requests
++	 * both for ourselves and for other kernel subsystems.
++ */
++ batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
++ for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
++ /*
++ * Allocate an skb and a page. Do not use __dev_alloc_skb as
++ * that will allocate page-sized buffers which is not
++ * necessary here.
++ * 16 bytes added as necessary headroom for netif_receive_skb.
++ */
++ skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
++ GFP_ATOMIC | __GFP_NOWARN);
++ if (unlikely(!skb))
++ goto no_skb;
++
++ page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
++ if (!page) {
++ kfree_skb(skb);
++no_skb:
++ /* Any skbuffs queued for refill? Force them out. */
++ if (i != 0)
++ goto refill;
++ /* Could not allocate any skbuffs. Try again later. */
++ mod_timer(&np->rx_refill_timer,
++ jiffies + (HZ/10));
++ break;
++ }
++
++ skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
++ skb_shinfo(skb)->frags[0].page = page;
++ skb_shinfo(skb)->nr_frags = 1;
++ __skb_queue_tail(&np->rx_batch, skb);
++ }
++
++ /* Is the batch large enough to be worthwhile? */
++ if (i < (np->rx_target/2)) {
++ if (req_prod > np->rx.sring->req_prod)
++ goto push;
++ return;
++ }
++
++ /* Adjust our fill target if we risked running out of buffers. */
++ if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
++ ((np->rx_target *= 2) > np->rx_max_target))
++ np->rx_target = np->rx_max_target;
++
++ refill:
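++	/*
++	 * Grant each buffer to the backend either by page transfer (flip)
++	 * or by memory access (copy), according to the negotiated
++	 * receive mode.
++	 */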
++ for (nr_flips = i = 0; ; i++) {
++ if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
++ break;
++
++ skb->dev = dev;
++
++ id = xennet_rxidx(req_prod + i);
++
++ BUG_ON(np->rx_skbs[id]);
++ np->rx_skbs[id] = skb;
++
++ ref = gnttab_claim_grant_reference(&np->gref_rx_head);
++ BUG_ON((signed short)ref < 0);
++ np->grant_rx_ref[id] = ref;
++
++ pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
++ vaddr = page_address(skb_shinfo(skb)->frags[0].page);
++
++ req = RING_GET_REQUEST(&np->rx, req_prod + i);
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(ref,
++ np->xbdev->otherend_id,
++ pfn);
++ np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remove this page before passing
++ * back to Xen. */
++ set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
++ MULTI_update_va_mapping(np->rx_mcl+i,
++ (unsigned long)vaddr,
++ __pte(0), 0);
++ }
++ nr_flips++;
++ } else {
++ gnttab_grant_foreign_access_ref(ref,
++ np->xbdev->otherend_id,
++ pfn_to_mfn(pfn),
++ 0);
++ }
++
++ req->id = id;
++ req->gref = ref;
++ }
++
++	if (nr_flips != 0) {
++		/* Tell the balloon driver what is going on. */
++ balloon_update_driver_allowance(i);
++
++ set_xen_guest_handle(reservation.extent_start,
++ np->rx_pfn_array);
++ reservation.nr_extents = nr_flips;
++ reservation.extent_order = 0;
++ reservation.address_bits = 0;
++ reservation.domid = DOMID_SELF;
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* After all PTEs have been zapped, flush the TLB. */
++ np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
++ UVMF_TLB_FLUSH|UVMF_ALL;
++
++ /* Give away a batch of pages. */
++ np->rx_mcl[i].op = __HYPERVISOR_memory_op;
++ np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
++ np->rx_mcl[i].args[1] = (unsigned long)&reservation;
++
++ /* Zap PTEs and give away pages in one big
++ * multicall. */
++ if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
++ BUG();
++
++ /* Check return status of HYPERVISOR_memory_op(). */
++ if (unlikely(np->rx_mcl[i].result != i))
++ panic("Unable to reduce memory reservation\n");
++ while (nr_flips--)
++ BUG_ON(np->rx_mcl[nr_flips].result);
++ } else {
++ if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
++ &reservation) != i)
++ panic("Unable to reduce memory reservation\n");
++ }
++ } else {
++ wmb();
++ }
++
++ /* Above is a suitable barrier to ensure backend will see requests. */
++ np->rx.req_prod_pvt = req_prod + i;
++ push:
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++}
++
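++/*
++ * Emit further tx requests for the rest of the linear area (where it
++ * crosses page boundaries) and for each fragment, granting every page
++ * to the backend.
++ */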
++static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
++ struct netif_tx_request *tx)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ char *data = skb->data;
++ unsigned long mfn;
++ RING_IDX prod = np->tx.req_prod_pvt;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++ unsigned int id;
++ grant_ref_t ref;
++ int i;
++
++ while (len > PAGE_SIZE - offset) {
++ tx->size = PAGE_SIZE - offset;
++ tx->flags |= NETTXF_more_data;
++ len -= tx->size;
++ data += tx->size;
++ offset = 0;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GTF_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++ tx->flags = 0;
++ }
++
++ for (i = 0; i < frags; i++) {
++ skb_frag_t *frag = skb_shinfo(skb)->frags + i;
++
++ tx->flags |= NETTXF_more_data;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb_get(skb);
++ tx = RING_GET_REQUEST(&np->tx, prod++);
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++
++ mfn = pfn_to_mfn(page_to_pfn(frag->page));
++ gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
++ mfn, GTF_readonly);
++
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = frag->page_offset;
++ tx->size = frag->size;
++ tx->flags = 0;
++ }
++
++ np->tx.req_prod_pvt = prod;
++}
++
++static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
++{
++ unsigned short id;
++ struct netfront_info *np = netdev_priv(dev);
++ struct netif_tx_request *tx;
++ struct netif_extra_info *extra;
++ char *data = skb->data;
++ RING_IDX i;
++ grant_ref_t ref;
++ unsigned long mfn;
++ int notify;
++ int frags = skb_shinfo(skb)->nr_frags;
++ unsigned int offset = offset_in_page(data);
++ unsigned int len = skb_headlen(skb);
++
++ /* Check the fast path, if hooks are available */
++ if (np->accel_vif_state.hooks &&
++ np->accel_vif_state.hooks->start_xmit(skb, dev)) {
++ /* Fast path has sent this packet */
++ return 0;
++ }
++
++ frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
++ if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
++ printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
++ frags);
++ dump_stack();
++ goto drop;
++ }
++
++ spin_lock_irq(&np->tx_lock);
++
++ if (unlikely(!netfront_carrier_ok(np) ||
++ (frags > 1 && !xennet_can_sg(dev)) ||
++ netif_needs_gso(dev, skb))) {
++ spin_unlock_irq(&np->tx_lock);
++ goto drop;
++ }
++
++ i = np->tx.req_prod_pvt;
++
++ id = get_id_from_freelist(np->tx_skbs);
++ np->tx_skbs[id] = skb;
++
++ tx = RING_GET_REQUEST(&np->tx, i);
++
++ tx->id = id;
++ ref = gnttab_claim_grant_reference(&np->gref_tx_head);
++ BUG_ON((signed short)ref < 0);
++ mfn = virt_to_mfn(data);
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id, mfn, GTF_readonly);
++ tx->gref = np->grant_tx_ref[id] = ref;
++ tx->offset = offset;
++ tx->size = len;
++
++ tx->flags = 0;
++ extra = NULL;
++
++ if (skb->ip_summed == CHECKSUM_HW) /* local packet? */
++ tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
++#ifdef CONFIG_XEN
++ if (skb->proto_data_valid) /* remote but checksummed? */
++ tx->flags |= NETTXF_data_validated;
++#endif
++
++#if HAVE_TSO
++ if (skb_shinfo(skb)->gso_size) {
++ struct netif_extra_info *gso = (struct netif_extra_info *)
++ RING_GET_REQUEST(&np->tx, ++i);
++
++ if (extra)
++ extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
++ else
++ tx->flags |= NETTXF_extra_info;
++
++ gso->u.gso.size = skb_shinfo(skb)->gso_size;
++ gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
++ gso->u.gso.pad = 0;
++ gso->u.gso.features = 0;
++
++ gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
++ gso->flags = 0;
++ extra = gso;
++ }
++#endif
++
++ np->tx.req_prod_pvt = i + 1;
++
++ xennet_make_frags(skb, dev, tx);
++ tx->size = skb->len;
++
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
++ if (notify)
++ notify_remote_via_irq(np->irq);
++
++ np->stats.tx_bytes += skb->len;
++ np->stats.tx_packets++;
++ dev->trans_start = jiffies;
++
++ /* Note: It is not safe to access skb after network_tx_buf_gc()! */
++ network_tx_buf_gc(dev);
++
++ if (!netfront_tx_slot_available(np))
++ netif_stop_queue(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++
++ return 0;
++
++ drop:
++ np->stats.tx_dropped++;
++ dev_kfree_skb(skb);
++ return 0;
++}
++
++static irqreturn_t netif_int(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ struct net_device *dev = dev_id;
++ struct netfront_info *np = netdev_priv(dev);
++ unsigned long flags;
++
++ spin_lock_irqsave(&np->tx_lock, flags);
++
++ if (likely(netfront_carrier_ok(np))) {
++ network_tx_buf_gc(dev);
++ /* Under tx_lock: protects access to rx shared-ring indexes. */
++ if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
++ netfront_accelerator_call_stop_napi_irq(np, dev);
++
++ netif_rx_schedule(dev);
++ dev->last_rx = jiffies;
++ }
++ }
++
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++
++ return IRQ_HANDLED;
++}
++
++static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
++ grant_ref_t ref)
++{
++ int new = xennet_rxidx(np->rx.req_prod_pvt);
++
++ BUG_ON(np->rx_skbs[new]);
++ np->rx_skbs[new] = skb;
++ np->grant_rx_ref[new] = ref;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
++ RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
++ np->rx.req_prod_pvt++;
++}
++
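++/*
++ * Consume the extra-info slots that follow a response, saving them by
++ * type into extras[]; each consumed ring slot is recycled through
++ * xennet_move_rx_slot().
++ */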
++int xennet_get_extras(struct netfront_info *np,
++ struct netif_extra_info *extras, RING_IDX rp)
++
++{
++ struct netif_extra_info *extra;
++ RING_IDX cons = np->rx.rsp_cons;
++ int err = 0;
++
++ do {
++ struct sk_buff *skb;
++ grant_ref_t ref;
++
++ if (unlikely(cons + 1 == rp)) {
++ if (net_ratelimit())
++ WPRINTK("Missing extra info\n");
++ err = -EBADR;
++ break;
++ }
++
++ extra = (struct netif_extra_info *)
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ if (unlikely(!extra->type ||
++ extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
++ if (net_ratelimit())
++ WPRINTK("Invalid extra type: %d\n",
++ extra->type);
++ err = -EINVAL;
++ } else {
++ memcpy(&extras[extra->type - 1], extra,
++ sizeof(*extra));
++ }
++
++ skb = xennet_get_rx_skb(np, cons);
++ ref = xennet_get_rx_ref(np, cons);
++ xennet_move_rx_slot(np, skb, ref);
++ } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
++
++ np->rx.rsp_cons = cons;
++ return err;
++}
++
++static int xennet_get_responses(struct netfront_info *np,
++ struct netfront_rx_info *rinfo, RING_IDX rp,
++ struct sk_buff_head *list,
++ int *pages_flipped_p)
++{
++ int pages_flipped = *pages_flipped_p;
++ struct mmu_update *mmu;
++ struct multicall_entry *mcl;
++ struct netif_rx_response *rx = &rinfo->rx;
++ struct netif_extra_info *extras = rinfo->extras;
++ RING_IDX cons = np->rx.rsp_cons;
++ struct sk_buff *skb = xennet_get_rx_skb(np, cons);
++ grant_ref_t ref = xennet_get_rx_ref(np, cons);
++ int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
++ int frags = 1;
++ int err = 0;
++ unsigned long ret;
++
++ if (rx->flags & NETRXF_extra_info) {
++ err = xennet_get_extras(np, extras, rp);
++ cons = np->rx.rsp_cons;
++ }
++
++ for (;;) {
++ unsigned long mfn;
++
++ if (unlikely(rx->status < 0 ||
++ rx->offset + rx->status > PAGE_SIZE)) {
++ if (net_ratelimit())
++ WPRINTK("rx->offset: %x, size: %u\n",
++ rx->offset, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -EINVAL;
++ goto next;
++ }
++
++ /*
++ * This definitely indicates a bug, either in this driver or in
++ * the backend driver. In future this should flag the bad
++		 * situation to the system controller to reboot the backend.
++ */
++ if (ref == GRANT_INVALID_REF) {
++ if (net_ratelimit())
++ WPRINTK("Bad rx response id %d.\n", rx->id);
++ err = -EINVAL;
++ goto next;
++ }
++
++ if (!np->copying_receiver) {
++ /* Memory pressure, insufficient buffer
++ * headroom, ... */
++ if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
++ if (net_ratelimit())
++ WPRINTK("Unfulfilled rx req "
++ "(id=%d, st=%d).\n",
++ rx->id, rx->status);
++ xennet_move_rx_slot(np, skb, ref);
++ err = -ENOMEM;
++ goto next;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page =
++ skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ mcl = np->rx_mcl + pages_flipped;
++ mmu = np->rx_mmu + pages_flipped;
++
++ MULTI_update_va_mapping(mcl,
++ (unsigned long)vaddr,
++ pfn_pte_ma(mfn,
++ PAGE_KERNEL),
++ 0);
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ pages_flipped++;
++ } else {
++ ret = gnttab_end_foreign_access_ref(ref);
++ BUG_ON(!ret);
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++
++ __skb_queue_tail(list, skb);
++
++next:
++ if (!(rx->flags & NETRXF_more_data))
++ break;
++
++ if (cons + frags == rp) {
++ if (net_ratelimit())
++ WPRINTK("Need more frags\n");
++ err = -ENOENT;
++ break;
++ }
++
++ rx = RING_GET_RESPONSE(&np->rx, cons + frags);
++ skb = xennet_get_rx_skb(np, cons + frags);
++ ref = xennet_get_rx_ref(np, cons + frags);
++ frags++;
++ }
++
++ if (unlikely(frags > max)) {
++ if (net_ratelimit())
++ WPRINTK("Too many frags\n");
++ err = -E2BIG;
++ }
++
++ if (unlikely(err))
++ np->rx.rsp_cons = cons + frags;
++
++ *pages_flipped_p = pages_flipped;
++
++ return err;
++}
++
++static RING_IDX xennet_fill_frags(struct netfront_info *np,
++ struct sk_buff *skb,
++ struct sk_buff_head *list)
++{
++ struct skb_shared_info *shinfo = skb_shinfo(skb);
++ int nr_frags = shinfo->nr_frags;
++ RING_IDX cons = np->rx.rsp_cons;
++ skb_frag_t *frag = shinfo->frags + nr_frags;
++ struct sk_buff *nskb;
++
++ while ((nskb = __skb_dequeue(list))) {
++ struct netif_rx_response *rx =
++ RING_GET_RESPONSE(&np->rx, ++cons);
++
++ frag->page = skb_shinfo(nskb)->frags[0].page;
++ frag->page_offset = rx->offset;
++ frag->size = rx->status;
++
++ skb->data_len += rx->status;
++
++ skb_shinfo(nskb)->nr_frags = 0;
++ kfree_skb(nskb);
++
++ frag++;
++ nr_frags++;
++ }
++
++ shinfo->nr_frags = nr_frags;
++ return cons;
++}
++
++static int xennet_set_skb_gso(struct sk_buff *skb,
++ struct netif_extra_info *gso)
++{
++ if (!gso->u.gso.size) {
++ if (net_ratelimit())
++ WPRINTK("GSO size must not be zero.\n");
++ return -EINVAL;
++ }
++
++ /* Currently only TCPv4 S.O. is supported. */
++ if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
++ if (net_ratelimit())
++ WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
++ return -EINVAL;
++ }
++
++#if HAVE_TSO
++ skb_shinfo(skb)->gso_size = gso->u.gso.size;
++#if HAVE_GSO
++ skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
++
++ /* Header must be checked, and gso_segs computed. */
++ skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
++#endif
++ skb_shinfo(skb)->gso_segs = 0;
++
++ return 0;
++#else
++ if (net_ratelimit())
++ WPRINTK("GSO unsupported by this kernel.\n");
++ return -EINVAL;
++#endif
++}
++
++static int netif_poll(struct net_device *dev, int *pbudget)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ struct sk_buff *skb;
++ struct netfront_rx_info rinfo;
++ struct netif_rx_response *rx = &rinfo.rx;
++ struct netif_extra_info *extras = rinfo.extras;
++ RING_IDX i, rp;
++ struct multicall_entry *mcl;
++ int work_done, budget, more_to_do = 1, accel_more_to_do = 1;
++ struct sk_buff_head rxq;
++ struct sk_buff_head errq;
++ struct sk_buff_head tmpq;
++ unsigned long flags;
++ unsigned int len;
++ int pages_flipped = 0;
++ int err;
++
++ spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
++
++ if (unlikely(!netfront_carrier_ok(np))) {
++ spin_unlock(&np->rx_lock);
++ return 0;
++ }
++
++ skb_queue_head_init(&rxq);
++ skb_queue_head_init(&errq);
++ skb_queue_head_init(&tmpq);
++
++ if ((budget = *pbudget) > dev->quota)
++ budget = dev->quota;
++ rp = np->rx.sring->rsp_prod;
++ rmb(); /* Ensure we see queued responses up to 'rp'. */
++
++ i = np->rx.rsp_cons;
++ work_done = 0;
++ while ((i != rp) && (work_done < budget)) {
++ memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
++ memset(extras, 0, sizeof(rinfo.extras));
++
++ err = xennet_get_responses(np, &rinfo, rp, &tmpq,
++ &pages_flipped);
++
++ if (unlikely(err)) {
++err:
++ while ((skb = __skb_dequeue(&tmpq)))
++ __skb_queue_tail(&errq, skb);
++ np->stats.rx_errors++;
++ i = np->rx.rsp_cons;
++ continue;
++ }
++
++ skb = __skb_dequeue(&tmpq);
++
++ if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
++ struct netif_extra_info *gso;
++ gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
++
++ if (unlikely(xennet_set_skb_gso(skb, gso))) {
++ __skb_queue_head(&tmpq, skb);
++ np->rx.rsp_cons += skb_queue_len(&tmpq);
++ goto err;
++ }
++ }
++
++ NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
++ NETFRONT_SKB_CB(skb)->offset = rx->offset;
++
++ len = rx->status;
++ if (len > RX_COPY_THRESHOLD)
++ len = RX_COPY_THRESHOLD;
++ skb_put(skb, len);
++
++ if (rx->status > len) {
++ skb_shinfo(skb)->frags[0].page_offset =
++ rx->offset + len;
++ skb_shinfo(skb)->frags[0].size = rx->status - len;
++ skb->data_len = rx->status - len;
++ } else {
++ skb_shinfo(skb)->frags[0].page = NULL;
++ skb_shinfo(skb)->nr_frags = 0;
++ }
++
++ i = xennet_fill_frags(np, skb, &tmpq);
++
++ /*
++		 * Truesize must approximate the size of true data plus
++ * any supervisor overheads. Adding hypervisor overheads
++ * has been shown to significantly reduce achievable
++ * bandwidth with the default receive buffer size. It is
++ * therefore not wise to account for it here.
++ *
++ * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
++ * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
++ * add the size of the data pulled in xennet_fill_frags().
++ *
++ * We also adjust for any unused space in the main data
++ * area by subtracting (RX_COPY_THRESHOLD - len). This is
++ * especially important with drivers which split incoming
++ * packets into header and data, using only 66 bytes of
++		 * the main data area (see the e1000 driver for example).
++		 * On such systems, without this last adjustment, our
++		 * achievable receive throughput using the standard receive
++ * buffer size was cut by 25%(!!!).
++ */
++ skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
++ skb->len += skb->data_len;
++
++ /*
++ * Old backends do not assert data_validated but we
++ * can infer it from csum_blank so test both flags.
++ */
++ if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++ else
++ skb->ip_summed = CHECKSUM_NONE;
++#ifdef CONFIG_XEN
++ skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
++ skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
++#endif
++ np->stats.rx_packets++;
++ np->stats.rx_bytes += skb->len;
++
++ __skb_queue_tail(&rxq, skb);
++
++ np->rx.rsp_cons = ++i;
++ work_done++;
++ }
++
++ if (pages_flipped) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-pages_flipped);
++
++ /* Do all the remapping work and M2P updates. */
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ mcl = np->rx_mcl + pages_flipped;
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = pages_flipped;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ err = HYPERVISOR_multicall_check(np->rx_mcl,
++ pages_flipped + 1,
++ NULL);
++ BUG_ON(err);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&errq)))
++ kfree_skb(skb);
++
++ while ((skb = __skb_dequeue(&rxq)) != NULL) {
++ struct page *page = NETFRONT_SKB_CB(skb)->page;
++ void *vaddr = page_address(page);
++ unsigned offset = NETFRONT_SKB_CB(skb)->offset;
++
++ memcpy(skb->data, vaddr + offset, skb_headlen(skb));
++
++ if (page != skb_shinfo(skb)->frags[0].page)
++ __free_page(page);
++
++ /* Ethernet work: Delayed to here as it peeks the header. */
++ skb->protocol = eth_type_trans(skb, dev);
++
++ /* Pass it up. */
++ netif_receive_skb(skb);
++ dev->last_rx = jiffies;
++ }
++
++ /* If we get a callback with very few responses, reduce fill target. */
++ /* NB. Note exponential increase, linear decrease. */
++ if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
++ ((3*np->rx_target) / 4)) &&
++ (--np->rx_target < np->rx_min_target))
++ np->rx_target = np->rx_min_target;
++
++ network_alloc_rx_buffers(dev);
++
++ if (work_done < budget) {
++ /* there's some spare capacity, try the accelerated path */
++ int accel_budget = budget - work_done;
++ int accel_budget_start = accel_budget;
++
++ if (np->accel_vif_state.hooks) {
++ accel_more_to_do =
++ np->accel_vif_state.hooks->netdev_poll
++ (dev, &accel_budget);
++ work_done += (accel_budget_start - accel_budget);
++ } else
++ accel_more_to_do = 0;
++ }
++
++ *pbudget -= work_done;
++ dev->quota -= work_done;
++
++ if (work_done < budget) {
++ local_irq_save(flags);
++
++ RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
++
++ if (!more_to_do && !accel_more_to_do &&
++ np->accel_vif_state.hooks) {
++ /*
++ * Slow path has nothing more to do, see if
++ * fast path is likewise
++ */
++ accel_more_to_do =
++ np->accel_vif_state.hooks->start_napi_irq(dev);
++ }
++
++ if (!more_to_do && !accel_more_to_do)
++ __netif_rx_complete(dev);
++
++ local_irq_restore(flags);
++ }
++
++ spin_unlock(&np->rx_lock);
++
++ return more_to_do | accel_more_to_do;
++}
++
++static void netif_release_tx_bufs(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i;
++
++ for (i = 1; i <= NET_TX_RING_SIZE; i++) {
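++		/* Entries below PAGE_OFFSET are freelist indices, not skbs. */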
++ if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
++ continue;
++
++ skb = np->tx_skbs[i];
++ gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
++ gnttab_release_grant_reference(
++ &np->gref_tx_head, np->grant_tx_ref[i]);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->tx_skbs, i);
++ dev_kfree_skb_irq(skb);
++ }
++}
++
++static void netif_release_rx_bufs_flip(struct netfront_info *np)
++{
++ struct mmu_update *mmu = np->rx_mmu;
++ struct multicall_entry *mcl = np->rx_mcl;
++ struct sk_buff_head free_list;
++ struct sk_buff *skb;
++ unsigned long mfn;
++ int xfer = 0, noxfer = 0, unused = 0;
++ int id, ref, rc;
++
++ skb_queue_head_init(&free_list);
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (id = 0; id < NET_RX_RING_SIZE; id++) {
++ if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
++ unused++;
++ continue;
++ }
++
++ skb = np->rx_skbs[id];
++ mfn = gnttab_end_foreign_transfer_ref(ref);
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[id] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, id);
++
++ if (0 == mfn) {
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ balloon_release_driver_page(page);
++ skb_shinfo(skb)->nr_frags = 0;
++ dev_kfree_skb(skb);
++ noxfer++;
++ continue;
++ }
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Remap the page. */
++ struct page *page = skb_shinfo(skb)->frags[0].page;
++ unsigned long pfn = page_to_pfn(page);
++ void *vaddr = page_address(page);
++
++ MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
++ pfn_pte_ma(mfn, PAGE_KERNEL),
++ 0);
++ mcl++;
++ mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
++ | MMU_MACHPHYS_UPDATE;
++ mmu->val = pfn;
++ mmu++;
++
++ set_phys_to_machine(pfn, mfn);
++ }
++ __skb_queue_tail(&free_list, skb);
++ xfer++;
++ }
++
++ DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
++ __FUNCTION__, xfer, noxfer, unused);
++
++ if (xfer) {
++ /* Some pages are no longer absent... */
++ balloon_update_driver_allowance(-xfer);
++
++ if (!xen_feature(XENFEAT_auto_translated_physmap)) {
++ /* Do all the remapping work and M2P updates. */
++ mcl->op = __HYPERVISOR_mmu_update;
++ mcl->args[0] = (unsigned long)np->rx_mmu;
++ mcl->args[1] = mmu - np->rx_mmu;
++ mcl->args[2] = 0;
++ mcl->args[3] = DOMID_SELF;
++ mcl++;
++ rc = HYPERVISOR_multicall_check(
++ np->rx_mcl, mcl - np->rx_mcl, NULL);
++ BUG_ON(rc);
++ }
++ }
++
++ while ((skb = __skb_dequeue(&free_list)) != NULL)
++ dev_kfree_skb(skb);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
++static void netif_release_rx_bufs_copy(struct netfront_info *np)
++{
++ struct sk_buff *skb;
++ int i, ref;
++ int busy = 0, inuse = 0;
++
++ spin_lock_bh(&np->rx_lock);
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ ref = np->grant_rx_ref[i];
++
++ if (ref == GRANT_INVALID_REF)
++ continue;
++
++ inuse++;
++
++ skb = np->rx_skbs[i];
++
++		if (!gnttab_end_foreign_access_ref(ref)) {
++ busy++;
++ continue;
++ }
++
++ gnttab_release_grant_reference(&np->gref_rx_head, ref);
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ add_id_to_freelist(np->rx_skbs, i);
++
++ dev_kfree_skb(skb);
++ }
++
++ if (busy)
++ DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
++ __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
++
++ spin_unlock_bh(&np->rx_lock);
++}
++
++static int network_close(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_stop_queue(np->netdev);
++ return 0;
++}
++
++
++static struct net_device_stats *network_get_stats(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++
++ netfront_accelerator_call_get_stats(np, dev);
++ return &np->stats;
++}
++
++static int xennet_set_mac_address(struct net_device *dev, void *p)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ struct sockaddr *addr = p;
++
++ if (netif_running(dev))
++ return -EBUSY;
++
++ if (!is_valid_ether_addr(addr->sa_data))
++ return -EADDRNOTAVAIL;
++
++ memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
++ memcpy(np->mac, addr->sa_data, ETH_ALEN);
++
++ return 0;
++}
++
++static int xennet_change_mtu(struct net_device *dev, int mtu)
++{
++ int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
++
++ if (mtu > max)
++ return -EINVAL;
++ dev->mtu = mtu;
++ return 0;
++}
++
++static int xennet_set_sg(struct net_device *dev, u32 data)
++{
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
++ "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ } else if (dev->mtu > ETH_DATA_LEN)
++ dev->mtu = ETH_DATA_LEN;
++
++ return ethtool_op_set_sg(dev, data);
++}
++
++static int xennet_set_tso(struct net_device *dev, u32 data)
++{
++ if (data) {
++ struct netfront_info *np = netdev_priv(dev);
++ int val;
++
++ if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-gso-tcpv4", "%d", &val) < 0)
++ val = 0;
++ if (!val)
++ return -ENOSYS;
++ }
++
++ return ethtool_op_set_tso(dev, data);
++}
++
++static void xennet_set_features(struct net_device *dev)
++{
++ dev_disable_gso_features(dev);
++ xennet_set_sg(dev, 0);
++
++ /* We need checksum offload to enable scatter/gather and TSO. */
++ if (!(dev->features & NETIF_F_IP_CSUM))
++ return;
++
++ if (xennet_set_sg(dev, 1))
++ return;
++
++ /* Before 2.6.9 TSO seems to be unreliable so do not enable it
++ * on older kernels.
++ */
++ if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9))
++ xennet_set_tso(dev, 1);
++}
++
++static int network_connect(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ int i, requeue_idx, err;
++ struct sk_buff *skb;
++ grant_ref_t ref;
++ netif_rx_request_t *req;
++ unsigned int feature_rx_copy, feature_rx_flip;
++
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-copy", "%u", &feature_rx_copy);
++ if (err != 1)
++ feature_rx_copy = 0;
++ err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
++ "feature-rx-flip", "%u", &feature_rx_flip);
++ if (err != 1)
++ feature_rx_flip = 1;
++
++ /*
++ * Copy packets on receive path if:
++ * (a) This was requested by user, and the backend supports it; or
++ * (b) Flipping was requested, but this is unsupported by the backend.
++ */
++ np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
++ (MODPARM_rx_flip && !feature_rx_flip));
++
++ err = talk_to_backend(np->xbdev, np);
++ if (err)
++ return err;
++
++ xennet_set_features(dev);
++
++ DPRINTK("device %s has %sing receive path.\n",
++ dev->name, np->copying_receiver ? "copy" : "flipp");
++
++ spin_lock_bh(&np->rx_lock);
++ spin_lock_irq(&np->tx_lock);
++
++ /*
++ * Recovery procedure:
++ * NB. Freelist index entries are always going to be less than
++	 * PAGE_OFFSET, whereas pointers to skbs will always be equal to or
++ * greater than PAGE_OFFSET: we use this property to distinguish
++ * them.
++ */
++
++ /* Step 1: Discard all pending TX packet fragments. */
++ netif_release_tx_bufs(np);
++
++ /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
++ for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
++ if (!np->rx_skbs[i])
++ continue;
++
++ skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
++ ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
++ req = RING_GET_REQUEST(&np->rx, requeue_idx);
++
++ if (!np->copying_receiver) {
++ gnttab_grant_foreign_transfer_ref(
++ ref, np->xbdev->otherend_id,
++ page_to_pfn(skb_shinfo(skb)->frags->page));
++ } else {
++ gnttab_grant_foreign_access_ref(
++ ref, np->xbdev->otherend_id,
++ pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
++ frags->page)),
++ 0);
++ }
++ req->gref = ref;
++ req->id = requeue_idx;
++
++ requeue_idx++;
++ }
++
++ np->rx.req_prod_pvt = requeue_idx;
++
++ /*
++ * Step 3: All public and private state should now be sane. Get
++ * ready to start sending and receiving packets and give the driver
++ * domain a kick because we've probably just requeued some
++ * packets.
++ */
++ netfront_carrier_on(np);
++ notify_remote_via_irq(np->irq);
++ network_tx_buf_gc(dev);
++ network_alloc_rx_buffers(dev);
++
++ spin_unlock_irq(&np->tx_lock);
++ spin_unlock_bh(&np->rx_lock);
++
++ return 0;
++}
++
++static void netif_uninit(struct net_device *dev)
++{
++ struct netfront_info *np = netdev_priv(dev);
++ netif_release_tx_bufs(np);
++ if (np->copying_receiver)
++ netif_release_rx_bufs_copy(np);
++ else
++ netif_release_rx_bufs_flip(np);
++ gnttab_free_grant_references(np->gref_tx_head);
++ gnttab_free_grant_references(np->gref_rx_head);
++}
++
++static struct ethtool_ops network_ethtool_ops =
++{
++ .get_tx_csum = ethtool_op_get_tx_csum,
++ .set_tx_csum = ethtool_op_set_tx_csum,
++ .get_sg = ethtool_op_get_sg,
++ .set_sg = xennet_set_sg,
++#if HAVE_TSO
++ .get_tso = ethtool_op_get_tso,
++ .set_tso = xennet_set_tso,
++#endif
++ .get_link = ethtool_op_get_link,
++};
++
++#ifdef CONFIG_SYSFS
++static ssize_t show_rxbuf_min(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_min_target);
++}
++
++static ssize_t store_rxbuf_min(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target > np->rx_max_target)
++ np->rx_max_target = target;
++ np->rx_min_target = target;
++ if (target > np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_max(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_max_target);
++}
++
++static ssize_t store_rxbuf_max(struct class_device *cd,
++ const char *buf, size_t len)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *np = netdev_priv(netdev);
++ char *endp;
++ unsigned long target;
++
++ if (!capable(CAP_NET_ADMIN))
++ return -EPERM;
++
++ target = simple_strtoul(buf, &endp, 0);
++ if (endp == buf)
++ return -EBADMSG;
++
++ if (target < RX_MIN_TARGET)
++ target = RX_MIN_TARGET;
++ if (target > RX_MAX_TARGET)
++ target = RX_MAX_TARGET;
++
++ spin_lock_bh(&np->rx_lock);
++ if (target < np->rx_min_target)
++ np->rx_min_target = target;
++ np->rx_max_target = target;
++ if (target < np->rx_target)
++ np->rx_target = target;
++
++ network_alloc_rx_buffers(netdev);
++
++ spin_unlock_bh(&np->rx_lock);
++ return len;
++}
++
++static ssize_t show_rxbuf_cur(struct class_device *cd, char *buf)
++{
++ struct net_device *netdev = container_of(cd, struct net_device,
++ class_dev);
++ struct netfront_info *info = netdev_priv(netdev);
++
++ return sprintf(buf, "%u\n", info->rx_target);
++}
++
++static const struct class_device_attribute xennet_attrs[] = {
++ __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
++ __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
++ __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
++};
++
++static int xennet_sysfs_addif(struct net_device *netdev)
++{
++ int i;
++ int error = 0;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ error = class_device_create_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ if (error)
++ goto fail;
++ }
++ return 0;
++
++ fail:
++ while (--i >= 0)
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ return error;
++}
++
++static void xennet_sysfs_delif(struct net_device *netdev)
++{
++ int i;
++
++ for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
++ class_device_remove_file(&netdev->class_dev,
++ &xennet_attrs[i]);
++ }
++}
++
++#endif /* CONFIG_SYSFS */
++
++
++/*
++ * Nothing to do here. Virtual interface is point-to-point and the
++ * physical interface is probably promiscuous anyway.
++ */
++static void network_set_multicast_list(struct net_device *dev)
++{
++}
++
++static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
++{
++ int i, err = 0;
++ struct net_device *netdev = NULL;
++ struct netfront_info *np = NULL;
++
++ netdev = alloc_etherdev(sizeof(struct netfront_info));
++ if (!netdev) {
++ printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
++ __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ np = netdev_priv(netdev);
++ np->xbdev = dev;
++
++ spin_lock_init(&np->tx_lock);
++ spin_lock_init(&np->rx_lock);
++
++ init_accelerator_vif(np, dev);
++
++ skb_queue_head_init(&np->rx_batch);
++ np->rx_target = RX_DFL_MIN_TARGET;
++ np->rx_min_target = RX_DFL_MIN_TARGET;
++ np->rx_max_target = RX_MAX_TARGET;
++
++ init_timer(&np->rx_refill_timer);
++ np->rx_refill_timer.data = (unsigned long)netdev;
++ np->rx_refill_timer.function = rx_refill_timeout;
++
++ /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
++ for (i = 0; i <= NET_TX_RING_SIZE; i++) {
++ np->tx_skbs[i] = (void *)((unsigned long) i+1);
++ np->grant_tx_ref[i] = GRANT_INVALID_REF;
++ }
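++ /*
++ * Free-chain encoding: slot i of tx_skbs holds the integer i+1
++ * cast to a pointer, i.e. the index of the next free slot. Such
++ * small values are always below PAGE_OFFSET, so they can never be
++ * mistaken for real skb pointers (see the recovery comment in
++ * network_connect()).
++ */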
++
++ for (i = 0; i < NET_RX_RING_SIZE; i++) {
++ np->rx_skbs[i] = NULL;
++ np->grant_rx_ref[i] = GRANT_INVALID_REF;
++ }
++
++ /* A grant for every tx ring slot */
++ if (gnttab_alloc_grant_references(TX_MAX_TARGET,
++ &np->gref_tx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
++ err = -ENOMEM;
++ goto exit;
++ }
++ /* A grant for every rx ring slot */
++ if (gnttab_alloc_grant_references(RX_MAX_TARGET,
++ &np->gref_rx_head) < 0) {
++ printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
++ err = -ENOMEM;
++ goto exit_free_tx;
++ }
++
++ netdev->open = network_open;
++ netdev->hard_start_xmit = network_start_xmit;
++ netdev->stop = network_close;
++ netdev->get_stats = network_get_stats;
++ netdev->poll = netif_poll;
++ netdev->set_multicast_list = network_set_multicast_list;
++ netdev->uninit = netif_uninit;
++ netdev->set_mac_address = xennet_set_mac_address;
++ netdev->change_mtu = xennet_change_mtu;
++ netdev->weight = 64;
++ netdev->features = NETIF_F_IP_CSUM;
++
++ SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
++ SET_MODULE_OWNER(netdev);
++ SET_NETDEV_DEV(netdev, &dev->dev);
++
++ np->netdev = netdev;
++
++ netfront_carrier_off(np);
++
++ return netdev;
++
++ exit_free_tx:
++ gnttab_free_grant_references(np->gref_tx_head);
++ exit:
++ free_netdev(netdev);
++ return ERR_PTR(err);
++}
++
++#ifdef CONFIG_INET
++/*
++ * We use this notifier to send out a fake ARP reply to reset switches and
++ * router ARP caches when an IP interface is brought up on a VIF.
++ */
++static int
++inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
++{
++ struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
++ struct net_device *dev = ifa->ifa_dev->dev;
++
++ /* UP event and is it one of our devices? */
++ if (event == NETDEV_UP && dev->open == network_open)
++ send_fake_arp(dev);
++
++ return NOTIFY_DONE;
++}
++
++static struct notifier_block notifier_inetdev = {
++ .notifier_call = inetdev_notify,
++ .next = NULL,
++ .priority = 0
++};
++#endif
++
++
++static void netif_disconnect_backend(struct netfront_info *info)
++{
++ /* Stop old i/f to prevent errors whilst we rebuild the state. */
++ spin_lock_bh(&info->rx_lock);
++ spin_lock_irq(&info->tx_lock);
++ netfront_carrier_off(info);
++ spin_unlock_irq(&info->tx_lock);
++ spin_unlock_bh(&info->rx_lock);
++
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info->netdev);
++ info->irq = 0;
++
++ end_access(info->tx_ring_ref, info->tx.sring);
++ end_access(info->rx_ring_ref, info->rx.sring);
++ info->tx_ring_ref = GRANT_INVALID_REF;
++ info->rx_ring_ref = GRANT_INVALID_REF;
++ info->tx.sring = NULL;
++ info->rx.sring = NULL;
++}
++
++
++static void end_access(int ref, void *page)
++{
++ if (ref != GRANT_INVALID_REF)
++ gnttab_end_foreign_access(ref, (unsigned long)page);
++}
++
++
++/* ** Driver registration ** */
++
++
++static const struct xenbus_device_id netfront_ids[] = {
++ { "vif" },
++ { "" }
++};
++MODULE_ALIAS("xen:vif");
++
++
++static struct xenbus_driver netfront_driver = {
++ .name = "vif",
++ .owner = THIS_MODULE,
++ .ids = netfront_ids,
++ .probe = netfront_probe,
++ .remove = __devexit_p(netfront_remove),
++ .suspend = netfront_suspend,
++ .suspend_cancel = netfront_suspend_cancel,
++ .resume = netfront_resume,
++ .otherend_changed = backend_changed,
++};
++
++
++static int __init netif_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++#ifdef CONFIG_XEN
++ if (MODPARM_rx_flip && MODPARM_rx_copy) {
++ WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
++ return -EINVAL;
++ }
++
++ if (!MODPARM_rx_flip && !MODPARM_rx_copy)
++ MODPARM_rx_flip = 1; /* Default is to flip. */
++#endif
++
++ netif_init_accel();
++
++ IPRINTK("Initialising virtual ethernet driver.\n");
++
++#ifdef CONFIG_INET
++ (void)register_inetaddr_notifier(¬ifier_inetdev);
++#endif
++
++ return xenbus_register_frontend(&netfront_driver);
++}
++module_init(netif_init);
++
++
++static void __exit netif_exit(void)
++{
++#ifdef CONFIG_INET
++ unregister_inetaddr_notifier(¬ifier_inetdev);
++#endif
++
++ netif_exit_accel();
++
++ return xenbus_unregister_driver(&netfront_driver);
++}
++module_exit(netif_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/netfront/netfront.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/netfront/netfront.h 2008-01-07 13:19:18.000000000 +0100
+@@ -0,0 +1,274 @@
++/******************************************************************************
++ * Virtual network driver for conversing with remote driver backends.
++ *
++ * Copyright (c) 2002-2005, K A Fraser
++ * Copyright (c) 2005, XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef NETFRONT_H
++#define NETFRONT_H
++
++#include <xen/interface/io/netif.h>
++#include <linux/netdevice.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++
++#define NET_TX_RING_SIZE __RING_SIZE((struct netif_tx_sring *)0, PAGE_SIZE)
++#define NET_RX_RING_SIZE __RING_SIZE((struct netif_rx_sring *)0, PAGE_SIZE)
++
++#include <xen/xenbus.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++/*
++ * Function pointer table for hooks into a network acceleration
++ * plugin. These are called at appropriate points from the netfront
++ * driver
++ */
++struct netfront_accel_hooks {
++ /*
++ * new_device: Accelerator hook to ask the plugin to support a
++ * new network interface
++ */
++ int (*new_device)(struct net_device *net_dev, struct xenbus_device *dev);
++ /*
++ * remove: Opposite of new_device
++ */
++ int (*remove)(struct xenbus_device *dev);
++ /*
++ * The net_device is being polled, check the accelerated
++ * hardware for any pending packets
++ */
++ int (*netdev_poll)(struct net_device *dev, int *pbudget);
++ /*
++ * start_xmit: Used to give the accelerated plugin the option
++ * of sending a packet. Returns non-zero if has done so, or
++ * zero to decline and force the packet onto normal send
++ * path
++ */
++ int (*start_xmit)(struct sk_buff *skb, struct net_device *dev);
++ /*
++ * start/stop_napi_irq: used by netfront to indicate when
++ * NAPI interrupts should be enabled or disabled
++ */
++ int (*start_napi_irq)(struct net_device *dev);
++ void (*stop_napi_irq)(struct net_device *dev);
++ /*
++ * Called before re-enabling the TX queue to check the fast
++ * path has slots too
++ */
++ int (*check_ready)(struct net_device *dev);
++ /*
++ * Get the fastpath network statistics
++ */
++ int (*get_stats)(struct net_device *dev,
++ struct net_device_stats *stats);
++};
++
++
++/* Supported version of the API/protocol for communication between
++   netfront and the acceleration plugin */
++#define NETFRONT_ACCEL_VERSION 0x00010003
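++/* (The encoding is presumably the major version in the upper 16 bits
++   and the minor in the lower 16, i.e. 0x00010003 == API version 1.3.) */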
++
++/*
++ * Per-netfront device state for the accelerator. This is used to
++ * allow efficient per-netfront device access to the accelerator
++ * hooks
++ */
++struct netfront_accel_vif_state {
++ struct list_head link;
++
++ struct xenbus_device *dev;
++ struct netfront_info *np;
++ struct netfront_accel_hooks *hooks;
++
++ /* Watch on the accelerator configuration value */
++ struct xenbus_watch accel_watch;
++ /* Work item to process change in accelerator */
++ struct work_struct accel_work;
++ /* The string from xenbus last time accel_watch fired */
++ char *accel_frontend;
++};
++
++/*
++ * Per-accelerator state stored in netfront. These form a list that
++ * is used to track which devices are accelerated by which plugins,
++ * and what plugins are available/have been requested
++ */
++struct netfront_accelerator {
++ /* Used to make a list */
++ struct list_head link;
++ /* ID of the accelerator */
++ int id;
++ /*
++ * String describing the accelerator. Currently this is the
++ * name of the accelerator module. This is provided by the
++ * backend accelerator through xenstore
++ */
++ char *frontend;
++ /* The hooks into the accelerator plugin module */
++ struct netfront_accel_hooks *hooks;
++
++ /*
++ * List of per-netfront device state (struct
++ * netfront_accel_vif_state) for each netfront device that is
++ * using this accelerator
++ */
++ struct list_head vif_states;
++ spinlock_t vif_states_lock;
++};
++
++struct netfront_info {
++ struct list_head list;
++ struct net_device *netdev;
++
++ struct net_device_stats stats;
++
++ struct netif_tx_front_ring tx;
++ struct netif_rx_front_ring rx;
++
++ spinlock_t tx_lock;
++ spinlock_t rx_lock;
++
++ unsigned int irq;
++ unsigned int copying_receiver;
++ unsigned int carrier;
++
++ /* Receive-ring batched refills. */
++#define RX_MIN_TARGET 8
++#define RX_DFL_MIN_TARGET 64
++#define RX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ unsigned rx_min_target, rx_max_target, rx_target;
++ struct sk_buff_head rx_batch;
++
++ struct timer_list rx_refill_timer;
++
++ /*
++ * {tx,rx}_skbs store outstanding skbuffs. The first entry in tx_skbs
++ * is an index into a chain of free entries.
++ */
++ struct sk_buff *tx_skbs[NET_TX_RING_SIZE+1];
++ struct sk_buff *rx_skbs[NET_RX_RING_SIZE];
++
++#define TX_MAX_TARGET min_t(int, NET_RX_RING_SIZE, 256)
++ grant_ref_t gref_tx_head;
++ grant_ref_t grant_tx_ref[NET_TX_RING_SIZE + 1];
++ grant_ref_t gref_rx_head;
++ grant_ref_t grant_rx_ref[NET_RX_RING_SIZE];
++
++ struct xenbus_device *xbdev;
++ int tx_ring_ref;
++ int rx_ring_ref;
++ u8 mac[ETH_ALEN];
++
++ unsigned long rx_pfn_array[NET_RX_RING_SIZE];
++ struct multicall_entry rx_mcl[NET_RX_RING_SIZE+1];
++ struct mmu_update rx_mmu[NET_RX_RING_SIZE];
++
++ /* Private pointer to state internal to accelerator module */
++ void *accel_priv;
++ /* The accelerator used by this netfront device */
++ struct netfront_accelerator *accelerator;
++ /* The accelerator state for this netfront device */
++ struct netfront_accel_vif_state accel_vif_state;
++};
++
++
++/* Exported Functions */
++
++/*
++ * Called by an accelerator plugin module when it has loaded.
++ *
++ * frontend: the string describing the accelerator, currently the module name
++ * hooks: the hooks for netfront to use to call into the accelerator
++ * version: the version of API between frontend and plugin requested
++ *
++ * return: 0 on success, <0 on error, or >0 (the version that is
++ * supported) on version mismatch
++ */
++extern int netfront_accelerator_loaded(int version, const char *frontend,
++ struct netfront_accel_hooks *hooks);
++
++/*
++ * Called by an accelerator plugin module when it is about to unload.
++ *
++ * frontend: the string describing the accelerator. Must match the
++ * one passed to netfront_accelerator_loaded()
++ */
++extern void netfront_accelerator_stop(const char *frontend);
++
++/*
++ * Called by an accelerator before waking the net device's TX queue to
++ * ensure the slow path has available slots. Returns true if OK to
++ * wake, false if still busy
++ */
++extern int netfront_check_queue_ready(struct net_device *net_dev);
++
++
++/* Internal-to-netfront Functions */
++
++/*
++ * Call into accelerator and check to see if it has tx space before we
++ * wake the net device's TX queue. Returns true if OK to wake, false
++ * if still busy
++ */
++extern
++int netfront_check_accelerator_queue_ready(struct net_device *dev,
++ struct netfront_info *np);
++extern
++int netfront_accelerator_call_remove(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++int netfront_accelerator_suspend_cancel(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++void netfront_accelerator_resume(struct netfront_info *np,
++ struct xenbus_device *dev);
++extern
++void netfront_accelerator_call_stop_napi_irq(struct netfront_info *np,
++ struct net_device *dev);
++extern
++int netfront_accelerator_call_get_stats(struct netfront_info *np,
++ struct net_device *dev);
++extern
++void netfront_accelerator_add_watch(struct netfront_info *np);
++
++extern
++void netif_init_accel(void);
++extern
++void netif_exit_accel(void);
++
++extern
++void init_accelerator_vif(struct netfront_info *np,
++ struct xenbus_device *dev);
++#endif /* NETFRONT_H */
+Index: head-2008-11-25/drivers/xen/pciback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/Makefile 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,17 @@
++obj-$(CONFIG_XEN_PCIDEV_BACKEND) += pciback.o
++
++pciback-y := pci_stub.o pciback_ops.o xenbus.o
++pciback-y += conf_space.o conf_space_header.o \
++ conf_space_capability.o \
++ conf_space_capability_vpd.o \
++ conf_space_capability_pm.o \
++ conf_space_quirks.o
++pciback-$(CONFIG_PCI_MSI) += conf_space_capability_msi.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_VPCI) += vpci.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_SLOT) += slot.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_PASS) += passthrough.o
++pciback-$(CONFIG_XEN_PCIDEV_BACKEND_CONTROLLER) += controller.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_BE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+Index: head-2008-11-25/drivers/xen/pciback/conf_space.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,426 @@
++/*
++ * PCI Backend - Functions for creating a virtual configuration space for
++ * exported PCI Devices.
++ * It's dangerous to allow PCI Driver Domains to change their
++ * device's resources (memory, i/o ports, interrupts). We need to
++ * restrict changes to certain PCI Configuration registers:
++ * BARs, INTERRUPT_PIN, most registers in the header...
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++#define DEFINE_PCI_CONFIG(op,size,type) \
++int pciback_##op##_config_##size \
++(struct pci_dev *dev, int offset, type value, void *data) \
++{ \
++ return pci_##op##_config_##size (dev, offset, value); \
++}
++
++DEFINE_PCI_CONFIG(read, byte, u8 *)
++DEFINE_PCI_CONFIG(read, word, u16 *)
++DEFINE_PCI_CONFIG(read, dword, u32 *)
++
++DEFINE_PCI_CONFIG(write, byte, u8)
++DEFINE_PCI_CONFIG(write, word, u16)
++DEFINE_PCI_CONFIG(write, dword, u32)
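++
++/*
++ * For reference, the first invocation above expands to:
++ *
++ *	int pciback_read_config_byte(struct pci_dev *dev, int offset,
++ *				     u8 *value, void *data)
++ *	{
++ *		return pci_read_config_byte(dev, offset, value);
++ *	}
++ *
++ * The otherwise-unused data argument exists only so these helpers
++ * match the conf_*_read/write function pointer types declared in
++ * conf_space.h.
++ */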
++
++static int conf_space_read(struct pci_dev *dev,
++ const struct config_field_entry *entry,
++ int offset, u32 *value)
++{
++ int ret = 0;
++ const struct config_field *field = entry->field;
++
++ *value = 0;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.read)
++ ret = field->u.b.read(dev, offset, (u8 *) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.read)
++ ret = field->u.w.read(dev, offset, (u16 *) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.read)
++ ret = field->u.dw.read(dev, offset, value, entry->data);
++ break;
++ }
++ return ret;
++}
++
++static int conf_space_write(struct pci_dev *dev,
++ const struct config_field_entry *entry,
++ int offset, u32 value)
++{
++ int ret = 0;
++ const struct config_field *field = entry->field;
++
++ switch (field->size) {
++ case 1:
++ if (field->u.b.write)
++ ret = field->u.b.write(dev, offset, (u8) value,
++ entry->data);
++ break;
++ case 2:
++ if (field->u.w.write)
++ ret = field->u.w.write(dev, offset, (u16) value,
++ entry->data);
++ break;
++ case 4:
++ if (field->u.dw.write)
++ ret = field->u.dw.write(dev, offset, value,
++ entry->data);
++ break;
++ }
++ return ret;
++}
++
++static inline u32 get_mask(int size)
++{
++ if (size == 1)
++ return 0xff;
++ else if (size == 2)
++ return 0xffff;
++ else
++ return 0xffffffff;
++}
++
++static inline int valid_request(int offset, int size)
++{
++ /* Validate request (no un-aligned requests) */
++ if ((size == 1 || size == 2 || size == 4) && (offset % size) == 0)
++ return 1;
++ return 0;
++}
++
++static inline u32 merge_value(u32 val, u32 new_val, u32 new_val_mask,
++ int offset)
++{
++ if (offset >= 0) {
++ new_val_mask <<= (offset * 8);
++ new_val <<= (offset * 8);
++ } else {
++ new_val_mask >>= (offset * -8);
++ new_val >>= (offset * -8);
++ }
++ val = (val & ~new_val_mask) | (new_val & new_val_mask);
++
++ return val;
++}
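++
++/*
++ * Worked example: a 4-byte guest read at offset 0x10 that overlaps a
++ * 2-byte virtual field at 0x12 ends up calling
++ *
++ *	merge_value(value, tmp_val, 0xffff, 2);
++ *
++ * which shifts the mask and the field value left by 16 bits and
++ * splices the field into bits 16-31 of the hardware value. A negative
++ * offset (a request starting inside the field) shifts right instead.
++ */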
++
++static int pcibios_err_to_errno(int err)
++{
++ switch (err) {
++ case PCIBIOS_SUCCESSFUL:
++ return XEN_PCI_ERR_success;
++ case PCIBIOS_DEVICE_NOT_FOUND:
++ return XEN_PCI_ERR_dev_not_found;
++ case PCIBIOS_BAD_REGISTER_NUMBER:
++ return XEN_PCI_ERR_invalid_offset;
++ case PCIBIOS_FUNC_NOT_SUPPORTED:
++ return XEN_PCI_ERR_not_implemented;
++ case PCIBIOS_SET_FAILED:
++ return XEN_PCI_ERR_access_denied;
++ }
++ return err;
++}
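++
++/* Despite its name, the function above maps PCIBIOS_* return codes to
++ * the XEN_PCI_ERR_* values shared with the frontend, not to Linux
++ * errno values. */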
++
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 * ret_val)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ const struct config_field_entry *cfg_entry;
++ const struct config_field *field;
++ int req_start, req_end, field_start, field_end;
++ /* if read fails for any reason, return 0 (as if device didn't respond) */
++ u32 value = 0, tmp_val;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x\n",
++ pci_name(dev), size, offset);
++
++ if (!valid_request(offset, size)) {
++ err = XEN_PCI_ERR_invalid_offset;
++ goto out;
++ }
++
++ /* Get the real value first, then modify as appropriate */
++ switch (size) {
++ case 1:
++ err = pci_read_config_byte(dev, offset, (u8 *) & value);
++ break;
++ case 2:
++ err = pci_read_config_word(dev, offset, (u16 *) & value);
++ break;
++ case 4:
++ err = pci_read_config_dword(dev, offset, &value);
++ break;
++ }
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ err = conf_space_read(dev, cfg_entry, field_start,
++ &tmp_val);
++ if (err)
++ goto out;
++
++ value = merge_value(value, tmp_val,
++ get_mask(field->size),
++ field_start - req_start);
++ }
++ }
++
++ out:
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: read %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ *ret_val = value;
++ return pcibios_err_to_errno(err);
++}
++
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value)
++{
++ int err = 0, handled = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ const struct config_field_entry *cfg_entry;
++ const struct config_field *field;
++ u32 tmp_val;
++ int req_start, req_end, field_start, field_end;
++
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: write request %d bytes at 0x%x = %x\n",
++ pci_name(dev), size, offset, value);
++
++ if (!valid_request(offset, size))
++ return XEN_PCI_ERR_invalid_offset;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ req_start = offset;
++ req_end = offset + size;
++ field_start = OFFSET(cfg_entry);
++ field_end = OFFSET(cfg_entry) + field->size;
++
++ if ((req_start >= field_start && req_start < field_end)
++ || (req_end > field_start && req_end <= field_end)) {
++ tmp_val = 0;
++
++ err = pciback_config_read(dev, field_start,
++ field->size, &tmp_val);
++ if (err)
++ break;
++
++ tmp_val = merge_value(tmp_val, value, get_mask(size),
++ req_start - field_start);
++
++ err = conf_space_write(dev, cfg_entry, field_start,
++ tmp_val);
++
++ /* handled is set true here, but not every byte
++ * may have been written! Properly detecting if
++ * every byte is handled is unnecessary as the
++ * flag is used to detect devices that need
++ * special helpers to work correctly.
++ */
++ handled = 1;
++ }
++ }
++
++ if (!handled && !err) {
++ /* By default, anything not specifically handled above is
++ * read-only. The permissive flag changes this behavior so
++ * that anything not specifically handled above is writable.
++ * This means that some fields may still be read-only because
++ * they have entries in the config_field list that intercept
++ * the write and do nothing. */
++ if (dev_data->permissive) {
++ switch (size) {
++ case 1:
++ err = pci_write_config_byte(dev, offset,
++ (u8) value);
++ break;
++ case 2:
++ err = pci_write_config_word(dev, offset,
++ (u16) value);
++ break;
++ case 4:
++ err = pci_write_config_dword(dev, offset,
++ (u32) value);
++ break;
++ }
++ } else if (!dev_data->warned_on_write) {
++ dev_data->warned_on_write = 1;
++ dev_warn(&dev->dev, "Driver tried to write to a "
++ "read-only configuration space field at offset "
++ "0x%x, size %d. This may be harmless, but if "
++ "you have problems with your device:\n"
++ "1) see permissive attribute in sysfs\n"
++ "2) report problems to the xen-devel "
++ "mailing list along with details of your "
++ "device obtained from lspci.\n", offset, size);
++ }
++ }
++
++ return pcibios_err_to_errno(err);
++}
++
++void pciback_config_free_dyn_fields(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ const struct config_field *field;
++
++ dev_dbg(&dev->dev,
++ "free-ing dynamically allocated virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->clean) {
++ field->clean((struct config_field *)field);
++
++ if (cfg_entry->data)
++ kfree(cfg_entry->data);
++
++ list_del(&cfg_entry->list);
++ kfree(cfg_entry);
++ }
++
++ }
++}
++
++void pciback_config_reset_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ const struct config_field_entry *cfg_entry;
++ const struct config_field *field;
++
++ dev_dbg(&dev->dev, "resetting virtual configuration space\n");
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++
++ if (field->reset)
++ field->reset(dev, OFFSET(cfg_entry), cfg_entry->data);
++ }
++}
++
++void pciback_config_free_dev(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry, *t;
++ const struct config_field *field;
++
++ dev_dbg(&dev->dev, "free-ing virtual configuration space fields\n");
++
++ list_for_each_entry_safe(cfg_entry, t, &dev_data->config_fields, list) {
++ list_del(&cfg_entry->list);
++
++ field = cfg_entry->field;
++
++ if (field->release)
++ field->release(dev, OFFSET(cfg_entry), cfg_entry->data);
++
++ kfree(cfg_entry);
++ }
++}
++
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ const struct config_field *field,
++ unsigned int base_offset)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++ void *tmp;
++
++ cfg_entry = kmalloc(sizeof(*cfg_entry), GFP_KERNEL);
++ if (!cfg_entry) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ cfg_entry->data = NULL;
++ cfg_entry->field = field;
++ cfg_entry->base_offset = base_offset;
++
++ /* silently ignore duplicate fields */
++ err = pciback_field_is_dup(dev, OFFSET(cfg_entry));
++ if (err)
++ goto out;
++
++ if (field->init) {
++ tmp = field->init(dev, OFFSET(cfg_entry));
++
++ if (IS_ERR(tmp)) {
++ err = PTR_ERR(tmp);
++ goto out;
++ }
++
++ cfg_entry->data = tmp;
++ }
++
++ dev_dbg(&dev->dev, "added config field at offset 0x%02x\n",
++ OFFSET(cfg_entry));
++ list_add_tail(&cfg_entry->list, &dev_data->config_fields);
++
++ out:
++ if (err)
++ kfree(cfg_entry);
++
++ return err;
++}
++
++/* This sets up the device's virtual configuration space to keep track of
++ * certain registers (like the base address registers (BARs) so that we can
++ * keep the client from manipulating them directly.
++ */
++int pciback_config_init_dev(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++
++ dev_dbg(&dev->dev, "initializing virtual configuration space\n");
++
++ INIT_LIST_HEAD(&dev_data->config_fields);
++
++ err = pciback_config_header_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_capability_add_fields(dev);
++ if (err)
++ goto out;
++
++ err = pciback_config_quirks_init(dev);
++
++ out:
++ return err;
++}
++
++int pciback_config_init(void)
++{
++ return pciback_config_capability_init();
++}
+Index: head-2008-11-25/drivers/xen/pciback/conf_space.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Common data structures for overriding the configuration space
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_H__
++#define __XEN_PCIBACK_CONF_SPACE_H__
++
++#include <linux/list.h>
++#include <linux/err.h>
++
++/* conf_field_init can return an errno in a ptr with ERR_PTR() */
++typedef void *(*conf_field_init) (struct pci_dev * dev, int offset);
++typedef void (*conf_field_reset) (struct pci_dev * dev, int offset, void *data);
++typedef void (*conf_field_free) (struct pci_dev * dev, int offset, void *data);
++
++typedef int (*conf_dword_write) (struct pci_dev * dev, int offset, u32 value,
++ void *data);
++typedef int (*conf_word_write) (struct pci_dev * dev, int offset, u16 value,
++ void *data);
++typedef int (*conf_byte_write) (struct pci_dev * dev, int offset, u8 value,
++ void *data);
++typedef int (*conf_dword_read) (struct pci_dev * dev, int offset, u32 * value,
++ void *data);
++typedef int (*conf_word_read) (struct pci_dev * dev, int offset, u16 * value,
++ void *data);
++typedef int (*conf_byte_read) (struct pci_dev * dev, int offset, u8 * value,
++ void *data);
++
++/* These are the fields within the configuration space which we
++ * are interested in intercepting reads/writes to and changing their
++ * values.
++ */
++struct config_field {
++ unsigned int offset;
++ unsigned int size;
++ unsigned int mask;
++ conf_field_init init;
++ conf_field_reset reset;
++ conf_field_free release;
++ void (*clean) (struct config_field * field);
++ union {
++ struct {
++ conf_dword_write write;
++ conf_dword_read read;
++ } dw;
++ struct {
++ conf_word_write write;
++ conf_word_read read;
++ } w;
++ struct {
++ conf_byte_write write;
++ conf_byte_read read;
++ } b;
++ } u;
++ struct list_head list;
++};
++
++struct config_field_entry {
++ struct list_head list;
++ const struct config_field *field;
++ unsigned int base_offset;
++ void *data;
++};
++
++#define OFFSET(cfg_entry) ((cfg_entry)->base_offset+(cfg_entry)->field->offset)
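++/* e.g. a field attached to a capability discovered at offset 0x50 with
++ * field->offset == PCI_PM_CTRL (4) yields OFFSET() == 0x54, the field's
++ * absolute position in configuration space. */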
++
++/* Add fields to a device - the add_fields macro expects to get a pointer to
++ * the first entry in an array (of which the ending is marked by size==0)
++ */
++int pciback_config_add_field_offset(struct pci_dev *dev,
++ const struct config_field *field,
++ unsigned int offset);
++
++static inline int pciback_config_add_field(struct pci_dev *dev,
++ const struct config_field *field)
++{
++ return pciback_config_add_field_offset(dev, field, 0);
++}
++
++static inline int pciback_config_add_fields(struct pci_dev *dev,
++ const struct config_field *field)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field(dev, &field[i]);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++static inline int pciback_config_add_fields_offset(struct pci_dev *dev,
++ const struct config_field *field,
++ unsigned int offset)
++{
++ int i, err = 0;
++ for (i = 0; field[i].size != 0; i++) {
++ err = pciback_config_add_field_offset(dev, &field[i], offset);
++ if (err)
++ break;
++ }
++ return err;
++}
++
++/* Read/Write the real configuration space */
++int pciback_read_config_byte(struct pci_dev *dev, int offset, u8 * value,
++ void *data);
++int pciback_read_config_word(struct pci_dev *dev, int offset, u16 * value,
++ void *data);
++int pciback_read_config_dword(struct pci_dev *dev, int offset, u32 * value,
++ void *data);
++int pciback_write_config_byte(struct pci_dev *dev, int offset, u8 value,
++ void *data);
++int pciback_write_config_word(struct pci_dev *dev, int offset, u16 value,
++ void *data);
++int pciback_write_config_dword(struct pci_dev *dev, int offset, u32 value,
++ void *data);
++
++int pciback_config_capability_init(void);
++
++int pciback_config_header_add_fields(struct pci_dev *dev);
++int pciback_config_capability_add_fields(struct pci_dev *dev);
++
++#endif /* __XEN_PCIBACK_CONF_SPACE_H__ */
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_capability.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_capability.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,69 @@
++/*
++ * PCI Backend - Handles the virtual fields found on the capability lists
++ * in the configuration space.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static LIST_HEAD(capabilities);
++
++static const struct config_field caplist_header[] = {
++ {
++ .offset = PCI_CAP_LIST_ID,
++ .size = 2, /* encompass PCI_CAP_LIST_ID & PCI_CAP_LIST_NEXT */
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = NULL,
++ },
++ {}
++};
++
++static inline void register_capability(struct pciback_config_capability *cap)
++{
++ list_add_tail(&cap->cap_list, &capabilities);
++}
++
++int pciback_config_capability_add_fields(struct pci_dev *dev)
++{
++ int err = 0;
++ struct pciback_config_capability *cap;
++ int cap_offset;
++
++ list_for_each_entry(cap, &capabilities, cap_list) {
++ cap_offset = pci_find_capability(dev, cap->capability);
++ if (cap_offset) {
++ dev_dbg(&dev->dev, "Found capability 0x%x at 0x%x\n",
++ cap->capability, cap_offset);
++
++ err = pciback_config_add_fields_offset(dev,
++ caplist_header,
++ cap_offset);
++ if (err)
++ goto out;
++ err = pciback_config_add_fields_offset(dev,
++ cap->fields,
++ cap_offset);
++ if (err)
++ goto out;
++ }
++ }
++
++ out:
++ return err;
++}
++
++extern struct pciback_config_capability pciback_config_capability_vpd;
++extern struct pciback_config_capability pciback_config_capability_pm;
++
++int pciback_config_capability_init(void)
++{
++ register_capability(&pciback_config_capability_vpd);
++ register_capability(&pciback_config_capability_pm);
++
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_capability.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_capability.h 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,23 @@
++/*
++ * PCI Backend - Data structures for special overlays for structures on
++ * the capability list.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#ifndef __PCIBACK_CONFIG_CAPABILITY_H__
++#define __PCIBACK_CONFIG_CAPABILITY_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_capability {
++ struct list_head cap_list;
++
++ int capability;
++
++ /* If the device has the capability found above, add these fields */
++ const struct config_field *fields;
++};
++
++#endif
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_capability_msi.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_capability_msi.c 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,79 @@
++/*
++ * PCI Backend -- Configuration overlay for MSI capability
++ */
++#include <linux/pci.h>
++#include <linux/slab.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++#include <xen/interface/io/pciif.h>
++#include "pciback.h"
++
++int pciback_enable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ int otherend = pdev->xdev->otherend_id;
++ int status;
++
++ status = pci_enable_msi(dev);
++
++ if (status) {
++ printk("error enable msi for guest %x status %x\n", otherend, status);
++ op->value = 0;
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ op->value = dev->irq;
++ return 0;
++}
++
++int pciback_disable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ pci_disable_msi(dev);
++
++ op->value = dev->irq;
++ return 0;
++}
++
++int pciback_enable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ int i, result;
++ struct msix_entry *entries;
++
++ if (op->value > SH_INFO_MAX_VEC)
++ return -EINVAL;
++
++ entries = kmalloc(op->value * sizeof(*entries), GFP_KERNEL);
++ if (entries == NULL)
++ return -ENOMEM;
++
++ for (i = 0; i < op->value; i++) {
++ entries[i].entry = op->msix_entries[i].entry;
++ entries[i].vector = op->msix_entries[i].vector;
++ }
++
++ result = pci_enable_msix(dev, entries, op->value);
++
++ for (i = 0; i < op->value; i++) {
++ op->msix_entries[i].entry = entries[i].entry;
++ op->msix_entries[i].vector = entries[i].vector;
++ }
++
++ kfree(entries);
++
++ op->value = result;
++
++ return result;
++}
++
++int pciback_disable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op)
++{
++ pci_disable_msix(dev);
++
++ op->value = dev->irq;
++ return 0;
++}
++
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_capability_pm.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_capability_pm.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Configuration space overlay for power management
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int pm_caps_read(struct pci_dev *dev, int offset, u16 *value,
++ void *data)
++{
++ int err;
++ u16 real_value;
++
++ err = pci_read_config_word(dev, offset, &real_value);
++ if (err)
++ goto out;
++
++ *value = real_value & ~PCI_PM_CAP_PME_MASK;
++
++ out:
++ return err;
++}
++
++/* PM_OK_BITS specifies the bits that the driver domain is allowed to change.
++ * Can't allow driver domain to enable PMEs - they're shared */
++#define PM_OK_BITS (PCI_PM_CTRL_PME_STATUS|PCI_PM_CTRL_DATA_SEL_MASK)
++
++static int pm_ctrl_write(struct pci_dev *dev, int offset, u16 new_value,
++ void *data)
++{
++ int err;
++ u16 old_value;
++ pci_power_t new_state, old_state;
++
++ err = pci_read_config_word(dev, offset, &old_value);
++ if (err)
++ goto out;
++
++ old_state = (pci_power_t)(old_value & PCI_PM_CTRL_STATE_MASK);
++ new_state = (pci_power_t)(new_value & PCI_PM_CTRL_STATE_MASK);
++
++ new_value &= PM_OK_BITS;
++ if ((old_value & PM_OK_BITS) != new_value) {
++ new_value = (old_value & ~PM_OK_BITS) | new_value;
++ err = pci_write_config_word(dev, offset, new_value);
++ if (err)
++ goto out;
++ }
++
++ /* Let pci core handle the power management change */
++ dev_dbg(&dev->dev, "set power state to %x\n", new_state);
++ err = pci_set_power_state(dev, new_state);
++ if (err) {
++ err = PCIBIOS_SET_FAILED;
++ goto out;
++ }
++
++ /*
++ * Device may lose PCI config info on D3->D0 transition. This
++ * is a problem for some guests which will not reset BARs. Even
++ * those that try will be foiled by our BAR-write handler,
++ * which will discard the write! Since Linux won't re-init
++ * the config space automatically in all cases, we do it here.
++ * Future: should we re-initialise the first 64 bytes of config space?
++ */
++ if (new_state == PCI_D0 &&
++ (old_state == PCI_D3hot || old_state == PCI_D3cold) &&
++ !(old_value & PCI_PM_CTRL_NO_SOFT_RESET))
++ pci_restore_bars(dev);
++
++ out:
++ return err;
++}
++
++/* Ensure PMEs are disabled */
++static void *pm_ctrl_init(struct pci_dev *dev, int offset)
++{
++ int err;
++ u16 value;
++
++ err = pci_read_config_word(dev, offset, &value);
++ if (err)
++ goto out;
++
++ if (value & PCI_PM_CTRL_PME_ENABLE) {
++ value &= ~PCI_PM_CTRL_PME_ENABLE;
++ err = pci_write_config_word(dev, offset, value);
++ }
++
++ out:
++ return ERR_PTR(err);
++}
++
++static const struct config_field caplist_pm[] = {
++ {
++ .offset = PCI_PM_PMC,
++ .size = 2,
++ .u.w.read = pm_caps_read,
++ },
++ {
++ .offset = PCI_PM_CTRL,
++ .size = 2,
++ .init = pm_ctrl_init,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = pm_ctrl_write,
++ },
++ {
++ .offset = PCI_PM_PPB_EXTENSIONS,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_PM_DATA_REGISTER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {}
++};
++
++struct pciback_config_capability pciback_config_capability_pm = {
++ .capability = PCI_CAP_ID_PM,
++ .fields = caplist_pm,
++};
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_capability_vpd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_capability_vpd.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,40 @@
++/*
++ * PCI Backend - Configuration space overlay for Vital Product Data
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/pci.h>
++#include "conf_space.h"
++#include "conf_space_capability.h"
++
++static int vpd_address_write(struct pci_dev *dev, int offset, u16 value,
++ void *data)
++{
++ /* Disallow writes to the vital product data */
++ if (value & PCI_VPD_ADDR_F)
++ return PCIBIOS_SET_FAILED;
++ else
++ return pci_write_config_word(dev, offset, value);
++}
++
++static const struct config_field caplist_vpd[] = {
++ {
++ .offset = PCI_VPD_ADDR,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = vpd_address_write,
++ },
++ {
++ .offset = PCI_VPD_DATA,
++ .size = 4,
++ .u.dw.read = pciback_read_config_dword,
++ .u.dw.write = NULL,
++ },
++ {}
++};
++
++struct pciback_config_capability pciback_config_capability_vpd = {
++ .capability = PCI_CAP_ID_VPD,
++ .fields = caplist_vpd,
++};
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_header.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_header.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,317 @@
++/*
++ * PCI Backend - Handles the virtual fields in the configuration space headers.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++
++struct pci_bar_info {
++ u32 val;
++ u32 len_val;
++ int which;
++};
++
++#define is_enable_cmd(value) ((value)&(PCI_COMMAND_MEMORY|PCI_COMMAND_IO))
++#define is_master_cmd(value) ((value)&PCI_COMMAND_MASTER)
++
++static int command_write(struct pci_dev *dev, int offset, u16 value, void *data)
++{
++ int err;
++
++ if (!dev->is_enabled && is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: enable\n",
++ pci_name(dev));
++ err = pci_enable_device(dev);
++ if (err)
++ return err;
++ } else if (dev->is_enabled && !is_enable_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: disable\n",
++ pci_name(dev));
++ pci_disable_device(dev);
++ }
++
++ if (!dev->is_busmaster && is_master_cmd(value)) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG "pciback: %s: set bus master\n",
++ pci_name(dev));
++ pci_set_master(dev);
++ }
++
++ if (value & PCI_COMMAND_INVALIDATE) {
++ if (unlikely(verbose_request))
++ printk(KERN_DEBUG
++ "pciback: %s: enable memory-write-invalidate\n",
++ pci_name(dev));
++ err = pci_set_mwi(dev);
++ if (err) {
++ printk(KERN_WARNING
++ "pciback: %s: cannot enable memory-write-invalidate (%d)\n",
++ pci_name(dev), err);
++ value &= ~PCI_COMMAND_INVALIDATE;
++ }
++ }
++
++ return pci_write_config_word(dev, offset, value);
++}
++
++static int rom_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~PCI_ROM_ADDRESS_ENABLE)
++ bar->which = 1;
++ else {
++ u32 tmpval;
++ pci_read_config_dword(dev, offset, &tmpval);
++ if (tmpval != bar->val && value == bar->val) {
++ /* Allow restoration of bar value. */
++ pci_write_config_dword(dev, offset, bar->val);
++ }
++ bar->which = 0;
++ }
++
++ /* Do we need to support enabling/disabling the rom address here? */
++
++ return 0;
++}
++
++/* For the BARs, only allow writes which write ~0 or
++ * the correct resource information
++ * (Needed for when the driver probes the resource usage)
++ */
++static int bar_write(struct pci_dev *dev, int offset, u32 value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ /* A write to obtain the length must happen as a 32-bit write.
++ * This does not (yet) support writing individual bytes
++ */
++ if (value == ~0)
++ bar->which = 1;
++ else {
++ u32 tmpval;
++ pci_read_config_dword(dev, offset, &tmpval);
++ if (tmpval != bar->val && value == bar->val) {
++ /* Allow restoration of bar value. */
++ pci_write_config_dword(dev, offset, bar->val);
++ }
++ bar->which = 0;
++ }
++
++ return 0;
++}
++
++static int bar_read(struct pci_dev *dev, int offset, u32 * value, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ if (unlikely(!bar)) {
++ printk(KERN_WARNING "pciback: driver data not found for %s\n",
++ pci_name(dev));
++ return XEN_PCI_ERR_op_failed;
++ }
++
++ *value = bar->which ? bar->len_val : bar->val;
++
++ return 0;
++}
++
++static inline void read_dev_bar(struct pci_dev *dev,
++ struct pci_bar_info *bar_info, int offset,
++ u32 len_mask)
++{
++ pci_read_config_dword(dev, offset, &bar_info->val);
++ pci_write_config_dword(dev, offset, len_mask);
++ pci_read_config_dword(dev, offset, &bar_info->len_val);
++ pci_write_config_dword(dev, offset, bar_info->val);
++}
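++
++/*
++ * read_dev_bar() above performs the standard PCI BAR sizing probe:
++ * save the live value, write the all-ones mask, read back the size
++ * mask, then restore. A 1MB memory BAR, for example, reads back
++ * 0xfff00000 plus its low flag bits, encoding
++ * size = ~(len_val & ~0xf) + 1 = 0x100000.
++ */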
++
++static void *bar_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~0);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void *rom_init(struct pci_dev *dev, int offset)
++{
++ struct pci_bar_info *bar = kmalloc(sizeof(*bar), GFP_KERNEL);
++
++ if (!bar)
++ return ERR_PTR(-ENOMEM);
++
++ read_dev_bar(dev, bar, offset, ~PCI_ROM_ADDRESS_ENABLE);
++ bar->which = 0;
++
++ return bar;
++}
++
++static void bar_reset(struct pci_dev *dev, int offset, void *data)
++{
++ struct pci_bar_info *bar = data;
++
++ bar->which = 0;
++}
++
++static void bar_release(struct pci_dev *dev, int offset, void *data)
++{
++ kfree(data);
++}
++
++static int interrupt_read(struct pci_dev *dev, int offset, u8 * value,
++ void *data)
++{
++ *value = (u8) dev->irq;
++
++ return 0;
++}
++
++static int bist_write(struct pci_dev *dev, int offset, u8 value, void *data)
++{
++ u8 cur_value;
++ int err;
++
++ err = pci_read_config_byte(dev, offset, &cur_value);
++ if (err)
++ goto out;
++
++ if ((cur_value & ~PCI_BIST_START) == (value & ~PCI_BIST_START)
++ || value == PCI_BIST_START)
++ err = pci_write_config_byte(dev, offset, value);
++
++ out:
++ return err;
++}
++
++static const struct config_field header_common[] = {
++ {
++ .offset = PCI_COMMAND,
++ .size = 2,
++ .u.w.read = pciback_read_config_word,
++ .u.w.write = command_write,
++ },
++ {
++ .offset = PCI_INTERRUPT_LINE,
++ .size = 1,
++ .u.b.read = interrupt_read,
++ },
++ {
++ .offset = PCI_INTERRUPT_PIN,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ /* Any side effects of letting driver domain control cache line? */
++ .offset = PCI_CACHE_LINE_SIZE,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = pciback_write_config_byte,
++ },
++ {
++ .offset = PCI_LATENCY_TIMER,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ },
++ {
++ .offset = PCI_BIST,
++ .size = 1,
++ .u.b.read = pciback_read_config_byte,
++ .u.b.write = bist_write,
++ },
++ {}
++};
++
++#define CFG_FIELD_BAR(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = bar_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = bar_write, \
++ }
++
++#define CFG_FIELD_ROM(reg_offset) \
++ { \
++ .offset = reg_offset, \
++ .size = 4, \
++ .init = rom_init, \
++ .reset = bar_reset, \
++ .release = bar_release, \
++ .u.dw.read = bar_read, \
++ .u.dw.write = rom_write, \
++ }
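++
++/*
++ * Together with bar_read/bar_write/rom_write above, these overlays
++ * give each BAR its usual sizing-probe semantics: a guest write of ~0
++ * (~PCI_ROM_ADDRESS_ENABLE for the expansion ROM) switches the field
++ * into length mode, so the following read returns the size mask
++ * captured at init time instead of the live address.
++ */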
++
++static const struct config_field header_0[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_2),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_3),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_4),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_5),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS),
++ {}
++};
++
++static const struct config_field header_1[] = {
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_0),
++ CFG_FIELD_BAR(PCI_BASE_ADDRESS_1),
++ CFG_FIELD_ROM(PCI_ROM_ADDRESS1),
++ {}
++};
++
++int pciback_config_header_add_fields(struct pci_dev *dev)
++{
++ int err;
++
++ err = pciback_config_add_fields(dev, header_common);
++ if (err)
++ goto out;
++
++ switch (dev->hdr_type) {
++ case PCI_HEADER_TYPE_NORMAL:
++ err = pciback_config_add_fields(dev, header_0);
++ break;
++
++ case PCI_HEADER_TYPE_BRIDGE:
++ err = pciback_config_add_fields(dev, header_1);
++ break;
++
++ default:
++ err = -EINVAL;
++ printk(KERN_ERR "pciback: %s: Unsupported header type %d!\n",
++ pci_name(dev), dev->hdr_type);
++ break;
++ }
++
++ out:
++ return err;
++}
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_quirks.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_quirks.c 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,126 @@
++/*
++ * PCI Backend - Handle special overlays for broken devices.
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Author: Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#include <linux/kernel.h>
++#include <linux/pci.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++LIST_HEAD(pciback_quirks);
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *tmp_quirk;
++
++ list_for_each_entry(tmp_quirk, &pciback_quirks, quirks_list)
++ if (pci_match_id(&tmp_quirk->devid, dev))
++ goto out;
++ tmp_quirk = NULL;
++ printk(KERN_DEBUG
++ "quirk didn't match any device pciback knows about\n");
++ out:
++ return tmp_quirk;
++}
++
++static inline void register_quirk(struct pciback_config_quirk *quirk)
++{
++ list_add_tail(&quirk->quirks_list, &pciback_quirks);
++}
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg)
++{
++ int ret = 0;
++ struct pciback_dev_data *dev_data = pci_get_drvdata(dev);
++ struct config_field_entry *cfg_entry;
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ if (OFFSET(cfg_entry) == reg) {
++ ret = 1;
++ break;
++ }
++ }
++ return ret;
++}
++
++int pciback_config_quirks_add_field(struct pci_dev *dev, struct config_field
++ *field)
++{
++ int err = 0;
++
++ switch (field->size) {
++ case 1:
++ field->u.b.read = pciback_read_config_byte;
++ field->u.b.write = pciback_write_config_byte;
++ break;
++ case 2:
++ field->u.w.read = pciback_read_config_word;
++ field->u.w.write = pciback_write_config_word;
++ break;
++ case 4:
++ field->u.dw.read = pciback_read_config_dword;
++ field->u.dw.write = pciback_write_config_dword;
++ break;
++ default:
++ err = -EINVAL;
++ goto out;
++ }
++
++ pciback_config_add_field(dev, field);
++
++ out:
++ return err;
++}
++
++int pciback_config_quirks_init(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = kzalloc(sizeof(*quirk), GFP_ATOMIC);
++ if (!quirk) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ quirk->devid.vendor = dev->vendor;
++ quirk->devid.device = dev->device;
++ quirk->devid.subvendor = dev->subsystem_vendor;
++ quirk->devid.subdevice = dev->subsystem_device;
++ quirk->devid.class = 0;
++ quirk->devid.class_mask = 0;
++ quirk->devid.driver_data = 0UL;
++
++ quirk->pdev = dev;
++
++ register_quirk(quirk);
++ out:
++ return ret;
++}
++
++void pciback_config_field_free(struct config_field *field)
++{
++ kfree(field);
++}
++
++int pciback_config_quirk_release(struct pci_dev *dev)
++{
++ struct pciback_config_quirk *quirk;
++ int ret = 0;
++
++ quirk = pciback_find_quirk(dev);
++ if (!quirk) {
++ ret = -ENXIO;
++ goto out;
++ }
++
++ list_del(&quirk->quirks_list);
++ kfree(quirk);
++
++ out:
++ return ret;
++}
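+
+A caller that wants exactly one quirk entry per device would pair the two
+helpers above like this (hypothetical helper, shown only to illustrate how
+pciback_find_quirk() and pciback_config_quirks_init() fit together):
+
+	static int demo_ensure_quirk(struct pci_dev *dev)
+	{
+		if (pciback_find_quirk(dev))
+			return 0;	/* already tracked */
+		return pciback_config_quirks_init(dev);
+	}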
+Index: head-2008-11-25/drivers/xen/pciback/conf_space_quirks.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/conf_space_quirks.h 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,35 @@
++/*
++ * PCI Backend - Data structures for special overlays for broken devices.
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++
++#ifndef __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++#define __XEN_PCIBACK_CONF_SPACE_QUIRKS_H__
++
++#include <linux/pci.h>
++#include <linux/list.h>
++
++struct pciback_config_quirk {
++ struct list_head quirks_list;
++ struct pci_device_id devid;
++ struct pci_dev *pdev;
++};
++
++struct pciback_config_quirk *pciback_find_quirk(struct pci_dev *dev);
++
++int pciback_config_quirks_add_field(struct pci_dev *dev,
++ struct config_field *field);
++
++int pciback_config_quirks_remove_field(struct pci_dev *dev, int reg);
++
++int pciback_config_quirks_init(struct pci_dev *dev);
++
++void pciback_config_field_free(struct config_field *field);
++
++int pciback_config_quirk_release(struct pci_dev *dev);
++
++int pciback_field_is_dup(struct pci_dev *dev, unsigned int reg);
++
++#endif
+Index: head-2008-11-25/drivers/xen/pciback/controller.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/controller.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,408 @@
++/*
++ * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
++ * Alex Williamson <alex.williamson@hp.com>
++ *
++ * PCI "Controller" Backend - virtualize PCI bus topology based on PCI
++ * controllers. Devices under the same PCI controller are exposed on the
++ * same virtual domain:bus. Within a bus, device slots are virtualized
++ * to compact the bus.
++ *
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
++ */
++
++#include <linux/acpi.h>
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++#define PCI_MAX_BUSSES 255
++#define PCI_MAX_SLOTS 32
++
++struct controller_dev_entry {
++ struct list_head list;
++ struct pci_dev *dev;
++ unsigned int devfn;
++};
++
++struct controller_list_entry {
++ struct list_head list;
++ struct pci_controller *controller;
++ unsigned int domain;
++ unsigned int bus;
++ unsigned int next_devfn;
++ struct list_head dev_list;
++};
++
++struct controller_dev_data {
++ struct list_head list;
++ unsigned int next_domain;
++ unsigned int next_bus;
++ spinlock_t lock;
++};
++
++struct walk_info {
++ struct pciback_device *pdev;
++ int resource_count;
++ int root_num;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_dev_entry *dev_entry;
++ struct controller_list_entry *cntrl_entry;
++ struct pci_dev *dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->domain != domain ||
++ cntrl_entry->bus != bus)
++ continue;
++
++ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++ if (devfn == dev_entry->devfn) {
++ dev = dev_entry->dev;
++ goto found;
++ }
++ }
++ }
++found:
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_dev_entry *dev_entry;
++ struct controller_list_entry *cntrl_entry;
++ struct pci_controller *dev_controller = PCI_CONTROLLER(dev);
++ unsigned long flags;
++ int ret = 0, found = 0;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ /* Look to see if we already have a domain:bus for this controller */
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->controller == dev_controller) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found) {
++ cntrl_entry = kmalloc(sizeof(*cntrl_entry), GFP_ATOMIC);
++ if (!cntrl_entry) {
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ cntrl_entry->controller = dev_controller;
++ cntrl_entry->next_devfn = PCI_DEVFN(0, 0);
++
++ cntrl_entry->domain = dev_data->next_domain;
++ cntrl_entry->bus = dev_data->next_bus++;
++ if (dev_data->next_bus > PCI_MAX_BUSSES) {
++ dev_data->next_domain++;
++ dev_data->next_bus = 0;
++ }
++
++ INIT_LIST_HEAD(&cntrl_entry->dev_list);
++
++ list_add_tail(&cntrl_entry->list, &dev_data->list);
++ }
++
++ if (PCI_SLOT(cntrl_entry->next_devfn) > PCI_MAX_SLOTS) {
++ /*
++ * While it seems unlikely, this can actually happen if
++ * a controller has P2P bridges under it.
++ */
++ xenbus_dev_fatal(pdev->xdev, -ENOSPC, "Virtual bus %04x:%02x "
++ "is full, no room to export %04x:%02x:%02x.%x",
++ cntrl_entry->domain, cntrl_entry->bus,
++ pci_domain_nr(dev->bus), dev->bus->number,
++ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++ ret = -ENOSPC;
++ goto out;
++ }
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_ATOMIC);
++ if (!dev_entry) {
++ if (list_empty(&cntrl_entry->dev_list)) {
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++ ret = -ENOMEM;
++ goto out;
++ }
++
++ dev_entry->dev = dev;
++ dev_entry->devfn = cntrl_entry->next_devfn;
++
++ list_add_tail(&dev_entry->list, &cntrl_entry->dev_list);
++
++ cntrl_entry->next_devfn += PCI_DEVFN(1, 0);
++
++out:
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ /* TODO: Publish virtual domain:bus:slot.func here. */
++
++ return ret;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry;
++ struct controller_dev_entry *dev_entry = NULL;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ if (cntrl_entry->controller != PCI_CONTROLLER(dev))
++ continue;
++
++ list_for_each_entry(dev_entry, &cntrl_entry->dev_list, list) {
++ if (dev_entry->dev == dev) {
++ found_dev = dev_entry->dev;
++ break;
++ }
++ }
++ }
++
++ if (!found_dev) {
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++ return;
++ }
++
++ list_del(&dev_entry->list);
++ kfree(dev_entry);
++
++ if (list_empty(&cntrl_entry->dev_list)) {
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ struct controller_dev_data *dev_data;
++
++ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++ if (!dev_data)
++ return -ENOMEM;
++
++ spin_lock_init(&dev_data->lock);
++
++ INIT_LIST_HEAD(&dev_data->list);
++
++ /* Starting domain:bus numbers */
++ dev_data->next_domain = 0;
++ dev_data->next_bus = 0;
++
++ pdev->pci_dev_data = dev_data;
++
++ return 0;
++}
++
++static acpi_status write_xenbus_resource(struct acpi_resource *res, void *data)
++{
++ struct walk_info *info = data;
++ struct acpi_resource_address64 addr;
++ acpi_status status;
++ int i, len, err;
++ char str[32], tmp[3];
++ unsigned char *ptr, *buf;
++
++ status = acpi_resource_to_address64(res, &addr);
++
++ /* Do we care about this range? Let's check. */
++ if (!ACPI_SUCCESS(status) ||
++ !(addr.resource_type == ACPI_MEMORY_RANGE ||
++ addr.resource_type == ACPI_IO_RANGE) ||
++ !addr.address_length || addr.producer_consumer != ACPI_PRODUCER)
++ return AE_OK;
++
++ /*
++ * Furthermore, we really only care to tell the guest about
++ * address ranges that require address translation of some sort.
++ */
++ if (!(addr.resource_type == ACPI_MEMORY_RANGE &&
++ addr.info.mem.translation) &&
++ !(addr.resource_type == ACPI_IO_RANGE &&
++ addr.info.io.translation))
++ return AE_OK;
++
++ /* Store the resource in xenbus for the guest */
++ len = snprintf(str, sizeof(str), "root-%d-resource-%d",
++ info->root_num, info->resource_count);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return AE_OK;
++
++ buf = kzalloc((sizeof(*res) * 2) + 1, GFP_KERNEL);
++ if (!buf)
++ return AE_OK;
++
++ /* Clean out resource_source */
++ res->data.address64.resource_source.index = 0xFF;
++ res->data.address64.resource_source.string_length = 0;
++ res->data.address64.resource_source.string_ptr = NULL;
++
++ ptr = (unsigned char *)res;
++
++ /* Turn the acpi_resource into an ASCII byte stream */
++ for (i = 0; i < sizeof(*res); i++) {
++ snprintf(tmp, sizeof(tmp), "%02x", ptr[i]);
++ strncat(buf, tmp, 2);
++ }
++
++ err = xenbus_printf(XBT_NIL, info->pdev->xdev->nodename,
++ str, "%s", buf);
++
++ if (!err)
++ info->resource_count++;
++
++ kfree(buf);
++
++ return AE_OK;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_root_cb)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry;
++ int i, root_num, len, err = 0;
++ unsigned int domain, bus;
++ char str[64];
++ struct walk_info info;
++
++ spin_lock(&dev_data->lock);
++
++ list_for_each_entry(cntrl_entry, &dev_data->list, list) {
++ /* First publish all the domain:bus info */
++ err = publish_root_cb(pdev, cntrl_entry->domain,
++ cntrl_entry->bus);
++ if (err)
++ goto out;
++
++ /*
++ * Now figure out which root-%d this belongs to
++ * so we can associate resources with it.
++ */
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", &root_num);
++
++ if (err != 1)
++ goto out;
++
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ str, "%x:%x", &domain, &bus);
++ if (err != 2)
++ goto out;
++
++ /* Is this the one we just published? */
++ if (domain == cntrl_entry->domain &&
++ bus == cntrl_entry->bus)
++ break;
++ }
++
++ if (i == root_num)
++ goto out;
++
++ info.pdev = pdev;
++ info.resource_count = 0;
++ info.root_num = i;
++
++ /* Let ACPI do the heavy lifting on decoding resources */
++ acpi_walk_resources(cntrl_entry->controller->acpi_handle,
++ METHOD_NAME__CRS, write_xenbus_resource,
++ &info);
++
++ /* No resources. OK. On to the next one. */
++ if (!info.resource_count)
++ continue;
++
++ /* Store the number of resources we wrote for this root-%d */
++ len = snprintf(str, sizeof(str), "root-%d-resources", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%d", info.resource_count);
++ if (err)
++ goto out;
++ }
++
++ /* Finally, write some magic to synchronize with the guest. */
++ len = snprintf(str, sizeof(str), "root-resource-magic");
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%lx", (sizeof(struct acpi_resource) * 2) + 1);
++
++out:
++ spin_unlock(&dev_data->lock);
++
++ return err;
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ struct controller_dev_data *dev_data = pdev->pci_dev_data;
++ struct controller_list_entry *cntrl_entry, *c;
++ struct controller_dev_entry *dev_entry, *d;
++
++ list_for_each_entry_safe(cntrl_entry, c, &dev_data->list, list) {
++ list_for_each_entry_safe(dev_entry, d,
++ &cntrl_entry->dev_list, list) {
++ list_del(&dev_entry->list);
++ pcistub_put_pci_dev(dev_entry->dev);
++ kfree(dev_entry);
++ }
++ list_del(&cntrl_entry->list);
++ kfree(cntrl_entry);
++ }
++
++ kfree(dev_data);
++ pdev->pci_dev_data = NULL;
++}
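+
+The resources are exported as plain hex strings, two characters per byte of
+the raw struct acpi_resource. A consumer of these xenbus nodes (the frontend
+decoder is not part of this patch) would invert the transform along these
+lines (sketch):
+
+	static void demo_decode_resource(const char *hex, unsigned char *out,
+					 size_t n)
+	{
+		size_t i;
+		unsigned int byte;
+
+		for (i = 0; i < n; i++) {
+			sscanf(hex + 2 * i, "%02x", &byte);
+			out[i] = byte;	/* two hex chars per byte */
+		}
+	}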
+Index: head-2008-11-25/drivers/xen/pciback/passthrough.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/passthrough.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,166 @@
++/*
++ * PCI Backend - Provides restricted access to the real PCI bus topology
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++struct passthrough_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list;
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ struct pci_dev *dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus)
++ && bus == (unsigned int)dev_entry->dev->bus->number
++ && devfn == dev_entry->dev->devfn) {
++ dev = dev_entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry;
++ unsigned long flags;
++ unsigned int domain, bus, devfn;
++ int err;
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry)
++ return -ENOMEM;
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++ list_add_tail(&dev_entry->list, &dev_data->dev_list);
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ /* Publish this device. */
++ domain = (unsigned int)pci_domain_nr(dev->bus);
++ bus = (unsigned int)dev->bus->number;
++ devfn = dev->devfn;
++ err = publish_cb(pdev, domain, bus, devfn, devid);
++
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&dev_data->lock, flags);
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ if (dev_entry->dev == dev) {
++ list_del(&dev_entry->list);
++ found_dev = dev_entry->dev;
++ kfree(dev_entry);
++ }
++ }
++
++ spin_unlock_irqrestore(&dev_data->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data;
++
++ dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL);
++ if (!dev_data)
++ return -ENOMEM;
++
++ spin_lock_init(&dev_data->lock);
++
++ INIT_LIST_HEAD(&dev_data->dev_list);
++
++ pdev->pci_dev_data = dev_data;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_root_cb)
++{
++ int err = 0;
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *e;
++ struct pci_dev *dev;
++ int found;
++ unsigned int domain, bus;
++
++ spin_lock(&dev_data->lock);
++
++ list_for_each_entry(dev_entry, &dev_data->dev_list, list) {
++ /* Only publish this device as a root if none of its
++ * parent bridges are exported
++ */
++ found = 0;
++ dev = dev_entry->dev->bus->self;
++ for (; !found && dev != NULL; dev = dev->bus->self) {
++ list_for_each_entry(e, &dev_data->dev_list, list) {
++ if (dev == e->dev) {
++ found = 1;
++ break;
++ }
++ }
++ }
++
++ domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus);
++ bus = (unsigned int)dev_entry->dev->bus->number;
++
++ if (!found) {
++ err = publish_root_cb(pdev, domain, bus);
++ if (err)
++ break;
++ }
++ }
++
++ spin_unlock(&dev_data->lock);
++
++ return err;
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ struct passthrough_dev_data *dev_data = pdev->pci_dev_data;
++ struct pci_dev_entry *dev_entry, *t;
++
++ list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) {
++ list_del(&dev_entry->list);
++ pcistub_put_pci_dev(dev_entry->dev);
++ kfree(dev_entry);
++ }
++
++ kfree(dev_data);
++ pdev->pci_dev_data = NULL;
++}
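+
+The nested loop in pciback_publish_pci_roots() asks "is any ancestor bridge
+of this device itself exported?". Pulled out as a standalone predicate, it
+would read like this (hypothetical refactoring, behavior unchanged):
+
+	static int demo_ancestor_exported(struct passthrough_dev_data *dev_data,
+					  struct pci_dev *dev)
+	{
+		struct pci_dev_entry *e;
+
+		for (dev = dev->bus->self; dev != NULL; dev = dev->bus->self)
+			list_for_each_entry(e, &dev_data->dev_list, list)
+				if (dev == e->dev)
+					return 1;
+		return 0;
+	}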
+Index: head-2008-11-25/drivers/xen/pciback/pci_stub.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/pci_stub.c 2008-10-29 09:55:56.000000000 +0100
+@@ -0,0 +1,948 @@
++/*
++ * PCI Stub Driver - Grabs devices in backend to be exported later
++ *
++ * Ryan Wilson <hap9@epoch.ncsc.mil>
++ * Chris Bookholt <hap10@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/kref.h>
++#include <asm/atomic.h>
++#include "pciback.h"
++#include "conf_space.h"
++#include "conf_space_quirks.h"
++
++static char *pci_devs_to_hide = NULL;
++module_param_named(hide, pci_devs_to_hide, charp, 0444);
++
++struct pcistub_device_id {
++ struct list_head slot_list;
++ int domain;
++ unsigned char bus;
++ unsigned int devfn;
++};
++static LIST_HEAD(pcistub_device_ids);
++static DEFINE_SPINLOCK(device_ids_lock);
++
++struct pcistub_device {
++ struct kref kref;
++ struct list_head dev_list;
++ spinlock_t lock;
++
++ struct pci_dev *dev;
++ struct pciback_device *pdev; /* non-NULL if struct pci_dev is in use */
++};
++
++/* Access to pcistub_devices & seized_devices lists and the initialize_devices
++ * flag must be locked with pcistub_devices_lock
++ */
++static DEFINE_SPINLOCK(pcistub_devices_lock);
++static LIST_HEAD(pcistub_devices);
++
++/* wait for device_initcall before initializing our devices
++ * (see pcistub_init_devices_late)
++ */
++static int initialize_devices = 0;
++static LIST_HEAD(seized_devices);
++
++static struct pcistub_device *pcistub_device_alloc(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++
++ dev_dbg(&dev->dev, "pcistub_device_alloc\n");
++
++ psdev = kzalloc(sizeof(*psdev), GFP_ATOMIC);
++ if (!psdev)
++ return NULL;
++
++ psdev->dev = pci_dev_get(dev);
++ if (!psdev->dev) {
++ kfree(psdev);
++ return NULL;
++ }
++
++ kref_init(&psdev->kref);
++ spin_lock_init(&psdev->lock);
++
++ return psdev;
++}
++
++/* Don't call this directly as it's called by pcistub_device_put */
++static void pcistub_device_release(struct kref *kref)
++{
++ struct pcistub_device *psdev;
++
++ psdev = container_of(kref, struct pcistub_device, kref);
++
++ dev_dbg(&psdev->dev->dev, "pcistub_device_release\n");
++
++ /* Clean-up the device */
++ pciback_reset_device(psdev->dev);
++ pciback_config_free_dyn_fields(psdev->dev);
++ pciback_config_free_dev(psdev->dev);
++ kfree(pci_get_drvdata(psdev->dev));
++ pci_set_drvdata(psdev->dev, NULL);
++
++ pci_dev_put(psdev->dev);
++
++ kfree(psdev);
++}
++
++static inline void pcistub_device_get(struct pcistub_device *psdev)
++{
++ kref_get(&psdev->kref);
++}
++
++static inline void pcistub_device_put(struct pcistub_device *psdev)
++{
++ kref_put(&psdev->kref, pcistub_device_release);
++}
++
++static struct pcistub_device *pcistub_device_find(int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ pcistub_device_get(psdev);
++ goto out;
++ }
++ }
++
++ /* didn't find it */
++ psdev = NULL;
++
++ out:
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return psdev;
++}
++
++static struct pci_dev *pcistub_device_get_pci_dev(struct pciback_device *pdev,
++ struct pcistub_device *psdev)
++{
++ struct pci_dev *pci_dev = NULL;
++ unsigned long flags;
++
++ pcistub_device_get(psdev);
++
++ spin_lock_irqsave(&psdev->lock, flags);
++ if (!psdev->pdev) {
++ psdev->pdev = pdev;
++ pci_dev = psdev->dev;
++ }
++ spin_unlock_irqrestore(&psdev->lock, flags);
++
++ if (!pci_dev)
++ pcistub_device_put(psdev);
++
++ return pci_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev != NULL
++ && domain == pci_domain_nr(psdev->dev->bus)
++ && bus == psdev->dev->bus->number
++ && PCI_DEVFN(slot, func) == psdev->dev->devfn) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_dev = pcistub_device_get_pci_dev(pdev, psdev);
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return found_dev;
++}
++
++void pcistub_put_pci_dev(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* Cleanup our device
++ * (so it's ready for the next domain)
++ */
++ pciback_reset_device(found_psdev->dev);
++ pciback_config_free_dyn_fields(found_psdev->dev);
++ pciback_config_reset_dev(found_psdev->dev);
++
++ spin_lock_irqsave(&found_psdev->lock, flags);
++ found_psdev->pdev = NULL;
++ spin_unlock_irqrestore(&found_psdev->lock, flags);
++
++ pcistub_device_put(found_psdev);
++}
++
++static int __devinit pcistub_match_one(struct pci_dev *dev,
++ struct pcistub_device_id *pdev_id)
++{
++ /* Match the specified device by domain, bus, slot, func and also if
++ * any of the device's parent bridges match.
++ */
++ for (; dev != NULL; dev = dev->bus->self) {
++ if (pci_domain_nr(dev->bus) == pdev_id->domain
++ && dev->bus->number == pdev_id->bus
++ && dev->devfn == pdev_id->devfn)
++ return 1;
++
++ /* Sometimes topmost bridge links to itself. */
++ if (dev == dev->bus->self)
++ break;
++ }
++
++ return 0;
++}
++
++static int __devinit pcistub_match(struct pci_dev *dev)
++{
++ struct pcistub_device_id *pdev_id;
++ unsigned long flags;
++ int found = 0;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pdev_id, &pcistub_device_ids, slot_list) {
++ if (pcistub_match_one(dev, pdev_id)) {
++ found = 1;
++ break;
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return found;
++}
++
++static int __devinit pcistub_init_device(struct pci_dev *dev)
++{
++ struct pciback_dev_data *dev_data;
++ int err = 0;
++
++ dev_dbg(&dev->dev, "initializing...\n");
++
++ /* The PCI backend is not intended to be a module (or to work with
++ * removable PCI devices, yet). If it were, pciback_config_free()
++ * would need to be called somewhere to free the memory allocated
++ * here and then to call kfree(pci_get_drvdata(psdev->dev)).
++ */
++ dev_data = kzalloc(sizeof(*dev_data), GFP_ATOMIC);
++ if (!dev_data) {
++ err = -ENOMEM;
++ goto out;
++ }
++ pci_set_drvdata(dev, dev_data);
++
++ dev_dbg(&dev->dev, "initializing config\n");
++ err = pciback_config_init_dev(dev);
++ if (err)
++ goto out;
++
++ /* HACK: Force device (& ACPI) to determine what IRQ it's on - we
++ * must do this here because pcibios_enable_device may specify
++ * the pci device's true irq (and possibly its other resources)
++ * if they differ from what's in the configuration space.
++ * This makes the assumption that the device's resources won't
++ * change after this point (otherwise this code may break!)
++ */
++ dev_dbg(&dev->dev, "enabling device\n");
++ err = pci_enable_device(dev);
++ if (err)
++ goto config_release;
++
++ /* Now disable the device (this also ensures some private device
++ * data is set up before we export)
++ */
++ dev_dbg(&dev->dev, "reset device\n");
++ pciback_reset_device(dev);
++
++ return 0;
++
++ config_release:
++ pciback_config_free_dev(dev);
++
++ out:
++ pci_set_drvdata(dev, NULL);
++ kfree(dev_data);
++ return err;
++}
++
++/*
++ * Because some initialization still happens on
++ * devices during fs_initcall, we need to defer
++ * full initialization of our devices until
++ * device_initcall.
++ */
++static int __init pcistub_init_devices_late(void)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ pr_debug("pciback: pcistub_init_devices_late\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ while (!list_empty(&seized_devices)) {
++ psdev = container_of(seized_devices.next,
++ struct pcistub_device, dev_list);
++ list_del(&psdev->dev_list);
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ err = pcistub_init_device(psdev->dev);
++ if (err) {
++ dev_err(&psdev->dev->dev,
++ "error %d initializing device\n", err);
++ kfree(psdev);
++ psdev = NULL;
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (psdev)
++ list_add_tail(&psdev->dev_list, &pcistub_devices);
++ }
++
++ initialize_devices = 1;
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ return 0;
++}
++
++static int __devinit pcistub_seize(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++ unsigned long flags;
++ int err = 0;
++
++ psdev = pcistub_device_alloc(dev);
++ if (!psdev)
++ return -ENOMEM;
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (initialize_devices) {
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* don't want irqs disabled when calling pcistub_init_device */
++ err = pcistub_init_device(psdev->dev);
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ if (!err)
++ list_add(&psdev->dev_list, &pcistub_devices);
++ } else {
++ dev_dbg(&dev->dev, "deferring initialization\n");
++ list_add(&psdev->dev_list, &seized_devices);
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (err)
++ pcistub_device_put(psdev);
++
++ return err;
++}
++
++static int __devinit pcistub_probe(struct pci_dev *dev,
++ const struct pci_device_id *id)
++{
++ int err = 0;
++
++ dev_dbg(&dev->dev, "probing...\n");
++
++ if (pcistub_match(dev)) {
++
++ if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL
++ && dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
++ dev_err(&dev->dev, "can't export pci devices that "
++ "don't have a normal (0) or bridge (1) "
++ "header type!\n");
++ err = -ENODEV;
++ goto out;
++ }
++
++ dev_info(&dev->dev, "seizing device\n");
++ err = pcistub_seize(dev);
++ } else
++ /* Device not on the seize list */
++ err = -ENODEV;
++
++ out:
++ return err;
++}
++
++static void pcistub_remove(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev, *found_psdev = NULL;
++ unsigned long flags;
++
++ dev_dbg(&dev->dev, "removing\n");
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++
++ pciback_config_quirk_release(dev);
++
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (psdev->dev == dev) {
++ found_psdev = psdev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ if (found_psdev) {
++ dev_dbg(&dev->dev, "found device to remove - in use? %p\n",
++ found_psdev->pdev);
++
++ if (found_psdev->pdev) {
++ printk(KERN_WARNING "pciback: ****** removing device "
++ "%s while still in-use! ******\n",
++ pci_name(found_psdev->dev));
++ printk(KERN_WARNING "pciback: ****** driver domain may "
++ "still access this device's i/o resources!\n");
++ printk(KERN_WARNING "pciback: ****** shutdown driver "
++ "domain before binding device\n");
++ printk(KERN_WARNING "pciback: ****** to other drivers "
++ "or domains\n");
++
++ pciback_release_pci_dev(found_psdev->pdev,
++ found_psdev->dev);
++ }
++
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_del(&found_psdev->dev_list);
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++
++ /* the final put for releasing from the list */
++ pcistub_device_put(found_psdev);
++ }
++}
++
++static const struct pci_device_id pcistub_ids[] = {
++ {
++ .vendor = PCI_ANY_ID,
++ .device = PCI_ANY_ID,
++ .subvendor = PCI_ANY_ID,
++ .subdevice = PCI_ANY_ID,
++ },
++ {0,},
++};
++
++/*
++ * Note: There is no MODULE_DEVICE_TABLE entry here because this isn't
++ * for a normal device. I don't want it to be loaded automatically.
++ */
++
++static struct pci_driver pciback_pci_driver = {
++ .name = "pciback",
++ .id_table = pcistub_ids,
++ .probe = pcistub_probe,
++ .remove = pcistub_remove,
++};
++
++static inline int str_to_slot(const char *buf, int *domain, int *bus,
++ int *slot, int *func)
++{
++ int err;
++
++ err = sscanf(buf, " %x:%x:%x.%x", domain, bus, slot, func);
++ if (err == 4)
++ return 0;
++ else if (err < 0)
++ return -EINVAL;
++
++ /* try again without domain */
++ *domain = 0;
++ err = sscanf(buf, " %x:%x.%x", bus, slot, func);
++ if (err == 3)
++ return 0;
++
++ return -EINVAL;
++}
++
++static inline int str_to_quirk(const char *buf, int *domain, int *bus,
++ int *slot, int *func, int *reg, int *size, int *mask)
++{
++ int err;
++
++ err =
++ sscanf(buf, " %04x:%02x:%02x.%1x-%08x:%1x:%08x", domain, bus, slot,
++ func, reg, size, mask);
++ if (err == 7)
++ return 0;
++ return -EINVAL;
++}
++
++static int pcistub_device_id_add(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id;
++ unsigned long flags;
++
++ pci_dev_id = kmalloc(sizeof(*pci_dev_id), GFP_KERNEL);
++ if (!pci_dev_id)
++ return -ENOMEM;
++
++ pci_dev_id->domain = domain;
++ pci_dev_id->bus = bus;
++ pci_dev_id->devfn = PCI_DEVFN(slot, func);
++
++ pr_debug("pciback: wants to seize %04x:%02x:%02x.%01x\n",
++ domain, bus, slot, func);
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_add_tail(&pci_dev_id->slot_list, &pcistub_device_ids);
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return 0;
++}
++
++static int pcistub_device_id_remove(int domain, int bus, int slot, int func)
++{
++ struct pcistub_device_id *pci_dev_id, *t;
++ int devfn = PCI_DEVFN(slot, func);
++ int err = -ENOENT;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry_safe(pci_dev_id, t, &pcistub_device_ids, slot_list) {
++
++ if (pci_dev_id->domain == domain
++ && pci_dev_id->bus == bus && pci_dev_id->devfn == devfn) {
++ /* Don't break; here because it's possible the same
++ * slot could be in the list more than once
++ */
++ list_del(&pci_dev_id->slot_list);
++ kfree(pci_dev_id);
++
++ err = 0;
++
++ pr_debug("pciback: removed %04x:%02x:%02x.%01x from "
++ "seize list\n", domain, bus, slot, func);
++ }
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return err;
++}
++
++static int pcistub_reg_add(int domain, int bus, int slot, int func, int reg,
++ int size, int mask)
++{
++ int err = 0;
++ struct pcistub_device *psdev;
++ struct pci_dev *dev;
++ struct config_field *field;
++
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev || !psdev->dev) {
++ err = -ENODEV;
++ goto out;
++ }
++ dev = psdev->dev;
++
++ field = kzalloc(sizeof(*field), GFP_ATOMIC);
++ if (!field) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ field->offset = reg;
++ field->size = size;
++ field->mask = mask;
++ field->init = NULL;
++ field->reset = NULL;
++ field->release = NULL;
++ field->clean = pciback_config_field_free;
++
++ err = pciback_config_quirks_add_field(dev, field);
++ if (err)
++ kfree(field);
++ out:
++ return err;
++}
++
++static ssize_t pcistub_slot_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(new_slot, S_IWUSR, NULL, pcistub_slot_add);
++
++static ssize_t pcistub_slot_remove(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++
++ err = pcistub_device_id_remove(domain, bus, slot, func);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++DRIVER_ATTR(remove_slot, S_IWUSR, NULL, pcistub_slot_remove);
++
++static ssize_t pcistub_slot_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device_id *pci_dev_id;
++ size_t count = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(pci_dev_id, &pcistub_device_ids, slot_list) {
++ if (count >= PAGE_SIZE)
++ break;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%04x:%02x:%02x.%01x\n",
++ pci_dev_id->domain, pci_dev_id->bus,
++ PCI_SLOT(pci_dev_id->devfn),
++ PCI_FUNC(pci_dev_id->devfn));
++ }
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(slots, S_IRUSR, pcistub_slot_show, NULL);
++
++static ssize_t pcistub_quirk_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func, reg, size, mask;
++ int err;
++
++ err = str_to_quirk(buf, &domain, &bus, &slot, &func, &reg, &size,
++ &mask);
++ if (err)
++ goto out;
++
++ err = pcistub_reg_add(domain, bus, slot, func, reg, size, mask);
++
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t pcistub_quirk_show(struct device_driver *drv, char *buf)
++{
++ int count = 0;
++ unsigned long flags;
++ extern struct list_head pciback_quirks;
++ struct pciback_config_quirk *quirk;
++ struct pciback_dev_data *dev_data;
++ const struct config_field *field;
++ const struct config_field_entry *cfg_entry;
++
++ spin_lock_irqsave(&device_ids_lock, flags);
++ list_for_each_entry(quirk, &pciback_quirks, quirks_list) {
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "%02x:%02x.%01x\n\t%04x:%04x:%04x:%04x\n",
++ quirk->pdev->bus->number,
++ PCI_SLOT(quirk->pdev->devfn),
++ PCI_FUNC(quirk->pdev->devfn),
++ quirk->devid.vendor, quirk->devid.device,
++ quirk->devid.subvendor,
++ quirk->devid.subdevice);
++
++ dev_data = pci_get_drvdata(quirk->pdev);
++
++ list_for_each_entry(cfg_entry, &dev_data->config_fields, list) {
++ field = cfg_entry->field;
++ if (count >= PAGE_SIZE)
++ goto out;
++
++ count += scnprintf(buf + count, PAGE_SIZE - count,
++ "\t\t%08x:%01x:%08x\n",
++ cfg_entry->base_offset + field->offset,
++ field->size, field->mask);
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&device_ids_lock, flags);
++
++ return count;
++}
++
++DRIVER_ATTR(quirks, S_IRUSR | S_IWUSR, pcistub_quirk_show, pcistub_quirk_add);
++
++static ssize_t permissive_add(struct device_driver *drv, const char *buf,
++ size_t count)
++{
++ int domain, bus, slot, func;
++ int err;
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ err = str_to_slot(buf, &domain, &bus, &slot, &func);
++ if (err)
++ goto out;
++ psdev = pcistub_device_find(domain, bus, slot, func);
++ if (!psdev) {
++ err = -ENODEV;
++ goto out;
++ }
++ if (!psdev->dev) {
++ err = -ENODEV;
++ goto release;
++ }
++ dev_data = pci_get_drvdata(psdev->dev);
++ /* the driver data for a device should never be null at this point */
++ if (!dev_data) {
++ err = -ENXIO;
++ goto release;
++ }
++ if (!dev_data->permissive) {
++ dev_data->permissive = 1;
++ /* Let user know that what they're doing could be unsafe */
++ dev_warn(&psdev->dev->dev,
++ "enabling permissive mode configuration space accesses!\n");
++ dev_warn(&psdev->dev->dev,
++ "permissive mode is potentially unsafe!\n");
++ }
++ release:
++ pcistub_device_put(psdev);
++ out:
++ if (!err)
++ err = count;
++ return err;
++}
++
++static ssize_t permissive_show(struct device_driver *drv, char *buf)
++{
++ struct pcistub_device *psdev;
++ struct pciback_dev_data *dev_data;
++ size_t count = 0;
++ unsigned long flags;
++ spin_lock_irqsave(&pcistub_devices_lock, flags);
++ list_for_each_entry(psdev, &pcistub_devices, dev_list) {
++ if (count >= PAGE_SIZE)
++ break;
++ if (!psdev->dev)
++ continue;
++ dev_data = pci_get_drvdata(psdev->dev);
++ if (!dev_data || !dev_data->permissive)
++ continue;
++ count +=
++ scnprintf(buf + count, PAGE_SIZE - count, "%s\n",
++ pci_name(psdev->dev));
++ }
++ spin_unlock_irqrestore(&pcistub_devices_lock, flags);
++ return count;
++}
++
++DRIVER_ATTR(permissive, S_IRUSR | S_IWUSR, permissive_show, permissive_add);
++
++#ifdef CONFIG_PCI_MSI
++
++int pciback_get_owner(struct pci_dev *dev)
++{
++ struct pcistub_device *psdev;
++
++ psdev = pcistub_device_find(pci_domain_nr(dev->bus), dev->bus->number,
++ PCI_SLOT(dev->devfn), PCI_FUNC(dev->devfn));
++
++ if (!psdev || !psdev->pdev)
++ return -1;
++
++ return psdev->pdev->xdev->otherend_id;
++}
++#endif
++
++static void pcistub_exit(void)
++{
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_new_slot);
++ driver_remove_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_slots);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_quirks);
++ driver_remove_file(&pciback_pci_driver.driver, &driver_attr_permissive);
++
++ pci_unregister_driver(&pciback_pci_driver);
++ WARN_ON(unregister_msi_get_owner(pciback_get_owner));
++}
++
++static int __init pcistub_init(void)
++{
++ int pos = 0;
++ int err = 0;
++ int domain, bus, slot, func;
++ int parsed;
++
++ if (pci_devs_to_hide && *pci_devs_to_hide) {
++ do {
++ parsed = 0;
++
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x:%x.%x) %n",
++ &domain, &bus, &slot, &func, &parsed);
++ if (err != 4) {
++ domain = 0;
++ err = sscanf(pci_devs_to_hide + pos,
++ " (%x:%x.%x) %n",
++ &bus, &slot, &func, &parsed);
++ if (err != 3)
++ goto parse_error;
++ }
++
++ err = pcistub_device_id_add(domain, bus, slot, func);
++ if (err)
++ goto out;
++
++ /* if parsed <= 0, we've reached the end of the string */
++ pos += parsed;
++ } while (parsed > 0 && pci_devs_to_hide[pos]);
++ }
++
++ /* If we're the first PCI Device Driver to register, we're the
++ * first one to get offered PCI devices as they become
++ * available (and thus we can be the first to grab them)
++ */
++ err = pci_register_driver(&pciback_pci_driver);
++ if (err < 0)
++ goto out;
++
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_new_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_remove_slot);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_slots);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_quirks);
++ if (!err)
++ err = driver_create_file(&pciback_pci_driver.driver,
++ &driver_attr_permissive);
++
++ if (!err)
++ err = register_msi_get_owner(pciback_get_owner);
++ if (err)
++ pcistub_exit();
++
++ out:
++ return err;
++
++ parse_error:
++ printk(KERN_ERR "pciback: Error parsing pci_devs_to_hide at \"%s\"\n",
++ pci_devs_to_hide + pos);
++ return -EINVAL;
++}
++
++#ifndef MODULE
++/*
++ * fs_initcall happens before device_initcall
++ * so pciback *should* get called first (b/c we
++ * want to suck up any device before other drivers
++ * get a chance by being the first pci device
++ * driver to register)
++ */
++fs_initcall(pcistub_init);
++#endif
++
++static int __init pciback_init(void)
++{
++ int err;
++
++ err = pciback_config_init();
++ if (err)
++ return err;
++
++#ifdef MODULE
++ err = pcistub_init();
++ if (err < 0)
++ return err;
++#endif
++
++ pcistub_init_devices_late();
++ err = pciback_xenbus_register();
++ if (err)
++ pcistub_exit();
++
++ return err;
++}
++
++static void __exit pciback_cleanup(void)
++{
++ pciback_xenbus_unregister();
++ pcistub_exit();
++}
++
++module_init(pciback_init);
++module_exit(pciback_cleanup);
++
++MODULE_LICENSE("Dual BSD/GPL");
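+
+With the driver attributes above in place, devices are handed to pciback
+either at boot via the module parameter parsed in pcistub_init(), e.g.
+pciback.hide=(0000:00:19.0)(0000:01:00.0), or at runtime through sysfs. A
+minimal user-space example of the latter (sketch; path assumes sysfs is
+mounted at /sys, error handling trimmed):
+
+	#include <fcntl.h>
+	#include <string.h>
+	#include <unistd.h>
+
+	int main(void)
+	{
+		const char slot[] = "0000:00:19.0";	/* parsed by str_to_slot() */
+		int fd = open("/sys/bus/pci/drivers/pciback/new_slot", O_WRONLY);
+
+		if (fd < 0 || write(fd, slot, strlen(slot)) < 0)
+			return 1;
+		close(fd);
+		return 0;
+	}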
+Index: head-2008-11-25/drivers/xen/pciback/pciback.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/pciback.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,111 @@
++/*
++ * PCI Backend Common Data Structures & Function Declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIBACK_H__
++#define __XEN_PCIBACK_H__
++
++#include <linux/pci.h>
++#include <linux/interrupt.h>
++#include <xen/xenbus.h>
++#include <linux/list.h>
++#include <linux/spinlock.h>
++#include <linux/workqueue.h>
++#include <asm/atomic.h>
++#include <xen/interface/io/pciif.h>
++
++struct pci_dev_entry {
++ struct list_head list;
++ struct pci_dev *dev;
++};
++
++#define _PDEVF_op_active (0)
++#define PDEVF_op_active (1<<(_PDEVF_op_active))
++
++struct pciback_device {
++ void *pci_dev_data;
++ spinlock_t dev_lock;
++
++ struct xenbus_device *xdev;
++
++ struct xenbus_watch be_watch;
++ u8 be_watching;
++
++ int evtchn_irq;
++
++ struct vm_struct *sh_area;
++ struct xen_pci_sharedinfo *sh_info;
++
++ unsigned long flags;
++
++ struct work_struct op_work;
++};
++
++struct pciback_dev_data {
++ struct list_head config_fields;
++ int permissive;
++ int warned_on_write;
++};
++
++/* Get/Put PCI Devices that are hidden from the PCI Backend Domain */
++struct pci_dev *pcistub_get_pci_dev_by_slot(struct pciback_device *pdev,
++ int domain, int bus,
++ int slot, int func);
++struct pci_dev *pcistub_get_pci_dev(struct pciback_device *pdev,
++ struct pci_dev *dev);
++void pcistub_put_pci_dev(struct pci_dev *dev);
++
++/* Ensure a device is turned off or reset */
++void pciback_reset_device(struct pci_dev *pdev);
++
++/* Access a virtual configuration space for a PCI device */
++int pciback_config_init(void);
++int pciback_config_init_dev(struct pci_dev *dev);
++void pciback_config_free_dyn_fields(struct pci_dev *dev);
++void pciback_config_reset_dev(struct pci_dev *dev);
++void pciback_config_free_dev(struct pci_dev *dev);
++int pciback_config_read(struct pci_dev *dev, int offset, int size,
++ u32 *ret_val);
++int pciback_config_write(struct pci_dev *dev, int offset, int size, u32 value);
++
++/* Handle requests for specific devices from the frontend */
++typedef int (*publish_pci_dev_cb) (struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn, unsigned int devid);
++typedef int (*publish_pci_root_cb) (struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus);
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb);
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev);
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn);
++int pciback_init_devices(struct pciback_device *pdev);
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb cb);
++void pciback_release_devices(struct pciback_device *pdev);
++
++/* Handles events from front-end */
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs);
++void pciback_do_op(void *data);
++
++int pciback_xenbus_register(void);
++void pciback_xenbus_unregister(void);
++
++#ifdef CONFIG_PCI_MSI
++int pciback_enable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++int pciback_disable_msi(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++
++int pciback_enable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++
++int pciback_disable_msix(struct pciback_device *pdev,
++ struct pci_dev *dev, struct xen_pci_op *op);
++#endif
++extern int verbose_request;
++#endif
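+
+The publish_pci_dev_cb/publish_pci_root_cb typedefs are the contract between
+the generic xenbus code and the topology backends (vpci, slot, passthrough,
+controller). A trivial conforming device callback looks like this (sketch; a
+real callback writes the mapping to xenstore):
+
+	static int demo_publish_dev(struct pciback_device *pdev,
+				    unsigned int domain, unsigned int bus,
+				    unsigned int devfn, unsigned int devid)
+	{
+		dev_dbg(&pdev->xdev->dev, "vdev %u -> %04x:%02x:%02x.%x\n",
+			devid, domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
+		return 0;
+	}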
+Index: head-2008-11-25/drivers/xen/pciback/pciback_ops.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/pciback_ops.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,117 @@
++/*
++ * PCI Backend Operations - respond to PCI requests from Frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <asm/bitops.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++/* Ensure a device is "turned off" and ready to be exported.
++ * (Also see pciback_config_reset_dev to ensure virtual configuration space is
++ * ready to be re-exported)
++ */
++void pciback_reset_device(struct pci_dev *dev)
++{
++ u16 cmd;
++
++ /* Disable devices (but not bridges) */
++ if (dev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
++ pci_disable_device(dev);
++
++ pci_write_config_word(dev, PCI_COMMAND, 0);
++
++ dev->is_enabled = 0;
++ dev->is_busmaster = 0;
++ } else {
++ pci_read_config_word(dev, PCI_COMMAND, &cmd);
++ if (cmd & (PCI_COMMAND_INVALIDATE)) {
++ cmd &= ~(PCI_COMMAND_INVALIDATE);
++ pci_write_config_word(dev, PCI_COMMAND, cmd);
++
++ dev->is_busmaster = 0;
++ }
++ }
++}
++
++static inline void test_and_schedule_op(struct pciback_device *pdev)
++{
++ /* Check that frontend is requesting an operation and that we are not
++ * already processing a request */
++ if (test_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags)
++ && !test_and_set_bit(_PDEVF_op_active, &pdev->flags))
++ schedule_work(&pdev->op_work);
++}
++
++/* Performing the configuration space reads/writes must not be done in atomic
++ * context because some of the pci_* functions can sleep (mostly due to ACPI
++ * use of semaphores). This function is intended to be called from a work
++ * queue in process context taking a struct pciback_device as a parameter */
++void pciback_do_op(void *data)
++{
++ struct pciback_device *pdev = data;
++ struct pci_dev *dev;
++ struct xen_pci_op *op = &pdev->sh_info->op;
++
++ dev = pciback_get_pci_dev(pdev, op->domain, op->bus, op->devfn);
++
++ if (dev == NULL)
++ op->err = XEN_PCI_ERR_dev_not_found;
++ else
++ {
++ switch (op->cmd)
++ {
++ case XEN_PCI_OP_conf_read:
++ op->err = pciback_config_read(dev,
++ op->offset, op->size, &op->value);
++ break;
++ case XEN_PCI_OP_conf_write:
++ op->err = pciback_config_write(dev,
++ op->offset, op->size, op->value);
++ break;
++#ifdef CONFIG_PCI_MSI
++ case XEN_PCI_OP_enable_msi:
++ op->err = pciback_enable_msi(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_disable_msi:
++ op->err = pciback_disable_msi(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_enable_msix:
++ op->err = pciback_enable_msix(pdev, dev, op);
++ break;
++ case XEN_PCI_OP_disable_msix:
++ op->err = pciback_disable_msix(pdev, dev, op);
++ break;
++#endif
++ default:
++ op->err = XEN_PCI_ERR_not_implemented;
++ break;
++ }
++ }
++ /* Tell the driver domain that we're done. */
++ wmb();
++ clear_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_irq(pdev->evtchn_irq);
++
++ /* Mark that we're done. */
++ smp_mb__before_clear_bit(); /* /after/ clearing PCIF_active */
++ clear_bit(_PDEVF_op_active, &pdev->flags);
++ smp_mb__after_clear_bit(); /* /before/ final check for work */
++
++ /* Check to see if the driver domain tried to start another request in
++ * between clearing _XEN_PCIF_active and clearing _PDEVF_op_active. */
++ test_and_schedule_op(pdev);
++}
++
++irqreturn_t pciback_handle_event(int irq, void *dev_id, struct pt_regs *regs)
++{
++ struct pciback_device *pdev = dev_id;
++
++ test_and_schedule_op(pdev);
++
++ return IRQ_HANDLED;
++}
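+
+For context, the frontend side of this handshake (not part of this patch)
+mirrors the bit protocol above: publish the request, set _XEN_PCIF_active,
+kick the event channel, then wait for the backend to clear the flag. Roughly
+(sketch; a real frontend blocks instead of spinning):
+
+	static u32 demo_conf_read(struct xen_pci_sharedinfo *sh_info, int irq,
+				  int offset, int size)
+	{
+		struct xen_pci_op *op = &sh_info->op;
+
+		/* op->domain/bus/devfn set up by the caller; elided here */
+		op->cmd = XEN_PCI_OP_conf_read;
+		op->offset = offset;
+		op->size = size;
+		wmb();		/* publish the op before raising the flag */
+		set_bit(_XEN_PCIF_active, (unsigned long *)&sh_info->flags);
+		notify_remote_via_irq(irq);
+		while (test_bit(_XEN_PCIF_active,
+				(unsigned long *)&sh_info->flags))
+			cpu_relax();
+		return op->value;
+	}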
+Index: head-2008-11-25/drivers/xen/pciback/slot.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/slot.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,157 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil> (vpci.c)
++ * Author: Tristan Gingold <tristan.gingold@bull.net>, from vpci.c
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++/* There are at most 32 slots in a PCI bus. */
++#define PCI_SLOT_MAX 32
++
++#define PCI_BUS_NBR 2
++
++struct slot_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct pci_dev *slots[PCI_BUS_NBR][PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev *dev = NULL;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || PCI_FUNC(devfn) != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) >= PCI_SLOT_MAX || bus >= PCI_BUS_NBR)
++ return NULL;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++ dev = slot_dev->slots[bus][PCI_SLOT(devfn)];
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ return dev;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ int err = 0, slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == NULL) {
++ printk(KERN_INFO
++ "pciback: slot: %s: assign to virtual slot %d, bus %d\n",
++ pci_name(dev), slot, bus);
++ slot_dev->slots[bus][slot] = dev;
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ /* Publish this device. */
++ if (!err)
++ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, 0), devid);
++
++ out:
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&slot_dev->lock, flags);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (slot_dev->slots[bus][slot] == dev) {
++ slot_dev->slots[bus][slot] = NULL;
++ found_dev = dev;
++ goto out;
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&slot_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev;
++
++ slot_dev = kmalloc(sizeof(*slot_dev), GFP_KERNEL);
++ if (!slot_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&slot_dev->lock);
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++)
++ slot_dev->slots[bus][slot] = NULL;
++
++ pdev->pci_dev_data = slot_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot, bus;
++ struct slot_dev_data *slot_dev = pdev->pci_dev_data;
++ struct pci_dev *dev;
++
++ for (bus = 0; bus < PCI_BUS_NBR; bus++)
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ dev = slot_dev->slots[bus][slot];
++ if (dev != NULL)
++ pcistub_put_pci_dev(dev);
++ }
++
++ kfree(slot_dev);
++ pdev->pci_dev_data = NULL;
++}
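+
+Note the addressing contract this backend exposes: only domain 0, function 0,
+bus < PCI_BUS_NBR and slot < PCI_SLOT_MAX are ever valid. The checks that
+pciback_get_pci_dev() applies amount to (sketch):
+
+	static int demo_slot_addr_valid(unsigned int domain, unsigned int bus,
+					unsigned int devfn)
+	{
+		return domain == 0 && PCI_FUNC(devfn) == 0 &&
+		       bus < PCI_BUS_NBR && PCI_SLOT(devfn) < PCI_SLOT_MAX;
+	}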
+Index: head-2008-11-25/drivers/xen/pciback/vpci.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/vpci.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,212 @@
++/*
++ * PCI Backend - Provides a Virtual PCI bus (with real devices)
++ * to the frontend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++
++#include <linux/list.h>
++#include <linux/slab.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pciback.h"
++
++#define PCI_SLOT_MAX 32
++
++struct vpci_dev_data {
++ /* Access to dev_list must be protected by lock */
++ struct list_head dev_list[PCI_SLOT_MAX];
++ spinlock_t lock;
++};
++
++static inline struct list_head *list_first(struct list_head *head)
++{
++ return head->next;
++}
++
++struct pci_dev *pciback_get_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn)
++{
++ struct pci_dev_entry *entry;
++ struct pci_dev *dev = NULL;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if (domain != 0 || bus != 0)
++ return NULL;
++
++ if (PCI_SLOT(devfn) < PCI_SLOT_MAX) {
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ list_for_each_entry(entry,
++ &vpci_dev->dev_list[PCI_SLOT(devfn)],
++ list) {
++ if (PCI_FUNC(entry->dev->devfn) == PCI_FUNC(devfn)) {
++ dev = entry->dev;
++ break;
++ }
++ }
++
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++ }
++ return dev;
++}
++
++static inline int match_slot(struct pci_dev *l, struct pci_dev *r)
++{
++ if (pci_domain_nr(l->bus) == pci_domain_nr(r->bus)
++ && l->bus == r->bus && PCI_SLOT(l->devfn) == PCI_SLOT(r->devfn))
++ return 1;
++
++ return 0;
++}
++
++int pciback_add_pci_dev(struct pciback_device *pdev, struct pci_dev *dev,
++ int devid, publish_pci_dev_cb publish_cb)
++{
++ int err = 0, slot, func;
++ struct pci_dev_entry *t, *dev_entry;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ unsigned long flags;
++
++ if ((dev->class >> 24) == PCI_BASE_CLASS_BRIDGE) {
++ err = -EFAULT;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Can't export bridges on the virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL);
++ if (!dev_entry) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error adding entry to virtual PCI bus");
++ goto out;
++ }
++
++ dev_entry->dev = dev;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ /* Keep multi-function devices together on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (!list_empty(&vpci_dev->dev_list[slot])) {
++ t = list_entry(list_first(&vpci_dev->dev_list[slot]),
++ struct pci_dev_entry, list);
++
++ if (match_slot(dev, t->dev)) {
++ pr_info("pciback: vpci: %s: "
++ "assign to virtual slot %d func %d\n",
++ pci_name(dev), slot,
++ PCI_FUNC(dev->devfn));
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ func = PCI_FUNC(dev->devfn);
++ goto unlock;
++ }
++ }
++ }
++
++ /* Assign to a new slot on the virtual PCI bus */
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ if (list_empty(&vpci_dev->dev_list[slot])) {
++ printk(KERN_INFO
++ "pciback: vpci: %s: assign to virtual slot %d\n",
++ pci_name(dev), slot);
++ list_add_tail(&dev_entry->list,
++ &vpci_dev->dev_list[slot]);
++ func = PCI_FUNC(dev->devfn);
++ goto unlock;
++ }
++ }
++
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "No more space on root virtual PCI bus");
++
++ unlock:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++
++ /* Publish this device. */
++ if (!err)
++ err = publish_cb(pdev, 0, 0, PCI_DEVFN(slot, func), devid);
++
++ out:
++ return err;
++}
++
++void pciback_release_pci_dev(struct pciback_device *pdev, struct pci_dev *dev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++ struct pci_dev *found_dev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&vpci_dev->lock, flags);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ if (e->dev == dev) {
++ list_del(&e->list);
++ found_dev = e->dev;
++ kfree(e);
++ goto out;
++ }
++ }
++ }
++
++ out:
++ spin_unlock_irqrestore(&vpci_dev->lock, flags);
++
++ if (found_dev)
++ pcistub_put_pci_dev(found_dev);
++}
++
++int pciback_init_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev;
++
++ vpci_dev = kmalloc(sizeof(*vpci_dev), GFP_KERNEL);
++ if (!vpci_dev)
++ return -ENOMEM;
++
++ spin_lock_init(&vpci_dev->lock);
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ INIT_LIST_HEAD(&vpci_dev->dev_list[slot]);
++ }
++
++ pdev->pci_dev_data = vpci_dev;
++
++ return 0;
++}
++
++int pciback_publish_pci_roots(struct pciback_device *pdev,
++ publish_pci_root_cb publish_cb)
++{
++ /* The Virtual PCI bus has only one root */
++ return publish_cb(pdev, 0, 0);
++}
++
++void pciback_release_devices(struct pciback_device *pdev)
++{
++ int slot;
++ struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
++
++ for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
++ struct pci_dev_entry *e, *tmp;
++ list_for_each_entry_safe(e, tmp, &vpci_dev->dev_list[slot],
++ list) {
++ list_del(&e->list);
++ pcistub_put_pci_dev(e->dev);
++ kfree(e);
++ }
++ }
++
++ kfree(vpci_dev);
++ pdev->pci_dev_data = NULL;
++}
+Index: head-2008-11-25/drivers/xen/pciback/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pciback/xenbus.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,704 @@
++/*
++ * PCI Backend Xenbus Setup - handles setup with frontend and xend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include "pciback.h"
++
++#define INVALID_EVTCHN_IRQ (-1)
++
++static struct pciback_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pciback_device *pdev;
++
++ pdev = kzalloc(sizeof(struct pciback_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++ dev_dbg(&xdev->dev, "allocated pdev @ 0x%p\n", pdev);
++
++ pdev->xdev = xdev;
++ xdev->dev.driver_data = pdev;
++
++ spin_lock_init(&pdev->dev_lock);
++
++ pdev->sh_area = NULL;
++ pdev->sh_info = NULL;
++ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++ pdev->be_watching = 0;
++
++ INIT_WORK(&pdev->op_work, pciback_do_op, pdev);
++
++ if (pciback_init_devices(pdev)) {
++ kfree(pdev);
++ pdev = NULL;
++ }
++ out:
++ return pdev;
++}
++
++static void pciback_disconnect(struct pciback_device *pdev)
++{
++ spin_lock(&pdev->dev_lock);
++
++ /* Ensure the guest can't trigger our handler before removing devices */
++ if (pdev->evtchn_irq != INVALID_EVTCHN_IRQ) {
++ unbind_from_irqhandler(pdev->evtchn_irq, pdev);
++ pdev->evtchn_irq = INVALID_EVTCHN_IRQ;
++ }
++
++ /* If the driver domain started an op, make sure we complete it or
++ * delete it before releasing the shared memory */
++ cancel_delayed_work(&pdev->op_work);
++ flush_scheduled_work();
++
++ if (pdev->sh_info != NULL) {
++ xenbus_unmap_ring_vfree(pdev->xdev, pdev->sh_area);
++ pdev->sh_info = NULL;
++ }
++
++ spin_unlock(&pdev->dev_lock);
++}
++
++static void free_pdev(struct pciback_device *pdev)
++{
++ if (pdev->be_watching)
++ unregister_xenbus_watch(&pdev->be_watch);
++
++ pciback_disconnect(pdev);
++
++ pciback_release_devices(pdev);
++
++ pdev->xdev->dev.driver_data = NULL;
++ pdev->xdev = NULL;
++
++ kfree(pdev);
++}
++
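++/*
++ * Map the shared-info page granted by the frontend and bind the event
++ * channel it advertised; pciback_disconnect() undoes both.
++ */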
++static int pciback_do_attach(struct pciback_device *pdev, int gnt_ref,
++ int remote_evtchn)
++{
++ int err = 0;
++ struct vm_struct *area;
++
++ dev_dbg(&pdev->xdev->dev,
++ "Attaching to frontend resources - gnt_ref=%d evtchn=%d\n",
++ gnt_ref, remote_evtchn);
++
++ area = xenbus_map_ring_valloc(pdev->xdev, gnt_ref);
++ if (IS_ERR(area)) {
++ err = PTR_ERR(area);
++ goto out;
++ }
++ pdev->sh_area = area;
++ pdev->sh_info = area->addr;
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ pdev->xdev->otherend_id, remote_evtchn, pciback_handle_event,
++ SA_SAMPLE_RANDOM, "pciback", pdev);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error binding event channel to IRQ");
++ goto out;
++ }
++ pdev->evtchn_irq = err;
++ err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "Attached!\n");
++ out:
++ return err;
++}
++
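++/*
++ * Called once both ends reach Initialised: read pci-op-ref,
++ * event-channel and magic from the frontend's xenstore area, attach to
++ * the shared ring, then switch this end to Connected.
++ */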
++static int pciback_attach(struct pciback_device *pdev)
++{
++ int err = 0;
++ int gnt_ref, remote_evtchn;
++ char *magic = NULL;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Make sure we only do this setup once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ /* Wait for frontend to state that it has published the configuration */
++ if (xenbus_read_driver_state(pdev->xdev->otherend) !=
++ XenbusStateInitialised)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Reading frontend config\n");
++
++ err = xenbus_gather(XBT_NIL, pdev->xdev->otherend,
++ "pci-op-ref", "%u", &gnt_ref,
++ "event-channel", "%u", &remote_evtchn,
++ "magic", NULL, &magic, NULL);
++ if (err) {
++ /* If configuration didn't get read correctly, wait longer */
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading configuration from frontend");
++ goto out;
++ }
++
++ if (magic == NULL || strcmp(magic, XEN_PCI_MAGIC) != 0) {
++ xenbus_dev_fatal(pdev->xdev, -EFAULT,
++ "version mismatch (%s/%s) with pcifront - "
++ "halting pciback",
++ magic, XEN_PCI_MAGIC);
++ goto out;
++ }
++
++ err = pciback_do_attach(pdev, gnt_ref, remote_evtchn);
++ if (err)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "Connecting...\n");
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to connected state!");
++
++ dev_dbg(&pdev->xdev->dev, "Connected? %d\n", err);
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ kfree(magic); /* kfree(NULL) is a no-op */
++
++ return err;
++}
++
++static int pciback_publish_pci_dev(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus,
++ unsigned int devfn, unsigned int devid)
++{
++ int err;
++ int len;
++ char str[64];
++
++ len = snprintf(str, sizeof(str), "vdev-%d", devid);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%04x:%02x:%02x.%02x", domain, bus,
++ PCI_SLOT(devfn), PCI_FUNC(devfn));
++
++ out:
++ return err;
++}
++
++static int pciback_export_device(struct pciback_device *pdev,
++ int domain, int bus, int slot, int func,
++ int devid)
++{
++ struct pci_dev *dev;
++ int err = 0;
++
++ dev_dbg(&pdev->xdev->dev, "exporting dom %x bus %x slot %x func %x\n",
++ domain, bus, slot, func);
++
++ dev = pcistub_get_pci_dev_by_slot(pdev, domain, bus, slot, func);
++ if (!dev) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Couldn't locate PCI device "
++ "(%04x:%02x:%02x.%01x)! "
++ "perhaps already in-use?",
++ domain, bus, slot, func);
++ goto out;
++ }
++
++ err = pciback_add_pci_dev(pdev, dev, devid, pciback_publish_pci_dev);
++ if (err)
++ goto out;
++
++ /* TODO: It'd be nice to export a bridge and have all of its children
++ * get exported with it. This may be best done in xend (which will
++ * have to calculate resource usage anyway) but we probably want to
++ * put something in here to ensure that if a bridge gets given to a
++ * driver domain, that all devices under that bridge are not given
++ * to other driver domains (as whoever controls the bridge can disable
++ * it and stop the other devices from working).
++ */
++ out:
++ return err;
++}
++
++static int pciback_remove_device(struct pciback_device *pdev,
++ int domain, int bus, int slot, int func)
++{
++ int err = 0;
++ struct pci_dev *dev;
++
++ dev_dbg(&pdev->xdev->dev, "removing dom %x bus %x slot %x func %x\n",
++ domain, bus, slot, func);
++
++ dev = pciback_get_pci_dev(pdev, domain, bus, PCI_DEVFN(slot, func));
++ if (!dev) {
++ err = -EINVAL;
++ dev_dbg(&pdev->xdev->dev, "Couldn't locate PCI device "
++ "(%04x:%02x:%02x.%01x)! not owned by this domain\n",
++ domain, bus, slot, func);
++ goto out;
++ }
++
++ pciback_release_pci_dev(pdev, dev);
++
++ out:
++ return err;
++}
++
++static int pciback_publish_pci_root(struct pciback_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ unsigned int d, b;
++ int i, root_num, len, err;
++ char str[64];
++
++ dev_dbg(&pdev->xdev->dev, "Publishing pci roots\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", &root_num);
++ if (err == 0 || err == -ENOENT)
++ root_num = 0;
++ else if (err < 0)
++ goto out;
++
++ /* Verify that we haven't already published this pci root */
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ str, "%x:%x", &d, &b);
++ if (err < 0)
++ goto out;
++ if (err != 2) {
++ err = -EINVAL;
++ goto out;
++ }
++
++ if (d == domain && b == bus) {
++ err = 0;
++ goto out;
++ }
++ }
++
++ len = snprintf(str, sizeof(str), "root-%d", root_num);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ dev_dbg(&pdev->xdev->dev, "writing root %d at %04x:%02x\n",
++ root_num, domain, bus);
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, str,
++ "%04x:%02x", domain, bus);
++ if (err)
++ goto out;
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++ "root_num", "%d", (root_num + 1));
++
++ out:
++ return err;
++}
++
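++/*
++ * Handle hotplug while connected: walk the state-%d nodes and export
++ * (Initialising) or remove (Closing) the corresponding dev-%d/vdev-%d
++ * entries, then switch to Reconfigured.
++ */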
++static int pciback_reconfigure(struct pciback_device *pdev)
++{
++ int err = 0;
++ int num_devs;
++ int domain, bus, slot, func;
++ int substate;
++ int i, len;
++ char state_str[64];
++ char dev_str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ dev_dbg(&pdev->xdev->dev, "Reconfiguring device ...\n");
++
++ /* Make sure we only reconfigure once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateReconfiguring)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of devices");
++ goto out;
++ }
++
++ for (i = 0; i < num_devs; i++) {
++ len = snprintf(state_str, sizeof(state_str), "state-%d", i);
++ if (unlikely(len >= (sizeof(state_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, state_str,
++ "%d", &substate);
++ if (err != 1)
++ substate = XenbusStateUnknown;
++
++ switch (substate) {
++ case XenbusStateInitialising:
++ dev_dbg(&pdev->xdev->dev, "Attaching dev-%d ...\n", i);
++
++ len = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++ if (unlikely(len >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while "
++ "reading configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ dev_str, "%x:%x:%x.%x",
++ &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device "
++ "configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_export_device(pdev, domain, bus, slot,
++ func, i);
++ if (err)
++ goto out;
++
++ /* Publish pci roots. */
++ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error while publish PCI root"
++ "buses for frontend");
++ goto out;
++ }
++
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename,
++ state_str, "%d",
++ XenbusStateInitialised);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching substate of "
++ "dev-%d\n", i);
++ goto out;
++ }
++ break;
++
++ case XenbusStateClosing:
++ dev_dbg(&pdev->xdev->dev, "Detaching dev-%d ...\n", i);
++
++ len = snprintf(dev_str, sizeof(dev_str), "vdev-%d", i);
++ if (unlikely(len >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while "
++ "reading configuration");
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename,
++ dev_str, "%x:%x:%x.%x",
++ &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device "
++ "configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_remove_device(pdev, domain, bus, slot,
++ func);
++ if (err)
++ goto out;
++
++ /* TODO: If at some point we implement support for pci
++ * root hot-remove on pcifront side, we'll need to
++ * remove unnecessary xenstore nodes of pci roots here.
++ */
++
++ break;
++
++ default:
++ break;
++ }
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfigured);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to reconfigured state!");
++ goto out;
++ }
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ return 0;
++}
++
++static void pciback_frontend_changed(struct xenbus_device *xdev,
++ enum xenbus_state fe_state)
++{
++ struct pciback_device *pdev = xdev->dev.driver_data;
++
++ dev_dbg(&xdev->dev, "fe state changed %d\n", fe_state);
++
++ switch (fe_state) {
++ case XenbusStateInitialised:
++ pciback_attach(pdev);
++ break;
++
++ case XenbusStateReconfiguring:
++ pciback_reconfigure(pdev);
++ break;
++
++ case XenbusStateConnected:
++ /* pcifront switched from Reconfiguring back to Connected;
++ * mirror the state so both ends agree.
++ */
++ xenbus_switch_state(xdev, XenbusStateConnected);
++ break;
++
++ case XenbusStateClosing:
++ pciback_disconnect(pdev);
++ xenbus_switch_state(xdev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ pciback_disconnect(pdev);
++ xenbus_switch_state(xdev, XenbusStateClosed);
++ if (xenbus_dev_is_online(xdev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ dev_dbg(&xdev->dev, "frontend is gone! unregister device\n");
++ device_unregister(&xdev->dev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_setup_backend(struct pciback_device *pdev)
++{
++ /* Get configuration from xend (if available now) */
++ int domain, bus, slot, func;
++ int err = 0;
++ int i, num_devs;
++ char dev_str[64];
++ char state_str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ /* It's possible we could get the call to setup twice, so make sure
++ * we're not already connected.
++ */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitWait)
++ goto out;
++
++ dev_dbg(&pdev->xdev->dev, "getting be setup\n");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of devices");
++ goto out;
++ }
++
++ for (i = 0; i < num_devs; i++) {
++ int l = snprintf(dev_str, sizeof(dev_str), "dev-%d", i);
++ if (unlikely(l >= (sizeof(dev_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->nodename, dev_str,
++ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++ if (err < 0) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading device configuration");
++ goto out;
++ }
++ if (err != 4) {
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error parsing pci device "
++ "configuration");
++ goto out;
++ }
++
++ err = pciback_export_device(pdev, domain, bus, slot, func, i);
++ if (err)
++ goto out;
++
++ /* Switch substate of this device. */
++ l = snprintf(state_str, sizeof(state_str), "state-%d", i);
++ if (unlikely(l >= (sizeof(state_str) - 1))) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "String overflow while reading "
++ "configuration");
++ goto out;
++ }
++ err = xenbus_printf(XBT_NIL, pdev->xdev->nodename, state_str,
++ "%d", XenbusStateInitialised);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err, "Error switching "
++ "substate of dev-%d\n", i);
++ goto out;
++ }
++ }
++
++ err = pciback_publish_pci_roots(pdev, pciback_publish_pci_root);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error while publish PCI root buses "
++ "for frontend");
++ goto out;
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++ if (err)
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error switching to initialised state!");
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ if (!err)
++ /* see if pcifront is already configured (if not, we'll wait) */
++ pciback_attach(pdev);
++
++ return err;
++}
++
++static void pciback_be_watch(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct pciback_device *pdev =
++ container_of(watch, struct pciback_device, be_watch);
++
++ switch (xenbus_read_driver_state(pdev->xdev->nodename)) {
++ case XenbusStateInitWait:
++ pciback_setup_backend(pdev);
++ break;
++
++ default:
++ break;
++ }
++}
++
++static int pciback_xenbus_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pciback_device *pdev = alloc_pdev(dev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(dev, err,
++ "Error allocating pciback_device struct");
++ goto out;
++ }
++
++ /* wait for xend to configure us */
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto out;
++
++ /* watch the backend node for backend configuration information */
++ err = xenbus_watch_path(dev, dev->nodename, &pdev->be_watch,
++ pciback_be_watch);
++ if (err)
++ goto out;
++ pdev->be_watching = 1;
++
++ /* We need to force a call to our callback here in case
++ * xend already configured us!
++ */
++ pciback_be_watch(&pdev->be_watch, NULL, 0);
++
++ out:
++ return err;
++}
++
++static int pciback_xenbus_remove(struct xenbus_device *dev)
++{
++ struct pciback_device *pdev = dev->dev.driver_data;
++
++ if (pdev != NULL)
++ free_pdev(pdev);
++
++ return 0;
++}
++
++static const struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++
++static struct xenbus_driver xenbus_pciback_driver = {
++ .name = "pciback",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pciback_xenbus_probe,
++ .remove = pciback_xenbus_remove,
++ .otherend_changed = pciback_frontend_changed,
++};
++
++int __init pciback_xenbus_register(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_backend(&xenbus_pciback_driver);
++}
++
++void __exit pciback_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&xenbus_pciback_driver);
++}
+Index: head-2008-11-25/drivers/xen/pcifront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pcifront/Makefile 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,7 @@
++obj-y += pcifront.o
++
++pcifront-y := pci_op.o xenbus.o pci.o
++
++ifeq ($(CONFIG_XEN_PCIDEV_FE_DEBUG),y)
++EXTRA_CFLAGS += -DDEBUG
++endif
+Index: head-2008-11-25/drivers/xen/pcifront/pci.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pcifront/pci.c 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,46 @@
++/*
++ * PCI Frontend Operations - ensure only one PCI frontend runs at a time
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include "pcifront.h"
++
++DEFINE_SPINLOCK(pcifront_dev_lock);
++static struct pcifront_device *pcifront_dev = NULL;
++
++int pcifront_connect(struct pcifront_device *pdev)
++{
++ int err = 0;
++
++ spin_lock(&pcifront_dev_lock);
++
++ if (!pcifront_dev) {
++ dev_info(&pdev->xdev->dev, "Installing PCI frontend\n");
++ pcifront_dev = pdev;
++ } else {
++ dev_err(&pdev->xdev->dev, "PCI frontend already installed!\n");
++ err = -EEXIST;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++
++ return err;
++}
++
++void pcifront_disconnect(struct pcifront_device *pdev)
++{
++ spin_lock(&pcifront_dev_lock);
++
++ if (pdev == pcifront_dev) {
++ dev_info(&pdev->xdev->dev,
++ "Disconnecting PCI Frontend Buses\n");
++ pcifront_dev = NULL;
++ }
++
++ spin_unlock(&pcifront_dev_lock);
++}
+Index: head-2008-11-25/drivers/xen/pcifront/pci_op.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pcifront/pci_op.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,551 @@
++/*
++ * PCI Frontend Operations - Communicates with the backend
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/version.h>
++#include <linux/init.h>
++#include <linux/pci.h>
++#include <linux/spinlock.h>
++#include <linux/time.h>
++#include <xen/evtchn.h>
++#include "pcifront.h"
++
++static int verbose_request = 0;
++module_param(verbose_request, int, 0644);
++
++#ifdef __ia64__
++static void pcifront_init_sd(struct pcifront_sd *sd,
++ unsigned int domain, unsigned int bus,
++ struct pcifront_device *pdev)
++{
++ int err, i, j, k, len, root_num, res_count;
++ struct acpi_resource res;
++ unsigned int d, b, byte;
++ unsigned long magic;
++ char str[64], tmp[3];
++ unsigned char *buf, *bufp;
++ u8 *ptr;
++
++ memset(sd, 0, sizeof(*sd));
++
++ sd->segment = domain;
++ sd->node = -1; /* Revisit for NUMA */
++ sd->platform_data = pdev;
++
++ /* Look for resources for this controller in xenbus. */
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "root_num",
++ "%d", &root_num);
++ if (err != 1)
++ return;
++
++ for (i = 0; i < root_num; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%x:%x", &d, &b);
++ if (err != 2)
++ return;
++
++ if (d == domain && b == bus)
++ break;
++ }
++
++ if (i == root_num)
++ return;
++
++ len = snprintf(str, sizeof(str), "root-resource-magic");
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%lx", &magic);
++
++ if (err != 1)
++ return; /* No resources, nothing to do */
++
++ if (magic != (sizeof(res) * 2) + 1) {
++ printk(KERN_WARNING "pcifront: resource magic mismatch\n");
++ return;
++ }
++
++ len = snprintf(str, sizeof(str), "root-%d-resources", i);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ return;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ str, "%d", &res_count);
++
++ if (err != 1)
++ return; /* No resources, nothing to do */
++
++ sd->window = kzalloc(sizeof(*sd->window) * res_count, GFP_KERNEL);
++ if (!sd->window)
++ return;
++
++ /* magic is also the size of the byte stream in xenbus */
++ buf = kmalloc(magic, GFP_KERNEL);
++ if (!buf) {
++ kfree(sd->window);
++ sd->window = NULL;
++ return;
++ }
++
++ /* Read the resources out of xenbus */
++ for (j = 0; j < res_count; j++) {
++ memset(&res, 0, sizeof(res));
++ memset(buf, 0, magic);
++
++ len = snprintf(str, sizeof(str), "root-%d-resource-%d", i, j);
++ if (unlikely(len >= (sizeof(str) - 1)))
++ break; /* break out so buf is freed below */
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%s", buf);
++ if (err != 1) {
++ printk(KERN_WARNING "pcifront: error reading "
++ "resource %d on bus %04x:%02x\n",
++ j, domain, bus);
++ continue;
++ }
++
++ bufp = buf;
++ ptr = (u8 *)&res;
++ memset(tmp, 0, sizeof(tmp));
++
++ /* Copy ASCII byte stream into structure */
++ for (k = 0; k < magic - 1; k += 2) {
++ memcpy(tmp, bufp, 2);
++ bufp += 2;
++
++ sscanf(tmp, "%02x", &byte);
++ *ptr = byte;
++ ptr++;
++ }
++
++ xen_add_resource(sd, domain, bus, &res);
++ sd->windows++;
++ }
++ kfree(buf);
++}
++#endif
++
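++/*
++ * Map shared-ring error codes onto the PCIBIOS_* values expected by
++ * the PCI core's config-space accessors.
++ */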
++static int errno_to_pcibios_err(int errno)
++{
++ switch (errno) {
++ case XEN_PCI_ERR_success:
++ return PCIBIOS_SUCCESSFUL;
++
++ case XEN_PCI_ERR_dev_not_found:
++ return PCIBIOS_DEVICE_NOT_FOUND;
++
++ case XEN_PCI_ERR_invalid_offset:
++ case XEN_PCI_ERR_op_failed:
++ return PCIBIOS_BAD_REGISTER_NUMBER;
++
++ case XEN_PCI_ERR_not_implemented:
++ return PCIBIOS_FUNC_NOT_SUPPORTED;
++
++ case XEN_PCI_ERR_access_denied:
++ return PCIBIOS_SET_FAILED;
++ }
++ return errno;
++}
++
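++/*
++ * Issue one operation through the single shared xen_pci_op slot: copy
++ * the request in, raise _XEN_PCIF_active, kick the event channel, then
++ * poll until the backend clears the flag or the 2s deadline expires.
++ */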
++static int do_pci_op(struct pcifront_device *pdev, struct xen_pci_op *op)
++{
++ int err = 0;
++ struct xen_pci_op *active_op = &pdev->sh_info->op;
++ unsigned long irq_flags;
++ evtchn_port_t port = pdev->evtchn;
++ s64 ns, ns_timeout;
++ struct timeval tv;
++
++ spin_lock_irqsave(&pdev->sh_info_lock, irq_flags);
++
++ memcpy(active_op, op, sizeof(struct xen_pci_op));
++
++ /* Go */
++ wmb();
++ set_bit(_XEN_PCIF_active, (unsigned long *)&pdev->sh_info->flags);
++ notify_remote_via_evtchn(port);
++
++ /*
++ * We set a poll timeout of 3 seconds but give up on return after
++ * 2 seconds. It is better to time out too late rather than too early
++ * (in the latter case we end up continually re-executing poll() with a
++ * timeout in the past). 1s difference gives plenty of slack for error.
++ */
++ do_gettimeofday(&tv);
++ ns_timeout = timeval_to_ns(&tv) + 2 * (s64)NSEC_PER_SEC;
++
++ clear_evtchn(port);
++
++ while (test_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags)) {
++ if (HYPERVISOR_poll(&port, 1, jiffies + 3*HZ))
++ BUG();
++ clear_evtchn(port);
++ do_gettimeofday(&tv);
++ ns = timeval_to_ns(&tv);
++ if (ns > ns_timeout) {
++ dev_err(&pdev->xdev->dev,
++ "pciback not responding!!!\n");
++ clear_bit(_XEN_PCIF_active,
++ (unsigned long *)&pdev->sh_info->flags);
++ err = XEN_PCI_ERR_dev_not_found;
++ goto out;
++ }
++ }
++
++ memcpy(op, active_op, sizeof(struct xen_pci_op));
++
++ err = op->err;
++ out:
++ spin_unlock_irqrestore(&pdev->sh_info_lock, irq_flags);
++ return err;
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_read(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 *val)
++{
++ int err = 0;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_read,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "read dev=%04x:%02x:%02x.%01x - offset %x size %d\n",
++ pci_domain_nr(bus), bus->number, PCI_SLOT(devfn),
++ PCI_FUNC(devfn), where, size);
++
++ err = do_pci_op(pdev, &op);
++
++ if (likely(!err)) {
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev, "read got back value %x\n",
++ op.value);
++
++ *val = op.value;
++ } else if (err == -ENODEV) {
++ /* No device here, pretend that it just returned 0 */
++ err = 0;
++ *val = 0;
++ }
++
++ return errno_to_pcibios_err(err);
++}
++
++/* Access to this function is spinlocked in drivers/pci/access.c */
++static int pcifront_bus_write(struct pci_bus *bus, unsigned int devfn,
++ int where, int size, u32 val)
++{
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_conf_write,
++ .domain = pci_domain_nr(bus),
++ .bus = bus->number,
++ .devfn = devfn,
++ .offset = where,
++ .size = size,
++ .value = val,
++ };
++ struct pcifront_sd *sd = bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (verbose_request)
++ dev_info(&pdev->xdev->dev,
++ "write dev=%04x:%02x:%02x.%01x - "
++ "offset %x size %d val %x\n",
++ pci_domain_nr(bus), bus->number,
++ PCI_SLOT(devfn), PCI_FUNC(devfn), where, size, val);
++
++ return errno_to_pcibios_err(do_pci_op(pdev, &op));
++}
++
++struct pci_ops pcifront_bus_ops = {
++ .read = pcifront_bus_read,
++ .write = pcifront_bus_write,
++};
++
++#ifdef CONFIG_PCI_MSI
++int pci_frontend_enable_msix(struct pci_dev *dev,
++ struct msix_entry *entries,
++ int nvec)
++{
++ int err;
++ int i;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_enable_msix,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ .value = nvec,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ if (nvec > SH_INFO_MAX_VEC) {
++ printk("too much vector for pci frontend%x\n", nvec);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < nvec; i++) {
++ op.msix_entries[i].entry = entries[i].entry;
++ op.msix_entries[i].vector = entries[i].vector;
++ }
++
++ err = do_pci_op(pdev, &op);
++
++ if (!err) {
++ if (!op.value) {
++ /* Success: copy back the vectors the backend allocated */
++ for (i = 0; i < nvec; i++)
++ entries[i].vector = op.msix_entries[i].vector;
++ return 0;
++ } else {
++ printk(KERN_ERR "pcifront: enable MSI-X failed, "
++ "backend returned %x\n", op.value);
++ return op.value;
++ }
++ } else {
++ printk(KERN_ERR "pcifront: MSI-X op failed: %x\n", err);
++ return err;
++ }
++}
++
++void pci_frontend_disable_msix(struct pci_dev* dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_disable_msix,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++
++ /* What should we do on error? */
++ if (err)
++ printk(KERN_ERR "pcifront: disable MSI-X failed: %x\n", err);
++}
++
++int pci_frontend_enable_msi(struct pci_dev *dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_enable_msi,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++ if (likely(!err)) {
++ dev->irq = op.value;
++ } else {
++ printk(KERN_ERR "pcifront: enable MSI failed for dev %x:%x\n",
++ op.bus, op.devfn);
++ err = -EINVAL;
++ }
++ return err;
++}
++
++void pci_frontend_disable_msi(struct pci_dev* dev)
++{
++ int err;
++ struct xen_pci_op op = {
++ .cmd = XEN_PCI_OP_disable_msi,
++ .domain = pci_domain_nr(dev->bus),
++ .bus = dev->bus->number,
++ .devfn = dev->devfn,
++ };
++ struct pcifront_sd *sd = dev->bus->sysdata;
++ struct pcifront_device *pdev = pcifront_get_pdev(sd);
++
++ err = do_pci_op(pdev, &op);
++ if (err == XEN_PCI_ERR_dev_not_found) {
++ /* XXX No response from backend, what shall we do? */
++ printk("get no response from backend for disable MSI\n");
++ return;
++ }
++ if (likely(!err))
++ dev->irq = op.value;
++ else
++ /* How can pciback notify us of failure? */
++ printk(KERN_WARNING
++ "pcifront: unexpected response from backend\n");
++}
++#endif /* CONFIG_PCI_MSI */
++
++/* Claim resources for the PCI frontend as-is, backend won't allow changes */
++static void pcifront_claim_resource(struct pci_dev *dev, void *data)
++{
++ struct pcifront_device *pdev = data;
++ int i;
++ struct resource *r;
++
++ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
++ r = &dev->resource[i];
++
++ if (!r->parent && r->start && r->flags) {
++ dev_dbg(&pdev->xdev->dev, "claiming resource %s/%d\n",
++ pci_name(dev), i);
++ pci_claim_resource(dev, i);
++ }
++ }
++}
++
++int __devinit pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ struct pci_bus *b;
++ struct pcifront_sd *sd = NULL;
++ struct pci_bus_entry *bus_entry = NULL;
++ int err = 0;
++
++#ifndef CONFIG_PCI_DOMAINS
++ if (domain != 0) {
++ dev_err(&pdev->xdev->dev,
++ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++ dev_err(&pdev->xdev->dev,
++ "Please compile with CONFIG_PCI_DOMAINS\n");
++ err = -EINVAL;
++ goto err_out;
++ }
++#endif
++
++ dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
++ domain, bus);
++
++ bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
++ sd = kmalloc(sizeof(*sd), GFP_KERNEL);
++ if (!bus_entry || !sd) {
++ err = -ENOMEM;
++ goto err_out;
++ }
++ pcifront_init_sd(sd, domain, bus, pdev);
++
++ b = pci_scan_bus_parented(&pdev->xdev->dev, bus,
++ &pcifront_bus_ops, sd);
++ if (!b) {
++ dev_err(&pdev->xdev->dev,
++ "Error creating PCI Frontend Bus!\n");
++ err = -ENOMEM;
++ goto err_out;
++ }
++
++ pcifront_setup_root_resources(b, sd);
++ bus_entry->bus = b;
++
++ list_add(&bus_entry->list, &pdev->root_buses);
++
++ /* Claim resources before going "live" with our devices */
++ pci_walk_bus(b, pcifront_claim_resource, pdev);
++
++ pci_bus_add_devices(b);
++
++ return 0;
++
++ err_out:
++ kfree(bus_entry);
++ kfree(sd);
++
++ return err;
++}
++
++int __devinit pcifront_rescan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus)
++{
++ struct pci_bus *b;
++ struct pci_dev *d;
++ unsigned int devfn;
++
++#ifndef CONFIG_PCI_DOMAINS
++ if (domain != 0) {
++ dev_err(&pdev->xdev->dev,
++ "PCI Root in non-zero PCI Domain! domain=%d\n", domain);
++ dev_err(&pdev->xdev->dev,
++ "Please compile with CONFIG_PCI_DOMAINS\n");
++ return -EINVAL;
++ }
++#endif
++
++ dev_info(&pdev->xdev->dev, "Rescanning PCI Frontend Bus %04x:%02x\n",
++ domain, bus);
++
++ b = pci_find_bus(domain, bus);
++ if (!b)
++ /* If the bus is unknown, create it. */
++ return pcifront_scan_root(pdev, domain, bus);
++
++ /* Rescan the bus for newly attached functions and add.
++ * We omit handling of PCI bridge attachment because pciback prevents
++ * bridges from being exported.
++ */
++ for (devfn = 0; devfn < 0x100; devfn++) {
++ d = pci_get_slot(b, devfn);
++ if (d) {
++ /* Device is already known. */
++ pci_dev_put(d);
++ continue;
++ }
++
++ d = pci_scan_single_device(b, devfn);
++ if (d) {
++ dev_info(&pdev->xdev->dev, "New device on "
++ "%04x:%02x:%02x.%02x found.\n", domain, bus,
++ PCI_SLOT(devfn), PCI_FUNC(devfn));
++ pci_bus_add_device(d);
++ }
++ }
++
++ return 0;
++}
++
++static void free_root_bus_devs(struct pci_bus *bus)
++{
++ struct pci_dev *dev;
++
++ while (!list_empty(&bus->devices)) {
++ dev = container_of(bus->devices.next, struct pci_dev,
++ bus_list);
++ dev_dbg(&dev->dev, "removing device\n");
++ pci_remove_bus_device(dev);
++ }
++}
++
++void pcifront_free_roots(struct pcifront_device *pdev)
++{
++ struct pci_bus_entry *bus_entry, *t;
++
++ dev_dbg(&pdev->xdev->dev, "cleaning up root buses\n");
++
++ list_for_each_entry_safe(bus_entry, t, &pdev->root_buses, list) {
++ list_del(&bus_entry->list);
++
++ free_root_bus_devs(bus_entry->bus);
++
++ kfree(bus_entry->bus->sysdata);
++
++ device_unregister(bus_entry->bus->bridge);
++ pci_remove_bus(bus_entry->bus);
++
++ kfree(bus_entry);
++ }
++}
+Index: head-2008-11-25/drivers/xen/pcifront/pcifront.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pcifront/pcifront.h 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,42 @@
++/*
++ * PCI Frontend - Common data structures & function declarations
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#ifndef __XEN_PCIFRONT_H__
++#define __XEN_PCIFRONT_H__
++
++#include <linux/spinlock.h>
++#include <linux/pci.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/pciif.h>
++#include <xen/pcifront.h>
++
++struct pci_bus_entry {
++ struct list_head list;
++ struct pci_bus *bus;
++};
++
++struct pcifront_device {
++ struct xenbus_device *xdev;
++ struct list_head root_buses;
++ spinlock_t dev_lock;
++
++ int evtchn;
++ int gnt_ref;
++
++ /* Lock this when doing any operations in sh_info */
++ spinlock_t sh_info_lock;
++ struct xen_pci_sharedinfo *sh_info;
++};
++
++int pcifront_connect(struct pcifront_device *pdev);
++void pcifront_disconnect(struct pcifront_device *pdev);
++
++int pcifront_scan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus);
++int pcifront_rescan_root(struct pcifront_device *pdev,
++ unsigned int domain, unsigned int bus);
++void pcifront_free_roots(struct pcifront_device *pdev);
++
++#endif /* __XEN_PCIFRONT_H__ */
+Index: head-2008-11-25/drivers/xen/pcifront/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/pcifront/xenbus.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,455 @@
++/*
++ * PCI Frontend Xenbus Setup - handles setup with backend (imports page/evtchn)
++ *
++ * Author: Ryan Wilson <hap9@epoch.ncsc.mil>
++ */
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/mm.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include "pcifront.h"
++
++#ifndef __init_refok
++#define __init_refok
++#endif
++
++#define INVALID_GRANT_REF (0)
++#define INVALID_EVTCHN (-1)
++
++static struct pcifront_device *alloc_pdev(struct xenbus_device *xdev)
++{
++ struct pcifront_device *pdev;
++
++ pdev = kzalloc(sizeof(struct pcifront_device), GFP_KERNEL);
++ if (pdev == NULL)
++ goto out;
++
++ pdev->sh_info =
++ (struct xen_pci_sharedinfo *)__get_free_page(GFP_KERNEL);
++ if (pdev->sh_info == NULL) {
++ kfree(pdev);
++ pdev = NULL;
++ goto out;
++ }
++ pdev->sh_info->flags = 0;
++
++ xdev->dev.driver_data = pdev;
++ pdev->xdev = xdev;
++
++ INIT_LIST_HEAD(&pdev->root_buses);
++
++ spin_lock_init(&pdev->dev_lock);
++ spin_lock_init(&pdev->sh_info_lock);
++
++ pdev->evtchn = INVALID_EVTCHN;
++ pdev->gnt_ref = INVALID_GRANT_REF;
++
++ dev_dbg(&xdev->dev, "Allocated pdev @ 0x%p pdev->sh_info @ 0x%p\n",
++ pdev, pdev->sh_info);
++ out:
++ return pdev;
++}
++
++static void free_pdev(struct pcifront_device *pdev)
++{
++ dev_dbg(&pdev->xdev->dev, "freeing pdev @ 0x%p\n", pdev);
++
++ pcifront_free_roots(pdev);
++
++ if (pdev->evtchn != INVALID_EVTCHN)
++ xenbus_free_evtchn(pdev->xdev, pdev->evtchn);
++
++ if (pdev->gnt_ref != INVALID_GRANT_REF)
++ gnttab_end_foreign_access(pdev->gnt_ref,
++ (unsigned long)pdev->sh_info);
++
++ pdev->xdev->dev.driver_data = NULL;
++
++ kfree(pdev);
++}
++
++static int pcifront_publish_info(struct pcifront_device *pdev)
++{
++ int err = 0;
++ struct xenbus_transaction trans;
++
++ err = xenbus_grant_ring(pdev->xdev, virt_to_mfn(pdev->sh_info));
++ if (err < 0)
++ goto out;
++
++ pdev->gnt_ref = err;
++
++ err = xenbus_alloc_evtchn(pdev->xdev, &pdev->evtchn);
++ if (err)
++ goto out;
++
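++ /*
++ * A xenbus transaction can fail with -EAGAIN, in which case it
++ * must be retried from the start; hence the label below.
++ */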
++ do_publish:
++ err = xenbus_transaction_start(&trans);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend "
++ "(start transaction)");
++ goto out;
++ }
++
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "pci-op-ref", "%u", pdev->gnt_ref);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "event-channel", "%u", pdev->evtchn);
++ if (!err)
++ err = xenbus_printf(trans, pdev->xdev->nodename,
++ "magic", XEN_PCI_MAGIC);
++
++ if (err) {
++ xenbus_transaction_end(trans, 1);
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error writing configuration for backend");
++ goto out;
++ } else {
++ err = xenbus_transaction_end(trans, 0);
++ if (err == -EAGAIN)
++ goto do_publish;
++ else if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error completing transaction "
++ "for backend");
++ goto out;
++ }
++ }
++
++ xenbus_switch_state(pdev->xdev, XenbusStateInitialised);
++
++ dev_dbg(&pdev->xdev->dev, "publishing successful!\n");
++
++ out:
++ return err;
++}
++
++static int __devinit pcifront_try_connect(struct pcifront_device *pdev)
++{
++ int err = -EFAULT;
++ int i, num_roots, len;
++ char str[64];
++ unsigned int domain, bus;
++
++ spin_lock(&pdev->dev_lock);
++
++ /* Only connect once */
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateInitialised)
++ goto out;
++
++ err = pcifront_connect(pdev);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error connecting PCI Frontend");
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ "root_num", "%d", &num_roots);
++ if (err == -ENOENT) {
++ xenbus_dev_error(pdev->xdev, err,
++ "No PCI Roots found, trying 0000:00");
++ err = pcifront_scan_root(pdev, 0, 0);
++ num_roots = 0;
++ } else if (err != 1) {
++ if (err == 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI roots");
++ goto out;
++ }
++
++ for (i = 0; i < num_roots; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x", &domain, &bus);
++ if (err != 2) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI root %d", i);
++ goto out;
++ }
++
++ err = pcifront_scan_root(pdev, domain, bus);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error scanning PCI root %04x:%02x",
++ domain, bus);
++ goto out;
++ }
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++ if (err)
++ goto out;
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static int pcifront_try_disconnect(struct pcifront_device *pdev)
++{
++ int err = 0;
++ enum xenbus_state prev_state;
++
++ spin_lock(&pdev->dev_lock);
++
++ prev_state = xenbus_read_driver_state(pdev->xdev->nodename);
++
++ if (prev_state >= XenbusStateClosing)
++ goto out;
++
++ if (prev_state == XenbusStateConnected) {
++ pcifront_free_roots(pdev);
++ pcifront_disconnect(pdev);
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateClosed);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++
++ return err;
++}
++
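++/*
++ * Runs when the backend reports Reconfigured: rescan every published
++ * PCI root so that newly exported functions show up on the bus.
++ */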
++static int __devinit pcifront_attach_devices(struct pcifront_device *pdev)
++{
++ int err = -EFAULT;
++ int i, num_roots, len;
++ unsigned int domain, bus;
++ char str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateReconfiguring)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend,
++ "root_num", "%d", &num_roots);
++ if (err == -ENOENT) {
++ xenbus_dev_error(pdev->xdev, err,
++ "No PCI Roots found, trying 0000:00");
++ err = pcifront_rescan_root(pdev, 0, 0);
++ num_roots = 0;
++ } else if (err != 1) {
++ if (err == 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI roots");
++ goto out;
++ }
++
++ for (i = 0; i < num_roots; i++) {
++ len = snprintf(str, sizeof(str), "root-%d", i);
++ if (unlikely(len >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x", &domain, &bus);
++ if (err != 2) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI root %d", i);
++ goto out;
++ }
++
++ err = pcifront_rescan_root(pdev, domain, bus);
++ if (err) {
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error scanning PCI root %04x:%02x",
++ domain, bus);
++ goto out;
++ }
++ }
++
++ xenbus_switch_state(pdev->xdev, XenbusStateConnected);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
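++/*
++ * Runs when the backend enters Reconfiguring: any device whose
++ * state-%d node is Closing is looked up via vdev-%d and removed from
++ * the bus, then we follow the backend into Reconfiguring.
++ */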
++static int pcifront_detach_devices(struct pcifront_device *pdev)
++{
++ int err = 0;
++ int i, num_devs;
++ unsigned int domain, bus, slot, func;
++ struct pci_bus *pci_bus;
++ struct pci_dev *pci_dev;
++ char str[64];
++
++ spin_lock(&pdev->dev_lock);
++
++ if (xenbus_read_driver_state(pdev->xdev->nodename) !=
++ XenbusStateConnected)
++ goto out;
++
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, "num_devs", "%d",
++ &num_devs);
++ if (err != 1) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading number of PCI devices");
++ goto out;
++ }
++
++ /* Find devices being detached and remove them. */
++ for (i = 0; i < num_devs; i++) {
++ int l, state;
++ l = snprintf(str, sizeof(str), "state-%d", i);
++ if (unlikely(l >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str, "%d",
++ &state);
++ if (err != 1)
++ state = XenbusStateUnknown;
++
++ if (state != XenbusStateClosing)
++ continue;
++
++ /* Remove device. */
++ l = snprintf(str, sizeof(str), "vdev-%d", i);
++ if (unlikely(l >= (sizeof(str) - 1))) {
++ err = -ENOMEM;
++ goto out;
++ }
++ err = xenbus_scanf(XBT_NIL, pdev->xdev->otherend, str,
++ "%x:%x:%x.%x", &domain, &bus, &slot, &func);
++ if (err != 4) {
++ if (err >= 0)
++ err = -EINVAL;
++ xenbus_dev_fatal(pdev->xdev, err,
++ "Error reading PCI device %d", i);
++ goto out;
++ }
++
++ pci_bus = pci_find_bus(domain, bus);
++ if (!pci_bus) {
++ dev_dbg(&pdev->xdev->dev, "Cannot get bus %04x:%02x\n",
++ domain, bus);
++ continue;
++ }
++ pci_dev = pci_get_slot(pci_bus, PCI_DEVFN(slot, func));
++ if (!pci_dev) {
++ dev_dbg(&pdev->xdev->dev,
++ "Cannot get PCI device %04x:%02x:%02x.%02x\n",
++ domain, bus, slot, func);
++ continue;
++ }
++ pci_remove_bus_device(pci_dev);
++ pci_dev_put(pci_dev);
++
++ dev_dbg(&pdev->xdev->dev,
++ "PCI device %04x:%02x:%02x.%02x removed.\n",
++ domain, bus, slot, func);
++ }
++
++ err = xenbus_switch_state(pdev->xdev, XenbusStateReconfiguring);
++
++ out:
++ spin_unlock(&pdev->dev_lock);
++ return err;
++}
++
++static void __init_refok pcifront_backend_changed(struct xenbus_device *xdev,
++ enum xenbus_state be_state)
++{
++ struct pcifront_device *pdev = xdev->dev.driver_data;
++
++ switch (be_state) {
++ case XenbusStateUnknown:
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateConnected:
++ pcifront_try_connect(pdev);
++ break;
++
++ case XenbusStateClosing:
++ dev_warn(&xdev->dev, "backend going away!\n");
++ pcifront_try_disconnect(pdev);
++ break;
++
++ case XenbusStateReconfiguring:
++ pcifront_detach_devices(pdev);
++ break;
++
++ case XenbusStateReconfigured:
++ pcifront_attach_devices(pdev);
++ break;
++ }
++}
++
++static int pcifront_xenbus_probe(struct xenbus_device *xdev,
++ const struct xenbus_device_id *id)
++{
++ int err = 0;
++ struct pcifront_device *pdev = alloc_pdev(xdev);
++
++ if (pdev == NULL) {
++ err = -ENOMEM;
++ xenbus_dev_fatal(xdev, err,
++ "Error allocating pcifront_device struct");
++ goto out;
++ }
++
++ err = pcifront_publish_info(pdev);
++
++ out:
++ return err;
++}
++
++static int pcifront_xenbus_remove(struct xenbus_device *xdev)
++{
++ if (xdev->dev.driver_data)
++ free_pdev(xdev->dev.driver_data);
++
++ return 0;
++}
++
++static const struct xenbus_device_id xenpci_ids[] = {
++ {"pci"},
++ {{0}},
++};
++MODULE_ALIAS("xen:pci");
++
++static struct xenbus_driver xenbus_pcifront_driver = {
++ .name = "pcifront",
++ .owner = THIS_MODULE,
++ .ids = xenpci_ids,
++ .probe = pcifront_xenbus_probe,
++ .remove = pcifront_xenbus_remove,
++ .otherend_changed = pcifront_backend_changed,
++};
++
++static int __init pcifront_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ return xenbus_register_frontend(&xenbus_pcifront_driver);
++}
++
++/* Initialize after the Xen PCI Frontend Stub is initialized */
++subsys_initcall(pcifront_init);
+Index: head-2008-11-25/drivers/xen/privcmd/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/privcmd/Makefile 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1,3 @@
++
++obj-y += privcmd.o
++obj-$(CONFIG_COMPAT) += compat_privcmd.o
+Index: head-2008-11-25/drivers/xen/privcmd/compat_privcmd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/privcmd/compat_privcmd.c 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1,73 @@
++/*
++ * This program is free software; you can redistribute it and/or modify
++ * it under the terms of the GNU General Public License as published by
++ * the Free Software Foundation; either version 2 of the License, or
++ * (at your option) any later version.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++ *
++ * Copyright (C) IBM Corp. 2006
++ *
++ * Authors: Jimi Xenidis <jimix@watson.ibm.com>
++ */
++
++#include <linux/config.h>
++#include <linux/compat.h>
++#include <linux/ioctl.h>
++#include <linux/syscalls.h>
++#include <asm/hypervisor.h>
++#include <asm/uaccess.h>
++#include <xen/public/privcmd.h>
++#include <xen/compat_ioctl.h>
++
++int privcmd_ioctl_32(int fd, unsigned int cmd, unsigned long arg)
++{
++ int ret;
++
++ switch (cmd) {
++ case IOCTL_PRIVCMD_MMAP_32: {
++ struct privcmd_mmap *p;
++ struct privcmd_mmap_32 *p32;
++ struct privcmd_mmap_32 n32;
++
++ p32 = compat_ptr(arg);
++ p = compat_alloc_user_space(sizeof(*p));
++ if (copy_from_user(&n32, p32, sizeof(n32)) ||
++ put_user(n32.num, &p->num) ||
++ put_user(n32.dom, &p->dom) ||
++ put_user(compat_ptr(n32.entry), &p->entry))
++ return -EFAULT;
++
++ ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAP, (unsigned long)p);
++ }
++ break;
++ case IOCTL_PRIVCMD_MMAPBATCH_32: {
++ struct privcmd_mmapbatch *p;
++ struct privcmd_mmapbatch_32 *p32;
++ struct privcmd_mmapbatch_32 n32;
++
++ p32 = compat_ptr(arg);
++ p = compat_alloc_user_space(sizeof(*p));
++ if (copy_from_user(&n32, p32, sizeof(n32)) ||
++ put_user(n32.num, &p->num) ||
++ put_user(n32.dom, &p->dom) ||
++ put_user(n32.addr, &p->addr) ||
++ put_user(compat_ptr(n32.arr), &p->arr))
++ return -EFAULT;
++
++ ret = sys_ioctl(fd, IOCTL_PRIVCMD_MMAPBATCH, (unsigned long)p);
++ }
++ break;
++ default:
++ ret = -EINVAL;
++ break;
++ }
++ return ret;
++}
+Index: head-2008-11-25/drivers/xen/privcmd/privcmd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/privcmd/privcmd.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,356 @@
++/******************************************************************************
++ * privcmd.c
++ *
++ * Interface to privileged domain-0 commands.
++ *
++ * Copyright (c) 2002-2004, K A Fraser, B Dragovic
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/slab.h>
++#include <linux/string.h>
++#include <linux/errno.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/swap.h>
++#include <linux/smp_lock.h>
++#include <linux/highmem.h>
++#include <linux/pagemap.h>
++#include <linux/seq_file.h>
++#include <asm/hypervisor.h>
++
++#include <asm/pgalloc.h>
++#include <asm/pgtable.h>
++#include <asm/uaccess.h>
++#include <asm/tlb.h>
++#include <asm/hypervisor.h>
++#include <xen/public/privcmd.h>
++#include <xen/interface/xen.h>
++#include <xen/xen_proc.h>
++#include <xen/features.h>
++
++static struct proc_dir_entry *privcmd_intf;
++static struct proc_dir_entry *capabilities_intf;
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma);
++#endif
++
++static long privcmd_ioctl(struct file *file,
++ unsigned int cmd, unsigned long data)
++{
++ int ret = -ENOSYS;
++ void __user *udata = (void __user *) data;
++
++ switch (cmd) {
++ case IOCTL_PRIVCMD_HYPERCALL: {
++ privcmd_hypercall_t hypercall;
++
++ if (copy_from_user(&hypercall, udata, sizeof(hypercall)))
++ return -EFAULT;
++
++#if defined(__i386__)
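++ /*
++ * Each hypercall stub occupies 32 bytes of the hypervisor-
++ * provided hypercall_page; the op number is bounds-checked
++ * against PAGE_SIZE >> 5 and scaled by 32 (shl $5) to index it.
++ */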
++ if (hypercall.op >= (PAGE_SIZE >> 5))
++ break;
++ __asm__ __volatile__ (
++ "pushl %%ebx; pushl %%ecx; pushl %%edx; "
++ "pushl %%esi; pushl %%edi; "
++ "movl 8(%%eax),%%ebx ;"
++ "movl 16(%%eax),%%ecx ;"
++ "movl 24(%%eax),%%edx ;"
++ "movl 32(%%eax),%%esi ;"
++ "movl 40(%%eax),%%edi ;"
++ "movl (%%eax),%%eax ;"
++ "shll $5,%%eax ;"
++ "addl $hypercall_page,%%eax ;"
++ "call *%%eax ;"
++ "popl %%edi; popl %%esi; popl %%edx; "
++ "popl %%ecx; popl %%ebx"
++ : "=a" (ret) : "0" (&hypercall) : "memory" );
++#elif defined (__x86_64__)
++ if (hypercall.op < (PAGE_SIZE >> 5)) {
++ long ign1, ign2, ign3;
++ __asm__ __volatile__ (
++ "movq %8,%%r10; movq %9,%%r8;"
++ "shll $5,%%eax ;"
++ "addq $hypercall_page,%%rax ;"
++ "call *%%rax"
++ : "=a" (ret), "=D" (ign1),
++ "=S" (ign2), "=d" (ign3)
++ : "0" ((unsigned int)hypercall.op),
++ "1" (hypercall.arg[0]),
++ "2" (hypercall.arg[1]),
++ "3" (hypercall.arg[2]),
++ "g" (hypercall.arg[3]),
++ "g" (hypercall.arg[4])
++ : "r8", "r10", "memory" );
++ }
++#else
++ ret = privcmd_hypercall(&hypercall);
++#endif
++ }
++ break;
++
++ case IOCTL_PRIVCMD_MMAP: {
++#define MMAP_NR_PER_PAGE (int)((PAGE_SIZE-sizeof(struct list_head))/sizeof(privcmd_mmap_entry_t))
++ privcmd_mmap_t mmapcmd;
++ privcmd_mmap_entry_t *msg;
++ privcmd_mmap_entry_t __user *p;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long va;
++ int i, rc;
++ LIST_HEAD(pagelist);
++ struct list_head *l, *l2;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&mmapcmd, udata, sizeof(mmapcmd)))
++ return -EFAULT;
++
++ p = mmapcmd.entry;
++ for (i = 0; i < mmapcmd.num;) {
++ int nr = min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
++
++ rc = -ENOMEM;
++ l = (struct list_head *) __get_free_page(GFP_KERNEL);
++ if (l == NULL)
++ goto mmap_out;
++
++ INIT_LIST_HEAD(l);
++ list_add_tail(l, &pagelist);
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++
++ rc = -EFAULT;
++ if (copy_from_user(msg, p, nr*sizeof(*msg)))
++ goto mmap_out;
++ i += nr;
++ p += nr;
++ }
++
++ l = pagelist.next;
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++
++ down_write(&mm->mmap_sem);
++
++ vma = find_vma(mm, msg->va);
++ rc = -EINVAL;
++ if (!vma || (msg->va != vma->vm_start) ||
++ !privcmd_enforce_singleshot_mapping(vma))
++ goto mmap_out;
++
++ va = vma->vm_start;
++
++ i = 0;
++ list_for_each(l, &pagelist) {
++ int nr = i + min(mmapcmd.num - i, MMAP_NR_PER_PAGE);
++
++ msg = (privcmd_mmap_entry_t*)(l + 1);
++ while (i<nr) {
++
++ /* Do not allow range to wrap the address space. */
++ rc = -EINVAL;
++ if ((msg->npages > (LONG_MAX >> PAGE_SHIFT)) ||
++ ((unsigned long)(msg->npages << PAGE_SHIFT) >= -va))
++ goto mmap_out;
++
++ /* Range chunks must be contiguous in va space. */
++ if ((msg->va != va) ||
++ ((msg->va+(msg->npages<<PAGE_SHIFT)) > vma->vm_end))
++ goto mmap_out;
++
++ if ((rc = direct_remap_pfn_range(
++ vma,
++ msg->va & PAGE_MASK,
++ msg->mfn,
++ msg->npages << PAGE_SHIFT,
++ vma->vm_page_prot,
++ mmapcmd.dom)) < 0)
++ goto mmap_out;
++
++ va += msg->npages << PAGE_SHIFT;
++ msg++;
++ i++;
++ }
++ }
++
++ rc = 0;
++
++ mmap_out:
++ up_write(&mm->mmap_sem);
++ list_for_each_safe(l,l2,&pagelist)
++ free_page((unsigned long)l);
++ ret = rc;
++ }
++#undef MMAP_NR_PER_PAGE
++ break;
++
++ case IOCTL_PRIVCMD_MMAPBATCH: {
++#define MMAPBATCH_NR_PER_PAGE (unsigned long)((PAGE_SIZE-sizeof(struct list_head))/sizeof(unsigned long))
++ privcmd_mmapbatch_t m;
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ xen_pfn_t __user *p;
++ unsigned long addr, *mfn, nr_pages;
++ int i;
++ LIST_HEAD(pagelist);
++ struct list_head *l, *l2;
++
++ if (!is_initial_xendomain())
++ return -EPERM;
++
++ if (copy_from_user(&m, udata, sizeof(m)))
++ return -EFAULT;
++
++ nr_pages = m.num;
++ if ((m.num <= 0) || (nr_pages > (LONG_MAX >> PAGE_SHIFT)))
++ return -EINVAL;
++
++ p = m.arr;
++ for (i=0; i<nr_pages; ) {
++ int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++
++ ret = -ENOMEM;
++ l = (struct list_head *)__get_free_page(GFP_KERNEL);
++ if (l == NULL)
++ goto mmapbatch_out;
++
++ INIT_LIST_HEAD(l);
++ list_add_tail(l, &pagelist);
++
++ mfn = (unsigned long*)(l + 1);
++ ret = -EFAULT;
++ if (copy_from_user(mfn, p, nr*sizeof(*mfn)))
++ goto mmapbatch_out;
++
++ i += nr; p += nr;
++ }
++
++ down_write(&mm->mmap_sem);
++
++ vma = find_vma(mm, m.addr);
++ ret = -EINVAL;
++ if (!vma ||
++ (m.addr != vma->vm_start) ||
++ ((m.addr + (nr_pages << PAGE_SHIFT)) != vma->vm_end) ||
++ !privcmd_enforce_singleshot_mapping(vma)) {
++ up_write(&mm->mmap_sem);
++ goto mmapbatch_out;
++ }
++
++ p = m.arr;
++ addr = m.addr;
++ i = 0;
++ ret = 0;
++ list_for_each(l, &pagelist) {
++ int nr = i + min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++ mfn = (unsigned long *)(l + 1);
++
++ while (i<nr) {
++ if(direct_remap_pfn_range(vma, addr & PAGE_MASK,
++ *mfn, PAGE_SIZE,
++ vma->vm_page_prot, m.dom) < 0) {
++ *mfn |= 0xf0000000U;
++ ret++;
++ }
++ mfn++; i++; addr += PAGE_SIZE;
++ }
++ }
++
++ up_write(&mm->mmap_sem);
++ if (ret > 0) {
++ p = m.arr;
++ i = 0;
++ ret = 0;
++ list_for_each(l, &pagelist) {
++ int nr = min(nr_pages - i, MMAPBATCH_NR_PER_PAGE);
++ mfn = (unsigned long *)(l + 1);
++ if (copy_to_user(p, mfn, nr*sizeof(*mfn)))
++ ret = -EFAULT;
++ i += nr; p += nr;
++ }
++ }
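++	/*
++	 * Entries that failed direct_remap_pfn_range() were tagged above
++	 * by OR-ing 0xf0000000 into the corresponding frame number, and
++	 * the array was copied back so userspace can spot which frames
++	 * could not be mapped.
++	 */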
++ mmapbatch_out:
++	list_for_each_safe(l, l2, &pagelist)
++ free_page((unsigned long)l);
++#undef MMAPBATCH_NR_PER_PAGE
++ }
++ break;
++
++ default:
++ ret = -EINVAL;
++ break;
++ }
++
++ return ret;
++}
++
++#ifndef HAVE_ARCH_PRIVCMD_MMAP
++static struct page *privcmd_nopage(struct vm_area_struct *vma,
++ unsigned long address,
++ int *type)
++{
++ return NOPAGE_SIGBUS;
++}
++
++static struct vm_operations_struct privcmd_vm_ops = {
++ .nopage = privcmd_nopage
++};
++
++static int privcmd_mmap(struct file * file, struct vm_area_struct * vma)
++{
++ /* Unsupported for auto-translate guests. */
++ if (xen_feature(XENFEAT_auto_translated_physmap))
++ return -ENOSYS;
++
++ /* DONTCOPY is essential for Xen as copy_page_range is broken. */
++ vma->vm_flags |= VM_RESERVED | VM_IO | VM_DONTCOPY;
++ vma->vm_ops = &privcmd_vm_ops;
++ vma->vm_private_data = NULL;
++
++ return 0;
++}
++
++static int privcmd_enforce_singleshot_mapping(struct vm_area_struct *vma)
++{
++ return (xchg(&vma->vm_private_data, (void *)1) == NULL);
++}
++#endif
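++/*
++ * Note on the single-shot logic above: vm_private_data doubles as a
++ * "mapped once" flag. xchg() atomically replaces it with 1 and returns
++ * the previous value, so only the first IOCTL_PRIVCMD_MMAP(BATCH) caller
++ * sees NULL and may populate the VMA; any repeat attempt fails with
++ * -EINVAL in the ioctl handlers.
++ */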
++
++static const struct file_operations privcmd_file_ops = {
++ .unlocked_ioctl = privcmd_ioctl,
++ .mmap = privcmd_mmap,
++};
++
++static int capabilities_read(char *page, char **start, off_t off,
++ int count, int *eof, void *data)
++{
++ int len = 0;
++ *page = 0;
++
++ if (is_initial_xendomain())
++		len = sprintf(page, "control_d\n");
++
++ *eof = 1;
++ return len;
++}
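++/*
++ * /proc/xen/capabilities reports "control_d" only in the initial domain;
++ * userspace toolstacks read this file to decide whether they are running
++ * in dom0.
++ */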
++
++static int __init privcmd_init(void)
++{
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ privcmd_intf = create_xen_proc_entry("privcmd", 0400);
++ if (privcmd_intf != NULL)
++ privcmd_intf->proc_fops = &privcmd_file_ops;
++
++	capabilities_intf = create_xen_proc_entry("capabilities", 0400);
++ if (capabilities_intf != NULL)
++ capabilities_intf->read_proc = capabilities_read;
++
++ return 0;
++}
++
++__initcall(privcmd_init);
+Index: head-2008-11-25/drivers/xen/scsiback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/Makefile 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,4 @@
++obj-$(CONFIG_XEN_SCSI_BACKEND) := xen-scsibk.o
++
++xen-scsibk-y := interface.o scsiback.o xenbus.o translate.o emulate.o
++
+Index: head-2008-11-25/drivers/xen/scsiback/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/common.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,181 @@
++/*
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __SCSIIF__BACKEND__COMMON_H__
++#define __SCSIIF__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/wait.h>
++#include <linux/sched.h>
++#include <linux/kthread.h>
++#include <linux/blkdev.h>
++#include <linux/list.h>
++#include <linux/kthread.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
++#include <asm/io.h>
++#include <asm/setup.h>
++#include <asm/pgalloc.h>
++#include <asm/delay.h>
++#include <xen/evtchn.h>
++#include <asm/hypervisor.h>
++#include <xen/gnttab.h>
++#include <xen/driver_util.h>
++#include <xen/xenbus.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/vscsiif.h>
++
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct ids_tuple {
++ unsigned int hst; /* host */
++ unsigned int chn; /* channel */
++ unsigned int tgt; /* target */
++ unsigned int lun; /* LUN */
++};
++
++struct v2p_entry {
++ struct ids_tuple v; /* translate from */
++ struct scsi_device *sdev; /* translate to */
++ struct list_head l;
++};
++
++struct vscsibk_info {
++ struct xenbus_device *dev;
++
++ domid_t domid;
++ unsigned int evtchn;
++ unsigned int irq;
++
++ struct vscsiif_back_ring ring;
++ struct vm_struct *ring_area;
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++
++ spinlock_t ring_lock;
++ atomic_t nr_unreplied_reqs;
++
++ spinlock_t v2p_lock;
++ struct list_head v2p_entry_lists;
++
++ struct task_struct *kthread;
++ wait_queue_head_t waiting_to_free;
++ wait_queue_head_t wq;
++ unsigned int waiting_reqs;
++ struct page **mmap_pages;
++
++};
++
++typedef struct {
++ unsigned char act;
++ struct vscsibk_info *info;
++ struct scsi_device *sdev;
++
++ uint16_t rqid;
++
++ uint8_t nr_segments;
++ uint8_t cmnd[VSCSIIF_MAX_COMMAND_SIZE];
++ uint8_t cmd_len;
++
++ uint8_t sc_data_direction;
++ uint16_t timeout_per_command;
++
++ uint32_t request_bufflen;
++ struct scatterlist *sgl;
++ grant_ref_t gref[VSCSIIF_SG_TABLESIZE];
++
++ int32_t rslt;
++ uint32_t resid;
++ uint8_t sense_buffer[VSCSIIF_SENSE_BUFFERSIZE];
++
++ struct list_head free_list;
++} pending_req_t;
++
++
++
++#define scsiback_get(_b) (atomic_inc(&(_b)->nr_unreplied_reqs))
++#define scsiback_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->nr_unreplied_reqs)) \
++ wake_up(&(_b)->waiting_to_free);\
++ } while (0)
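++/*
++ * nr_unreplied_reqs counts requests handed to the native SCSI layer but
++ * not yet answered on the ring. scsiback_disconnect() waits on
++ * waiting_to_free until the count reaches zero, so the final
++ * scsiback_put() is what allows a backend instance to be torn down.
++ */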
++
++#define VSCSIIF_TIMEOUT (900*HZ)
++
++
++irqreturn_t scsiback_intr(int, void *, struct pt_regs *);
++int scsiback_init_sring(struct vscsibk_info *info,
++ unsigned long ring_ref, unsigned int evtchn);
++int scsiback_schedule(void *data);
++
++
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid);
++void scsiback_free(struct vscsibk_info *info);
++void scsiback_disconnect(struct vscsibk_info *info);
++int __init scsiback_interface_init(void);
++void scsiback_interface_exit(void);
++int scsiback_xenbus_init(void);
++void scsiback_xenbus_unregister(void);
++
++void scsiback_init_translation_table(struct vscsibk_info *info);
++
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++ struct scsi_device *sdev, struct ids_tuple *v);
++
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++ struct ids_tuple *v);
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++ struct ids_tuple *v);
++void scsiback_release_translation_entry(struct vscsibk_info *info);
++
++
++void scsiback_cmd_exec(pending_req_t *pending_req);
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++ uint32_t resid, pending_req_t *pending_req);
++void scsiback_fast_flush_area(pending_req_t *req);
++
++void scsiback_rsp_emulation(pending_req_t *pending_req);
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req);
++void scsiback_emulation_init(void);
++
++
++#endif /* __SCSIIF__BACKEND__COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/scsiback/emulate.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/emulate.c 2008-08-07 12:44:36.000000000 +0200
+@@ -0,0 +1,454 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
++
++/* The following SCSI commands are not defined in scsi/scsi.h */
++#define EXTENDED_COPY 0x83 /* EXTENDED COPY command */
++#define REPORT_ALIASES 0xa3 /* REPORT ALIASES command */
++#define CHANGE_ALIASES 0xa4 /* CHANGE ALIASES command */
++#define SET_PRIORITY 0xa4 /* SET PRIORITY command */
++
++
++/*
++  Bitmap used to control emulation.
++  (Bits 3 to 7 are reserved for future use.)
++*/
++#define VSCSIIF_NEED_CMD_EXEC 0x01 /* If this bit is set, cmd exec */
++ /* is required. */
++#define VSCSIIF_NEED_EMULATE_REQBUF 0x02 /* If this bit is set, need */
++					/* emulation request buff before */
++ /* cmd exec. */
++#define VSCSIIF_NEED_EMULATE_RSPBUF 0x04 /* If this bit is set, need */
++ /* emulation resp buff after */
++ /* cmd exec. */
++
++/* Additional Sense Code (ASC) used */
++#define NO_ADDITIONAL_SENSE 0x0
++#define LOGICAL_UNIT_NOT_READY 0x4
++#define UNRECOVERED_READ_ERR 0x11
++#define PARAMETER_LIST_LENGTH_ERR 0x1a
++#define INVALID_OPCODE 0x20
++#define ADDR_OUT_OF_RANGE 0x21
++#define INVALID_FIELD_IN_CDB 0x24
++#define INVALID_FIELD_IN_PARAM_LIST 0x26
++#define POWERON_RESET 0x29
++#define SAVING_PARAMS_UNSUP 0x39
++#define THRESHOLD_EXCEEDED 0x5d
++#define LOW_POWER_COND_ON 0x5e
++
++
++
++/* Number of SCSI op_codes */
++#define VSCSI_MAX_SCSI_OP_CODE 256
++static unsigned char bitmap[VSCSI_MAX_SCSI_OP_CODE];
++
++
++
++/*
++ Emulation routines for each SCSI op_code.
++*/
++static void (*pre_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
++static void (*post_function[VSCSI_MAX_SCSI_OP_CODE])(pending_req_t *, void *);
++
++
++static const int check_condition_result =
++ (DRIVER_SENSE << 24) | SAM_STAT_CHECK_CONDITION;
++
++static void scsiback_mk_sense_buffer(uint8_t *data, uint8_t key,
++ uint8_t asc, uint8_t asq)
++{
++ data[0] = 0x70; /* fixed, current */
++ data[2] = key;
++ data[7] = 0xa; /* implies 18 byte sense buffer */
++ data[12] = asc;
++ data[13] = asq;
++}
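++/*
++ * The buffer built above follows the fixed-format sense data layout:
++ * byte 0 = 0x70 (current error, fixed format), byte 2 = sense key,
++ * byte 7 = additional length (0xa, i.e. 18 bytes total), byte 12 = ASC
++ * and byte 13 = ASCQ.
++ */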
++
++static void resp_not_supported_cmd(pending_req_t *pending_req, void *data)
++{
++ scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++ INVALID_OPCODE, 0);
++ pending_req->resid = 0;
++ pending_req->rslt = check_condition_result;
++}
++
++
++static int __copy_to_sg(struct scatterlist *sg, unsigned int nr_sg,
++ void *buf, unsigned int buflen)
++{
++ void *from = buf;
++ void *to;
++ unsigned int from_rest = buflen;
++ unsigned int to_capa;
++ unsigned int copy_size = 0;
++ unsigned int i;
++ unsigned long pfn;
++
++ for (i = 0; i < nr_sg; i++) {
++ if (sg->page == NULL) {
++ printk(KERN_WARNING "%s: inconsistent length field in "
++ "scatterlist\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ to_capa = sg->length;
++ copy_size = min_t(unsigned int, to_capa, from_rest);
++
++ pfn = page_to_pfn(sg->page);
++ to = pfn_to_kaddr(pfn) + (sg->offset);
++ memcpy(to, from, copy_size);
++
++ from_rest -= copy_size;
++ if (from_rest == 0) {
++ return 0;
++ }
++
++ sg++;
++ from += copy_size;
++ }
++
++ printk(KERN_WARNING "%s: no space in scatterlist\n",
++ __FUNCTION__);
++ return -ENOMEM;
++}
++
++static int __copy_from_sg(struct scatterlist *sg, unsigned int nr_sg,
++ void *buf, unsigned int buflen)
++{
++ void *from;
++ void *to = buf;
++ unsigned int from_rest;
++ unsigned int to_capa = buflen;
++ unsigned int copy_size;
++ unsigned int i;
++ unsigned long pfn;
++
++ for (i = 0; i < nr_sg; i++) {
++ if (sg->page == NULL) {
++ printk(KERN_WARNING "%s: inconsistent length field in "
++ "scatterlist\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ from_rest = sg->length;
++ if ((from_rest > 0) && (to_capa < from_rest)) {
++ printk(KERN_WARNING
++ "%s: no space in destination buffer\n",
++ __FUNCTION__);
++ return -ENOMEM;
++ }
++ copy_size = from_rest;
++
++ pfn = page_to_pfn(sg->page);
++ from = pfn_to_kaddr(pfn) + (sg->offset);
++ memcpy(to, from, copy_size);
++
++ to_capa -= copy_size;
++
++ sg++;
++ to += copy_size;
++ }
++
++ return 0;
++}
++
++static int __nr_luns_under_host(struct vscsibk_info *info)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++ int lun_cnt = 0;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ lun_cnt++;
++ }
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++
++ return (lun_cnt);
++}
++
++
++/* REPORT LUNS Define*/
++#define VSCSI_REPORT_LUNS_HEADER 8
++#define VSCSI_REPORT_LUNS_RETRY 3
++
++/* adapted from scsi_debug.c/resp_report_luns() */
++static void __report_luns(pending_req_t *pending_req, void *data)
++{
++ struct vscsibk_info *info = pending_req->info;
++ unsigned int channel = pending_req->sdev->channel;
++ unsigned int target = pending_req->sdev->id;
++ unsigned int nr_seg = pending_req->nr_segments;
++ unsigned char *cmd = (unsigned char *)pending_req->cmnd;
++
++ unsigned char *buff = NULL;
++	unsigned int alloc_len;
++ unsigned int alloc_luns = 0;
++ unsigned int req_bufflen = 0;
++ unsigned int actual_len = 0;
++ unsigned int retry_cnt = 0;
++ int select_report = (int)cmd[2];
++ int i, lun_cnt = 0, lun, upper, err = 0;
++
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ struct scsi_lun *one_lun;
++
++ req_bufflen = cmd[9] + (cmd[8] << 8) + (cmd[7] << 16) + (cmd[6] << 24);
++ if ((req_bufflen < 4) || (select_report != 0))
++ goto fail;
++
++ alloc_luns = __nr_luns_under_host(info);
++ alloc_len = sizeof(struct scsi_lun) * alloc_luns
++ + VSCSI_REPORT_LUNS_HEADER;
++retry:
++ if ((buff = kmalloc(alloc_len, GFP_KERNEL)) == NULL) {
++ printk(KERN_ERR "scsiback:%s kmalloc err\n", __FUNCTION__);
++ goto fail;
++ }
++
++ memset(buff, 0, alloc_len);
++
++ one_lun = (struct scsi_lun *) &buff[8];
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == channel) &&
++ (entry->v.tgt == target)) {
++
++ /* check overflow */
++ if (lun_cnt >= alloc_luns) {
++ spin_unlock_irqrestore(&info->v2p_lock,
++ flags);
++
++ if (retry_cnt < VSCSI_REPORT_LUNS_RETRY) {
++ retry_cnt++;
++					kfree(buff);
++ goto retry;
++ }
++
++ goto fail;
++ }
++
++ lun = entry->v.lun;
++ upper = (lun >> 8) & 0x3f;
++ if (upper)
++ one_lun[lun_cnt].scsi_lun[0] = upper;
++ one_lun[lun_cnt].scsi_lun[1] = lun & 0xff;
++ lun_cnt++;
++ }
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++
++ buff[2] = ((sizeof(struct scsi_lun) * lun_cnt) >> 8) & 0xff;
++ buff[3] = (sizeof(struct scsi_lun) * lun_cnt) & 0xff;
++
++ actual_len = lun_cnt * sizeof(struct scsi_lun)
++ + VSCSI_REPORT_LUNS_HEADER;
++ req_bufflen = 0;
++ for (i = 0; i < nr_seg; i++)
++ req_bufflen += pending_req->sgl[i].length;
++
++ err = __copy_to_sg(pending_req->sgl, nr_seg, buff,
++ min(req_bufflen, actual_len));
++ if (err)
++ goto fail;
++
++ memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++ pending_req->rslt = 0x00;
++ pending_req->resid = req_bufflen - min(req_bufflen, actual_len);
++
++ kfree(buff);
++ return;
++
++fail:
++ scsiback_mk_sense_buffer(pending_req->sense_buffer, ILLEGAL_REQUEST,
++ INVALID_FIELD_IN_CDB, 0);
++ pending_req->rslt = check_condition_result;
++ pending_req->resid = 0;
++	kfree(buff);
++ return;
++}
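++/*
++ * The response assembled above follows the REPORT LUNS payload format:
++ * an 8-byte header whose leading bytes hold the LUN list length in bytes
++ * (filled into buff[2]/buff[3] here), followed by one 8-byte
++ * struct scsi_lun per matching translation entry.
++ */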
++
++
++
++int __pre_do_emulation(pending_req_t *pending_req, void *data)
++{
++ uint8_t op_code = pending_req->cmnd[0];
++
++ if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_REQBUF) &&
++ pre_function[op_code] != NULL) {
++ pre_function[op_code](pending_req, data);
++ }
++
++	/*
++	  0: no native driver call needed, so return immediately.
++	  1: not emulated, or the native driver should be called
++	     after modifying the request buffer.
++	*/
++ return !!(bitmap[op_code] & VSCSIIF_NEED_CMD_EXEC);
++}
++
++void scsiback_rsp_emulation(pending_req_t *pending_req)
++{
++ uint8_t op_code = pending_req->cmnd[0];
++
++ if ((bitmap[op_code] & VSCSIIF_NEED_EMULATE_RSPBUF) &&
++ post_function[op_code] != NULL) {
++ post_function[op_code](pending_req, NULL);
++ }
++
++ return;
++}
++
++
++void scsiback_req_emulation_or_cmdexec(pending_req_t *pending_req)
++{
++ if (__pre_do_emulation(pending_req, NULL)) {
++ scsiback_cmd_exec(pending_req);
++ }
++ else {
++ scsiback_fast_flush_area(pending_req);
++ scsiback_do_resp_with_sense(pending_req->sense_buffer,
++ pending_req->rslt, pending_req->resid, pending_req);
++ }
++}
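++/*
++ * Request flow: __pre_do_emulation() runs the per-op_code pre hook (if
++ * any), and its return value decides whether the CDB is forwarded to the
++ * native SCSI layer via scsiback_cmd_exec() or answered immediately from
++ * the sense/result fields the hook prepared.
++ */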
++
++
++/*
++ Following are not customizable functions.
++*/
++void scsiback_emulation_init(void)
++{
++ int i;
++
++ /* Initialize to default state */
++ for (i = 0; i < VSCSI_MAX_SCSI_OP_CODE; i++) {
++ bitmap[i] = (VSCSIIF_NEED_EMULATE_REQBUF |
++ VSCSIIF_NEED_EMULATE_RSPBUF);
++ pre_function[i] = resp_not_supported_cmd;
++ post_function[i] = NULL;
++		/* i.e. by default:
++		   - pre-emulation answers the request as an unsupported
++		     command (ILLEGAL_REQUEST/INVALID_OPCODE)
++		   - no post-emulation
++		   - the native driver is never called
++		*/
++ }
++
++ /*
++ Register appropriate functions below as you need.
++ (See scsi/scsi.h for definition of SCSI op_code.)
++ */
++
++	/*
++	  These commands are not emulated; they go straight to the
++	  native driver.
++	*/
++ bitmap[TEST_UNIT_READY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[TEST_UNIT_READY] = NULL;
++ post_function[TEST_UNIT_READY] = NULL;
++
++ bitmap[REZERO_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[REZERO_UNIT] = NULL;
++ post_function[REZERO_UNIT] = NULL;
++
++ bitmap[REQUEST_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[REQUEST_SENSE] = NULL;
++ post_function[REQUEST_SENSE] = NULL;
++
++ bitmap[FORMAT_UNIT] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[FORMAT_UNIT] = NULL;
++ post_function[FORMAT_UNIT] = NULL;
++
++ bitmap[READ_BLOCK_LIMITS] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_BLOCK_LIMITS] = NULL;
++ post_function[READ_BLOCK_LIMITS] = NULL;
++
++ bitmap[READ_6] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_6] = NULL;
++ post_function[READ_6] = NULL;
++
++ bitmap[WRITE_6] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_6] = NULL;
++ post_function[WRITE_6] = NULL;
++
++ bitmap[WRITE_FILEMARKS] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_FILEMARKS] = NULL;
++ post_function[WRITE_FILEMARKS] = NULL;
++
++ bitmap[SPACE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[SPACE] = NULL;
++ post_function[SPACE] = NULL;
++
++ bitmap[INQUIRY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[INQUIRY] = NULL;
++ post_function[INQUIRY] = NULL;
++
++ bitmap[ERASE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[ERASE] = NULL;
++ post_function[ERASE] = NULL;
++
++ bitmap[MODE_SENSE] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[MODE_SENSE] = NULL;
++ post_function[MODE_SENSE] = NULL;
++
++ bitmap[SEND_DIAGNOSTIC] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[SEND_DIAGNOSTIC] = NULL;
++ post_function[SEND_DIAGNOSTIC] = NULL;
++
++ bitmap[READ_CAPACITY] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_CAPACITY] = NULL;
++ post_function[READ_CAPACITY] = NULL;
++
++ bitmap[READ_10] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[READ_10] = NULL;
++ post_function[READ_10] = NULL;
++
++ bitmap[WRITE_10] = VSCSIIF_NEED_CMD_EXEC;
++ pre_function[WRITE_10] = NULL;
++ post_function[WRITE_10] = NULL;
++
++	/*
++	  This command is fully emulated and never reaches the native
++	  driver.
++	*/
++ pre_function[REPORT_LUNS] = __report_luns;
++ bitmap[REPORT_LUNS] = (VSCSIIF_NEED_EMULATE_REQBUF |
++ VSCSIIF_NEED_EMULATE_RSPBUF);
++
++ return;
++}
+Index: head-2008-11-25/drivers/xen/scsiback/interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/interface.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,182 @@
++/*
++ * interface management.
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include "common.h"
++
++#include <xen/evtchn.h>
++#include <linux/kthread.h>
++
++
++static kmem_cache_t *scsiback_cachep;
++
++struct vscsibk_info *vscsibk_info_alloc(domid_t domid)
++{
++ struct vscsibk_info *info;
++
++ info = kmem_cache_alloc(scsiback_cachep, GFP_KERNEL);
++ if (!info)
++ return ERR_PTR(-ENOMEM);
++
++ memset(info, 0, sizeof(*info));
++ info->domid = domid;
++ spin_lock_init(&info->ring_lock);
++ atomic_set(&info->nr_unreplied_reqs, 0);
++ init_waitqueue_head(&info->wq);
++ init_waitqueue_head(&info->waiting_to_free);
++
++ return info;
++}
++
++static int map_frontend_page( struct vscsibk_info *info,
++ unsigned long ring_ref)
++{
++ struct gnttab_map_grant_ref op;
++ int err;
++
++ gnttab_set_map_op(&op, (unsigned long)info->ring_area->addr,
++ GNTMAP_host_map, ring_ref,
++ info->domid);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1);
++ BUG_ON(err);
++
++ if (op.status) {
++		printk(KERN_ERR "scsiback: Grant table operation failure!\n");
++ return op.status;
++ }
++
++ info->shmem_ref = ring_ref;
++ info->shmem_handle = op.handle;
++
++ return (GNTST_okay);
++}
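++/*
++ * The grant operation above maps the frontend's shared ring page into
++ * the backend: ring_ref names the grant issued by the frontend, and
++ * GNTTABOP_map_grant_ref maps that foreign page at ring_area->addr.
++ * The handle/ref pair is saved so unmap_frontend_page() can undo the
++ * mapping on disconnect.
++ */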
++
++static void unmap_frontend_page(struct vscsibk_info *info)
++{
++ struct gnttab_unmap_grant_ref op;
++ int err;
++
++ gnttab_set_unmap_op(&op, (unsigned long)info->ring_area->addr,
++ GNTMAP_host_map, info->shmem_handle);
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1);
++ BUG_ON(err);
++
++}
++
++int scsiback_init_sring(struct vscsibk_info *info,
++ unsigned long ring_ref, unsigned int evtchn)
++{
++ struct vscsiif_sring *sring;
++ int err;
++
++ if (info->irq) {
++		printk(KERN_ERR "scsiback: Already connected?\n");
++		return -EBUSY;
++ }
++
++ info->ring_area = alloc_vm_area(PAGE_SIZE);
++	if (!info->ring_area)
++ return -ENOMEM;
++
++ err = map_frontend_page(info, ring_ref);
++ if (err)
++ goto free_vm;
++
++ sring = (struct vscsiif_sring *) info->ring_area->addr;
++ BACK_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ info->domid, evtchn,
++ scsiback_intr, 0, "vscsiif-backend", info);
++
++ if (err < 0)
++ goto unmap_page;
++
++ info->irq = err;
++
++ return 0;
++
++unmap_page:
++ unmap_frontend_page(info);
++free_vm:
++ free_vm_area(info->ring_area);
++
++ return err;
++}
++
++void scsiback_disconnect(struct vscsibk_info *info)
++{
++ if (info->kthread) {
++ kthread_stop(info->kthread);
++ info->kthread = NULL;
++ }
++
++ wait_event(info->waiting_to_free,
++ atomic_read(&info->nr_unreplied_reqs) == 0);
++
++ if (info->irq) {
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++ }
++
++ if (info->ring.sring) {
++ unmap_frontend_page(info);
++ free_vm_area(info->ring_area);
++ info->ring.sring = NULL;
++ }
++}
++
++void scsiback_free(struct vscsibk_info *info)
++{
++ kmem_cache_free(scsiback_cachep, info);
++}
++
++int __init scsiback_interface_init(void)
++{
++ scsiback_cachep = kmem_cache_create("vscsiif_cache",
++ sizeof(struct vscsibk_info), 0, 0, NULL, NULL);
++ if (!scsiback_cachep) {
++ printk(KERN_ERR "scsiback: can't init scsi cache\n");
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++void scsiback_interface_exit(void)
++{
++ kmem_cache_destroy(scsiback_cachep);
++}
+Index: head-2008-11-25/drivers/xen/scsiback/scsiback.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/scsiback.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,717 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/spinlock.h>
++#include <linux/kthread.h>
++#include <linux/list.h>
++#include <linux/delay.h>
++#include <xen/balloon.h>
++#include <asm/hypervisor.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi_dbg.h>
++#include <scsi/scsi_eh.h>
++
++#include "common.h"
++
++
++struct list_head pending_free;
++DEFINE_SPINLOCK(pending_free_lock);
++DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
++
++int vscsiif_reqs = VSCSIIF_BACK_MAX_PENDING_REQS;
++module_param_named(reqs, vscsiif_reqs, int, 0);
++MODULE_PARM_DESC(reqs, "Number of scsiback requests to allocate");
++
++static int log_print_stat = 0;
++module_param(log_print_stat, int, 0644);
++
++#define SCSIBACK_INVALID_HANDLE (~0)
++
++static pending_req_t *pending_reqs;
++static struct page **pending_pages;
++static grant_handle_t *pending_grant_handles;
++
++static int vaddr_pagenr(pending_req_t *req, int seg)
++{
++ return (req - pending_reqs) * VSCSIIF_SG_TABLESIZE + seg;
++}
++
++static unsigned long vaddr(pending_req_t *req, int seg)
++{
++ unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
++ return (unsigned long)pfn_to_kaddr(pfn);
++}
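++/*
++ * Each pending request owns a fixed window of VSCSIIF_SG_TABLESIZE
++ * pre-allocated pages: vaddr_pagenr() turns a (request, segment) pair
++ * into an index into pending_pages, and vaddr() into the kernel virtual
++ * address at which that segment's granted page is mapped.
++ */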
++
++#define pending_handle(_req, _seg) \
++ (pending_grant_handles[vaddr_pagenr(_req, _seg)])
++
++
++void scsiback_fast_flush_area(pending_req_t *req)
++{
++ struct gnttab_unmap_grant_ref unmap[VSCSIIF_SG_TABLESIZE];
++ unsigned int i, invcount = 0;
++ grant_handle_t handle;
++ int err;
++
++ if (req->nr_segments) {
++ for (i = 0; i < req->nr_segments; i++) {
++ handle = pending_handle(req, i);
++ if (handle == SCSIBACK_INVALID_HANDLE)
++ continue;
++ gnttab_set_unmap_op(&unmap[i], vaddr(req, i),
++ GNTMAP_host_map, handle);
++ pending_handle(req, i) = SCSIBACK_INVALID_HANDLE;
++ invcount++;
++ }
++
++ err = HYPERVISOR_grant_table_op(
++ GNTTABOP_unmap_grant_ref, unmap, invcount);
++ BUG_ON(err);
++ kfree(req->sgl);
++ }
++
++ return;
++}
++
++
++static pending_req_t * alloc_req(struct vscsibk_info *info)
++{
++ pending_req_t *req = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ if (!list_empty(&pending_free)) {
++ req = list_entry(pending_free.next, pending_req_t, free_list);
++ list_del(&req->free_list);
++ }
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ return req;
++}
++
++
++static void free_req(pending_req_t *req)
++{
++ unsigned long flags;
++ int was_empty;
++
++ spin_lock_irqsave(&pending_free_lock, flags);
++ was_empty = list_empty(&pending_free);
++ list_add(&req->free_list, &pending_free);
++ spin_unlock_irqrestore(&pending_free_lock, flags);
++ if (was_empty)
++ wake_up(&pending_free_wq);
++}
++
++
++static void scsiback_notify_work(struct vscsibk_info *info)
++{
++ info->waiting_reqs = 1;
++ wake_up(&info->wq);
++}
++
++void scsiback_do_resp_with_sense(char *sense_buffer, int32_t result,
++ uint32_t resid, pending_req_t *pending_req)
++{
++ vscsiif_response_t *ring_res;
++ struct vscsibk_info *info = pending_req->info;
++ int notify;
++ int more_to_do = 1;
++ unsigned long flags;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++ spin_lock_irqsave(&info->ring_lock, flags);
++
++ ring_res = RING_GET_RESPONSE(&info->ring, info->ring.rsp_prod_pvt);
++ info->ring.rsp_prod_pvt++;
++
++ ring_res->rslt = result;
++ ring_res->rqid = pending_req->rqid;
++
++ if (sense_buffer != NULL) {
++ memcpy(ring_res->sense_buffer, sense_buffer,
++ VSCSIIF_SENSE_BUFFERSIZE);
++ ring_res->sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++ } else {
++ ring_res->sense_len = 0;
++ }
++
++ ring_res->residual_len = resid;
++
++ RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&info->ring, notify);
++ if (info->ring.rsp_prod_pvt == info->ring.req_cons) {
++ RING_FINAL_CHECK_FOR_REQUESTS(&info->ring, more_to_do);
++ } else if (RING_HAS_UNCONSUMED_REQUESTS(&info->ring)) {
++ more_to_do = 1;
++ }
++
++ spin_unlock_irqrestore(&info->ring_lock, flags);
++
++ if (more_to_do)
++ scsiback_notify_work(info);
++
++ if (notify)
++ notify_remote_via_irq(info->irq);
++
++ free_req(pending_req);
++}
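++/*
++ * Responses are produced under ring_lock: the slot at rsp_prod_pvt is
++ * filled in, RING_PUSH_RESPONSES_AND_CHECK_NOTIFY() publishes it and
++ * reports whether the frontend needs an event-channel kick, and any
++ * unconsumed requests left on the ring re-arm the worker thread via
++ * scsiback_notify_work().
++ */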
++
++static void scsiback_print_status(char *sense_buffer, int errors,
++ pending_req_t *pending_req)
++{
++ struct scsi_device *sdev = pending_req->sdev;
++
++	printk(KERN_ERR "scsiback: %d:%d:%d:%d status = 0x%02x, "
++	       "message = 0x%02x, host = 0x%02x, driver = 0x%02x\n",
++	       sdev->host->host_no, sdev->channel, sdev->id, sdev->lun,
++	       status_byte(errors), msg_byte(errors),
++	       host_byte(errors), driver_byte(errors));
++
++ printk(KERN_ERR "scsiback: cmnd[0]=0x%02X\n",
++ pending_req->cmnd[0]);
++
++ if (CHECK_CONDITION & status_byte(errors))
++ __scsi_print_sense("scsiback", sense_buffer, SCSI_SENSE_BUFFERSIZE);
++}
++
++
++static void scsiback_cmd_done(struct request *req, int errors)
++{
++ pending_req_t *pending_req = req->end_io_data;
++ unsigned char *sense_buffer;
++ unsigned int resid;
++
++ sense_buffer = req->sense;
++ resid = req->data_len;
++
++ if (errors != 0) {
++ if (log_print_stat)
++ scsiback_print_status(sense_buffer, errors, pending_req);
++ }
++
++ scsiback_rsp_emulation(pending_req);
++
++ scsiback_fast_flush_area(pending_req);
++ scsiback_do_resp_with_sense(sense_buffer, errors, resid, pending_req);
++ scsiback_put(pending_req->info);
++
++ __blk_put_request(req->q, req);
++}
++
++
++static int scsiback_gnttab_data_map(vscsiif_request_t *ring_req,
++ pending_req_t *pending_req)
++{
++ u32 flags;
++ int write;
++ int i, err = 0;
++ unsigned int data_len = 0;
++ struct gnttab_map_grant_ref map[VSCSIIF_SG_TABLESIZE];
++ struct vscsibk_info *info = pending_req->info;
++
++ int data_dir = (int)pending_req->sc_data_direction;
++ unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++
++ write = (data_dir == DMA_TO_DEVICE);
++
++ if (nr_segments) {
++		/* sgl is freed in fast_flush_area() */
++ pending_req->sgl = kmalloc(sizeof(struct scatterlist) * nr_segments,
++ GFP_KERNEL);
++ if (!pending_req->sgl) {
++ printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < nr_segments; i++) {
++ flags = GNTMAP_host_map;
++ if (write)
++ flags |= GNTMAP_readonly;
++ gnttab_set_map_op(&map[i], vaddr(pending_req, i), flags,
++ ring_req->seg[i].gref,
++ info->domid);
++ }
++
++ err = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nr_segments);
++ BUG_ON(err);
++
++ for (i = 0; i < nr_segments; i++) {
++ if (unlikely(map[i].status != 0)) {
++ printk(KERN_ERR "scsiback: invalid buffer -- could not remap it\n");
++ map[i].handle = SCSIBACK_INVALID_HANDLE;
++ err |= 1;
++ }
++
++ pending_handle(pending_req, i) = map[i].handle;
++
++ if (err)
++ continue;
++
++ set_phys_to_machine(__pa(vaddr(
++ pending_req, i)) >> PAGE_SHIFT,
++ FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
++
++ pending_req->sgl[i].page = virt_to_page(vaddr(pending_req, i));
++ pending_req->sgl[i].offset = ring_req->seg[i].offset;
++ pending_req->sgl[i].length = ring_req->seg[i].length;
++ data_len += pending_req->sgl[i].length;
++
++ barrier();
++ if (pending_req->sgl[i].offset >= PAGE_SIZE ||
++ pending_req->sgl[i].length > PAGE_SIZE ||
++ pending_req->sgl[i].offset + pending_req->sgl[i].length > PAGE_SIZE)
++ err |= 1;
++
++ }
++
++ if (err)
++ goto fail_flush;
++ }
++
++ pending_req->request_bufflen = data_len;
++
++ return 0;
++
++fail_flush:
++ scsiback_fast_flush_area(pending_req);
++ return -ENOMEM;
++}
++
++/* adapted from scsi_lib.c/scsi_merge_bio */
++static int scsiback_merge_bio(struct request *rq, struct bio *bio)
++{
++ struct request_queue *q = rq->q;
++
++ bio->bi_flags &= ~(1 << BIO_SEG_VALID);
++ if (rq_data_dir(rq) == WRITE)
++ bio->bi_rw |= (1 << BIO_RW);
++
++ blk_queue_bounce(q, &bio);
++
++ if (!rq->bio)
++ blk_rq_bio_prep(q, rq, bio);
++ else if (!q->back_merge_fn(q, rq, bio))
++ return -EINVAL;
++ else {
++ rq->biotail->bi_next = bio;
++ rq->biotail = bio;
++ rq->hard_nr_sectors += bio_sectors(bio);
++ rq->nr_sectors = rq->hard_nr_sectors;
++ }
++
++ return 0;
++}
++
++
++/* adapted from scsi_lib.c/scsi_bi_endio */
++static int scsiback_bi_endio(struct bio *bio, unsigned int bytes_done, int error)
++{
++ if (bio->bi_size)
++ return 1;
++
++ bio_put(bio);
++ return 0;
++}
++
++
++
++/* adapted from scsi_lib.c/scsi_req_map_sg */
++static int request_map_sg(struct request *rq, pending_req_t *pending_req, unsigned int count)
++{
++ struct request_queue *q = rq->q;
++ int nr_pages;
++ unsigned int nsegs = count;
++
++ unsigned int data_len = 0, len, bytes, off;
++ struct page *page;
++ struct bio *bio = NULL;
++ int i, err, nr_vecs = 0;
++
++ for (i = 0; i < nsegs; i++) {
++ page = pending_req->sgl[i].page;
++ off = (unsigned int)pending_req->sgl[i].offset;
++ len = (unsigned int)pending_req->sgl[i].length;
++ data_len += len;
++
++ nr_pages = (len + off + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ while (len > 0) {
++ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
++ if (!bio) {
++ nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
++ nr_pages -= nr_vecs;
++ bio = bio_alloc(GFP_KERNEL, nr_vecs);
++ if (!bio) {
++ err = -ENOMEM;
++ goto free_bios;
++ }
++ bio->bi_end_io = scsiback_bi_endio;
++ }
++
++ if (bio_add_pc_page(q, bio, page, bytes, off) !=
++ bytes) {
++ bio_put(bio);
++ err = -EINVAL;
++ goto free_bios;
++ }
++
++ if (bio->bi_vcnt >= nr_vecs) {
++ err = scsiback_merge_bio(rq, bio);
++ if (err) {
++ bio_endio(bio, bio->bi_size, 0);
++ goto free_bios;
++ }
++ bio = NULL;
++ }
++
++ page++;
++ len -= bytes;
++ off = 0;
++ }
++ }
++
++ rq->buffer = rq->data = NULL;
++ rq->data_len = data_len;
++
++ return 0;
++
++free_bios:
++ while ((bio = rq->bio) != NULL) {
++ rq->bio = bio->bi_next;
++		/*
++		 * call endio instead of bio_put in case it was bounced
++		 */
++ bio_endio(bio, bio->bi_size, 0);
++ }
++
++ return err;
++}
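++/*
++ * request_map_sg() mirrors scsi_req_map_sg(): each granted page already
++ * mapped into the backend is fed into bios with bio_add_pc_page(), full
++ * bios are merged into the request, and on failure every queued bio is
++ * completed through bio_endio() so bounced bios are unwound correctly.
++ */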
++
++
++void scsiback_cmd_exec(pending_req_t *pending_req)
++{
++ int cmd_len = (int)pending_req->cmd_len;
++ int data_dir = (int)pending_req->sc_data_direction;
++ unsigned int nr_segments = (unsigned int)pending_req->nr_segments;
++ unsigned int timeout;
++ struct request *rq;
++ int write;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++	/* Make sure the backend does not time out earlier than the frontend. */
++ if (pending_req->timeout_per_command)
++ timeout = pending_req->timeout_per_command * HZ;
++ else
++ timeout = VSCSIIF_TIMEOUT;
++
++ write = (data_dir == DMA_TO_DEVICE);
++	rq = blk_get_request(pending_req->sdev->request_queue, write, GFP_KERNEL);
++	if (unlikely(!rq)) {
++		/* Should not happen with GFP_KERNEL, but be defensive. */
++		printk(KERN_ERR "scsiback: blk_get_request() failed\n");
++		return;
++	}
++
++ rq->flags |= REQ_BLOCK_PC;
++ rq->cmd_len = cmd_len;
++ memcpy(rq->cmd, pending_req->cmnd, cmd_len);
++
++ memset(pending_req->sense_buffer, 0, VSCSIIF_SENSE_BUFFERSIZE);
++ rq->sense = pending_req->sense_buffer;
++ rq->sense_len = 0;
++
++ /* not allowed to retry in backend. */
++ rq->retries = 0;
++ rq->timeout = timeout;
++ rq->end_io_data = pending_req;
++
++ if (nr_segments) {
++		if (request_map_sg(rq, pending_req, nr_segments)) {
++			printk(KERN_ERR "scsiback: SG Request Map Error\n");
++			blk_put_request(rq);
++			return;
++		}
++ }
++
++ scsiback_get(pending_req->info);
++ blk_execute_rq_nowait(rq->q, NULL, rq, 1, scsiback_cmd_done);
++
++	return;
++}
++
++
++static void scsiback_device_reset_exec(pending_req_t *pending_req)
++{
++ struct vscsibk_info *info = pending_req->info;
++ int err;
++ struct scsi_device *sdev = pending_req->sdev;
++
++ scsiback_get(info);
++ err = scsi_reset_provider(sdev, SCSI_TRY_RESET_DEVICE);
++
++ scsiback_do_resp_with_sense(NULL, err, 0, pending_req);
++ scsiback_put(info);
++
++ return;
++}
++
++
++irqreturn_t scsiback_intr(int irq, void *dev_id, struct pt_regs *regs)
++{
++ scsiback_notify_work((struct vscsibk_info *)dev_id);
++ return IRQ_HANDLED;
++}
++
++static int prepare_pending_reqs(struct vscsibk_info *info,
++ vscsiif_request_t *ring_req, pending_req_t *pending_req)
++{
++ struct scsi_device *sdev;
++ struct ids_tuple vir;
++ int err = -EINVAL;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++ pending_req->rqid = ring_req->rqid;
++ pending_req->act = ring_req->act;
++
++ pending_req->info = info;
++
++ vir.chn = ring_req->channel;
++ vir.tgt = ring_req->id;
++ vir.lun = ring_req->lun;
++
++ rmb();
++ sdev = scsiback_do_translation(info, &vir);
++ if (!sdev) {
++ pending_req->sdev = NULL;
++ DPRINTK("scsiback: doesn't exist.\n");
++ err = -ENODEV;
++ goto invalid_value;
++ }
++ pending_req->sdev = sdev;
++
++ /* request range check from frontend */
++ pending_req->sc_data_direction = ring_req->sc_data_direction;
++ barrier();
++ if ((pending_req->sc_data_direction != DMA_BIDIRECTIONAL) &&
++ (pending_req->sc_data_direction != DMA_TO_DEVICE) &&
++ (pending_req->sc_data_direction != DMA_FROM_DEVICE) &&
++ (pending_req->sc_data_direction != DMA_NONE)) {
++ DPRINTK("scsiback: invalid parameter data_dir = %d\n",
++ pending_req->sc_data_direction);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ pending_req->nr_segments = ring_req->nr_segments;
++ barrier();
++ if (pending_req->nr_segments > VSCSIIF_SG_TABLESIZE) {
++ DPRINTK("scsiback: invalid parameter nr_seg = %d\n",
++ pending_req->nr_segments);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ pending_req->cmd_len = ring_req->cmd_len;
++ barrier();
++ if (pending_req->cmd_len > VSCSIIF_MAX_COMMAND_SIZE) {
++ DPRINTK("scsiback: invalid parameter cmd_len = %d\n",
++ pending_req->cmd_len);
++ err = -EINVAL;
++ goto invalid_value;
++ }
++ memcpy(pending_req->cmnd, ring_req->cmnd, pending_req->cmd_len);
++
++ pending_req->timeout_per_command = ring_req->timeout_per_command;
++
++	if (scsiback_gnttab_data_map(ring_req, pending_req)) {
++ DPRINTK("scsiback: invalid buffer\n");
++ err = -EINVAL;
++ goto invalid_value;
++ }
++
++ return 0;
++
++invalid_value:
++ return err;
++}
++
++
++static int scsiback_do_cmd_fn(struct vscsibk_info *info)
++{
++ struct vscsiif_back_ring *ring = &info->ring;
++ vscsiif_request_t *ring_req;
++
++ pending_req_t *pending_req;
++ RING_IDX rc, rp;
++ int err, more_to_do = 0;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++ rc = ring->req_cons;
++ rp = ring->sring->req_prod;
++ rmb();
++
++	while (rc != rp) {
++ if (RING_REQUEST_CONS_OVERFLOW(ring, rc))
++ break;
++ pending_req = alloc_req(info);
++ if (NULL == pending_req) {
++ more_to_do = 1;
++ break;
++ }
++
++ ring_req = RING_GET_REQUEST(ring, rc);
++ ring->req_cons = ++rc;
++
++ err = prepare_pending_reqs(info, ring_req,
++ pending_req);
++ if (err == -EINVAL) {
++ scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++ 0, pending_req);
++ continue;
++ } else if (err == -ENODEV) {
++ scsiback_do_resp_with_sense(NULL, (DID_NO_CONNECT << 16),
++ 0, pending_req);
++ continue;
++ }
++
++ if (pending_req->act == VSCSIIF_ACT_SCSI_CDB) {
++ scsiback_req_emulation_or_cmdexec(pending_req);
++ } else if (pending_req->act == VSCSIIF_ACT_SCSI_RESET) {
++ scsiback_device_reset_exec(pending_req);
++ } else {
++ printk(KERN_ERR "scsiback: invalid parameter for request\n");
++ scsiback_do_resp_with_sense(NULL, (DRIVER_ERROR << 24),
++ 0, pending_req);
++ continue;
++ }
++ }
++
++ if (RING_HAS_UNCONSUMED_REQUESTS(ring))
++ more_to_do = 1;
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++
++ return more_to_do;
++}
++
++
++int scsiback_schedule(void *data)
++{
++ struct vscsibk_info *info = (struct vscsibk_info *)data;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ info->wq,
++ info->waiting_reqs || kthread_should_stop());
++ wait_event_interruptible(
++ pending_free_wq,
++ !list_empty(&pending_free) || kthread_should_stop());
++
++ info->waiting_reqs = 0;
++ smp_mb();
++
++ if (scsiback_do_cmd_fn(info))
++ info->waiting_reqs = 1;
++ }
++
++ return 0;
++}
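++/*
++ * One kernel thread per backend instance runs the loop above: it sleeps
++ * until the interrupt handler flags waiting_reqs (or a pending_req_t is
++ * freed), drains the ring via scsiback_do_cmd_fn(), and marks itself
++ * busy again if requests were still outstanding when it ran out of
++ * resources.
++ */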
++
++
++static int __init scsiback_init(void)
++{
++ int i, mmap_pages;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ mmap_pages = vscsiif_reqs * VSCSIIF_SG_TABLESIZE;
++
++ pending_reqs = kmalloc(sizeof(pending_reqs[0]) *
++ vscsiif_reqs, GFP_KERNEL);
++ pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
++ mmap_pages, GFP_KERNEL);
++ pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
++
++ if (!pending_reqs || !pending_grant_handles || !pending_pages)
++ goto out_of_memory;
++
++ for (i = 0; i < mmap_pages; i++)
++ pending_grant_handles[i] = SCSIBACK_INVALID_HANDLE;
++
++ if (scsiback_interface_init() < 0)
++ goto out_of_kmem;
++
++	memset(pending_reqs, 0, vscsiif_reqs * sizeof(pending_reqs[0]));
++ INIT_LIST_HEAD(&pending_free);
++
++ for (i = 0; i < vscsiif_reqs; i++)
++ list_add_tail(&pending_reqs[i].free_list, &pending_free);
++
++ if (scsiback_xenbus_init())
++ goto out_of_xenbus;
++
++ scsiback_emulation_init();
++
++ return 0;
++
++out_of_xenbus:
++ scsiback_xenbus_unregister();
++out_of_kmem:
++ scsiback_interface_exit();
++out_of_memory:
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, mmap_pages);
++ printk(KERN_ERR "scsiback: %s: out of memory\n", __FUNCTION__);
++ return -ENOMEM;
++}
++
++static void __exit scsiback_exit(void)
++{
++ scsiback_xenbus_unregister();
++ scsiback_interface_exit();
++ kfree(pending_reqs);
++ kfree(pending_grant_handles);
++ free_empty_pages_and_pagevec(pending_pages, (vscsiif_reqs * VSCSIIF_SG_TABLESIZE));
++}
++
++module_init(scsiback_init);
++module_exit(scsiback_exit);
++
++MODULE_DESCRIPTION("Xen SCSI backend driver");
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/scsiback/translate.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/translate.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,168 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/list.h>
++#include <linux/gfp.h>
++
++#include "common.h"
++
++/*
++ Initialize the translation entry list
++*/
++void scsiback_init_translation_table(struct vscsibk_info *info)
++{
++ INIT_LIST_HEAD(&info->v2p_entry_lists);
++ spin_lock_init(&info->v2p_lock);
++}
++
++
++/*
++ Add a new translation entry
++*/
++int scsiback_add_translation_entry(struct vscsibk_info *info,
++ struct scsi_device *sdev, struct ids_tuple *v)
++{
++ int err = 0;
++ struct v2p_entry *entry;
++ struct v2p_entry *new;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++
++	/* Check for a double assignment to the same virtual ID */
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ printk(KERN_WARNING "scsiback: Virtual ID is already used. "
++ "Assignment was not performed.\n");
++ err = -EEXIST;
++ goto out;
++ }
++
++ }
++
++ /* Create a new translation entry and add to the list */
++ if ((new = kmalloc(sizeof(struct v2p_entry), GFP_ATOMIC)) == NULL) {
++ printk(KERN_ERR "scsiback: %s: kmalloc() error.\n", __FUNCTION__);
++ err = -ENOMEM;
++ goto out;
++ }
++ new->v = *v;
++ new->sdev = sdev;
++ list_add_tail(&new->l, head);
++
++out:
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return err;
++}
++
++
++/*
++  Delete the specified translation entry
++*/
++int scsiback_del_translation_entry(struct vscsibk_info *info,
++ struct ids_tuple *v)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++	/* Find the specified translation entry */
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ goto found;
++ }
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return 1;
++
++found:
++	/* Delete the specified translation entry */
++ scsi_device_put(entry->sdev);
++ list_del(&entry->l);
++ kfree(entry);
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return 0;
++}
++
++
++/*
++ Perform virtual to physical translation
++*/
++struct scsi_device *scsiback_do_translation(struct vscsibk_info *info,
++ struct ids_tuple *v)
++{
++ struct v2p_entry *entry;
++ struct list_head *head = &(info->v2p_entry_lists);
++ struct scsi_device *sdev = NULL;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry(entry, head, l) {
++ if ((entry->v.chn == v->chn) &&
++ (entry->v.tgt == v->tgt) &&
++ (entry->v.lun == v->lun)) {
++ sdev = entry->sdev;
++ goto out;
++ }
++ }
++out:
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++ return sdev;
++}
++
++
++/*
++  Release all translation entries
++*/
++void scsiback_release_translation_entry(struct vscsibk_info *info)
++{
++ struct v2p_entry *entry, *tmp;
++ struct list_head *head = &(info->v2p_entry_lists);
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->v2p_lock, flags);
++ list_for_each_entry_safe(entry, tmp, head, l) {
++ scsi_device_put(entry->sdev);
++ list_del(&entry->l);
++ kfree(entry);
++ }
++
++ spin_unlock_irqrestore(&info->v2p_lock, flags);
++	return;
++}
+Index: head-2008-11-25/drivers/xen/scsiback/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsiback/xenbus.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,368 @@
++/*
++ * Xen SCSI backend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * Based on the blkback driver code.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <stdarg.h>
++#include <linux/module.h>
++#include <linux/kthread.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <scsi/scsi_device.h>
++
++#include "common.h"
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++ struct vscsibk_info *info;
++};
++
++
++static int __vscsiif_name(struct backend_info *be, char *buf)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned int domid, id;
++
++	if (sscanf(dev->nodename, "backend/vscsi/%u/%u", &domid, &id) != 2)
++		return -EINVAL;
++	snprintf(buf, TASK_COMM_LEN, "vscsi.%u.%u", be->info->domid, id);
++
++ return 0;
++}
++
++static int scsiback_map(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ int err;
++ char name[TASK_COMM_LEN];
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "reading %s ring", dev->otherend);
++ return err;
++ }
++
++ err = scsiback_init_sring(be->info, ring_ref, evtchn);
++ if (err)
++ return err;
++
++ err = __vscsiif_name(be, name);
++ if (err) {
++ xenbus_dev_error(dev, err, "get scsiback dev name");
++ return err;
++ }
++
++ be->info->kthread = kthread_run(scsiback_schedule, be->info, name);
++ if (IS_ERR(be->info->kthread)) {
++ err = PTR_ERR(be->info->kthread);
++ be->info->kthread = NULL;
++ xenbus_dev_error(be->dev, err, "start vscsiif");
++ return err;
++ }
++
++ return 0;
++}
++
++
++struct scsi_device *scsiback_get_scsi_device(struct ids_tuple *phy)
++{
++ struct Scsi_Host *shost;
++ struct scsi_device *sdev = NULL;
++
++ shost = scsi_host_lookup(phy->hst);
++ if (IS_ERR(shost)) {
++ printk(KERN_ERR "scsiback: host%d doesn't exist.\n",
++ phy->hst);
++ return NULL;
++ }
++ sdev = scsi_device_lookup(shost, phy->chn, phy->tgt, phy->lun);
++ if (!sdev) {
++ printk(KERN_ERR "scsiback: %d:%d:%d:%d doesn't exist.\n",
++ phy->hst, phy->chn, phy->tgt, phy->lun);
++ scsi_host_put(shost);
++ return NULL;
++ }
++
++ scsi_host_put(shost);
++ return (sdev);
++}
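++/*
++ * scsi_device_lookup() returns the device with a reference held; that
++ * reference is kept for the lifetime of the v2p entry and is dropped
++ * via scsi_device_put() when the entry is deleted or released.
++ */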
++
++#define VSCSIBACK_OP_ADD_OR_DEL_LUN 1
++#define VSCSIBACK_OP_UPDATEDEV_STATE 2
++
++
++static void scsiback_do_lun_hotplug(struct backend_info *be, int op)
++{
++ int i, err = 0;
++ struct ids_tuple phy, vir;
++ int device_state;
++ char str[64], state_str[64];
++ char **dir;
++ unsigned int dir_n = 0;
++ struct xenbus_device *dev = be->dev;
++ struct scsi_device *sdev;
++
++ dir = xenbus_directory(XBT_NIL, dev->nodename, "vscsi-devs", &dir_n);
++ if (IS_ERR(dir))
++ return;
++
++ for (i = 0; i < dir_n; i++) {
++
++ /* read status */
++ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, state_str, "%u",
++ &device_state);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* physical SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++ "%u:%u:%u:%u", &phy.hst, &phy.chn, &phy.tgt, &phy.lun);
++ if (XENBUS_EXIST_ERR(err)) {
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ continue;
++ }
++
++ /* virtual SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->nodename, str,
++ "%u:%u:%u:%u", &vir.hst, &vir.chn, &vir.tgt, &vir.lun);
++ if (XENBUS_EXIST_ERR(err)) {
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ continue;
++ }
++
++ switch (op) {
++ case VSCSIBACK_OP_ADD_OR_DEL_LUN:
++ if (device_state == XenbusStateInitialising) {
++ sdev = scsiback_get_scsi_device(&phy);
++ if (!sdev)
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ else {
++ err = scsiback_add_translation_entry(be->info, sdev, &vir);
++ if (!err) {
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateInitialised)) {
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ scsiback_del_translation_entry(be->info, &vir);
++ }
++ } else {
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ }
++ }
++ }
++
++ if (device_state == XenbusStateClosing) {
++ if (!scsiback_del_translation_entry(be->info, &vir)) {
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed))
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ }
++ }
++ break;
++
++ case VSCSIBACK_OP_UPDATEDEV_STATE:
++ if (device_state == XenbusStateInitialised) {
++ /* modify vscsi-devs/dev-x/state */
++ if (xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateConnected)) {
++ printk(KERN_ERR "scsiback: xenbus_printf error %s\n", state_str);
++ scsiback_del_translation_entry(be->info, &vir);
++ xenbus_printf(XBT_NIL, dev->nodename, state_str,
++ "%d", XenbusStateClosed);
++ }
++ }
++ break;
++		default:
++			/* Handle additional operations here when necessary. */
++ break;
++ }
++ }
++
++ kfree(dir);
++}
++
++
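++/*
++ * Drive the backend half of the xenbus handshake.  The interesting
++ * transitions are Initialised (map the ring, scan for LUNs, switch to
++ * Connected) and Reconfiguring (rescan LUNs without reconnecting).
++ */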
++static void scsiback_frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ break;
++ case XenbusStateInitialised:
++ err = scsiback_map(be);
++ if (err)
++ break;
++
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ break;
++ case XenbusStateConnected:
++
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_UPDATEDEV_STATE);
++
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++
++ break;
++
++ case XenbusStateClosing:
++ scsiback_disconnect(be->info);
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ if (xenbus_dev_is_online(dev))
++ break;
++ /* fall through if not online */
++ case XenbusStateUnknown:
++ device_unregister(&dev->dev);
++ break;
++
++ case XenbusStateReconfiguring:
++ scsiback_do_lun_hotplug(be, VSCSIBACK_OP_ADD_OR_DEL_LUN);
++
++ xenbus_switch_state(dev, XenbusStateReconfigured);
++
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL, "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++static int scsiback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++ if (be->info) {
++ scsiback_disconnect(be->info);
++ scsiback_release_translation_entry(be->info);
++ scsiback_free(be->info);
++ be->info = NULL;
++ }
++
++ kfree(be);
++ dev->dev.driver_data = NULL;
++
++ return 0;
++}
++
++
++static int scsiback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++
++ DPRINTK("%p %d\n", dev, dev->otherend_id);
++
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ be->info = vscsibk_info_alloc(dev->otherend_id);
++ if (IS_ERR(be->info)) {
++ err = PTR_ERR(be->info);
++ be->info = NULL;
++ xenbus_dev_fatal(dev, err, "creating scsihost interface");
++ goto fail;
++ }
++
++ be->info->dev = dev;
++ be->info->irq = 0;
++
++ scsiback_init_translation_table(be->info);
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err)
++ goto fail;
++
++ return 0;
++
++fail:
++	printk(KERN_WARNING "scsiback: %s failed\n", __FUNCTION__);
++ scsiback_remove(dev);
++
++ return err;
++}
++
++
++static struct xenbus_device_id scsiback_ids[] = {
++ { "vscsi" },
++ { "" }
++};
++
++static struct xenbus_driver scsiback = {
++ .name = "vscsi",
++ .owner = THIS_MODULE,
++ .ids = scsiback_ids,
++ .probe = scsiback_probe,
++ .remove = scsiback_remove,
++ .otherend_changed = scsiback_frontend_changed
++};
++
++int scsiback_xenbus_init(void)
++{
++ return xenbus_register_backend(&scsiback);
++}
++
++void scsiback_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&scsiback);
++}
+Index: head-2008-11-25/drivers/xen/scsifront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsifront/Makefile 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,3 @@
++
++obj-$(CONFIG_XEN_SCSI_FRONTEND) := xenscsi.o
++xenscsi-objs := scsifront.o xenbus.o
+Index: head-2008-11-25/drivers/xen/scsifront/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsifront/common.h 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,129 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#ifndef __XEN_DRIVERS_SCSIFRONT_H__
++#define __XEN_DRIVERS_SCSIFRONT_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/kernel.h>
++#include <linux/device.h>
++#include <linux/kthread.h>
++#include <linux/wait.h>
++#include <linux/interrupt.h>
++#include <linux/spinlock.h>
++#include <linux/sched.h>
++#include <linux/blkdev.h>
++#include <scsi/scsi_cmnd.h>
++#include <scsi/scsi_device.h>
++#include <scsi/scsi.h>
++#include <scsi/scsi_host.h>
++#include <xen/xenbus.h>
++#include <xen/gnttab.h>
++#include <xen/evtchn.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/io/ring.h>
++#include <xen/interface/io/vscsiif.h>
++#include <asm/delay.h>
++
++
++#define GRANT_INVALID_REF 0
++#define VSCSI_IN_ABORT 1
++#define VSCSI_IN_RESET 2
++
++/* tuning points */
++#define VSCSIIF_DEFAULT_CMD_PER_LUN 10
++#define VSCSIIF_MAX_TARGET 64
++#define VSCSIIF_MAX_LUN 255
++
++#define VSCSIIF_RING_SIZE \
++ __RING_SIZE((struct vscsiif_sring *)0, PAGE_SIZE)
++#define VSCSIIF_MAX_REQS VSCSIIF_RING_SIZE
++
++struct vscsifrnt_shadow {
++ uint16_t next_free;
++
++ /* command between backend and frontend
++ * VSCSIIF_ACT_SCSI_CDB or VSCSIIF_ACT_SCSI_RESET */
++ unsigned char act;
++
++ /* do reset function */
++ wait_queue_head_t wq_reset; /* reset work queue */
++ int wait_reset; /* reset work queue condition */
++ int32_t rslt_reset; /* reset response status */
++	                            /* (SUCCESS or FAILED) */
++
++ /* for DMA_TO_DEVICE(1), DMA_FROM_DEVICE(2), DMA_NONE(3)
++ requests */
++ unsigned int sc_data_direction;
++
++ /* Number of pieces of scatter-gather */
++ unsigned int nr_segments;
++
++ /* requested struct scsi_cmnd is stored from kernel */
++ unsigned long req_scsi_cmnd;
++ int gref[VSCSIIF_SG_TABLESIZE];
++};
++
++struct vscsifrnt_info {
++ struct xenbus_device *dev;
++
++ struct Scsi_Host *host;
++
++ spinlock_t io_lock;
++ spinlock_t shadow_lock;
++ unsigned int evtchn;
++ unsigned int irq;
++
++ grant_ref_t ring_ref;
++ struct vscsiif_front_ring ring;
++ struct vscsiif_response ring_res;
++
++ struct vscsifrnt_shadow shadow[VSCSIIF_MAX_REQS];
++ uint32_t shadow_free;
++
++ struct task_struct *kthread;
++ wait_queue_head_t wq;
++ unsigned int waiting_resp;
++
++};
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++int scsifront_xenbus_init(void);
++void scsifront_xenbus_unregister(void);
++int scsifront_schedule(void *data);
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs);
++int scsifront_cmd_done(struct vscsifrnt_info *info);
++
++
++#endif /* __XEN_DRIVERS_SCSIFRONT_H__ */
+Index: head-2008-11-25/drivers/xen/scsifront/scsifront.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsifront/scsifront.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,511 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++
++#include <linux/version.h>
++#include "common.h"
++
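++/*
++ * The shadow[] array doubles as a free list of request ids: shadow_free
++ * holds the index of the next free slot, and each free slot's next_free
++ * field links to the one after it.  0x0fff serves as the end-of-list /
++ * "in use" marker (any value >= VSCSIIF_MAX_REQS would do).
++ */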
++static int get_id_from_freelist(struct vscsifrnt_info *info)
++{
++ unsigned long flags;
++ uint32_t free;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++
++ free = info->shadow_free;
++	BUG_ON(free >= VSCSIIF_MAX_REQS);
++ info->shadow_free = info->shadow[free].next_free;
++ info->shadow[free].next_free = 0x0fff;
++
++ info->shadow[free].wait_reset = 0;
++
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++
++ return free;
++}
++
++static void add_id_to_freelist(struct vscsifrnt_info *info, uint32_t id)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++
++ info->shadow[id].next_free = info->shadow_free;
++ info->shadow[id].req_scsi_cmnd = 0;
++ info->shadow_free = id;
++
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++}
++
++
++struct vscsiif_request *scsifront_pre_request(struct vscsifrnt_info *info)
++{
++ struct vscsiif_front_ring *ring = &(info->ring);
++ vscsiif_request_t *ring_req;
++ uint32_t id;
++
++ ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
++
++ ring->req_prod_pvt++;
++
++ id = get_id_from_freelist(info); /* use id by response */
++ ring_req->rqid = (uint16_t)id;
++
++ return ring_req;
++}
++
++
++static void scsifront_notify_work(struct vscsifrnt_info *info)
++{
++ info->waiting_resp = 1;
++ wake_up(&info->wq);
++}
++
++
++static void scsifront_do_request(struct vscsifrnt_info *info)
++{
++ struct vscsiif_front_ring *ring = &(info->ring);
++ unsigned int irq = info->irq;
++ int notify;
++
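++	/*
++	 * RING_PUSH_REQUESTS_AND_CHECK_NOTIFY() publishes req_prod and
++	 * sets 'notify' only if the backend may have stopped polling the
++	 * ring, so the event channel is kicked only when it is needed.
++	 */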
++ RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
++ if (notify)
++ notify_remote_via_irq(irq);
++}
++
++irqreturn_t scsifront_intr(int irq, void *dev_id, struct pt_regs *ptregs)
++{
++ scsifront_notify_work((struct vscsifrnt_info *)dev_id);
++ return IRQ_HANDLED;
++}
++
++
++static void scsifront_gnttab_done(struct vscsifrnt_shadow *s, uint32_t id)
++{
++ int i;
++
++ if (s->sc_data_direction == DMA_NONE)
++ return;
++
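++	/*
++	 * If the backend still has a grant mapped it could keep writing
++	 * into a page we are about to reuse; treat that as fatal rather
++	 * than risk silent memory corruption.
++	 */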
++ if (s->nr_segments) {
++ for (i = 0; i < s->nr_segments; i++) {
++ if (unlikely(gnttab_query_foreign_access(
++ s->gref[i]) != 0)) {
++ printk(KERN_ALERT "scsifront: "
++ "grant still in use by backend.\n");
++ BUG();
++ }
++ gnttab_end_foreign_access(s->gref[i], 0UL);
++ }
++ }
++}
++
++
++static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
++ vscsiif_response_t *ring_res)
++{
++ struct scsi_cmnd *sc;
++ uint32_t id;
++ uint8_t sense_len;
++
++ id = ring_res->rqid;
++ sc = (struct scsi_cmnd *)info->shadow[id].req_scsi_cmnd;
++
++	BUG_ON(sc == NULL);
++
++ scsifront_gnttab_done(&info->shadow[id], id);
++ add_id_to_freelist(info, id);
++
++ sc->result = ring_res->rslt;
++ sc->resid = ring_res->residual_len;
++
++ if (ring_res->sense_len > VSCSIIF_SENSE_BUFFERSIZE)
++ sense_len = VSCSIIF_SENSE_BUFFERSIZE;
++ else
++ sense_len = ring_res->sense_len;
++
++ if (sense_len)
++ memcpy(sc->sense_buffer, ring_res->sense_buffer, sense_len);
++
++ sc->scsi_done(sc);
++}
++
++
++static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
++ vscsiif_response_t *ring_res)
++{
++ uint16_t id = ring_res->rqid;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->shadow_lock, flags);
++ info->shadow[id].wait_reset = 1;
++ info->shadow[id].rslt_reset = ring_res->rslt;
++ spin_unlock_irqrestore(&info->shadow_lock, flags);
++
++ wake_up(&(info->shadow[id].wq_reset));
++}
++
++
++int scsifront_cmd_done(struct vscsifrnt_info *info)
++{
++ vscsiif_response_t *ring_res;
++
++ RING_IDX i, rp;
++ int more_to_do = 0;
++ unsigned long flags;
++
++ spin_lock_irqsave(&info->io_lock, flags);
++
++ rp = info->ring.sring->rsp_prod;
++ rmb();
++ for (i = info->ring.rsp_cons; i != rp; i++) {
++
++ ring_res = RING_GET_RESPONSE(&info->ring, i);
++
++ if (info->shadow[ring_res->rqid].act == VSCSIIF_ACT_SCSI_CDB)
++ scsifront_cdb_cmd_done(info, ring_res);
++ else
++ scsifront_sync_cmd_done(info, ring_res);
++ }
++
++ info->ring.rsp_cons = i;
++
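++	/*
++	 * If requests are still in flight, use the final check to re-arm
++	 * notifications and catch responses that raced with us; if the
++	 * ring is idle, simply ask for an event on the next response.
++	 */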
++ if (i != info->ring.req_prod_pvt) {
++ RING_FINAL_CHECK_FOR_RESPONSES(&info->ring, more_to_do);
++ } else {
++ info->ring.sring->rsp_event = i + 1;
++ }
++
++ spin_unlock_irqrestore(&info->io_lock, flags);
++
++ /* Yield point for this unbounded loop. */
++ cond_resched();
++
++ return more_to_do;
++}
++
++
++int scsifront_schedule(void *data)
++{
++ struct vscsifrnt_info *info = (struct vscsifrnt_info *)data;
++
++ while (!kthread_should_stop()) {
++ wait_event_interruptible(
++ info->wq,
++ info->waiting_resp || kthread_should_stop());
++
++ info->waiting_resp = 0;
++ smp_mb();
++
++ if (scsifront_cmd_done(info))
++ info->waiting_resp = 1;
++ }
++
++ return 0;
++}
++
++
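++/*
++ * Grant the backend access to the data pages of @sc and fill in the
++ * segment descriptors of @ring_req.  Returns the number of segments
++ * granted, or a negative errno: -E2BIG if the buffer spans more than
++ * VSCSIIF_SG_TABLESIZE pages, -ENOMEM if no grant references are
++ * available.
++ */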
++static int map_data_for_request(struct vscsifrnt_info *info,
++ struct scsi_cmnd *sc, vscsiif_request_t *ring_req, uint32_t id)
++{
++ grant_ref_t gref_head;
++ struct page *page;
++ int err, i, ref, ref_cnt = 0;
++ int write = (sc->sc_data_direction == DMA_TO_DEVICE);
++ int nr_pages, off, len, bytes;
++ unsigned long buffer_pfn;
++ unsigned int data_len = 0;
++
++ if (sc->sc_data_direction == DMA_NONE)
++ return 0;
++
++ err = gnttab_alloc_grant_references(VSCSIIF_SG_TABLESIZE, &gref_head);
++ if (err) {
++ printk(KERN_ERR "scsifront: gnttab_alloc_grant_references() error\n");
++ return -ENOMEM;
++ }
++
++ if (sc->use_sg) {
++		/* cf. scsi_req_map_sg() in drivers/scsi/scsi_lib.c */
++ struct scatterlist *sg = (struct scatterlist *)sc->request_buffer;
++ nr_pages = (sc->request_bufflen + sg[0].offset + PAGE_SIZE - 1) >> PAGE_SHIFT;
++
++ if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++ printk(KERN_ERR "scsifront: Unable to map request_buffer for command!\n");
++			ref_cnt = -E2BIG;
++ goto big_to_sg;
++ }
++
++ for (i = 0; i < sc->use_sg; i++) {
++ page = sg[i].page;
++ off = sg[i].offset;
++ len = sg[i].length;
++ data_len += len;
++
++ buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
++
++ while (len > 0) {
++ bytes = min_t(unsigned int, len, PAGE_SIZE - off);
++
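++				/*
++				 * The claim cannot fail: VSCSIIF_SG_TABLESIZE
++				 * references were allocated above and nr_pages
++				 * was checked against that limit.
++				 */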
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++ buffer_pfn, write);
++
++ info->shadow[id].gref[ref_cnt] = ref;
++ ring_req->seg[ref_cnt].gref = ref;
++ ring_req->seg[ref_cnt].offset = (uint16_t)off;
++ ring_req->seg[ref_cnt].length = (uint16_t)bytes;
++
++ buffer_pfn++;
++ len -= bytes;
++ off = 0;
++ ref_cnt++;
++ }
++ }
++ } else if (sc->request_bufflen) {
++ unsigned long end = ((unsigned long)sc->request_buffer
++ + sc->request_bufflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
++ unsigned long start = (unsigned long)sc->request_buffer >> PAGE_SHIFT;
++
++ page = virt_to_page(sc->request_buffer);
++ nr_pages = end - start;
++ len = sc->request_bufflen;
++
++ if (nr_pages > VSCSIIF_SG_TABLESIZE) {
++			ref_cnt = -E2BIG;
++ goto big_to_sg;
++ }
++
++ buffer_pfn = page_to_phys(page) >> PAGE_SHIFT;
++
++ off = offset_in_page((unsigned long)sc->request_buffer);
++ for (i = 0; i < nr_pages; i++) {
++ bytes = PAGE_SIZE - off;
++
++ if (bytes > len)
++ bytes = len;
++
++ ref = gnttab_claim_grant_reference(&gref_head);
++ BUG_ON(ref == -ENOSPC);
++
++ gnttab_grant_foreign_access_ref(ref, info->dev->otherend_id,
++ buffer_pfn, write);
++
++ info->shadow[id].gref[i] = ref;
++ ring_req->seg[i].gref = ref;
++ ring_req->seg[i].offset = (uint16_t)off;
++ ring_req->seg[i].length = (uint16_t)bytes;
++
++ buffer_pfn++;
++ len -= bytes;
++ off = 0;
++ ref_cnt++;
++ }
++ }
++
++big_to_sg:
++
++ gnttab_free_grant_references(gref_head);
++
++ return ref_cnt;
++}
++
++static int scsifront_queuecommand(struct scsi_cmnd *sc,
++ void (*done)(struct scsi_cmnd *))
++{
++ struct vscsifrnt_info *info =
++ (struct vscsifrnt_info *) sc->device->host->hostdata;
++ vscsiif_request_t *ring_req;
++ int ref_cnt;
++ uint16_t rqid;
++
++	if (RING_FULL(&info->ring))
++		goto out_host_busy;
++
++ sc->scsi_done = done;
++ sc->result = 0;
++
++ ring_req = scsifront_pre_request(info);
++ rqid = ring_req->rqid;
++ ring_req->act = VSCSIIF_ACT_SCSI_CDB;
++
++ ring_req->id = sc->device->id;
++ ring_req->lun = sc->device->lun;
++ ring_req->channel = sc->device->channel;
++ ring_req->cmd_len = sc->cmd_len;
++
++ BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
++
++	if (sc->cmd_len)
++ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++ else
++ memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
++
++ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
++ ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++
++ info->shadow[rqid].req_scsi_cmnd = (unsigned long)sc;
++ info->shadow[rqid].sc_data_direction = sc->sc_data_direction;
++ info->shadow[rqid].act = ring_req->act;
++
++ ref_cnt = map_data_for_request(info, sc, ring_req, rqid);
++ if (ref_cnt < 0) {
++ add_id_to_freelist(info, rqid);
++		if (ref_cnt == -ENOMEM) {
++			goto out_host_busy;
++		} else {
++			sc->result = (DID_ERROR << 16);
++			goto out_fail_command;
++		}
++ }
++
++ ring_req->nr_segments = (uint8_t)ref_cnt;
++ info->shadow[rqid].nr_segments = ref_cnt;
++
++ scsifront_do_request(info);
++
++ return 0;
++
++out_host_busy:
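++	/* Ask the midlayer to retry later: ring slots or grant refs ran out. */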
++ return SCSI_MLQUEUE_HOST_BUSY;
++
++out_fail_command:
++ done(sc);
++ return 0;
++}
++
++
++static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)
++{
++	return FAILED;
++}
++
++/* vscsi supports only device_reset, because each LUN is exposed as its own device */
++static int scsifront_dev_reset_handler(struct scsi_cmnd *sc)
++{
++ struct Scsi_Host *host = sc->device->host;
++ struct vscsifrnt_info *info =
++ (struct vscsifrnt_info *) sc->device->host->hostdata;
++
++ vscsiif_request_t *ring_req;
++ uint16_t rqid;
++ int err;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++ spin_lock_irq(host->host_lock);
++#endif
++
++ ring_req = scsifront_pre_request(info);
++ ring_req->act = VSCSIIF_ACT_SCSI_RESET;
++
++ rqid = ring_req->rqid;
++ info->shadow[rqid].act = VSCSIIF_ACT_SCSI_RESET;
++
++ ring_req->channel = sc->device->channel;
++ ring_req->id = sc->device->id;
++ ring_req->lun = sc->device->lun;
++ ring_req->cmd_len = sc->cmd_len;
++
++	if (sc->cmd_len)
++ memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
++ else
++ memset(ring_req->cmnd, 0, VSCSIIF_MAX_COMMAND_SIZE);
++
++ ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
++ ring_req->timeout_per_command = (sc->timeout_per_command / HZ);
++ ring_req->nr_segments = 0;
++
++ scsifront_do_request(info);
++
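++	/*
++	 * Drop the host lock while waiting: the reset response is delivered
++	 * by the response kthread via scsifront_sync_cmd_done(), which wakes
++	 * wq_reset.
++	 */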
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++	spin_unlock_irq(host->host_lock);
++#endif
++ wait_event_interruptible(info->shadow[rqid].wq_reset,
++ info->shadow[rqid].wait_reset);
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++	spin_lock_irq(host->host_lock);
++#endif
++
++ err = info->shadow[rqid].rslt_reset;
++
++ add_id_to_freelist(info, rqid);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
++ spin_unlock_irq(host->host_lock);
++#endif
++	return err;
++}
++
++
++struct scsi_host_template scsifront_sht = {
++ .module = THIS_MODULE,
++ .name = "Xen SCSI frontend driver",
++ .queuecommand = scsifront_queuecommand,
++ .eh_abort_handler = scsifront_eh_abort_handler,
++ .eh_device_reset_handler= scsifront_dev_reset_handler,
++ .cmd_per_lun = VSCSIIF_DEFAULT_CMD_PER_LUN,
++ .can_queue = VSCSIIF_MAX_REQS,
++ .this_id = -1,
++ .sg_tablesize = VSCSIIF_SG_TABLESIZE,
++ .use_clustering = DISABLE_CLUSTERING,
++ .proc_name = "scsifront",
++};
++
++
++static int __init scsifront_init(void)
++{
++ int err;
++
++ if (!is_running_on_xen())
++ return -ENODEV;
++
++ err = scsifront_xenbus_init();
++
++ return err;
++}
++
++static void __exit scsifront_exit(void)
++{
++ scsifront_xenbus_unregister();
++}
++
++module_init(scsifront_init);
++module_exit(scsifront_exit);
++
++MODULE_DESCRIPTION("Xen SCSI frontend driver");
++MODULE_LICENSE("GPL");
+Index: head-2008-11-25/drivers/xen/scsifront/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/scsifront/xenbus.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,421 @@
++/*
++ * Xen SCSI frontend driver
++ *
++ * Copyright (c) 2008, FUJITSU Limited
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++
++#include <linux/version.h>
++#include "common.h"
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
++ #define DEFAULT_TASK_COMM_LEN 16
++#else
++ #define DEFAULT_TASK_COMM_LEN TASK_COMM_LEN
++#endif
++
++extern struct scsi_host_template scsifront_sht;
++
++static void scsifront_free(struct vscsifrnt_info *info)
++{
++ struct Scsi_Host *host = info->host;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,14)
++ if (host->shost_state != SHOST_DEL) {
++#else
++ if (!test_bit(SHOST_DEL, &host->shost_state)) {
++#endif
++ scsi_remove_host(info->host);
++ }
++
++ if (info->ring_ref != GRANT_INVALID_REF) {
++ gnttab_end_foreign_access(info->ring_ref,
++ (unsigned long)info->ring.sring);
++ info->ring_ref = GRANT_INVALID_REF;
++ info->ring.sring = NULL;
++ }
++
++ if (info->irq)
++ unbind_from_irqhandler(info->irq, info);
++ info->irq = 0;
++
++ scsi_host_put(info->host);
++}
++
++
++static int scsifront_alloc_ring(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct vscsiif_sring *sring;
++ int err = -ENOMEM;
++
++
++ info->ring_ref = GRANT_INVALID_REF;
++
++ /***** Frontend to Backend ring start *****/
++ sring = (struct vscsiif_sring *) __get_free_page(GFP_KERNEL);
++ if (!sring) {
++		xenbus_dev_fatal(dev, err, "failed to allocate shared ring (Front to Back)");
++ return err;
++ }
++ SHARED_RING_INIT(sring);
++ FRONT_RING_INIT(&info->ring, sring, PAGE_SIZE);
++
++ err = xenbus_grant_ring(dev, virt_to_mfn(sring));
++ if (err < 0) {
++ free_page((unsigned long) sring);
++ info->ring.sring = NULL;
++		xenbus_dev_fatal(dev, err, "failed to grant shared ring (Front to Back)");
++ goto free_sring;
++ }
++ info->ring_ref = err;
++
++ err = bind_listening_port_to_irqhandler(
++ dev->otherend_id, scsifront_intr,
++ SA_SAMPLE_RANDOM, "scsifront", info);
++
++ if (err <= 0) {
++ xenbus_dev_fatal(dev, err, "bind_listening_port_to_irqhandler");
++ goto free_sring;
++ }
++ info->irq = err;
++
++ return 0;
++
++/* free resource */
++free_sring:
++ scsifront_free(info);
++
++ return err;
++}
++
++
++static int scsifront_init_ring(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct xenbus_transaction xbt;
++ int err;
++
++	DPRINTK("%s\n", __FUNCTION__);
++
++ err = scsifront_alloc_ring(info);
++ if (err)
++ return err;
++	DPRINTK("%u %u\n", info->ring_ref, irq_to_evtchn_port(info->irq));
++
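++	/*
++	 * A xenbus transaction can be aborted by a concurrent writer; in
++	 * that case xenbus_transaction_end() returns -EAGAIN and the whole
++	 * transaction is simply replayed from 'again'.
++	 */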
++again:
++	err = xenbus_transaction_start(&xbt);
++	if (err) {
++		xenbus_dev_fatal(dev, err, "starting transaction");
++		goto free_sring;
++	}
++
++ err = xenbus_printf(xbt, dev->nodename, "ring-ref", "%u",
++ info->ring_ref);
++ if (err) {
++ xenbus_dev_fatal(dev, err, "%s", "writing ring-ref");
++ goto fail;
++ }
++
++ err = xenbus_printf(xbt, dev->nodename, "event-channel", "%u",
++ irq_to_evtchn_port(info->irq));
++
++ if (err) {
++ xenbus_dev_fatal(dev, err, "%s", "writing event-channel");
++ goto fail;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err) {
++ if (err == -EAGAIN)
++ goto again;
++ xenbus_dev_fatal(dev, err, "completing transaction");
++ goto free_sring;
++ }
++
++ return 0;
++
++fail:
++ xenbus_transaction_end(xbt, 1);
++free_sring:
++ /* free resource */
++ scsifront_free(info);
++
++ return err;
++}
++
++
++static int scsifront_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ struct vscsifrnt_info *info;
++ struct Scsi_Host *host;
++ int i, err = -ENOMEM;
++ char name[DEFAULT_TASK_COMM_LEN];
++
++ host = scsi_host_alloc(&scsifront_sht, sizeof(*info));
++ if (!host) {
++		xenbus_dev_fatal(dev, err, "failed to allocate scsi host");
++ return err;
++ }
++ info = (struct vscsifrnt_info *) host->hostdata;
++ info->host = host;
++
++ dev->dev.driver_data = info;
++ info->dev = dev;
++
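++	/* Chain all shadow entries into the free list; 0x0fff terminates it. */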
++ for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
++ info->shadow[i].next_free = i + 1;
++ init_waitqueue_head(&(info->shadow[i].wq_reset));
++ info->shadow[i].wait_reset = 0;
++ }
++ info->shadow[VSCSIIF_MAX_REQS - 1].next_free = 0x0fff;
++
++ err = scsifront_init_ring(info);
++ if (err) {
++ scsi_host_put(host);
++ return err;
++ }
++
++ init_waitqueue_head(&info->wq);
++ spin_lock_init(&info->io_lock);
++ spin_lock_init(&info->shadow_lock);
++
++ snprintf(name, DEFAULT_TASK_COMM_LEN, "vscsiif.%d", info->host->host_no);
++
++ info->kthread = kthread_run(scsifront_schedule, info, name);
++ if (IS_ERR(info->kthread)) {
++ err = PTR_ERR(info->kthread);
++ info->kthread = NULL;
++ printk(KERN_ERR "scsifront: kthread start err %d\n", err);
++ goto free_sring;
++ }
++
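++	/*
++	 * A single request carries at most VSCSIIF_SG_TABLESIZE pages of
++	 * data; the "- 1" keeps one page of headroom for buffers that do
++	 * not start on a page boundary (expressed in 512-byte sectors).
++	 */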
++ host->max_id = VSCSIIF_MAX_TARGET;
++ host->max_channel = 0;
++ host->max_lun = VSCSIIF_MAX_LUN;
++ host->max_sectors = (VSCSIIF_SG_TABLESIZE - 1) * PAGE_SIZE / 512;
++
++ err = scsi_add_host(host, &dev->dev);
++ if (err) {
++		printk(KERN_ERR "scsifront: failed to add scsi host %d\n", err);
++ goto free_sring;
++ }
++
++ xenbus_switch_state(dev, XenbusStateInitialised);
++
++ return 0;
++
++free_sring:
++ /* free resource */
++ scsifront_free(info);
++ return err;
++}
++
++static int scsifront_remove(struct xenbus_device *dev)
++{
++ struct vscsifrnt_info *info = dev->dev.driver_data;
++
++	DPRINTK("%s: %s removed\n", __FUNCTION__, dev->nodename);
++
++ if (info->kthread) {
++ kthread_stop(info->kthread);
++ info->kthread = NULL;
++ }
++
++ scsifront_free(info);
++
++ return 0;
++}
++
++
++static int scsifront_disconnect(struct vscsifrnt_info *info)
++{
++ struct xenbus_device *dev = info->dev;
++ struct Scsi_Host *host = info->host;
++
++	DPRINTK("%s: %s disconnect\n", __FUNCTION__, dev->nodename);
++
++	/*
++	 * By the time this function runs, all of the frontend's devices
++	 * have already been deleted, so there is no need to quiesce I/O
++	 * before calling scsi_remove_host().
++	 */
++
++ scsi_remove_host(host);
++ xenbus_frontend_closed(dev);
++
++ return 0;
++}
++
++#define VSCSIFRONT_OP_ADD_LUN 1
++#define VSCSIFRONT_OP_DEL_LUN 2
++
++static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
++{
++ struct xenbus_device *dev = info->dev;
++ int i, err = 0;
++ char str[64], state_str[64];
++ char **dir;
++ unsigned int dir_n = 0;
++ unsigned int device_state;
++ unsigned int hst, chn, tgt, lun;
++ struct scsi_device *sdev;
++
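++	/*
++	 * The per-LUN nodes live in the backend's directory (dev->otherend);
++	 * the frontend mirrors each LUN's state into its own subtree so the
++	 * backend can follow the handshake.
++	 */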
++ dir = xenbus_directory(XBT_NIL, dev->otherend, "vscsi-devs", &dir_n);
++ if (IS_ERR(dir))
++ return;
++
++ for (i = 0; i < dir_n; i++) {
++ /* read status */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/state", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->otherend, str, "%u",
++ &device_state);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* virtual SCSI device */
++ snprintf(str, sizeof(str), "vscsi-devs/%s/v-dev", dir[i]);
++ err = xenbus_scanf(XBT_NIL, dev->otherend, str,
++ "%u:%u:%u:%u", &hst, &chn, &tgt, &lun);
++ if (XENBUS_EXIST_ERR(err))
++ continue;
++
++ /* front device state path */
++ snprintf(state_str, sizeof(state_str), "vscsi-devs/%s/state", dir[i]);
++
++ switch (op) {
++ case VSCSIFRONT_OP_ADD_LUN:
++ if (device_state == XenbusStateInitialised) {
++ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++ if (sdev) {
++ printk(KERN_ERR "scsifront: Device already in use.\n");
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateClosed);
++ } else {
++ scsi_add_device(info->host, chn, tgt, lun);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateConnected);
++ }
++ }
++ break;
++ case VSCSIFRONT_OP_DEL_LUN:
++ if (device_state == XenbusStateClosing) {
++ sdev = scsi_device_lookup(info->host, chn, tgt, lun);
++ if (sdev) {
++ scsi_remove_device(sdev);
++ scsi_device_put(sdev);
++ xenbus_printf(XBT_NIL, dev->nodename,
++ state_str, "%d", XenbusStateClosed);
++ }
++ }
++ break;
++ default:
++ break;
++ }
++ }
++
++ kfree(dir);
++}
++
++
++static void scsifront_backend_changed(struct xenbus_device *dev,
++ enum xenbus_state backend_state)
++{
++ struct vscsifrnt_info *info = dev->dev.driver_data;
++
++ DPRINTK("%p %u %u\n", dev, dev->state, backend_state);
++
++ switch (backend_state) {
++ case XenbusStateUnknown:
++ case XenbusStateInitialising:
++ case XenbusStateInitWait:
++ case XenbusStateClosed:
++ break;
++
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ if (xenbus_read_driver_state(dev->nodename) ==
++ XenbusStateInitialised) {
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
++ }
++
++ if (dev->state == XenbusStateConnected)
++ break;
++
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++
++ case XenbusStateClosing:
++ scsifront_disconnect(info);
++ break;
++
++ case XenbusStateReconfiguring:
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_DEL_LUN);
++ xenbus_switch_state(dev, XenbusStateReconfiguring);
++ break;
++
++ case XenbusStateReconfigured:
++ scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
++ xenbus_switch_state(dev, XenbusStateConnected);
++ break;
++ }
++}
++
++
++static struct xenbus_device_id scsifront_ids[] = {
++ { "vscsi" },
++ { "" }
++};
++
++
++static struct xenbus_driver scsifront_driver = {
++ .name = "vscsi",
++ .owner = THIS_MODULE,
++ .ids = scsifront_ids,
++ .probe = scsifront_probe,
++ .remove = scsifront_remove,
++/* .resume = scsifront_resume, */
++ .otherend_changed = scsifront_backend_changed,
++};
++
++int scsifront_xenbus_init(void)
++{
++ return xenbus_register_frontend(&scsifront_driver);
++}
++
++void scsifront_xenbus_unregister(void)
++{
++ xenbus_unregister_driver(&scsifront_driver);
++}
++
+Index: head-2008-11-25/drivers/xen/sfc_netback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/Makefile 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,12 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netback -Idrivers/xen/sfc_netutil -Idrivers/xen/netback -Idrivers/net/sfc
++EXTRA_CFLAGS += -D__ci_driver__
++EXTRA_CFLAGS += -DEFX_USE_KCOMPAT
++EXTRA_CFLAGS += -Werror
++
++ifdef GCOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_BACKEND) := sfc_netback.o
++
++sfc_netback-objs := accel.o accel_fwd.o accel_msg.o accel_solarflare.o accel_xenbus.o accel_debugfs.o
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,129 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_solarflare.h"
++
++#include <linux/notifier.h>
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++#endif
++
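++/*
++ * Propagate NETDEV_UP/NETDEV_DOWN events on the physical NIC to every
++ * accelerated backend bound to that interface.
++ */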
++static int netback_accel_netdev_event(struct notifier_block *nb,
++ unsigned long event, void *ptr)
++{
++ struct net_device *net_dev = (struct net_device *)ptr;
++ struct netback_accel *bend;
++
++ if ((event == NETDEV_UP) || (event == NETDEV_DOWN)) {
++ mutex_lock(&bend_list_mutex);
++ bend = bend_list;
++ while (bend != NULL) {
++ mutex_lock(&bend->bend_mutex);
++			/*
++			 * This happens when the shared pages have been
++			 * unmapped but the bend has not yet been removed
++			 * from the list.
++			 */
++ if (bend->shared_page == NULL)
++ goto next;
++
++ if (bend->net_dev->ifindex == net_dev->ifindex)
++ netback_accel_set_interface_state
++ (bend, event == NETDEV_UP);
++
++ next:
++ mutex_unlock(&bend->bend_mutex);
++ bend = bend->next_bend;
++ }
++ mutex_unlock(&bend_list_mutex);
++ }
++
++ return NOTIFY_DONE;
++}
++
++
++static struct notifier_block netback_accel_netdev_notifier = {
++ .notifier_call = netback_accel_netdev_event,
++};
++
++
++unsigned sfc_netback_max_pages = NETBACK_ACCEL_DEFAULT_MAX_BUF_PAGES;
++module_param_named(max_pages, sfc_netback_max_pages, uint, 0644);
++MODULE_PARM_DESC(max_pages,
++		 "Upper limit on the number of buffer pages each guest may map");
++
++/* Initialise subsystems needed for the accelerated fast path */
++static int __init netback_accel_init(void)
++{
++ int rc = 0;
++
++#ifdef EFX_GCOV
++ gcov_provider_init(THIS_MODULE);
++#endif
++
++ rc = netback_accel_init_fwd();
++
++ if (rc == 0)
++ netback_accel_debugfs_init();
++
++ if (rc == 0)
++ rc = netback_accel_sf_init();
++
++ if (rc == 0)
++ rc = register_netdevice_notifier
++ (&netback_accel_netdev_notifier);
++
++	/*
++	 * TODO: if no device was found, the resources allocated for the
++	 * acceleration subsystem above should be cleaned up again.
++	 */
++
++ return rc;
++}
++
++module_init(netback_accel_init);
++
++static void __exit netback_accel_exit(void)
++{
++ unregister_netdevice_notifier(&netback_accel_netdev_notifier);
++
++ netback_accel_sf_shutdown();
++
++ netback_accel_shutdown_bends();
++
++ netback_accel_debugfs_fini();
++
++ netback_accel_shutdown_fwd();
++
++#ifdef EFX_GCOV
++ gcov_provider_fini(THIS_MODULE);
++#endif
++}
++
++module_exit(netback_accel_exit);
++
++MODULE_LICENSE("GPL");
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel.h 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,393 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_H
++#define NETBACK_ACCEL_H
++
++#include <linux/slab.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++#include <linux/mutex.h>
++#include <linux/wait.h>
++
++#include <xen/xenbus.h>
++
++#include "accel_shared_fifo.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++
++/**************************************************************************
++ * Datatypes
++ **************************************************************************/
++
++#define NETBACK_ACCEL_DEFAULT_MAX_FILTERS (8)
++#define NETBACK_ACCEL_DEFAULT_MAX_MCASTS (8)
++#define NETBACK_ACCEL_DEFAULT_MAX_BUF_PAGES (384)
++/* Variable to store module parameter for max_buf_pages */
++extern unsigned sfc_netback_max_pages;
++
++#define NETBACK_ACCEL_STATS 1
++
++#if NETBACK_ACCEL_STATS
++#define NETBACK_ACCEL_STATS_OP(x) x
++#else
++#define NETBACK_ACCEL_STATS_OP(x)
++#endif
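++/*
++ * Wrap statistics updates in NETBACK_ACCEL_STATS_OP() so they compile
++ * away entirely when stats are disabled, e.g. (illustrative use):
++ *
++ *	NETBACK_ACCEL_STATS_OP(bend->stats.evq_wakeups++);
++ */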
++
++/*! Statistics for a given backend */
++struct netback_accel_stats {
++ /*! Number of eventq wakeup events */
++ u64 evq_wakeups;
++ /*! Number of eventq timeout events */
++ u64 evq_timeouts;
++ /*! Number of filters used */
++ u32 num_filters;
++ /*! Number of buffer pages registered */
++ u32 num_buffer_pages;
++};
++
++
++/* Debug fs nodes for each of the above stats */
++struct netback_accel_dbfs {
++ struct dentry *evq_wakeups;
++ struct dentry *evq_timeouts;
++ struct dentry *num_filters;
++ struct dentry *num_buffer_pages;
++};
++
++
++/*! Resource limits for a given NIC */
++struct netback_accel_limits {
++ int max_filters; /*!< Max. number of filters to use. */
++ int max_mcasts; /*!< Max. number of mcast subscriptions */
++ int max_buf_pages; /*!< Max. number of pages of NIC buffers */
++};
++
++
++/*! The state for an instance of the back end driver. */
++struct netback_accel {
++ /*! mutex to protect this state */
++ struct mutex bend_mutex;
++
++ /*! Watches on xenstore */
++ struct xenbus_watch domu_accel_watch;
++ struct xenbus_watch config_accel_watch;
++
++ /*! Pointer to whatever device cookie ties us in to the hypervisor */
++ void *hdev_data;
++
++ /*! FIFO indices. Next page is msg FIFOs */
++ struct net_accel_shared_page *shared_page;
++
++ /*! Defer control message processing */
++ struct work_struct handle_msg;
++
++ /*! Identifies other end VM and interface.*/
++ int far_end;
++ int vif_num;
++
++	/*! Handle used to unmap the shared pages */
++ void *sh_pages_unmap;
++
++ /* Resource tracking */
++ /*! Limits on H/W & Dom0 resources */
++ struct netback_accel_limits quotas;
++
++ /* Hardware resources */
++ /*! The H/W type of associated NIC */
++ enum net_accel_hw_type hw_type;
++ /*! State of allocation */
++ int hw_state;
++ /*! Index into ci_driver.nics[] for this interface */
++ int nic_index;
++ /*! How to set up the acceleration for this hardware */
++ int (*accel_setup)(struct netback_accel *);
++ /*! And how to stop it. */
++ void (*accel_shutdown)(struct netback_accel *);
++
++ /*! The physical/real net_dev for this interface */
++ struct net_device *net_dev;
++
++	/*! Magic pointer to locate state in forwarding table */
++ void *fwd_priv;
++
++ /*! Message FIFO */
++ sh_msg_fifo2 to_domU;
++ /*! Message FIFO */
++ sh_msg_fifo2 from_domU;
++
++ /*! General notification channel id */
++ int msg_channel;
++ /*! General notification channel irq */
++ int msg_channel_irq;
++
++ /*! Event channel id dedicated to network packet interrupts. */
++ int net_channel;
++ /*! Event channel irq dedicated to network packets interrupts */
++ int net_channel_irq;
++
++ /*! The MAC address the frontend goes by. */
++ u8 mac[ETH_ALEN];
++ /*! Driver name of associated NIC */
++ char *nicname;
++
++ /*! Array of pointers to buffer pages mapped */
++ grant_handle_t *buffer_maps;
++ u64 *buffer_addrs;
++ /*! Index into buffer_maps */
++ int buffer_maps_index;
++ /*! Max number of pages that domU is allowed/will request to map */
++ int max_pages;
++
++ /*! Pointer to hardware specific private area */
++ void *accel_hw_priv;
++
++ /*! Wait queue for changes in accelstate. */
++ wait_queue_head_t state_wait_queue;
++
++ /*! Current state of the frontend according to the xenbus
++ * watch. */
++ XenbusState frontend_state;
++
++ /*! Current state of this backend. */
++ XenbusState backend_state;
++
++ /*! Non-zero if the backend is being removed. */
++ int removing;
++
++ /*! Non-zero if the setup_vnic has been called. */
++ int vnic_is_setup;
++
++#if NETBACK_ACCEL_STATS
++ struct netback_accel_stats stats;
++#endif
++#if defined(CONFIG_DEBUG_FS)
++ char *dbfs_dir_name;
++ struct dentry *dbfs_dir;
++ struct netback_accel_dbfs dbfs;
++#endif
++
++ /*! List */
++ struct netback_accel *next_bend;
++};
++
++
++/*
++ * Values for netback_accel.hw_state. States of resource allocation
++ * we can go through
++ */
++/*! No hardware has yet been allocated. */
++#define NETBACK_ACCEL_RES_NONE (0)
++/*! Hardware has been allocated. */
++#define NETBACK_ACCEL_RES_ALLOC (1)
++#define NETBACK_ACCEL_RES_FILTER (2)
++#define NETBACK_ACCEL_RES_HWINFO (3)
++
++/*! Filtering specification. This assumes that for VNIC support we
++ * will always want wildcard entries, so only specifies the
++ * destination IP/port
++ */
++struct netback_accel_filter_spec {
++ /*! Internal, used to access efx_vi API */
++ void *filter_handle;
++
++ /*! Destination IP in network order */
++ u32 destip_be;
++ /*! Destination port in network order */
++ u16 destport_be;
++ /*! Mac address */
++ u8 mac[ETH_ALEN];
++ /*! TCP or UDP */
++ u8 proto;
++};
++
++
++/**************************************************************************
++ * From accel.c
++ **************************************************************************/
++
++/*! \brief Start up all the acceleration plugins
++ *
++ * \return 0 on success, an errno on failure
++ */
++extern int netback_accel_init_accel(void);
++
++/*! \brief Shut down all the acceleration plugins
++ */
++extern void netback_accel_shutdown_accel(void);
++
++
++/**************************************************************************
++ * From accel_fwd.c
++ **************************************************************************/
++
++/*! \brief Init the forwarding infrastructure
++ * \return 0 on success, or -ENOMEM if it couldn't get memory for the
++ * forward table
++ */
++extern int netback_accel_init_fwd(void);
++
++/*! \brief Shut down the forwarding and free memory. */
++extern void netback_accel_shutdown_fwd(void);
++
++/*! Initialise each NIC port's forwarding table */
++extern void *netback_accel_init_fwd_port(void);
++extern void netback_accel_shutdown_fwd_port(void *fwd_priv);
++
++/*! \brief Add an entry to the forwarding table.
++ * \param mac : MAC address, used as hash key
++ * \param context : value to associate with key (can be NULL, see
++ * netback_accel_fwd_set_context)
++ * \return 0 on success, -ENOMEM if the table was full and could not
++ * grow it
++ */
++extern int netback_accel_fwd_add(const __u8 *mac, void *context,
++ void *fwd_priv);
++
++/*! \brief Remove an entry from the forwarding table.
++ * \param mac : the MAC address to remove
++ * \return nothing: it is not an error if the mac was not in the table
++ */
++extern void netback_accel_fwd_remove(const __u8 *mac, void *fwd_priv);
++
++/*! \brief Set the context pointer for an existing fwd table entry.
++ * \param mac : key that is already present in the table
++ * \param context : new value to associate with key
++ * \return 0 on success, -ENOENT if mac not present in table.
++ */
++extern int netback_accel_fwd_set_context(const __u8 *mac, void *context,
++ void *fwd_priv);
++
++/**************************************************************************
++ * From accel_msg.c
++ **************************************************************************/
++
++
++/*! \brief Send the start-of-day message that handshakes with the VNIC
++ * and tells it its MAC address.
++ *
++ * \param bend The back end driver data structure
++ * \param version The version of communication to use, e.g. NET_ACCEL_MSG_VERSION
++ */
++extern void netback_accel_msg_tx_hello(struct netback_accel *bend,
++ unsigned version);
++
++/*! \brief Send a "there's a new local mac address" message
++ *
++ * \param bend The back end driver data structure for the vnic to send
++ * the message to
++ * \param mac Pointer to the new mac address
++ */
++extern void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
++ const void *mac);
++
++/*! \brief Send a "a mac address that was local has gone away" message
++ *
++ * \param bend The back end driver data structure for the vnic to send
++ * the message to
++ * \param mac Pointer to the old mac address
++ */
++extern void netback_accel_msg_tx_old_localmac(struct netback_accel *bend,
++ const void *mac);
++
++extern void netback_accel_set_interface_state(struct netback_accel *bend,
++ int up);
++
++/*! \brief Process the message queue for a bend that has just
++ * interrupted.
++ *
++ * Demultiplexes an interrupt from the front end driver, taking
++ * messages from the FIFO and taking appropriate action.
++ *
++ * \param bend The back end driver data structure
++ */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++extern void netback_accel_msg_rx_handler(struct work_struct *arg);
++#else
++extern void netback_accel_msg_rx_handler(void *bend_void);
++#endif
++
++/**************************************************************************
++ * From accel_xenbus.c
++ **************************************************************************/
++/*! List of all the bends currently in existence. */
++extern struct netback_accel *bend_list;
++extern struct mutex bend_list_mutex;
++
++/*! \brief Probe a new network interface. */
++extern int netback_accel_probe(struct xenbus_device *dev);
++
++/*! \brief Remove a network interface. */
++extern int netback_accel_remove(struct xenbus_device *dev);
++
++/*! \brief Shutdown all accelerator backends */
++extern void netback_accel_shutdown_bends(void);
++
++/*! \brief Initiate the xenbus state teardown handshake */
++extern void netback_accel_set_closing(struct netback_accel *bend);
++
++/**************************************************************************
++ * From accel_debugfs.c
++ **************************************************************************/
++/*! Global statistics */
++struct netback_accel_global_stats {
++ /*! Number of TX packets seen through driverlink */
++ u64 dl_tx_packets;
++ /*! Number of TX packets seen through driverlink we didn't like */
++ u64 dl_tx_bad_packets;
++ /*! Number of RX packets seen through driverlink */
++ u64 dl_rx_packets;
++ /*! Number of mac addresses we are forwarding to */
++ u32 num_fwds;
++};
++
++/*! Debug fs entries for each of the above stats */
++struct netback_accel_global_dbfs {
++ struct dentry *dl_tx_packets;
++ struct dentry *dl_tx_bad_packets;
++ struct dentry *dl_rx_packets;
++ struct dentry *num_fwds;
++};
++
++#if NETBACK_ACCEL_STATS
++extern struct netback_accel_global_stats global_stats;
++#endif
++
++/*! \brief Initialise the debugfs root and populate with global stats */
++extern void netback_accel_debugfs_init(void);
++
++/*! \brief Remove our debugfs root directory */
++extern void netback_accel_debugfs_fini(void);
++
++/*! \brief Add per-bend statistics to debug fs */
++extern int netback_accel_debugfs_create(struct netback_accel *bend);
++/*! \brief Remove per-bend statistics from debug fs */
++extern int netback_accel_debugfs_remove(struct netback_accel *bend);
++
++#endif /* NETBACK_ACCEL_H */
++
++
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_debugfs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_debugfs.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,148 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/fs.h>
++#include <linux/debugfs.h>
++
++#include "accel.h"
++
++#if defined(CONFIG_DEBUG_FS)
++static struct dentry *sfc_debugfs_root = NULL;
++#endif
++
++#if NETBACK_ACCEL_STATS
++struct netback_accel_global_stats global_stats;
++#if defined(CONFIG_DEBUG_FS)
++static struct netback_accel_global_dbfs global_dbfs;
++#endif
++#endif
++
++void netback_accel_debugfs_init(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ sfc_debugfs_root = debugfs_create_dir("sfc_netback", NULL);
++ if (sfc_debugfs_root == NULL)
++ return;
++
++ global_dbfs.num_fwds = debugfs_create_u32
++ ("num_fwds", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.num_fwds);
++ global_dbfs.dl_tx_packets = debugfs_create_u64
++ ("dl_tx_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_tx_packets);
++ global_dbfs.dl_rx_packets = debugfs_create_u64
++ ("dl_rx_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_rx_packets);
++ global_dbfs.dl_tx_bad_packets = debugfs_create_u64
++ ("dl_tx_bad_packets", S_IRUSR | S_IRGRP | S_IROTH,
++ sfc_debugfs_root, &global_stats.dl_tx_bad_packets);
++#endif
++}
++
++
++void netback_accel_debugfs_fini(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ debugfs_remove(global_dbfs.num_fwds);
++ debugfs_remove(global_dbfs.dl_tx_packets);
++ debugfs_remove(global_dbfs.dl_rx_packets);
++ debugfs_remove(global_dbfs.dl_tx_bad_packets);
++
++ debugfs_remove(sfc_debugfs_root);
++#endif
++}
++
++
++int netback_accel_debugfs_create(struct netback_accel *bend)
++{
++#if defined(CONFIG_DEBUG_FS)
++	/* Smallest length is 7 ("vif0.0" plus the trailing NUL) */
++ int length = 7, temp;
++
++ if (sfc_debugfs_root == NULL)
++ return -ENOENT;
++
++ /* Work out length of string representation of far_end and vif_num */
++ temp = bend->far_end;
++ while (temp > 9) {
++ length++;
++ temp = temp / 10;
++ }
++ temp = bend->vif_num;
++ while (temp > 9) {
++ length++;
++ temp = temp / 10;
++ }
++
++ bend->dbfs_dir_name = kmalloc(length, GFP_KERNEL);
++ if (bend->dbfs_dir_name == NULL)
++ return -ENOMEM;
++ sprintf(bend->dbfs_dir_name, "vif%d.%d", bend->far_end, bend->vif_num);
++
++ bend->dbfs_dir = debugfs_create_dir(bend->dbfs_dir_name,
++ sfc_debugfs_root);
++ if (bend->dbfs_dir == NULL) {
++ kfree(bend->dbfs_dir_name);
++ return -ENOMEM;
++ }
++
++#if NETBACK_ACCEL_STATS
++ bend->dbfs.evq_wakeups = debugfs_create_u64
++ ("evq_wakeups", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.evq_wakeups);
++ bend->dbfs.evq_timeouts = debugfs_create_u64
++ ("evq_timeouts", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.evq_timeouts);
++ bend->dbfs.num_filters = debugfs_create_u32
++ ("num_filters", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.num_filters);
++ bend->dbfs.num_buffer_pages = debugfs_create_u32
++ ("num_buffer_pages", S_IRUSR | S_IRGRP | S_IROTH,
++ bend->dbfs_dir, &bend->stats.num_buffer_pages);
++#endif
++#endif
++ return 0;
++}
++
++
++int netback_accel_debugfs_remove(struct netback_accel *bend)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (bend->dbfs_dir != NULL) {
++#if NETBACK_ACCEL_STATS
++ debugfs_remove(bend->dbfs.evq_wakeups);
++ debugfs_remove(bend->dbfs.evq_timeouts);
++ debugfs_remove(bend->dbfs.num_filters);
++ debugfs_remove(bend->dbfs.num_buffer_pages);
++#endif
++ debugfs_remove(bend->dbfs_dir);
++ }
++
++	kfree(bend->dbfs_dir_name);
++#endif
++ return 0;
++}
++
++
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_fwd.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_fwd.c 2008-04-02 12:34:02.000000000 +0200
+@@ -0,0 +1,420 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "accel.h"
++#include "accel_cuckoo_hash.h"
++#include "accel_util.h"
++#include "accel_solarflare.h"
++
++#include "driverlink_api.h"
++
++#include <linux/if_arp.h>
++#include <linux/skbuff.h>
++#include <linux/list.h>
++
++/* State stored in the forward table */
++struct fwd_struct {
++ struct list_head link; /* Forms list */
++	void *context;
++ __u8 valid;
++ __u8 mac[ETH_ALEN];
++};
++
++/* Max value we support */
++#define NUM_FWDS_BITS 8
++#define NUM_FWDS (1 << NUM_FWDS_BITS)
++#define FWD_MASK (NUM_FWDS - 1)
++
++struct port_fwd {
++ /* Make a list */
++ struct list_head link;
++ /* Hash table to store the fwd_structs */
++ cuckoo_hash_table fwd_hash_table;
++ /* The array of fwd_structs */
++ struct fwd_struct *fwd_array;
++ /* Linked list of entries in use. */
++ struct list_head fwd_list;
++ /* Could do something clever with a reader/writer lock. */
++ spinlock_t fwd_lock;
++ /* Make find_free_entry() a bit faster by caching this */
++ int last_free_index;
++};
++
++/*
++ * This is unlocked as it's only called from dl probe and remove,
++ * which are themselves synchronised. Could get rid of it entirely as
++ * it's never iterated, but useful for debug
++ */
++static struct list_head port_fwds;
++
++
++/* Search the fwd_array for an unused entry */
++static int fwd_find_free_entry(struct port_fwd *fwd_set)
++{
++ int index = fwd_set->last_free_index;
++
++ do {
++ if (!fwd_set->fwd_array[index].valid) {
++ fwd_set->last_free_index = index;
++ return index;
++ }
++ index++;
++ if (index >= NUM_FWDS)
++ index = 0;
++ } while (index != fwd_set->last_free_index);
++
++ return -ENOMEM;
++}
++
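++/*
++ * E.g. with NUM_FWDS == 256 and last_free_index == 200, the scan
++ * covers 200..255, wraps to 0..199, and caches any hit so the next
++ * call starts from the slot found this time.
++ */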
++
++/* Look up a MAC in the hash table. Caller should hold table lock. */
++static inline struct fwd_struct *fwd_find_entry(const __u8 *mac,
++ struct port_fwd *fwd_set)
++{
++ cuckoo_hash_value value;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++
++ if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key),
++ &value)) {
++ struct fwd_struct *fwd = &fwd_set->fwd_array[value];
++ DPRINTK_ON(memcmp(fwd->mac, mac, ETH_ALEN) != 0);
++ return fwd;
++ }
++
++ return NULL;
++}
++
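++/*
++ * Usage sketch (illustrative only, not called from here): lookups
++ * are keyed on the 6-byte MAC and must be made under fwd_lock:
++ *
++ *	spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ *	fwd = fwd_find_entry(mac, fwd_set);
++ *	if (fwd != NULL)
++ *		context = fwd->context;
++ *	spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ */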
++
++/* Initialise each NIC port's forwarding table */
++void *netback_accel_init_fwd_port(void)
++{
++ struct port_fwd *fwd_set;
++
++ fwd_set = kzalloc(sizeof(struct port_fwd), GFP_KERNEL);
++ if (fwd_set == NULL) {
++ return NULL;
++ }
++
++ spin_lock_init(&fwd_set->fwd_lock);
++
++ fwd_set->fwd_array = kzalloc(sizeof (struct fwd_struct) * NUM_FWDS,
++ GFP_KERNEL);
++ if (fwd_set->fwd_array == NULL) {
++ kfree(fwd_set);
++ return NULL;
++ }
++
++ if (cuckoo_hash_init(&fwd_set->fwd_hash_table, NUM_FWDS_BITS, 8) != 0) {
++ kfree(fwd_set->fwd_array);
++ kfree(fwd_set);
++ return NULL;
++ }
++
++ INIT_LIST_HEAD(&fwd_set->fwd_list);
++
++ list_add(&fwd_set->link, &port_fwds);
++
++ return fwd_set;
++}
++
++
++void netback_accel_shutdown_fwd_port(void *fwd_priv)
++{
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ BUG_ON(list_empty(&port_fwds));
++ list_del(&fwd_set->link);
++
++ BUG_ON(!list_empty(&fwd_set->fwd_list));
++
++ cuckoo_hash_destroy(&fwd_set->fwd_hash_table);
++ kfree(fwd_set->fwd_array);
++ kfree(fwd_set);
++}
++
++
++int netback_accel_init_fwd(void)
++{
++ INIT_LIST_HEAD(&port_fwds);
++ return 0;
++}
++
++
++void netback_accel_shutdown_fwd(void)
++{
++ BUG_ON(!list_empty(&port_fwds));
++}
++
++
++/*
++ * Add an entry to the forwarding table. Returns -ENOMEM if no
++ * space.
++ */
++int netback_accel_fwd_add(const __u8 *mac, void *context, void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ int rc = 0, index;
++ unsigned long flags;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ DPRINTK("Adding mac " MAC_FMT "\n", MAC_ARG(mac));
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++
++	if ((rc = fwd_find_free_entry(fwd_set)) < 0) {
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ return rc;
++ }
++
++ index = rc;
++
++ /* Shouldn't already be in the table */
++ if (cuckoo_hash_lookup(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key), &rc) != 0) {
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ EPRINTK("MAC address " MAC_FMT " already accelerated.\n",
++ MAC_ARG(mac));
++ return -EEXIST;
++ }
++
++ if ((rc = cuckoo_hash_add(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key), index, 1)) == 0) {
++ fwd = &fwd_set->fwd_array[index];
++ fwd->valid = 1;
++ fwd->context = context;
++ memcpy(fwd->mac, mac, ETH_ALEN);
++ list_add(&fwd->link, &fwd_set->fwd_list);
++ NETBACK_ACCEL_STATS_OP(global_stats.num_fwds++);
++ }
++
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++
++ /*
++ * No need to tell frontend that this mac address is local -
++ * it should auto-discover through packets on fastpath what is
++ * local and what is not, and just being on same server
++ * doesn't make it local (it could be on a different
++ * bridge)
++ */
++
++ return rc;
++}
++
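++/*
++ * Illustrative call sequence (hypothetical caller): a MAC is
++ * accelerated once and must be removed exactly once, with context
++ * normally the owning struct netback_accel. On failure the caller
++ * simply stays on the slow path:
++ *
++ *	if (netback_accel_fwd_add(bend->mac, bend, bend->fwd_priv) < 0)
++ *		return;
++ *	...
++ *	netback_accel_fwd_remove(bend->mac, bend->fwd_priv);
++ */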
++
++/* Remove an entry from the forwarding table. */
++void netback_accel_fwd_remove(const __u8 *mac, void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ unsigned long flags;
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(mac);
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ DPRINTK("Removing mac " MAC_FMT "\n", MAC_ARG(mac));
++
++ BUG_ON(fwd_priv == NULL);
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++
++ fwd = fwd_find_entry(mac, fwd_set);
++ if (fwd != NULL) {
++ BUG_ON(list_empty(&fwd_set->fwd_list));
++ list_del(&fwd->link);
++
++ fwd->valid = 0;
++ cuckoo_hash_remove(&fwd_set->fwd_hash_table,
++ (cuckoo_hash_key *)(&key));
++ NETBACK_ACCEL_STATS_OP(global_stats.num_fwds--);
++ }
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++
++ /*
++ * No need to tell frontend that this is no longer present -
++ * the frontend is currently only interested in remote
++ * addresses and it works these out (mostly) by itself
++ */
++}
++
++
++/* Set the context pointer for a hash table entry. */
++int netback_accel_fwd_set_context(const __u8 *mac, void *context,
++ void *fwd_priv)
++{
++ struct fwd_struct *fwd;
++ unsigned long flags;
++ int rc = -ENOENT;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++
++ BUG_ON(fwd_priv == NULL);
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ fwd = fwd_find_entry(mac, fwd_set);
++ if (fwd != NULL) {
++ fwd->context = context;
++ rc = 0;
++ }
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ return rc;
++}
++
++
++/**************************************************************************
++ * Process a received packet
++ **************************************************************************/
++
++/*
++ * Return the matching vnic (struct netback_accel) for this skb's
++ * MAC, or NULL. Must be called with the appropriate fwd_lock held.
++ */
++static struct netback_accel *for_a_vnic(struct netback_pkt_buf *skb,
++ struct port_fwd *fwd_set)
++{
++ struct fwd_struct *fwd;
++ struct netback_accel *retval = NULL;
++
++ fwd = fwd_find_entry(skb->mac.raw, fwd_set);
++ if (fwd != NULL)
++ retval = fwd->context;
++ return retval;
++}
++
++
++static inline int packet_is_arp_reply(struct sk_buff *skb)
++{
++	return skb->protocol == htons(ETH_P_ARP)
++		&& skb->nh.arph->ar_op == htons(ARPOP_REPLY);
++}
++
++
++static inline void hdr_to_filt(struct ethhdr *ethhdr, struct iphdr *ip,
++ struct netback_accel_filter_spec *spec)
++{
++ spec->proto = ip->protocol;
++ spec->destip_be = ip->daddr;
++ memcpy(spec->mac, ethhdr->h_source, ETH_ALEN);
++
++ if (ip->protocol == IPPROTO_TCP) {
++ struct tcphdr *tcp = (struct tcphdr *)((char *)ip + 4 * ip->ihl);
++ spec->destport_be = tcp->dest;
++ } else {
++ struct udphdr *udp = (struct udphdr *)((char *)ip + 4 * ip->ihl);
++ EPRINTK_ON(ip->protocol != IPPROTO_UDP);
++ spec->destport_be = udp->dest;
++ }
++}
++
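++/*
++ * Worked example: for a TCP packet to 10.1.2.3:80, hdr_to_filt()
++ * yields proto == IPPROTO_TCP, destip_be == htonl(0x0a010203) and
++ * destport_be == htons(80); address and port stay big-endian, as
++ * the _be suffix indicates.
++ */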
++
++static inline int netback_accel_can_filter(struct netback_pkt_buf *skb)
++{
++ return (skb->protocol == htons(ETH_P_IP) &&
++ ((skb->nh.iph->protocol == IPPROTO_TCP) ||
++ (skb->nh.iph->protocol == IPPROTO_UDP)));
++}
++
++
++static inline void netback_accel_filter_packet(struct netback_accel *bend,
++ struct netback_pkt_buf *skb)
++{
++ struct netback_accel_filter_spec fs;
++ struct ethhdr *eh = (struct ethhdr *)(skb->mac.raw);
++
++ hdr_to_filt(eh, skb->nh.iph, &fs);
++
++ netback_accel_filter_check_add(bend, &fs);
++}
++
++
++/*
++ * Receive a packet and, if it matches a vnic, install a filter for
++ * it. Ownership is never claimed; the packet always continues down
++ * the normal path. This is verging on Solarflare specific.
++ */
++void netback_accel_rx_packet(struct netback_pkt_buf *skb, void *fwd_priv)
++{
++ struct netback_accel *bend;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++ unsigned long flags;
++
++ BUG_ON(fwd_priv == NULL);
++
++ /* Checking for bcast is cheaper so do that first */
++ if (is_broadcast_ether_addr(skb->mac.raw)) {
++ /* pass through the slow path by not claiming ownership */
++ return;
++ } else if (is_multicast_ether_addr(skb->mac.raw)) {
++ /* pass through the slow path by not claiming ownership */
++ return;
++ } else {
++ /* It is unicast */
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ /* We insert filter to pass it off to a VNIC */
++ if ((bend = for_a_vnic(skb, fwd_set)) != NULL)
++ if (netback_accel_can_filter(skb))
++ netback_accel_filter_packet(bend, skb);
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ }
++ return;
++}
++
++
++void netback_accel_tx_packet(struct sk_buff *skb, void *fwd_priv)
++{
++ __u8 *mac;
++ unsigned long flags;
++ struct port_fwd *fwd_set = (struct port_fwd *)fwd_priv;
++ struct fwd_struct *fwd;
++
++ BUG_ON(fwd_priv == NULL);
++
++ if (is_broadcast_ether_addr(skb->mac.raw) && packet_is_arp_reply(skb)) {
++ /*
++ * update our fast path forwarding to reflect this
++ * gratuitous ARP
++ */
++		mac = skb->mac.raw + ETH_ALEN;
++
++ DPRINTK("%s: found gratuitous ARP for " MAC_FMT "\n",
++ __FUNCTION__, MAC_ARG(mac));
++
++ spin_lock_irqsave(&fwd_set->fwd_lock, flags);
++ /*
++ * Might not be local, but let's tell them all it is,
++ * and they can restore the fastpath if they continue
++ * to get packets that way
++ */
++ list_for_each_entry(fwd, &fwd_set->fwd_list, link) {
++ struct netback_accel *bend = fwd->context;
++ if (bend != NULL)
++ netback_accel_msg_tx_new_localmac(bend, mac);
++ }
++
++ spin_unlock_irqrestore(&fwd_set->fwd_lock, flags);
++ }
++ return;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_msg.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_msg.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,406 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++#include "accel_solarflare.h"
++
++/* Send a HELLO to front end to start things off */
++void netback_accel_msg_tx_hello(struct netback_accel *bend, unsigned version)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg =
++ net_accel_msg_start_send(bend->shared_page,
++ &bend->to_domU, &lock_state);
++	/* The queue _cannot_ be full, as we're the first users. */
++ EPRINTK_ON(msg == NULL);
++
++ if (msg != NULL) {
++ net_accel_msg_init(msg, NET_ACCEL_MSG_HELLO);
++ msg->u.hello.version = version;
++ msg->u.hello.max_pages = bend->quotas.max_buf_pages;
++ VPRINTK("Sending hello to channel %d\n", bend->msg_channel);
++ net_accel_msg_complete_send_notify(bend->shared_page,
++ &bend->to_domU,
++ &lock_state,
++ bend->msg_channel_irq);
++ }
++}
++
++/* Send a local mac message to vnic */
++static void netback_accel_msg_tx_localmac(struct netback_accel *bend,
++ int type, const void *mac)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg;
++
++ BUG_ON(bend == NULL || mac == NULL);
++
++ VPRINTK("Sending local mac message: " MAC_FMT "\n",
++ MAC_ARG((const char *)mac));
++
++ msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
++ &lock_state);
++
++ if (msg != NULL) {
++ net_accel_msg_init(msg, NET_ACCEL_MSG_LOCALMAC);
++ msg->u.localmac.flags = type;
++ memcpy(msg->u.localmac.mac, mac, ETH_ALEN);
++ net_accel_msg_complete_send_notify(bend->shared_page,
++ &bend->to_domU,
++ &lock_state,
++ bend->msg_channel_irq);
++ } else {
++ /*
++ * TODO if this happens we may leave a domU
++ * fastpathing packets when they should be delivered
++ * locally. Solution is get domU to timeout entries
++ * in its fastpath lookup table when it receives no RX
++ * traffic
++ */
++ EPRINTK("%s: saw full queue, may need ARP timer to recover\n",
++ __FUNCTION__);
++ }
++}
++
++/* Send an add local mac message to vnic */
++void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
++ const void *mac)
++{
++ netback_accel_msg_tx_localmac(bend, NET_ACCEL_MSG_ADD, mac);
++}
++
++
++static int netback_accel_msg_rx_buffer_map(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ int log2_pages, rc;
++
++ /* Can only allocate in power of two */
++ log2_pages = log2_ge(msg->u.mapbufs.pages, 0);
++ if (msg->u.mapbufs.pages != pow2(log2_pages)) {
++ EPRINTK("%s: Can only alloc bufs in power of 2 sizes (%d)\n",
++ __FUNCTION__, msg->u.mapbufs.pages);
++ rc = -EINVAL;
++ goto err_out;
++ }
++
++ /*
++ * Sanity. Assumes NET_ACCEL_MSG_MAX_PAGE_REQ is same for
++ * both directions/domains
++ */
++ if (msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ) {
++ EPRINTK("%s: too many pages in a single message: %d %d\n",
++ __FUNCTION__, msg->u.mapbufs.pages,
++ NET_ACCEL_MSG_MAX_PAGE_REQ);
++ rc = -EINVAL;
++ goto err_out;
++ }
++
++ if ((rc = netback_accel_add_buffers(bend, msg->u.mapbufs.pages,
++ log2_pages, msg->u.mapbufs.grants,
++ &msg->u.mapbufs.buf)) < 0) {
++ goto err_out;
++ }
++
++ msg->id |= NET_ACCEL_MSG_REPLY;
++
++ return 0;
++
++ err_out:
++ EPRINTK("%s: err_out\n", __FUNCTION__);
++ msg->id |= NET_ACCEL_MSG_ERROR | NET_ACCEL_MSG_REPLY;
++ return rc;
++}
++
++
++/* Hint from frontend that one of our filters is out of date */
++static int netback_accel_process_fastpath(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ struct netback_accel_filter_spec spec;
++
++ if (msg->u.fastpath.flags & NET_ACCEL_MSG_REMOVE) {
++ /*
++ * Would be nice to BUG() this but would leave us
++ * vulnerable to naughty frontend
++ */
++ EPRINTK_ON(msg->u.fastpath.flags & NET_ACCEL_MSG_ADD);
++
++ memcpy(spec.mac, msg->u.fastpath.mac, ETH_ALEN);
++ spec.destport_be = msg->u.fastpath.port;
++ spec.destip_be = msg->u.fastpath.ip;
++ spec.proto = msg->u.fastpath.proto;
++
++ netback_accel_filter_remove_spec(bend, &spec);
++ }
++
++ return 0;
++}
++
++
++/* Flow control for message queues */
++inline void set_queue_not_full(struct netback_accel *bend)
++{
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("queue not full bit already set, not signalling\n");
++}
++
++
++/* Flow control for message queues */
++inline void set_queue_full(struct netback_accel *bend)
++{
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("queue full bit already set, not signalling\n");
++}
++
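++/*
++ * Flow-control sketch (illustrative): the shared aflags word has a
++ * bit per condition. The typical sequence, as in the handler below,
++ * is: frontend sets QUEUEUFULL, the backend drains its queue, then
++ * set_queue_not_full() test-and-sets the QUEUEUNOTFULL bit and
++ * kicks the event channel only on a 0->1 transition, so a blocked
++ * frontend is woken exactly once.
++ */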
++
++void netback_accel_set_interface_state(struct netback_accel *bend, int up)
++{
++ bend->shared_page->net_dev_up = up;
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
++ (unsigned long *)&bend->shared_page->aflags))
++ notify_remote_via_irq(bend->msg_channel_irq);
++ else
++ VPRINTK("interface up/down bit already set, not signalling\n");
++}
++
++
++static int check_rx_hello_version(unsigned version)
++{
++ /* Should only happen if there's been a version mismatch */
++ BUG_ON(version == NET_ACCEL_MSG_VERSION);
++
++ if (version > NET_ACCEL_MSG_VERSION) {
++ /* Newer protocol, we must refuse */
++ return -EPROTO;
++ }
++
++ if (version < NET_ACCEL_MSG_VERSION) {
++ /*
++ * We are newer, so have discretion to accept if we
++ * wish. For now however, just reject
++ */
++ return -EPROTO;
++ }
++
++ return -EINVAL;
++}
++
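++/*
++ * Example outcomes under the current policy: a frontend reporting a
++ * newer protocol version is refused outright; an older version
++ * could in principle be accepted, but is also refused for now, so
++ * any mismatch leads the caller to move the backend to Closing.
++ */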
++
++static int process_rx_msg(struct netback_accel *bend,
++ struct net_accel_msg *msg)
++{
++ int err = 0;
++
++ switch (msg->id) {
++ case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO:
++ /* Reply to a HELLO; mark ourselves as connected */
++ DPRINTK("got Hello reply, version %.8x\n",
++ msg->u.hello.version);
++
++ /*
++ * Check that we've not successfully done this
++ * already. NB no check at the moment that this reply
++ * comes after we've actually sent a HELLO as that's
++ * not possible with the current code structure
++ */
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ /* Store max_pages for accel_setup */
++ if (msg->u.hello.max_pages > bend->quotas.max_buf_pages) {
++ EPRINTK("More pages than quota allows (%d > %d)\n",
++ msg->u.hello.max_pages,
++ bend->quotas.max_buf_pages);
++ /* Force it down to the quota */
++ msg->u.hello.max_pages = bend->quotas.max_buf_pages;
++ }
++ bend->max_pages = msg->u.hello.max_pages;
++
++ /* Set up the hardware visible to the other end */
++ err = bend->accel_setup(bend);
++ if (err) {
++ /* This is fatal */
++ DPRINTK("Hello gave accel_setup error %d\n", err);
++ netback_accel_set_closing(bend);
++ } else {
++ /*
++ * Now add the context so that packet
++ * forwarding will commence
++ */
++ netback_accel_fwd_set_context(bend->mac, bend,
++ bend->fwd_priv);
++ }
++ break;
++ case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_ERROR:
++ EPRINTK("got Hello error, versions us:%.8x them:%.8x\n",
++ NET_ACCEL_MSG_VERSION, msg->u.hello.version);
++
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ if (msg->u.hello.version != NET_ACCEL_MSG_VERSION) {
++ /* Error is due to version mismatch */
++ err = check_rx_hello_version(msg->u.hello.version);
++ if (err == 0) {
++ /*
++ * It's OK to be compatible, send
++ * another hello with compatible version
++ */
++ netback_accel_msg_tx_hello
++ (bend, msg->u.hello.version);
++ } else {
++ /*
++ * Tell frontend that we're not going to
++ * send another HELLO by going to Closing.
++ */
++ netback_accel_set_closing(bend);
++ }
++ }
++ break;
++ case NET_ACCEL_MSG_MAPBUF:
++ VPRINTK("Got mapped buffers request %d\n",
++ msg->u.mapbufs.reqid);
++
++ if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ /*
++ * Frontend wants a buffer table entry for the
++ * supplied pages
++ */
++ err = netback_accel_msg_rx_buffer_map(bend, msg);
++ if (net_accel_msg_reply_notify(bend->shared_page,
++ bend->msg_channel_irq,
++ &bend->to_domU, msg)) {
++ /*
++ * This is fatal as we can't tell the frontend
++ * about the problem through the message
++ * queue, and so would otherwise stalemate
++ */
++ netback_accel_set_closing(bend);
++ }
++ break;
++ case NET_ACCEL_MSG_FASTPATH:
++ DPRINTK("Got fastpath request\n");
++
++ if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
++ return -EPROTO;
++
++ err = netback_accel_process_fastpath(bend, msg);
++ break;
++ default:
++ EPRINTK("Huh? Message code is %x\n", msg->id);
++ err = -EPROTO;
++ break;
++ }
++ return err;
++}
++
++
++/* Demultiplex an IRQ from the frontend driver. */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++void netback_accel_msg_rx_handler(struct work_struct *arg)
++#else
++void netback_accel_msg_rx_handler(void *bend_void)
++#endif
++{
++ struct net_accel_msg msg;
++ int err, queue_was_full = 0;
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ struct netback_accel *bend =
++ container_of(arg, struct netback_accel, handle_msg);
++#else
++ struct netback_accel *bend = (struct netback_accel *)bend_void;
++#endif
++
++ mutex_lock(&bend->bend_mutex);
++
++ /*
++ * This happens when the shared pages have been unmapped, but
++ * the workqueue not flushed yet
++ */
++ if (bend->shared_page == NULL)
++ goto done;
++
++ if ((bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK) != 0) {
++ if (bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL) {
++ /* We've been told there may now be space. */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
++ (unsigned long *)&bend->shared_page->aflags);
++ }
++
++ if (bend->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUEUFULL) {
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
++ (unsigned long *)&bend->shared_page->aflags);
++ queue_was_full = 1;
++ }
++ }
++
++ while ((err = net_accel_msg_recv(bend->shared_page, &bend->from_domU,
++ &msg)) == 0) {
++ err = process_rx_msg(bend, &msg);
++
++ if (err != 0) {
++ EPRINTK("%s: Error %d\n", __FUNCTION__, err);
++ goto err;
++ }
++ }
++
++ err:
++ /* There will be space now if we can make any. */
++ if (queue_was_full)
++ set_queue_not_full(bend);
++ done:
++ mutex_unlock(&bend->bend_mutex);
++
++ return;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_solarflare.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_solarflare.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,1267 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include "common.h"
++
++#include "accel.h"
++#include "accel_solarflare.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++
++#include "accel_cuckoo_hash.h"
++
++#include "ci/driver/resource/efx_vi.h"
++
++#include "ci/efrm/nic_table.h"
++#include "ci/efhw/public.h"
++
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <linux/list.h>
++#include <linux/mutex.h>
++
++#include "driverlink_api.h"
++
++#define SF_XEN_RX_USR_BUF_SIZE 2048
++
++struct falcon_bend_accel_priv {
++ struct efx_vi_state *efx_vih;
++
++ /*! Array of pointers to dma_map state, used so VNIC can
++ * request their removal in a single message
++ */
++ struct efx_vi_dma_map_state **dma_maps;
++ /*! Index into dma_maps */
++ int dma_maps_index;
++
++ /*! Serialises access to filters */
++ spinlock_t filter_lock;
++ /*! Bitmap of which filters are free */
++ unsigned long free_filters;
++ /*! Used for index normalisation */
++ u32 filter_idx_mask;
++ struct netback_accel_filter_spec *fspecs;
++ cuckoo_hash_table filter_hash_table;
++
++ u32 txdmaq_gnt;
++ u32 rxdmaq_gnt;
++ u32 doorbell_gnt;
++ u32 evq_rptr_gnt;
++ u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
++ u32 evq_npages;
++};
++
++/* Forward declaration */
++static int netback_accel_filter_init(struct netback_accel *);
++static void netback_accel_filter_shutdown(struct netback_accel *);
++
++/**************************************************************************
++ *
++ * Driverlink stuff
++ *
++ **************************************************************************/
++
++struct driverlink_port {
++ struct list_head link;
++ enum net_accel_hw_type type;
++ struct net_device *net_dev;
++ struct efx_dl_device *efx_dl_dev;
++ int nic_index;
++ void *fwd_priv;
++};
++
++static struct list_head dl_ports;
++
++/* This mutex protects global state, such as the dl_ports list */
++DEFINE_MUTEX(accel_mutex);
++
++static int init_done;
++
++/* The DL callbacks */
++
++
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++bend_dl_tx_packet(struct efx_dl_device *efx_dl_dev,
++ struct sk_buff *skb)
++{
++ struct driverlink_port *port = efx_dl_dev->priv;
++
++ BUG_ON(port == NULL);
++
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_packets++);
++ if (skb->mac.raw != NULL)
++ netback_accel_tx_packet(skb, port->fwd_priv);
++ else {
++ DPRINTK("Ignoring packet with missing mac address\n");
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_bad_packets++);
++ }
++ return EFX_ALLOW_PACKET;
++}
++
++/* EFX_USE_FASTCALL */
++#if defined(EFX_USE_FASTCALL)
++static enum efx_veto fastcall
++#else
++static enum efx_veto
++#endif
++bend_dl_rx_packet(struct efx_dl_device *efx_dl_dev,
++ const char *pkt_buf, int pkt_len)
++{
++ struct driverlink_port *port = efx_dl_dev->priv;
++ struct netback_pkt_buf pkt;
++ struct ethhdr *eh;
++
++ BUG_ON(port == NULL);
++
++ pkt.mac.raw = (char *)pkt_buf;
++ pkt.nh.raw = (char *)pkt_buf + ETH_HLEN;
++ eh = (struct ethhdr *)pkt_buf;
++ pkt.protocol = eh->h_proto;
++
++ NETBACK_ACCEL_STATS_OP(global_stats.dl_rx_packets++);
++ netback_accel_rx_packet(&pkt, port->fwd_priv);
++ return EFX_ALLOW_PACKET;
++}
++
++
++/* Callbacks we'd like to get from the netdriver through driverlink */
++struct efx_dl_callbacks bend_dl_callbacks =
++ {
++ .tx_packet = bend_dl_tx_packet,
++ .rx_packet = bend_dl_rx_packet,
++ };
++
++
++static struct netback_accel_hooks accel_hooks = {
++ THIS_MODULE,
++ &netback_accel_probe,
++ &netback_accel_remove
++};
++
++
++/*
++ * Handy helper which given an efx_dl_device works out which
++ * efab_nic_t index into efrm_nic_table.nics[] it corresponds to
++ */
++static int efx_device_to_efab_nic_index(struct efx_dl_device *efx_dl_dev)
++{
++ int i;
++
++ for (i = 0; i < EFHW_MAX_NR_DEVS; i++) {
++ struct efhw_nic *nic = efrm_nic_table.nic[i];
++
++ /*
++ * It's possible for the nic structure to have not
++ * been initialised if the resource driver failed its
++ * driverlink probe
++ */
++ if (nic == NULL || nic->net_driver_dev == NULL)
++ continue;
++
++ /* Work out if these are talking about the same NIC */
++ if (nic->net_driver_dev->pci_dev == efx_dl_dev->pci_dev)
++ return i;
++ }
++
++ return -1;
++}
++
++
++/* Driver link probe - register our callbacks */
++static int bend_dl_probe(struct efx_dl_device *efx_dl_dev,
++ const struct net_device *net_dev,
++ const struct efx_dl_device_info *dev_info,
++ const char* silicon_rev)
++{
++ int rc;
++ enum net_accel_hw_type type;
++ struct driverlink_port *port;
++
++ DPRINTK("%s: %s\n", __FUNCTION__, silicon_rev);
++
++ if (strcmp(silicon_rev, "falcon/a1") == 0)
++ type = NET_ACCEL_MSG_HWTYPE_FALCON_A;
++ else if (strcmp(silicon_rev, "falcon/b0") == 0)
++ type = NET_ACCEL_MSG_HWTYPE_FALCON_B;
++ else {
++ EPRINTK("%s: unsupported silicon %s\n", __FUNCTION__,
++ silicon_rev);
++ rc = -EINVAL;
++ goto fail1;
++ }
++
++ port = kmalloc(sizeof(struct driverlink_port), GFP_KERNEL);
++ if (port == NULL) {
++ EPRINTK("%s: no memory for dl probe\n", __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail1;
++ }
++
++ port->efx_dl_dev = efx_dl_dev;
++ efx_dl_dev->priv = port;
++
++ port->nic_index = efx_device_to_efab_nic_index(efx_dl_dev);
++ if (port->nic_index < 0) {
++ /*
++ * This can happen in theory if the resource driver
++ * failed to initialise properly
++ */
++ EPRINTK("%s: nic structure not found\n", __FUNCTION__);
++ rc = -EINVAL;
++ goto fail2;
++ }
++
++ port->fwd_priv = netback_accel_init_fwd_port();
++ if (port->fwd_priv == NULL) {
++ EPRINTK("%s: failed to set up forwarding for port\n",
++ __FUNCTION__);
++ rc = -ENOMEM;
++ goto fail2;
++ }
++
++ rc = efx_dl_register_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ if (rc != 0) {
++ EPRINTK("%s: register_callbacks failed\n", __FUNCTION__);
++ goto fail3;
++ }
++
++ port->type = type;
++ port->net_dev = (struct net_device *)net_dev;
++
++ mutex_lock(&accel_mutex);
++ list_add(&port->link, &dl_ports);
++ mutex_unlock(&accel_mutex);
++
++ rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
++ port->net_dev->name, &accel_hooks);
++
++ if (rc < 0) {
++ EPRINTK("Xen netback accelerator version mismatch\n");
++ goto fail4;
++ } else if (rc > 0) {
++ /*
++ * In future may want to add backwards compatibility
++ * and accept certain subsets of previous versions
++ */
++ EPRINTK("Xen netback accelerator version mismatch\n");
++ goto fail4;
++ }
++
++ return 0;
++
++ fail4:
++ mutex_lock(&accel_mutex);
++ list_del(&port->link);
++ mutex_unlock(&accel_mutex);
++
++ efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ fail3:
++ netback_accel_shutdown_fwd_port(port->fwd_priv);
++ fail2:
++ efx_dl_dev->priv = NULL;
++ kfree(port);
++ fail1:
++ return rc;
++}
++
++
++static void bend_dl_remove(struct efx_dl_device *efx_dl_dev)
++{
++ struct driverlink_port *port;
++
++ DPRINTK("Unregistering driverlink callbacks.\n");
++
++ mutex_lock(&accel_mutex);
++
++ port = (struct driverlink_port *)efx_dl_dev->priv;
++
++ BUG_ON(list_empty(&dl_ports));
++ BUG_ON(port == NULL);
++ BUG_ON(port->efx_dl_dev != efx_dl_dev);
++
++ netback_disconnect_accelerator(0, port->net_dev->name);
++
++ list_del(&port->link);
++
++ mutex_unlock(&accel_mutex);
++
++ efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
++ netback_accel_shutdown_fwd_port(port->fwd_priv);
++
++ efx_dl_dev->priv = NULL;
++ kfree(port);
++
++ return;
++}
++
++
++static struct efx_dl_driver bend_dl_driver =
++ {
++ .name = "SFC Xen backend",
++ .probe = bend_dl_probe,
++ .remove = bend_dl_remove,
++ };
++
++
++int netback_accel_sf_init(void)
++{
++ int rc, nic_i;
++ struct efhw_nic *nic;
++
++ INIT_LIST_HEAD(&dl_ports);
++
++ rc = efx_dl_register_driver(&bend_dl_driver);
++ /* If we couldn't find the NET driver, give up */
++ if (rc == -ENOENT)
++ return rc;
++
++ if (rc == 0) {
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ falcon_nic_set_rx_usr_buf_size(nic,
++ SF_XEN_RX_USR_BUF_SIZE);
++ }
++
++ init_done = (rc == 0);
++ return rc;
++}
++
++
++void netback_accel_sf_shutdown(void)
++{
++ if (!init_done)
++ return;
++ DPRINTK("Unregistering driverlink driver\n");
++
++ /*
++ * This will trigger removal callbacks for all the devices, which
++ * will unregister their callbacks, disconnect from netfront, etc.
++ */
++ efx_dl_unregister_driver(&bend_dl_driver);
++}
++
++
++int netback_accel_sf_hwtype(struct netback_accel *bend)
++{
++ struct driverlink_port *port;
++
++ mutex_lock(&accel_mutex);
++
++ list_for_each_entry(port, &dl_ports, link) {
++ if (strcmp(bend->nicname, port->net_dev->name) == 0) {
++ bend->hw_type = port->type;
++ bend->accel_setup = netback_accel_setup_vnic_hw;
++ bend->accel_shutdown = netback_accel_shutdown_vnic_hw;
++ bend->fwd_priv = port->fwd_priv;
++ /* This is just needed to pass to efx_vi_alloc */
++ bend->nic_index = port->nic_index;
++ bend->net_dev = port->net_dev;
++ mutex_unlock(&accel_mutex);
++ return 0;
++ }
++ }
++
++ mutex_unlock(&accel_mutex);
++
++ EPRINTK("Failed to identify backend device '%s' with a NIC\n",
++ bend->nicname);
++
++ return -ENOENT;
++}
++
++
++/****************************************************************************
++ * Resource management code
++ ***************************************************************************/
++
++static int alloc_page_state(struct netback_accel *bend, int max_pages)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++
++ if (max_pages < 0 || max_pages > bend->quotas.max_buf_pages) {
++ EPRINTK("%s: invalid max_pages: %d\n", __FUNCTION__, max_pages);
++ return -EINVAL;
++ }
++
++ accel_hw_priv = kzalloc(sizeof(struct falcon_bend_accel_priv),
++ GFP_KERNEL);
++ if (accel_hw_priv == NULL) {
++ EPRINTK("%s: no memory for accel_hw_priv\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ accel_hw_priv->dma_maps = kzalloc
++		(sizeof(struct efx_vi_dma_map_state *) *
++ (max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ), GFP_KERNEL);
++ if (accel_hw_priv->dma_maps == NULL) {
++ EPRINTK("%s: no memory for dma_maps\n", __FUNCTION__);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->buffer_maps = kzalloc(sizeof(struct vm_struct *) * max_pages,
++ GFP_KERNEL);
++ if (bend->buffer_maps == NULL) {
++ EPRINTK("%s: no memory for buffer_maps\n", __FUNCTION__);
++ kfree(accel_hw_priv->dma_maps);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->buffer_addrs = kzalloc(sizeof(u64) * max_pages, GFP_KERNEL);
++ if (bend->buffer_addrs == NULL) {
++ kfree(bend->buffer_maps);
++ kfree(accel_hw_priv->dma_maps);
++ kfree(accel_hw_priv);
++ return -ENOMEM;
++ }
++
++ bend->accel_hw_priv = accel_hw_priv;
++
++ return 0;
++}
++
++
++static int free_page_state(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++
++ DPRINTK("%s: %p\n", __FUNCTION__, bend);
++
++ accel_hw_priv = bend->accel_hw_priv;
++
++ if (accel_hw_priv) {
++ kfree(accel_hw_priv->dma_maps);
++ kfree(bend->buffer_maps);
++ kfree(bend->buffer_addrs);
++ kfree(accel_hw_priv);
++ bend->accel_hw_priv = NULL;
++ bend->max_pages = 0;
++ }
++
++ return 0;
++}
++
++
++/* The timeout event callback for the event q */
++static void bend_evq_timeout(void *context, int is_timeout)
++{
++ struct netback_accel *bend = (struct netback_accel *)context;
++ if (is_timeout) {
++ /* Pass event to vnic front end driver */
++ VPRINTK("timeout event to %d\n", bend->net_channel);
++ NETBACK_ACCEL_STATS_OP(bend->stats.evq_timeouts++);
++ notify_remote_via_irq(bend->net_channel_irq);
++ } else {
++ /* It's a wakeup event, used by Falcon */
++ VPRINTK("wakeup to %d\n", bend->net_channel);
++ NETBACK_ACCEL_STATS_OP(bend->stats.evq_wakeups++);
++ notify_remote_via_irq(bend->net_channel_irq);
++ }
++}
++
++
++/*
++ * Create the eventq and associated gubbins for communication with the
++ * front end vnic driver
++ */
++static int ef_get_vnic(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv;
++ int rc = 0;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_NONE);
++
++ /* Allocate page related state and accel_hw_priv */
++ rc = alloc_page_state(bend, bend->max_pages);
++ if (rc != 0) {
++ EPRINTK("Failed to allocate page state: %d\n", rc);
++ return rc;
++ }
++
++ accel_hw_priv = bend->accel_hw_priv;
++
++ rc = efx_vi_alloc(&accel_hw_priv->efx_vih, bend->nic_index);
++ if (rc != 0) {
++ EPRINTK("%s: efx_vi_alloc failed %d\n", __FUNCTION__, rc);
++ free_page_state(bend);
++ return rc;
++ }
++
++ rc = efx_vi_eventq_register_callback(accel_hw_priv->efx_vih,
++ bend_evq_timeout,
++ bend);
++ if (rc != 0) {
++ EPRINTK("%s: register_callback failed %d\n", __FUNCTION__, rc);
++ efx_vi_free(accel_hw_priv->efx_vih);
++ free_page_state(bend);
++ return rc;
++ }
++
++ bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
++
++ return 0;
++}
++
++
++static void ef_free_vnic(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
++
++ efx_vi_eventq_kill_callback(accel_hw_priv->efx_vih);
++
++ DPRINTK("Hardware is freeable. Will proceed.\n");
++
++ efx_vi_free(accel_hw_priv->efx_vih);
++ accel_hw_priv->efx_vih = NULL;
++
++ VPRINTK("Free page state...\n");
++ free_page_state(bend);
++
++ bend->hw_state = NETBACK_ACCEL_RES_NONE;
++}
++
++
++static inline void ungrant_or_crash(grant_ref_t gntref, int domain)
++{
++ if (net_accel_ungrant_page(gntref) == -EBUSY)
++ net_accel_shutdown_remote(domain);
++}
++
++
++static void netback_accel_release_hwinfo(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++
++ DPRINTK("Remove dma q grants %d %d\n", accel_hw_priv->txdmaq_gnt,
++ accel_hw_priv->rxdmaq_gnt);
++ ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
++ ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
++
++ DPRINTK("Remove doorbell grant %d\n", accel_hw_priv->doorbell_gnt);
++ ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
++
++ if (bend->hw_type == NET_ACCEL_MSG_HWTYPE_FALCON_A) {
++ DPRINTK("Remove rptr grant %d\n", accel_hw_priv->evq_rptr_gnt);
++ ungrant_or_crash(accel_hw_priv->evq_rptr_gnt, bend->far_end);
++ }
++
++ for (i = 0; i < accel_hw_priv->evq_npages; i++) {
++ DPRINTK("Remove evq grant %d\n", accel_hw_priv->evq_mem_gnts[i]);
++ ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
++ }
++
++ bend->hw_state = NETBACK_ACCEL_RES_FILTER;
++
++ return;
++}
++
++
++static int ef_bend_hwinfo_falcon_common(struct netback_accel *bend,
++ struct net_accel_hw_falcon_b *hwinfo)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ struct efx_vi_hw_resource_metadata res_mdata;
++ struct efx_vi_hw_resource res_array[EFX_VI_HW_RESOURCE_MAXSIZE];
++ int rc, len = EFX_VI_HW_RESOURCE_MAXSIZE, i, pfn = 0;
++ unsigned long txdmaq_pfn = 0, rxdmaq_pfn = 0;
++
++ rc = efx_vi_hw_resource_get_phys(accel_hw_priv->efx_vih, &res_mdata,
++ res_array, &len);
++ if (rc != 0) {
++ DPRINTK("%s: resource_get_phys returned %d\n",
++ __FUNCTION__, rc);
++ return rc;
++ }
++
++ if (res_mdata.version != 0)
++ return -EPROTO;
++
++ hwinfo->nic_arch = res_mdata.nic_arch;
++ hwinfo->nic_variant = res_mdata.nic_variant;
++ hwinfo->nic_revision = res_mdata.nic_revision;
++
++ hwinfo->evq_order = res_mdata.evq_order;
++ hwinfo->evq_offs = res_mdata.evq_offs;
++ hwinfo->evq_capacity = res_mdata.evq_capacity;
++ hwinfo->instance = res_mdata.instance;
++ hwinfo->rx_capacity = res_mdata.rx_capacity;
++ hwinfo->tx_capacity = res_mdata.tx_capacity;
++
++ VPRINTK("evq_order %d evq_offs %d evq_cap %d inst %d rx_cap %d tx_cap %d\n",
++ hwinfo->evq_order, hwinfo->evq_offs, hwinfo->evq_capacity,
++ hwinfo->instance, hwinfo->rx_capacity, hwinfo->tx_capacity);
++
++ for (i = 0; i < len; i++) {
++ struct efx_vi_hw_resource *res = &(res_array[i]);
++ switch (res->type) {
++ case EFX_VI_HW_RESOURCE_TXDMAQ:
++ txdmaq_pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_RXDMAQ:
++ rxdmaq_pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_EVQTIMER:
++ break;
++ case EFX_VI_HW_RESOURCE_EVQRPTR:
++ case EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET:
++ hwinfo->evq_rptr = res->address;
++ break;
++ case EFX_VI_HW_RESOURCE_EVQMEMKVA:
++ accel_hw_priv->evq_npages = 1 << res_mdata.evq_order;
++ pfn = page_to_pfn(virt_to_page(res->address));
++ break;
++ case EFX_VI_HW_RESOURCE_BELLPAGE:
++ hwinfo->doorbell_mfn = res->address;
++ break;
++ default:
++ EPRINTK("%s: Unknown hardware resource type %d\n",
++ __FUNCTION__, res->type);
++ break;
++ }
++ }
++
++ VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
++ accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn),
++ 0);
++
++ VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
++ accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn),
++ 0);
++
++ VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
++ /* Make the relevant H/W pages mappable by the far end */
++ accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt =
++ net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
++
++ /* Now do the same for the memory pages */
++ /* Convert the page + length we got back for the evq to grants. */
++ for (i = 0; i < accel_hw_priv->evq_npages; i++) {
++ accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] =
++ net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
++ VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i],
++ pfn);
++ pfn++;
++ }
++
++ return 0;
++}
++
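++/*
++ * Illustrative note: each net_accel_grant_page() call above returns
++ * a grant reference for the frontend to map. The final argument is
++ * 0 for the ordinary memory pages but 1 for the doorbell page,
++ * presumably marking it as a hardware I/O page.
++ */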
++
++static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend,
++ struct net_accel_hw_falcon_a *hwinfo)
++{
++ int rc;
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
++ return rc;
++
++ /*
++ * Note that unlike the above, where the message field is the
++ * page number, here evq_rptr is the entire address because
++ * it is currently a pointer into the densely mapped timer page.
++ */
++ VPRINTK("Passing evq_rptr pfn %x for rptr %x\n",
++ hwinfo->common.evq_rptr >> PAGE_SHIFT,
++ hwinfo->common.evq_rptr);
++ rc = net_accel_grant_page(bend->hdev_data,
++ hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
++ if (rc < 0)
++ return rc;
++
++ accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
++ VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);
++
++ return 0;
++}
++
++
++static int ef_bend_hwinfo_falcon_b(struct netback_accel *bend,
++ struct net_accel_hw_falcon_b *hwinfo)
++{
++ return ef_bend_hwinfo_falcon_common(bend, hwinfo);
++}
++
++
++/*
++ * Fill in the message with a description of the hardware resources, based on
++ * the H/W type
++ */
++static int netback_accel_hwinfo(struct netback_accel *bend,
++ struct net_accel_msg_hw *msgvi)
++{
++ int rc = 0;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
++
++ msgvi->type = bend->hw_type;
++ switch (bend->hw_type) {
++ case NET_ACCEL_MSG_HWTYPE_FALCON_A:
++ rc = ef_bend_hwinfo_falcon_a(bend, &msgvi->resources.falcon_a);
++ break;
++ case NET_ACCEL_MSG_HWTYPE_FALCON_B:
++ rc = ef_bend_hwinfo_falcon_b(bend, &msgvi->resources.falcon_b);
++ break;
++ case NET_ACCEL_MSG_HWTYPE_NONE:
++ /* Nothing to do. The slow path should just work. */
++ break;
++ }
++
++ if (rc == 0)
++ bend->hw_state = NETBACK_ACCEL_RES_HWINFO;
++
++ return rc;
++}
++
++
++/* Allocate hardware resources and make them available to the client domain */
++int netback_accel_setup_vnic_hw(struct netback_accel *bend)
++{
++ struct net_accel_msg msg;
++ int err;
++
++ /* Allocate the event queue, VI and so on. */
++ err = ef_get_vnic(bend);
++ if (err) {
++		EPRINTK("Failed to allocate hardware resource for bend: "
++			"error %d\n", err);
++ return err;
++ }
++
++ /* Set up the filter management */
++ err = netback_accel_filter_init(bend);
++ if (err) {
++		EPRINTK("Filter setup failed, error %d\n", err);
++ ef_free_vnic(bend);
++ return err;
++ }
++
++ net_accel_msg_init(&msg, NET_ACCEL_MSG_SETHW);
++
++ /*
++ * Extract the low-level hardware info we will actually pass to the
++ * other end, and set up the grants/ioremap permissions needed
++ */
++ err = netback_accel_hwinfo(bend, &msg.u.hw);
++
++ if (err != 0) {
++ netback_accel_filter_shutdown(bend);
++ ef_free_vnic(bend);
++ return err;
++ }
++
++ /* Send the message, this is a reply to a hello-reply */
++ err = net_accel_msg_reply_notify(bend->shared_page,
++ bend->msg_channel_irq,
++ &bend->to_domU, &msg);
++
++	/*
++	 * The send should succeed as it's logically a reply and we
++	 * guarantee space for replies, but a misbehaving frontend
++	 * could still provoke a failure here, so be tolerant
++	 */
++ if (err != 0) {
++ netback_accel_release_hwinfo(bend);
++ netback_accel_filter_shutdown(bend);
++ ef_free_vnic(bend);
++ }
++
++ return err;
++}
++
++
++/* Free hardware resources */
++void netback_accel_shutdown_vnic_hw(struct netback_accel *bend)
++{
++ /*
++ * Only try and release resources if accel_hw_priv was setup,
++ * otherwise there is nothing to do as we're on "null-op"
++ * acceleration
++ */
++ switch (bend->hw_state) {
++ case NETBACK_ACCEL_RES_HWINFO:
++ VPRINTK("Release hardware resources\n");
++ netback_accel_release_hwinfo(bend);
++		/* deliberate fall through */
++ case NETBACK_ACCEL_RES_FILTER:
++ VPRINTK("Free filters...\n");
++ netback_accel_filter_shutdown(bend);
++		/* deliberate fall through */
++ case NETBACK_ACCEL_RES_ALLOC:
++ VPRINTK("Free vnic...\n");
++ ef_free_vnic(bend);
++		/* deliberate fall through */
++ case NETBACK_ACCEL_RES_NONE:
++ break;
++ default:
++ BUG();
++ }
++}
++
++/**************************************************************************
++ *
++ * Buffer table stuff
++ *
++ **************************************************************************/
++
++/*
++ * Undo any allocation that netback_accel_msg_rx_buffer_map() has made
++ * if it fails half way through
++ */
++static inline void buffer_map_cleanup(struct netback_accel *bend, int i)
++{
++ while (i > 0) {
++ i--;
++ bend->buffer_maps_index--;
++ net_accel_unmap_device_page(bend->hdev_data,
++ bend->buffer_maps[bend->buffer_maps_index],
++ bend->buffer_addrs[bend->buffer_maps_index]);
++ }
++}
++
++
++int netback_accel_add_buffers(struct netback_accel *bend, int pages, int log2_pages,
++ u32 *grants, u32 *buf_addr_out)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ unsigned long long addr_array[NET_ACCEL_MSG_MAX_PAGE_REQ];
++ int rc, i, index;
++ u64 dev_bus_addr;
++
++ /* Make sure we can't overflow the dma_maps array */
++ if (accel_hw_priv->dma_maps_index >=
++ bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ) {
++ EPRINTK("%s: too many buffer table allocations: %d %d\n",
++ __FUNCTION__, accel_hw_priv->dma_maps_index,
++ bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ);
++ return -EINVAL;
++ }
++
++ /* Make sure we can't overflow the buffer_maps array */
++ if (bend->buffer_maps_index + pages > bend->max_pages) {
++ EPRINTK("%s: too many pages mapped: %d + %d > %d\n",
++ __FUNCTION__, bend->buffer_maps_index,
++ pages, bend->max_pages);
++ return -EINVAL;
++ }
++
++ for (i = 0; i < pages; i++) {
++ VPRINTK("%s: mapping page %d\n", __FUNCTION__, i);
++ rc = net_accel_map_device_page
++ (bend->hdev_data, grants[i],
++ &bend->buffer_maps[bend->buffer_maps_index],
++ &dev_bus_addr);
++
++ if (rc != 0) {
++ EPRINTK("error in net_accel_map_device_page\n");
++ buffer_map_cleanup(bend, i);
++ return rc;
++ }
++
++ bend->buffer_addrs[bend->buffer_maps_index] = dev_bus_addr;
++
++ bend->buffer_maps_index++;
++
++ addr_array[i] = dev_bus_addr;
++ }
++
++ VPRINTK("%s: mapping dma addresses to vih %p\n", __FUNCTION__,
++ accel_hw_priv->efx_vih);
++
++ index = accel_hw_priv->dma_maps_index;
++ if ((rc = efx_vi_dma_map_addrs(accel_hw_priv->efx_vih, addr_array, pages,
++ &(accel_hw_priv->dma_maps[index]))) < 0) {
++ EPRINTK("error in dma_map_pages\n");
++ buffer_map_cleanup(bend, i);
++ return rc;
++ }
++
++ accel_hw_priv->dma_maps_index++;
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages += pages);
++
++ *buf_addr_out = efx_vi_dma_get_map_addr(accel_hw_priv->efx_vih,
++ accel_hw_priv->dma_maps[index]);
++
++ return 0;
++}
++
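++/*
++ * Caller sketch: netback_accel_msg_rx_buffer_map() in accel_msg.c
++ * invokes this with the grant array from a MAPBUF message and
++ * returns the resulting buffer address to the frontend in its
++ * reply.
++ */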
++
++int netback_accel_remove_buffers(struct netback_accel *bend)
++{
++ /* Only try to free buffers if accel_hw_priv was setup */
++ if (bend->hw_state != NETBACK_ACCEL_RES_NONE) {
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++
++ efx_vi_reset(accel_hw_priv->efx_vih);
++
++ while (accel_hw_priv->dma_maps_index > 0) {
++ accel_hw_priv->dma_maps_index--;
++ i = accel_hw_priv->dma_maps_index;
++ efx_vi_dma_unmap_addrs(accel_hw_priv->efx_vih,
++ accel_hw_priv->dma_maps[i]);
++ }
++
++ while (bend->buffer_maps_index > 0) {
++ VPRINTK("Unmapping granted buffer %d\n",
++ bend->buffer_maps_index);
++ bend->buffer_maps_index--;
++ i = bend->buffer_maps_index;
++ net_accel_unmap_device_page(bend->hdev_data,
++ bend->buffer_maps[i],
++ bend->buffer_addrs[i]);
++ }
++
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages = 0);
++ }
++
++ return 0;
++}
++
++/**************************************************************************
++ *
++ * Filter stuff
++ *
++ **************************************************************************/
++
++static int netback_accel_filter_init(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i, rc;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
++
++ spin_lock_init(&accel_hw_priv->filter_lock);
++
++ if ((rc = cuckoo_hash_init(&accel_hw_priv->filter_hash_table,
++ 5 /* space for 32 filters */, 8)) != 0) {
++ EPRINTK("Failed to initialise filter hash table\n");
++ return rc;
++ }
++
++ accel_hw_priv->fspecs = kzalloc(sizeof(struct netback_accel_filter_spec) *
++ bend->quotas.max_filters,
++ GFP_KERNEL);
++
++ if (accel_hw_priv->fspecs == NULL) {
++ EPRINTK("No memory for filter specs.\n");
++ cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
++ return -ENOMEM;
++ }
++
++ for (i = 0; i < bend->quotas.max_filters; i++) {
++ accel_hw_priv->free_filters |= (1 << i);
++ }
++
++ /* Base mask on highest set bit in max_filters */
++ accel_hw_priv->filter_idx_mask = (1 << fls(bend->quotas.max_filters)) - 1;
++ VPRINTK("filter setup: max is %x mask is %x\n",
++ bend->quotas.max_filters, accel_hw_priv->filter_idx_mask);
++
++ bend->hw_state = NETBACK_ACCEL_RES_FILTER;
++
++ return 0;
++}
++
++
++static inline void make_filter_key(cuckoo_hash_ip_key *key,
++ struct netback_accel_filter_spec *filt)
++{
++ key->local_ip = filt->destip_be;
++ key->local_port = filt->destport_be;
++ key->proto = filt->proto;
++}
++
++
++static inline
++void netback_accel_free_filter(struct falcon_bend_accel_priv *accel_hw_priv,
++ int filter)
++{
++ cuckoo_hash_ip_key filter_key;
++
++ if (!(accel_hw_priv->free_filters & (1 << filter))) {
++ efx_vi_filter_stop(accel_hw_priv->efx_vih,
++ accel_hw_priv->fspecs[filter].filter_handle);
++ make_filter_key(&filter_key, &(accel_hw_priv->fspecs[filter]));
++ if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key)) {
++ EPRINTK("%s: Couldn't find filter to remove from table\n",
++ __FUNCTION__);
++ BUG();
++ }
++ }
++}
++
++
++static void netback_accel_filter_shutdown(struct netback_accel *bend)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ int i;
++ unsigned long flags;
++
++ BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++
++ BUG_ON(accel_hw_priv->fspecs == NULL);
++
++ for (i = 0; i < bend->quotas.max_filters; i++) {
++ netback_accel_free_filter(accel_hw_priv, i);
++ }
++
++ kfree(accel_hw_priv->fspecs);
++ accel_hw_priv->fspecs = NULL;
++ accel_hw_priv->free_filters = 0;
++
++ cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
++
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++
++ bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
++}
++
++
++/*! Suggest a filter to replace when we want to insert a new one and have
++ * none free.
++ */
++static unsigned get_victim_filter(struct netback_accel *bend)
++{
++ /*
++ * We could attempt to get really clever, and may do at some
++ * point, but random replacement is v. cheap and low on
++ * pathological worst cases.
++ */
++ unsigned index, cycles;
++
++ rdtscl(cycles);
++
++ /*
++ * Some doubt about the quality of the bottom few bits, so
++	 * throw 'em away
++ */
++ index = (cycles >> 4) & ((struct falcon_bend_accel_priv *)
++ bend->accel_hw_priv)->filter_idx_mask;
++ /*
++ * We don't enforce that the number of filters is a power of
++ * two, but the masking gets us to within one subtraction of a
++ * valid index
++ */
++ if (index >= bend->quotas.max_filters)
++ index -= bend->quotas.max_filters;
++ DPRINTK("backend %s->%d has no free filters. Filter %d will be evicted\n",
++ bend->nicname, bend->far_end, index);
++ return index;
++}
++
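++/*
++ * Worked example: with max_filters == 24, fls(24) == 5 and so
++ * filter_idx_mask == 0x1f. Masking yields 0..31; values 24..31
++ * exceed the quota by less than max_filters, so the single
++ * subtraction above folds them back into the valid range 0..23.
++ */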
++
++/* Add a filter for the specified IP/port to the backend */
++int
++netback_accel_filter_check_add(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ struct netback_accel_filter_spec *fs;
++ unsigned filter_index;
++ unsigned long flags;
++ int rc, recycling = 0;
++ cuckoo_hash_ip_key filter_key, evict_key;
++
++ BUG_ON(filt->proto != IPPROTO_TCP && filt->proto != IPPROTO_UDP);
++
++ DPRINTK("Will add %s filter for dst ip %08x and dst port %d\n",
++ (filt->proto == IPPROTO_TCP) ? "TCP" : "UDP",
++ be32_to_cpu(filt->destip_be), be16_to_cpu(filt->destport_be));
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++ /*
++ * Check to see if we're already filtering this IP address and
++ * port. Happens if you insert a filter mid-stream as there
++ * are many packets backed up to be delivered to dom0 already
++ */
++ make_filter_key(&filter_key, filt);
++ if (cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)(&filter_key),
++ &filter_index)) {
++ DPRINTK("Found matching filter %d already in table\n",
++ filter_index);
++ rc = -1;
++ goto out;
++ }
++
++ if (accel_hw_priv->free_filters == 0) {
++ filter_index = get_victim_filter(bend);
++ recycling = 1;
++ } else {
++ filter_index = __ffs(accel_hw_priv->free_filters);
++ clear_bit(filter_index, &accel_hw_priv->free_filters);
++ }
++
++ fs = &accel_hw_priv->fspecs[filter_index];
++
++ if (recycling) {
++ DPRINTK("Removing filter index %d handle %p\n", filter_index,
++ fs->filter_handle);
++
++ if ((rc = efx_vi_filter_stop(accel_hw_priv->efx_vih,
++ fs->filter_handle)) != 0) {
++ EPRINTK("Couldn't clear NIC filter table entry %d\n", rc);
++ }
++
++ make_filter_key(&evict_key, fs);
++ if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&evict_key)) {
++ EPRINTK("Couldn't find filter to remove from table\n");
++ BUG();
++ }
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_filters--);
++ }
++
++ /* Update the filter spec with new details */
++ *fs = *filt;
++
++ if ((rc = cuckoo_hash_add(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key, filter_index,
++ 1)) != 0) {
++ EPRINTK("Error (%d) adding filter to table\n", rc);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++ goto out;
++ }
++
++ rc = efx_vi_filter(accel_hw_priv->efx_vih, filt->proto, filt->destip_be,
++ filt->destport_be,
++ (struct filter_resource_t **)&fs->filter_handle);
++
++ if (rc != 0) {
++ EPRINTK("Hardware filter insertion failed. Error %d\n", rc);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++ cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)&filter_key);
++ rc = -1;
++ goto out;
++ }
++
++ NETBACK_ACCEL_STATS_OP(bend->stats.num_filters++);
++
++ VPRINTK("%s: success index %d handle %p\n", __FUNCTION__, filter_index,
++ fs->filter_handle);
++
++ rc = filter_index;
++ out:
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++ return rc;
++}
++
++
++/* Remove a filter entry for the specific device and IP/port */
++static void netback_accel_filter_remove(struct netback_accel *bend,
++ int filter_index)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++
++ BUG_ON(accel_hw_priv->free_filters & (1 << filter_index));
++ netback_accel_free_filter(accel_hw_priv, filter_index);
++ accel_hw_priv->free_filters |= (1 << filter_index);
++}
++
++
++/* Remove a filter entry for the specific device and IP/port */
++void netback_accel_filter_remove_spec(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt)
++{
++ struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
++ unsigned filter_found;
++ unsigned long flags;
++ cuckoo_hash_ip_key filter_key;
++ struct netback_accel_filter_spec *fs;
++
++ if (filt->proto == IPPROTO_TCP) {
++ DPRINTK("Remove TCP filter for dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ } else if (filt->proto == IPPROTO_UDP) {
++ DPRINTK("Remove UDP filter for dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ } else {
++ /*
++		 * This could be provoked by an evil frontend, so we can't
++		 * BUG(); it is harmless as it will fail the checks below
++ */
++ DPRINTK("Non-TCP/UDP filter dst ip %08x and dst port %d\n",
++ be32_to_cpu(filt->destip_be),
++ be16_to_cpu(filt->destport_be));
++ }
++
++ spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
++
++ make_filter_key(&filter_key, filt);
++ if (!cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
++ (cuckoo_hash_key *)(&filter_key),
++ &filter_found)) {
++ EPRINTK("Couldn't find matching filter already in table\n");
++ goto out;
++ }
++
++ /* Do a full check to make sure we've not had a hash collision */
++ fs = &accel_hw_priv->fspecs[filter_found];
++ if (fs->destip_be == filt->destip_be &&
++ fs->destport_be == filt->destport_be &&
++ fs->proto == filt->proto &&
++ !memcmp(fs->mac, filt->mac, ETH_ALEN)) {
++ netback_accel_filter_remove(bend, filter_found);
++ } else {
++ EPRINTK("Entry in hash table does not match filter spec\n");
++ goto out;
++ }
++
++ out:
++ spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
++}
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_solarflare.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_solarflare.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,88 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_SOLARFLARE_H
++#define NETBACK_ACCEL_SOLARFLARE_H
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++
++#include "driverlink_api.h"
++
++#define MAX_NICS 5
++#define MAX_PORTS 2
++
++
++extern int netback_accel_sf_init(void);
++extern void netback_accel_sf_shutdown(void);
++extern int netback_accel_sf_hwtype(struct netback_accel *bend);
++
++extern int netback_accel_sf_char_init(void);
++extern void netback_accel_sf_char_shutdown(void);
++
++extern int netback_accel_setup_vnic_hw(struct netback_accel *bend);
++extern void netback_accel_shutdown_vnic_hw(struct netback_accel *bend);
++
++extern int netback_accel_add_buffers(struct netback_accel *bend, int pages,
++ int log2_pages, u32 *grants,
++ u32 *buf_addr_out);
++extern int netback_accel_remove_buffers(struct netback_accel *bend);
++
++
++/* Add a filter for the specified IP/port to the backend */
++extern int
++netback_accel_filter_check_add(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt);
++/* Remove a filter entry by index */
++extern
++void netback_accel_filter_remove_index(struct netback_accel *bend,
++ int filter_index);
++extern
++void netback_accel_filter_remove_spec(struct netback_accel *bend,
++ struct netback_accel_filter_spec *filt);
++
++/* This is designed to look a bit like a skb */
++struct netback_pkt_buf {
++ union {
++ unsigned char *raw;
++ } mac;
++ union {
++ struct iphdr *iph;
++ struct arphdr *arph;
++ unsigned char *raw;
++ } nh;
++ int protocol;
++};
++
++/*! \brief Handle a received packet: insert fast path filters as necessary
++ * \param skb The packet buffer
++ */
++extern void netback_accel_rx_packet(struct netback_pkt_buf *skb, void *fwd_priv);
++
++/*! \brief Handle a transmitted packet: update fast path filters as necessary
++ * \param skb The packet buffer
++ */
++extern void netback_accel_tx_packet(struct sk_buff *skb, void *fwd_priv);
++
++#endif /* NETBACK_ACCEL_SOLARFLARE_H */
+Index: head-2008-11-25/drivers/xen/sfc_netback/accel_xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/accel_xenbus.c 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,831 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++#include <linux/mutex.h>
++
++/* drivers/xen/netback/common.h */
++#include "common.h"
++
++#include "accel.h"
++#include "accel_solarflare.h"
++#include "accel_util.h"
++
++#define NODENAME_PATH_FMT "backend/vif/%d/%d"
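++/* e.g. "backend/vif/1/0"; parsed by sscanf() in netback_accel_probe() to
++ * recover the frontend domid (far_end) and vif number.
++ */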
++
++#define NETBACK_ACCEL_FROM_XENBUS_DEVICE(_dev) (struct netback_accel *) \
++ ((struct backend_info *)(_dev)->dev.driver_data)->netback_accel_priv
++
++/* List of all the bends currently in existence. */
++struct netback_accel *bend_list = NULL;
++DEFINE_MUTEX(bend_list_mutex);
++
++/* Put in bend_list. Must hold bend_list_mutex */
++static void link_bend(struct netback_accel *bend)
++{
++ bend->next_bend = bend_list;
++ bend_list = bend;
++}
++
++/* Remove from bend_list. Must hold bend_list_mutex */
++static void unlink_bend(struct netback_accel *bend)
++{
++ struct netback_accel *tmp = bend_list;
++ struct netback_accel *prev = NULL;
++ while (tmp != NULL) {
++ if (tmp == bend) {
++ if (prev != NULL)
++ prev->next_bend = bend->next_bend;
++ else
++ bend_list = bend->next_bend;
++ return;
++ }
++ prev = tmp;
++ tmp = tmp->next_bend;
++ }
++}
++
++
++/* Demultiplex a message IRQ from the frontend driver. */
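++/* The real work is deferred to process context via schedule_work(). */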
++static irqreturn_t msgirq_from_frontend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ struct xenbus_device *dev = context;
++ struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++ VPRINTK("irq %d from device %s\n", irq, dev->nodename);
++ schedule_work(&bend->handle_msg);
++ return IRQ_HANDLED;
++}
++
++
++/*
++ * Demultiplex an IRQ from the frontend driver. This is never used
++ * functionally, but we need a handler to pass to the bind function,
++ * and it may be called spuriously
++ */
++static irqreturn_t netirq_from_frontend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ VPRINTK("netirq %d from device %s\n", irq,
++ ((struct xenbus_device *)context)->nodename);
++
++ return IRQ_HANDLED;
++}
++
++
++/* Read the limits values of the xenbus structure. */
++static
++void cfg_hw_quotas(struct xenbus_device *dev, struct netback_accel *bend)
++{
++ int err = xenbus_gather
++ (XBT_NIL, dev->nodename,
++ "limits/max-filters", "%d", &bend->quotas.max_filters,
++ "limits/max-buf-pages", "%d", &bend->quotas.max_buf_pages,
++ "limits/max-mcasts", "%d", &bend->quotas.max_mcasts,
++ NULL);
++ if (err) {
++ /*
++		 * TODO: what if they have previously been set by the
++		 * user?  This overwrites them with defaults, which may not
++		 * be what we want, but is useful in the startup case
++ */
++ DPRINTK("Failed to read quotas from xenbus, using defaults\n");
++ bend->quotas.max_filters = NETBACK_ACCEL_DEFAULT_MAX_FILTERS;
++ bend->quotas.max_buf_pages = sfc_netback_max_pages;
++ bend->quotas.max_mcasts = NETBACK_ACCEL_DEFAULT_MAX_MCASTS;
++ }
++
++ return;
++}
++
++
++static void bend_config_accel_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ struct netback_accel *bend;
++
++ bend = container_of(watch, struct netback_accel, config_accel_watch);
++
++ mutex_lock(&bend->bend_mutex);
++ if (bend->config_accel_watch.node != NULL) {
++ struct xenbus_device *dev =
++ (struct xenbus_device *)bend->hdev_data;
++ DPRINTK("Watch matched, got dev %p otherend %p\n",
++ dev, dev->otherend);
++		if (!xenbus_exists(XBT_NIL, watch->node, "")) {
++ DPRINTK("Ignoring watch as otherend seems invalid\n");
++ goto out;
++ }
++
++ cfg_hw_quotas(dev, bend);
++ }
++ out:
++ mutex_unlock(&bend->bend_mutex);
++ return;
++}
++
++
++/*
++ * Setup watch on "limits" in the backend vif info to know when
++ * configuration has been set
++ */
++static int setup_config_accel_watch(struct xenbus_device *dev,
++ struct netback_accel *bend)
++{
++ int err;
++
++ VPRINTK("Setting watch on %s/%s\n", dev->nodename, "limits");
++
++ err = xenbus_watch_path2(dev, dev->nodename, "limits",
++ &bend->config_accel_watch,
++ bend_config_accel_change);
++
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ bend->config_accel_watch.node = NULL;
++ return err;
++ }
++ return 0;
++}
++
++
++static int
++cfg_frontend_info(struct xenbus_device *dev, struct netback_accel *bend,
++ int *grants)
++{
++ /* Get some info from xenbus on the event channel and shmem grant */
++ int err = xenbus_gather(XBT_NIL, dev->otherend,
++ "accel-msg-channel", "%u", &bend->msg_channel,
++ "accel-ctrl-page", "%d", &(grants[0]),
++ "accel-msg-page", "%d", &(grants[1]),
++ "accel-net-channel", "%u", &bend->net_channel,
++ NULL);
++ if (err)
++ EPRINTK("failed to read event channels or shmem grant: %d\n",
++ err);
++ else
++ DPRINTK("got event chan %d and net chan %d from frontend\n",
++ bend->msg_channel, bend->net_channel);
++ return err;
++}
++
++
++/* Setup all the comms needed to chat with the front end driver */
++static int setup_vnic(struct xenbus_device *dev)
++{
++ struct netback_accel *bend;
++ int grants[2], err, msgs_per_queue;
++
++ bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++
++ err = cfg_frontend_info(dev, bend, grants);
++ if (err)
++ goto fail1;
++
++ /*
++	 * If we get here, the frontend is Connected and the configuration
++	 * options are available. All is well.
++ */
++
++ /* Get the hardware quotas for the VNIC in question. */
++ cfg_hw_quotas(dev, bend);
++
++ /* Set up the deferred work handlers */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&bend->handle_msg,
++ netback_accel_msg_rx_handler);
++#else
++ INIT_WORK(&bend->handle_msg,
++ netback_accel_msg_rx_handler,
++ (void*)bend);
++#endif
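++	/*
++	 * (Kernels before 2.6.20 passed an opaque data pointer to the work
++	 * handler; 2.6.20 onwards pass the work_struct itself, hence the
++	 * two INIT_WORK forms above.)
++	 */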
++
++ /* Request the frontend mac */
++ err = net_accel_xen_net_read_mac(dev, bend->mac);
++ if (err)
++ goto fail2;
++
++ /* Set up the shared page. */
++ bend->shared_page = net_accel_map_grants_contig(dev, grants, 2,
++ &bend->sh_pages_unmap);
++
++ if (bend->shared_page == NULL) {
++ EPRINTK("failed to map shared page for %s\n", dev->otherend);
++ err = -ENOMEM;
++ goto fail2;
++ }
++
++ /* Initialise the shared page(s) used for comms */
++ net_accel_msg_init_page(bend->shared_page, PAGE_SIZE,
++ bend->net_dev->flags & IFF_UP);
++
++ msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
++
++ net_accel_msg_init_queue
++ (&bend->to_domU, &bend->shared_page->queue0,
++ (struct net_accel_msg *)((__u8*)bend->shared_page + PAGE_SIZE),
++ msgs_per_queue);
++
++ net_accel_msg_init_queue
++ (&bend->from_domU, &bend->shared_page->queue1,
++ (struct net_accel_msg *)((__u8*)bend->shared_page +
++ (3 * PAGE_SIZE / 2)),
++ msgs_per_queue);
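++	/*
++	 * Shared area layout: the first page carries the two queue control
++	 * structures; the second page is split in half, the lower half
++	 * backing the to_domU queue and the upper half the from_domU queue.
++	 */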
++
++ /* Bind the message event channel to a handler
++ *
++ * Note that we will probably get a spurious interrupt when we
++ * do this, so it must not be done until we have set up
++ * everything we need to handle it.
++ */
++ err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
++ bend->msg_channel,
++ msgirq_from_frontend,
++ 0,
++ "netback_accel",
++ dev);
++ if (err < 0) {
++ EPRINTK("failed to bind event channel: %d\n", err);
++ goto fail3;
++	} else
++ bend->msg_channel_irq = err;
++
++ /* TODO: No need to bind this evtchn to an irq. */
++ err = bind_interdomain_evtchn_to_irqhandler(dev->otherend_id,
++ bend->net_channel,
++ netirq_from_frontend,
++ 0,
++ "netback_accel",
++ dev);
++ if (err < 0) {
++ EPRINTK("failed to bind net channel: %d\n", err);
++ goto fail4;
++	} else
++ bend->net_channel_irq = err;
++
++ /*
++	 * Grab ourselves an entry in the forwarding hash table. We do
++	 * this now so we don't have the embarrassment of sorting out
++	 * an allocation failure in IRQ context. Because we pass NULL as
++ * the context, the actual hash lookup will succeed for this
++ * NIC, but the check for somewhere to forward to will
++ * fail. This is necessary to prevent forwarding before
++ * hardware resources are set up
++ */
++ err = netback_accel_fwd_add(bend->mac, NULL, bend->fwd_priv);
++ if (err) {
++ EPRINTK("failed to add to fwd hash table\n");
++ goto fail5;
++ }
++
++ /*
++ * Say hello to frontend. Important to do this straight after
++ * obtaining the message queue as otherwise we are vulnerable
++ * to an evil frontend sending a HELLO-REPLY before we've sent
++ * the HELLO and confusing us
++ */
++ netback_accel_msg_tx_hello(bend, NET_ACCEL_MSG_VERSION);
++ return 0;
++
++ fail5:
++ unbind_from_irqhandler(bend->net_channel_irq, dev);
++ fail4:
++ unbind_from_irqhandler(bend->msg_channel_irq, dev);
++ fail3:
++ net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
++ bend->shared_page = NULL;
++ bend->sh_pages_unmap = NULL;
++ fail2:
++ fail1:
++ return err;
++}
++
++
++static int read_nicname(struct xenbus_device *dev, struct netback_accel *bend)
++{
++ int len;
++
++ /* nic name used to select interface used for acceleration */
++ bend->nicname = xenbus_read(XBT_NIL, dev->nodename, "accel", &len);
++ if (IS_ERR(bend->nicname))
++ return PTR_ERR(bend->nicname);
++
++ return 0;
++}
++
++static const char *frontend_name = "sfc_netfront";
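++/* Advertised to the frontend via the "accel-frontend" xenbus key below */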
++
++static int publish_frontend_name(struct xenbus_device *dev)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ /* Publish the name of the frontend driver */
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed\n", __FUNCTION__);
++ return err;
++ }
++ err = xenbus_printf(tr, dev->nodename, "accel-frontend",
++ "%s", frontend_name);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_printf failed\n", __FUNCTION__);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("failed to end frontend name transaction\n");
++ return err;
++ }
++ return 0;
++}
++
++
++static int unpublish_frontend_name(struct xenbus_device *dev)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0)
++ break;
++ err = xenbus_rm(tr, dev->nodename, "accel-frontend");
++ if (err != 0) {
++ xenbus_transaction_end(tr, 1);
++ break;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ return err;
++}
++
++
++static void cleanup_vnic(struct netback_accel *bend)
++{
++ struct xenbus_device *dev;
++
++ dev = (struct xenbus_device *)bend->hdev_data;
++
++ DPRINTK("%s: bend %p dev %p\n", __FUNCTION__, bend, dev);
++
++ DPRINTK("%s: Remove %p's mac from fwd table...\n",
++ __FUNCTION__, bend);
++ netback_accel_fwd_remove(bend->mac, bend->fwd_priv);
++
++ /* Free buffer table allocations */
++ netback_accel_remove_buffers(bend);
++
++ DPRINTK("%s: Release hardware resources...\n", __FUNCTION__);
++ if (bend->accel_shutdown)
++ bend->accel_shutdown(bend);
++
++ if (bend->net_channel_irq) {
++ unbind_from_irqhandler(bend->net_channel_irq, dev);
++ bend->net_channel_irq = 0;
++ }
++
++ if (bend->msg_channel_irq) {
++ unbind_from_irqhandler(bend->msg_channel_irq, dev);
++ bend->msg_channel_irq = 0;
++ }
++
++ if (bend->sh_pages_unmap) {
++ DPRINTK("%s: Unmap grants %p\n", __FUNCTION__,
++ bend->sh_pages_unmap);
++ net_accel_unmap_grants_contig(dev, bend->sh_pages_unmap);
++ bend->sh_pages_unmap = NULL;
++ bend->shared_page = NULL;
++ }
++}
++
++
++/*************************************************************************/
++
++/*
++ * The following code handles accelstate changes between the frontend
++ * and the backend. It calls setup_vnic and cleanup_vnic in matching
++ * pairs in response to transitions.
++ *
++ * Valid state transitions for Dom0 are as follows:
++ *
++ * Closed->Init on probe or in response to Init from domU
++ * Closed->Closing on error/remove
++ *
++ * Init->Connected in response to Connected from domU
++ * Init->Closing on error/remove or in response to Closing from domU
++ *
++ * Connected->Closing on error/remove or in response to Closing from domU
++ *
++ * Closing->Closed in response to Closed from domU
++ *
++ */
++
++
++static void netback_accel_frontend_changed(struct xenbus_device *dev,
++ XenbusState frontend_state)
++{
++ struct netback_accel *bend = NETBACK_ACCEL_FROM_XENBUS_DEVICE(dev);
++ XenbusState backend_state;
++
++ DPRINTK("%s: changing from %s to %s. nodename %s, otherend %s\n",
++ __FUNCTION__, xenbus_strstate(bend->frontend_state),
++		xenbus_strstate(frontend_state), dev->nodename, dev->otherend);
++
++ /*
++ * Ignore duplicate state changes. This can happen if the
++ * frontend changes state twice in quick succession and the
++ * first watch fires in the backend after the second
++ * transition has completed.
++ */
++ if (bend->frontend_state == frontend_state)
++ return;
++
++ bend->frontend_state = frontend_state;
++ backend_state = bend->backend_state;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ if (backend_state == XenbusStateClosed &&
++ !bend->removing)
++ backend_state = XenbusStateInitialising;
++ break;
++
++ case XenbusStateConnected:
++ if (backend_state == XenbusStateInitialising) {
++ if (!bend->vnic_is_setup &&
++ setup_vnic(dev) == 0) {
++ bend->vnic_is_setup = 1;
++ backend_state = XenbusStateConnected;
++ } else {
++ backend_state = XenbusStateClosing;
++ }
++ }
++ break;
++
++ case XenbusStateInitWait:
++ case XenbusStateInitialised:
++ default:
++ DPRINTK("Unknown state %s (%d) from frontend.\n",
++ xenbus_strstate(frontend_state), frontend_state);
++ /* Unknown state. Fall through. */
++ case XenbusStateClosing:
++ if (backend_state != XenbusStateClosed)
++ backend_state = XenbusStateClosing;
++
++ /*
++ * The bend will now persist (with watches active) in
++ * case the frontend comes back again, eg. after
++ * frontend module reload or suspend/resume
++ */
++
++ break;
++
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ if (bend->vnic_is_setup) {
++ bend->vnic_is_setup = 0;
++ cleanup_vnic(bend);
++ }
++
++ if (backend_state == XenbusStateClosing)
++ backend_state = XenbusStateClosed;
++ break;
++ }
++
++ if (backend_state != bend->backend_state) {
++ DPRINTK("Switching from state %s (%d) to %s (%d)\n",
++ xenbus_strstate(bend->backend_state),
++ bend->backend_state,
++ xenbus_strstate(backend_state), backend_state);
++ bend->backend_state = backend_state;
++ net_accel_update_state(dev, backend_state);
++ }
++
++ wake_up(&bend->state_wait_queue);
++}
++
++
++/* accelstate on the frontend's xenbus node has changed */
++static void bend_domu_accel_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int state;
++ struct netback_accel *bend;
++
++ bend = container_of(watch, struct netback_accel, domu_accel_watch);
++ if (bend->domu_accel_watch.node != NULL) {
++ struct xenbus_device *dev =
++ (struct xenbus_device *)bend->hdev_data;
++ VPRINTK("Watch matched, got dev %p otherend %p\n",
++ dev, dev->otherend);
++ /*
++ * dev->otherend != NULL check to protect against
++ * watch firing when domain goes away and we haven't
++ * yet cleaned up
++ */
++ if (!dev->otherend ||
++ !xenbus_exists(XBT_NIL, watch->node, "") ||
++ strncmp(dev->otherend, vec[XS_WATCH_PATH],
++ strlen(dev->otherend))) {
++ DPRINTK("Ignoring watch as otherend seems invalid\n");
++ return;
++ }
++
++ mutex_lock(&bend->bend_mutex);
++
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
++ &state);
++ netback_accel_frontend_changed(dev, state);
++
++ mutex_unlock(&bend->bend_mutex);
++ }
++}
++
++/* Setup watch on frontend's accelstate */
++static int setup_domu_accel_watch(struct xenbus_device *dev,
++ struct netback_accel *bend)
++{
++ int err;
++
++ VPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");
++
++ err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
++ &bend->domu_accel_watch,
++ bend_domu_accel_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++ return 0;
++ fail:
++ bend->domu_accel_watch.node = NULL;
++ return err;
++}
++
++
++int netback_accel_probe(struct xenbus_device *dev)
++{
++ struct netback_accel *bend;
++ struct backend_info *binfo;
++ int err;
++
++ DPRINTK("%s: passed device %s\n", __FUNCTION__, dev->nodename);
++
++ /* Allocate structure to store all our state... */
++ bend = kzalloc(sizeof(struct netback_accel), GFP_KERNEL);
++ if (bend == NULL) {
++ DPRINTK("%s: no memory for bend\n", __FUNCTION__);
++ return -ENOMEM;
++ }
++
++ mutex_init(&bend->bend_mutex);
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* ...and store it where we can get at it */
++ binfo = (struct backend_info *) dev->dev.driver_data;
++ binfo->netback_accel_priv = bend;
++ /* And vice-versa */
++ bend->hdev_data = dev;
++
++ DPRINTK("%s: Adding bend %p to list\n", __FUNCTION__, bend);
++
++ init_waitqueue_head(&bend->state_wait_queue);
++ bend->vnic_is_setup = 0;
++ bend->frontend_state = XenbusStateUnknown;
++ bend->backend_state = XenbusStateClosed;
++ bend->removing = 0;
++
++ sscanf(dev->nodename, NODENAME_PATH_FMT, &bend->far_end,
++ &bend->vif_num);
++
++ err = read_nicname(dev, bend);
++ if (err) {
++ /*
++ * Technically not an error, just means we're not
++ * supposed to accelerate this
++ */
++ DPRINTK("failed to get device name\n");
++ goto fail_nicname;
++ }
++
++ /*
++ * Look up the device name in the list of NICs provided by
++ * driverlink to get the hardware type.
++ */
++ err = netback_accel_sf_hwtype(bend);
++ if (err) {
++ /*
++ * Technically not an error, just means we're not
++ * supposed to accelerate this, probably belongs to
++ * some other backend
++ */
++ DPRINTK("failed to match device name\n");
++ goto fail_init_type;
++ }
++
++ err = publish_frontend_name(dev);
++ if (err)
++ goto fail_publish;
++
++ err = netback_accel_debugfs_create(bend);
++ if (err)
++ goto fail_debugfs;
++
++ mutex_unlock(&bend->bend_mutex);
++
++ err = setup_config_accel_watch(dev, bend);
++ if (err)
++ goto fail_config_watch;
++
++ err = setup_domu_accel_watch(dev, bend);
++ if (err)
++ goto fail_domu_watch;
++
++ /*
++ * Indicate to the other end that we're ready to start unless
++ * the watch has already fired.
++ */
++ mutex_lock(&bend->bend_mutex);
++ if (bend->backend_state == XenbusStateClosed) {
++ bend->backend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, XenbusStateInitialising);
++ }
++ mutex_unlock(&bend->bend_mutex);
++
++ mutex_lock(&bend_list_mutex);
++ link_bend(bend);
++ mutex_unlock(&bend_list_mutex);
++
++ return 0;
++
++fail_domu_watch:
++
++ unregister_xenbus_watch(&bend->config_accel_watch);
++ kfree(bend->config_accel_watch.node);
++fail_config_watch:
++
++ /*
++ * Flush the scheduled work queue before freeing bend to get
++ * rid of any pending netback_accel_msg_rx_handler()
++ */
++ flush_scheduled_work();
++
++ mutex_lock(&bend->bend_mutex);
++ net_accel_update_state(dev, XenbusStateUnknown);
++ netback_accel_debugfs_remove(bend);
++fail_debugfs:
++
++ unpublish_frontend_name(dev);
++fail_publish:
++
++ /* No need to reverse netback_accel_sf_hwtype. */
++fail_init_type:
++
++ kfree(bend->nicname);
++fail_nicname:
++ binfo->netback_accel_priv = NULL;
++ mutex_unlock(&bend->bend_mutex);
++ kfree(bend);
++ return err;
++}
++
++
++int netback_accel_remove(struct xenbus_device *dev)
++{
++ struct backend_info *binfo;
++ struct netback_accel *bend;
++ int frontend_state;
++
++ binfo = (struct backend_info *) dev->dev.driver_data;
++ bend = (struct netback_accel *) binfo->netback_accel_priv;
++
++ DPRINTK("%s: dev %p bend %p\n", __FUNCTION__, dev, bend);
++
++ BUG_ON(bend == NULL);
++
++ mutex_lock(&bend_list_mutex);
++ unlink_bend(bend);
++ mutex_unlock(&bend_list_mutex);
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* Reject any requests to connect. */
++ bend->removing = 1;
++
++ /*
++ * Switch to closing to tell the other end that we're going
++ * away.
++ */
++ if (bend->backend_state != XenbusStateClosing) {
++ bend->backend_state = XenbusStateClosing;
++ net_accel_update_state(dev, XenbusStateClosing);
++ }
++
++ frontend_state = (int)XenbusStateUnknown;
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d",
++ &frontend_state);
++
++ mutex_unlock(&bend->bend_mutex);
++
++ /*
++ * Wait until this end goes to the closed state. This happens
++ * in response to the other end going to the closed state.
++ * Don't bother doing this if the other end is already closed
++ * because if it is then there is nothing to do.
++ */
++ if (frontend_state != (int)XenbusStateClosed &&
++ frontend_state != (int)XenbusStateUnknown)
++ wait_event(bend->state_wait_queue,
++ bend->backend_state == XenbusStateClosed);
++
++ unregister_xenbus_watch(&bend->domu_accel_watch);
++ kfree(bend->domu_accel_watch.node);
++
++ unregister_xenbus_watch(&bend->config_accel_watch);
++ kfree(bend->config_accel_watch.node);
++
++ /*
++ * Flush the scheduled work queue before freeing bend to get
++ * rid of any pending netback_accel_msg_rx_handler()
++ */
++ flush_scheduled_work();
++
++ mutex_lock(&bend->bend_mutex);
++
++ /* Tear down the vnic if it was set up. */
++ if (bend->vnic_is_setup) {
++ bend->vnic_is_setup = 0;
++ cleanup_vnic(bend);
++ }
++
++ bend->backend_state = XenbusStateUnknown;
++ net_accel_update_state(dev, XenbusStateUnknown);
++
++ netback_accel_debugfs_remove(bend);
++
++ unpublish_frontend_name(dev);
++
++ kfree(bend->nicname);
++
++ binfo->netback_accel_priv = NULL;
++
++ mutex_unlock(&bend->bend_mutex);
++
++ kfree(bend);
++
++ return 0;
++}
++
++
++void netback_accel_shutdown_bends(void)
++{
++ mutex_lock(&bend_list_mutex);
++ /*
++	 * We should have received a remove callback for every interface
++	 * before being allowed to unload the module
++ */
++ BUG_ON(bend_list != NULL);
++ mutex_unlock(&bend_list_mutex);
++}
++
++
++void netback_accel_set_closing(struct netback_accel *bend)
++{
++ bend->backend_state = XenbusStateClosing;
++ net_accel_update_state((struct xenbus_device *)bend->hdev_data,
++ XenbusStateClosing);
++}
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,53 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Compatibility layer.  Provides definitions of fundamental
++ * types and definitions that are used throughout CI source
++ * code. It does not introduce any link time dependencies,
++ * or include any unnecessary system headers.
++ */
++/*! \cidoxg_include_ci */
++
++#ifndef __CI_COMPAT_H__
++#define __CI_COMPAT_H__
++
++#ifdef __cplusplus
++extern "C" {
++#endif
++
++#include <ci/compat/primitive.h>
++#include <ci/compat/sysdep.h>
++#include <ci/compat/utils.h>
++
++
++#ifdef __cplusplus
++}
++#endif
++
++#endif /* __CI_COMPAT_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/gcc.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/gcc.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,158 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_GCC_H__
++#define __CI_COMPAT_GCC_H__
++
++
++#define CI_HAVE_INT64
++
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++# include <linux/types.h>
++
++typedef __u64 ci_uint64;
++typedef __s64 ci_int64;
++# if BITS_PER_LONG == 32
++typedef __s32 ci_ptr_arith_t;
++typedef __u32 ci_uintptr_t;
++# else
++typedef __s64 ci_ptr_arith_t;
++typedef __u64 ci_uintptr_t;
++# endif
++
++
++/* it's not obvious why the "l" formats below are wrong for x86_64,
++ * but gcc complains on that platform
++ */
++# if defined(__ia64__)
++# define CI_PRId64 "ld"
++# define CI_PRIi64 "li"
++# define CI_PRIo64 "lo"
++# define CI_PRIu64 "lu"
++# define CI_PRIx64 "lx"
++# define CI_PRIX64 "lX"
++# else
++# define CI_PRId64 "lld"
++# define CI_PRIi64 "lli"
++# define CI_PRIo64 "llo"
++# define CI_PRIu64 "llu"
++# define CI_PRIx64 "llx"
++# define CI_PRIX64 "llX"
++# endif
++
++# define CI_PRId32 "d"
++# define CI_PRIi32 "i"
++# define CI_PRIo32 "o"
++# define CI_PRIu32 "u"
++# define CI_PRIx32 "x"
++# define CI_PRIX32 "X"
++
++#else
++
++# include <stdint.h>
++# include <inttypes.h>
++
++typedef uint64_t ci_uint64;
++typedef int64_t ci_int64;
++typedef intptr_t ci_ptr_arith_t;
++typedef uintptr_t ci_uintptr_t;
++
++# define CI_PRId64 PRId64
++# define CI_PRIi64 PRIi64
++# define CI_PRIo64 PRIo64
++# define CI_PRIu64 PRIu64
++# define CI_PRIx64 PRIx64
++# define CI_PRIX64 PRIX64
++
++# define CI_PRId32 PRId32
++# define CI_PRIi32 PRIi32
++# define CI_PRIo32 PRIo32
++# define CI_PRIu32 PRIu32
++# define CI_PRIx32 PRIx32
++# define CI_PRIX32 PRIX32
++
++#endif
++
++
++typedef ci_uint64 ci_fixed_descriptor_t;
++
++#define from_fixed_descriptor(desc) ((ci_uintptr_t)(desc))
++#define to_fixed_descriptor(desc) ((ci_fixed_descriptor_t)(ci_uintptr_t)(desc))
++
++
++#if __GNUC__ >= 3 && !defined(__cplusplus)
++/*
++** Checks that [p_mbr] has the same type as [&c_type::mbr_name].
++*/
++# define CI_CONTAINER(c_type, mbr_name, p_mbr) \
++ __builtin_choose_expr( \
++ __builtin_types_compatible_p(__typeof__(&((c_type*)0)->mbr_name), \
++ __typeof__(p_mbr)), \
++ __CI_CONTAINER(c_type, mbr_name, p_mbr), (void)0)
++
++# define ci_restrict __restrict__
++#endif
++
++
++#if !defined(__KERNEL__) || defined(__unix__)
++#define CI_HAVE_NPRINTF 1
++#endif
++
++
++/* At what version was this introduced? */
++#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ > 91)
++# define CI_LIKELY(t) __builtin_expect((t), 1)
++# define CI_UNLIKELY(t) __builtin_expect((t), 0)
++#endif
++
++/**********************************************************************
++ * Attributes
++ */
++#if __GNUC__ >= 3 && defined(NDEBUG)
++# define CI_HF __attribute__((visibility("hidden")))
++# define CI_HV __attribute__((visibility("hidden")))
++#else
++# define CI_HF
++# define CI_HV
++#endif
++
++#if __GNUC__ >= 4 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)
++# define ci_noinline static __attribute__((__noinline__))
++/* (Linux 2.6 defines its own "noinline", so we use the "__noinline__" form) */
++#else
++# define ci_noinline static
++#endif
++
++#define CI_ALIGN(x) __attribute__ ((aligned (x)))
++
++#define CI_PRINTF_LIKE(a,b) __attribute__((format(printf,a,b)))
++
++#endif /* __CI_COMPAT_GCC_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/gcc_x86.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/gcc_x86.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,115 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_GCC_X86_H__
++#define __CI_COMPAT_GCC_X86_H__
++
++/*
++** The facts:
++**
++** SSE sfence
++** SSE2 lfence, mfence, pause
++*/
++
++/*
++ Barriers to enforce ordering with respect to:
++
++   normal memory use:  ci_wmb, ci_rmb, ci_mb
++ IO bus access use: ci_wiob, ci_riob, ci_iob
++*/
++#if defined(__x86_64__)
++# define ci_x86_mb() __asm__ __volatile__ ("lock; addl $0,0(%%rsp)":::"memory")
++#else
++# define ci_x86_mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)":::"memory")
++#endif
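++
++/*
++ * A LOCKed add of zero to the top of the stack acts as a full memory
++ * barrier on processors that predate mfence, without modifying any data.
++ */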
++
++/* ?? measure the latency impact of sfence on a modern processor before
++   deciding how to integrate with respect to write-combining */
++
++/* DJR: I don't think we need to add "memory" here. It means the asm does
++** something to memory that GCC doesn't understand. But all this does is
++** commit changes that GCC thinks have already happened. NB. GCC will not
++** reorder across a __volatile__ __asm__ anyway.
++*/
++#define ci_gcc_fence() __asm__ __volatile__ ("")
++
++#if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ci_x86_sfence() __asm__ __volatile__ ("sfence")
++# define ci_x86_lfence() __asm__ __volatile__ ("lfence")
++# define ci_x86_mfence() __asm__ __volatile__ ("mfence")
++#else
++# define ci_x86_sfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# define ci_x86_lfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xE8")
++# define ci_x86_mfence() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF0")
++#endif
++
++
++/* x86 processors up to the P4 Xeon store in-order unless executing
++   streaming extensions or using write-combining.
++
++   Hence we do not define ci_wmb to use sfence by default. The requirement
++   is that we do not use write-combining to memory, and any code which
++   uses SSE extensions must call sfence directly.
++
++   We need to track non-Intel clones which may support out-of-order
++   stores.
++*/
++
++#if CI_CPU_OOS
++# if CI_CPU_HAS_SSE
++# define ci_wmb() ci_x86_sfence()
++# else
++# define ci_wmb() ci_x86_mb()
++# endif
++#else
++# define ci_wmb() ci_gcc_fence()
++#endif
++
++#if CI_CPU_HAS_SSE2
++# define ci_rmb() ci_x86_lfence()
++# define ci_mb() ci_x86_mfence()
++# define ci_riob() ci_x86_lfence()
++# define ci_wiob() ci_x86_sfence()
++# define ci_iob() ci_x86_mfence()
++#else
++# if CI_CPU_HAS_SSE
++# define ci_wiob() ci_x86_sfence()
++# else
++# define ci_wiob() ci_x86_mb()
++# endif
++# define ci_rmb() ci_x86_mb()
++# define ci_mb() ci_x86_mb()
++# define ci_riob() ci_x86_mb()
++# define ci_iob() ci_x86_mb()
++#endif
++
++typedef unsigned long ci_phys_addr_t;
++#define ci_phys_addr_fmt "%lx"
++
++#endif /* __CI_COMPAT_GCC_X86_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/primitive.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/primitive.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,77 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_PRIMITIVE_H__
++#define __CI_COMPAT_PRIMITIVE_H__
++
++
++/**********************************************************************
++ * Primitive types.
++ */
++
++typedef unsigned char ci_uint8;
++typedef char ci_int8;
++
++typedef unsigned short ci_uint16;
++typedef short ci_int16;
++
++typedef unsigned int ci_uint32;
++typedef int ci_int32;
++
++/* 64-bit support is platform dependent. */
++
++
++/**********************************************************************
++ * Other fancy types.
++ */
++
++typedef ci_uint8 ci_octet;
++
++typedef enum {
++ CI_FALSE = 0,
++ CI_TRUE
++} ci_boolean_t;
++
++
++/**********************************************************************
++ * Some nice types you'd always assumed were standards.
++ * (Really, they are SYSV "standards".)
++ */
++
++#ifdef _WIN32
++typedef unsigned long ulong;
++typedef unsigned int uint;
++typedef char* caddr_t;
++#elif defined(__linux__) && defined(__KERNEL__)
++#include <linux/types.h>
++#elif defined(__linux__)
++#include <sys/types.h>
++#endif
++
++
++#endif /* __CI_COMPAT_PRIMITIVE_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,166 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_SYSDEP_H__
++#define __CI_COMPAT_SYSDEP_H__
++
++
++/**********************************************************************
++ * Platform definition fixups.
++ */
++
++#if defined(__ci_ul_driver__) && !defined(__ci_driver__)
++# define __ci_driver__
++#endif
++
++#if defined(__ci_driver__) && !defined(__ci_ul_driver__) && \
++ !defined(__KERNEL__)
++# define __KERNEL__
++#endif
++
++
++/**********************************************************************
++ * Sanity checks (no cheating!)
++ */
++
++#if defined(__KERNEL__) && !defined(__ci_driver__)
++# error Insane.
++#endif
++
++#if defined(__KERNEL__) && defined(__ci_ul_driver__)
++# error Madness.
++#endif
++
++#if defined(__unix__) && defined(_WIN32)
++# error Strange.
++#endif
++
++#if defined(__GNUC__) && defined(_MSC_VER)
++# error Crazy.
++#endif
++
++
++/**********************************************************************
++ * Compiler and processor dependencies.
++ */
++
++#if defined(__GNUC__)
++
++# include <ci/compat/gcc.h>
++
++# if defined(__i386__)
++# include <ci/compat/x86.h>
++# include <ci/compat/gcc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/compat/x86_64.h>
++# include <ci/compat/gcc_x86.h>
++# elif defined(__PPC__)
++# include <ci/compat/ppc.h>
++# include <ci/compat/gcc_ppc.h>
++# elif defined(__ia64__)
++# include <ci/compat/ia64.h>
++# include <ci/compat/gcc_ia64.h>
++# else
++# error Unknown processor - GNU C
++# endif
++
++#elif defined(_MSC_VER)
++
++# include <ci/compat/msvc.h>
++
++# if defined(__i386__)
++# include <ci/compat/x86.h>
++# include <ci/compat/msvc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/compat/x86_64.h>
++# include <ci/compat/msvc_x86_64.h>
++# else
++# error Unknown processor MSC
++# endif
++
++#elif defined(__PGI)
++
++# include <ci/compat/x86.h>
++# include <ci/compat/pg_x86.h>
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# if __INTEL_COMPILER >= 700
++# include <ci/compat/gcc.h>
++# include <ci/compat/x86.h>
++# include <ci/compat/gcc_x86.h>
++# else
++# error Old Intel compiler not supported. Yet.
++# endif
++
++#else
++# error Unknown compiler.
++#endif
++
++
++/**********************************************************************
++ * Misc stuff (that probably shouldn't be here).
++ */
++
++#ifdef __sun
++# ifdef __KERNEL__
++# define _KERNEL
++# define _SYSCALL32
++# ifdef _LP64
++# define _SYSCALL32_IMPL
++# endif
++# else
++# define _REENTRANT
++# endif
++#endif
++
++
++/**********************************************************************
++ * Defaults for anything left undefined.
++ */
++
++#ifndef CI_LIKELY
++# define CI_LIKELY(t) (t)
++# define CI_UNLIKELY(t) (t)
++#endif
++
++#ifndef ci_restrict
++# define ci_restrict
++#endif
++
++#ifndef ci_inline
++# define ci_inline static inline
++#endif
++
++#ifndef ci_noinline
++# define ci_noinline static
++#endif
++
++#endif /* __CI_COMPAT_SYSDEP_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/utils.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/utils.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,269 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Handy utility macros.
++ * \date 2003/01/17
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_UTILS_H__
++#define __CI_COMPAT_UTILS_H__
++
++
++/**********************************************************************
++ * Alignment -- [align] must be a power of 2.
++ **********************************************************************/
++
++ /*! Align forward onto next boundary. */
++
++#define CI_ALIGN_FWD(p, align) (((p)+(align)-1u) & ~((align)-1u))
++
++
++ /*! Align back onto prev boundary. */
++
++#define CI_ALIGN_BACK(p, align) ((p) & ~((align)-1u))
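++ /*! e.g. CI_ALIGN_FWD(5, 4) == 8; CI_ALIGN_BACK(7, 4) == 4. */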
++
++
++ /*! How far to next boundary? */
++
++#define CI_ALIGN_NEEDED(p, align, signed_t) (-(signed_t)(p) & ((align)-1u))
++
++
++ /*! How far beyond prev boundary? */
++
++#define CI_OFFSET(p, align) ((p) & ((align)-1u))
++
++
++ /*! Does object fit in gap before next boundary? */
++
++#define CI_FITS(p, size, align, signed_t) \
++ (CI_ALIGN_NEEDED((p) + 1, (align), signed_t) + 1 >= (size))
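++ /*! e.g. CI_FITS(4, 4, 8, int) holds: 4 bytes at offset 4 just reach
++ * the next 8-byte boundary. */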
++
++
++ /*! Align forward onto next boundary. */
++
++#define CI_PTR_ALIGN_FWD(p, align) \
++ ((char*) CI_ALIGN_FWD(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))
++
++ /*! Align back onto prev boundary. */
++
++#define CI_PTR_ALIGN_BACK(p, align) \
++ ((char*) CI_ALIGN_BACK(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))
++
++ /*! How far to next boundary? */
++
++#define CI_PTR_ALIGN_NEEDED(p, align) \
++ CI_ALIGN_NEEDED(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align)), \
++ ci_ptr_arith_t)
++
++ /*! How far to next boundary? NZ = not zero i.e. give align if on boundary */
++
++#define CI_PTR_ALIGN_NEEDED_NZ(p, align) \
++ ((align) - (((char*)p) - \
++ ((char*) CI_ALIGN_BACK(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align))))))
++
++ /*! How far beyond prev boundary? */
++
++#define CI_PTR_OFFSET(p, align) \
++ CI_OFFSET(((ci_ptr_arith_t)(p)), ((ci_ptr_arith_t)(align)))
++
++
++ /* Same as CI_ALIGN_FWD and CI_ALIGN_BACK. */
++
++#define CI_ROUND_UP(i, align) (((i)+(align)-1u) & ~((align)-1u))
++
++#define CI_ROUND_DOWN(i, align) ((i) & ~((align)-1u))
++
++
++/**********************************************************************
++ * Byte-order
++ **********************************************************************/
++
++/* These are not flags. They are enumeration values for use with
++ * CI_MY_BYTE_ORDER. */
++#define CI_BIG_ENDIAN 1
++#define CI_LITTLE_ENDIAN 0
++
++/*
++** Note that these byte-swapping primitives may leave junk in bits above
++** the range they operate on.
++**
++** The CI_BSWAP_nn() routines require that bits above [nn] are zero. Use
++** CI_BSWAPM_nn(x) if this cannot be guaranteed.
++*/
++
++/* ?? May be able to improve on some of these with inline assembler on some
++** platforms.
++*/
++
++#define CI_BSWAP_16(v) ((((v) & 0xff) << 8) | ((v) >> 8))
++#define CI_BSWAPM_16(v) ((((v) & 0xff) << 8) | (((v) & 0xff00) >> 8))
++
++#define CI_BSWAP_32(v) (((v) >> 24) | \
++ (((v) & 0x00ff0000) >> 8) | \
++ (((v) & 0x0000ff00) << 8) | \
++ ((v) << 24))
++#define CI_BSWAPM_32(v) ((((v) & 0xff000000) >> 24) | \
++ (((v) & 0x00ff0000) >> 8) | \
++ (((v) & 0x0000ff00) << 8) | \
++ ((v) << 24))
++
++#define CI_BSWAP_64(v) (((v) >> 56) | \
++ (((v) & 0x00ff000000000000) >> 40) | \
++ (((v) & 0x0000ff0000000000) >> 24) | \
++ (((v) & 0x000000ff00000000) >> 8) | \
++ (((v) & 0x00000000ff000000) << 8) | \
++ (((v) & 0x0000000000ff0000) << 24) | \
++ (((v) & 0x000000000000ff00) << 40) | \
++ ((v) << 56))
++
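++/* e.g. CI_BSWAP_16(0x1234) == 0x3412; CI_BSWAP_32(0x12345678) == 0x78563412 */
++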
++# define CI_BSWAPPED_16_IF(c,v) ((c) ? CI_BSWAP_16(v) : (v))
++# define CI_BSWAPPED_32_IF(c,v) ((c) ? CI_BSWAP_32(v) : (v))
++# define CI_BSWAPPED_64_IF(c,v) ((c) ? CI_BSWAP_64(v) : (v))
++# define CI_BSWAP_16_IF(c,v) do{ if((c)) (v) = CI_BSWAP_16(v); }while(0)
++# define CI_BSWAP_32_IF(c,v) do{ if((c)) (v) = CI_BSWAP_32(v); }while(0)
++# define CI_BSWAP_64_IF(c,v) do{ if((c)) (v) = CI_BSWAP_64(v); }while(0)
++
++#if (CI_MY_BYTE_ORDER == CI_LITTLE_ENDIAN)
++# define CI_BSWAP_LE16(v) (v)
++# define CI_BSWAP_LE32(v) (v)
++# define CI_BSWAP_LE64(v) (v)
++# define CI_BSWAP_BE16(v) CI_BSWAP_16(v)
++# define CI_BSWAP_BE32(v) CI_BSWAP_32(v)
++# define CI_BSWAP_BE64(v) CI_BSWAP_64(v)
++# define CI_BSWAPM_LE16(v) (v)
++# define CI_BSWAPM_LE32(v) (v)
++# define CI_BSWAPM_LE64(v) (v)
++# define CI_BSWAPM_BE16(v) CI_BSWAPM_16(v)
++# define CI_BSWAPM_BE32(v) CI_BSWAPM_32(v)
++#elif (CI_MY_BYTE_ORDER == CI_BIG_ENDIAN)
++# define CI_BSWAP_BE16(v) (v)
++# define CI_BSWAP_BE32(v) (v)
++# define CI_BSWAP_BE64(v) (v)
++# define CI_BSWAP_LE16(v) CI_BSWAP_16(v)
++# define CI_BSWAP_LE32(v) CI_BSWAP_32(v)
++# define CI_BSWAP_LE64(v) CI_BSWAP_64(v)
++# define CI_BSWAPM_BE16(v) (v)
++# define CI_BSWAPM_BE32(v) (v)
++# define CI_BSWAPM_BE64(v) (v)
++# define CI_BSWAPM_LE16(v) CI_BSWAPM_16(v)
++# define CI_BSWAPM_LE32(v) CI_BSWAPM_32(v)
++#else
++# error Bad endian.
++#endif
++
++
++/**********************************************************************
++ * Get pointer to struct from pointer to member
++ **********************************************************************/
++
++#define CI_MEMBER_OFFSET(c_type, mbr_name) \
++ ((ci_uint32) (ci_uintptr_t)(&((c_type*)0)->mbr_name))
++
++#define CI_MEMBER_SIZE(c_type, mbr_name) \
++ sizeof(((c_type*)0)->mbr_name)
++
++#define __CI_CONTAINER(c_type, mbr_name, p_mbr) \
++ ( (c_type*) ((char*)(p_mbr) - CI_MEMBER_OFFSET(c_type, mbr_name)) )
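++
++/* e.g. for a hypothetical struct pair { int a, b; }, given int *pb
++ * pointing at some pair's .b, __CI_CONTAINER(struct pair, b, pb)
++ * recovers the enclosing struct pair pointer.
++ */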
++
++#ifndef CI_CONTAINER
++# define CI_CONTAINER(t,m,p) __CI_CONTAINER(t,m,p)
++#endif
++
++
++/**********************************************************************
++ * Structure member initialiser.
++ **********************************************************************/
++
++#ifndef CI_STRUCT_MBR
++# define CI_STRUCT_MBR(name, val) .name = val
++#endif
++
++
++/**********************************************************************
++ * min / max
++ **********************************************************************/
++
++#define CI_MIN(x,y) (((x) < (y)) ? (x) : (y))
++#define CI_MAX(x,y) (((x) > (y)) ? (x) : (y))
++
++/**********************************************************************
++ * abs
++ **********************************************************************/
++
++#define CI_ABS(x) (((x) < 0) ? -(x) : (x))
++
++/**********************************************************************
++ * Conditional debugging
++ **********************************************************************/
++
++#ifdef NDEBUG
++# define CI_DEBUG(x)
++# define CI_NDEBUG(x) x
++# define CI_IF_DEBUG(y,n) (n)
++# define CI_DEBUG_ARG(x)
++#else
++# define CI_DEBUG(x) x
++# define CI_NDEBUG(x)
++# define CI_IF_DEBUG(y,n) (y)
++# define CI_DEBUG_ARG(x) ,x
++#endif
++
++#ifdef __KERNEL__
++#define CI_KERNEL_ARG(x) ,x
++#else
++#define CI_KERNEL_ARG(x)
++#endif
++
++#ifdef _WIN32
++# define CI_KERNEL_ARG_WIN(x) CI_KERNEL_ARG(x)
++# define CI_ARG_WIN(x) ,x
++#else
++# define CI_KERNEL_ARG_WIN(x)
++# define CI_ARG_WIN(x)
++#endif
++
++#ifdef __unix__
++# define CI_KERNEL_ARG_UNIX(x) CI_KERNEL_ARG(x)
++# define CI_ARG_UNIX(x) ,x
++#else
++# define CI_KERNEL_ARG_UNIX(x)
++# define CI_ARG_UNIX(x)
++#endif
++
++#ifdef __linux__
++# define CI_KERNEL_ARG_LINUX(x) CI_KERNEL_ARG(x)
++# define CI_ARG_LINUX(x) ,x
++#else
++# define CI_KERNEL_ARG_LINUX(x)
++# define CI_ARG_LINUX(x)
++#endif
++
++
++#endif /* __CI_COMPAT_UTILS_H__ */
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/x86.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/x86.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,48 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_compat */
++
++#ifndef __CI_COMPAT_X86_H__
++#define __CI_COMPAT_X86_H__
++
++
++#define CI_MY_BYTE_ORDER CI_LITTLE_ENDIAN
++
++#define CI_WORD_SIZE 4
++#define CI_PTR_SIZE 4
++
++#define CI_PAGE_SIZE 4096
++#define CI_PAGE_SHIFT 12
++#define CI_PAGE_MASK (~(CI_PAGE_SIZE - 1))
++
++#define CI_CPU_HAS_SSE 1 /* SSE extensions supported */
++#define CI_CPU_HAS_SSE2 0 /* SSE2 extensions supported */
++#define CI_CPU_OOS 0 /* CPU does out of order stores */
++
++
++#endif /* __CI_COMPAT_X86_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/compat/x86_64.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/compat/x86_64.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,54 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Arch stuff for AMD x86_64.
++ * \date 2004/08/17
++ */
++
++/*! \cidoxg_include_ci_compat */
++#ifndef __CI_COMPAT_X86_64_H__
++#define __CI_COMPAT_X86_64_H__
++
++
++#define CI_MY_BYTE_ORDER CI_LITTLE_ENDIAN
++
++#define CI_WORD_SIZE 8
++#define CI_PTR_SIZE 8
++
++#define CI_PAGE_SIZE 4096
++#define CI_PAGE_SHIFT 12
++#define CI_PAGE_MASK (~(CI_PAGE_SIZE - 1))
++
++#define CI_CPU_HAS_SSE 1 /* SSE extensions supported */
++
++/* SSE2 disabled while investigating BUG1060 */
++#define CI_CPU_HAS_SSE2 0 /* SSE2 extensions supported */
++#define CI_CPU_OOS 0 /* CPU does out of order stores */
++
++
++#endif /* __CI_COMPAT_X86_64_H__ */
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/driver/resource/efx_vi.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,276 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file contains public EFX VI API to Solarflare resource manager.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_DRIVER_RESOURCE_EFX_VI_H__
++#define __CI_DRIVER_RESOURCE_EFX_VI_H__
++
++/* Default size of event queue in the efx_vi resource. Copied from
++ * CI_CFG_NETIF_EVENTQ_SIZE */
++#define EFX_VI_EVENTQ_SIZE_DEFAULT 1024
++
++extern int efx_vi_eventq_size;
++
++/**************************************************************************
++ * efx_vi_state types, allocation and free
++ **************************************************************************/
++
++/*! Handle for referring to an efx_vi */
++struct efx_vi_state;
++
++/*!
++ * Allocate an efx_vi, including event queue and pt_endpoint
++ *
++ * \param vih_out Pointer to a handle that is set on success
++ * \param nic_index Index of NIC to apply this resource to
++ * \return Zero on success (and vih_out set), non-zero on failure.
++ */
++extern int
++efx_vi_alloc(struct efx_vi_state **vih_out, int nic_index);
++
++/*!
++ * Free a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to free
++ */
++extern void
++efx_vi_free(struct efx_vi_state *vih);
++
++/*!
++ * Reset a previously allocated efx_vi
++ *
++ * \param vih The handle of the efx_vi to reset
++ */
++extern void
++efx_vi_reset(struct efx_vi_state *vih);
++
++/**************************************************************************
++ * efx_vi_eventq types and functions
++ **************************************************************************/
++
++/*!
++ * Register a function to receive callbacks when event queue timeouts
++ * or wakeups occur. Only one function per efx_vi can be registered
++ * at once.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param callback The function to callback
++ * \param context An argument to pass to the callback function
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_eventq_register_callback(struct efx_vi_state *vih,
++ void (*callback)(void *context, int is_timeout),
++ void *context);
++
++/*!
++ * Remove the current eventq timeout or wakeup callback function
++ *
++ * \param vih The handle to identify the efx_vi
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_eventq_kill_callback(struct efx_vi_state *vih);
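++/* Usage sketch (illustrative; my_evq_handler and my_attach are
++ * hypothetical): register one callback per efx_vi, and kill it before
++ * freeing the instance. */
++#if 0
++static void my_evq_handler(void *context, int is_timeout)
++{
++ /* is_timeout != 0 means the eventq timer expired rather than a
++ * wakeup event arriving */
++}
++
++static int my_attach(struct efx_vi_state *vih, void *my_ctx)
++{
++ return efx_vi_eventq_register_callback(vih, my_evq_handler, my_ctx);
++}
++#endif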
++
++/**************************************************************************
++ * efx_vi_dma_map types and functions
++ **************************************************************************/
++
++/*!
++ * Handle for referring to a DMA mapping
++ */
++struct efx_vi_dma_map_state;
++
++/*!
++ * Map a list of buffer pages so they are registered with the hardware
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param pages An array of page pointers to map
++ * \param n_pages Length of the page pointer array. Must be a power of two.
++ * \param dmh_out Set on success to a handle used to refer to this mapping
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_dma_map_pages(struct efx_vi_state *vih, struct page **pages,
++ int n_pages, struct efx_vi_dma_map_state **dmh_out);
++extern int
++efx_vi_dma_map_addrs(struct efx_vi_state *vih,
++ unsigned long long *dev_bus_addrs, int n_pages,
++ struct efx_vi_dma_map_state **dmh_out);
++
++/*!
++ * Unmap a previously mapped set of pages so they are no longer registered
++ * with the hardware.
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the dma mapping
++ */
++extern void
++efx_vi_dma_unmap_pages(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++extern void
++efx_vi_dma_unmap_addrs(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
++
++/*!
++ * Retrieve the buffer address of the mapping
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param dmh The handle to identify the buffer mapping
++ * \return The buffer address on success, or zero on failure
++ */
++extern unsigned
++efx_vi_dma_get_map_addr(struct efx_vi_state *vih,
++ struct efx_vi_dma_map_state *dmh);
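++/* Usage sketch (illustrative; my_map is hypothetical): map a page array
++ * (the count must be a power of two), fetch the buffer address the NIC
++ * will use, and unmap on any failure. */
++#if 0
++static int my_map(struct efx_vi_state *vih, struct page **pages, int n,
++ struct efx_vi_dma_map_state **dmh)
++{
++ int rc = efx_vi_dma_map_pages(vih, pages, n, dmh);
++ if (rc)
++ return rc;
++ if (efx_vi_dma_get_map_addr(vih, *dmh) == 0) {
++ /* zero indicates failure to obtain a buffer address */
++ efx_vi_dma_unmap_pages(vih, *dmh);
++ return -EIO;
++ }
++ return 0;
++}
++#endif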
++
++/**************************************************************************
++ * efx_vi filter functions
++ **************************************************************************/
++
++#define EFX_VI_STATIC_FILTERS 32
++
++/*! Handle to refer to a filter instance */
++struct filter_resource_t;
++
++/*!
++ * Allocate and add a filter
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param protocol The protocol of the new filter: UDP or TCP
++ * \param ip_addr_be32 The local ip address of the filter
++ * \param port_le16 The local port of the filter
++ * \param fh_out Set on success to be a handle to refer to this filter
++ * \return Zero on success, non-zero on failure.
++ */
++extern int
++efx_vi_filter(struct efx_vi_state *vih, int protocol, unsigned ip_addr_be32,
++ int port_le16, struct filter_resource_t **fh_out);
++
++/*!
++ * Remove a filter and free resources associated with it
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param fh The handle to identify the filter
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_filter_stop(struct efx_vi_state *vih, struct filter_resource_t *fh);
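++/* Usage sketch (illustrative; my_watch_port is hypothetical, and
++ * IPPROTO_UDP is assumed to be a valid protocol value): a filter's
++ * lifecycle pairs efx_vi_filter() with efx_vi_filter_stop() inside an
++ * efx_vi_alloc()/efx_vi_free() bracket. */
++#if 0
++static int my_watch_port(int nic_index, unsigned ip_be32, int port)
++{
++ struct efx_vi_state *vih;
++ struct filter_resource_t *fh;
++ int rc = efx_vi_alloc(&vih, nic_index);
++ if (rc)
++ return rc;
++ rc = efx_vi_filter(vih, IPPROTO_UDP, ip_be32, port, &fh);
++ if (rc == 0)
++ efx_vi_filter_stop(vih, fh);
++ efx_vi_free(vih);
++ return rc;
++}
++#endif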
++
++/**************************************************************************
++ * efx_vi hw resources types and functions
++ **************************************************************************/
++
++/*! Constants for the type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_TXDMAQ 0x0 /* PFN of TX DMA Q */
++#define EFX_VI_HW_RESOURCE_RXDMAQ 0x1 /* PFN of RX DMA Q */
++#define EFX_VI_HW_RESOURCE_TXBELL 0x2 /* PFN of TX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_RXBELL 0x3 /* PFN of RX Doorbell (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQTIMER 0x4 /* Address of event q timer */
++
++/* Address of event q pointer (EF1) */
++#define EFX_VI_HW_RESOURCE_EVQPTR 0x5
++/* Address of register pointer (Falcon A) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR 0x6
++/* Offset of register pointer (Falcon B) */
++#define EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET 0x7
++/* Address of mem KVA */
++#define EFX_VI_HW_RESOURCE_EVQMEMKVA 0x8
++/* PFN of doorbell page (Falcon) */
++#define EFX_VI_HW_RESOURCE_BELLPAGE 0x9
++
++/*! How large an array to allocate for the efx_vi_hw_resource_get_*()
++ functions - smaller than the total number of constants as some are
++ mutually exclusive */
++#define EFX_VI_HW_RESOURCE_MAXSIZE 0x7
++
++/*! Constants for the mem_type field in efx_vi_hw_resource */
++#define EFX_VI_HW_RESOURCE_IOBUFFER 0 /* Host memory */
++#define EFX_VI_HW_RESOURCE_PERIPHERAL 1 /* Card memory/registers */
++
++/*!
++ * Data structure providing information on a hardware resource mapping
++ */
++struct efx_vi_hw_resource {
++ u8 type; /*!< What this resource represents */
++ u8 mem_type; /*!< What type of memory it is in, e.g.
++ * host or iomem */
++ u8 more_to_follow; /*!< Is this part of a multi-region resource */
++ u32 length; /*!< Length of the resource in bytes */
++ unsigned long address; /*!< Address of this resource */
++};
++
++/*!
++ * Metadata concerning the list of hardware resource mappings
++ */
++struct efx_vi_hw_resource_metadata {
++ int version;
++ int evq_order;
++ int evq_offs;
++ int evq_capacity;
++ int instance;
++ unsigned rx_capacity;
++ unsigned tx_capacity;
++ int nic_arch;
++ int nic_revision;
++ char nic_variant;
++};
++
++/*!
++ * Obtain a list of hardware resource mappings, using virtual addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_virt(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
++/*!
++ * Obtain a list of hardware resource mappings, using physical addresses
++ *
++ * \param vih The handle to identify the efx_vi
++ * \param mdata Pointer to a structure to receive the metadata
++ * \param hw_res_array An array to receive the list of hardware resources
++ * \param length The length of hw_res_array. Updated on success to contain
++ * the number of entries in the supplied array that were used.
++ * \return Zero on success, non-zero on failure
++ */
++extern int
++efx_vi_hw_resource_get_phys(struct efx_vi_state *vih,
++ struct efx_vi_hw_resource_metadata *mdata,
++ struct efx_vi_hw_resource *hw_res_array,
++ int *length);
++
++#endif /* __CI_DRIVER_RESOURCE_EFX_VI_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/common.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,102 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides API of the efhw library which may be used both from
++ * the kernel and from the user-space code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_H__
++#define __CI_EFHW_COMMON_H__
++
++#include <ci/efhw/common_sysdep.h>
++
++enum efhw_arch {
++ EFHW_ARCH_FALCON,
++ EFHW_ARCH_SIENA,
++};
++
++typedef uint32_t efhw_buffer_addr_t;
++#define EFHW_BUFFER_ADDR_FMT "[ba:%"PRIx32"]"
++
++/*! Hardware event: a raw 64-bit value with opaque and decoded views */
++typedef union {
++ uint64_t u64;
++ struct {
++ uint32_t a;
++ uint32_t b;
++ } opaque;
++ struct {
++ uint32_t code;
++ uint32_t status;
++ } ev1002;
++} efhw_event_t;
++
++/* Flags for TX/RX queues */
++#define EFHW_VI_JUMBO_EN 0x01 /*! scatter RX over multiple desc */
++#define EFHW_VI_ISCSI_RX_HDIG_EN 0x02 /*! iscsi rx header digest */
++#define EFHW_VI_ISCSI_TX_HDIG_EN 0x04 /*! iscsi tx header digest */
++#define EFHW_VI_ISCSI_RX_DDIG_EN 0x08 /*! iscsi rx data digest */
++#define EFHW_VI_ISCSI_TX_DDIG_EN 0x10 /*! iscsi tx data digest */
++#define EFHW_VI_TX_PHYS_ADDR_EN 0x20 /*! TX physical address mode */
++#define EFHW_VI_RX_PHYS_ADDR_EN 0x40 /*! RX physical address mode */
++#define EFHW_VI_RM_WITH_INTERRUPT 0x80 /*! VI with an interrupt */
++#define EFHW_VI_TX_IP_CSUM_DIS 0x100 /*! disable ip checksum generation */
++#define EFHW_VI_TX_TCPUDP_CSUM_DIS 0x200 /*! disable tcp/udp checksum
++ generation */
++#define EFHW_VI_TX_TCPUDP_ONLY 0x400 /*! drop non-tcp/udp packets */
++
++/* Types of hardware filter */
++/* Each of these values implicitly selects scatter filters on B0; OR in
++ EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK if a non-scatter filter is required */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD (0) /* dest host only */
++#define EFHW_IP_FILTER_TYPE_UDP_FULL (1) /* dest host and port */
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD (2) /* dest based filter */
++#define EFHW_IP_FILTER_TYPE_TCP_FULL (3) /* src filter */
++/* Same again, but with RSS (for B0 only) */
++#define EFHW_IP_FILTER_TYPE_UDP_WILDCARD_RSS_B0 (4)
++#define EFHW_IP_FILTER_TYPE_UDP_FULL_RSS_B0 (5)
++#define EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 (6)
++#define EFHW_IP_FILTER_TYPE_TCP_FULL_RSS_B0 (7)
++
++#define EFHW_IP_FILTER_TYPE_FULL_MASK (0x1) /* Mask for full / wildcard */
++#define EFHW_IP_FILTER_TYPE_TCP_MASK (0x2) /* Mask for TCP type */
++#define EFHW_IP_FILTER_TYPE_RSS_B0_MASK (0x4) /* Mask for B0 RSS enable */
++#define EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK (0x8) /* Mask for B0 SCATTER dsbl */
++
++#define EFHW_IP_FILTER_TYPE_MASK (0xffff) /* Mask of types above */
++
++#define EFHW_IP_FILTER_BROADCAST (0x10000) /* driverlink filter
++ support */
++
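++/* Worked example (illustrative): a non-scatter TCP wildcard filter with
++ * RSS on a B0 part is built by OR-ing masks into the base type:
++ *
++ *   type = EFHW_IP_FILTER_TYPE_TCP_WILDCARD        (2)
++ *        | EFHW_IP_FILTER_TYPE_RSS_B0_MASK         (4)
++ *        | EFHW_IP_FILTER_TYPE_NOSCAT_B0_MASK;     (8)
++ *
++ * i.e. 0xe: EFHW_IP_FILTER_TYPE_TCP_WILDCARD_RSS_B0 plus the no-scatter
++ * bit. */
++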
++#endif /* __CI_EFHW_COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/common_sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,67 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for
++ * userland-to-kernel interfaces.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_COMMON_LINUX_H__
++#define __CI_EFHW_COMMON_LINUX_H__
++
++#include <linux/types.h>
++#include <linux/version.h>
++
++/* Dirty hack, but the Linux kernel does not provide DMA_ADDR_T_FMT */
++#if BITS_PER_LONG == 64 || defined(CONFIG_HIGHMEM64G)
++#define DMA_ADDR_T_FMT "%llx"
++#else
++#define DMA_ADDR_T_FMT "%x"
++#endif
++
++/* The Linux kernel also does not provide PRIx32... Sigh. */
++#define PRIx32 "x"
++#define PRIx64 "llx"
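++
++/* Usage sketch (illustrative; dma_addr and ev are hypothetical
++ * variables). DMA_ADDR_T_FMT already contains the '%'; PRIx32/PRIx64 do
++ * not:
++ *
++ *   printk(KERN_DEBUG "dma=" DMA_ADDR_T_FMT " ev=%" PRIx64 "\n",
++ *          dma_addr, ev.u64);
++ */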
++
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
++enum {
++ false = 0,
++ true = 1
++};
++
++typedef _Bool bool;
++#endif /* LINUX_VERSION_CODE < 2.6.19 */
++
++#endif /* __CI_EFHW_COMMON_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/debug.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/debug.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides debug-related API for efhw library using Linux kernel
++ * primitives.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_DEBUG_LINUX_H__
++#define __CI_EFHW_DEBUG_LINUX_H__
++
++#define EFHW_PRINTK_PREFIX "[sfc efhw] "
++
++#define EFHW_PRINTK(level, fmt, ...) \
++ printk(level EFHW_PRINTK_PREFIX fmt "\n", __VA_ARGS__)
++
++/* The following macros must be given at least one argument after the
++ * format string, due to __VA_ARGS__ limitations. Use "%s" with
++ * __FUNCTION__ if you can't find better arguments. */
++#define EFHW_ERR(fmt, ...) EFHW_PRINTK(KERN_ERR, fmt, __VA_ARGS__)
++#define EFHW_WARN(fmt, ...) EFHW_PRINTK(KERN_WARNING, fmt, __VA_ARGS__)
++#define EFHW_NOTICE(fmt, ...) EFHW_PRINTK(KERN_NOTICE, fmt, __VA_ARGS__)
++#if 0 && !defined(NDEBUG)
++#define EFHW_TRACE(fmt, ...) EFHW_PRINTK(KERN_DEBUG, fmt, __VA_ARGS__)
++#else
++#define EFHW_TRACE(fmt, ...)
++#endif
++
++#ifndef NDEBUG
++#define EFHW_ASSERT(cond) BUG_ON((cond) == 0)
++#define EFHW_DO_DEBUG(expr) expr
++#else
++#define EFHW_ASSERT(cond)
++#define EFHW_DO_DEBUG(expr)
++#endif
++
++#define EFHW_TEST(expr) \
++ do { \
++ if (unlikely(!(expr))) \
++ BUG(); \
++ } while (0)
++
++/* Build time asserts. We paste the line number into the type name
++ * so that the macro can be used more than once per file even if the
++ * compiler objects to multiple identical typedefs. Collisions
++ * between uses in different header files are still possible. */
++#ifndef EFHW_BUILD_ASSERT
++#define __EFHW_BUILD_ASSERT_NAME(_x) __EFHW_BUILD_ASSERT_ILOATHECPP(_x)
++#define __EFHW_BUILD_ASSERT_ILOATHECPP(_x) __EFHW_BUILD_ASSERT__ ##_x
++#define EFHW_BUILD_ASSERT(e) \
++ typedef char __EFHW_BUILD_ASSERT_NAME(__LINE__)[(e) ? 1 : -1]
++#endif
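++
++/* Usage sketch (illustrative): the assert expands to a typedef, so it is
++ * valid at file scope as well as inside a function, e.g.
++ *
++ *   EFHW_BUILD_ASSERT(sizeof(uint64_t) == 8);
++ *
++ * which fails to compile if the condition is false. */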
++
++#endif /* __CI_EFHW_DEBUG_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/efhw_config.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/efhw_config.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,43 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides some limits used in both kernel and userland code.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_CONFIG_H__
++#define __CI_EFHW_EFAB_CONFIG_H__
++
++#define EFHW_MAX_NR_DEVS 5 /* max number of efhw devices supported */
++
++#endif /* __CI_EFHW_EFAB_CONFIG_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/efhw_types.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/efhw_types.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,342 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides struct efhw_nic and some related types.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_EFAB_TYPES_H__
++#define __CI_EFHW_EFAB_TYPES_H__
++
++#include <ci/efhw/efhw_config.h>
++#include <ci/efhw/hardware_sysdep.h>
++#include <ci/efhw/iopage_types.h>
++#include <ci/efhw/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * hardware limits used in the types
++ *
++ *--------------------------------------------------------------------*/
++
++#define EFHW_KEVENTQ_MAX 8
++
++/*--------------------------------------------------------------------
++ *
++ * forward type declarations
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_nic;
++
++/*--------------------------------------------------------------------
++ *
++ * Managed interface
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_buffer_table_allocation{
++ unsigned base;
++ unsigned order;
++};
++
++struct eventq_resource_hardware {
++ /*! iobuffer allocated for the eventq - can be larger than the eventq */
++ efhw_iopages_t iobuff;
++ unsigned iobuff_off;
++ struct efhw_buffer_table_allocation buf_tbl_alloc;
++ int capacity; /*!< capacity of event queue */
++};
++
++/*--------------------------------------------------------------------
++ *
++ * event queues and event driven callbacks
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_keventq {
++ volatile int lock;
++ caddr_t evq_base;
++ int32_t evq_ptr;
++ uint32_t evq_mask;
++ unsigned instance;
++ struct eventq_resource_hardware hw;
++ struct efhw_ev_handler *ev_handlers;
++};
++
++/**********************************************************************
++ * Portable HW interface. ***************************************
++ **********************************************************************/
++
++/*--------------------------------------------------------------------
++ *
++ * EtherFabric Functional units - configuration and control
++ *
++ *--------------------------------------------------------------------*/
++
++struct efhw_func_ops {
++
++ /*-------------- Initialisation ------------ */
++
++ /*! close down all hardware functional units - leaves NIC in a safe
++ state for driver unload */
++ void (*close_hardware) (struct efhw_nic *nic);
++
++ /*! initialise all hardware functional units */
++ int (*init_hardware) (struct efhw_nic *nic,
++ struct efhw_ev_handler *,
++ const uint8_t *mac_addr);
++
++ /*-------------- Interrupt support ------------ */
++
++ /*! Main interrupt routine.
++ ** This function returns:
++ ** - zero, if the IRQ was not generated by EF1
++ ** - non-zero, if EF1 was the source of the IRQ
++ **
++ ** opaque is an OS-provided pointer for use by the OS callbacks,
++ ** e.g. on Windows it is used to indicate that a DPC has been scheduled
++ */
++ int (*interrupt) (struct efhw_nic *nic);
++
++ /*! Enable given interrupt mask for the given IRQ unit */
++ void (*interrupt_enable) (struct efhw_nic *nic, uint idx);
++
++ /*! Disable given interrupt mask for the given IRQ unit */
++ void (*interrupt_disable) (struct efhw_nic *nic, uint idx);
++
++ /*! Set interrupt moderation strategy for the given IRQ unit
++ ** val is in usec
++ */
++ void (*set_interrupt_moderation)(struct efhw_nic *nic,
++ uint idx, uint val);
++
++ /*-------------- Event support ------------ */
++
++ /*! Enable the given event queue.
++ Depending on the underlying implementation (EF1 or Falcon),
++ either a q_base_addr in host memory or a buffer base id
++ should be provided.
++ */
++ void (*event_queue_enable) (struct efhw_nic *nic,
++ uint evq, /* event queue index */
++ uint evq_size, /* units of #entries */
++ dma_addr_t q_base_addr, uint buf_base_id);
++
++ /*! Disable the given event queue (and any associated timer) */
++ void (*event_queue_disable) (struct efhw_nic *nic, uint evq,
++ int timer_only);
++
++ /*! request wakeup from the NIC on a given event Q */
++ void (*wakeup_request) (struct efhw_nic *nic, dma_addr_t q_base_addr,
++ int next_i, int evq);
++
++ /*! Push a SW event on a given eventQ */
++ void (*sw_event) (struct efhw_nic *nic, int data, int evq);
++
++ /*-------------- Filter support ------------ */
++
++ /*! Set up a given filter - the software can request a filter_i,
++ * but some EtherFabric implementations will override it with
++ * a more suitable index
++ */
++ int (*ipfilter_set) (struct efhw_nic *nic, int type,
++ int *filter_i, int dmaq,
++ unsigned saddr_be32, unsigned sport_be16,
++ unsigned daddr_be32, unsigned dport_be16);
++
++ /*! Attach a given filter to a DMAQ */
++ void (*ipfilter_attach) (struct efhw_nic *nic, int filter_idx,
++ int dmaq_idx);
++
++ /*! Detach a filter from its DMAQ */
++ void (*ipfilter_detach) (struct efhw_nic *nic, int filter_idx);
++
++ /*! Clear down a given filter */
++ void (*ipfilter_clear) (struct efhw_nic *nic, int filter_idx);
++
++ /*-------------- DMA support ------------ */
++
++ /*! Initialise NIC state for a given TX DMAQ */
++ void (*dmaq_tx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Initialise NIC state for a given RX DMAQ */
++ void (*dmaq_rx_q_init) (struct efhw_nic *nic,
++ uint dmaq, uint evq, uint owner, uint tag,
++ uint dmaq_size, uint buf_idx, uint flags);
++
++ /*! Disable a given TX DMAQ */
++ void (*dmaq_tx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Disable a given RX DMAQ */
++ void (*dmaq_rx_q_disable) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given TX DMA channel */
++ int (*flush_tx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*! Flush a given RX DMA channel */
++ int (*flush_rx_dma_channel) (struct efhw_nic *nic, uint dmaq);
++
++ /*-------------- Buffer table Support ------------ */
++
++ /*! Initialise a buffer table page */
++ void (*buffer_table_set) (struct efhw_nic *nic,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int own_id, int buffer_id);
++
++ /*! Initialise a block of buffer table pages */
++ void (*buffer_table_set_n) (struct efhw_nic *nic, int buffer_id,
++ dma_addr_t dma_addr,
++ uint bufsz, uint region,
++ int n_pages, int own_id);
++
++ /*! Clear a block of buffer table pages */
++ void (*buffer_table_clear) (struct efhw_nic *nic, int buffer_id,
++ int num);
++
++ /*! Commit a buffer table update */
++ void (*buffer_table_commit) (struct efhw_nic *nic);
++
++};
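++
++/* Usage sketch (illustrative): hardware-independent code reaches the
++ * hardware through the per-NIC ops table rather than calling a Falcon-
++ * or EF1-specific routine directly, e.g.
++ *
++ *   nic->efhw_func->sw_event(nic, data, evq);
++ */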
++
++
++/*----------------------------------------------------------------------------
++ *
++ * NIC type
++ *
++ *---------------------------------------------------------------------------*/
++
++struct efhw_device_type {
++ int arch; /* enum efhw_arch */
++ char variant; /* 'A', 'B', ... */
++ int revision; /* 0, 1, ... */
++};
++
++
++/*----------------------------------------------------------------------------
++ *
++ * EtherFabric NIC instance - nic.c for HW independent functions
++ *
++ *---------------------------------------------------------------------------*/
++
++/*! EtherFabric NIC instance */
++struct efhw_nic {
++ /*! zero base index in efrm_nic_table.nic array */
++ volatile int index;
++ int ifindex; /*!< OS level nic index */
++#ifdef HAS_NET_NAMESPACE
++ struct net *nd_net;
++#endif
++
++ struct efhw_device_type devtype;
++
++ /*! Options that can be set by user. */
++ unsigned options;
++# define NIC_OPT_EFTEST 0x1 /* owner is an eftest app */
++
++# define NIC_OPT_DEFAULT 0
++
++ /*! Internal flags that indicate hardware properties at runtime. */
++ unsigned flags;
++# define NIC_FLAG_NO_INTERRUPT 0x01 /* to be set at init time only */
++# define NIC_FLAG_TRY_MSI 0x02
++# define NIC_FLAG_MSI 0x04
++# define NIC_FLAG_OS_IRQ_EN 0x08
++# define NIC_FLAG_10G 0x10
++
++ unsigned mtu; /*!< MAC MTU (includes MAC hdr) */
++
++ /* hardware resources */
++
++ /*! I/O address of the start of the bar */
++ efhw_ioaddr_t bar_ioaddr;
++
++ /*! Bar number of control aperture. */
++ unsigned ctr_ap_bar;
++ /*! Length of control aperture in bytes. */
++ unsigned ctr_ap_bytes;
++
++ uint8_t mac_addr[ETH_ALEN]; /*!< mac address */
++
++ /*! EtherFabric Functional Units -- functions */
++ const struct efhw_func_ops *efhw_func;
++
++ /* Value read from the FPGA version register. Zero for ASIC. */
++ unsigned fpga_version;
++
++ /*! This lock protects a number of misc NIC resources. It should
++ * only be used for things that can be at the bottom of the lock
++ * order, i.e. you mustn't attempt to grab any other lock while
++ * holding this one.
++ */
++ spinlock_t *reg_lock;
++ spinlock_t the_reg_lock;
++
++ int buf_commit_outstanding; /*!< outstanding buffer commits */
++
++ /*! interrupt callbacks (hard-irq) */
++ void (*irq_handler) (struct efhw_nic *, int unit);
++
++ /*! event queues per driver */
++ struct efhw_keventq evq[EFHW_KEVENTQ_MAX];
++
++/* for marking when we are not using an IRQ unit
++ - 0 is a valid offset to an IRQ unit on EF1! */
++#define EFHW_IRQ_UNIT_UNUSED 0xffff
++ /*! interrupt unit in use */
++ unsigned int irq_unit[EFHW_KEVENTQ_MAX];
++ efhw_iopage_t irq_iobuff; /*!< Falcon SYSERR interrupt */
++
++ /* The new driverlink infrastructure. */
++ struct efx_dl_device *net_driver_dev;
++ struct efx_dlfilt_cb_s *dlfilter_cb;
++
++ /*! Bit masks of the sizes of event queues and dma queues supported
++ * by the nic. */
++ unsigned evq_sizes;
++ unsigned rxq_sizes;
++ unsigned txq_sizes;
++
++ /* Size of filter table (including odd and even banks). */
++ unsigned filter_tbl_size;
++};
++
++
++#define EFHW_KVA(nic) ((nic)->bar_ioaddr)
++
++
++#endif /* __CI_EFHW_EFHW_TYPES_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/hardware_sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,84 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for header files
++ * with hardware-related definitions (in ci/driver/efab/hardware*).
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_HARDWARE_LINUX_H__
++#define __CI_EFHW_HARDWARE_LINUX_H__
++
++#include <asm/io.h>
++
++#ifdef __LITTLE_ENDIAN
++#define EFHW_IS_LITTLE_ENDIAN
++#elif __BIG_ENDIAN
++#define EFHW_IS_BIG_ENDIAN
++#else
++#error Unknown endianness
++#endif
++
++#ifndef mmiowb
++ #if defined(__i386__) || defined(__x86_64__)
++ #define mmiowb()
++ #elif defined(__ia64__)
++ #ifndef ia64_mfa
++ #define ia64_mfa() asm volatile ("mf.a" ::: "memory")
++ #endif
++ #define mmiowb ia64_mfa
++ #else
++ #error "Need definition for mmiowb()"
++ #endif
++#endif
++
++typedef char *efhw_ioaddr_t;
++
++#ifndef readq
++static inline uint64_t __readq(void __iomem *addr)
++{
++ return *(volatile uint64_t *)addr;
++}
++#define readq(x) __readq(x)
++#endif
++
++#ifndef writeq
++static inline void __writeq(uint64_t v, void __iomem *addr)
++{
++ *(volatile uint64_t *)addr = v;
++}
++#define writeq(val, addr) __writeq((val), (addr))
++#endif
++
++#endif /* __CI_EFHW_HARDWARE_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/iopage_types.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/iopage_types.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,188 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides efhw_page_t and efhw_iopage_t for Linux kernel.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_IOPAGE_LINUX_H__
++#define __CI_EFHW_IOPAGE_LINUX_H__
++
++#include <linux/gfp.h>
++#include <linux/hardirq.h>
++#include <ci/efhw/debug.h>
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_page_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ unsigned long kva;
++} efhw_page_t;
++
++static inline int efhw_page_alloc(efhw_page_t *p)
++{
++ p->kva = __get_free_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline int efhw_page_alloc_zeroed(efhw_page_t *p)
++{
++ p->kva = get_zeroed_page(in_interrupt()? GFP_ATOMIC : GFP_KERNEL);
++ return p->kva ? 0 : -ENOMEM;
++}
++
++static inline void efhw_page_free(efhw_page_t *p)
++{
++ free_page(p->kva);
++ EFHW_DO_DEBUG(memset(p, 0, sizeof(*p)));
++}
++
++static inline char *efhw_page_ptr(efhw_page_t *p)
++{
++ return (char *)p->kva;
++}
++
++static inline unsigned efhw_page_pfn(efhw_page_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline void efhw_page_mark_invalid(efhw_page_t *p)
++{
++ p->kva = 0;
++}
++
++static inline int efhw_page_is_valid(efhw_page_t *p)
++{
++ return p->kva != 0;
++}
++
++static inline void efhw_page_init_from_va(efhw_page_t *p, void *va)
++{
++ p->kva = (unsigned long)va;
++}
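++
++/* Usage sketch (illustrative; my_page_demo is hypothetical): allocate,
++ * validate, use and free a page. */
++#if 0
++static int my_page_demo(void)
++{
++ efhw_page_t pg;
++ int rc = efhw_page_alloc_zeroed(&pg);
++ if (rc)
++ return rc; /* -ENOMEM */
++ EFHW_ASSERT(efhw_page_is_valid(&pg));
++ memset(efhw_page_ptr(&pg), 0xff, PAGE_SIZE);
++ efhw_page_free(&pg);
++ return 0;
++}
++#endif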
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopage_t: A single page of memory. Directly mapped in the driver,
++ * and can be mapped to userlevel. Can also be accessed by the NIC.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ efhw_page_t p;
++ dma_addr_t dma_addr;
++} efhw_iopage_t;
++
++static inline dma_addr_t efhw_iopage_dma_addr(efhw_iopage_t *p)
++{
++ return p->dma_addr;
++}
++
++#define efhw_iopage_ptr(iop) efhw_page_ptr(&(iop)->p)
++#define efhw_iopage_pfn(iop) efhw_page_pfn(&(iop)->p)
++#define efhw_iopage_mark_invalid(iop) efhw_page_mark_invalid(&(iop)->p)
++#define efhw_iopage_is_valid(iop) efhw_page_is_valid(&(iop)->p)
++
++/*--------------------------------------------------------------------
++ *
++ * efhw_iopages_t: A set of pages that are contiguous in physical memory.
++ * Directly mapped in the driver, and can be mapped to userlevel. Can also
++ * be accessed by the NIC.
++ *
++ * NB. The O/S may be unwilling to allocate many, or even any of these. So
++ * only use this type where the NIC really needs a physically contiguous
++ * buffer.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ caddr_t kva;
++ unsigned order;
++ dma_addr_t dma_addr;
++} efhw_iopages_t;
++
++static inline caddr_t efhw_iopages_ptr(efhw_iopages_t *p)
++{
++ return p->kva;
++}
++
++static inline unsigned efhw_iopages_pfn(efhw_iopages_t *p)
++{
++ return (unsigned)(__pa(p->kva) >> PAGE_SHIFT);
++}
++
++static inline dma_addr_t efhw_iopages_dma_addr(efhw_iopages_t *p)
++{
++ return p->dma_addr;
++}
++
++static inline unsigned efhw_iopages_size(efhw_iopages_t *p)
++{
++ return 1u << (p->order + PAGE_SHIFT);
++}
++
++/* efhw_iopage_t <-> efhw_iopages_t conversions for handling physically
++ * contiguous allocations in iobufsets for iSCSI. This allows the
++ * essential information about contiguous allocations from
++ * efhw_iopages_alloc() to be saved away in the efhw_iopage_t array in an
++ * iobufset. (Changing the iobufset resource to use a union type would
++ * involve a lot of code changes, and make the iobufset's metadata larger
++ * which could be bad as it's supposed to fit into a single page on some
++ * platforms.)
++ */
++static inline void
++efhw_iopage_init_from_iopages(efhw_iopage_t *iopage,
++ efhw_iopages_t *iopages, unsigned pageno)
++{
++ iopage->p.kva = ((unsigned long)efhw_iopages_ptr(iopages))
++ + (pageno * PAGE_SIZE);
++ iopage->dma_addr = efhw_iopages_dma_addr(iopages) +
++ (pageno * PAGE_SIZE);
++}
++
++static inline void
++efhw_iopages_init_from_iopage(efhw_iopages_t *iopages,
++ efhw_iopage_t *iopage, unsigned order)
++{
++ iopages->kva = (caddr_t) efhw_iopage_ptr(iopage);
++ EFHW_ASSERT(iopages->kva);
++ iopages->order = order;
++ iopages->dma_addr = efhw_iopage_dma_addr(iopage);
++}
++
++#endif /* __CI_EFHW_IOPAGE_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/public.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/public.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,83 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides public API of efhw library exported from the SFC
++ * resource driver.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_PUBLIC_H__
++#define __CI_EFHW_PUBLIC_H__
++
++#include <ci/efhw/common.h>
++#include <ci/efhw/efhw_types.h>
++
++/*! Returns true if we have some EtherFabric functional units -
++ whether configured or not */
++static inline int efhw_nic_have_functional_units(struct efhw_nic *nic)
++{
++ return nic->efhw_func != 0;
++}
++
++/*! Returns true if the EtherFabric functional units have been configured */
++static inline int efhw_nic_have_hw(struct efhw_nic *nic)
++{
++ return efhw_nic_have_functional_units(nic) && (EFHW_KVA(nic) != 0);
++}
++
++/*! Helper function to allocate the iobuffer needed by an eventq
++ * - it ensures the eventq has the correct alignment for the NIC
++ *
++ * \param rm Event-queue resource manager
++ * \param instance Event-queue instance (index)
++ * \param buf_bytes Requested size of eventq
++ * \return < 0 if iobuffer allocation fails
++ */
++int efhw_nic_event_queue_alloc_iobuffer(struct efhw_nic *nic,
++ struct eventq_resource_hardware *h,
++ int evq_instance, unsigned buf_bytes);
++
++extern void falcon_nic_set_rx_usr_buf_size(struct efhw_nic *,
++ int rx_usr_buf_size);
++
++extern void
++falcon_nic_rx_filter_ctl_set(struct efhw_nic *nic, uint32_t tcp_full,
++ uint32_t tcp_wild,
++ uint32_t udp_full, uint32_t udp_wild);
++
++extern void
++falcon_nic_rx_filter_ctl_get(struct efhw_nic *nic, uint32_t *tcp_full,
++ uint32_t *tcp_wild,
++ uint32_t *udp_full, uint32_t *udp_wild);
++
++#endif /* __CI_EFHW_PUBLIC_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efhw/sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,72 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for efhw library.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFHW_SYSDEP_LINUX_H__
++#define __CI_EFHW_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/spinlock.h>
++#include <linux/delay.h>
++#include <linux/if_ether.h>
++
++#include <linux/netdevice.h> /* necessary for etherdevice.h on some kernels */
++#include <linux/etherdevice.h>
++
++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,21)
++static inline int is_local_ether_addr(const u8 *addr)
++{
++ return (0x02 & addr[0]);
++}
++#endif
++
++typedef unsigned long irq_flags_t;
++
++#define spin_lock_destroy(l_) do {} while (0)
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,24)
++#define HAS_NET_NAMESPACE
++#endif
++
++/* Funny, but Linux has round_up for x86 only, defined in an
++ * x86-specific header */
++#ifndef round_up
++#define round_up(x, y) (((x) + (y) - 1) & ~((y)-1))
++#endif
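++/* Worked example: round_up(4097, 4096) == 8192. The mask form requires
++ * y to be a power of two. */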
++
++#endif /* __CI_EFHW_SYSDEP_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/nic_table.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/nic_table.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,98 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides public API for NIC table.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_NIC_TABLE_H__
++#define __CI_EFRM_NIC_TABLE_H__
++
++#include <ci/efhw/efhw_types.h>
++#include <ci/efrm/sysdep.h>
++
++/*--------------------------------------------------------------------
++ *
++ * struct efrm_nic_table - top level driver object keeping all NICs -
++ * implemented in driver_object.c
++ *
++ *--------------------------------------------------------------------*/
++
++/*! Top-level table of all NICs attached to this driver */
++struct efrm_nic_table {
++ /*! nics attached to this driver */
++ struct efhw_nic *nic[EFHW_MAX_NR_DEVS];
++ /*! pointer to an arbitrary struct efhw_nic if one exists;
++ * for code which does not care which NIC it wants but
++ * still needs one. Note you cannot assume nic[0] exists. */
++ struct efhw_nic *a_nic;
++ uint32_t nic_count; /*!< number of nics attached to this driver */
++ spinlock_t lock; /*!< lock for table modifications */
++ atomic_t ref_count; /*!< refcount for users of nic table */
++};
++
++/* Resource driver structures used by other drivers as well */
++extern struct efrm_nic_table efrm_nic_table;
++
++static inline void efrm_nic_table_hold(void)
++{
++ atomic_inc(&efrm_nic_table.ref_count);
++}
++
++static inline void efrm_nic_table_rele(void)
++{
++ atomic_dec(&efrm_nic_table.ref_count);
++}
++
++static inline int efrm_nic_table_held(void)
++{
++ return (atomic_read(&efrm_nic_table.ref_count) != 0);
++}
++
++/* Run the following statement multiple times, with variable _nic set to
++ * each registered NIC in turn.
++ * DO NOT "break" out of this loop early. */
++#define EFRM_FOR_EACH_NIC(_nic_i, _nic) \
++ for ((_nic_i) = (efrm_nic_table_hold(), 0); \
++ (_nic_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ (_nic_i)++) \
++ if (((_nic) = efrm_nic_table.nic[_nic_i]))
++
++#define EFRM_FOR_EACH_NIC_IN_SET(_set, _i, _nic) \
++ for ((_i) = (efrm_nic_table_hold(), 0); \
++ (_i) < EFHW_MAX_NR_DEVS || (efrm_nic_table_rele(), 0); \
++ ++(_i)) \
++ if (((_nic) = efrm_nic_table.nic[_i]) && \
++ efrm_nic_set_read((_set), (_i)))
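++
++/* Usage sketch (illustrative; my_log_nics is hypothetical, and
++ * EFHW_NOTICE comes from ci/efhw/debug.h): */
++#if 0
++static void my_log_nics(void)
++{
++ int nic_i;
++ struct efhw_nic *nic;
++ EFRM_FOR_EACH_NIC(nic_i, nic)
++ EFHW_NOTICE("nic[%d] ifindex=%d", nic_i, nic->ifindex);
++}
++#endif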
++
++#endif /* __CI_EFRM_NIC_TABLE_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,54 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides Linux-like system-independent API for efrm library.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_H__
++#define __CI_EFRM_SYSDEP_H__
++
++/* Spinlocks are defined in efhw/sysdep.h */
++#include <ci/efhw/sysdep.h>
++
++#if defined(__linux__) && defined(__KERNEL__)
++
++# include <ci/efrm/sysdep_linux.h>
++
++#else
++
++# include <ci/efrm/sysdep_ci2linux.h>
++
++#endif
++
++#endif /* __CI_EFRM_SYSDEP_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/efrm/sysdep_linux.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,248 @@
++/****************************************************************************
++ * Driver for Solarflare network controllers -
++ * resource management for Xen backend, OpenOnload, etc
++ * (including support for SFE4001 10GBT NIC)
++ *
++ * This file provides version-independent Linux kernel API for efrm library.
++ * Only kernels >=2.6.9 are supported.
++ *
++ * Copyright 2005-2007: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Kfifo API is partially stolen from linux-2.6.22/include/linux/kfifo.h
++ * Copyright (C) 2004 Stelian Pop <stelian@popies.net>
++ *
++ * Developed and maintained by Solarflare Communications:
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * Certain parts of the driver were implemented by
++ * Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
++ * OKTET Labs Ltd, Russia,
++ * http://oktetlabs.ru, <info@oktetlabs.ru>
++ * by request of Solarflare Communications
++ *
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef __CI_EFRM_SYSDEP_LINUX_H__
++#define __CI_EFRM_SYSDEP_LINUX_H__
++
++#include <linux/version.h>
++#include <linux/list.h>
++#include <linux/vmalloc.h>
++#include <linux/errno.h>
++#include <linux/string.h>
++#include <linux/workqueue.h>
++#include <linux/gfp.h>
++#include <linux/slab.h>
++#include <linux/hardirq.h>
++#include <linux/kernel.h>
++#include <linux/if_ether.h>
++#include <linux/completion.h>
++#include <linux/in.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++/* get roundup_pow_of_two(), which was in kernel.h in early kernel versions */
++#include <linux/log2.h>
++#endif
++
++/********************************************************************
++ *
++ * List API
++ *
++ ********************************************************************/
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
++static inline void
++list_replace_init(struct list_head *old, struct list_head *new)
++{
++ new->next = old->next;
++ new->next->prev = new;
++ new->prev = old->prev;
++ new->prev->next = new;
++ INIT_LIST_HEAD(old);
++}
++#endif
++
++static inline struct list_head *list_pop(struct list_head *list)
++{
++ struct list_head *link = list->next;
++ list_del(link);
++ return link;
++}
++
++static inline struct list_head *list_pop_tail(struct list_head *list)
++{
++ struct list_head *link = list->prev;
++ list_del(link);
++ return link;
++}
++
++/********************************************************************
++ *
++ * Workqueue API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
++#define NEED_OLD_WORK_API
++
++/**
++ * The old and new work function prototypes just change
++ * the type of the pointer in the only argument, so it's
++ * safe to cast one function type to the other
++ */
++typedef void (*efrm_old_work_func_t) (void *p);
++
++#undef INIT_WORK
++#define INIT_WORK(_work, _func) \
++ do { \
++ INIT_LIST_HEAD(&(_work)->entry); \
++ (_work)->pending = 0; \
++ PREPARE_WORK((_work), \
++ (efrm_old_work_func_t) (_func), \
++ (_work)); \
++ } while (0)
++
++#endif
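A sketch of what the shim above enables (my_handler and my_work are
hypothetical names): a handler written against the post-2.6.20
prototype can be initialised with this compat INIT_WORK on older
kernels too, because the old API passes the work item itself as the
data argument.

/* Hypothetical deferred-work user; on >= 2.6.20 kernels the stock
 * INIT_WORK is used and behaves identically. */
static void my_handler(struct work_struct *work)
{
	/* ... deferred work ... */
}

static struct work_struct my_work;

static void my_setup(void)
{
	INIT_WORK(&my_work, my_handler);
	schedule_work(&my_work);
}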
++
++/********************************************************************
++ *
++ * Kfifo API
++ *
++ ********************************************************************/
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10)
++
++#if !defined(RHEL_RELEASE_CODE) || (RHEL_RELEASE_CODE < 1029)
++typedef unsigned gfp_t;
++#endif
++
++#define HAS_NO_KFIFO
++
++struct kfifo {
++ unsigned char *buffer; /* the buffer holding the data */
++ unsigned int size; /* the size of the allocated buffer */
++ unsigned int in; /* data is added at offset (in % size) */
++ unsigned int out; /* data is extracted from off. (out % size) */
++ spinlock_t *lock; /* protects concurrent modifications */
++};
++
++extern struct kfifo *kfifo_init(unsigned char *buffer, unsigned int size,
++ gfp_t gfp_mask, spinlock_t *lock);
++extern struct kfifo *kfifo_alloc(unsigned int size, gfp_t gfp_mask,
++ spinlock_t *lock);
++extern void kfifo_free(struct kfifo *fifo);
++extern unsigned int __kfifo_put(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++extern unsigned int __kfifo_get(struct kfifo *fifo,
++ unsigned char *buffer, unsigned int len);
++
++/**
++ * kfifo_put - puts some data into the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: the data to be added.
++ * @len: the length of the data to be added.
++ *
++ * This function copies at most @len bytes from the @buffer into
++ * the FIFO depending on the free space, and returns the number of
++ * bytes copied.
++ */
++static inline unsigned int
++kfifo_put(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_put(fifo, buffer, len);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * kfifo_get - gets some data from the FIFO
++ * @fifo: the fifo to be used.
++ * @buffer: where the data must be copied.
++ * @len: the size of the destination buffer.
++ *
++ * This function copies at most @len bytes from the FIFO into the
++ * @buffer and returns the number of copied bytes.
++ */
++static inline unsigned int
++kfifo_get(struct kfifo *fifo, unsigned char *buffer, unsigned int len)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_get(fifo, buffer, len);
++
++ /*
++ * optimization: if the FIFO is empty, set the indices to 0
++ * so we don't wrap the next time
++ */
++ if (fifo->in == fifo->out)
++ fifo->in = fifo->out = 0;
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++/**
++ * __kfifo_len - returns the number of bytes available in the FIFO, no locking version
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int __kfifo_len(struct kfifo *fifo)
++{
++ return fifo->in - fifo->out;
++}
++
++/**
++ * kfifo_len - returns the number of bytes available in the FIFO
++ * @fifo: the fifo to be used.
++ */
++static inline unsigned int kfifo_len(struct kfifo *fifo)
++{
++ unsigned long flags;
++ unsigned int ret;
++
++ spin_lock_irqsave(fifo->lock, flags);
++
++ ret = __kfifo_len(fifo);
++
++ spin_unlock_irqrestore(fifo->lock, flags);
++
++ return ret;
++}
++
++#else
++#include <linux/kfifo.h>
++#endif
++
++static inline void kfifo_vfree(struct kfifo *fifo)
++{
++ vfree(fifo->buffer);
++ kfree(fifo);
++}
++
++#endif /* __CI_EFRM_SYSDEP_LINUX_H__ */
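A rough usage sketch of the compat kfifo path above (the failure
convention of this compat kfifo_alloc() is an assumption here; on
>= 2.6.10 kernels the stock <linux/kfifo.h> provides the same calls):

static DEFINE_SPINLOCK(my_fifo_lock);

static void my_fifo_demo(void)
{
	struct kfifo *fifo = kfifo_alloc(512, GFP_KERNEL, &my_fifo_lock);
	unsigned char buf[4];

	if (fifo == NULL || IS_ERR(fifo))	/* assumed error convention */
		return;
	kfifo_put(fifo, (unsigned char *)"abc", 4);	/* copy 4 bytes in */
	kfifo_get(fifo, buf, sizeof(buf));		/* copy them back out */
	kfifo_free(fifo);
}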
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/config.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/config.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,49 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_CONFIG_H__
++#define __CI_TOOLS_CONFIG_H__
++
++
++/**********************************************************************
++ * Debugging.
++ */
++
++#define CI_INCLUDE_ASSERT_VALID 0
++
++/* Set non-zero to allow info about who has allocated what to appear in
++ * /proc/drivers/level5/mem.
++ * Note, however, that enabling this can lead to a segfault when you unload
++ * the driver, and other weirdness; i.e. the code for it is not quite
++ * right (written by Oktet, hacked by gel), but it does work well enough to be
++ * useful.
++ */
++#define CI_MEMLEAK_DEBUG_ALLOC_TABLE 0
++
++
++#endif /* __CI_TOOLS_CONFIG_H__ */
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/debug.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/debug.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,336 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_DEBUG_H__
++#define __CI_TOOLS_DEBUG_H__
++
++#define CI_LOG_E(x) x /* errors */
++#define CI_LOG_W(x) x /* warnings */
++#define CI_LOG_I(x) x /* information */
++#define CI_LOG_V(x) x /* verbose */
++
++/* Build time asserts. We paste the line number into the type name
++ * so that the macro can be used more than once per file even if the
++ * compiler objects to multiple identical typedefs. Collisions
++ * between uses in different header files are still possible. */
++#ifndef CI_BUILD_ASSERT
++#define __CI_BUILD_ASSERT_NAME(_x) __CI_BUILD_ASSERT_ILOATHECPP(_x)
++#define __CI_BUILD_ASSERT_ILOATHECPP(_x) __CI_BUILD_ASSERT__ ##_x
++#define CI_BUILD_ASSERT(e)\
++ typedef char __CI_BUILD_ASSERT_NAME(__LINE__)[(e)?1:-1]
++#endif
++
++
++#ifdef NDEBUG
++
++# define _ci_check(exp, file, line)
++# define _ci_assert2(e, x, y, file, line)
++# define _ci_assert(exp, file, line)
++# define _ci_assert_equal(exp1, exp2, file, line)
++# define _ci_assert_equiv(exp1, exp2, file, line)
++# define _ci_assert_nequal(exp1, exp2, file, line)
++# define _ci_assert_le(exp1, exp2, file, line)
++# define _ci_assert_lt(exp1, exp2, file, line)
++# define _ci_assert_ge(exp1, exp2, file, line)
++# define _ci_assert_gt(exp1, exp2, file, line)
++# define _ci_assert_impl(exp1, exp2, file, line)
++
++# define _ci_verify(exp, file, line) \
++ do { \
++ (void)(exp); \
++ } while (0)
++
++# define CI_DEBUG_TRY(exp) \
++ do { \
++ (void)(exp); \
++ } while (0)
++
++#define CI_TRACE(exp,fmt)
++#define CI_TRACE_INT(integer)
++#define CI_TRACE_INT32(integer)
++#define CI_TRACE_INT64(integer)
++#define CI_TRACE_UINT(integer)
++#define CI_TRACE_UINT32(integer)
++#define CI_TRACE_UINT64(integer)
++#define CI_TRACE_HEX(integer)
++#define CI_TRACE_HEX32(integer)
++#define CI_TRACE_HEX64(integer)
++#define CI_TRACE_PTR(pointer)
++#define CI_TRACE_STRING(string)
++#define CI_TRACE_MAC(mac)
++#define CI_TRACE_IP(ip_be32)
++#define CI_TRACE_ARP(arp_pkt)
++
++#else
++
++# define _CI_ASSERT_FMT "\nfrom %s:%d"
++
++# define _ci_check(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_warn(("ci_check(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++/*
++ * NOTE: ci_fail() emits the file and line where the assert is actually
++ * coded.
++ */
++
++# define _ci_assert(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_fail(("ci_assert(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++# define _ci_assert2(e, x, y, file, line) do { \
++ if(CI_UNLIKELY( ! (e) )) \
++ ci_fail(("ci_assert(%s)\nwhere [%s=%"CI_PRIx64"] " \
++ "[%s=%"CI_PRIx64"]\nat %s:%d\nfrom %s:%d", #e \
++ , #x, (ci_uint64)(ci_uintptr_t)(x) \
++ , #y, (ci_uint64)(ci_uintptr_t)(y), \
++ __FILE__, __LINE__, (file), (line))); \
++ } while (0)
++
++# define _ci_verify(exp, file, line) \
++ do { \
++ if (CI_UNLIKELY(!(exp))) \
++ ci_fail(("ci_verify(%s)"_CI_ASSERT_FMT, #exp, \
++ (file), (line))); \
++ } while (0)
++
++# define _ci_assert_equal(x, y, f, l) _ci_assert2((x)==(y), x, y, (f), (l))
++# define _ci_assert_nequal(x, y, f, l) _ci_assert2((x)!=(y), x, y, (f), (l))
++# define _ci_assert_le(x, y, f, l) _ci_assert2((x)<=(y), x, y, (f), (l))
++# define _ci_assert_lt(x, y, f, l) _ci_assert2((x)< (y), x, y, (f), (l))
++# define _ci_assert_ge(x, y, f, l) _ci_assert2((x)>=(y), x, y, (f), (l))
++# define _ci_assert_gt(x, y, f, l) _ci_assert2((x)> (y), x, y, (f), (l))
++# define _ci_assert_or(x, y, f, l) _ci_assert2((x)||(y), x, y, (f), (l))
++# define _ci_assert_impl(x, y, f, l) _ci_assert2(!(x) || (y), x, y, (f), (l))
++# define _ci_assert_equiv(x, y, f, l) _ci_assert2(!(x)== !(y), x, y, (f), (l))
++
++#define _ci_assert_equal_msg(exp1, exp2, msg, file, line) \
++ do { \
++ if (CI_UNLIKELY((exp1)!=(exp2))) \
++ ci_fail(("ci_assert_equal_msg(%s == %s) were " \
++ "(%"CI_PRIx64":%"CI_PRIx64") with msg[%c%c%c%c]" \
++ _CI_ASSERT_FMT, #exp1, #exp2, \
++ (ci_uint64)(ci_uintptr_t)(exp1), \
++ (ci_uint64)(ci_uintptr_t)(exp2), \
++ (((ci_uint32)msg) >> 24) & 0xff, \
++ (((ci_uint32)msg) >> 16) & 0xff, \
++ (((ci_uint32)msg) >> 8 ) & 0xff, \
++ (((ci_uint32)msg) ) & 0xff, \
++ (file), (line))); \
++ } while (0)
++
++# define CI_DEBUG_TRY(exp) CI_TRY(exp)
++
++#define CI_TRACE(exp,fmt) \
++ ci_log("%s:%d:%s] " #exp "=" fmt, \
++ __FILE__, __LINE__, __FUNCTION__, (exp))
++
++
++#define CI_TRACE_INT(integer) \
++ ci_log("%s:%d:%s] " #integer "=%d", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_INT32(integer) \
++ ci_log("%s:%d:%s] " #integer "=%d", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_int32)integer))
++
++
++#define CI_TRACE_INT64(integer) \
++ ci_log("%s:%d:%s] " #integer "=%lld", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_int64)integer))
++
++
++#define CI_TRACE_UINT(integer) \
++ ci_log("%s:%d:%s] " #integer "=%u", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_UINT32(integer) \
++ ci_log("%s:%d:%s] " #integer "=%u", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint32)integer))
++
++
++#define CI_TRACE_UINT64(integer) \
++ ci_log("%s:%d:%s] " #integer "=%llu", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint64)integer))
++
++
++#define CI_TRACE_HEX(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%x", \
++ __FILE__, __LINE__, __FUNCTION__, (integer))
++
++
++#define CI_TRACE_HEX32(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%x", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint32)integer))
++
++
++#define CI_TRACE_HEX64(integer) \
++ ci_log("%s:%d:%s] " #integer "=0x%llx", \
++ __FILE__, __LINE__, __FUNCTION__, ((ci_uint64)integer))
++
++
++#define CI_TRACE_PTR(pointer) \
++ ci_log("%s:%d:%s] " #pointer "=0x%p", \
++ __FILE__, __LINE__, __FUNCTION__, (pointer))
++
++
++#define CI_TRACE_STRING(string) \
++ ci_log("%s:%d:%s] " #string "=%s", \
++ __FILE__, __LINE__, __FUNCTION__, (string))
++
++
++#define CI_TRACE_MAC(mac) \
++ ci_log("%s:%d:%s] " #mac "=" CI_MAC_PRINTF_FORMAT, \
++ __FILE__, __LINE__, __FUNCTION__, CI_MAC_PRINTF_ARGS(mac))
++
++
++#define CI_TRACE_IP(ip_be32) \
++ ci_log("%s:%d:%s] " #ip_be32 "=" CI_IP_PRINTF_FORMAT, __FILE__, \
++ __LINE__, __FUNCTION__, CI_IP_PRINTF_ARGS(&(ip_be32)))
++
++
++#define CI_TRACE_ARP(arp_pkt) \
++ ci_log("%s:%d:%s]\n"CI_ARP_PRINTF_FORMAT, \
++ __FILE__, __LINE__, __FUNCTION__, CI_ARP_PRINTF_ARGS(arp_pkt))
++
++#endif /* NDEBUG */
++
++#define ci_check(exp) \
++ _ci_check(exp, __FILE__, __LINE__)
++
++#define ci_assert(exp) \
++ _ci_assert(exp, __FILE__, __LINE__)
++
++#define ci_verify(exp) \
++ _ci_verify(exp, __FILE__, __LINE__)
++
++#define ci_assert_equal(exp1, exp2) \
++ _ci_assert_equal(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_equal_msg(exp1, exp2, msg) \
++ _ci_assert_equal_msg(exp1, exp2, msg, __FILE__, __LINE__)
++
++#define ci_assert_nequal(exp1, exp2) \
++ _ci_assert_nequal(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_le(exp1, exp2) \
++ _ci_assert_le(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_lt(exp1, exp2) \
++ _ci_assert_lt(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_ge(exp1, exp2) \
++ _ci_assert_ge(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_gt(exp1, exp2) \
++ _ci_assert_gt(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_impl(exp1, exp2) \
++ _ci_assert_impl(exp1, exp2, __FILE__, __LINE__)
++
++#define ci_assert_equiv(exp1, exp2) \
++ _ci_assert_equiv(exp1, exp2, __FILE__, __LINE__)
++
++
++#define CI_TEST(exp) \
++ do{ \
++ if( CI_UNLIKELY(!(exp)) ) \
++ ci_fail(("CI_TEST(%s)", #exp)); \
++ }while(0)
++
++
++#define CI_TRY(exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) \
++ ci_sys_fail(#exp, _trc); \
++ }while(0)
++
++
++#define CI_TRY_RET(exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc, __FILE__, __LINE__); \
++ return _trc; \
++ } \
++ }while(0)
++
++#define CI_LOGLEVEL_TRY_RET(logfn, exp) \
++ do{ \
++ int _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(_trc < 0) ) { \
++ logfn (ci_log("%s returned %d at %s:%d", #exp, _trc, __FILE__, __LINE__)); \
++ return _trc; \
++ } \
++ }while(0)
++
++
++#define CI_SOCK_TRY(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) \
++ ci_sys_fail(#exp, _trc.val); \
++ }while(0)
++
++
++#define CI_SOCK_TRY_RET(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc.val, __FILE__, __LINE__); \
++ return ci_sock_errcode(_trc); \
++ } \
++ }while(0)
++
++
++#define CI_SOCK_TRY_SOCK_RET(exp) \
++ do{ \
++ ci_sock_err_t _trc; \
++ _trc=(exp); \
++ if( CI_UNLIKELY(!ci_sock_errok(_trc)) ) { \
++ ci_log("%s returned %d at %s:%d", #exp, _trc.val, __FILE__, __LINE__); \
++ return _trc; \
++ } \
++ }while(0)
++
++#endif /* __CI_TOOLS_DEBUG_H__ */
++
++/*! \cidoxg_end */
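For orientation, a small hedged sketch of the macros above in use;
my_hw_init() is a hypothetical helper returning 0 or a negative errno:

/* Compile-time check: one typedef per use, named by line number. */
CI_BUILD_ASSERT(sizeof(ci_uint32) == 4);

static int my_start(int budget)
{
	ci_assert_gt(budget, 0);	/* compiled out when NDEBUG is set */
	CI_TRY_RET(my_hw_init(budget));	/* logs and returns on failure */
	return 0;
}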
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/log.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/log.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,262 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Functions for logging and pretty-printing.
++ * \date 2002/08/07
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_LOG_H__
++#define __CI_TOOLS_LOG_H__
++
++#include <stdarg.h>
++
++
++/**********************************************************************
++ * Logging.
++ */
++
++/* size of internal log buffer */
++#define CI_LOG_MAX_LINE 512
++/* uses of ci_log must ensure that all trace messages are shorter than this */
++#define CI_LOG_MAX_MSG_LENGTH (CI_LOG_MAX_LINE-50)
++
++extern void ci_vlog(const char* fmt, va_list args) CI_HF;
++extern void ci_log(const char* fmt, ...) CI_PRINTF_LIKE(1,2) CI_HF;
++
++ /*! Set the prefix for log messages.
++ **
++ ** Uses the storage pointed to by \em prefix. Therefore \em prefix must
++ ** be allocated on the heap, or statically.
++ */
++extern void ci_set_log_prefix(const char* prefix) CI_HF;
++
++typedef void (*ci_log_fn_t)(const char* msg);
++extern ci_log_fn_t ci_log_fn CI_HV;
++
++/* Log functions. */
++extern void ci_log_null(const char* msg) CI_HF;
++extern void ci_log_stderr(const char* msg) CI_HF;
++extern void ci_log_stdout(const char* msg) CI_HF;
++extern void ci_log_syslog(const char* msg) CI_HF;
++
++/*! Call the following to install special logging behaviours. */
++extern void ci_log_buffer_till_fail(void) CI_HF;
++extern void ci_log_buffer_till_exit(void) CI_HF;
++
++extern void __ci_log_unique(const char* msg) CI_HF;
++extern ci_log_fn_t __ci_log_unique_fn CI_HV;
++ci_inline void ci_log_uniquify(void) {
++ if( ci_log_fn != __ci_log_unique ) {
++ __ci_log_unique_fn = ci_log_fn;
++ ci_log_fn = __ci_log_unique;
++ }
++}
++
++extern void ci_log_file(const char* msg) CI_HF;
++extern int ci_log_file_fd CI_HV;
++
++extern void __ci_log_nth(const char* msg) CI_HF;
++extern ci_log_fn_t __ci_log_nth_fn CI_HV;
++extern int ci_log_nth_n CI_HV; /* default 100 */
++ci_inline void ci_log_nth(void) {
++ if( ci_log_fn != __ci_log_nth ) {
++ __ci_log_nth_fn = ci_log_fn;
++ ci_log_fn = __ci_log_nth;
++ }
++}
++
++extern int ci_log_level CI_HV;
++
++extern int ci_log_options CI_HV;
++#define CI_LOG_PID 0x1
++#define CI_LOG_TID 0x2
++#define CI_LOG_TIME 0x4
++#define CI_LOG_DELTA 0x8
++
++/**********************************************************************
++ * Used to define which mode we are in
++ */
++#if (defined(_WIN32) && !defined(__KERNEL__))
++typedef enum {
++ ci_log_md_NULL=0,
++ ci_log_md_ioctl,
++ ci_log_md_stderr,
++ ci_log_md_stdout,
++ ci_log_md_file,
++ ci_log_md_serial,
++ ci_log_md_syslog,
++ ci_log_md_pidfile
++} ci_log_mode_t;
++extern ci_log_mode_t ci_log_mode;
++#endif
++
++/**********************************************************************
++ * Pretty-printing.
++ */
++
++extern char ci_printable_char(char c) CI_HF;
++
++extern void (*ci_hex_dump_formatter)(char* buf, const ci_octet* s,
++ int i, int off, int len) CI_HV;
++extern void ci_hex_dump_format_octets(char*,const ci_octet*,int,int,int) CI_HF;
++extern void ci_hex_dump_format_dwords(char*,const ci_octet*,int,int,int) CI_HF;
++
++extern void ci_hex_dump_row(char* buf, volatile const void* s, int len,
++ ci_ptr_arith_t address) CI_HF;
++ /*!< A row contains up to 16 bytes. Row starts at [address & 15u],
++ ** therefore [len + (address & 15u)] must be <= 16.
++ */
++
++extern void ci_hex_dump(ci_log_fn_t, volatile const void*,
++ int len, ci_ptr_arith_t address) CI_HF;
++
++extern int ci_hex_dump_to_raw(const char* src_hex, void* buf,
++ unsigned* addr_out_opt, int* skip) CI_HF;
++ /*!< Recovers raw data from a single line of a hex dump. [buf] must be at
++ ** least 16 bytes long. Returns the number of bytes written to [buf] (in
++ ** range 1 -> 16), or -1 if [src_hex] doesn't contain hex data. Does not
++ ** cope with missing bytes at the start of a line.
++ */
++
++extern int ci_format_eth_addr(char* buf, const void* eth_mac_addr,
++ char sep) CI_HF;
++ /*!< This will write 18 characters to <buf> including terminating null.
++ ** Returns number of bytes written excluding null. If [sep] is zero, ':'
++ ** is used.
++ */
++
++extern int ci_parse_eth_addr(void* eth_mac_addr,
++ const char* str, char sep) CI_HF;
++ /*!< If [sep] is zero, absolutely any separator is accepted (even
++ ** inconsistent separators). Returns 0 on success, -1 on error.
++ */
++
++extern int ci_format_ip4_addr(char* buf, unsigned addr_be32) CI_HF;
++ /*!< Formats the IP address (in network endian) in dotted-quad. Returns
++ ** the number of bytes written (up to 15), excluding the null. [buf]
++ ** must be at least 16 bytes long.
++ */
++
++
++/**********************************************************************
++ * Error checking.
++ */
++
++extern void (*ci_fail_stop_fn)(void) CI_HV;
++
++extern void ci_fail_stop(void) CI_HF;
++extern void ci_fail_hang(void) CI_HF;
++extern void ci_fail_bomb(void) CI_HF;
++extern void ci_backtrace(void) CI_HF;
++
++#if defined __linux__ && !defined __KERNEL__
++extern void ci_fail_abort (void) CI_HF;
++#endif
++
++#ifdef __GNUC__
++extern void
++__ci_fail(const char*, ...) CI_PRINTF_LIKE(1,2) CI_HF;
++#else
++# if _PREFAST_
++ extern void _declspec(noreturn) __ci_fail(const char* fmt, ...);
++# else
++ extern void __ci_fail(const char* fmt, ...);
++# endif
++
++#endif
++
++#define ci_warn(x) \
++ do{ ci_log("WARN at %s:%d", __FILE__, __LINE__); ci_log x; }while(0)
++
++#define ci_fail(x) \
++ do{ ci_log("FAIL at %s:%d", __FILE__, __LINE__); __ci_fail x; }while(0)
++
++extern void __ci_sys_fail(const char* fn, int rc,
++ const char* file, int line) CI_HF;
++#define ci_sys_fail(fn, rc) __ci_sys_fail(fn, rc, __FILE__, __LINE__)
++
++/**********************************************************************
++ * Logging to buffer (src/citools/log_buffer.c)
++ */
++
++/*! Divert ci_log() messages to the log buffer;
++ * normally they go to the system console */
++extern void ci_log_buffer_till_fail(void) CI_HF;
++
++/*! Dump the contents of the log buffer to the system console */
++extern void ci_log_buffer_dump(void) CI_HF;
++
++
++/**********************************************************************
++ * Some useful pretty-printing.
++ */
++
++#ifdef __linux__
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s%s%s%s%s%s%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :""), \
++ (((x) & MSG_EOR ) ? "EOR " :""), \
++ (((x) & MSG_CTRUNC ) ? "CTRUNC " :""), \
++ (((x) & MSG_TRUNC ) ? "TRUNC " :""), \
++ (((x) & MSG_WAITALL ) ? "WAITALL " :""), \
++ (((x) & MSG_DONTWAIT ) ? "DONTWAIT " :""), \
++ (((x) & MSG_NOSIGNAL ) ? "NOSIGNAL " :""), \
++ (((x) & MSG_ERRQUEUE ) ? "ERRQUEUE " :""), \
++ (((x) & MSG_CONFIRM ) ? "CONFIRM " :"")
++#endif
++
++#ifdef _WIN32
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :"")
++#endif
++
++#ifdef __sun__
++# define CI_SOCKCALL_FLAGS_FMT "%s%s%s%s%s%s%s%s%s"
++
++# define CI_SOCKCALL_FLAGS_PRI_ARG(x) \
++ (((x) & MSG_OOB ) ? "OOB " :""), \
++ (((x) & MSG_PEEK ) ? "PEEK " :""), \
++ (((x) & MSG_DONTROUTE ) ? "DONTROUTE " :""), \
++ (((x) & MSG_EOR ) ? "EOR " :""), \
++ (((x) & MSG_CTRUNC ) ? "CTRUNC " :""), \
++ (((x) & MSG_TRUNC ) ? "TRUNC " :""), \
++ (((x) & MSG_WAITALL ) ? "WAITALL " :""), \
++ (((x) & MSG_DONTWAIT ) ? "DONTWAIT " :""), \
++ (((x) & MSG_NOTIFICATION) ? "NOTIFICATION" :"")
++#endif
++
++#endif /* __CI_TOOLS_LOG_H__ */
++/*! \cidoxg_end */
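A hedged sketch of redirecting the log stream using the hooks above
(my_log_sink is hypothetical, and the exact de-duplication behaviour
of ci_log_uniquify() is assumed from the declarations):

static void my_log_sink(const char *msg)
{
	printk(KERN_INFO "accel: %s\n", msg);	/* kernel-side sink */
}

static void my_log_setup(void)
{
	ci_set_log_prefix("sfc: ");	/* must point at static storage */
	ci_log_fn = my_log_sink;	/* divert all ci_log() output */
	ci_log_uniquify();		/* then filter repeated messages */
}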
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/platform/gcc_x86.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,361 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools_platform */
++
++#ifndef __CI_TOOLS_GCC_X86_H__
++#define __CI_TOOLS_GCC_X86_H__
++
++
++/**********************************************************************
++ * Free-running cycle counters.
++ */
++
++#define CI_HAVE_FRC64
++#define CI_HAVE_FRC32
++
++#define ci_frc32(pval) __asm__ __volatile__("rdtsc" : "=a" (*pval) : : "edx")
++
++#if defined(__x86_64__)
++ci_inline void ci_frc64(ci_uint64* pval) {
++ /* temp fix until we figure out how to get this out in one bite */
++ ci_uint64 low, high;
++ __asm__ __volatile__("rdtsc" : "=a" (low) , "=d" (high));
++ *pval = (high << 32) | low;
++}
++
++#else
++#define ci_frc64(pval) __asm__ __volatile__("rdtsc" : "=A" (*pval))
++#endif
++
++#define ci_frc_flush() /* ?? Need a pipeline barrier. */
++
++
++/**********************************************************************
++ * Atomic integer.
++ */
++
++/*
++** int ci_atomic_read(a) { return a->n; }
++** void ci_atomic_set(a, v) { a->n = v; }
++** void ci_atomic_inc(a) { ++a->n; }
++** void ci_atomic_dec(a) { --a->n; }
++** int ci_atomic_inc_and_test(a) { return ++a->n == 0; }
++** int ci_atomic_dec_and_test(a) { return --a->n == 0; }
++** void ci_atomic_and(a, v) { a->n &= v; }
++** void ci_atomic_or(a, v) { a->n |= v; }
++*/
++
++typedef struct { volatile ci_int32 n; } ci_atomic_t;
++
++#define CI_ATOMIC_INITIALISER(i) {(i)}
++
++static inline ci_int32 ci_atomic_read(const ci_atomic_t* a) { return a->n; }
++static inline void ci_atomic_set(ci_atomic_t* a, int v) { a->n = v; ci_wmb(); }
++
++static inline void ci_atomic_inc(ci_atomic_t* a)
++{ __asm__ __volatile__("lock; incl %0" : "+m" (a->n)); }
++
++
++static inline void ci_atomic_dec(ci_atomic_t* a)
++{ __asm__ __volatile__("lock; decl %0" : "+m" (a->n)); }
++
++static inline int ci_atomic_inc_and_test(ci_atomic_t* a) {
++ char r;
++ __asm__ __volatile__("lock; incl %0; sete %1"
++ : "+m" (a->n), "=qm" (r));
++ return r;
++}
++
++static inline int ci_atomic_dec_and_test(ci_atomic_t* a) {
++ char r;
++ __asm__ __volatile__("lock; decl %0; sete %1"
++ : "+m" (a->n), "=qm" (r));
++ return r;
++}
++
++ci_inline int
++ci_atomic_xadd (ci_atomic_t *a, int v) {
++ __asm__ ("lock xadd %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
++ return v;
++}
++ci_inline int
++ci_atomic_xchg (ci_atomic_t *a, int v) {
++ __asm__ ("lock xchg %0, %1" : "=r" (v), "+m" (a->n) : "0" (v));
++ return v;
++}
++
++ci_inline void ci_atomic32_or(volatile ci_uint32* p, ci_uint32 mask)
++{ __asm__ __volatile__("lock; orl %1, %0" : "+m" (*p) : "ir" (mask)); }
++
++ci_inline void ci_atomic32_and(volatile ci_uint32* p, ci_uint32 mask)
++{ __asm__ __volatile__("lock; andl %1, %0" : "+m" (*p) : "ir" (mask)); }
++
++ci_inline void ci_atomic32_add(volatile ci_uint32* p, ci_uint32 v)
++{ __asm__ __volatile__("lock; addl %1, %0" : "+m" (*p) : "ir" (v)); }
++
++#define ci_atomic_or(a, v) ci_atomic32_or ((ci_uint32*) &(a)->n, (v))
++#define ci_atomic_and(a, v) ci_atomic32_and((ci_uint32*) &(a)->n, (v))
++#define ci_atomic_add(a, v) ci_atomic32_add((ci_uint32*) &(a)->n, (v))
++
++extern int ci_glibc_uses_nptl (void) CI_HF;
++extern int ci_glibc_nptl_broken(void) CI_HF;
++extern int ci_glibc_gs_get_is_multihreaded_offset (void) CI_HF;
++extern int ci_glibc_gs_is_multihreaded_offset CI_HV;
++
++#if !defined(__x86_64__)
++#ifdef __GLIBC__
++/* Returns non-zero if the calling process might be multithreaded, returns 0 if
++ * it definitely isn't (i.e. if reimplementing this function for other
++ * architectures and platforms, you can safely just return 1).
++ */
++static inline int ci_is_multithreaded (void) {
++
++ while (1) {
++ if (ci_glibc_gs_is_multihreaded_offset >= 0) {
++ /* NPTL keeps a variable that tells us this hanging off gs (i.e. in thread-
++ * local storage); just return this
++ */
++ int r;
++ __asm__ __volatile__ ("movl %%gs:(%1), %0"
++ : "=r" (r)
++ : "r" (ci_glibc_gs_is_multihreaded_offset));
++ return r;
++ }
++
++ if (ci_glibc_gs_is_multihreaded_offset == -2) {
++ /* This means we've already determined that the libc version is NOT good
++ * for our funky "is multithreaded" hack
++ */
++ return 1;
++ }
++
++ /* If we get here, it means this is the first time the function has been
++ * called -- detect the libc version and go around again.
++ */
++ ci_glibc_gs_is_multihreaded_offset = ci_glibc_gs_get_is_multihreaded_offset ();
++
++ /* Go around again. We do the test here rather than at the top so that we go
++ * quicker in the common case
++ */
++ }
++}
++
++#else /* def __GLIBC__ */
++
++#define ci_is_multithreaded() 1 /* ?? Is there a POSIX way of finding */
++ /* out whether the application is */
++ /* single threaded? */
++
++#endif /* def __GLIBC__ */
++
++#else /* defined __x86_64__ */
++
++static inline int ci_is_multithreaded (void) {
++ /* No easy way to tell on x86_64, so assume we're multithreaded */
++ return 1;
++}
++
++#endif /* defined __x86_64__ */
++
++
++/**********************************************************************
++ * Compare and swap.
++ */
++
++#define CI_HAVE_COMPARE_AND_SWAP
++
++ci_inline int ci_cas32_succeed(volatile ci_int32* p, ci_int32 oldval,
++ ci_int32 newval) {
++ char ret;
++ ci_int32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas32_fail(volatile ci_int32* p, ci_int32 oldval,
++ ci_int32 newval) {
++ char ret;
++ ci_int32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++#ifdef __x86_64__
++ci_inline int ci_cas64_succeed(volatile ci_int64* p, ci_int64 oldval,
++ ci_int64 newval) {
++ char ret;
++ ci_int64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64_fail(volatile ci_int64* p, ci_int64 oldval,
++ ci_int64 newval) {
++ char ret;
++ ci_int64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++#endif
++
++ci_inline int ci_cas32u_succeed(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
++ char ret;
++ ci_uint32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas32u_fail(volatile ci_uint32* p, ci_uint32 oldval, ci_uint32 newval) {
++ char ret;
++ ci_uint32 prevval;
++ __asm__ __volatile__("lock; cmpxchgl %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64u_succeed(volatile ci_uint64* p, ci_uint64 oldval,
++ ci_uint64 newval) {
++ char ret;
++ ci_uint64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; sete %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++ci_inline int ci_cas64u_fail(volatile ci_uint64* p, ci_uint64 oldval,
++ ci_uint64 newval) {
++ char ret;
++ ci_uint64 prevval;
++ __asm__ __volatile__("lock; cmpxchgq %3, %1; setne %0"
++ : "=q"(ret), "+m"(*p), "=a"(prevval)
++ : "r"(newval), "a"(oldval));
++ return ret;
++}
++
++#ifdef __x86_64__
++
++# define ci_cas_uintptr_succeed(p,o,n) \
++ ci_cas64u_succeed((volatile ci_uint64*) (p), (o), (n))
++# define ci_cas_uintptr_fail(p,o,n) \
++ ci_cas64u_fail((volatile ci_uint64*) (p), (o), (n))
++
++#else
++
++# define ci_cas_uintptr_succeed(p,o,n) \
++ ci_cas32u_succeed((volatile ci_uint32*) (p), (o), (n))
++# define ci_cas_uintptr_fail(p,o,n) \
++ ci_cas32u_fail((volatile ci_uint32*) (p), (o), (n))
++
++#endif
++
++
++/**********************************************************************
++ * Atomic bit field.
++ */
++
++typedef ci_uint32 ci_bits;
++#define CI_BITS_N 32u
++
++#define CI_BITS_DECLARE(name, n) \
++ ci_bits name[((n) + CI_BITS_N - 1u) / CI_BITS_N]
++
++ci_inline void ci_bits_clear_all(volatile ci_bits* b, int n_bits)
++{ memset((void*) b, 0, (n_bits+CI_BITS_N-1u) / CI_BITS_N * sizeof(ci_bits)); }
++
++ci_inline void ci_bit_set(volatile ci_bits* b, int i) {
++ __asm__ __volatile__("lock; btsl %1, %0"
++ : "=m" (*b)
++ : "Ir" (i));
++}
++
++ci_inline void ci_bit_clear(volatile ci_bits* b, int i) {
++ __asm__ __volatile__("lock; btrl %1, %0"
++ : "=m" (*b)
++ : "Ir" (i));
++}
++
++ci_inline int ci_bit_test(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__("btl %2, %1; setc %0"
++ : "=r" (rc)
++ : "m" (*b), "Ir" (i));
++ return rc;
++}
++
++ci_inline int ci_bit_test_and_set(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__ __volatile__("lock; btsl %2, %1; setc %0"
++ : "=r" (rc), "+m" (*b)
++ : "Ir" (i));
++ return rc;
++}
++
++ci_inline int ci_bit_test_and_clear(volatile ci_bits* b, int i) {
++ char rc;
++ __asm__ __volatile__("lock; btrl %2, %1; setc %0"
++ : "=r" (rc), "+m" (*b)
++ : "Ir" (i));
++ return rc;
++}
++
++/* These mask ops only work within a single ci_bits word. */
++#define ci_bit_mask_set(b,m) ci_atomic32_or((b), (m))
++#define ci_bit_mask_clear(b,m) ci_atomic32_and((b), ~(m))
++
++
++/**********************************************************************
++ * Misc.
++ */
++
++#if __GNUC__ >= 3
++# define ci_spinloop_pause() __asm__("pause")
++#else
++# define ci_spinloop_pause() __asm__(".byte 0xf3, 0x90")
++#endif
++
++
++#define CI_HAVE_ADDC32
++#define ci_add_carry32(sum, v) __asm__("addl %1, %0 ;" \
++ "adcl $0, %0 ;" \
++ : "=r" (sum) \
++ : "g" ((ci_uint32) v), "0" (sum))
++
++
++#endif /* __CI_TOOLS_GCC_X86_H__ */
++
++/*! \cidoxg_end */
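As a sketch of how the compare-and-swap primitives above compose, a
lock-free bounded increment (my_bounded_inc is a hypothetical helper):

/* Returns 1 if the counter was incremented, 0 if already at the cap.
 * ci_cas32_fail() returns non-zero when the swap did not land, so we
 * simply retry with a fresh snapshot. */
static inline int my_bounded_inc(volatile ci_int32 *p, ci_int32 cap)
{
	ci_int32 old;

	do {
		old = *p;
		if (old >= cap)
			return 0;
	} while (ci_cas32_fail(p, old, old + 1));
	return 1;
}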
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/platform/linux_kernel.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,362 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++
++/*! \cidoxg_include_ci_tools_platform */
++
++#ifndef __CI_TOOLS_LINUX_KERNEL_H__
++#define __CI_TOOLS_LINUX_KERNEL_H__
++
++/**********************************************************************
++ * Need to know the kernel version.
++ */
++
++#ifndef LINUX_VERSION_CODE
++# include <linux/version.h>
++# ifndef UTS_RELEASE
++ /* 2.6.18 onwards defines UTS_RELEASE in a separate header */
++# include <linux/utsrelease.h>
++# endif
++#endif
++
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) || \
++ LINUX_VERSION_CODE >= KERNEL_VERSION(2,7,0)
++# error "Linux 2.6 required"
++#endif
++
++
++#include <linux/slab.h> /* kmalloc / kfree */
++#include <linux/vmalloc.h> /* vmalloc / vfree */
++#include <linux/interrupt.h>/* in_interrupt() */
++#include <linux/in.h>
++#include <linux/in6.h>
++#include <linux/spinlock.h>
++#include <linux/highmem.h>
++#include <linux/smp_lock.h>
++#include <linux/ctype.h>
++#include <linux/uio.h>
++#include <asm/current.h>
++#include <asm/errno.h>
++#include <asm/kmap_types.h>
++#include <asm/semaphore.h>
++
++#include <ci/tools/config.h>
++
++#define ci_in_irq in_irq
++#define ci_in_interrupt in_interrupt
++#define ci_in_atomic in_atomic
++
++
++/**********************************************************************
++ * Misc stuff.
++ */
++
++#ifdef BUG
++# define CI_BOMB BUG
++#endif
++
++ci_inline void* __ci_alloc(size_t n)
++{ return kmalloc(n, (in_interrupt() ? GFP_ATOMIC : GFP_KERNEL)); }
++
++ci_inline void* __ci_atomic_alloc(size_t n)
++{ return kmalloc(n, GFP_ATOMIC ); }
++
++ci_inline void __ci_free(void* p) { return kfree(p); }
++ci_inline void* __ci_vmalloc(size_t n) { return vmalloc(n); }
++ci_inline void __ci_vfree(void* p) { return vfree(p); }
++
++
++#if CI_MEMLEAK_DEBUG_ALLOC_TABLE
++ #define ci_alloc(s) ci_alloc_memleak_debug (s, __FILE__, __LINE__)
++ #define ci_atomic_alloc(s) ci_atomic_alloc_memleak_debug(s, __FILE__, __LINE__)
++ #define ci_free ci_free_memleak_debug
++ #define ci_vmalloc(s) ci_vmalloc_memleak_debug (s, __FILE__,__LINE__)
++ #define ci_vfree ci_vfree_memleak_debug
++ #define ci_alloc_fn ci_alloc_fn_memleak_debug
++ #define ci_vmalloc_fn ci_vmalloc_fn_memleak_debug
++#else /* !CI_MEMLEAK_DEBUG_ALLOC_TABLE */
++ #define ci_alloc_fn __ci_alloc
++ #define ci_vmalloc_fn __ci_vmalloc
++#endif
++
++#ifndef ci_alloc
++ #define ci_atomic_alloc __ci_atomic_alloc
++ #define ci_alloc __ci_alloc
++ #define ci_free __ci_free
++ #define ci_vmalloc __ci_vmalloc
++ #define ci_vmalloc_fn __ci_vmalloc
++ #define ci_vfree __ci_vfree
++#endif
++
++#define ci_sprintf sprintf
++#define ci_vsprintf vsprintf
++#define ci_snprintf snprintf
++#define ci_vsnprintf vsnprintf
++#define ci_sscanf sscanf
++
++
++#define CI_LOG_FN_DEFAULT ci_log_syslog
++
++
++/*--------------------------------------------------------------------
++ *
++ * irqs_disabled - needed for kmap helpers on some kernels
++ *
++ *--------------------------------------------------------------------*/
++#ifdef irqs_disabled
++# define ci_irqs_disabled irqs_disabled
++#else
++# if defined(__i386__) | defined(__x86_64__)
++# define ci_irqs_disabled(x) \
++ ({ \
++ unsigned long flags; \
++ local_save_flags(flags); \
++ !(flags & (1<<9)); \
++ })
++# else
++# error "Need to implement irqs_disabled() for your architecture"
++# endif
++#endif
++
++
++/**********************************************************************
++ * kmap helpers.
++ *
++ * Use ci_k(un)map for code paths which are not in an atomic context.
++ * For atomic code you need to use ci_k(un)map_in_atomic. This will grab
++ * one of the per-CPU kmap slots.
++ *
++ * NB in_interrupt != in_irq. If you don't know the difference then
++ * don't use kmap_in_atomic
++ *
++ * 2.4 allocates kmap slots by function. We are going to re-use the
++ * skb module's slot - we also use the same interlock
++ *
++ * 2.6 allocates kmap slots by type as well as by function. We are
++ * going to use the currently (2.6.10) unused SOFTIRQ slot
++ *
++ */
++
++ci_inline void* ci_kmap(struct page *page) {
++ CI_DEBUG(if( ci_in_atomic() | ci_in_interrupt() | ci_in_irq() ) BUG());
++ return kmap(page);
++}
++
++ci_inline void ci_kunmap(struct page *page) {
++ kunmap(page);
++}
++
++#define CI_KM_SLOT KM_SOFTIRQ0
++
++
++typedef struct semaphore ci_semaphore_t;
++
++ci_inline void
++ci_sem_init (ci_semaphore_t *sem, int val) {
++ sema_init (sem, val);
++}
++
++ci_inline void
++ci_sem_down (ci_semaphore_t *sem) {
++ down (sem);
++}
++
++ci_inline int
++ci_sem_trydown (ci_semaphore_t *sem) {
++ return down_trylock (sem);
++}
++
++ci_inline void
++ci_sem_up (ci_semaphore_t *sem) {
++ up (sem);
++}
++
++ci_inline int
++ci_sem_get_count(ci_semaphore_t *sem) {
++ return sem->count.counter;
++}
++
++ci_inline void* ci_kmap_in_atomic(struct page *page)
++{
++ CI_DEBUG(if( ci_in_irq() ) BUG());
++
++ /* iSCSI can call without in_interrupt() but with irqs_disabled()
++ and in a context that can't sleep, so we need to check that
++ too */
++ if(ci_in_interrupt() || ci_irqs_disabled())
++ return kmap_atomic(page, CI_KM_SLOT);
++ else
++ return kmap(page);
++}
++
++ci_inline void ci_kunmap_in_atomic(struct page *page, void* kaddr)
++{
++ CI_DEBUG(if( ci_in_irq() ) BUG());
++
++ /* iSCSI can call without in_interrupt() but with irqs_disabled()
++ and in a context that can't sleep, so we need to check that
++ too */
++ if(ci_in_interrupt() || ci_irqs_disabled())
++ kunmap_atomic(kaddr, CI_KM_SLOT);
++ else
++ kunmap(page);
++}
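A short hedged sketch of the pairing rule described above; callers must
match ci_kmap_in_atomic() with ci_kunmap_in_atomic() on the same page:

/* Copy n bytes out of a page from any context: the helpers above pick
 * kmap_atomic() only when sleeping is not allowed. */
static void my_copy_from_page(struct page *pg, void *dst, size_t n)
{
	void *src = ci_kmap_in_atomic(pg);

	memcpy(dst, src, n);
	ci_kunmap_in_atomic(pg, src);
}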
++
++/**********************************************************************
++ * spinlock implementation: used by <ci/tools/spinlock.h>
++ */
++
++#define CI_HAVE_SPINLOCKS
++
++typedef ci_uintptr_t ci_lock_holder_t;
++#define ci_lock_thisthread (ci_lock_holder_t)current
++#define ci_lock_no_holder (ci_lock_holder_t)NULL
++
++typedef spinlock_t ci_lock_i;
++typedef spinlock_t ci_irqlock_i;
++typedef unsigned long ci_irqlock_state_t;
++
++#define IRQLOCK_CYCLES 500000
++
++#define ci_lock_ctor_i(l) spin_lock_init(l)
++#define ci_lock_dtor_i(l) do{}while(0)
++#define ci_lock_lock_i(l) spin_lock(l)
++#define ci_lock_trylock_i(l) spin_trylock(l)
++#define ci_lock_unlock_i(l) spin_unlock(l)
++
++#define ci_irqlock_ctor_i(l) spin_lock_init(l)
++#define ci_irqlock_dtor_i(l) do{}while(0)
++#define ci_irqlock_lock_i(l,s) spin_lock_irqsave(l,*(s))
++#define ci_irqlock_unlock_i(l,s) spin_unlock_irqrestore(l, *(s))
++
++
++/**********************************************************************
++ * register access
++ */
++
++#include <asm/io.h>
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
++typedef volatile void __iomem* ioaddr_t;
++#else
++typedef unsigned long ioaddr_t;
++#endif
++
++
++
++/**********************************************************************
++ * thread implementation -- kernel dependencies probably should be
++ * moved to driver/linux_kernel.h
++ */
++
++#define ci_linux_daemonize(name) daemonize(name)
++
++#include <linux/workqueue.h>
++
++
++typedef struct {
++ void* (*fn)(void* arg);
++ void* arg;
++ const char* name;
++ int thrd_id;
++ struct completion exit_event;
++ struct work_struct keventd_witem;
++} ci_kernel_thread_t;
++
++
++typedef ci_kernel_thread_t* cithread_t;
++
++
++extern int cithread_create(cithread_t* tid, void* (*fn)(void*), void* arg,
++ const char* name);
++extern int cithread_detach(cithread_t kt);
++extern int cithread_join(cithread_t kt);
++
++
++/* Kernel sysctl variables. */
++extern int sysctl_tcp_wmem[3];
++extern int sysctl_tcp_rmem[3];
++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++#define LINUX_HAS_SYSCTL_MEM_MAX
++extern ci_uint32 sysctl_wmem_max;
++extern ci_uint32 sysctl_rmem_max;
++#endif
++
++
++/*--------------------------------------------------------------------
++ *
++ * ci_bigbuf_t: An abstraction of a large buffer. Needed because in the
++ * Linux kernel, large buffers need to be allocated with vmalloc(), whereas
++ * smaller buffers should use kmalloc(). This abstraction chooses the
++ * appropriate mechansim.
++ *
++ *--------------------------------------------------------------------*/
++
++typedef struct {
++ char* p;
++ int is_vmalloc;
++} ci_bigbuf_t;
++
++
++ci_inline int ci_bigbuf_alloc(ci_bigbuf_t* bb, size_t bytes) {
++ if( bytes >= CI_PAGE_SIZE && ! ci_in_atomic() ) {
++ bb->is_vmalloc = 1;
++ if( (bb->p = vmalloc(bytes)) ) return 0;
++ }
++ bb->is_vmalloc = 0;
++ bb->p = kmalloc(bytes, ci_in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
++ return bb->p ? 0 : -ENOMEM;
++}
++
++ci_inline void ci_bigbuf_free(ci_bigbuf_t* bb) {
++ if( bb->is_vmalloc ) vfree(bb->p);
++ else kfree(bb->p);
++}
++
++ci_inline char* ci_bigbuf_ptr(ci_bigbuf_t* bb)
++{ return bb->p; }
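A minimal sketch of the abstraction just defined; callers never need to
know which allocator was used (my_load_table is hypothetical):

static int my_load_table(size_t bytes)
{
	ci_bigbuf_t bb;

	if (ci_bigbuf_alloc(&bb, bytes) < 0)
		return -ENOMEM;
	memset(ci_bigbuf_ptr(&bb), 0, bytes);	/* ... use the buffer ... */
	ci_bigbuf_free(&bb);			/* vfree or kfree as needed */
	return 0;
}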
++
++/**********************************************************************
++ * struct iovec abstraction (for Windows port)
++ */
++
++typedef struct iovec ci_iovec;
++
++/* Accessors for buffer/length */
++#define CI_IOVEC_BASE(i) ((i)->iov_base)
++#define CI_IOVEC_LEN(i) ((i)->iov_len)
++
++/**********************************************************************
++ * Signals
++ */
++
++ci_inline void
++ci_send_sig(int signum)
++{
++ send_sig(signum, current, 0);
++}
++
++#endif /* __CI_TOOLS_LINUX_KERNEL_H__ */
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netback/ci/tools/sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netback/ci/tools/sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,132 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*! \cidoxg_include_ci_tools */
++
++#ifndef __CI_TOOLS_SYSDEP_H__
++#define __CI_TOOLS_SYSDEP_H__
++
++/* Make this header self-sufficient */
++#include <ci/compat.h>
++#include <ci/tools/log.h>
++#include <ci/tools/debug.h>
++
++
++/**********************************************************************
++ * Platform dependencies.
++ */
++
++#if defined(__KERNEL__)
++
++# if defined(__linux__)
++# include <ci/tools/platform/linux_kernel.h>
++# elif defined(_WIN32)
++# include <ci/tools/platform/win32_kernel.h>
++# elif defined(__sun__)
++# include <ci/tools/platform/sunos_kernel.h>
++# else
++# error Unknown platform.
++# endif
++
++#elif defined(_WIN32)
++
++# include <ci/tools/platform/win32.h>
++
++#elif defined(__unix__)
++
++# include <ci/tools/platform/unix.h>
++
++#else
++
++# error Unknown platform.
++
++#endif
++
++#if defined(__linux__)
++/*! Linux sendfile() support enable/disable. */
++# define CI_HAVE_SENDFILE /* provide sendfile i/f */
++
++# define CI_HAVE_OS_NOPAGE
++#endif
++
++#if defined(__sun__)
++# define CI_HAVE_SENDFILE /* provide sendfile i/f */
++# define CI_HAVE_SENDFILEV /* provide sendfilev i/f */
++
++# define CI_IOCTL_SENDFILE /* use efrm CI_SENDFILEV ioctl */
++#endif
++
++#if defined(_WIN32)
++typedef ci_uint32 ci_uerr_t; /* range of OS user-mode return codes */
++typedef ci_uint32 ci_kerr_t; /* range of OS kernel-mode return codes */
++#elif defined(__unix__)
++typedef ci_int32 ci_uerr_t; /* range of OS user-mode return codes */
++typedef ci_int32 ci_kerr_t; /* range of OS kernel-mode return codes */
++#endif
++
++
++/**********************************************************************
++ * Compiler and processor dependencies.
++ */
++
++#if defined(__GNUC__)
++
++#if defined(__i386__) || defined(__x86_64__)
++# include <ci/tools/platform/gcc_x86.h>
++#elif defined(__PPC__)
++# include <ci/tools/platform/gcc_ppc.h>
++#elif defined(__ia64__)
++# include <ci/tools/platform/gcc_ia64.h>
++#else
++# error Unknown processor.
++#endif
++
++#elif defined(_MSC_VER)
++
++#if defined(__i386__)
++# include <ci/tools/platform/msvc_x86.h>
++# elif defined(__x86_64__)
++# include <ci/tools/platform/msvc_x86_64.h>
++#else
++# error Unknown processor.
++#endif
++
++#elif defined(__PGI)
++
++# include <ci/tools/platform/pg_x86.h>
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# include <ci/tools/platform/gcc_x86.h>
++
++#else
++# error Unknown compiler.
++#endif
++
++
++#endif /* __CI_TOOLS_SYSDEP_H__ */
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/Makefile 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,11 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netfront -Idrivers/xen/sfc_netutil -Idrivers/xen/netfront
++EXTRA_CFLAGS += -D__ci_driver__
++EXTRA_CFLAGS += -Werror
++
++ifdef GCOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_FRONTEND) := sfc_netfront.o
++
++sfc_netfront-objs := accel_msg.o accel_bufs.o accel_netfront.o accel_vi.o accel_xenbus.o accel_tso.o accel_ssr.o accel_debugfs.o falcon_event.o falcon_vi.o pt_tx.o vi_init.o
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel.h 2008-02-26 10:54:11.000000000 +0100
+@@ -0,0 +1,477 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_H
++#define NETFRONT_ACCEL_H
++
++#include "accel_msg_iface.h"
++#include "accel_cuckoo_hash.h"
++#include "accel_bufs.h"
++
++#include "etherfabric/ef_vi.h"
++
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++
++#include <linux/kernel.h>
++#include <linux/list.h>
++
++enum netfront_accel_post_status {
++ NETFRONT_ACCEL_STATUS_GOOD,
++ NETFRONT_ACCEL_STATUS_BUSY,
++ NETFRONT_ACCEL_STATUS_CANT
++};
++
++#define NETFRONT_ACCEL_STATS 1
++#if NETFRONT_ACCEL_STATS
++#define NETFRONT_ACCEL_STATS_OP(x) x
++#else
++#define NETFRONT_ACCEL_STATS_OP(x)
++#endif
++
++
++enum netfront_accel_msg_state {
++ NETFRONT_ACCEL_MSG_NONE = 0,
++ NETFRONT_ACCEL_MSG_HELLO = 1,
++ NETFRONT_ACCEL_MSG_HW = 2
++};
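++
++/*
++ * Sketch of the expected progression, inferred from the message
++ * handling in accel_msg.c later in this patch: the state starts as
++ * NETFRONT_ACCEL_MSG_NONE, moves to NETFRONT_ACCEL_MSG_HELLO once the
++ * hello exchange with dom0 completes, and to NETFRONT_ACCEL_MSG_HW
++ * once the hardware description (SETHW) has been processed;
++ * buffer-map replies are only accepted in the HW state.
++ */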
++
++
++typedef struct {
++ u32 in_progress;
++ u32 total_len;
++ struct sk_buff *skb;
++} netfront_accel_jumbo_state;
++
++
++struct netfront_accel_ssr_state {
++ /** List of tracked connections. */
++ struct list_head conns;
++
++ /** Free efx_ssr_conn instances. */
++ struct list_head free_conns;
++};
++
++
++struct netfront_accel_netdev_stats {
++ /* Fastpath stats. */
++ u32 fastpath_rx_pkts;
++ u32 fastpath_rx_bytes;
++ u32 fastpath_rx_errors;
++ u32 fastpath_tx_pkts;
++ u32 fastpath_tx_bytes;
++ u32 fastpath_tx_errors;
++};
++
++
++struct netfront_accel_netdev_dbfs {
++ struct dentry *fastpath_rx_pkts;
++ struct dentry *fastpath_rx_bytes;
++ struct dentry *fastpath_rx_errors;
++ struct dentry *fastpath_tx_pkts;
++ struct dentry *fastpath_tx_bytes;
++ struct dentry *fastpath_tx_errors;
++};
++
++
++struct netfront_accel_stats {
++ /** Fast path events */
++ u64 fastpath_tx_busy;
++
++ /** TX DMA queue status */
++ u64 fastpath_tx_completions;
++
++ /** The number of events processed. */
++ u64 event_count;
++
++ /** Number of frame trunc events seen on fastpath */
++ u64 fastpath_frm_trunc;
++
++ /** Number of no rx descriptor trunc events seen on fastpath */
++ u64 rx_no_desc_trunc;
++
++ /** The number of misc bad events (e.g. RX_DISCARD) processed. */
++ u64 bad_event_count;
++
++ /** Number of events dealt with in poll loop */
++ u32 events_per_poll_max;
++ u32 events_per_poll_tx_max;
++ u32 events_per_poll_rx_max;
++
++ /** Largest number of concurrently outstanding tx descriptors */
++ u32 fastpath_tx_pending_max;
++
++ /** The number of events since the last interrupts. */
++ u32 event_count_since_irq;
++
++ /** The max number of events between interrupts. */
++ u32 events_per_irq_max;
++
++ /** The number of interrupts. */
++ u64 irq_count;
++
++ /** The number of useless interrupts. */
++ u64 useless_irq_count;
++
++ /** The number of polls scheduled. */
++ u64 poll_schedule_count;
++
++ /** The number of polls called. */
++ u64 poll_call_count;
++
++ /** The number of rechecks. */
++ u64 poll_reschedule_count;
++
++ /** Number of times we've called netif_stop_queue/netif_wake_queue */
++ u64 queue_stops;
++ u64 queue_wakes;
++
++ /** SSR stats */
++ u64 ssr_bursts;
++ u64 ssr_drop_stream;
++ u64 ssr_misorder;
++ u64 ssr_slow_start;
++ u64 ssr_merges;
++ u64 ssr_too_many;
++ u64 ssr_new_stream;
++};
++
++
++struct netfront_accel_dbfs {
++ struct dentry *fastpath_tx_busy;
++ struct dentry *fastpath_tx_completions;
++ struct dentry *fastpath_tx_pending_max;
++ struct dentry *fastpath_frm_trunc;
++ struct dentry *rx_no_desc_trunc;
++ struct dentry *event_count;
++ struct dentry *bad_event_count;
++ struct dentry *events_per_poll_max;
++ struct dentry *events_per_poll_rx_max;
++ struct dentry *events_per_poll_tx_max;
++ struct dentry *event_count_since_irq;
++ struct dentry *events_per_irq_max;
++ struct dentry *irq_count;
++ struct dentry *useless_irq_count;
++ struct dentry *poll_schedule_count;
++ struct dentry *poll_call_count;
++ struct dentry *poll_reschedule_count;
++ struct dentry *queue_stops;
++ struct dentry *queue_wakes;
++ struct dentry *ssr_bursts;
++ struct dentry *ssr_drop_stream;
++ struct dentry *ssr_misorder;
++ struct dentry *ssr_slow_start;
++ struct dentry *ssr_merges;
++ struct dentry *ssr_too_many;
++ struct dentry *ssr_new_stream;
++};
++
++
++typedef struct netfront_accel_vnic {
++ struct netfront_accel_vnic *next;
++
++ struct mutex vnic_mutex;
++
++ spinlock_t tx_lock;
++
++ struct netfront_accel_bufpages bufpages;
++ struct netfront_accel_bufinfo *rx_bufs;
++ struct netfront_accel_bufinfo *tx_bufs;
++
++ /** Hardware & VI state */
++ ef_vi vi;
++
++ ef_vi_state *vi_state;
++
++ ef_eventq_state evq_state;
++
++ void *evq_mapping;
++
++	/** Hardware-dependent state */
++ union {
++ struct {
++ /** Falcon A or B */
++ enum net_accel_hw_type type;
++ u32 *evq_rptr;
++ u32 *doorbell;
++ void *evq_rptr_mapping;
++ void *doorbell_mapping;
++ void *txdmaq_mapping;
++ void *rxdmaq_mapping;
++ } falcon;
++ } hw;
++
++ /** RX DMA queue status */
++ u32 rx_dma_level;
++
++ /** Number of RX descriptors waiting to be pushed to the card. */
++ u32 rx_dma_batched;
++#define NETFRONT_ACCEL_RX_DESC_BATCH 16
++
++ /**
++ * Hash table of remote mac addresses to decide whether to try
++ * fast path
++ */
++ cuckoo_hash_table fastpath_table;
++ spinlock_t table_lock;
++
++ /** the local mac address of virtual interface we're accelerating */
++ u8 mac[ETH_ALEN];
++
++ int rx_pkt_stride;
++ int rx_skb_stride;
++
++ /**
++ * Keep track of fragments of jumbo packets as events are
++ * delivered by NIC
++ */
++ netfront_accel_jumbo_state jumbo_state;
++
++ struct net_device *net_dev;
++
++ /** These two gate the enabling of fast path operations */
++ int frontend_ready;
++ int backend_netdev_up;
++
++ int irq_enabled;
++ spinlock_t irq_enabled_lock;
++
++ int tx_enabled;
++
++ int poll_enabled;
++
++ /** A spare slot for a TX packet. This is treated as an extension
++ * of the DMA queue. */
++ struct sk_buff *tx_skb;
++
++ /** Keep track of fragments of SSR packets */
++ struct netfront_accel_ssr_state ssr_state;
++
++ struct xenbus_device *dev;
++
++ /** Event channel for messages */
++ int msg_channel;
++ int msg_channel_irq;
++
++ /** Event channel for network interrupts. */
++ int net_channel;
++ int net_channel_irq;
++
++ struct net_accel_shared_page *shared_page;
++
++ grant_ref_t ctrl_page_gnt;
++ grant_ref_t msg_page_gnt;
++
++ /** Message Qs, 1 each way. */
++ sh_msg_fifo2 to_dom0;
++ sh_msg_fifo2 from_dom0;
++
++ enum netfront_accel_msg_state msg_state;
++
++ /** Watch on accelstate */
++ struct xenbus_watch backend_accel_watch;
++ /** Watch on frontend's MAC address */
++ struct xenbus_watch mac_address_watch;
++
++ /** Work to process received irq/msg */
++ struct work_struct msg_from_bend;
++
++ /** Wait queue for changes in accelstate. */
++ wait_queue_head_t state_wait_queue;
++
++ /** The current accelstate of this driver. */
++ XenbusState frontend_state;
++
++ /** The most recent accelstate seen by the xenbus watch. */
++ XenbusState backend_state;
++
++ /** Non-zero if we should reject requests to connect. */
++ int removing;
++
++ /** Non-zero if the domU shared state has been initialised. */
++ int domU_state_is_setup;
++
++ /** Non-zero if the dom0 shared state has been initialised. */
++ int dom0_state_is_setup;
++
++ /* Those statistics that are added to the netdev stats */
++ struct netfront_accel_netdev_stats netdev_stats;
++ struct netfront_accel_netdev_stats stats_last_read;
++#ifdef CONFIG_DEBUG_FS
++ struct netfront_accel_netdev_dbfs netdev_dbfs;
++#endif
++
++ /* These statistics are internal and optional */
++#if NETFRONT_ACCEL_STATS
++ struct netfront_accel_stats stats;
++#ifdef CONFIG_DEBUG_FS
++ struct netfront_accel_dbfs dbfs;
++#endif
++#endif
++
++	/** Debugfs dir for this interface */
++ struct dentry *dbfs_dir;
++} netfront_accel_vnic;
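++
++/*
++ * A rough sketch of the locking rules above, as used elsewhere in
++ * this patch: vnic_mutex serialises control-plane message processing;
++ * tx_lock protects tx_enabled and the tx_skb spare slot, with
++ * netfront's own tx_lock nested inside it where both are taken;
++ * table_lock guards the fastpath cuckoo hash; and irq_enabled_lock
++ * covers irq_enabled and poll_enabled.
++ */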
++
++
++/* Module parameters */
++extern unsigned sfc_netfront_max_pages;
++extern unsigned sfc_netfront_buffer_split;
++
++extern const char *frontend_name;
++extern struct netfront_accel_hooks accel_hooks;
++extern struct workqueue_struct *netfront_accel_workqueue;
++
++
++extern
++void netfront_accel_vi_ctor(netfront_accel_vnic *vnic);
++
++extern
++int netfront_accel_vi_init(netfront_accel_vnic *vnic,
++ struct net_accel_msg_hw *hw_msg);
++
++extern
++void netfront_accel_vi_dtor(netfront_accel_vnic *vnic);
++
++
++/**
++ * Add new buffers which have been registered with the NIC.
++ *
++ * @v vnic The vnic instance to process the response.
++ * @v is_rx Non-zero if the buffers are for the RX pool, zero for TX.
++ *
++ * The buffers contained in the message are added to the buffer pool.
++ */
++extern
++void netfront_accel_vi_add_bufs(netfront_accel_vnic *vnic, int is_rx);
++
++/**
++ * Put a packet on the tx DMA queue.
++ *
++ * @v vnic The vnic instance to accept the packet.
++ * @v skb A sk_buff to send.
++ *
++ * Attempt to send a packet. On success, the skb is owned by the DMA
++ * queue and will be released when the completion event arrives.
++ */
++extern enum netfront_accel_post_status
++netfront_accel_vi_tx_post(netfront_accel_vnic *vnic,
++ struct sk_buff *skb);
++
++
++/**
++ * Process events in response to an interrupt.
++ *
++ * @v vnic The vnic instance to poll.
++ * @v rx_packets The maximum number of rx packets to process.
++ * @ret rx_done The number of rx packets processed.
++ *
++ * The vnic will process events until there are no more events
++ * remaining or the specified number of rx packets has been processed.
++ * Processing is split out of the interrupt handler to support
++ * Linux NAPI polling.
++ */
++extern
++int netfront_accel_vi_poll(netfront_accel_vnic *vnic, int rx_packets);
++
++
++/**
++ * Iterate over the fragments of a packet buffer.
++ *
++ * @v skb The packet buffer to examine.
++ * @v idx A variable name for the fragment index.
++ * @v data A variable name for the address of the fragment data.
++ * @v length A variable name for the fragment length.
++ * @v code A section of code to execute for each fragment.
++ *
++ * This macro iterates over the fragments in a packet buffer and
++ * executes the code for each of them.
++ */
++#define NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT(skb, frag_idx, \
++ frag_data, frag_len, \
++ code) \
++ do { \
++ int frag_idx; \
++ void *frag_data; \
++ unsigned int frag_len; \
++ \
++ frag_data = skb->data; \
++ frag_len = skb_headlen(skb); \
++ frag_idx = 0; \
++ while (1) { /* For each fragment */ \
++ code; \
++ if (frag_idx >= skb_shinfo(skb)->nr_frags) { \
++ break; \
++ } else { \
++ skb_frag_t *fragment; \
++ fragment = &skb_shinfo(skb)->frags[frag_idx]; \
++ frag_len = fragment->size; \
++ frag_data = ((void*)page_address(fragment->page) \
++ + fragment->page_offset); \
++			} \
++ frag_idx++; \
++ } \
++ } while(0)
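++
++/*
++ * A minimal usage sketch for the iterator above; the function is
++ * hypothetical and for illustration only:
++ *
++ *	static unsigned example_total_len(struct sk_buff *skb)
++ *	{
++ *		unsigned total = 0;
++ *		NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT
++ *			(skb, idx, data, len, { total += len; });
++ *		return total;
++ *	}
++ *
++ * Each iteration sees the head area first, then each page fragment,
++ * so for packets without a frag_list the sum equals skb->len.
++ */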
++
++static inline
++void netfront_accel_disable_net_interrupts(netfront_accel_vnic *vnic)
++{
++ mask_evtchn(vnic->net_channel);
++}
++
++static inline
++void netfront_accel_enable_net_interrupts(netfront_accel_vnic *vnic)
++{
++ unmask_evtchn(vnic->net_channel);
++}
++
++void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
++ u32 ip, u16 port, u8 protocol);
++
++/* Process an IRQ received from back end driver */
++irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused);
++irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused);
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++extern void netfront_accel_msg_from_bend(struct work_struct *context);
++#else
++extern void netfront_accel_msg_from_bend(void *context);
++#endif
++
++extern void vnic_stop_fastpath(netfront_accel_vnic *vnic);
++
++extern int netfront_accel_probe(struct net_device *net_dev,
++ struct xenbus_device *dev);
++extern int netfront_accel_remove(struct xenbus_device *dev);
++extern void netfront_accel_set_closing(netfront_accel_vnic *vnic);
++
++extern int netfront_accel_vi_enable_interrupts(netfront_accel_vnic *vnic);
++
++extern void netfront_accel_debugfs_init(void);
++extern void netfront_accel_debugfs_fini(void);
++extern int netfront_accel_debugfs_create(netfront_accel_vnic *vnic);
++extern int netfront_accel_debugfs_remove(netfront_accel_vnic *vnic);
++
++#endif /* NETFRONT_ACCEL_H */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_bufs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_bufs.c 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,393 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/gnttab.h>
++
++#include "accel_bufs.h"
++#include "accel_util.h"
++
++#include "accel.h"
++
++
++static int
++netfront_accel_alloc_buf_desc_blocks(struct netfront_accel_bufinfo *manager,
++ int pages)
++{
++ manager->desc_blocks =
++ kzalloc(sizeof(struct netfront_accel_pkt_desc *) *
++ NETFRONT_ACCEL_BUF_NUM_BLOCKS(pages), GFP_KERNEL);
++ if (manager->desc_blocks == NULL) {
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++static int
++netfront_accel_alloc_buf_lists(struct netfront_accel_bufpages *bufpages,
++ int pages)
++{
++ bufpages->page_list = kmalloc(pages * sizeof(void *), GFP_KERNEL);
++ if (bufpages->page_list == NULL) {
++ return -ENOMEM;
++ }
++
++ bufpages->grant_list = kzalloc(pages * sizeof(grant_ref_t), GFP_KERNEL);
++ if (bufpages->grant_list == NULL) {
++ kfree(bufpages->page_list);
++ bufpages->page_list = NULL;
++ return -ENOMEM;
++ }
++
++ return 0;
++}
++
++
++int netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_manager,
++ struct netfront_accel_bufinfo *tx_manager,
++ int pages)
++{
++ int n, rc;
++
++ if ((rc = netfront_accel_alloc_buf_desc_blocks
++ (rx_manager, pages - (pages / sfc_netfront_buffer_split))) < 0) {
++ goto rx_fail;
++ }
++
++ if ((rc = netfront_accel_alloc_buf_desc_blocks
++ (tx_manager, pages / sfc_netfront_buffer_split)) < 0) {
++ goto tx_fail;
++ }
++
++ if ((rc = netfront_accel_alloc_buf_lists(bufpages, pages)) < 0) {
++ goto lists_fail;
++ }
++
++ for (n = 0; n < pages; n++) {
++ void *tmp = (void*)__get_free_page(GFP_KERNEL);
++ if (tmp == NULL)
++ break;
++
++ bufpages->page_list[n] = tmp;
++ }
++
++ if (n != pages) {
++ EPRINTK("%s: not enough pages: %d != %d\n", __FUNCTION__, n,
++ pages);
++		for (n--; n >= 0; n--)	/* slot n was never filled */
++ free_page((unsigned long)(bufpages->page_list[n]));
++ rc = -ENOMEM;
++ goto pages_fail;
++ }
++
++ bufpages->max_pages = pages;
++ bufpages->page_reqs = 0;
++
++ return 0;
++
++ pages_fail:
++ kfree(bufpages->page_list);
++ kfree(bufpages->grant_list);
++
++ bufpages->page_list = NULL;
++ bufpages->grant_list = NULL;
++ lists_fail:
++ kfree(tx_manager->desc_blocks);
++ tx_manager->desc_blocks = NULL;
++
++ tx_fail:
++ kfree(rx_manager->desc_blocks);
++ rx_manager->desc_blocks = NULL;
++ rx_fail:
++ return rc;
++}
++
++
++void netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_manager,
++ struct netfront_accel_bufinfo *tx_manager)
++{
++ int i;
++
++ for (i = 0; i < bufpages->max_pages; i++) {
++ if (bufpages->grant_list[i] != 0)
++ net_accel_ungrant_page(bufpages->grant_list[i]);
++ free_page((unsigned long)(bufpages->page_list[i]));
++ }
++
++ if (bufpages->max_pages) {
++ kfree(bufpages->page_list);
++ kfree(bufpages->grant_list);
++ kfree(rx_manager->desc_blocks);
++ kfree(tx_manager->desc_blocks);
++ }
++}
++
++
++/*
++ * Allocate memory for the buffer manager and set up its lock. If no
++ * lock is supplied, one is allocated internally.
++ */
++struct netfront_accel_bufinfo *netfront_accel_init_bufs(spinlock_t *lock)
++{
++ struct netfront_accel_bufinfo *res = kmalloc(sizeof(*res), GFP_KERNEL);
++ if (res != NULL) {
++ res->npages = res->nused = 0;
++ res->first_free = -1;
++
++ if (lock == NULL) {
++ res->lock = kmalloc(sizeof(*res->lock), GFP_KERNEL);
++ if (res->lock == NULL) {
++ kfree(res);
++ return NULL;
++ }
++ spin_lock_init(res->lock);
++ res->internally_locked = 1;
++ } else {
++ res->lock = lock;
++ res->internally_locked = 0;
++ }
++
++ res->desc_blocks = NULL;
++ }
++
++ return res;
++}
++
++
++void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *bufs)
++{
++ if (bufs->internally_locked)
++ kfree(bufs->lock);
++ kfree(bufs);
++}
++
++
++int netfront_accel_buf_map_request(struct xenbus_device *dev,
++ struct netfront_accel_bufpages *bufpages,
++ struct net_accel_msg *msg,
++ int pages, int offset)
++{
++ int i, mfn;
++ int err;
++
++ net_accel_msg_init(msg, NET_ACCEL_MSG_MAPBUF);
++
++ BUG_ON(pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
++
++ msg->u.mapbufs.pages = pages;
++
++ for (i = 0; i < msg->u.mapbufs.pages; i++) {
++ /*
++ * This can happen if we tried to send this message
++ * earlier but the queue was full.
++ */
++ if (bufpages->grant_list[offset+i] != 0) {
++ msg->u.mapbufs.grants[i] =
++ bufpages->grant_list[offset+i];
++ continue;
++ }
++
++ mfn = virt_to_mfn(bufpages->page_list[offset+i]);
++ VPRINTK("%s: Granting page %d, mfn %08x\n",
++ __FUNCTION__, i, mfn);
++
++ bufpages->grant_list[offset+i] =
++ net_accel_grant_page(dev, mfn, 0);
++ msg->u.mapbufs.grants[i] = bufpages->grant_list[offset+i];
++
++ if (msg->u.mapbufs.grants[i] < 0) {
++ EPRINTK("%s: Failed to grant buffer: %d\n",
++ __FUNCTION__, msg->u.mapbufs.grants[i]);
++ err = -EIO;
++ goto error;
++ }
++ }
++
++	/* This is interpreted on return as the offset into the page_list */
++ msg->u.mapbufs.reqid = offset;
++
++ return 0;
++
++error:
++ /* Ungrant all the pages we've successfully granted. */
++ for (i--; i >= 0; i--) {
++ net_accel_ungrant_page(bufpages->grant_list[offset+i]);
++ bufpages->grant_list[offset+i] = 0;
++ }
++ return err;
++}
++
++
++/* Process a response to a buffer request. */
++int netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *manager,
++ struct net_accel_msg *msg)
++{
++ int msg_pages, page_offset, i, newtot;
++ int old_block_count, new_block_count;
++ u32 msg_buf;
++ unsigned long flags;
++
++ VPRINTK("%s: manager %p msg %p\n", __FUNCTION__, manager, msg);
++
++ BUG_ON(msg->id != (NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY));
++
++ msg_pages = msg->u.mapbufs.pages;
++ msg_buf = msg->u.mapbufs.buf;
++ page_offset = msg->u.mapbufs.reqid;
++
++ spin_lock_irqsave(manager->lock, flags);
++ newtot = manager->npages + msg_pages;
++ old_block_count =
++ (manager->npages + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++ new_block_count =
++ (newtot + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++
++ for (i = old_block_count; i < new_block_count; i++) {
++ struct netfront_accel_pkt_desc *block;
++ if (manager->desc_blocks[i] != NULL) {
++ VPRINTK("Not needed\n");
++ continue;
++ }
++ block = kzalloc(NETFRONT_ACCEL_BUFS_PER_BLOCK *
++ sizeof(netfront_accel_pkt_desc), GFP_ATOMIC);
++ if (block == NULL) {
++ spin_unlock_irqrestore(manager->lock, flags);
++ return -ENOMEM;
++ }
++ manager->desc_blocks[i] = block;
++ }
++ for (i = manager->npages; i < newtot; i++) {
++ int k, j = i - manager->npages;
++ int block_num;
++ int block_idx;
++ struct netfront_accel_pkt_desc *pkt;
++
++ block_num = i >> NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
++ block_idx = (NETFRONT_ACCEL_BUFS_PER_PAGE*i)
++ & (NETFRONT_ACCEL_BUFS_PER_BLOCK-1);
++
++ pkt = manager->desc_blocks[block_num] + block_idx;
++
++ for (k = 0; k < NETFRONT_ACCEL_BUFS_PER_PAGE; k++) {
++ BUG_ON(page_offset + j >= bufpages->max_pages);
++
++ pkt[k].buf_id = NETFRONT_ACCEL_BUFS_PER_PAGE * i + k;
++ pkt[k].pkt_kva = bufpages->page_list[page_offset + j] +
++ (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) * k;
++ pkt[k].pkt_buff_addr = msg_buf +
++ (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) *
++ (NETFRONT_ACCEL_BUFS_PER_PAGE * j + k);
++ pkt[k].next_free = manager->first_free;
++ manager->first_free = pkt[k].buf_id;
++ *(int*)(pkt[k].pkt_kva) = pkt[k].buf_id;
++
++ VPRINTK("buf %d desc %p kva %p buffaddr %x\n",
++ pkt[k].buf_id, &(pkt[k]), pkt[k].pkt_kva,
++ pkt[k].pkt_buff_addr);
++ }
++ }
++ manager->npages = newtot;
++ spin_unlock_irqrestore(manager->lock, flags);
++ VPRINTK("Added %d pages. Total is now %d\n", msg_pages,
++ manager->npages);
++ return 0;
++}
++
++
++netfront_accel_pkt_desc *
++netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id)
++{
++ netfront_accel_pkt_desc *pkt;
++ int block_num = id >> NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT;
++ int block_idx = id & (NETFRONT_ACCEL_BUFS_PER_BLOCK - 1);
++ BUG_ON(id >= manager->npages * NETFRONT_ACCEL_BUFS_PER_PAGE);
++ BUG_ON(block_idx >= NETFRONT_ACCEL_BUFS_PER_BLOCK);
++ pkt = manager->desc_blocks[block_num] + block_idx;
++ return pkt;
++}
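++
++/*
++ * Worked example of the index arithmetic above, with the default
++ * shifts from accel_bufs.h (32 buffers per block): id 37 gives
++ * block_num = 37 >> 5 = 1 and block_idx = 37 & 31 = 5, i.e. the
++ * sixth descriptor in the second block.
++ */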
++
++
++/* Allocate a buffer from the buffer manager */
++netfront_accel_pkt_desc *
++netfront_accel_buf_get(struct netfront_accel_bufinfo *manager)
++{
++ int bufno = -1;
++ netfront_accel_pkt_desc *buf = NULL;
++ unsigned long flags = 0;
++
++ /* Any spare? */
++ if (manager->first_free == -1)
++ return NULL;
++ /* Take lock */
++ if (manager->internally_locked)
++ spin_lock_irqsave(manager->lock, flags);
++ bufno = manager->first_free;
++ if (bufno != -1) {
++ buf = netfront_accel_buf_find(manager, bufno);
++ manager->first_free = buf->next_free;
++ manager->nused++;
++ }
++ /* Release lock */
++ if (manager->internally_locked)
++ spin_unlock_irqrestore(manager->lock, flags);
++
++	/* Tell the world (buf may be NULL if we raced for the last buffer) */
++	if (buf != NULL)
++		VPRINTK("Allocated buffer %i, buffaddr %x\n", bufno,
++			buf->pkt_buff_addr);
++
++ return buf;
++}
++
++
++/* Release a buffer back to the buffer manager pool */
++int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager, u16 id)
++{
++ netfront_accel_pkt_desc *buf = netfront_accel_buf_find(manager, id);
++ unsigned long flags = 0;
++ unsigned was_empty = 0;
++ int bufno = id;
++
++ VPRINTK("Freeing buffer %i\n", id);
++ BUG_ON(id == (u16)-1);
++
++ if (manager->internally_locked)
++ spin_lock_irqsave(manager->lock, flags);
++
++ if (manager->first_free == -1)
++ was_empty = 1;
++
++ buf->next_free = manager->first_free;
++ manager->first_free = bufno;
++ manager->nused--;
++
++ if (manager->internally_locked)
++ spin_unlock_irqrestore(manager->lock, flags);
++
++ return was_empty;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_bufs.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_bufs.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,181 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_BUFS_H
++#define NETFRONT_ACCEL_BUFS_H
++
++#include <linux/skbuff.h>
++#include <linux/spinlock.h>
++#include <xen/xenbus.h>
++
++#include "accel_msg_iface.h"
++
++
++/*! Buffer descriptor structure */
++typedef struct netfront_accel_pkt_desc {
++ int buf_id;
++ u32 pkt_buff_addr;
++ void *pkt_kva;
++	/* The socket buffer currently associated with this buffer */
++ struct sk_buff *skb;
++ int next_free;
++} netfront_accel_pkt_desc;
++
++
++#define NETFRONT_ACCEL_DEFAULT_BUF_PAGES (384)
++#define NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT (4)
++#define NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK \
++ (1 << (NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT))
++#define NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT (1)
++#define NETFRONT_ACCEL_BUFS_PER_PAGE \
++ (1 << (NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT))
++#define NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT \
++ (NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT + \
++ NETFRONT_ACCEL_BUFS_PER_PAGE_SHIFT)
++#define NETFRONT_ACCEL_BUFS_PER_BLOCK \
++ (1 << NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT)
++#define NETFRONT_ACCEL_BUF_NUM_BLOCKS(max_pages) \
++ (((max_pages)+NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK-1) / \
++ NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK)
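++
++/*
++ * With the values above there are 16 pages per block and 2 buffers
++ * per page, hence 32 buffers per block; for the default of 384 pages,
++ * NETFRONT_ACCEL_BUF_NUM_BLOCKS(384) = (384 + 15) / 16 = 24 blocks.
++ */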
++
++/*! Buffer management structure. */
++struct netfront_accel_bufinfo {
++ /* number added to this manager */
++ unsigned npages;
++ /* number currently used from this manager */
++ unsigned nused;
++
++ int first_free;
++
++ int internally_locked;
++ spinlock_t *lock;
++
++ /*
++ * array of pointers (length NETFRONT_ACCEL_BUF_NUM_BLOCKS) to
++ * pkt descs
++ */
++ struct netfront_accel_pkt_desc **desc_blocks;
++};
++
++
++struct netfront_accel_bufpages {
++ /* length of lists of pages/grants */
++ int max_pages;
++ /* list of pages allocated for network buffers */
++ void **page_list;
++ /* list of grants for the above pages */
++ grant_ref_t *grant_list;
++
++ /* number of page requests that have been made */
++ unsigned page_reqs;
++};
++
++
++/*! Allocate memory for the buffer manager, set up locks etc.
++ * Optionally takes a lock to use; if none is supplied, it allocates its own.
++ *
++ * \return pointer to netfront_accel_bufinfo structure that represents the
++ * buffer manager
++ */
++extern struct netfront_accel_bufinfo *
++netfront_accel_init_bufs(spinlock_t *lock);
++
++/*! Allocate memory for the buffers
++ */
++extern int
++netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_res,
++ struct netfront_accel_bufinfo *tx_res,
++ int pages);
++extern void
++netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *rx_res,
++ struct netfront_accel_bufinfo *tx_res);
++
++/*! Release memory for the buffer manager, buffers, etc.
++ *
++ * \param manager pointer to netfront_accel_bufinfo structure that
++ * represents the buffer manager
++ */
++extern void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *manager);
++
++/*! Release a buffer.
++ *
++ * \param manager The buffer manager which owns the buffer.
++ * \param id The buffer identifier.
++ */
++extern int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager,
++ u16 id);
++
++/*! Get the packet descriptor associated with a buffer id.
++ *
++ * \param manager The buffer manager which owns the buffer.
++ * \param id The buffer identifier.
++ *
++ * The returned value is the packet descriptor for this buffer.
++ */
++extern netfront_accel_pkt_desc *
++netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id);
++
++
++/*! Fill out a message request for some buffers to be mapped by the
++ * back end driver
++ *
++ * \param bufpages The buffer pages whose grants are requested
++ * \param msg Pointer to a struct net_accel_msg to complete.
++ * \return 0 on success
++ */
++extern int
++netfront_accel_buf_map_request(struct xenbus_device *dev,
++ struct netfront_accel_bufpages *bufpages,
++ struct net_accel_msg *msg,
++ int pages, int offset);
++
++/*! Process a response to a buffer request.
++ *
++ * Deal with a received message from the back end in response to our
++ * request for buffers
++ *
++ * \param manager The buffer manager
++ * \param msg The received message from the back end describing new
++ * buffers
++ * \return 0 on success
++ */
++extern int
++netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
++ struct netfront_accel_bufinfo *manager,
++ struct net_accel_msg *msg);
++
++
++/*! Allocate a buffer from the buffer manager
++ *
++ * \param manager The buffer manager data structure
++ * \return Pointer to the allocated buffer descriptor, or NULL if
++ *         none is free.
++ */
++struct netfront_accel_pkt_desc *
++netfront_accel_buf_get(struct netfront_accel_bufinfo *manager);
++
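++/*
++ * A minimal lifecycle sketch for this API (hypothetical caller; error
++ * handling elided, and buffers must first be added via
++ * netfront_accel_alloc_buffer_mem()/netfront_accel_add_bufs() before
++ * netfront_accel_buf_get() can succeed):
++ *
++ *	struct netfront_accel_bufinfo *mgr = netfront_accel_init_bufs(NULL);
++ *	netfront_accel_pkt_desc *buf;
++ *	...
++ *	buf = netfront_accel_buf_get(mgr);
++ *	if (buf != NULL) {
++ *		... use buf->pkt_kva and buf->pkt_buff_addr ...
++ *		netfront_accel_buf_put(mgr, buf->buf_id);
++ *	}
++ *	netfront_accel_fini_bufs(mgr);
++ */
++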
++#endif /* NETFRONT_ACCEL_BUFS_H */
++
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_debugfs.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_debugfs.c 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,211 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/fs.h>
++#include <linux/debugfs.h>
++
++#include "accel.h"
++
++#if defined(CONFIG_DEBUG_FS)
++static struct dentry *sfc_debugfs_root = NULL;
++#endif
++
++void netfront_accel_debugfs_init(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ sfc_debugfs_root = debugfs_create_dir(frontend_name, NULL);
++#endif
++}
++
++
++void netfront_accel_debugfs_fini(void)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (sfc_debugfs_root)
++ debugfs_remove(sfc_debugfs_root);
++#endif
++}
++
++
++int netfront_accel_debugfs_create(netfront_accel_vnic *vnic)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (sfc_debugfs_root == NULL)
++ return -ENOENT;
++
++ vnic->dbfs_dir = debugfs_create_dir(vnic->net_dev->name,
++ sfc_debugfs_root);
++ if (vnic->dbfs_dir == NULL)
++ return -ENOMEM;
++
++ vnic->netdev_dbfs.fastpath_rx_pkts = debugfs_create_u32
++ ("fastpath_rx_pkts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_pkts);
++ vnic->netdev_dbfs.fastpath_rx_bytes = debugfs_create_u32
++ ("fastpath_rx_bytes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_bytes);
++ vnic->netdev_dbfs.fastpath_rx_errors = debugfs_create_u32
++ ("fastpath_rx_errors", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_rx_errors);
++ vnic->netdev_dbfs.fastpath_tx_pkts = debugfs_create_u32
++ ("fastpath_tx_pkts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_pkts);
++ vnic->netdev_dbfs.fastpath_tx_bytes = debugfs_create_u32
++ ("fastpath_tx_bytes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_bytes);
++ vnic->netdev_dbfs.fastpath_tx_errors = debugfs_create_u32
++ ("fastpath_tx_errors", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->netdev_stats.fastpath_tx_errors);
++
++#if NETFRONT_ACCEL_STATS
++ vnic->dbfs.irq_count = debugfs_create_u64
++ ("irq_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.irq_count);
++ vnic->dbfs.useless_irq_count = debugfs_create_u64
++ ("useless_irq_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.useless_irq_count);
++ vnic->dbfs.poll_schedule_count = debugfs_create_u64
++ ("poll_schedule_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_schedule_count);
++ vnic->dbfs.poll_call_count = debugfs_create_u64
++ ("poll_call_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_call_count);
++ vnic->dbfs.poll_reschedule_count = debugfs_create_u64
++ ("poll_reschedule_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.poll_reschedule_count);
++ vnic->dbfs.queue_stops = debugfs_create_u64
++ ("queue_stops", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.queue_stops);
++ vnic->dbfs.queue_wakes = debugfs_create_u64
++ ("queue_wakes", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.queue_wakes);
++ vnic->dbfs.ssr_bursts = debugfs_create_u64
++ ("ssr_bursts", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_bursts);
++ vnic->dbfs.ssr_drop_stream = debugfs_create_u64
++ ("ssr_drop_stream", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_drop_stream);
++ vnic->dbfs.ssr_misorder = debugfs_create_u64
++ ("ssr_misorder", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_misorder);
++ vnic->dbfs.ssr_slow_start = debugfs_create_u64
++ ("ssr_slow_start", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_slow_start);
++ vnic->dbfs.ssr_merges = debugfs_create_u64
++ ("ssr_merges", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_merges);
++ vnic->dbfs.ssr_too_many = debugfs_create_u64
++ ("ssr_too_many", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_too_many);
++ vnic->dbfs.ssr_new_stream = debugfs_create_u64
++ ("ssr_new_stream", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.ssr_new_stream);
++
++ vnic->dbfs.fastpath_tx_busy = debugfs_create_u64
++ ("fastpath_tx_busy", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_busy);
++ vnic->dbfs.fastpath_tx_completions = debugfs_create_u64
++ ("fastpath_tx_completions", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_completions);
++ vnic->dbfs.fastpath_tx_pending_max = debugfs_create_u32
++ ("fastpath_tx_pending_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_tx_pending_max);
++ vnic->dbfs.event_count = debugfs_create_u64
++ ("event_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.event_count);
++ vnic->dbfs.bad_event_count = debugfs_create_u64
++ ("bad_event_count", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.bad_event_count);
++ vnic->dbfs.event_count_since_irq = debugfs_create_u32
++ ("event_count_since_irq", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.event_count_since_irq);
++ vnic->dbfs.events_per_irq_max = debugfs_create_u32
++ ("events_per_irq_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_irq_max);
++ vnic->dbfs.fastpath_frm_trunc = debugfs_create_u64
++ ("fastpath_frm_trunc", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.fastpath_frm_trunc);
++ vnic->dbfs.rx_no_desc_trunc = debugfs_create_u64
++ ("rx_no_desc_trunc", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.rx_no_desc_trunc);
++ vnic->dbfs.events_per_poll_max = debugfs_create_u32
++ ("events_per_poll_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_max);
++ vnic->dbfs.events_per_poll_rx_max = debugfs_create_u32
++ ("events_per_poll_rx_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_rx_max);
++ vnic->dbfs.events_per_poll_tx_max = debugfs_create_u32
++ ("events_per_poll_tx_max", S_IRUSR | S_IRGRP | S_IROTH,
++ vnic->dbfs_dir, &vnic->stats.events_per_poll_tx_max);
++#endif
++#endif
++ return 0;
++}
++
++
++int netfront_accel_debugfs_remove(netfront_accel_vnic *vnic)
++{
++#if defined(CONFIG_DEBUG_FS)
++ if (vnic->dbfs_dir != NULL) {
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_pkts);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_bytes);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_rx_errors);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_pkts);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_bytes);
++ debugfs_remove(vnic->netdev_dbfs.fastpath_tx_errors);
++
++#if NETFRONT_ACCEL_STATS
++ debugfs_remove(vnic->dbfs.irq_count);
++ debugfs_remove(vnic->dbfs.useless_irq_count);
++ debugfs_remove(vnic->dbfs.poll_schedule_count);
++ debugfs_remove(vnic->dbfs.poll_call_count);
++ debugfs_remove(vnic->dbfs.poll_reschedule_count);
++ debugfs_remove(vnic->dbfs.queue_stops);
++ debugfs_remove(vnic->dbfs.queue_wakes);
++ debugfs_remove(vnic->dbfs.ssr_bursts);
++ debugfs_remove(vnic->dbfs.ssr_drop_stream);
++ debugfs_remove(vnic->dbfs.ssr_misorder);
++ debugfs_remove(vnic->dbfs.ssr_slow_start);
++ debugfs_remove(vnic->dbfs.ssr_merges);
++ debugfs_remove(vnic->dbfs.ssr_too_many);
++ debugfs_remove(vnic->dbfs.ssr_new_stream);
++
++ debugfs_remove(vnic->dbfs.fastpath_tx_busy);
++ debugfs_remove(vnic->dbfs.fastpath_tx_completions);
++ debugfs_remove(vnic->dbfs.fastpath_tx_pending_max);
++ debugfs_remove(vnic->dbfs.event_count);
++ debugfs_remove(vnic->dbfs.bad_event_count);
++ debugfs_remove(vnic->dbfs.event_count_since_irq);
++ debugfs_remove(vnic->dbfs.events_per_irq_max);
++ debugfs_remove(vnic->dbfs.fastpath_frm_trunc);
++ debugfs_remove(vnic->dbfs.rx_no_desc_trunc);
++ debugfs_remove(vnic->dbfs.events_per_poll_max);
++ debugfs_remove(vnic->dbfs.events_per_poll_rx_max);
++ debugfs_remove(vnic->dbfs.events_per_poll_tx_max);
++#endif
++ debugfs_remove(vnic->dbfs_dir);
++ }
++#endif
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_msg.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_msg.c 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,566 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/stddef.h>
++#include <linux/errno.h>
++
++#include <xen/xenbus.h>
++
++#include "accel.h"
++#include "accel_msg_iface.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++
++#include "netfront.h" /* drivers/xen/netfront/netfront.h */
++
++static void vnic_start_interrupts(netfront_accel_vnic *vnic)
++{
++ unsigned long flags;
++
++ /* Prime our interrupt */
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ if (!netfront_accel_vi_enable_interrupts(vnic)) {
++ /* Cripes, that was quick, better pass it up */
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_schedule_count++);
++ netif_rx_schedule(vnic->net_dev);
++ } else {
++ /*
++ * Nothing yet, make sure we get interrupts through
++ * back end
++ */
++ vnic->irq_enabled = 1;
++ netfront_accel_enable_net_interrupts(vnic);
++ }
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static void vnic_stop_interrupts(netfront_accel_vnic *vnic)
++{
++ unsigned long flags;
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static void vnic_start_fastpath(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ unsigned long flags;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ spin_lock_irqsave(&vnic->tx_lock, flags);
++ vnic->tx_enabled = 1;
++ spin_unlock_irqrestore(&vnic->tx_lock, flags);
++
++ netif_poll_disable(net_dev);
++ vnic->poll_enabled = 1;
++ netif_poll_enable(net_dev);
++
++ vnic_start_interrupts(vnic);
++}
++
++
++void vnic_stop_fastpath(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ struct netfront_info *np = (struct netfront_info *)netdev_priv(net_dev);
++ unsigned long flags1, flags2;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic_stop_interrupts(vnic);
++
++ spin_lock_irqsave(&vnic->tx_lock, flags1);
++ vnic->tx_enabled = 0;
++ spin_lock_irqsave(&np->tx_lock, flags2);
++ if (vnic->tx_skb != NULL) {
++ dev_kfree_skb_any(vnic->tx_skb);
++ vnic->tx_skb = NULL;
++ if (netfront_check_queue_ready(net_dev)) {
++ netif_wake_queue(net_dev);
++ NETFRONT_ACCEL_STATS_OP
++ (vnic->stats.queue_wakes++);
++ }
++ }
++ spin_unlock_irqrestore(&np->tx_lock, flags2);
++ spin_unlock_irqrestore(&vnic->tx_lock, flags1);
++
++ /* Must prevent polls and hold lock to modify poll_enabled */
++ netif_poll_disable(net_dev);
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags1);
++ vnic->poll_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags1);
++ netif_poll_enable(net_dev);
++}
++
++
++static void netfront_accel_interface_up(netfront_accel_vnic *vnic)
++{
++
++ if (!vnic->backend_netdev_up) {
++ vnic->backend_netdev_up = 1;
++
++ if (vnic->frontend_ready)
++ vnic_start_fastpath(vnic);
++ }
++}
++
++
++static void netfront_accel_interface_down(netfront_accel_vnic *vnic)
++{
++
++ if (vnic->backend_netdev_up) {
++ vnic->backend_netdev_up = 0;
++
++ if (vnic->frontend_ready)
++ vnic_stop_fastpath(vnic);
++ }
++}
++
++
++static int vnic_add_bufs(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int rc, offset;
++ struct netfront_accel_bufinfo *bufinfo;
++
++ BUG_ON(msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
++
++ offset = msg->u.mapbufs.reqid;
++
++ if (offset < vnic->bufpages.max_pages -
++ (vnic->bufpages.max_pages / sfc_netfront_buffer_split)) {
++ bufinfo = vnic->rx_bufs;
++	} else {
++		bufinfo = vnic->tx_bufs;
++	}
++
++ /* Queue up some Rx buffers to start things off. */
++ if ((rc = netfront_accel_add_bufs(&vnic->bufpages, bufinfo, msg)) == 0) {
++ netfront_accel_vi_add_bufs(vnic, bufinfo == vnic->rx_bufs);
++
++ if (offset + msg->u.mapbufs.pages == vnic->bufpages.max_pages) {
++ VPRINTK("%s: got all buffers back\n", __FUNCTION__);
++ vnic->frontend_ready = 1;
++ if (vnic->backend_netdev_up)
++ vnic_start_fastpath(vnic);
++ } else {
++ VPRINTK("%s: got buffers back %d %d\n", __FUNCTION__,
++ offset, msg->u.mapbufs.pages);
++ }
++ }
++
++ return rc;
++}
++
++
++/* The largest order o such that (1ul << o) <= n. Requires n > 0. */
++
++inline unsigned log2_le(unsigned long n) {
++ unsigned order = 1;
++ while ((1ul << order) <= n) ++order;
++ return (order - 1);
++}
++
++static int vnic_send_buffer_requests(netfront_accel_vnic *vnic,
++ struct netfront_accel_bufpages *bufpages)
++{
++ int pages, offset, rc = 0, sent = 0;
++ struct net_accel_msg msg;
++
++ while (bufpages->page_reqs < bufpages->max_pages) {
++ offset = bufpages->page_reqs;
++
++ pages = pow2(log2_le(bufpages->max_pages -
++ bufpages->page_reqs));
++ pages = pages < NET_ACCEL_MSG_MAX_PAGE_REQ ?
++ pages : NET_ACCEL_MSG_MAX_PAGE_REQ;
++
++ BUG_ON(offset < 0);
++ BUG_ON(pages <= 0);
++
++ rc = netfront_accel_buf_map_request(vnic->dev, bufpages,
++ &msg, pages, offset);
++ if (rc == 0) {
++ rc = net_accel_msg_send(vnic->shared_page,
++ &vnic->to_dom0, &msg);
++ if (rc < 0) {
++ VPRINTK("%s: queue full, stopping for now\n",
++ __FUNCTION__);
++ break;
++ }
++ sent++;
++ } else {
++ EPRINTK("%s: problem with grant, stopping for now\n",
++ __FUNCTION__);
++ break;
++ }
++
++ bufpages->page_reqs += pages;
++ }
++
++ if (sent)
++ net_accel_msg_notify(vnic->msg_channel_irq);
++
++ return rc;
++}
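++
++/*
++ * A worked example of the request sizing above: with max_pages = 384
++ * and no requests yet made, log2_le(384) = 8, so pow2(8) = 256 pages
++ * are requested (assuming NET_ACCEL_MSG_MAX_PAGE_REQ does not clamp
++ * it); the next iteration then asks for the remaining 128 pages.
++ */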
++
++
++/*
++ * In response to dom0 saying "my queue is full", we reply with this
++ * when it is no longer full
++ */
++inline void vnic_set_queue_not_full(netfront_accel_vnic *vnic)
++{
++
++ if (test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
++ (unsigned long *)&vnic->shared_page->aflags))
++ notify_remote_via_irq(vnic->msg_channel_irq);
++ else
++ VPRINTK("queue not full bit already set, not signalling\n");
++}
++
++/*
++ * Notify dom0 that the queue we want to use is full; it should
++ * respond by setting MSG_AFLAGS_QUEUEUNOTFULL in due course.
++ */
++inline void vnic_set_queue_full(netfront_accel_vnic *vnic)
++{
++
++ if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
++ (unsigned long *)&vnic->shared_page->aflags))
++ notify_remote_via_irq(vnic->msg_channel_irq);
++ else
++ VPRINTK("queue full bit already set, not signalling\n");
++}
++
++
++static int vnic_check_hello_version(unsigned version)
++{
++ if (version > NET_ACCEL_MSG_VERSION) {
++ /* Newer protocol, we must refuse */
++ return -EPROTO;
++ }
++
++ if (version < NET_ACCEL_MSG_VERSION) {
++ /*
++ * We are newer, so have discretion to accept if we
++ * wish. For now however, just reject
++ */
++ return -EPROTO;
++ }
++
++ BUG_ON(version != NET_ACCEL_MSG_VERSION);
++ return 0;
++}
++
++
++static int vnic_process_hello_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int err = 0;
++ unsigned pages = sfc_netfront_max_pages;
++
++ if (vnic_check_hello_version(msg->u.hello.version) < 0) {
++ msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY
++ | NET_ACCEL_MSG_ERROR;
++ msg->u.hello.version = NET_ACCEL_MSG_VERSION;
++ } else {
++ vnic->backend_netdev_up
++ = vnic->shared_page->net_dev_up;
++
++ msg->id = NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_REPLY;
++ msg->u.hello.version = NET_ACCEL_MSG_VERSION;
++ if (msg->u.hello.max_pages &&
++ msg->u.hello.max_pages < pages)
++ pages = msg->u.hello.max_pages;
++ msg->u.hello.max_pages = pages;
++
++ /* Half of pages for rx, half for tx */
++ err = netfront_accel_alloc_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs,
++ pages);
++ if (err)
++ msg->id |= NET_ACCEL_MSG_ERROR;
++ }
++
++ /* Send reply */
++ net_accel_msg_reply_notify(vnic->shared_page, vnic->msg_channel_irq,
++ &vnic->to_dom0, msg);
++ return err;
++}
++
++
++static int vnic_process_localmac_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ cuckoo_hash_mac_key key;
++
++ if (msg->u.localmac.flags & NET_ACCEL_MSG_ADD) {
++ DPRINTK("MAC has moved, could be local: " MAC_FMT "\n",
++ MAC_ARG(msg->u.localmac.mac));
++ key = cuckoo_mac_to_key(msg->u.localmac.mac);
++ spin_lock_irqsave(&vnic->table_lock, flags);
++ /* Try to remove it, not a big deal if not there */
++ cuckoo_hash_remove(&vnic->fastpath_table,
++ (cuckoo_hash_key *)&key);
++ spin_unlock_irqrestore(&vnic->table_lock, flags);
++ }
++
++ return 0;
++}
++
++
++static
++int vnic_process_rx_msg(netfront_accel_vnic *vnic,
++ struct net_accel_msg *msg)
++{
++ int err;
++
++ switch (msg->id) {
++ case NET_ACCEL_MSG_HELLO:
++ /* Hello, reply with Reply */
++ DPRINTK("got Hello, with version %.8x\n",
++ msg->u.hello.version);
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_NONE);
++ err = vnic_process_hello_msg(vnic, msg);
++ if (err == 0)
++ vnic->msg_state = NETFRONT_ACCEL_MSG_HELLO;
++ break;
++ case NET_ACCEL_MSG_SETHW:
++ /* Hardware info message */
++ DPRINTK("got H/W info\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HELLO);
++ err = netfront_accel_vi_init(vnic, &msg->u.hw);
++ if (err == 0)
++ vnic->msg_state = NETFRONT_ACCEL_MSG_HW;
++ break;
++ case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY:
++ VPRINTK("Got mapped buffers back\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = vnic_add_bufs(vnic, msg);
++ break;
++ case NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_ERROR:
++ /* No buffers. Can't use the fast path. */
++ EPRINTK("Got mapped buffers error. Cannot accelerate.\n");
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = -EIO;
++ break;
++ case NET_ACCEL_MSG_LOCALMAC:
++ /* Should be add, remove not currently used */
++ EPRINTK_ON(!(msg->u.localmac.flags & NET_ACCEL_MSG_ADD));
++ BUG_ON(vnic->msg_state != NETFRONT_ACCEL_MSG_HW);
++ err = vnic_process_localmac_msg(vnic, msg);
++ break;
++ default:
++ EPRINTK("Huh? Message code is 0x%x\n", msg->id);
++ err = -EPROTO;
++ break;
++ }
++
++ return err;
++}
++
++
++/* Process an IRQ received from back end driver */
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++void netfront_accel_msg_from_bend(struct work_struct *context)
++#else
++void netfront_accel_msg_from_bend(void *context)
++#endif
++{
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ netfront_accel_vnic *vnic =
++ container_of(context, netfront_accel_vnic, msg_from_bend);
++#else
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++#endif
++ struct net_accel_msg msg;
++ int err, queue_was_full = 0;
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ /*
++ * This happens when the shared pages have been unmapped but
++ * the workqueue has yet to be flushed
++ */
++ if (!vnic->dom0_state_is_setup)
++ goto unlock_out;
++
++ while ((vnic->shared_page->aflags & NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK)
++ != 0) {
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL) {
++ /* We've been told there may now be space. */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ }
++
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_QUEUE0FULL) {
++ /*
++ * There will be space at the end of this
++ * function if we can make any.
++ */
++ clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ queue_was_full = 1;
++ }
++
++ if (vnic->shared_page->aflags &
++ NET_ACCEL_MSG_AFLAGS_NETUPDOWN) {
++ DPRINTK("%s: net interface change\n", __FUNCTION__);
++ clear_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
++ (unsigned long *)&vnic->shared_page->aflags);
++ if (vnic->shared_page->net_dev_up)
++ netfront_accel_interface_up(vnic);
++ else
++ netfront_accel_interface_down(vnic);
++ }
++ }
++
++ /* Pull msg out of shared memory */
++ while ((err = net_accel_msg_recv(vnic->shared_page, &vnic->from_dom0,
++ &msg)) == 0) {
++ err = vnic_process_rx_msg(vnic, &msg);
++
++ if (err != 0)
++ goto done;
++ }
++
++ /*
++ * Send any pending buffer map request messages that we can,
++ * and mark domU->dom0 as full if necessary.
++ */
++ if (vnic->msg_state == NETFRONT_ACCEL_MSG_HW &&
++ vnic->bufpages.page_reqs < vnic->bufpages.max_pages) {
++ if (vnic_send_buffer_requests(vnic, &vnic->bufpages) == -ENOSPC)
++ vnic_set_queue_full(vnic);
++ }
++
++ /*
++ * If there are no messages then this is not an error. It
++ * just means that we've finished processing the queue.
++ */
++ if (err == -ENOENT)
++ err = 0;
++ done:
++ /* We will now have made space in the dom0->domU queue if we can */
++ if (queue_was_full)
++ vnic_set_queue_not_full(vnic);
++
++ if (err != 0) {
++ EPRINTK("%s returned %d\n", __FUNCTION__, err);
++ netfront_accel_set_closing(vnic);
++ }
++
++ unlock_out:
++ mutex_unlock(&vnic->vnic_mutex);
++
++ return;
++}
++
++
++irqreturn_t netfront_accel_msg_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++ VPRINTK("irq %d from device %s\n", irq, vnic->dev->nodename);
++
++ queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);
++
++ return IRQ_HANDLED;
++}
++
++/* Process an interrupt received from the NIC via backend */
++irqreturn_t netfront_accel_net_channel_irq_from_bend(int irq, void *context,
++ struct pt_regs *unused)
++{
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)context;
++ struct net_device *net_dev = vnic->net_dev;
++ unsigned long flags;
++
++ VPRINTK("net irq %d from device %s\n", irq, vnic->dev->nodename);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.irq_count++);
++
++	BUG_ON(net_dev == NULL);
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ if (vnic->irq_enabled) {
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++
++#if NETFRONT_ACCEL_STATS
++ vnic->stats.poll_schedule_count++;
++ if (vnic->stats.event_count_since_irq >
++ vnic->stats.events_per_irq_max)
++ vnic->stats.events_per_irq_max =
++ vnic->stats.event_count_since_irq;
++ vnic->stats.event_count_since_irq = 0;
++#endif
++ netif_rx_schedule(net_dev);
++	} else {
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.useless_irq_count++);
++ DPRINTK("%s: irq when disabled\n", __FUNCTION__);
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++void netfront_accel_msg_tx_fastpath(netfront_accel_vnic *vnic, const void *mac,
++ u32 ip, u16 port, u8 protocol)
++{
++ unsigned long lock_state;
++ struct net_accel_msg *msg;
++
++ msg = net_accel_msg_start_send(vnic->shared_page, &vnic->to_dom0,
++ &lock_state);
++
++ if (msg == NULL)
++ return;
++
++ net_accel_msg_init(msg, NET_ACCEL_MSG_FASTPATH);
++ msg->u.fastpath.flags = NET_ACCEL_MSG_REMOVE;
++ memcpy(msg->u.fastpath.mac, mac, ETH_ALEN);
++
++ msg->u.fastpath.port = port;
++ msg->u.fastpath.ip = ip;
++ msg->u.fastpath.proto = protocol;
++
++ net_accel_msg_complete_send_notify(vnic->shared_page, &vnic->to_dom0,
++ &lock_state, vnic->msg_channel_irq);
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_netfront.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_netfront.c 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,319 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/skbuff.h>
++#include <linux/netdevice.h>
++
++/* drivers/xen/netfront/netfront.h */
++#include "netfront.h"
++
++#include "accel.h"
++#include "accel_bufs.h"
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++#include "accel_ssr.h"
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++#endif
++
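++/*
++ * Recover the vnic pointer that netfront stashes for us in the
++ * accel_priv field of its private data.
++ */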
++#define NETFRONT_ACCEL_VNIC_FROM_NETDEV(_nd) \
++	((netfront_accel_vnic *)((struct netfront_info *)netdev_priv(_nd))->accel_priv)
++
++static int netfront_accel_netdev_start_xmit(struct sk_buff *skb,
++ struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++ int handled, rc;
++ unsigned long flags1, flags2;
++
++ BUG_ON(vnic == NULL);
++
++ /* Take our tx lock and hold for the duration */
++ spin_lock_irqsave(&vnic->tx_lock, flags1);
++
++ if (!vnic->tx_enabled) {
++ rc = 0;
++ goto unlock_out;
++ }
++
++ handled = netfront_accel_vi_tx_post(vnic, skb);
++ if (handled == NETFRONT_ACCEL_STATUS_BUSY) {
++ BUG_ON(vnic->net_dev != net_dev);
++ DPRINTK("%s stopping queue\n", __FUNCTION__);
++
++ /* Netfront's lock protects tx_skb */
++ spin_lock_irqsave(&np->tx_lock, flags2);
++ BUG_ON(vnic->tx_skb != NULL);
++ vnic->tx_skb = skb;
++ netif_stop_queue(net_dev);
++ spin_unlock_irqrestore(&np->tx_lock, flags2);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.queue_stops++);
++ }
++
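++	/*
++	 * Non-zero tells netfront the accelerator consumed the skb;
++	 * NETFRONT_ACCEL_STATUS_CANT means fall back to the ordinary
++	 * shared-ring path.
++	 */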
++ if (handled == NETFRONT_ACCEL_STATUS_CANT)
++ rc = 0;
++ else
++ rc = 1;
++
++unlock_out:
++ spin_unlock_irqrestore(&vnic->tx_lock, flags1);
++
++ return rc;
++}
++
++
++static int netfront_accel_netdev_poll(struct net_device *net_dev, int *budget)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ int rx_allowed = *budget, rx_done;
++
++ BUG_ON(vnic == NULL);
++
++ /* Can check this without lock as modifier excludes polls */
++ if (!vnic->poll_enabled)
++ return 0;
++
++ rx_done = netfront_accel_vi_poll(vnic, rx_allowed);
++ *budget -= rx_done;
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_call_count++);
++
++ VPRINTK("%s: done %d allowed %d\n",
++ __FUNCTION__, rx_done, rx_allowed);
++
++ netfront_accel_ssr_end_of_burst(vnic, &vnic->ssr_state);
++
++ if (rx_done < rx_allowed) {
++ return 0; /* Done */
++ }
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_reschedule_count++);
++
++ return 1; /* More to do. */
++}
++
++
++/*
++ * Process request from netfront to start napi interrupt mode (i.e.
++ * enable interrupts as it has finished polling)
++ */
++static int netfront_accel_start_napi_interrupts(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ unsigned long flags;
++
++ BUG_ON(vnic == NULL);
++
++ /*
++ * Can check this without lock as writer excludes poll before
++ * modifying
++ */
++ if (!vnic->poll_enabled)
++ return 0;
++
++ if (!netfront_accel_vi_enable_interrupts(vnic)) {
++ /*
++ * There was something there, tell caller we had
++ * something to do.
++ */
++ return 1;
++ }
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++ vnic->irq_enabled = 1;
++ netfront_accel_enable_net_interrupts(vnic);
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++
++ return 0;
++}
++
++
++/*
++ * Process request from netfront to stop napi interrupt mode (i.e.
++ * disable interrupts as it is starting to poll)
++ */
++static void netfront_accel_stop_napi_interrupts(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ unsigned long flags;
++
++ BUG_ON(vnic == NULL);
++
++ spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
++
++ if (!vnic->poll_enabled) {
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++ return;
++ }
++
++ netfront_accel_disable_net_interrupts(vnic);
++ vnic->irq_enabled = 0;
++ spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
++}
++
++
++static int netfront_accel_check_ready(struct net_device *net_dev)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++
++ BUG_ON(vnic == NULL);
++
++ /* This is protected by netfront's lock */
++ return vnic->tx_skb == NULL;
++}
++
++
++static int netfront_accel_get_stats(struct net_device *net_dev,
++ struct net_device_stats *stats)
++{
++ netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
++ struct netfront_accel_netdev_stats now;
++
++ BUG_ON(vnic == NULL);
++
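++	/*
++	 * The fastpath counters are free-running; add only the delta
++	 * since the last read so that counts accumulated by netfront
++	 * itself are preserved.
++	 */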
++ now.fastpath_rx_pkts = vnic->netdev_stats.fastpath_rx_pkts;
++ now.fastpath_rx_bytes = vnic->netdev_stats.fastpath_rx_bytes;
++ now.fastpath_rx_errors = vnic->netdev_stats.fastpath_rx_errors;
++ now.fastpath_tx_pkts = vnic->netdev_stats.fastpath_tx_pkts;
++ now.fastpath_tx_bytes = vnic->netdev_stats.fastpath_tx_bytes;
++ now.fastpath_tx_errors = vnic->netdev_stats.fastpath_tx_errors;
++
++ stats->rx_packets += (now.fastpath_rx_pkts -
++ vnic->stats_last_read.fastpath_rx_pkts);
++ stats->rx_bytes += (now.fastpath_rx_bytes -
++ vnic->stats_last_read.fastpath_rx_bytes);
++ stats->rx_errors += (now.fastpath_rx_errors -
++ vnic->stats_last_read.fastpath_rx_errors);
++ stats->tx_packets += (now.fastpath_tx_pkts -
++ vnic->stats_last_read.fastpath_tx_pkts);
++ stats->tx_bytes += (now.fastpath_tx_bytes -
++ vnic->stats_last_read.fastpath_tx_bytes);
++ stats->tx_errors += (now.fastpath_tx_errors -
++ vnic->stats_last_read.fastpath_tx_errors);
++
++ vnic->stats_last_read = now;
++
++ return 0;
++}
++
++
++struct netfront_accel_hooks accel_hooks = {
++ .new_device = &netfront_accel_probe,
++ .remove = &netfront_accel_remove,
++ .netdev_poll = &netfront_accel_netdev_poll,
++ .start_xmit = &netfront_accel_netdev_start_xmit,
++ .start_napi_irq = &netfront_accel_start_napi_interrupts,
++ .stop_napi_irq = &netfront_accel_stop_napi_interrupts,
++ .check_ready = &netfront_accel_check_ready,
++ .get_stats = &netfront_accel_get_stats
++};
++
++
++unsigned sfc_netfront_max_pages = NETFRONT_ACCEL_DEFAULT_BUF_PAGES;
++module_param_named(max_pages, sfc_netfront_max_pages, uint, 0644);
++MODULE_PARM_DESC(max_pages, "Number of buffer pages to request");
++
++unsigned sfc_netfront_buffer_split = 2;
++module_param_named(buffer_split, sfc_netfront_buffer_split, uint, 0644);
++MODULE_PARM_DESC(buffer_split,
++ "Fraction of buffers to use for TX, rest for RX");
++
++
++const char *frontend_name = "sfc_netfront";
++
++struct workqueue_struct *netfront_accel_workqueue;
++
++static int __init netfront_accel_init(void)
++{
++ int rc;
++#ifdef EFX_GCOV
++ gcov_provider_init(THIS_MODULE);
++#endif
++
++ /*
++ * If we're running on dom0, netfront hasn't initialised
++ * itself, so we need to keep away
++ */
++ if (is_initial_xendomain())
++ return 0;
++
++ if (!is_pow2(sizeof(struct net_accel_msg)))
++ EPRINTK("%s: bad structure size\n", __FUNCTION__);
++
++	netfront_accel_workqueue = create_workqueue(frontend_name);
++	if (netfront_accel_workqueue == NULL)
++		return -ENOMEM;
++
++ netfront_accel_debugfs_init();
++
++ rc = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
++ frontend_name, &accel_hooks);
++
++ if (rc < 0) {
++ EPRINTK("Xen netfront accelerator version mismatch\n");
++ return -EINVAL;
++ }
++
++ if (rc > 0) {
++ /*
++ * In future may want to add backwards compatibility
++ * and accept certain subsets of previous versions
++ */
++ EPRINTK("Xen netfront accelerator version mismatch\n");
++ return -EINVAL;
++ }
++
++ return 0;
++}
++module_init(netfront_accel_init);
++
++static void __exit netfront_accel_exit(void)
++{
++ if (is_initial_xendomain())
++ return;
++
++ DPRINTK("%s: unhooking\n", __FUNCTION__);
++
++ /* Unhook from normal netfront */
++ netfront_accelerator_stop(frontend_name);
++
++ DPRINTK("%s: done\n", __FUNCTION__);
++
++ netfront_accel_debugfs_fini();
++
++ flush_workqueue(netfront_accel_workqueue);
++
++ destroy_workqueue(netfront_accel_workqueue);
++
++#ifdef EFX_GCOV
++ gcov_provider_fini(THIS_MODULE);
++#endif
++ return;
++}
++module_exit(netfront_accel_exit);
++
++MODULE_LICENSE("GPL");
++
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_ssr.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_ssr.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,308 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/socket.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/list.h>
++#include <net/ip.h>
++#include <net/checksum.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++
++#include "accel_ssr.h"
++
++static inline int list_valid(struct list_head *lh) {
++	return lh->next != NULL;
++}
++
++static void netfront_accel_ssr_deliver(struct netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct netfront_accel_ssr_conn *c);
++
++/** Construct a netfront_accel_ssr_state.
++ *
++ * @v st The SSR state (per channel per port)
++ */
++void netfront_accel_ssr_init(struct netfront_accel_ssr_state *st) {
++ unsigned i;
++
++ INIT_LIST_HEAD(&st->conns);
++ INIT_LIST_HEAD(&st->free_conns);
++ for (i = 0; i < 8; ++i) {
++ struct netfront_accel_ssr_conn *c =
++ kmalloc(sizeof(*c), GFP_KERNEL);
++ if (c == NULL) break;
++ c->n_in_order_pkts = 0;
++ c->skb = NULL;
++ list_add(&c->link, &st->free_conns);
++ }
++
++}
++
++
++/** Destructor for a netfront_accel_ssr_state.
++ *
++ * @v st The SSR state (per channel per port)
++ */
++void netfront_accel_ssr_fini(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st) {
++ struct netfront_accel_ssr_conn *c;
++
++	/* Return cleanly if netfront_accel_ssr_init() not previously called */
++ BUG_ON(list_valid(&st->conns) != list_valid(&st->free_conns));
++ if (! list_valid(&st->conns))
++ return;
++
++ while ( ! list_empty(&st->free_conns)) {
++ c = list_entry(st->free_conns.prev,
++ struct netfront_accel_ssr_conn, link);
++ list_del(&c->link);
++ BUG_ON(c->skb != NULL);
++ kfree(c);
++ }
++ while ( ! list_empty(&st->conns)) {
++ c = list_entry(st->conns.prev,
++ struct netfront_accel_ssr_conn, link);
++ list_del(&c->link);
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ kfree(c);
++ }
++}
++
++
++/** Calc IP checksum and deliver to the OS
++ *
++ * @v st The SSR state (per channel per port)
++ * @v c The SSR connection state
++ */
++static void netfront_accel_ssr_deliver(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct netfront_accel_ssr_conn *c) {
++ BUG_ON(c->skb == NULL);
++
++ /*
++ * If we've chained packets together, recalculate the IP
++ * checksum.
++ */
++ if (skb_shinfo(c->skb)->frag_list) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_bursts);
++ c->iph->check = 0;
++ c->iph->check = ip_fast_csum((unsigned char *) c->iph,
++ c->iph->ihl);
++ }
++
++ VPRINTK("%s: %d\n", __FUNCTION__, c->skb->len);
++
++ netif_receive_skb(c->skb);
++ c->skb = NULL;
++}
++
++
++/** Push held skbs down into network stack.
++ *
++ * @v st SSR state
++ *
++ * Only called if we are tracking one or more connections.
++ */
++void __netfront_accel_ssr_end_of_burst(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st) {
++ struct netfront_accel_ssr_conn *c;
++
++ BUG_ON(list_empty(&st->conns));
++
++ list_for_each_entry(c, &st->conns, link)
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++
++ /* Time-out connections that have received no traffic for 20ms. */
++ c = list_entry(st->conns.prev, struct netfront_accel_ssr_conn,
++ link);
++ if (jiffies - c->last_pkt_jiffies > (HZ / 50 + 1)) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_drop_stream);
++ list_del(&c->link);
++ list_add(&c->link, &st->free_conns);
++ }
++}
++
++
++/** Process SKB and decide whether to dispatch it to the stack now or
++ * later.
++ *
++ * @v st SSR state
++ * @v skb SKB to examine
++ * @ret rc 0 => deliver SKB to kernel now, otherwise the SKB belongs
++ * to us.
++ */
++int netfront_accel_ssr_skb(struct netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct sk_buff *skb) {
++ int data_length, dont_merge;
++ struct netfront_accel_ssr_conn *c;
++ struct iphdr *iph;
++ struct tcphdr *th;
++ unsigned th_seq;
++
++ BUG_ON(skb_shinfo(skb)->frag_list != NULL);
++ BUG_ON(skb->next != NULL);
++
++ /* We're not interested if it isn't TCP over IPv4. */
++ iph = (struct iphdr *) skb->data;
++ if (skb->protocol != htons(ETH_P_IP) ||
++ iph->protocol != IPPROTO_TCP) {
++ return 0;
++ }
++
++ /* Ignore segments that fail csum or are fragmented. */
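++	/*
++	 * (ip_summed - CHECKSUM_UNNECESSARY) is non-zero unless hardware
++	 * validated the checksum; ORing in the fragment bits lets one
++	 * branch reject both cases.
++	 */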
++ if (unlikely((skb->ip_summed - CHECKSUM_UNNECESSARY) |
++ (iph->frag_off & htons(IP_MF | IP_OFFSET)))) {
++ return 0;
++ }
++
++ th = (struct tcphdr*)(skb->data + iph->ihl * 4);
++ data_length = ntohs(iph->tot_len) - iph->ihl * 4 - th->doff * 4;
++ th_seq = ntohl(th->seq);
++ dont_merge = (data_length == 0) | th->urg | th->syn | th->rst;
++
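++	/*
++	 * Match the 4-tuple against each tracked connection: the OR of
++	 * the field differences is zero iff all four fields match.
++	 */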
++ list_for_each_entry(c, &st->conns, link) {
++ if ((c->saddr - iph->saddr) |
++ (c->daddr - iph->daddr) |
++ (c->source - th->source) |
++ (c->dest - th->dest ))
++ continue;
++
++ /* Re-insert at head of list to reduce lookup time. */
++ list_del(&c->link);
++ list_add(&c->link, &st->conns);
++ c->last_pkt_jiffies = jiffies;
++
++ if (unlikely(th_seq - c->next_seq)) {
++ /* Out-of-order, so start counting again. */
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ c->n_in_order_pkts = 0;
++ c->next_seq = th_seq + data_length;
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_misorder);
++ return 0;
++ }
++ c->next_seq = th_seq + data_length;
++
++ if (++c->n_in_order_pkts < 300) {
++ /* May be in slow-start, so don't merge. */
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_slow_start);
++ return 0;
++ }
++
++ if (unlikely(dont_merge)) {
++ if (c->skb)
++ netfront_accel_ssr_deliver(vnic, st, c);
++ return 0;
++ }
++
++ if (c->skb) {
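++			/* Extend the head skb's IP total length by the new payload */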
++ c->iph->tot_len = ntohs(c->iph->tot_len);
++ c->iph->tot_len += data_length;
++ c->iph->tot_len = htons(c->iph->tot_len);
++ c->th->ack_seq = th->ack_seq;
++ c->th->fin |= th->fin;
++ c->th->psh |= th->psh;
++ c->th->window = th->window;
++
++ /* Remove the headers from this skb. */
++ skb_pull(skb, skb->len - data_length);
++
++ /*
++ * Tack the new skb onto the head skb's frag_list.
++ * This is exactly the format that fragmented IP
++ * datagrams are reassembled into.
++ */
++			BUG_ON(skb->next != NULL);
++ if ( ! skb_shinfo(c->skb)->frag_list)
++ skb_shinfo(c->skb)->frag_list = skb;
++ else
++ c->skb_tail->next = skb;
++ c->skb_tail = skb;
++ c->skb->len += skb->len;
++ c->skb->data_len += skb->len;
++ c->skb->truesize += skb->truesize;
++
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_merges);
++
++ /*
++ * If the next packet might push this super-packet
++ * over the limit for an IP packet, deliver it now.
++ * This is slightly conservative, but close enough.
++ */
++ if (c->skb->len +
++ (PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE)
++ > 16384)
++ netfront_accel_ssr_deliver(vnic, st, c);
++
++ return 1;
++ }
++ else {
++ c->iph = iph;
++ c->th = th;
++ c->skb = skb;
++ return 1;
++ }
++ }
++
++ /* We're not yet tracking this connection. */
++
++ if (dont_merge) {
++ return 0;
++ }
++
++ if (list_empty(&st->free_conns)) {
++ c = list_entry(st->conns.prev,
++ struct netfront_accel_ssr_conn,
++ link);
++ if (c->skb) {
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_too_many);
++ return 0;
++ }
++ }
++ else {
++ c = list_entry(st->free_conns.next,
++ struct netfront_accel_ssr_conn,
++ link);
++ }
++ list_del(&c->link);
++ list_add(&c->link, &st->conns);
++ c->saddr = iph->saddr;
++ c->daddr = iph->daddr;
++ c->source = th->source;
++ c->dest = th->dest;
++ c->next_seq = th_seq + data_length;
++ c->n_in_order_pkts = 0;
++ BUG_ON(c->skb != NULL);
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.ssr_new_stream);
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_ssr.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_ssr.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,88 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_SSR_H
++#define NETFRONT_ACCEL_SSR_H
++
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/list.h>
++
++#include "accel.h"
++
++/** State for Soft Segment Reassembly (SSR). */
++
++struct netfront_accel_ssr_conn {
++ struct list_head link;
++
++ unsigned saddr, daddr;
++ unsigned short source, dest;
++
++ /** Number of in-order packets we've seen with payload. */
++ unsigned n_in_order_pkts;
++
++ /** Next in-order sequence number. */
++ unsigned next_seq;
++
++ /** Time we last saw a packet on this connection. */
++ unsigned long last_pkt_jiffies;
++
++ /** The SKB we are currently holding. If NULL, then all following
++ * fields are undefined.
++ */
++ struct sk_buff *skb;
++
++ /** The tail of the frag_list of SKBs we're holding. Only valid
++ * after at least one merge.
++ */
++ struct sk_buff *skb_tail;
++
++ /** The IP header of the skb we are holding. */
++ struct iphdr *iph;
++
++ /** The TCP header of the skb we are holding. */
++ struct tcphdr *th;
++};
++
++extern void netfront_accel_ssr_init(struct netfront_accel_ssr_state *st);
++extern void netfront_accel_ssr_fini(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st);
++
++extern void
++__netfront_accel_ssr_end_of_burst(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st);
++
++extern int netfront_accel_ssr_skb(netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st,
++ struct sk_buff *skb);
++
++static inline void
++netfront_accel_ssr_end_of_burst (netfront_accel_vnic *vnic,
++ struct netfront_accel_ssr_state *st) {
++ if ( ! list_empty(&st->conns) )
++ __netfront_accel_ssr_end_of_burst(vnic, st);
++}
++
++#endif /* NETFRONT_ACCEL_SSR_H */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_tso.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_tso.c 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,511 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/pci.h>
++#include <linux/tcp.h>
++#include <linux/ip.h>
++#include <linux/in.h>
++#include <linux/if_ether.h>
++
++#include "accel.h"
++#include "accel_util.h"
++
++#include "accel_tso.h"
++
++#define PTR_DIFF(p1, p2) ((u8*)(p1) - (u8*)(p2))
++#define ETH_HDR_LEN(skb) ((skb)->nh.raw - (skb)->data)
++#define SKB_TCP_OFF(skb) PTR_DIFF ((skb)->h.th, (skb)->data)
++#define SKB_IP_OFF(skb) PTR_DIFF ((skb)->nh.iph, (skb)->data)
++
++/*
++ * Set a maximum number of buffers in each output packet to make life
++ * a little simpler - if this limit is reached, it simply moves on to
++ * another packet.
++ */
++#define ACCEL_TSO_MAX_BUFFERS (6)
++
++/** TSO State.
++ *
++ * The state used during segmentation. It is put into this data structure
++ * just to make it easy to pass into inline functions.
++ */
++struct netfront_accel_tso_state {
++ /** bytes of data we've yet to segment */
++ unsigned remaining_len;
++
++ /** current sequence number */
++ unsigned seqnum;
++
++ /** remaining space in current packet */
++ unsigned packet_space;
++
++ /** List of packets to be output, containing the buffers and
++ * iovecs to describe each packet
++ */
++ struct netfront_accel_tso_output_packet *output_packets;
++
++ /** Total number of buffers in output_packets */
++ unsigned buffers;
++
++ /** Total number of packets in output_packets */
++ unsigned packets;
++
++ /** Input Fragment Cursor.
++ *
++ * Where we are in the current fragment of the incoming SKB. These
++ * values get updated in place when we split a fragment over
++ * multiple packets.
++ */
++ struct {
++ /** address of current position */
++ void *addr;
++ /** remaining length */
++ unsigned int len;
++ } ifc; /* == ifc Input Fragment Cursor */
++
++ /** Parameters.
++ *
++ * These values are set once at the start of the TSO send and do
++ * not get changed as the routine progresses.
++ */
++ struct {
++ /* the number of bytes of header */
++ unsigned int header_length;
++
++ /* The number of bytes to put in each outgoing segment. */
++ int full_packet_size;
++
++ /* Current IP ID, host endian. */
++ unsigned ip_id;
++
++ /* Max size of each output packet payload */
++ int gso_size;
++ } p;
++};
++
++
++/**
++ * Verify that our various assumptions about sk_buffs and the conditions
++ * under which TSO will be attempted hold true.
++ *
++ * @v skb The sk_buff to check.
++ */
++static inline void tso_check_safe(struct sk_buff *skb) {
++ EPRINTK_ON(skb->protocol != htons (ETH_P_IP));
++ EPRINTK_ON(((struct ethhdr*) skb->data)->h_proto != htons (ETH_P_IP));
++ EPRINTK_ON(skb->nh.iph->protocol != IPPROTO_TCP);
++ EPRINTK_ON((SKB_TCP_OFF(skb)
++ + (skb->h.th->doff << 2u)) > skb_headlen(skb));
++}
++
++
++
++/** Parse the SKB header and initialise state. */
++static inline void tso_start(struct netfront_accel_tso_state *st,
++ struct sk_buff *skb) {
++
++ /*
++ * All ethernet/IP/TCP headers combined size is TCP header size
++ * plus offset of TCP header relative to start of packet.
++ */
++ st->p.header_length = (skb->h.th->doff << 2u) + SKB_TCP_OFF(skb);
++ st->p.full_packet_size = (st->p.header_length
++ + skb_shinfo(skb)->gso_size);
++ st->p.gso_size = skb_shinfo(skb)->gso_size;
++
++ st->p.ip_id = htons(skb->nh.iph->id);
++ st->seqnum = ntohl(skb->h.th->seq);
++
++ EPRINTK_ON(skb->h.th->urg);
++ EPRINTK_ON(skb->h.th->syn);
++ EPRINTK_ON(skb->h.th->rst);
++
++ st->remaining_len = skb->len - st->p.header_length;
++
++ st->output_packets = NULL;
++ st->buffers = 0;
++ st->packets = 0;
++
++ VPRINTK("Starting new TSO: hl %d ps %d gso %d seq %x len %d\n",
++ st->p.header_length, st->p.full_packet_size, st->p.gso_size,
++ st->seqnum, skb->len);
++}
++
++/**
++ * Add another NIC mapped buffer onto an output packet
++ */
++static inline int tso_start_new_buffer(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st,
++ int first)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++
++ /* Get a mapped packet buffer */
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ return -1;
++ }
++
++ /* Store a bit of meta-data at the end */
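++	/*
++	 * Buffer layout: [payload, NETFRONT_ACCEL_TSO_BUF_LENGTH bytes]
++	 * [struct netfront_accel_tso_output_packet]
++	 * [struct netfront_accel_tso_buffer]
++	 */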
++	tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TSO_BUF_LENGTH
++ + sizeof(struct netfront_accel_tso_output_packet));
++
++ tso_buf->buf = buf;
++
++ tso_buf->length = 0;
++
++ if (first) {
++ struct netfront_accel_tso_output_packet *output_packet
++ = (struct netfront_accel_tso_output_packet *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TSO_BUF_LENGTH);
++ output_packet->next = st->output_packets;
++ st->output_packets = output_packet;
++ tso_buf->next = NULL;
++ st->output_packets->tso_bufs = tso_buf;
++ st->output_packets->tso_bufs_len = 1;
++ } else {
++ tso_buf->next = st->output_packets->tso_bufs;
++ st->output_packets->tso_bufs = tso_buf;
++		st->output_packets->tso_bufs_len++;
++ }
++
++ BUG_ON(st->output_packets->tso_bufs_len > ACCEL_TSO_MAX_BUFFERS);
++
++	st->buffers++;
++
++ /*
++ * Store the context, set to NULL, last packet buffer will get
++ * non-NULL later
++ */
++ tso_buf->buf->skb = NULL;
++
++ return 0;
++}
++
++
++/* Generate a new header, and prepare for the new packet.
++ *
++ * @v vnic VNIC
++ * @v skb Socket buffer
++ * @v st TSO state
++ * @ret rc 0 on success, or -1 if failed to alloc header
++ */
++
++static inline
++int tso_start_new_packet(netfront_accel_vnic *vnic,
++ struct sk_buff *skb,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct iphdr *tsoh_iph;
++ struct tcphdr *tsoh_th;
++ unsigned ip_length;
++
++ if (tso_start_new_buffer(vnic, st, 1) < 0) {
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return -1;
++ }
++
++ /* This has been set up by tso_start_new_buffer() */
++ tso_buf = st->output_packets->tso_bufs;
++
++ /* Copy in the header */
++ memcpy(tso_buf->buf->pkt_kva, skb->data, st->p.header_length);
++ tso_buf->length = st->p.header_length;
++
++ tsoh_th = (struct tcphdr*)
++ (tso_buf->buf->pkt_kva + SKB_TCP_OFF(skb));
++ tsoh_iph = (struct iphdr*)
++ (tso_buf->buf->pkt_kva + SKB_IP_OFF(skb));
++
++ /* Set to zero to encourage falcon to fill these in */
++ tsoh_th->check = 0;
++ tsoh_iph->check = 0;
++
++ tsoh_th->seq = htonl(st->seqnum);
++ st->seqnum += st->p.gso_size;
++
++ if (st->remaining_len > st->p.gso_size) {
++ /* This packet will not finish the TSO burst. */
++ ip_length = st->p.full_packet_size - ETH_HDR_LEN(skb);
++ tsoh_th->fin = 0;
++ tsoh_th->psh = 0;
++ } else {
++ /* This packet will be the last in the TSO burst. */
++ ip_length = (st->p.header_length - ETH_HDR_LEN(skb)
++ + st->remaining_len);
++ tsoh_th->fin = skb->h.th->fin;
++ tsoh_th->psh = skb->h.th->psh;
++ }
++
++ tsoh_iph->tot_len = htons(ip_length);
++
++ /* Linux leaves suitable gaps in the IP ID space for us to fill. */
++ tsoh_iph->id = st->p.ip_id++;
++ tsoh_iph->id = htons(tsoh_iph->id);
++
++ st->packet_space = st->p.gso_size;
++
++ st->packets++;
++
++ return 0;
++}
++
++
++
++static inline void tso_get_fragment(struct netfront_accel_tso_state *st,
++ int len, void *addr)
++{
++ st->ifc.len = len;
++ st->ifc.addr = addr;
++ return;
++}
++
++
++static inline void tso_unwind(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_tso_output_packet *output_packet;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ while (st->output_packets != NULL) {
++ output_packet = st->output_packets;
++ st->output_packets = output_packet->next;
++ while (output_packet->tso_bufs != NULL) {
++ tso_buf = output_packet->tso_bufs;
++ output_packet->tso_bufs = tso_buf->next;
++
++			st->buffers--;
++			output_packet->tso_bufs_len--;
++
++ netfront_accel_buf_put(vnic->tx_bufs,
++ tso_buf->buf->buf_id);
++ }
++ }
++ BUG_ON(st->buffers != 0);
++}
++
++
++
++static inline
++void tso_fill_packet_with_fragment(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ int n, space;
++
++ BUG_ON(st->output_packets == NULL);
++ BUG_ON(st->output_packets->tso_bufs == NULL);
++
++ tso_buf = st->output_packets->tso_bufs;
++
++ if (st->ifc.len == 0) return;
++ if (st->packet_space == 0) return;
++ if (tso_buf->length == NETFRONT_ACCEL_TSO_BUF_LENGTH) return;
++
++ n = min(st->ifc.len, st->packet_space);
++
++ space = NETFRONT_ACCEL_TSO_BUF_LENGTH - tso_buf->length;
++ n = min(n, space);
++
++ st->packet_space -= n;
++ st->remaining_len -= n;
++ st->ifc.len -= n;
++
++ memcpy(tso_buf->buf->pkt_kva + tso_buf->length, st->ifc.addr, n);
++
++ tso_buf->length += n;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TSO_BUF_LENGTH);
++
++ st->ifc.addr += n;
++
++ return;
++}
++
++
++int netfront_accel_enqueue_skb_tso(netfront_accel_vnic *vnic,
++ struct sk_buff *skb)
++{
++ struct netfront_accel_tso_state state;
++ struct netfront_accel_tso_buffer *tso_buf = NULL;
++ struct netfront_accel_tso_output_packet *reversed_list = NULL;
++ struct netfront_accel_tso_output_packet *tmp_pkt;
++ ef_iovec iovecs[ACCEL_TSO_MAX_BUFFERS];
++ int frag_i, rc, dma_id;
++ skb_frag_t *f;
++
++ tso_check_safe(skb);
++
++ if (skb->ip_summed != CHECKSUM_HW)
++ EPRINTK("Trying to TSO send a packet without HW checksum\n");
++
++ tso_start(&state, skb);
++
++ /*
++ * Setup the first payload fragment. If the skb header area
++ * contains exactly the headers and all payload is in the frag
++	 * list, things are a little simpler
++ */
++ if (skb_headlen(skb) == state.p.header_length) {
++ /* Grab the first payload fragment. */
++ BUG_ON(skb_shinfo(skb)->nr_frags < 1);
++ frag_i = 0;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ tso_get_fragment(&state, f->size,
++ page_address(f->page) + f->page_offset);
++ } else {
++ int hl = state.p.header_length;
++ tso_get_fragment(&state, skb_headlen(skb) - hl,
++ skb->data + hl);
++ frag_i = -1;
++ }
++
++ if (tso_start_new_packet(vnic, skb, &state) < 0) {
++ DPRINTK("%s: out of first start-packet memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++ while (1) {
++ tso_fill_packet_with_fragment(vnic, &state);
++
++ /* Move onto the next fragment? */
++ if (state.ifc.len == 0) {
++ if (++frag_i >= skb_shinfo(skb)->nr_frags)
++ /* End of payload reached. */
++ break;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ tso_get_fragment(&state, f->size,
++ page_address(f->page) +
++ f->page_offset);
++ }
++
++ /* Start a new buffer? */
++ if ((state.output_packets->tso_bufs->length ==
++ NETFRONT_ACCEL_TSO_BUF_LENGTH) &&
++ tso_start_new_buffer(vnic, &state, 0)) {
++ DPRINTK("%s: out of start-buffer memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++		/* Start a new packet? */
++ if ((state.packet_space == 0 ||
++ ((state.output_packets->tso_bufs_len >=
++ ACCEL_TSO_MAX_BUFFERS) &&
++ (state.output_packets->tso_bufs->length >=
++ NETFRONT_ACCEL_TSO_BUF_LENGTH))) &&
++ tso_start_new_packet(vnic, skb, &state) < 0) {
++ DPRINTK("%s: out of start-packet memory\n",
++ __FUNCTION__);
++ goto unwind;
++ }
++
++ }
++
++ /* Check for space */
++ if (ef_vi_transmit_space(&vnic->vi) < state.buffers) {
++ DPRINTK("%s: Not enough TX space (%d)\n",
++ __FUNCTION__, state.buffers);
++ goto unwind;
++ }
++
++ /*
++ * Store the skb context in the most recent buffer (i.e. the
++ * last buffer that will be sent)
++ */
++ state.output_packets->tso_bufs->buf->skb = skb;
++
++ /* Reverse the list of packets as we construct it on a stack */
++ while (state.output_packets != NULL) {
++ tmp_pkt = state.output_packets;
++ state.output_packets = tmp_pkt->next;
++ tmp_pkt->next = reversed_list;
++ reversed_list = tmp_pkt;
++ }
++
++ /* Pass off to hardware */
++ while (reversed_list != NULL) {
++ tmp_pkt = reversed_list;
++ reversed_list = tmp_pkt->next;
++
++ BUG_ON(tmp_pkt->tso_bufs_len > ACCEL_TSO_MAX_BUFFERS);
++ BUG_ON(tmp_pkt->tso_bufs_len == 0);
++
++ dma_id = tmp_pkt->tso_bufs->buf->buf_id;
++
++ /*
++ * Make an iovec of the buffers in the list, reversing
++ * the buffers as we go as they are constructed on a
++ * stack
++ */
++ tso_buf = tmp_pkt->tso_bufs;
++ for (frag_i = tmp_pkt->tso_bufs_len - 1;
++ frag_i >= 0;
++ frag_i--) {
++ iovecs[frag_i].iov_base = tso_buf->buf->pkt_buff_addr;
++ iovecs[frag_i].iov_len = tso_buf->length;
++ tso_buf = tso_buf->next;
++ }
++
++ rc = ef_vi_transmitv(&vnic->vi, iovecs, tmp_pkt->tso_bufs_len,
++ dma_id);
++ /*
++ * We checked for space already, so it really should
++ * succeed
++ */
++ BUG_ON(rc != 0);
++ }
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++ vnic->netdev_stats.fastpath_tx_pkts += state.packets;
++#if NETFRONT_ACCEL_STATS
++ {
++ unsigned n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++
++ return NETFRONT_ACCEL_STATUS_GOOD;
++
++ unwind:
++ tso_unwind(vnic, &state);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++
++ return NETFRONT_ACCEL_STATUS_BUSY;
++}
++
++
++
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_tso.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_tso.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,57 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETFRONT_ACCEL_TSO_H
++#define NETFRONT_ACCEL_TSO_H
++
++#include "accel_bufs.h"
++
++/* Track the buffers used in each output packet */
++struct netfront_accel_tso_buffer {
++ struct netfront_accel_tso_buffer *next;
++ struct netfront_accel_pkt_desc *buf;
++ unsigned length;
++};
++
++/* Track the output packets formed from each input packet */
++struct netfront_accel_tso_output_packet {
++ struct netfront_accel_tso_output_packet *next;
++ struct netfront_accel_tso_buffer *tso_bufs;
++ unsigned tso_bufs_len;
++};
++
++
++/*
++ * Max available space in a buffer for data once meta-data has taken
++ * its place
++ */
++#define NETFRONT_ACCEL_TSO_BUF_LENGTH \
++ ((PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE) \
++ - sizeof(struct netfront_accel_tso_buffer) \
++ - sizeof(struct netfront_accel_tso_output_packet))
++
++int netfront_accel_enqueue_skb_tso(netfront_accel_vnic *vnic,
++ struct sk_buff *skb);
++
++#endif /* NETFRONT_ACCEL_TSO_H */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_vi.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_vi.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,1194 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/if_ether.h>
++#include <linux/ip.h>
++#include <net/checksum.h>
++#include <asm/io.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_bufs.h"
++#include "accel_tso.h"
++#include "accel_ssr.h"
++#include "netfront.h"
++
++#include "etherfabric/ef_vi.h"
++
++/*
++ * Max available space in a buffer for data once meta-data has taken
++ * its place
++ */
++#define NETFRONT_ACCEL_TX_BUF_LENGTH \
++ ((PAGE_SIZE / NETFRONT_ACCEL_BUFS_PER_PAGE) \
++ - sizeof(struct netfront_accel_tso_buffer))
++
++#define ACCEL_TX_MAX_BUFFERS (6)
++#define ACCEL_VI_POLL_EVENTS (8)
++
++static
++int netfront_accel_vi_init_fini(netfront_accel_vnic *vnic,
++ struct net_accel_msg_hw *hw_msg)
++{
++ struct ef_vi_nic_type nic_type;
++ struct net_accel_hw_falcon_b *hw_info;
++ void *io_kva, *evq_base, *rx_dma_kva, *tx_dma_kva, *doorbell_kva;
++ u32 *evq_gnts;
++ u32 evq_order;
++ int vi_state_size;
++ u8 vi_data[VI_MAPPINGS_SIZE];
++
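++	/*
++	 * This routine both initialises (hw_msg != NULL) and tears down
++	 * (hw_msg == NULL) the VI: the fini path reuses the error-unwind
++	 * sequence at the bottom of the function.
++	 */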
++ if (hw_msg == NULL)
++ goto fini;
++
++ /* And create the local macs table lock */
++ spin_lock_init(&vnic->table_lock);
++
++ /* Create fastpath table, initial size 8, key length 8 */
++ if (cuckoo_hash_init(&vnic->fastpath_table, 3, 8)) {
++ EPRINTK("failed to allocate fastpath table\n");
++ goto fail_cuckoo;
++ }
++
++ vnic->hw.falcon.type = hw_msg->type;
++
++ switch (hw_msg->type) {
++ case NET_ACCEL_MSG_HWTYPE_FALCON_A:
++ hw_info = &hw_msg->resources.falcon_a.common;
++ /* Need the extra rptr register page on A1 */
++ io_kva = net_accel_map_iomem_page
++ (vnic->dev, hw_msg->resources.falcon_a.evq_rptr_gnt,
++ &vnic->hw.falcon.evq_rptr_mapping);
++ if (io_kva == NULL) {
++ EPRINTK("%s: evq_rptr permission failed\n", __FUNCTION__);
++ goto evq_rptr_fail;
++ }
++
++ vnic->hw.falcon.evq_rptr = io_kva +
++ (hw_info->evq_rptr & (PAGE_SIZE - 1));
++ break;
++ case NET_ACCEL_MSG_HWTYPE_FALCON_B:
++ hw_info = &hw_msg->resources.falcon_b;
++ break;
++ default:
++ goto bad_type;
++ }
++
++ /**** Event Queue ****/
++
++ /* Map the event queue pages */
++ evq_gnts = hw_info->evq_mem_gnts;
++ evq_order = hw_info->evq_order;
++
++ EPRINTK_ON(hw_info->evq_offs != 0);
++
++ DPRINTK("Will map evq %d pages\n", 1 << evq_order);
++
++ evq_base =
++ net_accel_map_grants_contig(vnic->dev, evq_gnts, 1 << evq_order,
++ &vnic->evq_mapping);
++ if (evq_base == NULL) {
++ EPRINTK("%s: evq_base failed\n", __FUNCTION__);
++ goto evq_fail;
++ }
++
++ /**** Doorbells ****/
++ /* Set up the doorbell mappings. */
++ doorbell_kva =
++ net_accel_map_iomem_page(vnic->dev, hw_info->doorbell_gnt,
++ &vnic->hw.falcon.doorbell_mapping);
++ if (doorbell_kva == NULL) {
++ EPRINTK("%s: doorbell permission failed\n", __FUNCTION__);
++ goto doorbell_fail;
++ }
++ vnic->hw.falcon.doorbell = doorbell_kva;
++
++ /* On Falcon_B we get the rptr from the doorbell page */
++ if (hw_msg->type == NET_ACCEL_MSG_HWTYPE_FALCON_B) {
++ vnic->hw.falcon.evq_rptr =
++ (u32 *)((char *)vnic->hw.falcon.doorbell
++ + hw_info->evq_rptr);
++ }
++
++ /**** DMA Queue ****/
++
++ /* Set up the DMA Queues from the message. */
++ tx_dma_kva = net_accel_map_grants_contig
++ (vnic->dev, &(hw_info->txdmaq_gnt), 1,
++ &vnic->hw.falcon.txdmaq_mapping);
++ if (tx_dma_kva == NULL) {
++ EPRINTK("%s: TX dma failed\n", __FUNCTION__);
++ goto tx_dma_fail;
++ }
++
++ rx_dma_kva = net_accel_map_grants_contig
++ (vnic->dev, &(hw_info->rxdmaq_gnt), 1,
++ &vnic->hw.falcon.rxdmaq_mapping);
++ if (rx_dma_kva == NULL) {
++ EPRINTK("%s: RX dma failed\n", __FUNCTION__);
++ goto rx_dma_fail;
++ }
++
++ /* Full confession */
++ DPRINTK("Mapped H/W"
++ " Tx DMAQ grant %x -> %p\n"
++ " Rx DMAQ grant %x -> %p\n"
++ " EVQ grant %x -> %p\n",
++ hw_info->txdmaq_gnt, tx_dma_kva,
++ hw_info->rxdmaq_gnt, rx_dma_kva,
++ evq_gnts[0], evq_base
++ );
++
++ memset(vi_data, 0, sizeof(vi_data));
++
++ /* TODO BUG11305: convert efhw_arch to ef_vi_arch
++ * e.g.
++ * arch = ef_vi_arch_from_efhw_arch(hw_info->nic_arch);
++ * assert(arch >= 0);
++ * nic_type.arch = arch;
++ */
++ nic_type.arch = (unsigned char)hw_info->nic_arch;
++ nic_type.variant = (char)hw_info->nic_variant;
++ nic_type.revision = (unsigned char)hw_info->nic_revision;
++
++ ef_vi_init_mapping_evq(vi_data, nic_type, hw_info->instance,
++ 1 << (evq_order + PAGE_SHIFT), evq_base,
++ (void *)0xdeadbeef);
++
++ ef_vi_init_mapping_vi(vi_data, nic_type, hw_info->rx_capacity,
++ hw_info->tx_capacity, hw_info->instance,
++ doorbell_kva, rx_dma_kva, tx_dma_kva, 0);
++
++ vi_state_size = ef_vi_calc_state_bytes(hw_info->rx_capacity,
++ hw_info->tx_capacity);
++ vnic->vi_state = (ef_vi_state *)kmalloc(vi_state_size, GFP_KERNEL);
++ if (vnic->vi_state == NULL) {
++ EPRINTK("%s: kmalloc for VI state failed\n", __FUNCTION__);
++ goto vi_state_fail;
++ }
++ ef_vi_init(&vnic->vi, vi_data, vnic->vi_state, &vnic->evq_state, 0);
++
++ ef_eventq_state_init(&vnic->vi);
++
++ ef_vi_state_init(&vnic->vi);
++
++ return 0;
++
++fini:
++ kfree(vnic->vi_state);
++ vnic->vi_state = NULL;
++vi_state_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->hw.falcon.rxdmaq_mapping);
++rx_dma_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->hw.falcon.txdmaq_mapping);
++tx_dma_fail:
++ net_accel_unmap_iomem_page(vnic->dev, vnic->hw.falcon.doorbell_mapping);
++ vnic->hw.falcon.doorbell = NULL;
++doorbell_fail:
++ net_accel_unmap_grants_contig(vnic->dev, vnic->evq_mapping);
++evq_fail:
++ if (vnic->hw.falcon.type == NET_ACCEL_MSG_HWTYPE_FALCON_A)
++ net_accel_unmap_iomem_page(vnic->dev,
++ vnic->hw.falcon.evq_rptr_mapping);
++ vnic->hw.falcon.evq_rptr = NULL;
++evq_rptr_fail:
++bad_type:
++ cuckoo_hash_destroy(&vnic->fastpath_table);
++fail_cuckoo:
++ return -EIO;
++}
++
++
++void netfront_accel_vi_ctor(netfront_accel_vnic *vnic)
++{
++ /* Just mark the VI as uninitialised. */
++ vnic->vi_state = NULL;
++}
++
++
++int netfront_accel_vi_init(netfront_accel_vnic *vnic, struct net_accel_msg_hw *hw_msg)
++{
++ BUG_ON(hw_msg == NULL);
++ return netfront_accel_vi_init_fini(vnic, hw_msg);
++}
++
++
++void netfront_accel_vi_dtor(netfront_accel_vnic *vnic)
++{
++ if (vnic->vi_state != NULL)
++ netfront_accel_vi_init_fini(vnic, NULL);
++}
++
++
++static
++void netfront_accel_vi_post_rx(netfront_accel_vnic *vnic, u16 id,
++ netfront_accel_pkt_desc *buf)
++{
++
++ int idx = vnic->rx_dma_batched;
++
++#if 0
++ VPRINTK("Posting buffer %d (0x%08x) for rx at index %d, space is %d\n",
++ id, buf->pkt_buff_addr, idx, ef_vi_receive_space(&vnic->vi));
++#endif
++ /* Set up a virtual buffer descriptor */
++ ef_vi_receive_init(&vnic->vi, buf->pkt_buff_addr, id,
++ /*rx_bytes=max*/0);
++
++ idx++;
++
++ vnic->rx_dma_level++;
++
++ /*
++	 * Only push the descriptors to the card once we've built up a
++	 * batch, or if the RX ring is running low. Otherwise they can
++	 * sit around for a while; there will be plenty available.
++ */
++ if (idx >= NETFRONT_ACCEL_RX_DESC_BATCH ||
++ vnic->rx_dma_level < NETFRONT_ACCEL_RX_DESC_BATCH) {
++#if 0
++ VPRINTK("Flushing %d rx descriptors.\n", idx);
++#endif
++
++ /* Push buffer to hardware */
++ ef_vi_receive_push(&vnic->vi);
++
++ idx = 0;
++ }
++
++ vnic->rx_dma_batched = idx;
++}
++
++
++inline
++void netfront_accel_vi_post_rx_or_free(netfront_accel_vnic *vnic, u16 id,
++ netfront_accel_pkt_desc *buf)
++{
++
++ VPRINTK("%s: %d\n", __FUNCTION__, id);
++
++ if (ef_vi_receive_space(&vnic->vi) <= vnic->rx_dma_batched) {
++ VPRINTK("RX space is full\n");
++ netfront_accel_buf_put(vnic->rx_bufs, id);
++ return;
++ }
++
++ VPRINTK("Completed buffer %d is reposted\n", id);
++ netfront_accel_vi_post_rx(vnic, id, buf);
++
++ /*
++ * Let's see if there's any more to be pushed out to the NIC
++ * while we're here
++ */
++ while (ef_vi_receive_space(&vnic->vi) > vnic->rx_dma_batched) {
++ /* Try to allocate a buffer. */
++ buf = netfront_accel_buf_get(vnic->rx_bufs);
++ if (buf == NULL)
++ break;
++
++ /* Add it to the rx dma queue. */
++ netfront_accel_vi_post_rx(vnic, buf->buf_id, buf);
++ }
++}
++
++
++void netfront_accel_vi_add_bufs(netfront_accel_vnic *vnic, int is_rx)
++{
++
++ while (is_rx &&
++ ef_vi_receive_space(&vnic->vi) > vnic->rx_dma_batched) {
++ netfront_accel_pkt_desc *buf;
++
++ VPRINTK("%s: %d\n", __FUNCTION__, vnic->rx_dma_level);
++
++ /* Try to allocate a buffer. */
++ buf = netfront_accel_buf_get(vnic->rx_bufs);
++
++ if (buf == NULL)
++ break;
++
++ /* Add it to the rx dma queue. */
++ netfront_accel_vi_post_rx(vnic, buf->buf_id, buf);
++ }
++
++ VPRINTK("%s: done\n", __FUNCTION__);
++}
++
++
++struct netfront_accel_multi_state {
++ unsigned remaining_len;
++
++ unsigned buffers;
++
++ struct netfront_accel_tso_buffer *output_buffers;
++
++ /* Where we are in the current fragment of the SKB. */
++ struct {
++ /* address of current position */
++ void *addr;
++ /* remaining length */
++ unsigned int len;
++ } ifc; /* == Input Fragment Cursor */
++};
++
++
++static inline void multi_post_start(struct netfront_accel_multi_state *st,
++ struct sk_buff *skb)
++{
++ st->remaining_len = skb->len;
++ st->output_buffers = NULL;
++ st->buffers = 0;
++ st->ifc.len = skb_headlen(skb);
++ st->ifc.addr = skb->data;
++}
++
++static int multi_post_start_new_buffer(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++
++ /* Get a mapped packet buffer */
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ return -1;
++ }
++
++ /* Store a bit of meta-data at the end */
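++	/*
++	 * Buffer layout: [payload, NETFRONT_ACCEL_TX_BUF_LENGTH bytes]
++	 * [struct netfront_accel_tso_buffer]
++	 */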
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
++
++ tso_buf->buf = buf;
++
++ tso_buf->length = 0;
++
++ tso_buf->next = st->output_buffers;
++ st->output_buffers = tso_buf;
++ st->buffers++;
++
++ BUG_ON(st->buffers >= ACCEL_TX_MAX_BUFFERS);
++
++ /*
++	 * Store the skb context; set to NULL for now, the last packet
++	 * buffer will get a non-NULL value later
++ */
++ tso_buf->buf->skb = NULL;
++
++ return 0;
++}
++
++
++static void
++multi_post_fill_buffer_with_fragment(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ unsigned n, space;
++
++ BUG_ON(st->output_buffers == NULL);
++ tso_buf = st->output_buffers;
++
++ if (st->ifc.len == 0) return;
++ if (tso_buf->length == NETFRONT_ACCEL_TX_BUF_LENGTH) return;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TX_BUF_LENGTH);
++
++ space = NETFRONT_ACCEL_TX_BUF_LENGTH - tso_buf->length;
++ n = min(st->ifc.len, space);
++
++ memcpy(tso_buf->buf->pkt_kva + tso_buf->length, st->ifc.addr, n);
++
++ st->remaining_len -= n;
++ st->ifc.len -= n;
++ tso_buf->length += n;
++ st->ifc.addr += n;
++
++ BUG_ON(tso_buf->length > NETFRONT_ACCEL_TX_BUF_LENGTH);
++
++ return;
++}
++
++
++static inline void multi_post_unwind(netfront_accel_vnic *vnic,
++ struct netfront_accel_multi_state *st)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ while (st->output_buffers != NULL) {
++ tso_buf = st->output_buffers;
++ st->output_buffers = tso_buf->next;
++ st->buffers--;
++ netfront_accel_buf_put(vnic->tx_bufs, tso_buf->buf->buf_id);
++ }
++ BUG_ON(st->buffers != 0);
++}
++
++
++static enum netfront_accel_post_status
++netfront_accel_enqueue_skb_multi(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_multi_state state;
++ ef_iovec iovecs[ACCEL_TX_MAX_BUFFERS];
++ skb_frag_t *f;
++ int frag_i, rc, dma_id;
++
++ multi_post_start(&state, skb);
++
++ frag_i = -1;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Set to zero to encourage falcon to work it out for us */
++ *(u16*)(skb->h.raw + skb->csum) = 0;
++ }
++
++ if (multi_post_start_new_buffer(vnic, &state)) {
++ DPRINTK("%s: out of buffers\n", __FUNCTION__);
++ goto unwind;
++ }
++
++ while (1) {
++ multi_post_fill_buffer_with_fragment(vnic, &state);
++
++ /* Move onto the next fragment? */
++ if (state.ifc.len == 0) {
++ if (++frag_i >= skb_shinfo(skb)->nr_frags)
++ /* End of payload reached. */
++ break;
++ f = &skb_shinfo(skb)->frags[frag_i];
++ state.ifc.len = f->size;
++ state.ifc.addr = page_address(f->page) + f->page_offset;
++ }
++
++ /* Start a new buffer? */
++ if ((state.output_buffers->length ==
++ NETFRONT_ACCEL_TX_BUF_LENGTH) &&
++ multi_post_start_new_buffer(vnic, &state)) {
++ DPRINTK("%s: out of buffers\n", __FUNCTION__);
++ goto unwind;
++ }
++ }
++
++ /* Check for space */
++ if (ef_vi_transmit_space(&vnic->vi) < state.buffers) {
++ DPRINTK("%s: Not enough TX space (%d)\n", __FUNCTION__, state.buffers);
++ goto unwind;
++ }
++
++ /* Store the skb in what will be the last buffer's context */
++ state.output_buffers->buf->skb = skb;
++ /* Remember dma_id of what will be the last buffer */
++ dma_id = state.output_buffers->buf->buf_id;
++
++ /*
++ * Make an iovec of the buffers in the list, reversing the
++ * buffers as we go as they are constructed on a stack
++ */
++ tso_buf = state.output_buffers;
++ for (frag_i = state.buffers-1; frag_i >= 0; frag_i--) {
++ iovecs[frag_i].iov_base = tso_buf->buf->pkt_buff_addr;
++ iovecs[frag_i].iov_len = tso_buf->length;
++ tso_buf = tso_buf->next;
++ }
++
++ rc = ef_vi_transmitv(&vnic->vi, iovecs, state.buffers, dma_id);
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++	vnic->netdev_stats.fastpath_tx_pkts++;
++#if NETFRONT_ACCEL_STATS
++ {
++ u32 n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ (u32)vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++ return NETFRONT_ACCEL_STATUS_GOOD;
++
++unwind:
++ multi_post_unwind(vnic, &state);
++
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++
++ return NETFRONT_ACCEL_STATUS_BUSY;
++}
++
++
++static enum netfront_accel_post_status
++netfront_accel_enqueue_skb_single(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct netfront_accel_tso_buffer *tso_buf;
++ struct netfront_accel_pkt_desc *buf;
++ u8 *kva;
++ int rc;
++
++ if (ef_vi_transmit_space(&vnic->vi) < 1) {
++ DPRINTK("%s: No TX space\n", __FUNCTION__);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return NETFRONT_ACCEL_STATUS_BUSY;
++ }
++
++ buf = netfront_accel_buf_get(vnic->tx_bufs);
++ if (buf == NULL) {
++ DPRINTK("%s: No buffer for TX\n", __FUNCTION__);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_busy++);
++ return NETFRONT_ACCEL_STATUS_BUSY;
++ }
++
++ /* Track number of tx fastpath stats */
++ vnic->netdev_stats.fastpath_tx_pkts++;
++ vnic->netdev_stats.fastpath_tx_bytes += skb->len;
++
++#if NETFRONT_ACCEL_STATS
++ {
++ u32 n;
++ n = vnic->netdev_stats.fastpath_tx_pkts -
++ (u32)vnic->stats.fastpath_tx_completions;
++ if (n > vnic->stats.fastpath_tx_pending_max)
++ vnic->stats.fastpath_tx_pending_max = n;
++ }
++#endif
++
++ /* Store the context */
++ buf->skb = skb;
++
++ kva = buf->pkt_kva;
++
++ if (skb->ip_summed == CHECKSUM_HW) {
++ /* Set to zero to encourage falcon to work it out for us */
++ *(u16*)(skb->h.raw + skb->csum) = 0;
++ }
++ NETFRONT_ACCEL_PKTBUFF_FOR_EACH_FRAGMENT
++ (skb, idx, frag_data, frag_len, {
++ /* Copy in payload */
++ VPRINTK("*** Copying %d bytes to %p\n", frag_len, kva);
++ memcpy(kva, frag_data, frag_len);
++ kva += frag_len;
++ });
++
++ VPRINTK("%s: id %d pkt %p kva %p buff_addr 0x%08x\n", __FUNCTION__,
++ buf->buf_id, buf, buf->pkt_kva, buf->pkt_buff_addr);
++
++
++ /* Set up the TSO meta-data for a single buffer/packet */
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
++ tso_buf->next = NULL;
++ tso_buf->buf = buf;
++ tso_buf->length = skb->len;
++
++ rc = ef_vi_transmit(&vnic->vi, buf->pkt_buff_addr, skb->len,
++ buf->buf_id);
++ /* We checked for space already, so it really should succeed */
++ BUG_ON(rc != 0);
++
++ return NETFRONT_ACCEL_STATUS_GOOD;
++}
++
++
++enum netfront_accel_post_status
++netfront_accel_vi_tx_post(netfront_accel_vnic *vnic, struct sk_buff *skb)
++{
++ struct ethhdr *pkt_eth_hdr;
++ struct iphdr *pkt_ipv4_hdr;
++ int value, try_fastpath;
++
++ /*
++ * This assumes that the data field points to the dest mac
++ * address.
++ */
++ cuckoo_hash_mac_key key = cuckoo_mac_to_key(skb->data);
++
++ /*
++ * NB very important that all things that could return "CANT"
++	 * are tested before things that return "BUSY", as if it
++ * returns "BUSY" it is assumed that it won't return "CANT"
++ * next time it is tried
++ */
++
++ /*
++ * Do a fastpath send if fast path table lookup returns true.
++ * We do this without the table lock and so may get the wrong
++ * answer, but current opinion is that's not a big problem
++ */
++ try_fastpath = cuckoo_hash_lookup(&vnic->fastpath_table,
++ (cuckoo_hash_key *)(&key), &value);
++
++ if (!try_fastpath) {
++ VPRINTK("try fast path false for mac: " MAC_FMT "\n",
++ MAC_ARG(skb->data));
++
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ /* Check to see if the packet can be sent. */
++ if (skb_headlen(skb) < sizeof(*pkt_eth_hdr) + sizeof(*pkt_ipv4_hdr)) {
++ EPRINTK("%s: Packet header is too small\n", __FUNCTION__);
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ pkt_eth_hdr = (void*)skb->data;
++ pkt_ipv4_hdr = (void*)(pkt_eth_hdr+1);
++
++ if (be16_to_cpu(pkt_eth_hdr->h_proto) != ETH_P_IP) {
++ DPRINTK("%s: Packet is not IPV4 (ether_type=0x%04x)\n", __FUNCTION__,
++ be16_to_cpu(pkt_eth_hdr->h_proto));
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ if (pkt_ipv4_hdr->protocol != IPPROTO_TCP &&
++ pkt_ipv4_hdr->protocol != IPPROTO_UDP) {
++ DPRINTK("%s: Packet is not TCP/UDP (ip_protocol=0x%02x)\n",
++ __FUNCTION__, pkt_ipv4_hdr->protocol);
++ return NETFRONT_ACCEL_STATUS_CANT;
++ }
++
++ VPRINTK("%s: %d bytes, gso %d\n", __FUNCTION__, skb->len,
++ skb_shinfo(skb)->gso_size);
++
++ if (skb_shinfo(skb)->gso_size) {
++ return netfront_accel_enqueue_skb_tso(vnic, skb);
++ }
++
++ if (skb->len <= NETFRONT_ACCEL_TX_BUF_LENGTH) {
++ return netfront_accel_enqueue_skb_single(vnic, skb);
++ }
++
++ return netfront_accel_enqueue_skb_multi(vnic, skb);
++}
++
++
++/*
++ * Copy the data to the required end destination. NB. len is the total
++ * new length of the socket buffer, not the amount of data to copy
++ */
++inline
++int ef_vnic_copy_to_skb(netfront_accel_vnic *vnic, struct sk_buff *skb,
++			struct netfront_accel_pkt_desc *buf, int len)
++{
++	int i, extra = len - skb->len;
++	char c = 0;
++	int pkt_stride = vnic->rx_pkt_stride;
++	int skb_stride = vnic->rx_skb_stride;
++	char *skb_start;
++
++	/*
++	 * This pulls stuff into the cache - have seen performance
++	 * benefit in this, but disabled by default.  The byte reads
++	 * accumulate into "c" (whose value is never used) purely so
++	 * the compiler cannot optimise the touch loops away.
++	 */
++	skb_start = skb->data;
++	if (pkt_stride) {
++		for (i = 0; i < len; i += pkt_stride) {
++			c += ((volatile char*)(buf->pkt_kva))[i];
++		}
++	}
++	if (skb_stride) {
++		for (i = skb->len; i < len ; i += skb_stride) {
++			c += ((volatile char*)(skb_start))[i];
++		}
++	}
++
++ if (skb_tailroom(skb) >= extra) {
++ memcpy(skb_put(skb, extra), buf->pkt_kva, extra);
++ return 0;
++ }
++
++ return -ENOSPC;
++}
++
++
++static void discard_jumbo_state(netfront_accel_vnic *vnic)
++{
++	if (vnic->jumbo_state.skb != NULL) {
++		dev_kfree_skb_any(vnic->jumbo_state.skb);
++		vnic->jumbo_state.skb = NULL;
++ }
++ vnic->jumbo_state.in_progress = 0;
++}
++
++
++static void netfront_accel_vi_rx_complete(netfront_accel_vnic *vnic,
++ struct sk_buff *skb)
++{
++ cuckoo_hash_mac_key key;
++ unsigned long flags;
++ int value;
++ struct net_device *net_dev;
++
++ key = cuckoo_mac_to_key(skb->data + ETH_ALEN);
++
++ /*
++ * If this is a MAC address that we want to do fast path TX
++ * to, and we don't already, add it to the fastpath table.
++ * The initial lookup is done without the table lock and so
++ * may get the wrong answer, but current opinion is that's not
++ * a big problem
++ */
++ if (is_valid_ether_addr(skb->data + ETH_ALEN) &&
++ !cuckoo_hash_lookup(&vnic->fastpath_table, (cuckoo_hash_key *)&key,
++ &value)) {
++ spin_lock_irqsave(&vnic->table_lock, flags);
++
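++		/*
++		 * cuckoo_hash_add_check, as its name suggests, appears
++		 * to re-check for the key under the lock, so losing the
++		 * unlocked lookup race above at worst costs a redundant
++		 * locked lookup rather than a duplicate entry
++		 */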
++ cuckoo_hash_add_check(&vnic->fastpath_table,
++ (cuckoo_hash_key *)&key,
++ 1, 1);
++
++ spin_unlock_irqrestore(&vnic->table_lock, flags);
++ }
++
++ if (compare_ether_addr(skb->data, vnic->mac)) {
++ struct iphdr *ip = (struct iphdr *)(skb->data + ETH_HLEN);
++ u16 port;
++
++ DPRINTK("%s: saw wrong MAC address " MAC_FMT "\n",
++ __FUNCTION__, MAC_ARG(skb->data));
++
++ if (ip->protocol == IPPROTO_TCP) {
++ struct tcphdr *tcp = (struct tcphdr *)
++ ((char *)ip + 4 * ip->ihl);
++ port = tcp->dest;
++ } else {
++ struct udphdr *udp = (struct udphdr *)
++ ((char *)ip + 4 * ip->ihl);
++ EPRINTK_ON(ip->protocol != IPPROTO_UDP);
++ port = udp->dest;
++ }
++
++ netfront_accel_msg_tx_fastpath(vnic, skb->data,
++ ip->daddr, port,
++ ip->protocol);
++ }
++
++ net_dev = vnic->net_dev;
++ skb->dev = net_dev;
++ skb->protocol = eth_type_trans(skb, net_dev);
++ /* CHECKSUM_UNNECESSARY as hardware has done it already */
++ skb->ip_summed = CHECKSUM_UNNECESSARY;
++
++ if (!netfront_accel_ssr_skb(vnic, &vnic->ssr_state, skb))
++ netif_receive_skb(skb);
++}
++
++
++static int netfront_accel_vi_poll_process_rx(netfront_accel_vnic *vnic,
++ ef_event *ev)
++{
++ struct netfront_accel_bufinfo *bufinfo = vnic->rx_bufs;
++ struct netfront_accel_pkt_desc *buf = NULL;
++ struct sk_buff *skb;
++ int id, len, sop = 0, cont = 0;
++
++ VPRINTK("Rx event.\n");
++ /*
++ * Complete the receive operation, and get the request id of
++ * the buffer
++ */
++ id = ef_vi_receive_done(&vnic->vi, ev);
++
++ if (id < 0 || id >= bufinfo->npages*NETFRONT_ACCEL_BUFS_PER_PAGE) {
++ EPRINTK("Rx packet %d is invalid\n", id);
++ /* Carry on round the loop if more events */
++ goto bad_packet;
++ }
++ /* Get our buffer descriptor */
++ buf = netfront_accel_buf_find(bufinfo, id);
++
++ len = EF_EVENT_RX_BYTES(*ev);
++
++ /* An RX buffer has been removed from the DMA ring. */
++ vnic->rx_dma_level--;
++
++ if (EF_EVENT_TYPE(*ev) == EF_EVENT_TYPE_RX) {
++ sop = EF_EVENT_RX_SOP(*ev);
++ cont = EF_EVENT_RX_CONT(*ev);
++
++ skb = vnic->jumbo_state.skb;
++
++ VPRINTK("Rx packet %d: %d bytes so far; sop %d; cont %d\n",
++ id, len, sop, cont);
++
++ if (sop) {
++ if (!vnic->jumbo_state.in_progress) {
++ vnic->jumbo_state.in_progress = 1;
++ BUG_ON(vnic->jumbo_state.skb != NULL);
++ } else {
++				/*
++				 * An SOP while a packet is already in
++				 * progress means the previous one is
++				 * missing its tail; this fragment
++				 * itself is possibly still OK
++				 */
++ DPRINTK("sop and in_progress => no tail\n");
++
++ /* Release the socket buffer we already had */
++ discard_jumbo_state(vnic);
++
++ /* Now start processing this fragment */
++ vnic->jumbo_state.in_progress = 1;
++ skb = NULL;
++ }
++ } else if (!vnic->jumbo_state.in_progress) {
++ DPRINTK("!sop and !in_progress => missing head\n");
++ goto missing_head;
++ }
++
++ if (!cont) {
++ /* Update state for next time */
++ vnic->jumbo_state.in_progress = 0;
++ vnic->jumbo_state.skb = NULL;
++ } else if (!vnic->jumbo_state.in_progress) {
++ DPRINTK("cont and !in_progress => missing head\n");
++ goto missing_head;
++ }
++
++ if (skb == NULL) {
++ BUG_ON(!sop);
++
++ if (!cont)
++ skb = alloc_skb(len+NET_IP_ALIGN, GFP_ATOMIC);
++ else
++ skb = alloc_skb(vnic->net_dev->mtu+NET_IP_ALIGN,
++ GFP_ATOMIC);
++
++ if (skb == NULL) {
++ DPRINTK("%s: Couldn't get an rx skb.\n",
++ __FUNCTION__);
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++ /*
++ * Dropping this fragment means we
++ * should discard the rest too
++ */
++ discard_jumbo_state(vnic);
++
++ /* Carry on round the loop if more events */
++ return 0;
++ }
++
++ }
++
++ /* Copy the data to required end destination */
++ if (ef_vnic_copy_to_skb(vnic, skb, buf, len) != 0) {
++ /*
++ * No space in the skb - suggests > MTU packet
++ * received
++ */
++ EPRINTK("%s: Rx packet too large (%d)\n",
++ __FUNCTION__, len);
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++ discard_jumbo_state(vnic);
++ return 0;
++ }
++
++ /* Put the buffer back in the DMA queue. */
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++
++ if (cont) {
++ vnic->jumbo_state.skb = skb;
++
++ return 0;
++ } else {
++ /* Track number of rx fastpath packets */
++ vnic->netdev_stats.fastpath_rx_pkts++;
++ vnic->netdev_stats.fastpath_rx_bytes += len;
++
++ netfront_accel_vi_rx_complete(vnic, skb);
++
++ return 1;
++ }
++ } else {
++ BUG_ON(EF_EVENT_TYPE(*ev) != EF_EVENT_TYPE_RX_DISCARD);
++
++ if (EF_EVENT_RX_DISCARD_TYPE(*ev)
++ == EF_EVENT_RX_DISCARD_TRUNC) {
++ DPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d FRM_TRUNC q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.fastpath_frm_trunc);
++ } else if (EF_EVENT_RX_DISCARD_TYPE(*ev)
++ == EF_EVENT_RX_DISCARD_OTHER) {
++ DPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d RX_DISCARD_OTHER q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ /*
++ * Probably tail of packet for which error has
++ * already been logged, so don't count in
++ * stats
++ */
++ } else {
++ EPRINTK("%s: " EF_EVENT_FMT
++ " buffer %d rx discard type %d q_id %d\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(*ev), id,
++ EF_EVENT_RX_DISCARD_TYPE(*ev),
++ EF_EVENT_RX_DISCARD_Q_ID(*ev) );
++ NETFRONT_ACCEL_STATS_OP(++vnic->stats.bad_event_count);
++ }
++ }
++
++	/* The discard cases drop through to here */
++
++bad_packet:
++ /* Release the socket buffer we already had */
++ discard_jumbo_state(vnic);
++
++missing_head:
++ BUG_ON(vnic->jumbo_state.in_progress != 0);
++ BUG_ON(vnic->jumbo_state.skb != NULL);
++
++ if (id >= 0 && id < bufinfo->npages*NETFRONT_ACCEL_BUFS_PER_PAGE)
++ /* Put the buffer back in the DMA queue. */
++ netfront_accel_vi_post_rx_or_free(vnic, (u16)id, buf);
++
++ vnic->netdev_stats.fastpath_rx_errors++;
++
++	DPRINTK("%s experienced bad packet/missing fragment error: %d\n",
++		__FUNCTION__, ev->rx.flags);
++
++ return 0;
++}
++
++
++static void netfront_accel_vi_not_busy(netfront_accel_vnic *vnic)
++{
++ struct netfront_info *np = ((struct netfront_info *)
++ netdev_priv(vnic->net_dev));
++ struct sk_buff *skb;
++ int handled;
++ unsigned long flags;
++
++ /*
++ * TODO if we could safely check tx_skb == NULL and return
++ * early without taking the lock, that would obviously help
++ * performance
++ */
++
++ /* Take the netfront lock which protects tx_skb. */
++ spin_lock_irqsave(&np->tx_lock, flags);
++ if (vnic->tx_skb != NULL) {
++ DPRINTK("%s trying to send spare buffer\n", __FUNCTION__);
++
++ skb = vnic->tx_skb;
++ vnic->tx_skb = NULL;
++
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++
++ handled = netfront_accel_vi_tx_post(vnic, skb);
++
++ spin_lock_irqsave(&np->tx_lock, flags);
++
++ if (handled != NETFRONT_ACCEL_STATUS_BUSY) {
++ DPRINTK("%s restarting tx\n", __FUNCTION__);
++ if (netfront_check_queue_ready(vnic->net_dev)) {
++ netif_wake_queue(vnic->net_dev);
++ NETFRONT_ACCEL_STATS_OP
++ (vnic->stats.queue_wakes++);
++ }
++ } else {
++ vnic->tx_skb = skb;
++ }
++
++		/*
++		 * We should never get a CANT here, as all the CANT
++		 * checks were made before BUSY was returned the first
++		 * time round
++		 */
++ BUG_ON(handled == NETFRONT_ACCEL_STATUS_CANT);
++ }
++ spin_unlock_irqrestore(&np->tx_lock, flags);
++}
++
++
++static void netfront_accel_vi_tx_complete(netfront_accel_vnic *vnic,
++ struct netfront_accel_tso_buffer *tso_buf,
++ int is_last)
++{
++ struct netfront_accel_tso_buffer *next;
++
++ /*
++ * We get a single completion for every call to
++ * ef_vi_transmitv so handle any other buffers which are part
++ * of the same packet
++ */
++ while (tso_buf != NULL) {
++ if (tso_buf->buf->skb != NULL) {
++ dev_kfree_skb_any(tso_buf->buf->skb);
++ tso_buf->buf->skb = NULL;
++ }
++
++ next = tso_buf->next;
++
++ netfront_accel_buf_put(vnic->tx_bufs, tso_buf->buf->buf_id);
++
++ tso_buf = next;
++ }
++
++	/*
++	 * If this was the last one in the batch, try to send any
++	 * pending tx_skb; there should now be buffers and descriptors
++	 * free
++	 */
++ if (is_last)
++ netfront_accel_vi_not_busy(vnic);
++}
++
++
++static void netfront_accel_vi_poll_process_tx(netfront_accel_vnic *vnic,
++ ef_event *ev)
++{
++ struct netfront_accel_pkt_desc *buf;
++ struct netfront_accel_tso_buffer *tso_buf;
++ ef_request_id ids[EF_VI_TRANSMIT_BATCH];
++ int i, n_ids;
++ unsigned long flags;
++
++ /* Get the request ids for this tx completion event. */
++ n_ids = ef_vi_transmit_unbundle(&vnic->vi, ev, ids);
++
++ /* Take the tx buffer spin lock and hold for the duration */
++ spin_lock_irqsave(&vnic->tx_lock, flags);
++
++ for (i = 0; i < n_ids; ++i) {
++ VPRINTK("Tx packet %d complete\n", ids[i]);
++ buf = netfront_accel_buf_find(vnic->tx_bufs, ids[i]);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.fastpath_tx_completions++);
++
++ tso_buf = (struct netfront_accel_tso_buffer *)
++ (buf->pkt_kva + NETFRONT_ACCEL_TX_BUF_LENGTH);
++ BUG_ON(tso_buf->buf != buf);
++
++ netfront_accel_vi_tx_complete(vnic, tso_buf, i == (n_ids-1));
++ }
++
++ spin_unlock_irqrestore(&vnic->tx_lock, flags);
++}
++
++
++int netfront_accel_vi_poll(netfront_accel_vnic *vnic, int rx_packets)
++{
++ ef_event ev[ACCEL_VI_POLL_EVENTS];
++ int rx_remain = rx_packets, rc, events, i;
++#if NETFRONT_ACCEL_STATS
++ int n_evs_polled = 0, rx_evs_polled = 0, tx_evs_polled = 0;
++#endif
++ BUG_ON(rx_packets <= 0);
++
++ events = ef_eventq_poll(&vnic->vi, ev,
++ min(rx_remain, ACCEL_VI_POLL_EVENTS));
++ i = 0;
++ NETFRONT_ACCEL_STATS_OP(n_evs_polled += events);
++
++ VPRINTK("%s: %d events\n", __FUNCTION__, events);
++
++ /* Loop over each event */
++ while (events) {
++ VPRINTK("%s: Event "EF_EVENT_FMT", index %lu\n", __FUNCTION__,
++ EF_EVENT_PRI_ARG(ev[i]),
++ (unsigned long)(vnic->vi.evq_state->evq_ptr));
++
++ if ((EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_RX) ||
++ (EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_RX_DISCARD)) {
++ rc = netfront_accel_vi_poll_process_rx(vnic, &ev[i]);
++ rx_remain -= rc;
++ BUG_ON(rx_remain < 0);
++ NETFRONT_ACCEL_STATS_OP(rx_evs_polled++);
++ } else if (EF_EVENT_TYPE(ev[i]) == EF_EVENT_TYPE_TX) {
++ netfront_accel_vi_poll_process_tx(vnic, &ev[i]);
++ NETFRONT_ACCEL_STATS_OP(tx_evs_polled++);
++ } else if (EF_EVENT_TYPE(ev[i]) ==
++ EF_EVENT_TYPE_RX_NO_DESC_TRUNC) {
++ DPRINTK("%s: RX_NO_DESC_TRUNC " EF_EVENT_FMT "\n",
++ __FUNCTION__, EF_EVENT_PRI_ARG(ev[i]));
++ discard_jumbo_state(vnic);
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.rx_no_desc_trunc++);
++ } else {
++ EPRINTK("Unexpected event " EF_EVENT_FMT "\n",
++ EF_EVENT_PRI_ARG(ev[i]));
++ NETFRONT_ACCEL_STATS_OP(vnic->stats.bad_event_count++);
++ }
++
++ i++;
++
++ /* Carry on round the loop if more events and more space */
++ if (i == events) {
++ if (rx_remain == 0)
++ break;
++
++ events = ef_eventq_poll(&vnic->vi, ev,
++ min(rx_remain,
++ ACCEL_VI_POLL_EVENTS));
++ i = 0;
++ NETFRONT_ACCEL_STATS_OP(n_evs_polled += events);
++ }
++ }
++
++#if NETFRONT_ACCEL_STATS
++ vnic->stats.event_count += n_evs_polled;
++ vnic->stats.event_count_since_irq += n_evs_polled;
++ if (n_evs_polled > vnic->stats.events_per_poll_max)
++ vnic->stats.events_per_poll_max = n_evs_polled;
++ if (rx_evs_polled > vnic->stats.events_per_poll_rx_max)
++ vnic->stats.events_per_poll_rx_max = rx_evs_polled;
++ if (tx_evs_polled > vnic->stats.events_per_poll_tx_max)
++ vnic->stats.events_per_poll_tx_max = tx_evs_polled;
++#endif
++
++ return rx_packets - rx_remain;
++}
++
++
++int netfront_accel_vi_enable_interrupts(netfront_accel_vnic *vnic)
++{
++ u32 sw_evq_ptr;
++
++ VPRINTK("%s: checking for event on %p\n", __FUNCTION__, &vnic->vi.evq_state);
++
++ BUG_ON(vnic == NULL);
++ BUG_ON(vnic->vi.evq_state == NULL);
++
++ /* Do a quick check for an event. */
++ if (ef_eventq_has_event(&vnic->vi)) {
++ VPRINTK("%s: found event\n", __FUNCTION__);
++ return 0;
++ }
++
++ VPRINTK("evq_ptr=0x%08x evq_mask=0x%08x\n",
++ vnic->evq_state.evq_ptr, vnic->vi.evq_mask);
++
++ /* Request a wakeup from the hardware. */
++ sw_evq_ptr = vnic->evq_state.evq_ptr & vnic->vi.evq_mask;
++
++ BUG_ON(vnic->hw.falcon.evq_rptr == NULL);
++
++ VPRINTK("Requesting wakeup at 0x%08x, rptr %p\n", sw_evq_ptr,
++ vnic->hw.falcon.evq_rptr);
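++	/*
++	 * evq_ptr counts bytes and each falcon event is 8 bytes wide,
++	 * so shift right by 3 to form the event index that the
++	 * hardware read-pointer register expects
++	 */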
++ *(volatile u32 *)(vnic->hw.falcon.evq_rptr) = (sw_evq_ptr >> 3);
++
++ return 1;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/accel_xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/accel_xenbus.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,776 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/stddef.h>
++#include <linux/errno.h>
++
++#include <xen/xenbus.h>
++#include <xen/evtchn.h>
++#include <xen/gnttab.h>
++
++#include "accel.h"
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++#include "accel_bufs.h"
++#include "accel_ssr.h"
++/* drivers/xen/netfront/netfront.h */
++#include "netfront.h"
++
++void netfront_accel_set_closing(netfront_accel_vnic *vnic)
++{
++	vnic->frontend_state = XenbusStateClosing;
++ net_accel_update_state(vnic->dev, XenbusStateClosing);
++}
++
++
++static void mac_address_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ netfront_accel_vnic *vnic;
++ struct xenbus_device *dev;
++ int rc;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic = container_of(watch, netfront_accel_vnic,
++ mac_address_watch);
++ dev = vnic->dev;
++
++ rc = net_accel_xen_net_read_mac(dev, vnic->mac);
++
++ if (rc != 0)
++ EPRINTK("%s: failed to read mac (%d)\n", __FUNCTION__, rc);
++}
++
++
++static int setup_mac_address_watch(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ int err;
++
++ DPRINTK("Setting watch on %s/%s\n", dev->nodename, "mac");
++
++ err = xenbus_watch_path2(dev, dev->nodename, "mac",
++ &vnic->mac_address_watch,
++ mac_address_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++
++ return 0;
++ fail:
++ vnic->mac_address_watch.node = NULL;
++ return err;
++}
++
++
++/* Grant access to some pages and publish through xenbus */
++static int make_named_grant(struct xenbus_device *dev, void *page,
++ const char *name, grant_ref_t *gnt_ref)
++{
++ struct xenbus_transaction tr;
++ int err;
++ grant_ref_t gnt;
++
++ gnt = net_accel_grant_page(dev, virt_to_mfn(page), 0);
++ if (gnt < 0)
++ return gnt;
++
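++	/*
++	 * Standard xenbus idiom: xenbus_transaction_end() returns
++	 * -EAGAIN if the transaction raced with a concurrent update
++	 * and must be retried from the start
++	 */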
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed %d\n",
++ __FUNCTION__, err);
++ return err;
++ }
++ err = xenbus_printf(tr, dev->nodename, name, "%d", gnt);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_printf failed %d\n", __FUNCTION__,
++ err);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: transaction end failed %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ *gnt_ref = gnt;
++
++ return 0;
++}
++
++
++static int remove_named_grant(struct xenbus_device *dev,
++ const char *name, grant_ref_t gnt_ref)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ net_accel_ungrant_page(gnt_ref);
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: transaction start failed %d\n",
++ __FUNCTION__, err);
++ return err;
++ }
++ err = xenbus_rm(tr, dev->nodename, name);
++ if (err != 0) {
++ EPRINTK("%s: xenbus_rm failed %d\n", __FUNCTION__,
++ err);
++ xenbus_transaction_end(tr, 1);
++ return err;
++ }
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: transaction end failed %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ return 0;
++}
++
++
++static
++netfront_accel_vnic *netfront_accel_vnic_ctor(struct net_device *net_dev,
++ struct xenbus_device *dev)
++{
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++ netfront_accel_vnic *vnic;
++ int err;
++
++	/*
++	 * A bug in earlier versions of the Xen accel plugin system
++	 * meant you could be probed twice for the same device on
++	 * suspend cancel. Be tolerant of that.
++	 */
++ if (np->accel_priv != NULL)
++ return ERR_PTR(-EALREADY);
++
++ /* Alloc mem for state */
++ vnic = kzalloc(sizeof(netfront_accel_vnic), GFP_KERNEL);
++ if (vnic == NULL) {
++ EPRINTK("%s: no memory for vnic state\n", __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++ }
++
++ spin_lock_init(&vnic->tx_lock);
++
++ mutex_init(&vnic->vnic_mutex);
++ mutex_lock(&vnic->vnic_mutex);
++
++ /* Store so state can be retrieved from device */
++ BUG_ON(np->accel_priv != NULL);
++ np->accel_priv = vnic;
++ vnic->dev = dev;
++ vnic->net_dev = net_dev;
++ spin_lock_init(&vnic->irq_enabled_lock);
++ netfront_accel_ssr_init(&vnic->ssr_state);
++
++ init_waitqueue_head(&vnic->state_wait_queue);
++ vnic->backend_state = XenbusStateUnknown;
++ vnic->frontend_state = XenbusStateClosed;
++ vnic->removing = 0;
++ vnic->domU_state_is_setup = 0;
++ vnic->dom0_state_is_setup = 0;
++ vnic->poll_enabled = 0;
++ vnic->tx_enabled = 0;
++ vnic->tx_skb = NULL;
++
++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
++ INIT_WORK(&vnic->msg_from_bend, netfront_accel_msg_from_bend);
++#else
++ INIT_WORK(&vnic->msg_from_bend, netfront_accel_msg_from_bend, vnic);
++#endif
++
++ netfront_accel_debugfs_create(vnic);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ err = net_accel_xen_net_read_mac(dev, vnic->mac);
++ if (err)
++ goto fail_mac;
++
++ /* Setup a watch on the frontend's MAC address */
++ err = setup_mac_address_watch(dev, vnic);
++ if (err)
++ goto fail_mac;
++
++ return vnic;
++
++fail_mac:
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ netfront_accel_debugfs_remove(vnic);
++
++ netfront_accel_ssr_fini(vnic, &vnic->ssr_state);
++
++ EPRINTK_ON(vnic->tx_skb != NULL);
++
++ vnic->frontend_state = XenbusStateUnknown;
++ net_accel_update_state(dev, XenbusStateUnknown);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ np->accel_priv = NULL;
++ kfree(vnic);
++
++ return ERR_PTR(err);
++}
++
++
++static void netfront_accel_vnic_dtor(netfront_accel_vnic *vnic)
++{
++ struct net_device *net_dev = vnic->net_dev;
++ struct netfront_info *np =
++ (struct netfront_info *)netdev_priv(net_dev);
++
++	/*
++	 * Now that we no longer hold the lock, it is safe to remove
++	 * this watch and synchronise with the completion of any
++	 * outstanding watch handlers
++	 */
++ DPRINTK("%s: unregistering xenbus mac watch\n", __FUNCTION__);
++ unregister_xenbus_watch(&vnic->mac_address_watch);
++ kfree(vnic->mac_address_watch.node);
++
++ flush_workqueue(netfront_accel_workqueue);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ netfront_accel_debugfs_remove(vnic);
++
++ netfront_accel_ssr_fini(vnic, &vnic->ssr_state);
++
++ EPRINTK_ON(vnic->tx_skb != NULL);
++
++ vnic->frontend_state = XenbusStateUnknown;
++ net_accel_update_state(vnic->dev, XenbusStateUnknown);
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ np->accel_priv = NULL;
++ kfree(vnic);
++}
++
++
++static int vnic_setup_domU_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ struct xenbus_transaction tr;
++ int err;
++ int msgs_per_queue;
++
++ DPRINTK("Setting up domU shared state.\n");
++
++ msgs_per_queue = (PAGE_SIZE/2) / sizeof(struct net_accel_msg);
++
++ /* Allocate buffer state */
++ vnic->tx_bufs = netfront_accel_init_bufs(&vnic->tx_lock);
++ if (vnic->tx_bufs == NULL) {
++ err = -ENOMEM;
++ EPRINTK("%s: Failed to allocate tx buffers\n", __FUNCTION__);
++ goto fail_tx_bufs;
++ }
++
++ vnic->rx_bufs = netfront_accel_init_bufs(NULL);
++ if (vnic->rx_bufs == NULL) {
++ err = -ENOMEM;
++ EPRINTK("%s: Failed to allocate rx buffers\n", __FUNCTION__);
++ goto fail_rx_bufs;
++ }
++
++ /*
++ * This allocates two pages, one for the shared page and one
++ * for the message queue.
++ */
++ vnic->shared_page = (struct net_accel_shared_page *)
++ __get_free_pages(GFP_KERNEL, 1);
++ if (vnic->shared_page == NULL) {
++ EPRINTK("%s: no memory for shared pages\n", __FUNCTION__);
++ err = -ENOMEM;
++ goto fail_shared_page;
++ }
++
++ net_accel_msg_init_queue
++ (&vnic->from_dom0, &vnic->shared_page->queue0,
++ (struct net_accel_msg *)((u8*)vnic->shared_page + PAGE_SIZE),
++ msgs_per_queue);
++
++ net_accel_msg_init_queue
++ (&vnic->to_dom0, &vnic->shared_page->queue1,
++ (struct net_accel_msg *)((u8*)vnic->shared_page +
++ (3 * PAGE_SIZE / 2)),
++ msgs_per_queue);
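++	/*
++	 * Resulting layout: page 0 holds the control shared_page
++	 * itself; page 1 is split in half, with the dom0->domU queue
++	 * at offset PAGE_SIZE and the domU->dom0 queue at offset
++	 * 3*PAGE_SIZE/2, each holding msgs_per_queue messages
++	 */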
++
++ vnic->msg_state = NETFRONT_ACCEL_MSG_NONE;
++
++ err = make_named_grant(dev, vnic->shared_page, "accel-ctrl-page",
++ &vnic->ctrl_page_gnt);
++ if (err) {
++ EPRINTK("couldn't make ctrl-page named grant\n");
++ goto fail_ctrl_page_grant;
++ }
++
++ err = make_named_grant(dev, (u8*)vnic->shared_page + PAGE_SIZE,
++ "accel-msg-page", &vnic->msg_page_gnt);
++ if (err) {
++ EPRINTK("couldn't make msg-page named grant\n");
++ goto fail_msg_page_grant;
++ }
++
++ /* Create xenbus msg event channel */
++ err = bind_listening_port_to_irqhandler
++ (dev->otherend_id, netfront_accel_msg_channel_irq_from_bend,
++ SA_SAMPLE_RANDOM, "vnicctrl", vnic);
++ if (err < 0) {
++ EPRINTK("Couldn't bind msg event channel\n");
++ goto fail_msg_irq;
++ }
++ vnic->msg_channel_irq = err;
++ vnic->msg_channel = irq_to_evtchn_port(vnic->msg_channel_irq);
++
++ /* Create xenbus net event channel */
++ err = bind_listening_port_to_irqhandler
++ (dev->otherend_id, netfront_accel_net_channel_irq_from_bend,
++ SA_SAMPLE_RANDOM, "vnicfront", vnic);
++ if (err < 0) {
++ EPRINTK("Couldn't bind net event channel\n");
++ goto fail_net_irq;
++ }
++ vnic->net_channel_irq = err;
++ vnic->net_channel = irq_to_evtchn_port(vnic->net_channel_irq);
++ /* Want to ensure we don't get interrupts before we're ready */
++ netfront_accel_disable_net_interrupts(vnic);
++
++ DPRINTK("otherend %d has msg ch %u (%u) and net ch %u (%u)\n",
++ dev->otherend_id, vnic->msg_channel, vnic->msg_channel_irq,
++ vnic->net_channel, vnic->net_channel_irq);
++
++ do {
++ err = xenbus_transaction_start(&tr);
++ if (err != 0) {
++ EPRINTK("%s: Transaction start failed %d\n",
++ __FUNCTION__, err);
++ goto fail_transaction;
++ }
++
++ err = xenbus_printf(tr, dev->nodename, "accel-msg-channel",
++ "%u", vnic->msg_channel);
++ if (err != 0) {
++ EPRINTK("%s: event channel xenbus write failed %d\n",
++ __FUNCTION__, err);
++ xenbus_transaction_end(tr, 1);
++ goto fail_transaction;
++ }
++
++ err = xenbus_printf(tr, dev->nodename, "accel-net-channel",
++ "%u", vnic->net_channel);
++ if (err != 0) {
++ EPRINTK("%s: net channel xenbus write failed %d\n",
++ __FUNCTION__, err);
++ xenbus_transaction_end(tr, 1);
++ goto fail_transaction;
++ }
++
++ err = xenbus_transaction_end(tr, 0);
++ } while (err == -EAGAIN);
++
++ if (err != 0) {
++ EPRINTK("%s: Transaction end failed %d\n", __FUNCTION__, err);
++ goto fail_transaction;
++ }
++
++ DPRINTK("Completed setting up domU shared state\n");
++
++ return 0;
++
++fail_transaction:
++
++ unbind_from_irqhandler(vnic->net_channel_irq, vnic);
++fail_net_irq:
++
++ unbind_from_irqhandler(vnic->msg_channel_irq, vnic);
++fail_msg_irq:
++
++ remove_named_grant(dev, "accel-ctrl-page", vnic->ctrl_page_gnt);
++fail_msg_page_grant:
++
++ remove_named_grant(dev, "accel-msg-page", vnic->msg_page_gnt);
++fail_ctrl_page_grant:
++
++ free_pages((unsigned long)vnic->shared_page, 1);
++ vnic->shared_page = NULL;
++fail_shared_page:
++
++ netfront_accel_fini_bufs(vnic->rx_bufs);
++fail_rx_bufs:
++
++ netfront_accel_fini_bufs(vnic->tx_bufs);
++fail_tx_bufs:
++
++ /* Undo the memory allocation created when we got the HELLO */
++ netfront_accel_free_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs);
++
++ DPRINTK("Failed to setup domU shared state with code %d\n", err);
++
++ return err;
++}
++
++
++static void vnic_remove_domU_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ struct xenbus_transaction tr;
++
++ /*
++ * Don't remove any watches because we currently hold the
++ * mutex and the watches take the mutex.
++ */
++
++ DPRINTK("%s: removing event channel irq handlers %d %d\n",
++ __FUNCTION__, vnic->net_channel_irq, vnic->msg_channel_irq);
++ do {
++ if (xenbus_transaction_start(&tr) != 0)
++ break;
++ xenbus_rm(tr, dev->nodename, "accel-msg-channel");
++ xenbus_rm(tr, dev->nodename, "accel-net-channel");
++ } while (xenbus_transaction_end(tr, 0) == -EAGAIN);
++
++ unbind_from_irqhandler(vnic->net_channel_irq, vnic);
++ unbind_from_irqhandler(vnic->msg_channel_irq, vnic);
++
++ /* ungrant pages for msg channel */
++ remove_named_grant(dev, "accel-ctrl-page", vnic->ctrl_page_gnt);
++ remove_named_grant(dev, "accel-msg-page", vnic->msg_page_gnt);
++ free_pages((unsigned long)vnic->shared_page, 1);
++ vnic->shared_page = NULL;
++
++ /* ungrant pages for buffers, and free buffer memory */
++ netfront_accel_free_buffer_mem(&vnic->bufpages,
++ vnic->rx_bufs,
++ vnic->tx_bufs);
++ netfront_accel_fini_bufs(vnic->rx_bufs);
++ netfront_accel_fini_bufs(vnic->tx_bufs);
++}
++
++
++static void vnic_setup_dom0_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ DPRINTK("Setting up dom0 shared state\n");
++
++ netfront_accel_vi_ctor(vnic);
++
++ /*
++ * Message processing will be enabled when this function
++ * returns, but we might have missed an interrupt. Schedule a
++ * check just in case.
++ */
++ queue_work(netfront_accel_workqueue, &vnic->msg_from_bend);
++}
++
++
++static void vnic_remove_dom0_shared_state(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ DPRINTK("Removing dom0 shared state\n");
++
++ vnic_stop_fastpath(vnic);
++
++ netfront_accel_vi_dtor(vnic);
++}
++
++
++/*************************************************************************/
++
++/*
++ * The following code handles accelstate changes between the frontend
++ * and the backend. In response to transitions, calls the following
++ * functions in matching pairs:
++ *
++ * vnic_setup_domU_shared_state
++ * vnic_remove_domU_shared_state
++ *
++ * vnic_setup_dom0_shared_state
++ * vnic_remove_dom0_shared_state
++ *
++ * Valid state transitions for DomU are as follows:
++ *
++ * Closed->Init on probe or in response to Init from dom0
++ *
++ * Init->Connected in response to Init from dom0
++ * Init->Closing on error providing dom0 is in Init
++ * Init->Closed on remove or in response to Closing from dom0
++ *
++ * Connected->Closing on error/remove
++ * Connected->Closed in response to Closing from dom0
++ *
++ * Closing->Closed in response to Closing from dom0
++ *
++ */
++
++
++/* Function to deal with Xenbus accel state change in backend */
++static void netfront_accel_backend_accel_changed(netfront_accel_vnic *vnic,
++ XenbusState backend_state)
++{
++ struct xenbus_device *dev = vnic->dev;
++ XenbusState frontend_state;
++ int state;
++
++ DPRINTK("%s: changing from %s to %s. nodename %s, otherend %s\n",
++ __FUNCTION__, xenbus_strstate(vnic->backend_state),
++ xenbus_strstate(backend_state), dev->nodename, dev->otherend);
++
++ /*
++ * Ignore duplicate state changes. This can happen if the
++ * backend changes state twice in quick succession and the
++ * first watch fires in the frontend after the second
++ * transition has completed.
++ */
++ if (vnic->backend_state == backend_state)
++ return;
++
++ vnic->backend_state = backend_state;
++ frontend_state = vnic->frontend_state;
++
++ switch (backend_state) {
++ case XenbusStateInitialising:
++ /*
++ * It's possible for us to miss the closed state from
++ * dom0, so do the work here.
++ */
++ if (vnic->domU_state_is_setup) {
++ vnic_remove_domU_shared_state(dev, vnic);
++ vnic->domU_state_is_setup = 0;
++ }
++
++ if (frontend_state != XenbusStateInitialising) {
++ /* Make sure the backend doesn't go away. */
++ frontend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, frontend_state);
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d", &state);
++ backend_state = (XenbusState)state;
++ if (backend_state != XenbusStateInitialising)
++ break;
++ }
++
++ /* Start the new connection. */
++ if (!vnic->removing) {
++ BUG_ON(vnic->domU_state_is_setup);
++ if (vnic_setup_domU_shared_state(dev, vnic) == 0) {
++ vnic->domU_state_is_setup = 1;
++ frontend_state = XenbusStateConnected;
++ } else
++ frontend_state = XenbusStateClosing;
++ }
++ break;
++ case XenbusStateConnected:
++ if (vnic->domU_state_is_setup &&
++ !vnic->dom0_state_is_setup) {
++ vnic_setup_dom0_shared_state(dev, vnic);
++ vnic->dom0_state_is_setup = 1;
++ }
++ break;
++ default:
++ case XenbusStateClosing:
++ if (vnic->dom0_state_is_setup) {
++ vnic_remove_dom0_shared_state(dev, vnic);
++ vnic->dom0_state_is_setup = 0;
++ }
++ frontend_state = XenbusStateClosed;
++ break;
++ case XenbusStateUnknown:
++ case XenbusStateClosed:
++ if (vnic->domU_state_is_setup) {
++ vnic_remove_domU_shared_state(dev, vnic);
++ vnic->domU_state_is_setup = 0;
++ }
++ break;
++ }
++
++ if (frontend_state != vnic->frontend_state) {
++ DPRINTK("Switching from state %s (%d) to %s (%d)\n",
++ xenbus_strstate(vnic->frontend_state),
++ vnic->frontend_state,
++ xenbus_strstate(frontend_state), frontend_state);
++ vnic->frontend_state = frontend_state;
++ net_accel_update_state(dev, frontend_state);
++ }
++
++ wake_up(&vnic->state_wait_queue);
++}
++
++
++static void backend_accel_state_change(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int state;
++ netfront_accel_vnic *vnic;
++ struct xenbus_device *dev;
++
++ DPRINTK("%s\n", __FUNCTION__);
++
++ vnic = container_of(watch, struct netfront_accel_vnic,
++ backend_accel_watch);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ dev = vnic->dev;
++
++ state = (int)XenbusStateUnknown;
++ xenbus_scanf(XBT_NIL, dev->otherend, "accelstate", "%d", &state);
++ netfront_accel_backend_accel_changed(vnic, state);
++
++ mutex_unlock(&vnic->vnic_mutex);
++}
++
++
++static int setup_dom0_accel_watch(struct xenbus_device *dev,
++ netfront_accel_vnic *vnic)
++{
++ int err;
++
++ DPRINTK("Setting watch on %s/%s\n", dev->otherend, "accelstate");
++
++ err = xenbus_watch_path2(dev, dev->otherend, "accelstate",
++ &vnic->backend_accel_watch,
++ backend_accel_state_change);
++ if (err) {
++ EPRINTK("%s: Failed to register xenbus watch: %d\n",
++ __FUNCTION__, err);
++ goto fail;
++ }
++ return 0;
++ fail:
++ vnic->backend_accel_watch.node = NULL;
++ return err;
++}
++
++
++int netfront_accel_probe(struct net_device *net_dev, struct xenbus_device *dev)
++{
++ netfront_accel_vnic *vnic;
++ int err;
++
++ DPRINTK("Probe passed device %s\n", dev->nodename);
++
++ vnic = netfront_accel_vnic_ctor(net_dev, dev);
++ if (IS_ERR(vnic))
++ return PTR_ERR(vnic);
++
++ /*
++ * Setup a watch on the backend accel state. This sets things
++ * going.
++ */
++ err = setup_dom0_accel_watch(dev, vnic);
++ if (err) {
++ netfront_accel_vnic_dtor(vnic);
++ EPRINTK("%s: probe failed with code %d\n", __FUNCTION__, err);
++ return err;
++ }
++
++ /*
++ * Indicate to the other end that we're ready to start unless
++ * the watch has already fired.
++ */
++ mutex_lock(&vnic->vnic_mutex);
++ VPRINTK("setup success, updating accelstate\n");
++ if (vnic->frontend_state == XenbusStateClosed) {
++ vnic->frontend_state = XenbusStateInitialising;
++ net_accel_update_state(dev, XenbusStateInitialising);
++ }
++ mutex_unlock(&vnic->vnic_mutex);
++
++ DPRINTK("Probe done device %s\n", dev->nodename);
++
++ return 0;
++}
++
++
++int netfront_accel_remove(struct xenbus_device *dev)
++{
++ struct netfront_info *np =
++ (struct netfront_info *)dev->dev.driver_data;
++ netfront_accel_vnic *vnic = (netfront_accel_vnic *)np->accel_priv;
++
++ DPRINTK("%s %s\n", __FUNCTION__, dev->nodename);
++
++ BUG_ON(vnic == NULL);
++
++ mutex_lock(&vnic->vnic_mutex);
++
++ /* Reject any attempts to connect. */
++ vnic->removing = 1;
++
++ /* Close any existing connection. */
++ if (vnic->frontend_state == XenbusStateConnected) {
++ vnic->frontend_state = XenbusStateClosing;
++ net_accel_update_state(dev, XenbusStateClosing);
++ }
++
++ mutex_unlock(&vnic->vnic_mutex);
++
++ DPRINTK("%s waiting for release of %s\n", __FUNCTION__, dev->nodename);
++
++ /*
++ * Wait for the xenbus watch to release the shared resources.
++ * This indicates that dom0 has made the transition
++ * Closing->Closed or that dom0 was in Closed or Init and no
++ * resources were mapped.
++ */
++ wait_event(vnic->state_wait_queue,
++ !vnic->domU_state_is_setup);
++
++	/*
++	 * Now that we no longer need this watch, it is safe to remove
++	 * it (and thereby synchronise with its handler completing, if
++	 * outstanding)
++	 */
++ DPRINTK("%s: unregistering xenbus accel watch\n",
++ __FUNCTION__);
++ unregister_xenbus_watch(&vnic->backend_accel_watch);
++ kfree(vnic->backend_accel_watch.node);
++
++ netfront_accel_vnic_dtor(vnic);
++
++ DPRINTK("%s done %s\n", __FUNCTION__, dev->nodename);
++
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,172 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author slp
++ * \brief Falcon specific definitions
++ * \date 2004/08
++ */
++
++#ifndef __EF_VI_FALCON_H__
++#define __EF_VI_FALCON_H__
++
++#define EFHW_4K 0x00001000u
++#define EFHW_8K 0x00002000u
++
++/* include the autogenerated register definitions */
++
++#include "ef_vi_falcon_core.h"
++#include "ef_vi_falcon_desc.h"
++#include "ef_vi_falcon_event.h"
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Helpers to turn bit shifts into dword shifts and check that the bit fields
++ * haven't overflowed the dword etc. The aim is to preserve consistency with
++ * the autogenerated headers - once stable we could hard code.
++ *
++ *---------------------------------------------------------------------------*/
++
++/* mask constructors */
++#define __FALCON_MASK(WIDTH,T) ((((T)1) << (WIDTH)) - 1)
++#define __EFVI_MASK32(WIDTH) __FALCON_MASK((WIDTH),uint32_t)
++#define __EFVI_MASK64(WIDTH) __FALCON_MASK((WIDTH),uint64_t)
++
++#define __EFVI_FALCON_MASKFIELD32(LBN, WIDTH) ((uint32_t) \
++ (__EFVI_MASK32(WIDTH) << (LBN)))
++
++/* constructors for fields which span the first and second dwords */
++#define __LW(LBN) (32 - LBN)
++#define LOW(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW((LBN)))) << (LBN)))
++#define HIGH(v, LBN, WIDTH) ((uint32_t)(((v) >> __LW((LBN))) & \
++ __EFVI_MASK64((WIDTH - __LW((LBN))))))
++/* constructors for fields within the second dword */
++#define __DW2(LBN) ((LBN) - 32)
++
++/* constructors for fields which span the second and third dwords */
++#define __LW2(LBN) (64 - LBN)
++#define LOW2(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW2((LBN)))) << ((LBN) - 32)))
++#define HIGH2(v, LBN, WIDTH) ((uint32_t) \
++ (((v) >> __LW2((LBN))) & __EFVI_MASK64((WIDTH - __LW2((LBN))))))
++
++/* constructors for fields within the third dword */
++#define __DW3(LBN) ((LBN) - 64)
++
++
++/* constructors for fields which span the third and fourth dwords */
++#define __LW3(LBN) (96 - LBN)
++#define LOW3(v, LBN, WIDTH) ((uint32_t) \
++ (((v) & __EFVI_MASK64(__LW3((LBN)))) << ((LBN) - 64)))
++#define HIGH3(v, LBN, WIDTH) ((uint32_t) \
++ (((v) >> __LW3((LBN))) & __EFVI_MASK64((WIDTH - __LW3((LBN))))))
++
++/* constructors for fields within the fourth dword */
++#define __DW4(LBN) ((LBN) - 96)
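++
++/*
++ * Worked example: a 12-bit field at LBN 28 spans the first and second
++ * dwords.  LOW(v, 28, 12) places bits 0..3 of v in bits 28..31 of
++ * dword 0, and HIGH(v, 28, 12) places bits 4..11 of v in bits 0..7 of
++ * dword 1.
++ */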
++
++/* checks that the autogenerated headers are consistent with our model */
++#define WIDTHCHCK(a, b) ef_assert((a) == (b))
++#define RANGECHCK(v, WIDTH) \
++ ef_assert(((uint64_t)(v) & ~(__EFVI_MASK64((WIDTH)))) == 0)
++
++/* fields within the first dword */
++#define DWCHCK(LBN, WIDTH) ef_assert(((LBN) >= 0) &&(((LBN)+(WIDTH)) <= 32))
++
++/* fields which span the first and second dwords */
++#define LWCHK(LBN, WIDTH) ef_assert(WIDTH >= __LW(LBN))
++
++/*----------------------------------------------------------------------------
++ *
++ * Buffer virtual addresses (4K buffers)
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Form a buffer virtual address from buffer ID and offset.  If the offset
++** is larger than the buffer size, the address simply indexes into a
++** subsequent buffer.  It is the responsibility of the caller to
++** ensure that they have valid buffers programmed at that address.
++*/
++#define EFVI_FALCON_VADDR_4K_S (12)
++#define EFVI_FALCON_VADDR_M 0xfffff /* post shift mask */
++
++
++#define EFVI_FALCON_BUFFER_4K_ADDR(id,off) \
++ (((id) << EFVI_FALCON_VADDR_4K_S) + (off))
++
++#define EFVI_FALCON_BUFFER_4K_PAGE(vaddr) \
++ (((vaddr) >> EFVI_FALCON_VADDR_4K_S) & EFVI_FALCON_VADDR_M)
++
++#define EFVI_FALCON_BUFFER_4K_OFF(vaddr) \
++ ((vaddr) & __EFVI_MASK32(EFVI_FALCON_VADDR_4K_S))
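++
++/*
++ * Worked example: buffer id 0x2 at offset 0x123 gives
++ * EFVI_FALCON_BUFFER_4K_ADDR(0x2, 0x123) = (0x2 << 12) + 0x123 = 0x2123,
++ * from which EFVI_FALCON_BUFFER_4K_PAGE() recovers 0x2 and
++ * EFVI_FALCON_BUFFER_4K_OFF() recovers 0x123.
++ */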
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Masks
++ *
++ *---------------------------------------------------------------------------*/
++
++#define EFVI_FALCON_CLOCK_ASIC_HZ (125000)
++#define EFVI_FALCON_CLOCK_FPGA_HZ (62500)
++#define EFVI_FALCON_CLOCK_HZ EFVI_FALCON_CLOCK_ASIC_HZ
++
++
++/*----------------------------------------------------------------------------
++ *
++ * Timers
++ *
++ *---------------------------------------------------------------------------*/
++
++/* Event-Queue Timer granularity - measured in us.
++   Given by: 4096 * 3 cycles * clock period */
++
++#define EFVI_FALCON_EVQTIMER_PERIOD_US ((4096 * 3 * 1000) / EFVI_FALCON_CLOCK_HZ)
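++
++/*
++ * With the ASIC clock (125000 kHz) this works out as
++ * (4096 * 3 * 1000) / 125000 = 98 us per timer tick; the FPGA clock
++ * (62500 kHz) would give 196 us.
++ */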
++
++/* mode bits */
++#define EFVI_FALCON_TIMER_MODE_DIS 0 /* disabled */
++#define EFVI_FALCON_TIMER_MODE_RUN 1 /* started counting right away */
++#define EFVI_FALCON_TIMER_MODE_HOLD 2 /* trigger mode (user queues) */
++
++#define EFVI_FALCON_EVQTIMER_HOLD (EFVI_FALCON_TIMER_MODE_HOLD << TIMER_MODE_LBN)
++#define EFVI_FALCON_EVQTIMER_RUN (EFVI_FALCON_TIMER_MODE_RUN << TIMER_MODE_LBN)
++#define EFVI_FALCON_EVQTIMER_DISABLE (EFVI_FALCON_TIMER_MODE_DIS << TIMER_MODE_LBN)
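++
++/*
++ * A timer command word is presumably formed by OR'ing one of the mode
++ * values above with a tick count in the 12-bit TIMER_VAL field, e.g.
++ * (EFVI_FALCON_EVQTIMER_RUN | 10) for a 10-tick countdown.
++ */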
++
++
++/* ---- efhw_event_t helpers --- */
++
++#define EFVI_FALCON_EVENT_CODE(evp) \
++ ((evp)->u64 & EFVI_FALCON_EVENT_CODE_MASK)
++
++#define EFVI_FALCON_EVENT_SW_DATA_MASK 0x0000ffff
++
++#define __EFVI_FALCON_OPEN_MASK(WIDTH) ((((uint64_t)1) << (WIDTH)) - 1)
++
++#define EFVI_FALCON_EVENT_CODE_MASK \
++ (__EFVI_FALCON_OPEN_MASK(EV_CODE_WIDTH) << EV_CODE_LBN)
++
++
++#endif /* __EF_VI_FALCON_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_core.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_core.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,1075 @@
++
++#define EFVI_FALCON_EXTENDED_P_BAR 1
++
++//////////////---- Bus Interface Unit Registers C Header ----//////////////
++#define IOM_IND_ADR_REG_OFST 0x0 // IO-mapped indirect access address register
++ #define IOM_AUTO_ADR_INC_EN_LBN 16
++ #define IOM_AUTO_ADR_INC_EN_WIDTH 1
++ #define IOM_IND_ADR_LBN 0
++ #define IOM_IND_ADR_WIDTH 16
++#define IOM_IND_DAT_REG_OFST 0x4 // IO-mapped indirect access data register
++ #define IOM_IND_DAT_LBN 0
++ #define IOM_IND_DAT_WIDTH 32
++#define ADR_REGION_REG_KER_OFST 0x0 // Address region register
++#define ADR_REGION_REG_OFST 0x0 // Address region register
++ #define ADR_REGION3_LBN 96
++ #define ADR_REGION3_WIDTH 18
++ #define ADR_REGION2_LBN 64
++ #define ADR_REGION2_WIDTH 18
++ #define ADR_REGION1_LBN 32
++ #define ADR_REGION1_WIDTH 18
++ #define ADR_REGION0_LBN 0
++ #define ADR_REGION0_WIDTH 18
++#define INT_EN_REG_KER_OFST 0x10 // Kernel driver Interrupt enable register
++ #define KER_INT_CHAR_LBN 4
++ #define KER_INT_CHAR_WIDTH 1
++ #define KER_INT_KER_LBN 3
++ #define KER_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_KER_LBN 2
++ #define ILL_ADR_ERR_INT_EN_KER_WIDTH 1
++ #define SRM_PERR_INT_EN_KER_LBN 1
++ #define SRM_PERR_INT_EN_KER_WIDTH 1
++ #define DRV_INT_EN_KER_LBN 0
++ #define DRV_INT_EN_KER_WIDTH 1
++#define INT_EN_REG_CHAR_OFST 0x20 // Char Driver interrupt enable register
++ #define CHAR_INT_CHAR_LBN 4
++ #define CHAR_INT_CHAR_WIDTH 1
++ #define CHAR_INT_KER_LBN 3
++ #define CHAR_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_EN_CHAR_LBN 2
++ #define ILL_ADR_ERR_INT_EN_CHAR_WIDTH 1
++ #define SRM_PERR_INT_EN_CHAR_LBN 1
++ #define SRM_PERR_INT_EN_CHAR_WIDTH 1
++ #define DRV_INT_EN_CHAR_LBN 0
++ #define DRV_INT_EN_CHAR_WIDTH 1
++#define INT_ADR_REG_KER_OFST 0x30 // Interrupt host address for Kernel driver
++ #define INT_ADR_KER_LBN 0
++ #define INT_ADR_KER_WIDTH 64
++ #define DRV_INT_KER_LBN 32
++ #define DRV_INT_KER_WIDTH 1
++ #define EV_FF_HALF_INT_KER_LBN 3
++ #define EV_FF_HALF_INT_KER_WIDTH 1
++ #define EV_FF_FULL_INT_KER_LBN 2
++ #define EV_FF_FULL_INT_KER_WIDTH 1
++ #define ILL_ADR_ERR_INT_KER_LBN 1
++ #define ILL_ADR_ERR_INT_KER_WIDTH 1
++ #define SRAM_PERR_INT_KER_LBN 0
++ #define SRAM_PERR_INT_KER_WIDTH 1
++#define INT_ADR_REG_CHAR_OFST 0x40 // Interrupt host address for Char driver
++ #define INT_ADR_CHAR_LBN 0
++ #define INT_ADR_CHAR_WIDTH 64
++ #define DRV_INT_CHAR_LBN 32
++ #define DRV_INT_CHAR_WIDTH 1
++ #define EV_FF_HALF_INT_CHAR_LBN 3
++ #define EV_FF_HALF_INT_CHAR_WIDTH 1
++ #define EV_FF_FULL_INT_CHAR_LBN 2
++ #define EV_FF_FULL_INT_CHAR_WIDTH 1
++ #define ILL_ADR_ERR_INT_CHAR_LBN 1
++ #define ILL_ADR_ERR_INT_CHAR_WIDTH 1
++ #define SRAM_PERR_INT_CHAR_LBN 0
++ #define SRAM_PERR_INT_CHAR_WIDTH 1
++#define INT_ISR0_B0_OFST 0x90 // B0 only
++#define INT_ISR1_B0_OFST 0xA0
++#define INT_ACK_REG_KER_A1_OFST 0x50 // Kernel interrupt acknowledge register
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++#define INT_ACK_REG_CHAR_A1_OFST 0x60 // CHAR interrupt acknowledge register
++ #define RESERVED_LBN 0
++ #define RESERVED_WIDTH 32
++//////////////---- Global CSR Registers C Header ----//////////////
++#define STRAP_REG_KER_OFST 0x200 // ASIC strap status register
++#define STRAP_REG_OFST 0x200 // ASIC strap status register
++ #define ONCHIP_SRAM_LBN 16
++ #define ONCHIP_SRAM_WIDTH 0
++ #define STRAP_ISCSI_EN_LBN 3
++ #define STRAP_ISCSI_EN_WIDTH 1
++ #define STRAP_PINS_LBN 0
++ #define STRAP_PINS_WIDTH 3
++#define GPIO_CTL_REG_KER_OFST 0x210 // GPIO control register
++#define GPIO_CTL_REG_OFST 0x210 // GPIO control register
++ #define GPIO_OEN_LBN 24
++ #define GPIO_OEN_WIDTH 4
++ #define GPIO_OUT_LBN 16
++ #define GPIO_OUT_WIDTH 4
++ #define GPIO_IN_LBN 8
++ #define GPIO_IN_WIDTH 4
++ #define GPIO_PWRUP_VALUE_LBN 0
++ #define GPIO_PWRUP_VALUE_WIDTH 4
++#define GLB_CTL_REG_KER_OFST 0x220 // Global control register
++#define GLB_CTL_REG_OFST 0x220 // Global control register
++ #define SWRST_LBN 0
++ #define SWRST_WIDTH 1
++#define FATAL_INTR_REG_KER_OFST 0x230 // Fatal interrupt register for Kernel
++ #define PCI_BUSERR_INT_KER_EN_LBN 43
++ #define PCI_BUSERR_INT_KER_EN_WIDTH 1
++ #define SRAM_OOB_INT_KER_EN_LBN 42
++ #define SRAM_OOB_INT_KER_EN_WIDTH 1
++ #define BUFID_OOB_INT_KER_EN_LBN 41
++ #define BUFID_OOB_INT_KER_EN_WIDTH 1
++ #define MEM_PERR_INT_KER_EN_LBN 40
++ #define MEM_PERR_INT_KER_EN_WIDTH 1
++ #define RBUF_OWN_INT_KER_EN_LBN 39
++ #define RBUF_OWN_INT_KER_EN_WIDTH 1
++ #define TBUF_OWN_INT_KER_EN_LBN 38
++ #define TBUF_OWN_INT_KER_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_EN_LBN 37
++ #define RDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_EN_LBN 36
++ #define TDESCQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVQ_OWN_INT_KER_EN_LBN 35
++ #define EVQ_OWN_INT_KER_EN_WIDTH 1
++ #define EVFF_OFLO_INT_KER_EN_LBN 34
++ #define EVFF_OFLO_INT_KER_EN_WIDTH 1
++ #define ILL_ADR_INT_KER_EN_LBN 33
++ #define ILL_ADR_INT_KER_EN_WIDTH 1
++ #define SRM_PERR_INT_KER_EN_LBN 32
++ #define SRM_PERR_INT_KER_EN_WIDTH 1
++ #define PCI_BUSERR_INT_KER_LBN 11
++ #define PCI_BUSERR_INT_KER_WIDTH 1
++ #define SRAM_OOB_INT_KER_LBN 10
++ #define SRAM_OOB_INT_KER_WIDTH 1
++ #define BUFID_OOB_INT_KER_LBN 9
++ #define BUFID_OOB_INT_KER_WIDTH 1
++ #define MEM_PERR_INT_KER_LBN 8
++ #define MEM_PERR_INT_KER_WIDTH 1
++ #define RBUF_OWN_INT_KER_LBN 7
++ #define RBUF_OWN_INT_KER_WIDTH 1
++ #define TBUF_OWN_INT_KER_LBN 6
++ #define TBUF_OWN_INT_KER_WIDTH 1
++ #define RDESCQ_OWN_INT_KER_LBN 5
++ #define RDESCQ_OWN_INT_KER_WIDTH 1
++ #define TDESCQ_OWN_INT_KER_LBN 4
++ #define TDESCQ_OWN_INT_KER_WIDTH 1
++ #define EVQ_OWN_INT_KER_LBN 3
++ #define EVQ_OWN_INT_KER_WIDTH 1
++ #define EVFF_OFLO_INT_KER_LBN 2
++ #define EVFF_OFLO_INT_KER_WIDTH 1
++ #define ILL_ADR_INT_KER_LBN 1
++ #define ILL_ADR_INT_KER_WIDTH 1
++ #define SRM_PERR_INT_KER_LBN 0
++ #define SRM_PERR_INT_KER_WIDTH 1
++#define FATAL_INTR_REG_OFST 0x240 // Fatal interrupt register for Char
++ #define PCI_BUSERR_INT_CHAR_EN_LBN 43
++ #define PCI_BUSERR_INT_CHAR_EN_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_EN_LBN 42
++ #define SRAM_OOB_INT_CHAR_EN_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_EN_LBN 41
++ #define BUFID_OOB_INT_CHAR_EN_WIDTH 1
++ #define MEM_PERR_INT_CHAR_EN_LBN 40
++ #define MEM_PERR_INT_CHAR_EN_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_EN_LBN 39
++ #define RBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_EN_LBN 38
++ #define TBUF_OWN_INT_CHAR_EN_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_EN_LBN 37
++ #define RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_EN_LBN 36
++ #define TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_EN_LBN 35
++ #define EVQ_OWN_INT_CHAR_EN_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_EN_LBN 34
++ #define EVFF_OFLO_INT_CHAR_EN_WIDTH 1
++ #define ILL_ADR_INT_CHAR_EN_LBN 33
++ #define ILL_ADR_INT_CHAR_EN_WIDTH 1
++ #define SRM_PERR_INT_CHAR_EN_LBN 32
++ #define SRM_PERR_INT_CHAR_EN_WIDTH 1
++ #define FATAL_INTR_REG_EN_BITS 0xffffffffffffffffULL
++ #define PCI_BUSERR_INT_CHAR_LBN 11
++ #define PCI_BUSERR_INT_CHAR_WIDTH 1
++ #define SRAM_OOB_INT_CHAR_LBN 10
++ #define SRAM_OOB_INT_CHAR_WIDTH 1
++ #define BUFID_OOB_INT_CHAR_LBN 9
++ #define BUFID_OOB_INT_CHAR_WIDTH 1
++ #define MEM_PERR_INT_CHAR_LBN 8
++ #define MEM_PERR_INT_CHAR_WIDTH 1
++ #define RBUF_OWN_INT_CHAR_LBN 7
++ #define RBUF_OWN_INT_CHAR_WIDTH 1
++ #define TBUF_OWN_INT_CHAR_LBN 6
++ #define TBUF_OWN_INT_CHAR_WIDTH 1
++ #define RDESCQ_OWN_INT_CHAR_LBN 5
++ #define RDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define TDESCQ_OWN_INT_CHAR_LBN 4
++ #define TDESCQ_OWN_INT_CHAR_WIDTH 1
++ #define EVQ_OWN_INT_CHAR_LBN 3
++ #define EVQ_OWN_INT_CHAR_WIDTH 1
++ #define EVFF_OFLO_INT_CHAR_LBN 2
++ #define EVFF_OFLO_INT_CHAR_WIDTH 1
++ #define ILL_ADR_INT_CHAR_LBN 1
++ #define ILL_ADR_INT_CHAR_WIDTH 1
++ #define SRM_PERR_INT_CHAR_LBN 0
++ #define SRM_PERR_INT_CHAR_WIDTH 1
++#define DP_CTRL_REG_OFST 0x250 // Datapath control register
++ #define FLS_EVQ_ID_LBN 0
++ #define FLS_EVQ_ID_WIDTH 12
++#define MEM_STAT_REG_KER_OFST 0x260 // Memory status register
++#define MEM_STAT_REG_OFST 0x260 // Memory status register
++ #define MEM_PERR_VEC_LBN 53
++ #define MEM_PERR_VEC_WIDTH 38
++ #define MBIST_CORR_LBN 38
++ #define MBIST_CORR_WIDTH 15
++ #define MBIST_ERR_LBN 0
++ #define MBIST_ERR_WIDTH 38
++#define DEBUG_REG_KER_OFST 0x270 // Debug register
++#define DEBUG_REG_OFST 0x270 // Debug register
++ #define DEBUG_BLK_SEL2_LBN 47
++ #define DEBUG_BLK_SEL2_WIDTH 3
++ #define DEBUG_BLK_SEL1_LBN 44
++ #define DEBUG_BLK_SEL1_WIDTH 3
++ #define DEBUG_BLK_SEL0_LBN 41
++ #define DEBUG_BLK_SEL0_WIDTH 3
++ #define MISC_DEBUG_ADDR_LBN 36
++ #define MISC_DEBUG_ADDR_WIDTH 5
++ #define SERDES_DEBUG_ADDR_LBN 31
++ #define SERDES_DEBUG_ADDR_WIDTH 5
++ #define EM_DEBUG_ADDR_LBN 26
++ #define EM_DEBUG_ADDR_WIDTH 5
++ #define SR_DEBUG_ADDR_LBN 21
++ #define SR_DEBUG_ADDR_WIDTH 5
++ #define EV_DEBUG_ADDR_LBN 16
++ #define EV_DEBUG_ADDR_WIDTH 5
++ #define RX_DEBUG_ADDR_LBN 11
++ #define RX_DEBUG_ADDR_WIDTH 5
++ #define TX_DEBUG_ADDR_LBN 6
++ #define TX_DEBUG_ADDR_WIDTH 5
++ #define BIU_DEBUG_ADDR_LBN 1
++ #define BIU_DEBUG_ADDR_WIDTH 5
++ #define DEBUG_EN_LBN 0
++ #define DEBUG_EN_WIDTH 1
++#define DRIVER_REG0_KER_OFST 0x280 // Driver scratch register 0
++#define DRIVER_REG0_OFST 0x280 // Driver scratch register 0
++ #define DRIVER_DW0_LBN 0
++ #define DRIVER_DW0_WIDTH 32
++#define DRIVER_REG1_KER_OFST 0x290 // Driver scratch register 1
++#define DRIVER_REG1_OFST 0x290 // Driver scratch register 1
++ #define DRIVER_DW1_LBN 0
++ #define DRIVER_DW1_WIDTH 32
++#define DRIVER_REG2_KER_OFST 0x2A0 // Driver scratch register 2
++#define DRIVER_REG2_OFST 0x2A0 // Driver scratch register 2
++ #define DRIVER_DW2_LBN 0
++ #define DRIVER_DW2_WIDTH 32
++#define DRIVER_REG3_KER_OFST 0x2B0 // Driver scratch register 3
++#define DRIVER_REG3_OFST 0x2B0 // Driver scratch register 3
++ #define DRIVER_DW3_LBN 0
++ #define DRIVER_DW3_WIDTH 32
++#define DRIVER_REG4_KER_OFST 0x2C0 // Driver scratch register 4
++#define DRIVER_REG4_OFST 0x2C0 // Driver scratch register 4
++ #define DRIVER_DW4_LBN 0
++ #define DRIVER_DW4_WIDTH 32
++#define DRIVER_REG5_KER_OFST 0x2D0 // Driver scratch register 5
++#define DRIVER_REG5_OFST 0x2D0 // Driver scratch register 5
++ #define DRIVER_DW5_LBN 0
++ #define DRIVER_DW5_WIDTH 32
++#define DRIVER_REG6_KER_OFST 0x2E0 // Driver scratch register 6
++#define DRIVER_REG6_OFST 0x2E0 // Driver scratch register 6
++ #define DRIVER_DW6_LBN 0
++ #define DRIVER_DW6_WIDTH 32
++#define DRIVER_REG7_KER_OFST 0x2F0 // Driver scratch register 7
++#define DRIVER_REG7_OFST 0x2F0 // Driver scratch register 7
++ #define DRIVER_DW7_LBN 0
++ #define DRIVER_DW7_WIDTH 32
++#define ALTERA_BUILD_REG_KER_OFST 0x300 // Altera build register
++#define ALTERA_BUILD_REG_OFST 0x300 // Altera build register
++ #define ALTERA_BUILD_VER_LBN 0
++ #define ALTERA_BUILD_VER_WIDTH 32
++
++/* so called CSR spare register
++ - contains separate parity enable bits for the various internal memory blocks */
++#define MEM_PARITY_ERR_EN_REG_KER 0x310
++#define MEM_PARITY_ALL_BLOCKS_EN_LBN 64
++#define MEM_PARITY_ALL_BLOCKS_EN_WIDTH 38
++#define MEM_PARITY_TX_DATA_EN_LBN 72
++#define MEM_PARITY_TX_DATA_EN_WIDTH 2
++
++//////////////---- Event & Timer Module Registers C Header ----//////////////
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define EVQ_RPTR_REG_KER_OFST 0x11B00 // Event queue read pointer register
++#else
++#define EVQ_RPTR_REG_KER_OFST 0x1B00 // Event queue read pointer register
++#endif
++
++#define EVQ_RPTR_REG_OFST 0xFA0000 // Event queue read pointer register array.
++ #define EVQ_RPTR_LBN 0
++ #define EVQ_RPTR_WIDTH 15
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define EVQ_PTR_TBL_KER_OFST 0x11A00 // Event queue pointer table for kernel access
++#else
++#define EVQ_PTR_TBL_KER_OFST 0x1A00 // Event queue pointer table for kernel access
++#endif
++
++#define EVQ_PTR_TBL_CHAR_OFST 0xF60000 // Event queue pointer table for char direct access
++ #define EVQ_WKUP_OR_INT_EN_LBN 39
++ #define EVQ_WKUP_OR_INT_EN_WIDTH 1
++ #define EVQ_NXT_WPTR_LBN 24
++ #define EVQ_NXT_WPTR_WIDTH 15
++ #define EVQ_EN_LBN 23
++ #define EVQ_EN_WIDTH 1
++ #define EVQ_SIZE_LBN 20
++ #define EVQ_SIZE_WIDTH 3
++ #define EVQ_BUF_BASE_ID_LBN 0
++ #define EVQ_BUF_BASE_ID_WIDTH 20
++#define TIMER_CMD_REG_KER_OFST 0x420 // Timer table for kernel access. Page-mapped
++#define TIMER_CMD_REG_PAGE4_OFST 0x8420 // Timer table for user-level access. Page-mapped. For lowest 1K queues.
++#define TIMER_CMD_REG_PAGE123K_OFST 0x1000420 // Timer table for user-level access. Page-mapped. For upper 3K queues.
++#define TIMER_TBL_OFST 0xF70000 // Timer table for char driver direct access
++ #define TIMER_MODE_LBN 12
++ #define TIMER_MODE_WIDTH 2
++ #define TIMER_VAL_LBN 0
++ #define TIMER_VAL_WIDTH 12
++ #define TIMER_MODE_INT_HLDOFF 2
++ #define EVQ_BUF_SIZE_LBN 0
++ #define EVQ_BUF_SIZE_WIDTH 1
++#define DRV_EV_REG_KER_OFST 0x440 // Driver generated event register
++#define DRV_EV_REG_OFST 0x440 // Driver generated event register
++ #define DRV_EV_QID_LBN 64
++ #define DRV_EV_QID_WIDTH 12
++ #define DRV_EV_DATA_LBN 0
++ #define DRV_EV_DATA_WIDTH 64
++#define EVQ_CTL_REG_KER_OFST 0x450 // Event queue control register
++#define EVQ_CTL_REG_OFST 0x450 // Event queue control register
++ #define RX_EVQ_WAKEUP_MASK_B0_LBN 15
++ #define RX_EVQ_WAKEUP_MASK_B0_WIDTH 6
++ #define EVQ_OWNERR_CTL_LBN 14
++ #define EVQ_OWNERR_CTL_WIDTH 1
++ #define EVQ_FIFO_AF_TH_LBN 8
++ #define EVQ_FIFO_AF_TH_WIDTH 6
++ #define EVQ_FIFO_NOTAF_TH_LBN 0
++ #define EVQ_FIFO_NOTAF_TH_WIDTH 6
++//////////////---- SRAM Module Registers C Header ----//////////////
++#define BUF_TBL_CFG_REG_KER_OFST 0x600 // Buffer table configuration register
++#define BUF_TBL_CFG_REG_OFST 0x600 // Buffer table configuration register
++ #define BUF_TBL_MODE_LBN 3
++ #define BUF_TBL_MODE_WIDTH 1
++#define SRM_RX_DC_CFG_REG_KER_OFST 0x610 // SRAM receive descriptor cache configuration register
++#define SRM_RX_DC_CFG_REG_OFST 0x610 // SRAM receive descriptor cache configuration register
++ #define SRM_RX_DC_BASE_ADR_LBN 0
++ #define SRM_RX_DC_BASE_ADR_WIDTH 21
++#define SRM_TX_DC_CFG_REG_KER_OFST 0x620 // SRAM transmit descriptor cache configuration register
++#define SRM_TX_DC_CFG_REG_OFST 0x620 // SRAM transmit descriptor cache configuration register
++ #define SRM_TX_DC_BASE_ADR_LBN 0
++ #define SRM_TX_DC_BASE_ADR_WIDTH 21
++#define SRM_CFG_REG_KER_OFST 0x630 // SRAM configuration register
++#define SRM_CFG_REG_OFST 0x630 // SRAM configuration register
++ #define SRAM_OOB_ADR_INTEN_LBN 5
++ #define SRAM_OOB_ADR_INTEN_WIDTH 1
++ #define SRAM_OOB_BUF_INTEN_LBN 4
++ #define SRAM_OOB_BUF_INTEN_WIDTH 1
++ #define SRAM_BT_INIT_EN_LBN 3
++ #define SRAM_BT_INIT_EN_WIDTH 1
++ #define SRM_NUM_BANK_LBN 2
++ #define SRM_NUM_BANK_WIDTH 1
++ #define SRM_BANK_SIZE_LBN 0
++ #define SRM_BANK_SIZE_WIDTH 2
++#define BUF_TBL_UPD_REG_KER_OFST 0x650 // Buffer table update register
++#define BUF_TBL_UPD_REG_OFST 0x650 // Buffer table update register
++ #define BUF_UPD_CMD_LBN 63
++ #define BUF_UPD_CMD_WIDTH 1
++ #define BUF_CLR_CMD_LBN 62
++ #define BUF_CLR_CMD_WIDTH 1
++ #define BUF_CLR_END_ID_LBN 32
++ #define BUF_CLR_END_ID_WIDTH 20
++ #define BUF_CLR_START_ID_LBN 0
++ #define BUF_CLR_START_ID_WIDTH 20
++#define SRM_UPD_EVQ_REG_KER_OFST 0x660 // SRAM update event queue register
++#define SRM_UPD_EVQ_REG_OFST 0x660 // SRAM update event queue register
++ #define SRM_UPD_EVQ_ID_LBN 0
++ #define SRM_UPD_EVQ_ID_WIDTH 12
++#define SRAM_PARITY_REG_KER_OFST 0x670 // SRAM parity register.
++#define SRAM_PARITY_REG_OFST 0x670 // SRAM parity register.
++ #define FORCE_SRAM_PERR_LBN 0
++ #define FORCE_SRAM_PERR_WIDTH 1
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define BUF_HALF_TBL_KER_OFST 0x18000 // Buffer table in half buffer table mode direct access by kernel driver
++#else
++#define BUF_HALF_TBL_KER_OFST 0x8000 // Buffer table in half buffer table mode direct access by kernel driver
++#endif
++
++
++#define BUF_HALF_TBL_OFST 0x800000 // Buffer table in half buffer table mode direct access by char driver
++ #define BUF_ADR_HBUF_ODD_LBN 44
++ #define BUF_ADR_HBUF_ODD_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_ODD_LBN 32
++ #define BUF_OWNER_ID_HBUF_ODD_WIDTH 12
++ #define BUF_ADR_HBUF_EVEN_LBN 12
++ #define BUF_ADR_HBUF_EVEN_WIDTH 20
++ #define BUF_OWNER_ID_HBUF_EVEN_LBN 0
++ #define BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
++
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define BUF_FULL_TBL_KER_OFST 0x18000 // Buffer table in full buffer table mode direct access by kernel driver
++#else
++#define BUF_FULL_TBL_KER_OFST 0x8000 // Buffer table in full buffer table mode direct access by kernel driver
++#endif
++
++
++
++
++#define BUF_FULL_TBL_OFST 0x800000 // Buffer table in full buffer table mode direct access by char driver
++ #define IP_DAT_BUF_SIZE_LBN 50
++ #define IP_DAT_BUF_SIZE_WIDTH 1
++ #define BUF_ADR_REGION_LBN 48
++ #define BUF_ADR_REGION_WIDTH 2
++ #define BUF_ADR_FBUF_LBN 14
++ #define BUF_ADR_FBUF_WIDTH 34
++ #define BUF_OWNER_ID_FBUF_LBN 0
++ #define BUF_OWNER_ID_FBUF_WIDTH 14
++#define SRM_DBG_REG_OFST 0x3000000 // SRAM debug access
++ #define SRM_DBG_LBN 0
++ #define SRM_DBG_WIDTH 64
++//////////////---- RX Datapath Registers C Header ----//////////////
++
++#define RX_CFG_REG_KER_OFST 0x800 // Receive configuration register
++#define RX_CFG_REG_OFST 0x800 // Receive configuration register
++
++#if !defined(FALCON_64K_RXFIFO) && !defined(FALCON_PRE_02020029)
++# if !defined(FALCON_128K_RXFIFO)
++# define FALCON_128K_RXFIFO
++# endif
++#endif
++
++#if defined(FALCON_128K_RXFIFO)
++
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 48
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 47
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 46
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 45
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 44
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 43
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_RDW_PATCH_EN_LBN 42 /* Non head of line blocking */
++ #define RX_RDW_PATCH_EN_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 39
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 38
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 33
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 28
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 19
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 10
++ #define RX_XON_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 9
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#elif !defined(FALCON_PRE_02020029)
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 46
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 45
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 44
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 43
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 42
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 41
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 37
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 3
++ #define RX_OWNERR_CTL_B0_LBN 36
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 31
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 26
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 17
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 9
++ #define RX_XON_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 8
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#else
++/* new for B0 */
++ #define RX_TOEP_TCP_SUPPRESS_B0_LBN 44
++ #define RX_TOEP_TCP_SUPPRESS_B0_WIDTH 1
++ #define RX_INGR_EN_B0_LBN 43
++ #define RX_INGR_EN_B0_WIDTH 1
++ #define RX_TOEP_IPV4_B0_LBN 42
++ #define RX_TOEP_IPV4_B0_WIDTH 1
++ #define RX_HASH_ALG_B0_LBN 41
++ #define RX_HASH_ALG_B0_WIDTH 1
++ #define RX_HASH_INSERT_HDR_B0_LBN 40
++ #define RX_HASH_INSERT_HDR_B0_WIDTH 1
++/* moved for B0 */
++ #define RX_DESC_PUSH_EN_B0_LBN 35
++ #define RX_DESC_PUSH_EN_B0_WIDTH 1
++ #define RX_PCI_BURST_SIZE_B0_LBN 35
++ #define RX_PCI_BURST_SIZE_B0_WIDTH 2
++ #define RX_OWNERR_CTL_B0_LBN 34
++ #define RX_OWNERR_CTL_B0_WIDTH 1
++ #define RX_XON_TX_TH_B0_LBN 29
++ #define RX_XON_TX_TH_B0_WIDTH 5
++ #define RX_XOFF_TX_TH_B0_LBN 24
++ #define RX_XOFF_TX_TH_B0_WIDTH 5
++ #define RX_USR_BUF_SIZE_B0_LBN 15
++ #define RX_USR_BUF_SIZE_B0_WIDTH 9
++ #define RX_XON_MAC_TH_B0_LBN 8
++ #define RX_XON_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_TH_B0_LBN 1
++ #define RX_XOFF_MAC_TH_B0_WIDTH 7
++ #define RX_XOFF_MAC_EN_B0_LBN 0
++ #define RX_XOFF_MAC_EN_B0_WIDTH 1
++
++#endif
++
++/* A0/A1 */
++ #define RX_PUSH_EN_A1_LBN 35
++ #define RX_PUSH_EN_A1_WIDTH 1
++ #define RX_PCI_BURST_SIZE_A1_LBN 31
++ #define RX_PCI_BURST_SIZE_A1_WIDTH 3
++ #define RX_OWNERR_CTL_A1_LBN 30
++ #define RX_OWNERR_CTL_A1_WIDTH 1
++ #define RX_XON_TX_TH_A1_LBN 25
++ #define RX_XON_TX_TH_A1_WIDTH 5
++ #define RX_XOFF_TX_TH_A1_LBN 20
++ #define RX_XOFF_TX_TH_A1_WIDTH 5
++ #define RX_USR_BUF_SIZE_A1_LBN 11
++ #define RX_USR_BUF_SIZE_A1_WIDTH 9
++ #define RX_XON_MAC_TH_A1_LBN 6
++ #define RX_XON_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_TH_A1_LBN 1
++ #define RX_XOFF_MAC_TH_A1_WIDTH 5
++ #define RX_XOFF_MAC_EN_A1_LBN 0
++ #define RX_XOFF_MAC_EN_A1_WIDTH 1
++
++#define RX_FILTER_CTL_REG_OFST 0x810 // Receive filter control registers
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_LBN 40
++ #define SCATTER_ENBL_NO_MATCH_Q_B0_WIDTH 1
++ #define UDP_FULL_SRCH_LIMIT_LBN 32
++ #define UDP_FULL_SRCH_LIMIT_WIDTH 8
++ #define NUM_KER_LBN 24
++ #define NUM_KER_WIDTH 2
++ #define UDP_WILD_SRCH_LIMIT_LBN 16
++ #define UDP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_WILD_SRCH_LIMIT_LBN 8
++ #define TCP_WILD_SRCH_LIMIT_WIDTH 8
++ #define TCP_FULL_SRCH_LIMIT_LBN 0
++ #define TCP_FULL_SRCH_LIMIT_WIDTH 8
++#define RX_FLUSH_DESCQ_REG_KER_OFST 0x820 // Receive flush descriptor queue register
++#define RX_FLUSH_DESCQ_REG_OFST 0x820 // Receive flush descriptor queue register
++ #define RX_FLUSH_DESCQ_CMD_LBN 24
++ #define RX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define RX_FLUSH_EVQ_ID_LBN 12
++ #define RX_FLUSH_EVQ_ID_WIDTH 12
++ #define RX_FLUSH_DESCQ_LBN 0
++ #define RX_FLUSH_DESCQ_WIDTH 12
++#define RX_DESC_UPD_REG_KER_OFST 0x830 // Kernel receive descriptor update register. Page-mapped
++#define RX_DESC_UPD_REG_PAGE4_OFST 0x8830 // Char & user receive descriptor update register. Page-mapped. For lowest 1K queues.
++#define RX_DESC_UPD_REG_PAGE123K_OFST 0x1000830 // Char & user receive descriptor update register. Page-mapped. For upper 3K queues.
++ #define RX_DESC_WPTR_LBN 96
++ #define RX_DESC_WPTR_WIDTH 12
++ #define RX_DESC_PUSH_CMD_LBN 95
++ #define RX_DESC_PUSH_CMD_WIDTH 1
++ #define RX_DESC_LBN 0
++ #define RX_DESC_WIDTH 64
++ #define RX_KER_DESC_LBN 0
++ #define RX_KER_DESC_WIDTH 64
++ #define RX_USR_DESC_LBN 0
++ #define RX_USR_DESC_WIDTH 32
++#define RX_DC_CFG_REG_KER_OFST 0x840 // Receive descriptor cache configuration register
++#define RX_DC_CFG_REG_OFST 0x840 // Receive descriptor cache configuration register
++ #define RX_DC_SIZE_LBN 0
++ #define RX_DC_SIZE_WIDTH 2
++#define RX_DC_PF_WM_REG_KER_OFST 0x850 // Receive descriptor cache pre-fetch watermark register
++#define RX_DC_PF_WM_REG_OFST 0x850 // Receive descriptor cache pre-fetch watermark register
++ #define RX_DC_PF_LWM_LO_LBN 0
++ #define RX_DC_PF_LWM_LO_WIDTH 6
++
++#define RX_RSS_TKEY_B0_OFST 0x860 // RSS Toeplitz hash key (B0 only)
++
++#define RX_NODESC_DROP_REG 0x880
++ #define RX_NODESC_DROP_CNT_LBN 0
++ #define RX_NODESC_DROP_CNT_WIDTH 16
++
++#define XM_TX_CFG_REG_OFST 0x1230
++ #define XM_AUTO_PAD_LBN 5
++ #define XM_AUTO_PAD_WIDTH 1
++
++#define RX_FILTER_TBL0_OFST 0xF00000 // Receive filter table - even entries
++ #define RSS_EN_0_B0_LBN 110
++ #define RSS_EN_0_B0_WIDTH 1
++ #define SCATTER_EN_0_B0_LBN 109
++ #define SCATTER_EN_0_B0_WIDTH 1
++ #define TCP_UDP_0_LBN 108
++ #define TCP_UDP_0_WIDTH 1
++ #define RXQ_ID_0_LBN 96
++ #define RXQ_ID_0_WIDTH 12
++ #define DEST_IP_0_LBN 64
++ #define DEST_IP_0_WIDTH 32
++ #define DEST_PORT_TCP_0_LBN 48
++ #define DEST_PORT_TCP_0_WIDTH 16
++ #define SRC_IP_0_LBN 16
++ #define SRC_IP_0_WIDTH 32
++ #define SRC_TCP_DEST_UDP_0_LBN 0
++ #define SRC_TCP_DEST_UDP_0_WIDTH 16
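++/* Sketch of packing a full-match TCP entry for the even filter table.
++ * Entries are wider than 64 bits, so the sketch assumes a low qword
++ * holding bits 0..63 and a high qword holding bits 64..127; all values
++ * are placeholders and must fit their field widths:
++ *
++ *   uint64_t lo = 0, hi = 0;
++ *   lo |= (uint64_t)src_port << SRC_TCP_DEST_UDP_0_LBN;  // bits 0..15
++ *   lo |= (uint64_t)src_ip   << SRC_IP_0_LBN;            // bits 16..47
++ *   lo |= (uint64_t)dst_port << DEST_PORT_TCP_0_LBN;     // bits 48..63
++ *   hi |= (uint64_t)dst_ip   << (DEST_IP_0_LBN - 64);
++ *   hi |= (uint64_t)rxq_id   << (RXQ_ID_0_LBN  - 64);
++ *   hi |= (uint64_t)tcp_udp  << (TCP_UDP_0_LBN - 64);    // TCP vs UDP select
++ */
++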
++#define RX_FILTER_TBL1_OFST 0xF00010 // Receive filter table - odd entries
++ #define RSS_EN_1_B0_LBN 110
++ #define RSS_EN_1_B0_WIDTH 1
++ #define SCATTER_EN_1_B0_LBN 109
++ #define SCATTER_EN_1_B0_WIDTH 1
++ #define TCP_UDP_1_LBN 108
++ #define TCP_UDP_1_WIDTH 1
++ #define RXQ_ID_1_LBN 96
++ #define RXQ_ID_1_WIDTH 12
++ #define DEST_IP_1_LBN 64
++ #define DEST_IP_1_WIDTH 32
++ #define DEST_PORT_TCP_1_LBN 48
++ #define DEST_PORT_TCP_1_WIDTH 16
++ #define SRC_IP_1_LBN 16
++ #define SRC_IP_1_WIDTH 32
++ #define SRC_TCP_DEST_UDP_1_LBN 0
++ #define SRC_TCP_DEST_UDP_1_WIDTH 16
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define RX_DESC_PTR_TBL_KER_OFST 0x11800 // Receive descriptor pointer kernel access
++#else
++#define RX_DESC_PTR_TBL_KER_OFST 0x1800 // Receive descriptor pointer kernel access
++#endif
++
++
++#define RX_DESC_PTR_TBL_OFST 0xF40000 // Receive descriptor pointer table
++ #define RX_ISCSI_DDIG_EN_LBN 88
++ #define RX_ISCSI_DDIG_EN_WIDTH 1
++ #define RX_ISCSI_HDIG_EN_LBN 87
++ #define RX_ISCSI_HDIG_EN_WIDTH 1
++ #define RX_DESC_PREF_ACT_LBN 86
++ #define RX_DESC_PREF_ACT_WIDTH 1
++ #define RX_DC_HW_RPTR_LBN 80
++ #define RX_DC_HW_RPTR_WIDTH 6
++ #define RX_DESCQ_HW_RPTR_LBN 68
++ #define RX_DESCQ_HW_RPTR_WIDTH 12
++ #define RX_DESCQ_SW_WPTR_LBN 56
++ #define RX_DESCQ_SW_WPTR_WIDTH 12
++ #define RX_DESCQ_BUF_BASE_ID_LBN 36
++ #define RX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define RX_DESCQ_EVQ_ID_LBN 24
++ #define RX_DESCQ_EVQ_ID_WIDTH 12
++ #define RX_DESCQ_OWNER_ID_LBN 10
++ #define RX_DESCQ_OWNER_ID_WIDTH 14
++ #define RX_DESCQ_LABEL_LBN 5
++ #define RX_DESCQ_LABEL_WIDTH 5
++ #define RX_DESCQ_SIZE_LBN 3
++ #define RX_DESCQ_SIZE_WIDTH 2
++ #define RX_DESCQ_TYPE_LBN 2
++ #define RX_DESCQ_TYPE_WIDTH 1
++ #define RX_DESCQ_JUMBO_LBN 1
++ #define RX_DESCQ_JUMBO_WIDTH 1
++ #define RX_DESCQ_EN_LBN 0
++ #define RX_DESCQ_EN_WIDTH 1
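++/* Sketch of assembling the low qword of a descriptor-pointer entry from
++ * the fields above (size/label/owner values are placeholders and the
++ * size encoding is not specified here):
++ *
++ *   uint64_t lo = 0;
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_BUF_BASE_ID, buf_base_id);
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_EVQ_ID, evq_id);
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_OWNER_ID, owner_id);
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_LABEL, label);
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_SIZE, size_enc);
++ *   lo = EXAMPLE_SET(lo, RX_DESCQ_EN, 1);
++ */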
++
++
++#define RX_RSS_INDIR_TBL_B0_OFST 0xFB0000 // RSS indirection table (B0 only)
++ #define RX_RSS_INDIR_ENT_B0_LBN 0
++ #define RX_RSS_INDIR_ENT_B0_WIDTH 6
++
++//////////////---- TX Datapath Registers C Header ----//////////////
++#define TX_FLUSH_DESCQ_REG_KER_OFST 0xA00 // Transmit flush descriptor queue register
++#define TX_FLUSH_DESCQ_REG_OFST 0xA00 // Transmit flush descriptor queue register
++ #define TX_FLUSH_DESCQ_CMD_LBN 12
++ #define TX_FLUSH_DESCQ_CMD_WIDTH 1
++ #define TX_FLUSH_DESCQ_LBN 0
++ #define TX_FLUSH_DESCQ_WIDTH 12
++#define TX_DESC_UPD_REG_KER_OFST 0xA10 // Kernel transmit descriptor update register. Page-mapped
++#define TX_DESC_UPD_REG_PAGE4_OFST 0x8A10 // Char & user transmit descriptor update register. Page-mapped
++#define TX_DESC_UPD_REG_PAGE123K_OFST 0x1000A10 // Char & user transmit descriptor update register. Page-mapped
++ #define TX_DESC_WPTR_LBN 96
++ #define TX_DESC_WPTR_WIDTH 12
++ #define TX_DESC_PUSH_CMD_LBN 95
++ #define TX_DESC_PUSH_CMD_WIDTH 1
++ #define TX_DESC_LBN 0
++ #define TX_DESC_WIDTH 95
++ #define TX_KER_DESC_LBN 0
++ #define TX_KER_DESC_WIDTH 64
++ #define TX_USR_DESC_LBN 0
++ #define TX_USR_DESC_WIDTH 64
++#define TX_DC_CFG_REG_KER_OFST 0xA20 // Transmit descriptor cache configuration register
++#define TX_DC_CFG_REG_OFST 0xA20 // Transmit descriptor cache configuration register
++ #define TX_DC_SIZE_LBN 0
++ #define TX_DC_SIZE_WIDTH 2
++
++#if EFVI_FALCON_EXTENDED_P_BAR
++#define TX_DESC_PTR_TBL_KER_OFST 0x11900 // Transmit descriptor pointer.
++#else
++#define TX_DESC_PTR_TBL_KER_OFST 0x1900 // Transmit descriptor pointer.
++#endif
++
++
++#define TX_DESC_PTR_TBL_OFST 0xF50000 // Transmit descriptor pointer
++ #define TX_NON_IP_DROP_DIS_B0_LBN 91
++ #define TX_NON_IP_DROP_DIS_B0_WIDTH 1
++ #define TX_IP_CHKSM_DIS_B0_LBN 90
++ #define TX_IP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_TCP_CHKSM_DIS_B0_LBN 89
++ #define TX_TCP_CHKSM_DIS_B0_WIDTH 1
++ #define TX_DESCQ_EN_LBN 88
++ #define TX_DESCQ_EN_WIDTH 1
++ #define TX_ISCSI_DDIG_EN_LBN 87
++ #define TX_ISCSI_DDIG_EN_WIDTH 1
++ #define TX_ISCSI_HDIG_EN_LBN 86
++ #define TX_ISCSI_HDIG_EN_WIDTH 1
++ #define TX_DC_HW_RPTR_LBN 80
++ #define TX_DC_HW_RPTR_WIDTH 6
++ #define TX_DESCQ_HW_RPTR_LBN 68
++ #define TX_DESCQ_HW_RPTR_WIDTH 12
++ #define TX_DESCQ_SW_WPTR_LBN 56
++ #define TX_DESCQ_SW_WPTR_WIDTH 12
++ #define TX_DESCQ_BUF_BASE_ID_LBN 36
++ #define TX_DESCQ_BUF_BASE_ID_WIDTH 20
++ #define TX_DESCQ_EVQ_ID_LBN 24
++ #define TX_DESCQ_EVQ_ID_WIDTH 12
++ #define TX_DESCQ_OWNER_ID_LBN 10
++ #define TX_DESCQ_OWNER_ID_WIDTH 14
++ #define TX_DESCQ_LABEL_LBN 5
++ #define TX_DESCQ_LABEL_WIDTH 5
++ #define TX_DESCQ_SIZE_LBN 3
++ #define TX_DESCQ_SIZE_WIDTH 2
++ #define TX_DESCQ_TYPE_LBN 1
++ #define TX_DESCQ_TYPE_WIDTH 2
++ #define TX_DESCQ_FLUSH_LBN 0
++ #define TX_DESCQ_FLUSH_WIDTH 1
++#define TX_CFG_REG_KER_OFST 0xA50 // Transmit configuration register
++#define TX_CFG_REG_OFST 0xA50 // Transmit configuration register
++ #define TX_IP_ID_P1_OFS_LBN 32
++ #define TX_IP_ID_P1_OFS_WIDTH 15
++ #define TX_IP_ID_P0_OFS_LBN 16
++ #define TX_IP_ID_P0_OFS_WIDTH 15
++ #define TX_TURBO_EN_LBN 3
++ #define TX_TURBO_EN_WIDTH 1
++ #define TX_OWNERR_CTL_LBN 2
++ #define TX_OWNERR_CTL_WIDTH 1
++ #define TX_NON_IP_DROP_DIS_LBN 1
++ #define TX_NON_IP_DROP_DIS_WIDTH 1
++ #define TX_IP_ID_REP_EN_LBN 0
++ #define TX_IP_ID_REP_EN_WIDTH 1
++#define TX_RESERVED_REG_KER_OFST 0xA80 // Transmit reserved register
++#define TX_RESERVED_REG_OFST 0xA80 // Transmit reserved register
++ #define TX_CSR_PUSH_EN_LBN 89
++ #define TX_CSR_PUSH_EN_WIDTH 1
++ #define TX_RX_SPACER_LBN 64
++ #define TX_RX_SPACER_WIDTH 8
++ #define TX_SW_EV_EN_LBN 59
++ #define TX_SW_EV_EN_WIDTH 1
++ #define TX_RX_SPACER_EN_LBN 57
++ #define TX_RX_SPACER_EN_WIDTH 1
++ #define TX_CSR_PREF_WD_TMR_LBN 24
++ #define TX_CSR_PREF_WD_TMR_WIDTH 16
++ #define TX_CSR_ONLY1TAG_LBN 21
++ #define TX_CSR_ONLY1TAG_WIDTH 1
++ #define TX_PREF_THRESHOLD_LBN 19
++ #define TX_PREF_THRESHOLD_WIDTH 2
++ #define TX_ONE_PKT_PER_Q_LBN 18
++ #define TX_ONE_PKT_PER_Q_WIDTH 1
++ #define TX_DIS_NON_IP_EV_LBN 17
++ #define TX_DIS_NON_IP_EV_WIDTH 1
++ #define TX_DMA_SPACER_LBN 8
++ #define TX_DMA_SPACER_WIDTH 8
++ #define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
++ #define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
++ #define TX_TCP_DIS_A1_LBN 7
++ #define TX_TCP_DIS_A1_WIDTH 1
++ #define TX_IP_DIS_A1_LBN 6
++ #define TX_IP_DIS_A1_WIDTH 1
++ #define TX_MAX_CPL_LBN 2
++ #define TX_MAX_CPL_WIDTH 2
++ #define TX_MAX_PREF_LBN 0
++ #define TX_MAX_PREF_WIDTH 2
++#define TX_VLAN_REG_OFST 0xAE0 // Transmit VLAN tag register
++ #define TX_VLAN_EN_LBN 127
++ #define TX_VLAN_EN_WIDTH 1
++ #define TX_VLAN7_PORT1_EN_LBN 125
++ #define TX_VLAN7_PORT1_EN_WIDTH 1
++ #define TX_VLAN7_PORT0_EN_LBN 124
++ #define TX_VLAN7_PORT0_EN_WIDTH 1
++ #define TX_VLAN7_LBN 112
++ #define TX_VLAN7_WIDTH 12
++ #define TX_VLAN6_PORT1_EN_LBN 109
++ #define TX_VLAN6_PORT1_EN_WIDTH 1
++ #define TX_VLAN6_PORT0_EN_LBN 108
++ #define TX_VLAN6_PORT0_EN_WIDTH 1
++ #define TX_VLAN6_LBN 96
++ #define TX_VLAN6_WIDTH 12
++ #define TX_VLAN5_PORT1_EN_LBN 93
++ #define TX_VLAN5_PORT1_EN_WIDTH 1
++ #define TX_VLAN5_PORT0_EN_LBN 92
++ #define TX_VLAN5_PORT0_EN_WIDTH 1
++ #define TX_VLAN5_LBN 80
++ #define TX_VLAN5_WIDTH 12
++ #define TX_VLAN4_PORT1_EN_LBN 77
++ #define TX_VLAN4_PORT1_EN_WIDTH 1
++ #define TX_VLAN4_PORT0_EN_LBN 76
++ #define TX_VLAN4_PORT0_EN_WIDTH 1
++ #define TX_VLAN4_LBN 64
++ #define TX_VLAN4_WIDTH 12
++ #define TX_VLAN3_PORT1_EN_LBN 61
++ #define TX_VLAN3_PORT1_EN_WIDTH 1
++ #define TX_VLAN3_PORT0_EN_LBN 60
++ #define TX_VLAN3_PORT0_EN_WIDTH 1
++ #define TX_VLAN3_LBN 48
++ #define TX_VLAN3_WIDTH 12
++ #define TX_VLAN2_PORT1_EN_LBN 45
++ #define TX_VLAN2_PORT1_EN_WIDTH 1
++ #define TX_VLAN2_PORT0_EN_LBN 44
++ #define TX_VLAN2_PORT0_EN_WIDTH 1
++ #define TX_VLAN2_LBN 32
++ #define TX_VLAN2_WIDTH 12
++ #define TX_VLAN1_PORT1_EN_LBN 29
++ #define TX_VLAN1_PORT1_EN_WIDTH 1
++ #define TX_VLAN1_PORT0_EN_LBN 28
++ #define TX_VLAN1_PORT0_EN_WIDTH 1
++ #define TX_VLAN1_LBN 16
++ #define TX_VLAN1_WIDTH 12
++ #define TX_VLAN0_PORT1_EN_LBN 13
++ #define TX_VLAN0_PORT1_EN_WIDTH 1
++ #define TX_VLAN0_PORT0_EN_LBN 12
++ #define TX_VLAN0_PORT0_EN_WIDTH 1
++ #define TX_VLAN0_LBN 0
++ #define TX_VLAN0_WIDTH 12
++#define TX_FIL_CTL_REG_OFST 0xAF0 // Transmit filter control register
++ #define TX_MADR1_FIL_EN_LBN 65
++ #define TX_MADR1_FIL_EN_WIDTH 1
++ #define TX_MADR0_FIL_EN_LBN 64
++ #define TX_MADR0_FIL_EN_WIDTH 1
++ #define TX_IPFIL31_PORT1_EN_LBN 63
++ #define TX_IPFIL31_PORT1_EN_WIDTH 1
++ #define TX_IPFIL31_PORT0_EN_LBN 62
++ #define TX_IPFIL31_PORT0_EN_WIDTH 1
++ #define TX_IPFIL30_PORT1_EN_LBN 61
++ #define TX_IPFIL30_PORT1_EN_WIDTH 1
++ #define TX_IPFIL30_PORT0_EN_LBN 60
++ #define TX_IPFIL30_PORT0_EN_WIDTH 1
++ #define TX_IPFIL29_PORT1_EN_LBN 59
++ #define TX_IPFIL29_PORT1_EN_WIDTH 1
++ #define TX_IPFIL29_PORT0_EN_LBN 58
++ #define TX_IPFIL29_PORT0_EN_WIDTH 1
++ #define TX_IPFIL28_PORT1_EN_LBN 57
++ #define TX_IPFIL28_PORT1_EN_WIDTH 1
++ #define TX_IPFIL28_PORT0_EN_LBN 56
++ #define TX_IPFIL28_PORT0_EN_WIDTH 1
++ #define TX_IPFIL27_PORT1_EN_LBN 55
++ #define TX_IPFIL27_PORT1_EN_WIDTH 1
++ #define TX_IPFIL27_PORT0_EN_LBN 54
++ #define TX_IPFIL27_PORT0_EN_WIDTH 1
++ #define TX_IPFIL26_PORT1_EN_LBN 53
++ #define TX_IPFIL26_PORT1_EN_WIDTH 1
++ #define TX_IPFIL26_PORT0_EN_LBN 52
++ #define TX_IPFIL26_PORT0_EN_WIDTH 1
++ #define TX_IPFIL25_PORT1_EN_LBN 51
++ #define TX_IPFIL25_PORT1_EN_WIDTH 1
++ #define TX_IPFIL25_PORT0_EN_LBN 50
++ #define TX_IPFIL25_PORT0_EN_WIDTH 1
++ #define TX_IPFIL24_PORT1_EN_LBN 49
++ #define TX_IPFIL24_PORT1_EN_WIDTH 1
++ #define TX_IPFIL24_PORT0_EN_LBN 48
++ #define TX_IPFIL24_PORT0_EN_WIDTH 1
++ #define TX_IPFIL23_PORT1_EN_LBN 47
++ #define TX_IPFIL23_PORT1_EN_WIDTH 1
++ #define TX_IPFIL23_PORT0_EN_LBN 46
++ #define TX_IPFIL23_PORT0_EN_WIDTH 1
++ #define TX_IPFIL22_PORT1_EN_LBN 45
++ #define TX_IPFIL22_PORT1_EN_WIDTH 1
++ #define TX_IPFIL22_PORT0_EN_LBN 44
++ #define TX_IPFIL22_PORT0_EN_WIDTH 1
++ #define TX_IPFIL21_PORT1_EN_LBN 43
++ #define TX_IPFIL21_PORT1_EN_WIDTH 1
++ #define TX_IPFIL21_PORT0_EN_LBN 42
++ #define TX_IPFIL21_PORT0_EN_WIDTH 1
++ #define TX_IPFIL20_PORT1_EN_LBN 41
++ #define TX_IPFIL20_PORT1_EN_WIDTH 1
++ #define TX_IPFIL20_PORT0_EN_LBN 40
++ #define TX_IPFIL20_PORT0_EN_WIDTH 1
++ #define TX_IPFIL19_PORT1_EN_LBN 39
++ #define TX_IPFIL19_PORT1_EN_WIDTH 1
++ #define TX_IPFIL19_PORT0_EN_LBN 38
++ #define TX_IPFIL19_PORT0_EN_WIDTH 1
++ #define TX_IPFIL18_PORT1_EN_LBN 37
++ #define TX_IPFIL18_PORT1_EN_WIDTH 1
++ #define TX_IPFIL18_PORT0_EN_LBN 36
++ #define TX_IPFIL18_PORT0_EN_WIDTH 1
++ #define TX_IPFIL17_PORT1_EN_LBN 35
++ #define TX_IPFIL17_PORT1_EN_WIDTH 1
++ #define TX_IPFIL17_PORT0_EN_LBN 34
++ #define TX_IPFIL17_PORT0_EN_WIDTH 1
++ #define TX_IPFIL16_PORT1_EN_LBN 33
++ #define TX_IPFIL16_PORT1_EN_WIDTH 1
++ #define TX_IPFIL16_PORT0_EN_LBN 32
++ #define TX_IPFIL16_PORT0_EN_WIDTH 1
++ #define TX_IPFIL15_PORT1_EN_LBN 31
++ #define TX_IPFIL15_PORT1_EN_WIDTH 1
++ #define TX_IPFIL15_PORT0_EN_LBN 30
++ #define TX_IPFIL15_PORT0_EN_WIDTH 1
++ #define TX_IPFIL14_PORT1_EN_LBN 29
++ #define TX_IPFIL14_PORT1_EN_WIDTH 1
++ #define TX_IPFIL14_PORT0_EN_LBN 28
++ #define TX_IPFIL14_PORT0_EN_WIDTH 1
++ #define TX_IPFIL13_PORT1_EN_LBN 27
++ #define TX_IPFIL13_PORT1_EN_WIDTH 1
++ #define TX_IPFIL13_PORT0_EN_LBN 26
++ #define TX_IPFIL13_PORT0_EN_WIDTH 1
++ #define TX_IPFIL12_PORT1_EN_LBN 25
++ #define TX_IPFIL12_PORT1_EN_WIDTH 1
++ #define TX_IPFIL12_PORT0_EN_LBN 24
++ #define TX_IPFIL12_PORT0_EN_WIDTH 1
++ #define TX_IPFIL11_PORT1_EN_LBN 23
++ #define TX_IPFIL11_PORT1_EN_WIDTH 1
++ #define TX_IPFIL11_PORT0_EN_LBN 22
++ #define TX_IPFIL11_PORT0_EN_WIDTH 1
++ #define TX_IPFIL10_PORT1_EN_LBN 21
++ #define TX_IPFIL10_PORT1_EN_WIDTH 1
++ #define TX_IPFIL10_PORT0_EN_LBN 20
++ #define TX_IPFIL10_PORT0_EN_WIDTH 1
++ #define TX_IPFIL9_PORT1_EN_LBN 19
++ #define TX_IPFIL9_PORT1_EN_WIDTH 1
++ #define TX_IPFIL9_PORT0_EN_LBN 18
++ #define TX_IPFIL9_PORT0_EN_WIDTH 1
++ #define TX_IPFIL8_PORT1_EN_LBN 17
++ #define TX_IPFIL8_PORT1_EN_WIDTH 1
++ #define TX_IPFIL8_PORT0_EN_LBN 16
++ #define TX_IPFIL8_PORT0_EN_WIDTH 1
++ #define TX_IPFIL7_PORT1_EN_LBN 15
++ #define TX_IPFIL7_PORT1_EN_WIDTH 1
++ #define TX_IPFIL7_PORT0_EN_LBN 14
++ #define TX_IPFIL7_PORT0_EN_WIDTH 1
++ #define TX_IPFIL6_PORT1_EN_LBN 13
++ #define TX_IPFIL6_PORT1_EN_WIDTH 1
++ #define TX_IPFIL6_PORT0_EN_LBN 12
++ #define TX_IPFIL6_PORT0_EN_WIDTH 1
++ #define TX_IPFIL5_PORT1_EN_LBN 11
++ #define TX_IPFIL5_PORT1_EN_WIDTH 1
++ #define TX_IPFIL5_PORT0_EN_LBN 10
++ #define TX_IPFIL5_PORT0_EN_WIDTH 1
++ #define TX_IPFIL4_PORT1_EN_LBN 9
++ #define TX_IPFIL4_PORT1_EN_WIDTH 1
++ #define TX_IPFIL4_PORT0_EN_LBN 8
++ #define TX_IPFIL4_PORT0_EN_WIDTH 1
++ #define TX_IPFIL3_PORT1_EN_LBN 7
++ #define TX_IPFIL3_PORT1_EN_WIDTH 1
++ #define TX_IPFIL3_PORT0_EN_LBN 6
++ #define TX_IPFIL3_PORT0_EN_WIDTH 1
++ #define TX_IPFIL2_PORT1_EN_LBN 5
++ #define TX_IPFIL2_PORT1_EN_WIDTH 1
++ #define TX_IPFIL2_PORT0_EN_LBN 4
++ #define TX_IPFIL2_PORT0_EN_WIDTH 1
++ #define TX_IPFIL1_PORT1_EN_LBN 3
++ #define TX_IPFIL1_PORT1_EN_WIDTH 1
++ #define TX_IPFIL1_PORT0_EN_LBN 2
++ #define TX_IPFIL1_PORT0_EN_WIDTH 1
++ #define TX_IPFIL0_PORT1_EN_LBN 1
++ #define TX_IPFIL0_PORT1_EN_WIDTH 1
++ #define TX_IPFIL0_PORT0_EN_LBN 0
++ #define TX_IPFIL0_PORT0_EN_WIDTH 1
++#define TX_IPFIL_TBL_OFST 0xB00 // Transmit IP source address filter table
++ #define TX_IPFIL_MASK_LBN 32
++ #define TX_IPFIL_MASK_WIDTH 32
++ #define TX_IP_SRC_ADR_LBN 0
++ #define TX_IP_SRC_ADR_WIDTH 32
++#define TX_PACE_REG_A1_OFST 0xF80000 // Transmit pace control register
++#define TX_PACE_REG_B0_OFST 0xA90 // Transmit pace control register
++ #define TX_PACE_SB_AF_LBN 19
++ #define TX_PACE_SB_AF_WIDTH 10
++ #define TX_PACE_SB_NOTAF_LBN 9
++ #define TX_PACE_SB_NOTAF_WIDTH 10
++ #define TX_PACE_FB_BASE_LBN 5
++ #define TX_PACE_FB_BASE_WIDTH 4
++ #define TX_PACE_BIN_TH_LBN 0
++ #define TX_PACE_BIN_TH_WIDTH 5
++#define TX_PACE_TBL_A1_OFST 0xF80040 // Transmit pacing table
++#define TX_PACE_TBL_FIRST_QUEUE_A1 4
++#define TX_PACE_TBL_B0_OFST 0xF80000 // Transmit pacing table
++#define TX_PACE_TBL_FIRST_QUEUE_B0 0
++ #define TX_PACE_LBN 0
++ #define TX_PACE_WIDTH 5
++
++//////////////---- EE/Flash Registers C Header ----//////////////
++#define EE_SPI_HCMD_REG_KER_OFST 0x100 // SPI host command register
++#define EE_SPI_HCMD_REG_OFST 0x100 // SPI host command register
++ #define EE_SPI_HCMD_CMD_EN_LBN 31
++ #define EE_SPI_HCMD_CMD_EN_WIDTH 1
++ #define EE_WR_TIMER_ACTIVE_LBN 28
++ #define EE_WR_TIMER_ACTIVE_WIDTH 1
++ #define EE_SPI_HCMD_SF_SEL_LBN 24
++ #define EE_SPI_HCMD_SF_SEL_WIDTH 1
++ #define EE_SPI_HCMD_DABCNT_LBN 16
++ #define EE_SPI_HCMD_DABCNT_WIDTH 5
++ #define EE_SPI_HCMD_READ_LBN 15
++ #define EE_SPI_HCMD_READ_WIDTH 1
++ #define EE_SPI_HCMD_DUBCNT_LBN 12
++ #define EE_SPI_HCMD_DUBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ADBCNT_LBN 8
++ #define EE_SPI_HCMD_ADBCNT_WIDTH 2
++ #define EE_SPI_HCMD_ENC_LBN 0
++ #define EE_SPI_HCMD_ENC_WIDTH 8
++#define EE_SPI_HADR_REG_KER_OFST 0x110 // SPI host address register
++#define EE_SPI_HADR_REG_OFST 0x110 // SPI host address register
++ #define EE_SPI_HADR_DUBYTE_LBN 24
++ #define EE_SPI_HADR_DUBYTE_WIDTH 8
++ #define EE_SPI_HADR_ADR_LBN 0
++ #define EE_SPI_HADR_ADR_WIDTH 24
++#define EE_SPI_HDATA_REG_KER_OFST 0x120 // SPI host data register
++#define EE_SPI_HDATA_REG_OFST 0x120 // SPI host data register
++ #define EE_SPI_HDATA3_LBN 96
++ #define EE_SPI_HDATA3_WIDTH 32
++ #define EE_SPI_HDATA2_LBN 64
++ #define EE_SPI_HDATA2_WIDTH 32
++ #define EE_SPI_HDATA1_LBN 32
++ #define EE_SPI_HDATA1_WIDTH 32
++ #define EE_SPI_HDATA0_LBN 0
++ #define EE_SPI_HDATA0_WIDTH 32
++#define EE_BASE_PAGE_REG_KER_OFST 0x130 // Expansion ROM base mirror register
++#define EE_BASE_PAGE_REG_OFST 0x130 // Expansion ROM base mirror register
++ #define EE_EXP_ROM_WINDOW_BASE_LBN 16
++ #define EE_EXP_ROM_WINDOW_BASE_WIDTH 13
++ #define EE_EXPROM_MASK_LBN 0
++ #define EE_EXPROM_MASK_WIDTH 13
++#define EE_VPD_CFG0_REG_KER_OFST 0x140 // SPI/VPD configuration register
++#define EE_VPD_CFG0_REG_OFST 0x140 // SPI/VPD configuration register
++ #define EE_SF_FASTRD_EN_LBN 127
++ #define EE_SF_FASTRD_EN_WIDTH 1
++ #define EE_SF_CLOCK_DIV_LBN 120
++ #define EE_SF_CLOCK_DIV_WIDTH 7
++ #define EE_VPD_WIP_POLL_LBN 119
++ #define EE_VPD_WIP_POLL_WIDTH 1
++ #define EE_VPDW_LENGTH_LBN 80
++ #define EE_VPDW_LENGTH_WIDTH 15
++ #define EE_VPDW_BASE_LBN 64
++ #define EE_VPDW_BASE_WIDTH 15
++ #define EE_VPD_WR_CMD_EN_LBN 56
++ #define EE_VPD_WR_CMD_EN_WIDTH 8
++ #define EE_VPD_BASE_LBN 32
++ #define EE_VPD_BASE_WIDTH 24
++ #define EE_VPD_LENGTH_LBN 16
++ #define EE_VPD_LENGTH_WIDTH 13
++ #define EE_VPD_AD_SIZE_LBN 8
++ #define EE_VPD_AD_SIZE_WIDTH 5
++ #define EE_VPD_ACCESS_ON_LBN 5
++ #define EE_VPD_ACCESS_ON_WIDTH 1
++#define EE_VPD_SW_CNTL_REG_KER_OFST 0x150 // VPD access SW control register
++#define EE_VPD_SW_CNTL_REG_OFST 0x150 // VPD access SW control register
++ #define EE_VPD_CYCLE_PENDING_LBN 31
++ #define EE_VPD_CYCLE_PENDING_WIDTH 1
++ #define EE_VPD_CYC_WRITE_LBN 28
++ #define EE_VPD_CYC_WRITE_WIDTH 1
++ #define EE_VPD_CYC_ADR_LBN 0
++ #define EE_VPD_CYC_ADR_WIDTH 15
++#define EE_VPD_SW_DATA_REG_KER_OFST 0x160 // VPD access SW data register
++#define EE_VPD_SW_DATA_REG_OFST 0x160 // VPD access SW data register
++ #define EE_VPD_CYC_DAT_LBN 0
++ #define EE_VPD_CYC_DAT_WIDTH 32
+Index: head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_desc.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,43 @@
++//////////////---- Descriptors C Headers ----//////////////
++// Receive Kernel IP Descriptor
++ #define RX_KER_BUF_SIZE_LBN 48
++ #define RX_KER_BUF_SIZE_WIDTH 14
++ #define RX_KER_BUF_REGION_LBN 46
++ #define RX_KER_BUF_REGION_WIDTH 2
++ #define RX_KER_BUF_REGION0_DECODE 0
++ #define RX_KER_BUF_REGION1_DECODE 1
++ #define RX_KER_BUF_REGION2_DECODE 2
++ #define RX_KER_BUF_REGION3_DECODE 3
++ #define RX_KER_BUF_ADR_LBN 0
++ #define RX_KER_BUF_ADR_WIDTH 46
++// Receive User IP Descriptor
++ #define RX_USR_2BYTE_OFS_LBN 20
++ #define RX_USR_2BYTE_OFS_WIDTH 12
++ #define RX_USR_BUF_ID_LBN 0
++ #define RX_USR_BUF_ID_WIDTH 20
++// Transmit Kernel IP Descriptor
++ #define TX_KER_PORT_LBN 63
++ #define TX_KER_PORT_WIDTH 1
++ #define TX_KER_CONT_LBN 62
++ #define TX_KER_CONT_WIDTH 1
++ #define TX_KER_BYTE_CNT_LBN 48
++ #define TX_KER_BYTE_CNT_WIDTH 14
++ #define TX_KER_BUF_REGION_LBN 46
++ #define TX_KER_BUF_REGION_WIDTH 2
++ #define TX_KER_BUF_REGION0_DECODE 0
++ #define TX_KER_BUF_REGION1_DECODE 1
++ #define TX_KER_BUF_REGION2_DECODE 2
++ #define TX_KER_BUF_REGION3_DECODE 3
++ #define TX_KER_BUF_ADR_LBN 0
++ #define TX_KER_BUF_ADR_WIDTH 46
++// Transmit User IP Descriptor
++ #define TX_USR_PORT_LBN 47
++ #define TX_USR_PORT_WIDTH 1
++ #define TX_USR_CONT_LBN 46
++ #define TX_USR_CONT_WIDTH 1
++ #define TX_USR_BYTE_CNT_LBN 33
++ #define TX_USR_BYTE_CNT_WIDTH 13
++ #define TX_USR_BUF_ID_LBN 13
++ #define TX_USR_BUF_ID_WIDTH 20
++ #define TX_USR_BYTE_OFS_LBN 0
++ #define TX_USR_BYTE_OFS_WIDTH 13
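++
++/* Sketch of forming a single-fragment transmit kernel descriptor from
++ * the fields above; region 0 is assumed to be the plain-address decode,
++ * and TX_KER_CONT is left 0 because no fragment follows:
++ *
++ *   uint64_t desc = 0;
++ *   desc |= (uint64_t)dma_addr << TX_KER_BUF_ADR_LBN;    // bits 0..45
++ *   desc |= (uint64_t)TX_KER_BUF_REGION0_DECODE << TX_KER_BUF_REGION_LBN;
++ *   desc |= (uint64_t)len << TX_KER_BYTE_CNT_LBN;        // bits 48..61
++ */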
+Index: head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_event.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_falcon_event.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,123 @@
++//////////////---- Events Format C Header ----//////////////
++//////////////---- Event entry ----//////////////
++ #define EV_CODE_LBN 60
++ #define EV_CODE_WIDTH 4
++ #define RX_IP_EV_DECODE 0
++ #define TX_IP_EV_DECODE 2
++ #define DRIVER_EV_DECODE 5
++ #define GLOBAL_EV_DECODE 6
++ #define DRV_GEN_EV_DECODE 7
++ #define EV_DATA_LBN 0
++ #define EV_DATA_WIDTH 60
++//////////////---- Receive IP events for both Kernel & User event queues ----//////////////
++ #define RX_EV_PKT_OK_LBN 56
++ #define RX_EV_PKT_OK_WIDTH 1
++ #define RX_EV_BUF_OWNER_ID_ERR_LBN 54
++ #define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
++ #define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
++ #define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
++ #define RX_EV_ETH_CRC_ERR_LBN 50
++ #define RX_EV_ETH_CRC_ERR_WIDTH 1
++ #define RX_EV_FRM_TRUNC_LBN 49
++ #define RX_EV_FRM_TRUNC_WIDTH 1
++ #define RX_EV_DRIB_NIB_LBN 48
++ #define RX_EV_DRIB_NIB_WIDTH 1
++ #define RX_EV_TOBE_DISC_LBN 47
++ #define RX_EV_TOBE_DISC_WIDTH 1
++ #define RX_EV_PKT_TYPE_LBN 44
++ #define RX_EV_PKT_TYPE_WIDTH 3
++ #define RX_EV_PKT_TYPE_ETH_DECODE 0
++ #define RX_EV_PKT_TYPE_LLC_DECODE 1
++ #define RX_EV_PKT_TYPE_JUMBO_DECODE 2
++ #define RX_EV_PKT_TYPE_VLAN_DECODE 3
++ #define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
++ #define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
++ #define RX_EV_HDR_TYPE_LBN 42
++ #define RX_EV_HDR_TYPE_WIDTH 2
++ #define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
++ #define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
++ #define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
++ #define RX_EV_HDR_TYPE_NON_IP_DECODE 3
++ #define RX_EV_DESC_Q_EMPTY_LBN 41
++ #define RX_EV_DESC_Q_EMPTY_WIDTH 1
++ #define RX_EV_MCAST_HASH_MATCH_LBN 40
++ #define RX_EV_MCAST_HASH_MATCH_WIDTH 1
++ #define RX_EV_MCAST_PKT_LBN 39
++ #define RX_EV_MCAST_PKT_WIDTH 1
++ #define RX_EV_Q_LABEL_LBN 32
++ #define RX_EV_Q_LABEL_WIDTH 5
++ #define RX_JUMBO_CONT_LBN 31
++ #define RX_JUMBO_CONT_WIDTH 1
++ #define RX_SOP_LBN 15
++ #define RX_SOP_WIDTH 1
++ #define RX_PORT_LBN 30
++ #define RX_PORT_WIDTH 1
++ #define RX_EV_BYTE_CNT_LBN 16
++ #define RX_EV_BYTE_CNT_WIDTH 14
++ #define RX_iSCSI_PKT_OK_LBN 14
++ #define RX_iSCSI_PKT_OK_WIDTH 1
++ #define RX_ISCSI_DDIG_ERR_LBN 13
++ #define RX_ISCSI_DDIG_ERR_WIDTH 1
++ #define RX_ISCSI_HDIG_ERR_LBN 12
++ #define RX_ISCSI_HDIG_ERR_WIDTH 1
++ #define RX_EV_DESC_PTR_LBN 0
++ #define RX_EV_DESC_PTR_WIDTH 12
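++/* Sketch of decoding a receive completion from the fields above, using
++ * the illustrative EXAMPLE_GET() helper from the core-register header:
++ *
++ *   if (EXAMPLE_GET(ev, EV_CODE) == RX_IP_EV_DECODE) {
++ *           unsigned ok  = EXAMPLE_GET(ev, RX_EV_PKT_OK);
++ *           unsigned len = EXAMPLE_GET(ev, RX_EV_BYTE_CNT);
++ *           unsigned ptr = EXAMPLE_GET(ev, RX_EV_DESC_PTR);
++ *           // ok/len/ptr now describe the completed descriptor
++ *   }
++ */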
++//////////////---- Transmit IP events for both Kernel & User event queues ----//////////////
++ #define TX_EV_PKT_ERR_LBN 38
++ #define TX_EV_PKT_ERR_WIDTH 1
++ #define TX_EV_PKT_TOO_BIG_LBN 37
++ #define TX_EV_PKT_TOO_BIG_WIDTH 1
++ #define TX_EV_Q_LABEL_LBN 32
++ #define TX_EV_Q_LABEL_WIDTH 5
++ #define TX_EV_PORT_LBN 16
++ #define TX_EV_PORT_WIDTH 1
++ #define TX_EV_WQ_FF_FULL_LBN 15
++ #define TX_EV_WQ_FF_FULL_WIDTH 1
++ #define TX_EV_BUF_OWNER_ID_ERR_LBN 14
++ #define TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
++ #define TX_EV_COMP_LBN 12
++ #define TX_EV_COMP_WIDTH 1
++ #define TX_EV_DESC_PTR_LBN 0
++ #define TX_EV_DESC_PTR_WIDTH 12
++//////////////---- Char or Kernel driver events ----//////////////
++ #define DRIVER_EV_SUB_CODE_LBN 56
++ #define DRIVER_EV_SUB_CODE_WIDTH 4
++ #define TX_DESCQ_FLS_DONE_EV_DECODE 0x0
++ #define RX_DESCQ_FLS_DONE_EV_DECODE 0x1
++ #define EVQ_INIT_DONE_EV_DECODE 0x2
++ #define EVQ_NOT_EN_EV_DECODE 0x3
++ #define RX_DESCQ_FLSFF_OVFL_EV_DECODE 0x4
++ #define SRM_UPD_DONE_EV_DECODE 0x5
++ #define WAKE_UP_EV_DECODE 0x6
++ #define TX_PKT_NON_TCP_UDP_DECODE 0x9
++ #define TIMER_EV_DECODE 0xA
++ #define RX_DSC_ERROR_EV_DECODE 0xE
++ #define DRIVER_EV_TX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_RX_DESCQ_ID_LBN 0
++ #define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
++ #define DRIVER_EV_EVQ_ID_LBN 0
++ #define DRIVER_EV_EVQ_ID_WIDTH 12
++ #define DRIVER_TMR_ID_LBN 0
++ #define DRIVER_TMR_ID_WIDTH 12
++ #define DRIVER_EV_SRM_UPD_LBN 0
++ #define DRIVER_EV_SRM_UPD_WIDTH 2
++ #define SRM_CLR_EV_DECODE 0
++ #define SRM_UPD_EV_DECODE 1
++ #define SRM_ILLCLR_EV_DECODE 2
++//////////////---- Global events. Sent to both event queue 0 and 4. ----//////////////
++ #define XFP_PHY_INTR_LBN 10
++ #define XFP_PHY_INTR_WIDTH 1
++ #define XG_PHY_INTR_LBN 9
++ #define XG_PHY_INTR_WIDTH 1
++ #define G_PHY1_INTR_LBN 8
++ #define G_PHY1_INTR_WIDTH 1
++ #define G_PHY0_INTR_LBN 7
++ #define G_PHY0_INTR_WIDTH 1
++//////////////---- Driver generated events ----//////////////
++ #define DRV_GEN_EV_CODE_LBN 60
++ #define DRV_GEN_EV_CODE_WIDTH 4
++ #define DRV_GEN_EV_DATA_LBN 0
++ #define DRV_GEN_EV_DATA_WIDTH 60
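++
++/* Sketch of dispatching a driver event on its sub-code (handler bodies
++ * omitted; ev is a 64-bit event value):
++ *
++ *   if (EXAMPLE_GET(ev, EV_CODE) == DRIVER_EV_DECODE) {
++ *           switch (EXAMPLE_GET(ev, DRIVER_EV_SUB_CODE)) {
++ *           case WAKE_UP_EV_DECODE:
++ *                   evq_id = EXAMPLE_GET(ev, DRIVER_EV_EVQ_ID);
++ *                   break;
++ *           case TIMER_EV_DECODE:
++ *                   tmr_id = EXAMPLE_GET(ev, DRIVER_TMR_ID);
++ *                   break;
++ *           }
++ *   }
++ */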
+Index: head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_internal.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/ef_vi_internal.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,256 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Really-and-truly-honestly internal stuff for libef.
++ * \date 2004/06/13
++ */
++
++/*! \cidoxg_include_ci_ul */
++#ifndef __CI_EF_VI_INTERNAL_H__
++#define __CI_EF_VI_INTERNAL_H__
++
++
++/* These flags share space with enum ef_vi_flags. */
++#define EF_VI_BUG5692_WORKAROUND 0x10000
++
++
++/* ***********************************************************************
++ * COMPILATION CONTROL FLAGS (see ef_vi.h for "workaround" controls)
++ */
++
++#define EF_VI_DO_MAGIC_CHECKS 1
++
++
++/**********************************************************************
++ * Headers
++ */
++
++#include <etherfabric/ef_vi.h>
++#include "sysdep.h"
++#include "ef_vi_falcon.h"
++
++
++/**********************************************************************
++ * Debugging.
++ */
++
++#ifndef NDEBUG
++
++# define _ef_assert(exp, file, line) BUG_ON(!(exp))
++
++# define _ef_assert2(exp, x, y, file, line) do { \
++ if (unlikely(!(exp))) \
++ BUG(); \
++ } while (0)
++
++#else
++
++# define _ef_assert(exp, file, line)
++# define _ef_assert2(e, x, y, file, line)
++
++#endif
++
++#define ef_assert(a) do{ _ef_assert((a),__FILE__,__LINE__); } while(0)
++#define ef_assert_equal(a,b) _ef_assert2((a)==(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_eq ef_assert_equal
++#define ef_assert_lt(a,b) _ef_assert2((a)<(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_le(a,b) _ef_assert2((a)<=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_nequal(a,b) _ef_assert2((a)!=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_ne ef_assert_nequal
++#define ef_assert_ge(a,b) _ef_assert2((a)>=(b),(a),(b),__FILE__,__LINE__)
++#define ef_assert_gt(a,b) _ef_assert2((a)>(b),(a),(b),__FILE__,__LINE__)
++
++/**********************************************************************
++ * Debug checks. ******************************************************
++ **********************************************************************/
++
++#ifdef NDEBUG
++# define EF_VI_MAGIC_SET(p, type)
++# define EF_VI_CHECK_VI(p)
++# define EF_VI_CHECK_EVENT_Q(p)
++# define EF_VI_CHECK_IOBUFSET(p)
++# define EF_VI_CHECK_FILTER(p)
++# define EF_VI_CHECK_SHMBUF(p)
++# define EF_VI_CHECK_PT_EP(p)
++#else
++# define EF_VI 0x3
++# define EF_EPLOCK 0x6
++# define EF_IOBUFSET 0x9
++# define EF_FILTER 0xa
++# define EF_SHMBUF 0x11
++
++# define EF_VI_MAGIC(p, type) \
++ (((unsigned)(type) << 28) | \
++ (((unsigned)(intptr_t)(p)) & 0x0fffffffu))
++
++# if !EF_VI_DO_MAGIC_CHECKS
++# define EF_VI_MAGIC_SET(p, type)
++# define EF_VI_MAGIC_CHECK(p, type)
++# else
++# define EF_VI_MAGIC_SET(p, type) \
++ do { \
++ (p)->magic = EF_VI_MAGIC((p), (type)); \
++ } while (0)
++
++# define EF_VI_MAGIC_OKAY(p, type) \
++ ((p)->magic == EF_VI_MAGIC((p), (type)))
++
++# define EF_VI_MAGIC_CHECK(p, type) \
++ ef_assert(EF_VI_MAGIC_OKAY((p), (type)))
++
++#endif /* EF_VI_DO_MAGIC_CHECKS */
++
++# define EF_VI_CHECK_VI(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI);
++
++# define EF_VI_CHECK_EVENT_Q(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI); \
++ ef_assert((p)->evq_base); \
++ ef_assert((p)->evq_mask);
++
++# define EF_VI_CHECK_PT_EP(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_VI); \
++ ef_assert((p)->ep_state);
++
++# define EF_VI_CHECK_IOBUFSET(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_IOBUFSET)
++
++# define EF_VI_CHECK_FILTER(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_FILTER);
++
++# define EF_VI_CHECK_SHMBUF(p) \
++ ef_assert(p); \
++ EF_VI_MAGIC_CHECK((p), EF_SHMBUF);
++
++#endif
++
++#ifndef NDEBUG
++# define EF_DRIVER_MAGIC 0x00f00ba4
++# define EF_ASSERT_THIS_DRIVER_VALID(driver) \
++ do{ ef_assert(driver); \
++ EF_VI_MAGIC_CHECK((driver), EF_DRIVER_MAGIC); \
++ ef_assert((driver)->init); }while(0)
++
++# define EF_ASSERT_DRIVER_VALID() EF_ASSERT_THIS_DRIVER_VALID(&ci_driver)
++#else
++# define EF_ASSERT_THIS_DRIVER_VALID(driver)
++# define EF_ASSERT_DRIVER_VALID()
++#endif
++
++
++/* *************************************
++ * Power of 2 FIFO
++ */
++
++#define EF_VI_FIFO2_M(f, x) ((x) & ((f)->fifo_mask))
++#define ef_vi_fifo2_valid(f) ((f) && (f)->fifo && (f)->fifo_mask > 0 && \
++ (f)->fifo_rd_i <= (f)->fifo_mask && \
++ (f)->fifo_wr_i <= (f)->fifo_mask && \
++ EF_VI_IS_POW2((f)->fifo_mask+1u))
++
++#define ef_vi_fifo2_init(f, cap) \
++ do{ ef_assert(EF_VI_IS_POW2((cap) + 1)); \
++ (f)->fifo_rd_i = (f)->fifo_wr_i = 0u; \
++ (f)->fifo_mask = (cap); \
++ }while(0)
++
++#define ef_vi_fifo2_is_empty(f) ((f)->fifo_rd_i == (f)->fifo_wr_i)
++#define ef_vi_fifo2_capacity(f) ((f)->fifo_mask)
++#define ef_vi_fifo2_buf_size(f) ((f)->fifo_mask + 1u)
++#define ef_vi_fifo2_end(f) ((f)->fifo + ef_vi_fifo2_buf_size(f))
++#define ef_vi_fifo2_peek(f) ((f)->fifo[(f)->fifo_rd_i])
++#define ef_vi_fifo2_poke(f) ((f)->fifo[(f)->fifo_wr_i])
++#define ef_vi_fifo2_num(f) EF_VI_FIFO2_M((f),(f)->fifo_wr_i-(f)->fifo_rd_i)
++
++#define ef_vi_fifo2_wr_prev(f) \
++ do{ (f)->fifo_wr_i = EF_VI_FIFO2_M((f), (f)->fifo_wr_i - 1u); }while(0)
++#define ef_vi_fifo2_wr_next(f) \
++ do{ (f)->fifo_wr_i = EF_VI_FIFO2_M((f), (f)->fifo_wr_i + 1u); }while(0)
++#define ef_vi_fifo2_rd_adv(f, n) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i + (n)); }while(0)
++#define ef_vi_fifo2_rd_prev(f) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i - 1u); }while(0)
++#define ef_vi_fifo2_rd_next(f) \
++ do{ (f)->fifo_rd_i = EF_VI_FIFO2_M((f), (f)->fifo_rd_i + 1u); }while(0)
++
++#define ef_vi_fifo2_put(f, v) \
++ do{ ef_vi_fifo2_poke(f) = (v); ef_vi_fifo2_wr_next(f); }while(0)
++#define ef_vi_fifo2_get(f, pv) \
++ do{ *(pv) = ef_vi_fifo2_peek(f); ef_vi_fifo2_rd_next(f); }while(0)
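++
++/* Usage sketch for the FIFO above. The macros work with any struct that
++ * provides fifo, fifo_mask, fifo_rd_i and fifo_wr_i; the one here is
++ * hypothetical. A FIFO of capacity N uses N+1 slots, and
++ * ef_vi_fifo2_init() asserts that N+1 is a power of two:
++ *
++ *   struct example_fifo {
++ *           int fifo[16];
++ *           unsigned fifo_mask, fifo_rd_i, fifo_wr_i;
++ *   } f;
++ *   int v;
++ *   ef_vi_fifo2_init(&f, 15);   // capacity 15 -> 16 slots
++ *   ef_vi_fifo2_put(&f, 42);
++ *   ef_vi_fifo2_get(&f, &v);    // v == 42
++ */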
++
++
++/* *********************************************************************
++ * Eventq handling
++ */
++
++typedef union {
++ uint64_t u64;
++ struct {
++ uint32_t a;
++ uint32_t b;
++ } opaque;
++} ef_vi_event;
++
++
++#define EF_VI_EVENT_OFFSET(q, i) \
++ (((q)->evq_state->evq_ptr - (i) * sizeof(ef_vi_event)) & (q)->evq_mask)
++
++#define EF_VI_EVENT_PTR(q, i) \
++ ((ef_vi_event*) ((q)->evq_base + EF_VI_EVENT_OFFSET((q), (i))))
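++
++/* Usage sketch for the macros above. On Falcon an unwritten event-queue
++ * entry reads as all-ones, so that is used as the "empty" test here (an
++ * assumption about queue initialisation, not something stated in this
++ * header):
++ *
++ *   ef_vi_event* ev = EF_VI_EVENT_PTR(evq, 0);
++ *   if (ev->u64 != (uint64_t)-1) {
++ *           // process *ev, then advance evq_state->evq_ptr by
++ *           // sizeof(ef_vi_event) and clear the slot back to all-ones
++ *   }
++ */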
++
++/* *********************************************************************
++ * Miscellaneous goodies
++ */
++#ifdef NDEBUG
++# define EF_VI_DEBUG(x)
++#else
++# define EF_VI_DEBUG(x) x
++#endif
++
++#define EF_VI_ROUND_UP(i, align) (((i)+(align)-1u) & ~((align)-1u))
++#define EF_VI_ALIGN_FWD(p, align) (((p)+(align)-1u) & ~((align)-1u))
++#define EF_VI_ALIGN_BACK(p, align) ((p) & ~((align)-1u))
++#define EF_VI_PTR_ALIGN_BACK(p, align) \
++ ((char*)EF_VI_ALIGN_BACK(((intptr_t)(p)), ((intptr_t)(align))))
++#define EF_VI_IS_POW2(x) ((x) && ! ((x) & ((x) - 1)))
++
++
++/* ********************************************************************
++ */
++
++extern void falcon_vi_init(ef_vi*, void* vvis ) EF_VI_HF;
++extern void ef_eventq_state_init(ef_vi* evq) EF_VI_HF;
++extern void __ef_init(void) EF_VI_HF;
++
++
++#endif /* __CI_EF_VI_INTERNAL_H__ */
++
+Index: head-2008-11-25/drivers/xen/sfc_netfront/etherfabric/ef_vi.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/etherfabric/ef_vi.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,665 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \brief Virtual Interface
++ * \date 2007/05/16
++ */
++
++#ifndef __EFAB_EF_VI_H__
++#define __EFAB_EF_VI_H__
++
++
++/**********************************************************************
++ * Primitive types ****************************************************
++ **********************************************************************/
++
++/* We standardise on the types from stdint.h and synthesise these types
++ * for compilers/platforms that don't provide them */
++
++# include <linux/types.h>
++# define EF_VI_ALIGN(x) __attribute__ ((aligned (x)))
++# define ef_vi_inline static inline
++
++
++
++/**********************************************************************
++ * Types **************************************************************
++ **********************************************************************/
++
++typedef uint32_t ef_eventq_ptr;
++
++typedef uint64_t ef_addr;
++typedef char* ef_vi_ioaddr_t;
++
++/**********************************************************************
++ * ef_event ***********************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi A DMA request identifier.
++**
++** This is an integer token specified by the transport and associated
++** with a DMA request. It is returned to the VI user with DMA completion
++** events. It is typically used to identify the buffer associated with
++** the transfer.
++*/
++typedef int ef_request_id;
++
++typedef union {
++ uint64_t u64[1];
++ uint32_t u32[2];
++} ef_vi_qword;
++
++typedef ef_vi_qword ef_hw_event;
++
++#define EF_REQUEST_ID_BITS 16u
++#define EF_REQUEST_ID_MASK ((1u << EF_REQUEST_ID_BITS) - 1u)
++
++/*! \i_ef_event An [ef_event] is a token that identifies something that
++** has happened. Examples include packets received, packets transmitted
++** and errors.
++*/
++typedef union {
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ } generic;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned len :16;
++ unsigned flags :16;
++ } rx;
++ struct { /* This *must* have the same layout as [rx]. */
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned len :16;
++ unsigned flags :16;
++ unsigned subtype :16;
++ } rx_discard;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ } tx;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ /*ef_request_id request_id :EF_REQUEST_ID_BITS;*/
++ unsigned q_id :16;
++ unsigned subtype :16;
++ } tx_error;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ unsigned q_id :16;
++ } rx_no_desc_trunc;
++ struct {
++ ef_hw_event ev;
++ unsigned type :16;
++ unsigned data;
++ } sw;
++} ef_event;
++
++
++#define EF_EVENT_TYPE(e) ((e).generic.type)
++enum {
++ /** Good data was received. */
++ EF_EVENT_TYPE_RX,
++ /** Packets have been sent. */
++ EF_EVENT_TYPE_TX,
++ /** Data received and buffer consumed, but something is wrong. */
++ EF_EVENT_TYPE_RX_DISCARD,
++ /** Transmit of packet failed. */
++ EF_EVENT_TYPE_TX_ERROR,
++ /** Received packet was truncated due to lack of descriptors. */
++ EF_EVENT_TYPE_RX_NO_DESC_TRUNC,
++ /** Software generated event. */
++ EF_EVENT_TYPE_SW,
++ /** Event queue overflow. */
++ EF_EVENT_TYPE_OFLOW,
++};
++
++#define EF_EVENT_RX_BYTES(e) ((e).rx.len)
++#define EF_EVENT_RX_Q_ID(e) ((e).rx.q_id)
++#define EF_EVENT_RX_CONT(e) ((e).rx.flags & EF_EVENT_FLAG_CONT)
++#define EF_EVENT_RX_SOP(e) ((e).rx.flags & EF_EVENT_FLAG_SOP)
++#define EF_EVENT_RX_ISCSI_OKAY(e) ((e).rx.flags & EF_EVENT_FLAG_ISCSI_OK)
++#define EF_EVENT_FLAG_SOP 0x1
++#define EF_EVENT_FLAG_CONT 0x2
++#define EF_EVENT_FLAG_ISCSI_OK 0x4
++
++#define EF_EVENT_TX_Q_ID(e) ((e).tx.q_id)
++
++#define EF_EVENT_RX_DISCARD_Q_ID(e) ((e).rx_discard.q_id)
++#define EF_EVENT_RX_DISCARD_LEN(e) ((e).rx_discard.len)
++#define EF_EVENT_RX_DISCARD_TYPE(e) ((e).rx_discard.subtype)
++enum {
++ EF_EVENT_RX_DISCARD_CSUM_BAD,
++ EF_EVENT_RX_DISCARD_CRC_BAD,
++ EF_EVENT_RX_DISCARD_TRUNC,
++ EF_EVENT_RX_DISCARD_RIGHTS,
++ EF_EVENT_RX_DISCARD_OTHER,
++};
++
++#define EF_EVENT_TX_ERROR_Q_ID(e) ((e).tx_error.q_id)
++#define EF_EVENT_TX_ERROR_TYPE(e) ((e).tx_error.subtype)
++enum {
++ EF_EVENT_TX_ERROR_RIGHTS,
++ EF_EVENT_TX_ERROR_OFLOW,
++ EF_EVENT_TX_ERROR_2BIG,
++ EF_EVENT_TX_ERROR_BUS,
++};
++
++#define EF_EVENT_RX_NO_DESC_TRUNC_Q_ID(e) ((e).rx_no_desc_trunc.q_id)
++
++#define EF_EVENT_SW_DATA_MASK 0xffff
++#define EF_EVENT_SW_DATA(e) ((e).sw.data)
++
++#define EF_EVENT_FMT "[ev:%x:%08x:%08x]"
++#define EF_EVENT_PRI_ARG(e) (unsigned) (e).generic.type, \
++ (unsigned) (e).generic.ev.u32[1], \
++ (unsigned) (e).generic.ev.u32[0]
++
++#define EF_GET_HW_EV(e) ((e).generic.ev)
++#define EF_GET_HW_EV_PTR(e) (&(e).generic.ev)
++#define EF_GET_HW_EV_U64(e) ((e).generic.ev.u64[0])
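++
++/* Sketch of dispatching on an ef_event with the accessors above; the
++ * handle_*() functions are placeholders for caller-supplied logic:
++ *
++ *   switch (EF_EVENT_TYPE(ev)) {
++ *   case EF_EVENT_TYPE_RX:
++ *           handle_rx(EF_EVENT_RX_Q_ID(ev), EF_EVENT_RX_BYTES(ev));
++ *           break;
++ *   case EF_EVENT_TYPE_RX_DISCARD:
++ *           handle_discard(EF_EVENT_RX_DISCARD_TYPE(ev));
++ *           break;
++ *   case EF_EVENT_TYPE_TX:
++ *           handle_tx(EF_EVENT_TX_Q_ID(ev));
++ *           break;
++ *   }
++ */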
++
++
++/* ***************** */
++
++/*! Used by netif shared state. Must use types of explicit size. */
++typedef struct {
++ uint16_t rx_last_desc_ptr; /* for RX duplicates */
++ uint8_t bad_sop; /* bad SOP detected */
++ uint8_t frag_num; /* next fragment #, 0=>SOP */
++} ef_rx_dup_state_t;
++
++
++/* Max number of ports on any SF NIC. */
++#define EFAB_DMAQS_PER_EVQ_MAX 32
++
++typedef struct {
++ ef_eventq_ptr evq_ptr;
++ int32_t trashed;
++ ef_rx_dup_state_t rx_dup_state[EFAB_DMAQS_PER_EVQ_MAX];
++} ef_eventq_state;
++
++
++/*! \i_ef_base [ef_iovec] is similar to the standard [struct iovec]. An
++** array of these is used to designate a scatter/gather list of I/O
++** buffers.
++*/
++typedef struct {
++ ef_addr iov_base EF_VI_ALIGN(8);
++ unsigned iov_len;
++} ef_iovec;
++
++/* Falcon constants */
++#define TX_EV_DESC_PTR_LBN 0
++
++/**********************************************************************
++ * ef_iobufset ********************************************************
++ **********************************************************************/
++
++/*! \i_ef_bufs An [ef_iobufset] is a collection of buffers to be used
++** with the NIC.
++*/
++typedef struct ef_iobufset {
++ unsigned magic;
++ unsigned bufs_mmap_bytes;
++ unsigned bufs_handle;
++ int bufs_ptr_off;
++ ef_addr bufs_addr;
++ unsigned bufs_size; /* size rounded to pow2 */
++ int bufs_num;
++ int faultonaccess;
++} ef_iobufset;
++
++
++/**********************************************************************
++ * ef_vi **************************************************************
++ **********************************************************************/
++
++enum ef_vi_flags {
++ EF_VI_RX_SCATTER = 0x1,
++ EF_VI_ISCSI_RX_HDIG = 0x2,
++ EF_VI_ISCSI_TX_HDIG = 0x4,
++ EF_VI_ISCSI_RX_DDIG = 0x8,
++ EF_VI_ISCSI_TX_DDIG = 0x10,
++ EF_VI_TX_PHYS_ADDR = 0x20,
++ EF_VI_RX_PHYS_ADDR = 0x40,
++ EF_VI_TX_IP_CSUM_DIS = 0x80,
++ EF_VI_TX_TCPUDP_CSUM_DIS= 0x100,
++ EF_VI_TX_TCPUDP_ONLY = 0x200,
++ /* Flags in range 0xXXXX0000 are for internal use. */
++};
++
++typedef struct {
++ uint32_t added;
++ uint32_t removed;
++} ef_vi_txq_state;
++
++typedef struct {
++ uint32_t added;
++ uint32_t removed;
++} ef_vi_rxq_state;
++
++typedef struct {
++ uint32_t mask;
++ void* doorbell;
++ void* descriptors;
++ uint16_t* ids;
++ unsigned misalign_mask;
++} ef_vi_txq;
++
++typedef struct {
++ uint32_t mask;
++ void* doorbell;
++ void* descriptors;
++ uint16_t* ids;
++} ef_vi_rxq;
++
++typedef struct {
++ ef_eventq_state evq;
++ ef_vi_txq_state txq;
++ ef_vi_rxq_state rxq;
++ /* Followed by request id fifos. */
++} ef_vi_state;
++
++/*! \i_ef_vi A virtual interface.
++**
++** An [ef_vi] represents a virtual interface on a specific NIC. A
++** virtual interface is a collection of an event queue and two DMA queues
++** used to pass Ethernet frames between the transport implementation and
++** the network.
++*/
++typedef struct ef_vi {
++ unsigned magic;
++
++ unsigned vi_resource_id;
++ unsigned vi_resource_handle_hack;
++ unsigned vi_i;
++
++ char* vi_mem_mmap_ptr;
++ int vi_mem_mmap_bytes;
++ char* vi_io_mmap_ptr;
++ int vi_io_mmap_bytes;
++
++ ef_eventq_state* evq_state;
++ char* evq_base;
++ unsigned evq_mask;
++ ef_vi_ioaddr_t evq_timer_reg;
++
++ ef_vi_txq vi_txq;
++ ef_vi_rxq vi_rxq;
++ ef_vi_state* ep_state;
++ enum ef_vi_flags vi_flags;
++} ef_vi;
++
++
++enum ef_vi_arch {
++ EF_VI_ARCH_FALCON,
++};
++
++
++struct ef_vi_nic_type {
++ unsigned char arch;
++ char variant;
++ unsigned char revision;
++};
++
++
++/* This structure is opaque to the client and is used to pass mapping
++ * data from the resource manager to the ef_vi library for ef_vi_init().
++ */
++struct vi_mappings {
++ uint32_t signature;
++# define VI_MAPPING_VERSION 0x02 /*Byte: Increment me if struct altered*/
++# define VI_MAPPING_SIGNATURE (0xBA1150 + VI_MAPPING_VERSION)
++
++ struct ef_vi_nic_type nic_type;
++
++ int vi_instance;
++
++ unsigned evq_bytes;
++ char* evq_base;
++ ef_vi_ioaddr_t evq_timer_reg;
++
++ unsigned rx_queue_capacity;
++ ef_vi_ioaddr_t rx_dma_ef1;
++ char* rx_dma_falcon;
++ ef_vi_ioaddr_t rx_bell;
++
++ unsigned tx_queue_capacity;
++ ef_vi_ioaddr_t tx_dma_ef1;
++ char* tx_dma_falcon;
++ ef_vi_ioaddr_t tx_bell;
++};
++/* This is used by clients to allocate a suitably sized buffer for the
++ * resource manager to fill & ef_vi_init() to use. */
++#define VI_MAPPINGS_SIZE (sizeof(struct vi_mappings))
++
++
++/**********************************************************************
++ * ef_config **********************************************************
++ **********************************************************************/
++
++struct ef_config_t {
++ int log; /* debug logging level */
++};
++
++extern struct ef_config_t ef_config;
++
++
++/**********************************************************************
++ * ef_vi **************************************************************
++ **********************************************************************/
++
++/* Initialise [data_area] with information required to initialise an ef_vi.
++ * In the following, an unused param should be set to NULL. Note the case
++ * marked (*) of [iobuf_mmap] for falcon/driver; for normal driver this
++ * must be NULL.
++ *
++ * \param data_area [in,out] required, must ref at least VI_MAPPINGS_SIZE
++ * bytes
++ * \param evq_capacity [in] number of events in event queue. Specify 0 for
++ * no event queue.
++ * \param rxq_capacity [in] number of descriptors in RX DMA queue. Specify
++ * 0 for no RX queue.
++ * \param txq_capacity [in] number of descriptors in TX DMA queue. Specify
++ * 0 for no TX queue.
++ * \param mmap_info [in] mem-map info for resource
++ * \param io_mmap [in] ef1, required
++ * falcon, required
++ * \param iobuf_mmap [in] ef1, UL: unused
++ * falcon, UL: required
++ */
++extern void ef_vi_init_mapping_vi(void* data_area, struct ef_vi_nic_type,
++ unsigned rxq_capacity,
++ unsigned txq_capacity, int instance,
++ void* io_mmap, void* iobuf_mmap_rx,
++ void* iobuf_mmap_tx, enum ef_vi_flags);
++
++
++extern void ef_vi_init_mapping_evq(void* data_area, struct ef_vi_nic_type,
++ int instance, unsigned evq_bytes,
++ void* base, void* timer_reg);
++
++ef_vi_inline unsigned ef_vi_resource_id(ef_vi* vi)
++{
++ return vi->vi_resource_id;
++}
++
++ef_vi_inline enum ef_vi_flags ef_vi_flags(ef_vi* vi)
++{
++ return vi->vi_flags;
++}
++
++
++/**********************************************************************
++ * Receive interface **************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi Returns the amount of space in the RX descriptor ring.
++**
++** \return the amount of space in the queue.
++*/
++ef_vi_inline int ef_vi_receive_space(ef_vi* vi)
++{
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ return vi->vi_rxq.mask - (qs->added - qs->removed);
++}
++
++
++/*! \i_ef_vi Returns the fill level of the RX descriptor ring.
++**
++** \return the fill level of the queue.
++*/
++ef_vi_inline int ef_vi_receive_fill_level(ef_vi* vi)
++{
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ return qs->added - qs->removed;
++}
++
++
++ef_vi_inline int ef_vi_receive_capacity(ef_vi* vi)
++{
++ return vi->vi_rxq.mask;
++}
++
++/*! \i_ef_vi Complete a receive operation.
++**
++** When a receive completion event is received, it should be passed to
++** this function. The request-id for the buffer that the packet was
++** delivered to is returned.
++**
++** After this function returns, more space may be available in the
++** receive queue.
++*/
++extern ef_request_id ef_vi_receive_done(const ef_vi*, const ef_event*);
++
++/*! \i_ef_vi Return request ID indicated by a receive event
++ */
++ef_vi_inline ef_request_id ef_vi_receive_request_id(const ef_vi* vi,
++ const ef_event* ef_ev)
++{
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*ef_ev);
++ return ev->u32[0] & vi->vi_rxq.mask;
++}
++
++
++/*! \i_ef_vi Form a receive descriptor.
++**
++** If \c initial_rx_bytes is zero, a reception size at least as large as
++** an MTU is used.
++*/
++extern int ef_vi_receive_init(ef_vi* vi, ef_addr addr, ef_request_id dma_id,
++ int initial_rx_bytes);
++
++/*! \i_ef_vi Submit initialised receive descriptors to the NIC. */
++extern void ef_vi_receive_push(ef_vi* vi);
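++
++/* Refill sketch (illustrative only; have_buffers(), [buf_addr] and [id]
++ * are hypothetical): initialise a batch of descriptors, then push once:
++ *
++ *   while( ef_vi_receive_space(vi) && have_buffers() )
++ *     ef_vi_receive_init(vi, buf_addr, id, 0);
++ *   ef_vi_receive_push(vi);
++ */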
++
++/*! \i_ef_vi Post a buffer on the receive queue.
++**
++** \return 0 on success, or -EAGAIN if the receive queue is full
++*/
++extern int ef_vi_receive_post(ef_vi*, ef_addr addr,
++ ef_request_id dma_id);
++
++/**********************************************************************
++ * Transmit interface *************************************************
++ **********************************************************************/
++
++/*! \i_ef_vi Return the amount of space (in descriptors) in the transmit
++** queue.
++**
++** \return the amount of space in the queue (in descriptors)
++*/
++ef_vi_inline int ef_vi_transmit_space(ef_vi* vi)
++{
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ return vi->vi_txq.mask - (qs->added - qs->removed);
++}
++
++
++/*! \i_ef_vi Returns the fill level of the TX descriptor ring.
++**
++** \return the fill level of the queue.
++*/
++ef_vi_inline int ef_vi_transmit_fill_level(ef_vi* vi)
++{
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ return qs->added - qs->removed;
++}
++
++
++/*! \i_ef_vi Returns the total capacity of the TX descriptor ring.
++**
++** \return the capacity of the queue.
++*/
++ef_vi_inline int ef_vi_transmit_capacity(ef_vi* vi)
++{
++ return vi->vi_txq.mask;
++}
++
++
++/*! \i_ef_vi Transmit a packet.
++**
++** \param bytes must be greater than ETH_ZLEN.
++** \return -EAGAIN if the transmit queue is full, or 0 on success
++*/
++extern int ef_vi_transmit(ef_vi*, ef_addr, int bytes, ef_request_id dma_id);
++
++/*! \i_ef_vi Transmit a packet using a gather list.
++**
++** \param iov_len must be greater than zero
++** \param iov the first element must have non-zero length (the others
++** need not)
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmitv(ef_vi*, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Initialise a DMA request.
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmit_init(ef_vi*, ef_addr, int bytes,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Initialise a DMA request from a gather list.
++**
++** \return -EAGAIN if the queue is full, or 0 on success
++*/
++extern int ef_vi_transmitv_init(ef_vi*, const ef_iovec*, int iov_len,
++ ef_request_id dma_id);
++
++/*! \i_ef_vi Submit DMA requests to the NIC.
++**
++** The DMA requests must have been initialised using
++** ef_vi_transmit_init() or ef_vi_transmitv_init().
++*/
++extern void ef_vi_transmit_push(ef_vi*);
++
++
++/*! \i_ef_vi Maximum number of transmit completions per transmit event. */
++#define EF_VI_TRANSMIT_BATCH 64
++
++/*! \i_ef_vi Determine the set of [ef_request_id]s for each DMA request
++** which has been completed by a given transmit completion
++** event.
++**
++** \param ids must point to an array of length EF_VI_TRANSMIT_BATCH
++** \return the number of valid [ef_request_id]s (can be zero)
++*/
++extern int ef_vi_transmit_unbundle(ef_vi* ep, const ef_event*,
++ ef_request_id* ids);
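++
++/* A minimal TX-completion sketch (illustrative only; events come from
++ * ef_eventq_poll() below, and handle_tx_done() is a hypothetical
++ * application callback):
++ *
++ *   ef_event evs[16];
++ *   ef_request_id ids[EF_VI_TRANSMIT_BATCH];
++ *   int i, j, n_ev = ef_eventq_poll(vi, evs, 16);
++ *   for( i = 0; i < n_ev; ++i )
++ *     if( EF_EVENT_TYPE(evs[i]) == EF_EVENT_TYPE_TX ) {
++ *       int n_id = ef_vi_transmit_unbundle(vi, &evs[i], ids);
++ *       for( j = 0; j < n_id; ++j )
++ *         handle_tx_done(ids[j]);
++ *     }
++ */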
++
++
++/*! \i_ef_event Returns true if ef_eventq_poll() will return event(s). */
++extern int ef_eventq_has_event(ef_vi* vi);
++
++/*! \i_ef_event Returns true if there are more than [look_ahead] events
++** in the event queue.
++**
++** This looks ahead in the event queue, so has the property that it will
++** not ping-pong a cache-line when it is called concurrently with events
++** being delivered.
++*/
++extern int ef_eventq_has_many_events(ef_vi* evq, int look_ahead);
++
++/*! Type of function used to handle unknown events arriving on an event
++** queue. Returns CI_TRUE iff the event has been handled.
++*/
++typedef int/*bool*/ ef_event_handler_fn(void* priv, ef_vi* evq, ef_event* ev);
++
++/*! Standard poll exception routine */
++extern int/*bool*/ ef_eventq_poll_exception(void* priv, ef_vi* evq,
++ ef_event* ev);
++
++/*! \i_ef_event Retrieve events from the event queue, handle RX/TX events
++** and pass any others to an exception handler function
++**
++** \return The number of events retrieved.
++*/
++extern int ef_eventq_poll_evs(ef_vi* evq, ef_event* evs, int evs_len,
++ ef_event_handler_fn *exception, void *expt_priv);
++
++/*! \i_ef_event Retrieve events from the event queue.
++**
++** \return The number of events retrieved.
++*/
++ef_vi_inline int ef_eventq_poll(ef_vi* evq, ef_event* evs, int evs_len)
++{
++ return ef_eventq_poll_evs(evq, evs, evs_len,
++ &ef_eventq_poll_exception, (void*)0);
++}
++
++/*! \i_ef_event Returns the capacity of an event queue. */
++ef_vi_inline int ef_eventq_capacity(ef_vi* vi)
++{
++ return (vi->evq_mask + 1u) / sizeof(ef_hw_event);
++}
++
++/* Returns the instance ID of [vi] */
++ef_vi_inline unsigned ef_vi_instance(ef_vi* vi)
++{ return vi->vi_i; }
++
++
++/**********************************************************************
++ * Initialisation *****************************************************
++ **********************************************************************/
++
++/*! Return size of state buffer of an initialised VI. */
++extern int ef_vi_state_bytes(ef_vi*);
++
++/*! Return size of buffer needed for VI state given sizes of RX and TX
++** DMA queues. Queue sizes must be legal sizes (power of 2), or 0 (no
++** queue).
++*/
++extern int ef_vi_calc_state_bytes(int rxq_size, int txq_size);
++
++/*! Initialise [ef_vi] from the provided resources. [vvis] must have been
++** created by ef_make_vi_data() & remains owned by the caller.
++*/
++extern void ef_vi_init(ef_vi*, void* vvis, ef_vi_state* state,
++ ef_eventq_state* evq_state, enum ef_vi_flags);
++
++extern void ef_vi_state_init(ef_vi*);
++extern void ef_eventq_state_init(ef_vi*);
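++
++/* A rough initialisation sequence (illustrative only; assumes the
++ * resource manager has already filled [vvis] via the mapping calls
++ * above, and my_alloc() is a hypothetical allocator):
++ *
++ *   int bytes = ef_vi_calc_state_bytes(rxq_size, txq_size);
++ *   ef_vi_state* state = (ef_vi_state*) my_alloc(bytes);
++ *   ef_vi_init(&vi, vvis, state, &evq_state, (enum ef_vi_flags) 0);
++ *   ef_vi_state_init(&vi);
++ *   ef_eventq_state_init(&vi);
++ */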
++
++/*! Converts an efhw device arch to an ef_vi_arch, or returns -1 if not
++** recognised.
++*/
++extern int ef_vi_arch_from_efhw_arch(int efhw_arch);
++
++
++#endif /* __EFAB_EF_VI_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/falcon_event.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/falcon_event.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,346 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Routine to poll event queues.
++ * \date 2003/03/04
++ */
++
++/*! \cidoxg_lib_ef */
++#include "ef_vi_internal.h"
++
++/* Be worried about this on byteswapped machines */
++/* Due to crazy chipsets, we see the event words being written in
++** arbitrary order (bug4539). So the test for the presence of an event
++** must ensure that both halves have changed from the null value.
++*/
++# define EF_VI_IS_EVENT(evp) \
++ ( (((evp)->opaque.a != (uint32_t)-1) && \
++ ((evp)->opaque.b != (uint32_t)-1)) )
++
++
++#ifdef NDEBUG
++# define IS_DEBUG 0
++#else
++# define IS_DEBUG 1
++#endif
++
++
++/*! Check for RX events with inconsistent SOP/CONT
++**
++** Returns true if this event should be discarded
++*/
++ef_vi_inline int ef_eventq_is_rx_sop_cont_bad_efab(ef_vi* vi,
++ const ef_vi_qword* ev)
++{
++ ef_rx_dup_state_t* rx_dup_state;
++ uint8_t* bad_sop;
++
++ unsigned label = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ unsigned sop = QWORD_TEST_BIT(RX_SOP, *ev);
++
++ ef_assert(vi);
++ ef_assert_lt(label, EFAB_DMAQS_PER_EVQ_MAX);
++
++ rx_dup_state = &vi->evq_state->rx_dup_state[label];
++ bad_sop = &rx_dup_state->bad_sop;
++
++ if( ! ((vi->vi_flags & EF_VI_BUG5692_WORKAROUND) || IS_DEBUG) ) {
++ *bad_sop = (*bad_sop && !sop);
++ }
++ else {
++ unsigned cont = QWORD_TEST_BIT(RX_JUMBO_CONT, *ev);
++ uint8_t *frag_num = &rx_dup_state->frag_num;
++
++ /* bad_sop should latch till the next sop */
++ *bad_sop = (*bad_sop && !sop) || ( !!sop != (*frag_num==0) );
++
++ /* We do not check the number of bytes relative to the
++ * fragment number and the size of the user RX buffer
++ * here, because we don't know the size of the user RX
++ * buffer. We should probably perform this check in the
++ * nearest calling code, though.
++ */
++ *frag_num = cont ? (*frag_num + 1) : 0;
++ }
++
++ return *bad_sop;
++}
++
++
++ef_vi_inline int falcon_rx_check_dup(ef_vi* evq, ef_event* ev_out,
++ const ef_vi_qword* ev)
++{
++ unsigned q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ unsigned desc_ptr = QWORD_GET_U(RX_EV_DESC_PTR, *ev);
++ ef_rx_dup_state_t* rx_dup_state = &evq->evq_state->rx_dup_state[q_id];
++
++ if(likely( desc_ptr != rx_dup_state->rx_last_desc_ptr )) {
++ rx_dup_state->rx_last_desc_ptr = desc_ptr;
++ return 0;
++ }
++
++ rx_dup_state->rx_last_desc_ptr = desc_ptr;
++ rx_dup_state->bad_sop = 1;
++#ifndef NDEBUG
++ rx_dup_state->frag_num = 0;
++#endif
++ BUG_ON(!QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev));
++ BUG_ON( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev));
++ BUG_ON(QWORD_GET_U(RX_EV_BYTE_CNT, *ev) != 0);
++ ev_out->rx_no_desc_trunc.type = EF_EVENT_TYPE_RX_NO_DESC_TRUNC;
++ ev_out->rx_no_desc_trunc.q_id = q_id;
++ return 1;
++}
++
++
++ef_vi_inline void falcon_rx_event(ef_event* ev_out, const ef_vi_qword* ev)
++{
++ if(likely( QWORD_TEST_BIT(RX_EV_PKT_OK, *ev) )) {
++ ev_out->rx.type = EF_EVENT_TYPE_RX;
++ ev_out->rx.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ ev_out->rx.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
++ if( QWORD_TEST_BIT(RX_SOP, *ev) )
++ ev_out->rx.flags = EF_EVENT_FLAG_SOP;
++ else
++ ev_out->rx.flags = 0;
++ if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
++ ev_out->rx.flags |= EF_EVENT_FLAG_CONT;
++ if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
++ ev_out->rx.flags |= EF_EVENT_FLAG_ISCSI_OK;
++ }
++ else {
++ ev_out->rx_discard.type = EF_EVENT_TYPE_RX_DISCARD;
++ ev_out->rx_discard.q_id = QWORD_GET_U(RX_EV_Q_LABEL, *ev);
++ ev_out->rx_discard.len = QWORD_GET_U(RX_EV_BYTE_CNT, *ev);
++#if 1 /* hack for ptloop compatibility: ?? TODO purge */
++ if( QWORD_TEST_BIT(RX_SOP, *ev) )
++ ev_out->rx_discard.flags = EF_EVENT_FLAG_SOP;
++ else
++ ev_out->rx_discard.flags = 0;
++ if( QWORD_TEST_BIT(RX_JUMBO_CONT, *ev) )
++ ev_out->rx_discard.flags |= EF_EVENT_FLAG_CONT;
++ if( QWORD_TEST_BIT(RX_iSCSI_PKT_OK, *ev) )
++ ev_out->rx_discard.flags |= EF_EVENT_FLAG_ISCSI_OK;
++#endif
++ /* Order matters here: more fundamental errors first. */
++ if( QWORD_TEST_BIT(RX_EV_BUF_OWNER_ID_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_RIGHTS;
++ else if( QWORD_TEST_BIT(RX_EV_FRM_TRUNC, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_TRUNC;
++ else if( QWORD_TEST_BIT(RX_EV_ETH_CRC_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CRC_BAD;
++ else if( QWORD_TEST_BIT(RX_EV_IP_HDR_CHKSUM_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CSUM_BAD;
++ else if( QWORD_TEST_BIT(RX_EV_TCP_UDP_CHKSUM_ERR, *ev) )
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_CSUM_BAD;
++ else
++ ev_out->rx_discard.subtype =
++ EF_EVENT_RX_DISCARD_OTHER;
++ }
++}
++
++
++ef_vi_inline void falcon_tx_event(ef_event* ev_out, const ef_vi_qword* ev)
++{
++ /* Danger danger! No matter what we ask for wrt batching, we
++ ** will get a batched event every 16 descriptors, and we also
++ ** get dma-queue-empty events. i.e. Duplicates are expected.
++ **
++ ** In addition, if it's been requested in the descriptor, we
++ ** get an event per descriptor. (We don't currently request
++ ** this).
++ */
++ if(likely( QWORD_TEST_BIT(TX_EV_COMP, *ev) )) {
++ ev_out->tx.type = EF_EVENT_TYPE_TX;
++ ev_out->tx.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
++ }
++ else {
++ ev_out->tx_error.type = EF_EVENT_TYPE_TX_ERROR;
++ ev_out->tx_error.q_id = QWORD_GET_U(TX_EV_Q_LABEL, *ev);
++ if(likely( QWORD_TEST_BIT(TX_EV_BUF_OWNER_ID_ERR, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_RIGHTS;
++ else if(likely( QWORD_TEST_BIT(TX_EV_WQ_FF_FULL, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_OFLOW;
++ else if(likely( QWORD_TEST_BIT(TX_EV_PKT_TOO_BIG, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_2BIG;
++ else if(likely( QWORD_TEST_BIT(TX_EV_PKT_ERR, *ev) ))
++ ev_out->tx_error.subtype = EF_EVENT_TX_ERROR_BUS;
++ }
++}
++
++
++static void mark_bad(ef_event* ev)
++{
++ ev->generic.ev.u64[0] &= ~((uint64_t) 1u << RX_EV_PKT_OK_LBN);
++}
++
++
++int ef_eventq_poll_evs(ef_vi* evq, ef_event* evs, int evs_len,
++ ef_event_handler_fn *exception, void *expt_priv)
++{
++ int evs_len_orig = evs_len;
++
++ EF_VI_CHECK_EVENT_Q(evq);
++ ef_assert(evs);
++ ef_assert_gt(evs_len, 0);
++
++ if(unlikely( EF_VI_IS_EVENT(EF_VI_EVENT_PTR(evq, 1)) ))
++ goto overflow;
++
++ do {
++ { /* Read the event out of the ring, then fiddle with
++ * copied version. Reason is that the ring is
++ * likely to get pushed out of cache by another
++ * event being delivered by hardware. */
++ ef_vi_event* ev = EF_VI_EVENT_PTR(evq, 0);
++ if( ! EF_VI_IS_EVENT(ev) )
++ break;
++ evs->generic.ev.u64[0] = cpu_to_le64 (ev->u64);
++ evq->evq_state->evq_ptr += sizeof(ef_vi_event);
++ ev->u64 = (uint64_t)(int64_t) -1;
++ }
++
++ /* Ugly: Exploit the fact that event code lies in top
++ * bits of event. */
++ ef_assert_ge(EV_CODE_LBN, 32u);
++ switch( evs->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
++ case RX_IP_EV_DECODE:
++ /* Look for duplicate desc_ptr: it signals
++ * that a jumbo frame was truncated because we
++ * ran out of descriptors. */
++ if(unlikely( falcon_rx_check_dup
++ (evq, evs, &evs->generic.ev) )) {
++ --evs_len;
++ ++evs;
++ break;
++ }
++ else {
++ /* Cope with Falcon A1 bugs where RX
++ * gives inconsistent RX events. Mark
++ * events as bad until SOP becomes
++ * consistent again.
++ * ef_eventq_is_rx_sop_cont_bad_efab()
++ * has side effects, so order is
++ * important. */
++ if(unlikely
++ (ef_eventq_is_rx_sop_cont_bad_efab
++ (evq, &evs->generic.ev) )) {
++ mark_bad(evs);
++ }
++ }
++ falcon_rx_event(evs, &evs->generic.ev);
++ --evs_len;
++ ++evs;
++ break;
++
++ case TX_IP_EV_DECODE:
++ falcon_tx_event(evs, &evs->generic.ev);
++ --evs_len;
++ ++evs;
++ break;
++
++ default:
++ break;
++ }
++ } while( evs_len );
++
++ return evs_len_orig - evs_len;
++
++
++ overflow:
++ evs->generic.type = EF_EVENT_TYPE_OFLOW;
++ evs->generic.ev.u64[0] = (uint64_t)((int64_t)-1);
++ return 1;
++}
++
++
++int/*bool*/ ef_eventq_poll_exception(void* priv, ef_vi* evq, ef_event* ev)
++{
++ int /*bool*/ handled = 0;
++
++ switch( ev->generic.ev.u32[1] >> (EV_CODE_LBN - 32u) ) {
++ case DRIVER_EV_DECODE:
++ if( QWORD_GET_U(DRIVER_EV_SUB_CODE, ev->generic.ev) ==
++ EVQ_INIT_DONE_EV_DECODE )
++ /* EVQ initialised event: ignore. */
++ handled = 1;
++ break;
++ }
++ return handled;
++}
++
++
++void ef_eventq_iterate(ef_vi* vi,
++ void (*fn)(void* arg, ef_vi*, int rel_pos,
++ int abs_pos, void* event),
++ void* arg, int stop_at_end)
++{
++ int i, size_evs = (vi->evq_mask + 1) / sizeof(ef_vi_event);
++
++ for( i = 0; i < size_evs; ++i ) {
++ ef_vi_event* e = EF_VI_EVENT_PTR(vi, -i);
++ if( EF_VI_IS_EVENT(e) )
++ fn(arg, vi, i,
++ EF_VI_EVENT_OFFSET(vi, -i) / sizeof(ef_vi_event),
++ e);
++ else if( stop_at_end )
++ break;
++ }
++}
++
++
++int ef_eventq_has_event(ef_vi* vi)
++{
++ return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, 0));
++}
++
++
++int ef_eventq_has_many_events(ef_vi* vi, int look_ahead)
++{
++ ef_assert_ge(look_ahead, 0);
++ return EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, -look_ahead));
++}
++
++
++int ef_eventq_has_rx_event(ef_vi* vi)
++{
++ ef_vi_event* ev;
++ int i, n_evs = 0;
++
++ for( i = 0; EF_VI_IS_EVENT(EF_VI_EVENT_PTR(vi, i)); --i ) {
++ ev = EF_VI_EVENT_PTR(vi, i);
++ if( EFVI_FALCON_EVENT_CODE(ev) == EF_EVENT_TYPE_RX ) n_evs++;
++ }
++ return n_evs;
++}
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/falcon_vi.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/falcon_vi.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,465 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr, stg
++ * \brief Falcon-specific VI
++ * \date 2006/11/30
++ */
++
++#include "ef_vi_internal.h"
++
++
++#define EFVI_FALCON_DMA_TX_FRAG 1
++
++
++/* TX descriptor for both physical and virtual packet transfers */
++typedef union {
++ uint32_t dword[2];
++} ef_vi_falcon_dma_tx_buf_desc;
++typedef ef_vi_falcon_dma_tx_buf_desc ef_vi_falcon_dma_tx_phys_desc;
++
++
++/* RX descriptor for physical addressed transfers */
++typedef union {
++ uint32_t dword[2];
++} ef_vi_falcon_dma_rx_phys_desc;
++
++
++/* RX descriptor for virtual packet transfers */
++typedef struct {
++ uint32_t dword[1];
++} ef_vi_falcon_dma_rx_buf_desc;
++
++/* Buffer table index */
++typedef uint32_t ef_vi_buffer_addr_t;
++
++ef_vi_inline int64_t dma_addr_to_u46(int64_t src_dma_addr)
++{
++ return (src_dma_addr & __FALCON_MASK(46, int64_t));
++}
++
++/*! Setup a physical address based descriptor with a specified length */
++ef_vi_inline void
++__falcon_dma_rx_calc_ip_phys(ef_vi_dma_addr_t dest_pa,
++ ef_vi_falcon_dma_rx_phys_desc *desc,
++ int bytes)
++{
++ int region = 0; /* TODO fixme */
++ int64_t dest = dma_addr_to_u46(dest_pa); /* lower 46 bits */
++
++ DWCHCK(__DW2(RX_KER_BUF_SIZE_LBN), RX_KER_BUF_SIZE_WIDTH);
++ DWCHCK(__DW2(RX_KER_BUF_REGION_LBN),RX_KER_BUF_REGION_WIDTH);
++
++ LWCHK(RX_KER_BUF_ADR_LBN, RX_KER_BUF_ADR_WIDTH);
++
++ RANGECHCK(bytes, RX_KER_BUF_SIZE_WIDTH);
++ RANGECHCK(region, RX_KER_BUF_REGION_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[1] = ((bytes << __DW2(RX_KER_BUF_SIZE_LBN)) |
++ (region << __DW2(RX_KER_BUF_REGION_LBN)) |
++ (HIGH(dest,
++ RX_KER_BUF_ADR_LBN,
++ RX_KER_BUF_ADR_WIDTH)));
++
++ desc->dword[0] = LOW(dest,
++ RX_KER_BUF_ADR_LBN,
++ RX_KER_BUF_ADR_WIDTH);
++}
++
++/*! Setup a virtual buffer descriptor for an IPMODE transfer */
++ef_vi_inline void
++__falcon_dma_tx_calc_ip_buf(unsigned buf_id, unsigned buf_ofs, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ DWCHCK(__DW2(TX_USR_PORT_LBN), TX_USR_PORT_WIDTH);
++ DWCHCK(__DW2(TX_USR_CONT_LBN), TX_USR_CONT_WIDTH);
++ DWCHCK(__DW2(TX_USR_BYTE_CNT_LBN), TX_USR_BYTE_CNT_WIDTH);
++ LWCHK(RX_KER_BUF_ADR_LBN, RX_KER_BUF_ADR_WIDTH);
++ DWCHCK(TX_USR_BYTE_OFS_LBN, TX_USR_BYTE_OFS_WIDTH);
++
++ RANGECHCK(bytes, TX_USR_BYTE_CNT_WIDTH);
++ RANGECHCK(port, TX_USR_PORT_WIDTH);
++ RANGECHCK(frag, TX_USR_CONT_WIDTH);
++ RANGECHCK(buf_id, TX_USR_BUF_ID_WIDTH);
++ RANGECHCK(buf_ofs, TX_USR_BYTE_OFS_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[1] = ((port << __DW2(TX_USR_PORT_LBN)) |
++ (frag << __DW2(TX_USR_CONT_LBN)) |
++ (bytes << __DW2(TX_USR_BYTE_CNT_LBN)) |
++ (HIGH(buf_id,
++ TX_USR_BUF_ID_LBN,
++ TX_USR_BUF_ID_WIDTH)));
++
++ desc->dword[0] = ((LOW(buf_id,
++ TX_USR_BUF_ID_LBN,
++ (TX_USR_BUF_ID_WIDTH))) |
++ (buf_ofs << TX_USR_BYTE_OFS_LBN));
++}
++
++ef_vi_inline void
++falcon_dma_tx_calc_ip_buf_4k(unsigned buf_vaddr, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ /* TODO FIXME [buf_vaddr] consists of the buffer index in the
++ ** high bits, and an offset in the low bits. Assumptions
++ ** permeate the code that these can be rolled into one 32-bit
++ ** value, so this is currently preserved for Falcon. But we
++ ** should change to support 8K pages.
++ */
++ unsigned buf_id = EFVI_FALCON_BUFFER_4K_PAGE(buf_vaddr);
++ unsigned buf_ofs = EFVI_FALCON_BUFFER_4K_OFF(buf_vaddr);
++
++ __falcon_dma_tx_calc_ip_buf( buf_id, buf_ofs, bytes, port, frag, desc);
++}
++
++ef_vi_inline void
++falcon_dma_tx_calc_ip_buf(unsigned buf_vaddr, unsigned bytes, int port,
++ int frag, ef_vi_falcon_dma_tx_buf_desc *desc)
++{
++ falcon_dma_tx_calc_ip_buf_4k(buf_vaddr, bytes, port, frag, desc);
++}
++
++/*! Setup a virtual buffer based descriptor */
++ef_vi_inline void
++__falcon_dma_rx_calc_ip_buf(unsigned buf_id, unsigned buf_ofs,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ /* check alignment of buffer offset and pack */
++ ef_assert((buf_ofs & 0x1) == 0);
++
++ buf_ofs >>= 1;
++
++ DWCHCK(RX_USR_2BYTE_OFS_LBN, RX_USR_2BYTE_OFS_WIDTH);
++ DWCHCK(RX_USR_BUF_ID_LBN, RX_USR_BUF_ID_WIDTH);
++
++ RANGECHCK(buf_ofs, RX_USR_2BYTE_OFS_WIDTH);
++ RANGECHCK(buf_id, RX_USR_BUF_ID_WIDTH);
++
++ ef_assert(desc);
++
++ desc->dword[0] = ((buf_ofs << RX_USR_2BYTE_OFS_LBN) |
++ (buf_id << RX_USR_BUF_ID_LBN));
++}
++
++ef_vi_inline void
++falcon_dma_rx_calc_ip_buf_4k(unsigned buf_vaddr,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ /* TODO FIXME [buf_vaddr] consists of the buffer index in the
++ ** high bits, and an offset in the low bits. Assumptions
++ ** permeate the code that these can be rolled into one 32-bit
++ ** value, so this is currently preserved for Falcon. But we
++ ** should change to support 8K pages.
++ */
++ unsigned buf_id = EFVI_FALCON_BUFFER_4K_PAGE(buf_vaddr);
++ unsigned buf_ofs = EFVI_FALCON_BUFFER_4K_OFF(buf_vaddr);
++
++ __falcon_dma_rx_calc_ip_buf(buf_id, buf_ofs, desc);
++}
++
++ef_vi_inline void
++falcon_dma_rx_calc_ip_buf(unsigned buf_vaddr,
++ ef_vi_falcon_dma_rx_buf_desc *desc)
++{
++ falcon_dma_rx_calc_ip_buf_4k(buf_vaddr, desc);
++}
++
++
++ef_vi_inline ef_vi_dma_addr_t ef_physaddr(ef_addr efaddr)
++{
++ return (ef_vi_dma_addr_t) efaddr;
++}
++
++
++/*! Convert between an ef_addr and a buffer table index
++** Assert that this was not a physical address
++*/
++ef_vi_inline ef_vi_buffer_addr_t ef_bufaddr(ef_addr efaddr)
++{
++ ef_assert(efaddr < ((uint64_t)1 << 32) );
++
++ return (ef_vi_buffer_addr_t) efaddr;
++}
++
++
++/*! Setup a physical address based descriptor for an IPMODE transfer */
++ef_vi_inline void
++falcon_dma_tx_calc_ip_phys(ef_vi_dma_addr_t src_dma_addr, unsigned bytes,
++ int port, int frag,
++ ef_vi_falcon_dma_tx_phys_desc *desc)
++{
++
++ int region = 0; /* FIXME */
++ int64_t src = dma_addr_to_u46(src_dma_addr); /* lower 46 bits */
++
++ DWCHCK(__DW2(TX_KER_PORT_LBN), TX_KER_PORT_WIDTH);
++ DWCHCK(__DW2(TX_KER_CONT_LBN), TX_KER_CONT_WIDTH);
++ DWCHCK(__DW2(TX_KER_BYTE_CNT_LBN), TX_KER_BYTE_CNT_WIDTH);
++ DWCHCK(__DW2(TX_KER_BUF_REGION_LBN),TX_KER_BUF_REGION_WIDTH);
++
++ LWCHK(TX_KER_BUF_ADR_LBN, TX_KER_BUF_ADR_WIDTH);
++
++ RANGECHCK(port, TX_KER_PORT_WIDTH);
++ RANGECHCK(frag, TX_KER_CONT_WIDTH);
++ RANGECHCK(bytes, TX_KER_BYTE_CNT_WIDTH);
++ RANGECHCK(region, TX_KER_BUF_REGION_WIDTH);
++
++ desc->dword[1] = ((port << __DW2(TX_KER_PORT_LBN)) |
++ (frag << __DW2(TX_KER_CONT_LBN)) |
++ (bytes << __DW2(TX_KER_BYTE_CNT_LBN)) |
++ (region << __DW2(TX_KER_BUF_REGION_LBN)) |
++ (HIGH(src,
++ TX_KER_BUF_ADR_LBN,
++ TX_KER_BUF_ADR_WIDTH)));
++
++ ef_assert_equal(TX_KER_BUF_ADR_LBN, 0);
++ desc->dword[0] = (uint32_t) src_dma_addr;
++}
++
++
++void falcon_vi_init(ef_vi* vi, void* vvis)
++{
++ struct vi_mappings *vm = (struct vi_mappings*)vvis;
++ uint16_t* ids;
++
++ ef_assert(vi);
++ ef_assert(vvis);
++ ef_assert_equal(vm->signature, VI_MAPPING_SIGNATURE);
++ ef_assert_equal(vm->nic_type.arch, EF_VI_ARCH_FALCON);
++
++ /* Initialise masks to zero, so that ef_vi_state_init() will
++ ** not do any harm when we don't have DMA queues. */
++ vi->vi_rxq.mask = vi->vi_txq.mask = 0;
++
++ /* Used for BUG5391_WORKAROUND. */
++ vi->vi_txq.misalign_mask = 0;
++
++ /* Initialise doorbell addresses to a distinctive small value
++ ** which will cause a segfault, to trap doorbell pushes to VIs
++ ** without DMA queues. */
++ vi->vi_rxq.doorbell = vi->vi_txq.doorbell = (ef_vi_ioaddr_t)0xdb;
++
++ ids = (uint16_t*) (vi->ep_state + 1);
++
++ if( vm->tx_queue_capacity ) {
++ vi->vi_txq.mask = vm->tx_queue_capacity - 1;
++ vi->vi_txq.doorbell = vm->tx_bell + 12;
++ vi->vi_txq.descriptors = vm->tx_dma_falcon;
++ vi->vi_txq.ids = ids;
++ ids += vi->vi_txq.mask + 1;
++ /* Check that the id fifo fits in the space allocated. */
++ ef_assert_le((char*) (vi->vi_txq.ids + vm->tx_queue_capacity),
++ (char*) vi->ep_state
++ + ef_vi_calc_state_bytes(vm->rx_queue_capacity,
++ vm->tx_queue_capacity));
++ }
++ if( vm->rx_queue_capacity ) {
++ vi->vi_rxq.mask = vm->rx_queue_capacity - 1;
++ vi->vi_rxq.doorbell = vm->rx_bell + 12;
++ vi->vi_rxq.descriptors = vm->rx_dma_falcon;
++ vi->vi_rxq.ids = ids;
++ /* Check that the id fifo fits in the space allocated. */
++ ef_assert_le((char*) (vi->vi_rxq.ids + vm->rx_queue_capacity),
++ (char*) vi->ep_state
++ + ef_vi_calc_state_bytes(vm->rx_queue_capacity,
++ vm->tx_queue_capacity));
++ }
++
++ if( vm->nic_type.variant == 'A' ) {
++ vi->vi_txq.misalign_mask = 15; /* BUG5391_WORKAROUND */
++ vi->vi_flags |= EF_VI_BUG5692_WORKAROUND;
++ }
++}
++
++
++int ef_vi_transmitv_init(ef_vi* vi, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id)
++{
++ ef_vi_txq* q = &vi->vi_txq;
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ ef_vi_falcon_dma_tx_buf_desc* dp;
++ unsigned len, dma_len, di;
++ unsigned added_save = qs->added;
++ ef_addr dma_addr;
++ unsigned last_len = 0;
++
++ ef_assert(iov_len > 0);
++ ef_assert(iov);
++ ef_assert_equal((dma_id & EF_REQUEST_ID_MASK), dma_id);
++ ef_assert_nequal(dma_id, 0xffff);
++
++ dma_addr = iov->iov_base;
++ len = iov->iov_len;
++
++ if( vi->vi_flags & EF_VI_ISCSI_TX_DDIG ) {
++ /* Last 4 bytes of placeholder for digest must be
++ * removed for h/w */
++ ef_assert(len > 4);
++ last_len = iov[iov_len - 1].iov_len;
++ if( last_len <= 4 ) {
++ ef_assert(iov_len > 1);
++ --iov_len;
++ last_len = iov[iov_len - 1].iov_len - (4 - last_len);
++ }
++ else {
++ last_len = iov[iov_len - 1].iov_len - 4;
++ }
++ if( iov_len == 1 )
++ len = last_len;
++ }
++
++ while( 1 ) {
++ if( qs->added - qs->removed >= q->mask ) {
++ qs->added = added_save;
++ return -EAGAIN;
++ }
++
++ dma_len = (~((unsigned) dma_addr) & 0xfff) + 1;
++ if( dma_len > len ) dma_len = len;
++ { /* BUG5391_WORKAROUND */
++ unsigned misalign =
++ (unsigned) dma_addr & q->misalign_mask;
++ if( misalign && dma_len + misalign > 512 )
++ dma_len = 512 - misalign;
++ }
++
++ di = qs->added++ & q->mask;
++ dp = (ef_vi_falcon_dma_tx_buf_desc*) q->descriptors + di;
++ if( vi->vi_flags & EF_VI_TX_PHYS_ADDR )
++ falcon_dma_tx_calc_ip_phys
++ (ef_physaddr(dma_addr), dma_len, /*port*/ 0,
++ (iov_len == 1 && dma_len == len) ? 0 :
++ EFVI_FALCON_DMA_TX_FRAG, dp);
++ else
++ falcon_dma_tx_calc_ip_buf
++ (ef_bufaddr(dma_addr), dma_len, /*port*/ 0,
++ (iov_len == 1 && dma_len == len) ? 0 :
++ EFVI_FALCON_DMA_TX_FRAG, dp);
++
++ dma_addr += dma_len;
++ len -= dma_len;
++
++ if( len == 0 ) {
++ if( --iov_len == 0 ) break;
++ ++iov;
++ dma_addr = iov->iov_base;
++ len = iov->iov_len;
++ if( (vi->vi_flags & EF_VI_ISCSI_TX_DDIG) &&
++ (iov_len == 1) )
++ len = last_len;
++ }
++ }
++
++ q->ids[di] = (uint16_t) dma_id;
++ return 0;
++}
++
++
++void ef_vi_transmit_push(ef_vi* vi)
++{
++ ef_vi_wiob();
++ writel((vi->ep_state->txq.added & vi->vi_txq.mask) <<
++ __DW4(TX_DESC_WPTR_LBN),
++ vi->vi_txq.doorbell);
++}
++
++
++/*! The value of initial_rx_bytes is used to set RX_KER_BUF_SIZE in an initial
++** receive descriptor here if physical addressing is being used. A value of
++** zero represents 16384 bytes. This is okay, because the caller must provide
++** a buffer that is > MTU, and the MAC should filter anything bigger than
++** that.
++*/
++int ef_vi_receive_init(ef_vi* vi, ef_addr addr, ef_request_id dma_id,
++ int initial_rx_bytes)
++{
++ ef_vi_rxq* q = &vi->vi_rxq;
++ ef_vi_rxq_state* qs = &vi->ep_state->rxq;
++ unsigned di;
++
++ if( ef_vi_receive_space(vi) ) {
++ di = qs->added++ & q->mask;
++ ef_assert_equal(q->ids[di], 0xffff);
++ q->ids[di] = (uint16_t) dma_id;
++
++ if( ! (vi->vi_flags & EF_VI_RX_PHYS_ADDR) ) {
++ ef_vi_falcon_dma_rx_buf_desc* dp;
++ dp = (ef_vi_falcon_dma_rx_buf_desc*)
++ q->descriptors + di;
++ falcon_dma_rx_calc_ip_buf(ef_bufaddr(addr), dp);
++ }
++ else {
++ ef_vi_falcon_dma_rx_phys_desc* dp;
++ dp = (ef_vi_falcon_dma_rx_phys_desc*)
++ q->descriptors + di;
++ __falcon_dma_rx_calc_ip_phys(addr, dp,
++ initial_rx_bytes);
++ }
++
++ return 0;
++ }
++
++ return -EAGAIN;
++}
++
++
++void ef_vi_receive_push(ef_vi* vi)
++{
++ ef_vi_wiob();
++ writel ((vi->ep_state->rxq.added & vi->vi_rxq.mask) <<
++ __DW4(RX_DESC_WPTR_LBN),
++ vi->vi_rxq.doorbell);
++}
++
++
++ef_request_id ef_vi_receive_done(const ef_vi* vi, const ef_event* ef_ev)
++{
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*ef_ev);
++ unsigned di = ev->u32[0] & vi->vi_rxq.mask;
++ ef_request_id rq_id;
++
++ ef_assert(EF_EVENT_TYPE(*ef_ev) == EF_EVENT_TYPE_RX ||
++ EF_EVENT_TYPE(*ef_ev) == EF_EVENT_TYPE_RX_DISCARD);
++
++ /* Detect spurious / duplicate RX events. We may need to modify this
++ ** code so that we are robust if they happen. */
++ ef_assert_equal(di, vi->ep_state->rxq.removed & vi->vi_rxq.mask);
++
++ /* We only support 1 port: so events should be in order. */
++ ef_assert(vi->vi_rxq.ids[di] != 0xffff);
++
++ rq_id = vi->vi_rxq.ids[di];
++ vi->vi_rxq.ids[di] = 0xffff;
++ ++vi->ep_state->rxq.removed;
++ return rq_id;
++}
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/pt_tx.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/pt_tx.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,91 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Packet-mode transmit interface.
++ * \date 2003/04/02
++ */
++
++/*! \cidoxg_lib_ef */
++#include "ef_vi_internal.h"
++
++
++int ef_vi_transmit_init(ef_vi* vi, ef_addr base, int len, ef_request_id dma_id)
++{
++ ef_iovec iov = { base, len };
++ return ef_vi_transmitv_init(vi, &iov, 1, dma_id);
++}
++
++
++int ef_vi_transmit(ef_vi* vi, ef_addr base, int len, ef_request_id dma_id)
++{
++ ef_iovec iov = { base, len };
++ int rc = ef_vi_transmitv_init(vi, &iov, 1, dma_id);
++ if( rc == 0 ) ef_vi_transmit_push(vi);
++ return rc;
++}
++
++
++int ef_vi_transmitv(ef_vi* vi, const ef_iovec* iov, int iov_len,
++ ef_request_id dma_id)
++{
++ int rc = ef_vi_transmitv_init(vi, iov, iov_len, dma_id);
++ if( rc == 0 ) ef_vi_transmit_push(vi);
++ return rc;
++}
++
++
++int ef_vi_transmit_unbundle(ef_vi* vi, const ef_event* __ev,
++ ef_request_id* ids)
++{
++ ef_request_id* ids_in = ids;
++ ef_vi_txq* q = &vi->vi_txq;
++ ef_vi_txq_state* qs = &vi->ep_state->txq;
++ const ef_vi_qword* ev = EF_GET_HW_EV_PTR(*__ev);
++ unsigned i, stop = (ev->u32[0] + 1) & q->mask;
++
++ ef_assert(EF_EVENT_TYPE(*__ev) == EF_EVENT_TYPE_TX ||
++ EF_EVENT_TYPE(*__ev) == EF_EVENT_TYPE_TX_ERROR);
++
++ /* Shouldn't be batching more than 64 descriptors, and should not go
++ ** backwards. */
++ ef_assert_le((((ev->u32[0] + 1) - qs->removed) & q->mask), 64);
++ /* Should not complete more than we've posted. */
++ ef_assert_le((((ev->u32[0] + 1) - qs->removed) & q->mask),
++ qs->added - qs->removed);
++
++ for( i = qs->removed & q->mask; i != stop; i = ++qs->removed & q->mask )
++ if( q->ids[i] != 0xffff ) {
++ *ids++ = q->ids[i];
++ q->ids[i] = 0xffff;
++ }
++
++ ef_assert_le(ids - ids_in, EF_VI_TRANSMIT_BATCH);
++
++ return (int) (ids - ids_in);
++}
++
++/*! \cidoxg_end */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/sysdep.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/sysdep.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,184 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author stg
++ * \brief System dependent support for ef vi lib
++ * \date 2007/05/10
++ */
++
++/*! \cidoxg_include_ci_ul */
++#ifndef __CI_CIUL_SYSDEP_LINUX_H__
++#define __CI_CIUL_SYSDEP_LINUX_H__
++
++/**********************************************************************
++ * Kernel version compatibility
++ */
++
++#if defined(__GNUC__)
++
++/* Linux kernel doesn't have stdint.h or [u]intptr_t. */
++# if !defined(LINUX_VERSION_CODE)
++# include <linux/version.h>
++# endif
++# include <asm/io.h>
++
++/* In Linux 2.6.24, linux/types.h has uintptr_t */
++# if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
++# if BITS_PER_LONG == 32
++ typedef __u32 uintptr_t;
++# else
++ typedef __u64 uintptr_t;
++# endif
++# endif
++
++/* But even 2.6.24 doesn't define intptr_t */
++# if BITS_PER_LONG == 32
++ typedef __s32 intptr_t;
++# else
++ typedef __s64 intptr_t;
++# endif
++
++# if defined(__ia64__)
++# define EF_VI_PRIx64 "lx"
++# else
++# define EF_VI_PRIx64 "llx"
++# endif
++
++# define EF_VI_HF __attribute__((visibility("hidden")))
++# define EF_VI_HV __attribute__((visibility("hidden")))
++
++# if defined(__i386__) || defined(__x86_64__) /* GCC x86/x64 */
++ typedef unsigned long long ef_vi_dma_addr_t;
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
++# else
++# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# endif
++
++# endif
++#endif
++
++#ifdef EFX_NOT_UPSTREAM
++
++/* Stuff for architectures/compilers not officially supported */
++
++#if !defined(__GNUC__)
++# if defined(__PPC__) /* GCC, PPC */
++ typedef unsigned long ef_vi_dma_addr_t;
++# define ef_vi_wiob() wmb()
++
++# ifdef __powerpc64__
++# ifdef CONFIG_SMP
++# define CI_SMP_SYNC "\n eieio \n" /* memory cache sync */
++# define CI_SMP_ISYNC "\n isync \n" /* instr cache sync */
++# else
++# define CI_SMP_SYNC
++# define CI_SMP_ISYNC
++# endif
++# else /* for ppc32 systems */
++# ifdef CONFIG_SMP
++# define CI_SMP_SYNC "\n eieio \n"
++# define CI_SMP_ISYNC "\n sync \n"
++# else
++# define CI_SMP_SYNC
++# define CI_SMP_ISYNC
++# endif
++# endif
++
++# elif defined(__ia64__) /* GCC, IA64 */
++ typedef unsigned long ef_vi_dma_addr_t;
++# define ef_vi_wiob() __asm__ __volatile__("mf.a": : :"memory")
++
++# else
++# error Unknown processor - GNU C
++# endif
++
++#elif defined(__PGI)
++# error PGI not supported
++
++#elif defined(__INTEL_COMPILER)
++
++/* Intel compilers v7 claim to be very gcc compatible. */
++# if __INTEL_COMPILER >= 700
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ > 91)
++# define EF_VI_LIKELY(t) __builtin_expect((t), 1)
++# define EF_VI_UNLIKELY(t) __builtin_expect((t), 0)
++# endif
++
++# if __GNUC__ >= 3 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 96)
++# define ef_vi_wiob() __asm__ __volatile__ ("sfence")
++# else
++# define ef_vi_wiob() __asm__ __volatile__ (".byte 0x0F, 0xAE, 0xF8")
++# endif
++
++# else
++# error Old Intel compiler not supported.
++# endif
++
++#else
++# error Unknown compiler.
++#endif
++
++#endif
++
++
++# include <linux/errno.h>
++
++
++/**********************************************************************
++ * Extracting bit fields.
++ */
++
++#define _QWORD_GET_LOW(f, v) \
++ (((v).u32[0] >> (f##_LBN)) & ((1u << f##_WIDTH) - 1u))
++#define _QWORD_GET_HIGH(f, v) \
++ (((v).u32[1] >> (f##_LBN - 32u)) & ((1u << f##_WIDTH) - 1u))
++#define _QWORD_GET_ANY(f, v) \
++ (((v).u64[0] >> f##_LBN) & (((uint64_t) 1u << f##_WIDTH) - 1u))
++
++#define QWORD_GET(f, v) \
++ ((f##_LBN + f##_WIDTH) <= 32u \
++ ? _QWORD_GET_LOW(f, (v)) \
++ : ((f##_LBN >= 32u) ? _QWORD_GET_HIGH(f, (v)) : _QWORD_GET_ANY(f, (v))))
++
++#define QWORD_GET_U(f, v) ((unsigned) QWORD_GET(f, (v)))
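++
++/* Worked example (with a hypothetical field FOO): if FOO_LBN == 36 and
++ * FOO_WIDTH == 4, QWORD_GET(FOO, v) takes the high-word path and
++ * evaluates to ((v).u32[1] >> 4) & 0xf.  A field that straddles the
++ * 32-bit boundary (e.g. LBN 30, WIDTH 4) falls through to
++ * _QWORD_GET_ANY, which extracts from u64[0] directly. */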
++
++#define _QWORD_TEST_BIT_LOW(f, v) ((v).u32[0] & (1u << (f##_LBN)))
++#define _QWORD_TEST_BIT_HIGH(f, v) ((v).u32[1] & (1u << (f##_LBN - 32u)))
++
++#define QWORD_TEST_BIT(f, v) \
++ (f##_LBN < 32 ? _QWORD_TEST_BIT_LOW(f, (v)) : _QWORD_TEST_BIT_HIGH(f, (v)))
++
++
++
++
++#ifndef DECLSPEC_NORETURN
++/* normally defined on Windows to expand to a declaration that the
++ function will not return */
++# define DECLSPEC_NORETURN
++#endif
++
++#endif /* __CI_CIUL_SYSDEP_LINUX_H__ */
+Index: head-2008-11-25/drivers/xen/sfc_netfront/vi_init.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netfront/vi_init.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,183 @@
++/****************************************************************************
++ * Copyright 2002-2005: Level 5 Networks Inc.
++ * Copyright 2005-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications
++ * <linux-xen-drivers@solarflare.com>
++ * <onload-dev@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * \author djr
++ * \brief Initialisation of VIs.
++ * \date 2007/06/08
++ */
++
++#include "ef_vi_internal.h"
++
++#define EF_VI_STATE_BYTES(rxq_sz, txq_sz) \
++ (sizeof(ef_vi_state) + (rxq_sz) * sizeof(uint16_t) \
++ + (txq_sz) * sizeof(uint16_t))
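++
++/* Layout note: the ef_vi_state struct is followed in the same buffer by
++ * the TX and RX request-id fifos, one uint16_t per descriptor; see
++ * falcon_vi_init(), which carves them out of (vi->ep_state + 1). */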
++
++int ef_vi_calc_state_bytes(int rxq_sz, int txq_sz)
++{
++ ef_assert(rxq_sz == 0 || EF_VI_IS_POW2(rxq_sz));
++ ef_assert(txq_sz == 0 || EF_VI_IS_POW2(txq_sz));
++
++ return EF_VI_STATE_BYTES(rxq_sz, txq_sz);
++}
++
++
++int ef_vi_state_bytes(ef_vi* vi)
++{
++ int rxq_sz = 0, txq_sz = 0;
++ if( ef_vi_receive_capacity(vi) )
++ rxq_sz = ef_vi_receive_capacity(vi) + 1;
++ if( ef_vi_transmit_capacity(vi) )
++ txq_sz = ef_vi_transmit_capacity(vi) + 1;
++
++ ef_assert(rxq_sz == 0 || EF_VI_IS_POW2(rxq_sz));
++ ef_assert(txq_sz == 0 || EF_VI_IS_POW2(txq_sz));
++
++ return EF_VI_STATE_BYTES(rxq_sz, txq_sz);
++}
++
++
++void ef_eventq_state_init(ef_vi* evq)
++{
++ int j;
++
++ for (j = 0; j<EFAB_DMAQS_PER_EVQ_MAX; j++) {
++ ef_rx_dup_state_t *rx_dup_state =
++ &evq->evq_state->rx_dup_state[j];
++ rx_dup_state->bad_sop = 0;
++ rx_dup_state->rx_last_desc_ptr = -1;
++ rx_dup_state->frag_num = 0;
++ }
++
++ evq->evq_state->evq_ptr = 0;
++}
++
++
++void ef_vi_state_init(ef_vi* vi)
++{
++ ef_vi_state* state = vi->ep_state;
++ unsigned i;
++
++ state->txq.added = state->txq.removed = 0;
++ state->rxq.added = state->rxq.removed = 0;
++
++ if( vi->vi_rxq.mask )
++ for( i = 0; i <= vi->vi_rxq.mask; ++i )
++ vi->vi_rxq.ids[i] = (uint16_t) -1;
++ if( vi->vi_txq.mask )
++ for( i = 0; i <= vi->vi_txq.mask; ++i )
++ vi->vi_txq.ids[i] = (uint16_t) -1;
++}
++
++
++void ef_vi_init_mapping_evq(void* data_area, struct ef_vi_nic_type nic_type,
++ int instance, unsigned evq_bytes, void* base,
++ void* timer_reg)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) data_area;
++
++ vm->signature = VI_MAPPING_SIGNATURE;
++ vm->vi_instance = instance;
++ vm->nic_type = nic_type;
++ vm->evq_bytes = evq_bytes;
++ vm->evq_base = base;
++ vm->evq_timer_reg = timer_reg;
++}
++
++
++void ef_vi_init(ef_vi* vi, void* vvis, ef_vi_state* state,
++ ef_eventq_state* evq_state, enum ef_vi_flags vi_flags)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) vvis;
++
++ vi->vi_i = vm->vi_instance;
++ vi->ep_state = state;
++ vi->vi_flags = vi_flags;
++
++ switch( vm->nic_type.arch ) {
++ case EF_VI_ARCH_FALCON:
++ falcon_vi_init(vi, vvis);
++ break;
++ default:
++ /* ?? TODO: We should return an error code. */
++ ef_assert(0);
++ break;
++ }
++
++ if( vm->evq_bytes ) {
++ vi->evq_state = evq_state;
++ vi->evq_mask = vm->evq_bytes - 1u;
++ vi->evq_base = vm->evq_base;
++ vi->evq_timer_reg = vm->evq_timer_reg;
++ }
++
++ EF_VI_MAGIC_SET(vi, EF_VI);
++}
++
++
++/* Initialise [data_area] with information required to initialise an ef_vi.
++ * In the following, an unused param should be set to NULL. Note that
++ * [iobuf_mmap_rx/_tx] are required for falcon, but for the normal driver
++ * must be NULL.
++ *
++ * \param data_area [in,out] required, must ref at least VI_MAPPINGS_SIZE
++ * bytes
++ * \param io_mmap [in] ef1, required
++ * falcon, required
++ * \param iobuf_mmap_rx/_tx [in] ef1, unused
++ * falcon, required
++ */
++void ef_vi_init_mapping_vi(void* data_area, struct ef_vi_nic_type nic_type,
++ unsigned rxq_capacity, unsigned txq_capacity,
++ int instance, void* io_mmap,
++ void* iobuf_mmap_rx, void* iobuf_mmap_tx,
++ enum ef_vi_flags vi_flags)
++{
++ struct vi_mappings* vm = (struct vi_mappings*) data_area;
++ int rx_desc_bytes, rxq_bytes;
++
++ ef_assert(rxq_capacity > 0 || txq_capacity > 0);
++ ef_assert(vm);
++ ef_assert(io_mmap);
++ ef_assert(iobuf_mmap_rx || iobuf_mmap_tx);
++
++ vm->signature = VI_MAPPING_SIGNATURE;
++ vm->vi_instance = instance;
++ vm->nic_type = nic_type;
++
++ rx_desc_bytes = (vi_flags & EF_VI_RX_PHYS_ADDR) ? 8 : 4;
++ rxq_bytes = rxq_capacity * rx_desc_bytes;
++ rxq_bytes = (rxq_bytes + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
++
++ if( iobuf_mmap_rx == iobuf_mmap_tx )
++ iobuf_mmap_tx = (char*) iobuf_mmap_rx + rxq_bytes;
++
++ vm->rx_queue_capacity = rxq_capacity;
++ vm->rx_dma_falcon = iobuf_mmap_rx;
++ vm->rx_bell = (char*) io_mmap + (RX_DESC_UPD_REG_KER_OFST & 4095);
++ vm->tx_queue_capacity = txq_capacity;
++ vm->tx_dma_falcon = iobuf_mmap_tx;
++ vm->tx_bell = (char*) io_mmap + (TX_DESC_UPD_REG_KER_OFST & 4095);
++}
+Index: head-2008-11-25/drivers/xen/sfc_netutil/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/Makefile 2008-02-26 10:54:12.000000000 +0100
+@@ -0,0 +1,11 @@
++EXTRA_CFLAGS += -Idrivers/xen/sfc_netutil
++EXTRA_CFLAGS += -Werror
++
++ifdef GGOV
++EXTRA_CFLAGS += -fprofile-arcs -ftest-coverage -DEFX_GCOV
++endif
++
++obj-$(CONFIG_XEN_NETDEV_ACCEL_SFC_UTIL) := sfc_netutil.o
++
++sfc_netutil-objs := accel_cuckoo_hash.o accel_msg_iface.o accel_util.o
++
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_cuckoo_hash.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_cuckoo_hash.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,651 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/types.h> /* needed for linux/random.h */
++#include <linux/random.h>
++
++#include "accel_cuckoo_hash.h"
++#include "accel_util.h"
++
++static inline int cuckoo_hash_key_compare(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key1,
++ cuckoo_hash_key *key2)
++{
++ return !memcmp(key1, key2, hashtab->key_length);
++}
++
++
++static inline void cuckoo_hash_key_set(cuckoo_hash_key *key1,
++ cuckoo_hash_key *key2)
++{
++ *key1 = *key2;
++}
++
++
++/*
++ * Sets hash function parameters. Chooses "a" to be odd, 0 < a < 2^w
++ * where w is the length of the key in bits.
++ */
++static void set_hash_parameters(cuckoo_hash_table *hashtab)
++{
++ again:
++ hashtab->a0 = hashtab->a1 = 0;
++
++ /* Make sure random */
++ get_random_bytes(&hashtab->a0, hashtab->key_length);
++ get_random_bytes(&hashtab->a1, hashtab->key_length);
++
++ /* Make sure odd */
++ hashtab->a0 |= 1;
++ hashtab->a1 |= 1;
++
++ /* Being different is good */
++ if (hashtab->a0 != hashtab->a1)
++ return;
++
++ goto again;
++}
++
++int cuckoo_hash_init(cuckoo_hash_table *hashtab, unsigned length_bits,
++ unsigned key_length)
++{
++ char *table_mem;
++ unsigned length = 1 << length_bits;
++
++ BUG_ON(length_bits >= sizeof(unsigned) * 8);
++ BUG_ON(key_length > sizeof(cuckoo_hash_key));
++
++ table_mem = kmalloc(sizeof(cuckoo_hash_entry) * 2 * length, GFP_KERNEL);
++
++ if (table_mem == NULL)
++ return -ENOMEM;
++
++ hashtab->length = length;
++ hashtab->length_bits = length_bits;
++ hashtab->key_length = key_length;
++ hashtab->entries = 0;
++
++ hashtab->table0 = (cuckoo_hash_entry *)table_mem;
++ hashtab->table1 = (cuckoo_hash_entry *)
++ (table_mem + length * sizeof(cuckoo_hash_entry));
++
++ set_hash_parameters(hashtab);
++
++ /* Zero the table */
++ memset(hashtab->table0, 0, length * 2 * sizeof(cuckoo_hash_entry));
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_init);
++
++void cuckoo_hash_destroy(cuckoo_hash_table *hashtab)
++{
++ if (hashtab->table0 != NULL)
++ kfree(hashtab->table0);
++}
++
++EXPORT_SYMBOL_GPL(cuckoo_hash_destroy);
++
++/*
++ * This computes sizeof(cuckoo_hash) * 8 bits of hash; not all of them
++ * will necessarily be used, but the hash function throws away any
++ * that aren't.
++ */
++static inline void cuckoo_compute_hash_helper(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *a,
++ cuckoo_hash_key *x,
++ cuckoo_hash *result)
++{
++ u64 multiply_result = 0, a_temp, x_temp;
++ u32 carry = 0;
++ u32 *a_words;
++ u32 *x_words;
++ int i;
++
++ /*
++ * As the mod and div operations in the function effectively
++ * reduce and shift the bits of the product down to just the
++ * third word, we need only compute that and return it as a
++ * result.
++ *
++ * Do enough long multiplication to get the word we need
++ */
++
++ /* This assumes things about the sizes of the key and hash */
++ BUG_ON(hashtab->key_length % sizeof(u32) != 0);
++ BUG_ON(sizeof(cuckoo_hash) != sizeof(u32));
++
++ a_words = (u32 *)a;
++ x_words = (u32 *)x;
++
++ for (i = 0; i < hashtab->key_length / sizeof(u32); i++) {
++ a_temp = a_words[i];
++ x_temp = x_words[i];
++
++ multiply_result = (a_temp * x_temp) + carry;
++ carry = (multiply_result >> 32) & 0xffffffff;
++ }
++
++ *result = multiply_result & 0xffffffff;
++}
++
++
++/*
++ * Want to implement (ax mod 2^w) div 2^(w-q) for odd a, 0 < a < 2^w;
++ * w is the length of the key and q is the length of the hash, in bits.
++ * See http://www.it-c.dk/people/pagh/papers/cuckoo-jour.pdf
++ */
++static cuckoo_hash cuckoo_compute_hash(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_key *a)
++{
++ unsigned q = hashtab->length_bits;
++ unsigned shift = 32 - q;
++ unsigned mask = ((1 << q) - 1) << shift;
++ cuckoo_hash hash;
++
++ cuckoo_compute_hash_helper(hashtab, a, key, &hash);
++
++ /*
++ * Take the top few bits to get the right length for this
++ * hash table
++ */
++ hash = (hash & mask) >> shift;
++
++ BUG_ON(hash >= hashtab->length);
++
++ return hash;
++}
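++
++/* Worked example: with length_bits q == 8 we get shift == 24 and
++ * mask == 0xff000000, so the hash is the top 8 bits of the 32-bit
++ * product word -- i.e. (a*x mod 2^32) div 2^(32-8), matching the
++ * multiply-shift scheme referenced above. */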
++
++
++static int cuckoo_hash_lookup0(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ cuckoo_hash hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED)
++ && cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ *value = hashtab->table0[hash].value;
++ return 1;
++ }
++
++ return 0;
++}
++
++static int cuckoo_hash_lookup1(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ cuckoo_hash hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED)
++ && cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ *value = hashtab->table1[hash].value;
++ return 1;
++ }
++
++ return 0;
++}
++
++
++int cuckoo_hash_lookup(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value *value)
++{
++ return cuckoo_hash_lookup0(hashtab, key, value)
++ || cuckoo_hash_lookup1(hashtab, key, value);
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_lookup);
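++
++/* A minimal usage sketch (illustrative only; error handling elided, and
++ * [my_key], [my_value] and use() are hypothetical):
++ *
++ *   cuckoo_hash_table tab;
++ *   cuckoo_hash_value v;
++ *   cuckoo_hash_init(&tab, 4, sizeof(my_key));  (two tables of 16)
++ *   cuckoo_hash_add_check(&tab, &my_key, my_value, 1);
++ *   if (cuckoo_hash_lookup(&tab, &my_key, &v))
++ *     use(v);
++ *   cuckoo_hash_remove(&tab, &my_key);
++ *   cuckoo_hash_destroy(&tab);
++ */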
++
++
++/* Transfer any active entries from "old_table" into hashtab */
++static int cuckoo_hash_transfer_entries(cuckoo_hash_table *hashtab,
++ cuckoo_hash_entry *old_table,
++ unsigned capacity)
++{
++ int i, rc;
++ cuckoo_hash_entry *entry;
++
++ hashtab->entries = 0;
++
++ for (i = 0; i < capacity; i++) {
++ entry = &old_table[i];
++ if (entry->state == CUCKOO_HASH_STATE_OCCUPIED) {
++ rc = cuckoo_hash_add(hashtab, &(entry->key),
++ entry->value, 0);
++ if (rc != 0) {
++ return rc;
++ }
++ }
++ }
++
++ return 0;
++}
++
++
++int cuckoo_hash_rehash(cuckoo_hash_table *hashtab)
++{
++ cuckoo_hash_entry *new_table;
++ cuckoo_hash_table old_hashtab;
++ int resize = 0, rc, rehash_count;
++
++ /*
++ * Store old tables so we can access the existing values and
++ * copy across
++ */
++ memcpy(&old_hashtab, hashtab, sizeof(cuckoo_hash_table));
++
++ /* resize if hashtable is more than half full */
++ if (old_hashtab.entries > old_hashtab.length &&
++ old_hashtab.length_bits < 32)
++ resize = 1;
++
++ resize:
++ if (resize) {
++ new_table = kmalloc(sizeof(cuckoo_hash_entry) * 4 * hashtab->length,
++ GFP_ATOMIC);
++ if (new_table == NULL) {
++ rc = -ENOMEM;
++ goto err;
++ }
++
++ hashtab->length = 2 * hashtab->length;
++ hashtab->length_bits++;
++ } else {
++ new_table = kmalloc(sizeof(cuckoo_hash_entry) * 2 * hashtab->length,
++ GFP_ATOMIC);
++ if (new_table == NULL) {
++ rc = -ENOMEM;
++ goto err;
++ }
++ }
++
++ /*
++ * Point hashtab to new memory region so we can try to
++ * construct new table
++ */
++ hashtab->table0 = new_table;
++ hashtab->table1 = (cuckoo_hash_entry *)
++ ((char *)new_table + hashtab->length * sizeof(cuckoo_hash_entry));
++
++ rehash_count = 0;
++
++ again:
++ /* Zero the new tables */
++ memset(new_table, 0, hashtab->length * 2 * sizeof(cuckoo_hash_entry));
++
++ /* Choose new parameters for the hash functions */
++ set_hash_parameters(hashtab);
++
++ /*
++ * Multiply old_table_length by 2 as the length refers to each
++ * table, and there are two of them. This assumes that they
++ * are arranged sequentially in memory, so assert it
++ */
++ BUG_ON(((char *)old_hashtab.table1) !=
++ ((char *)old_hashtab.table0 + old_hashtab.length
++ * sizeof(cuckoo_hash_entry)));
++ rc = cuckoo_hash_transfer_entries(hashtab, old_hashtab.table0,
++ old_hashtab.length * 2);
++ if (rc < 0) {
++ /* Problem */
++ if (rc == -ENOSPC) {
++ ++rehash_count;
++ if (rehash_count < CUCKOO_HASH_MAX_LOOP) {
++ /*
++ * Wanted to rehash, but rather than
++ * recurse we can just do it here
++ */
++ goto again;
++ } else {
++ /*
++ * Didn't manage to rehash, so let's
++ * go up a size (if we haven't already
++ * and there's space)
++ */
++ if (!resize && hashtab->length_bits < 32) {
++ resize = 1;
++ kfree(new_table);
++ goto resize;
++ }
++ else
++ goto err;
++ }
++ }
++ else
++ goto err;
++ }
++
++ /* Success, I think. Free up the old table */
++ kfree(old_hashtab.table0);
++
++ /* We should have put all the entries from old table in the new one */
++ BUG_ON(hashtab->entries != old_hashtab.entries);
++
++ return 0;
++ err:
++ EPRINTK("%s: Rehash failed, giving up\n", __FUNCTION__);
++ /* Some other error, give up, at least restore table to how it was */
++ memcpy(hashtab, &old_hashtab, sizeof(cuckoo_hash_table));
++ if (new_table)
++ kfree(new_table);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_rehash);
++
++
++static int
++cuckoo_hash_insert_or_displace(cuckoo_hash_entry *table, unsigned hash,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ cuckoo_hash_key *displaced_key,
++ cuckoo_hash_value *displaced_value)
++{
++ if (table[hash].state == CUCKOO_HASH_STATE_VACANT) {
++ cuckoo_hash_key_set(&(table[hash].key), key);
++ table[hash].value = value;
++ table[hash].state = CUCKOO_HASH_STATE_OCCUPIED;
++
++ return 1;
++ } else {
++ cuckoo_hash_key_set(displaced_key, &(table[hash].key));
++ *displaced_value = table[hash].value;
++ cuckoo_hash_key_set(&(table[hash].key), key);
++ table[hash].value = value;
++
++ return 0;
++ }
++}
++
++
++int cuckoo_hash_add(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value, int can_rehash)
++{
++ cuckoo_hash hash0, hash1;
++ int i, rc;
++ cuckoo_hash_key key1, key2;
++
++ cuckoo_hash_key_set(&key1, key);
++
++ again:
++ i = 0;
++ do {
++ hash0 = cuckoo_compute_hash(hashtab, &key1, &hashtab->a0);
++ if (cuckoo_hash_insert_or_displace(hashtab->table0, hash0,
++ &key1, value, &key2,
++ &value)) {
++ /* Success */
++ hashtab->entries++;
++ return 0;
++ }
++
++ hash1 = cuckoo_compute_hash(hashtab, &key2, &hashtab->a1);
++ if (cuckoo_hash_insert_or_displace(hashtab->table1, hash1,
++ &key2, value, &key1,
++ &value)) {
++ /* Success */
++ hashtab->entries++;
++ return 0;
++ }
++ } while (++i < CUCKOO_HASH_MAX_LOOP);
++
++ if (can_rehash) {
++ if ((rc = cuckoo_hash_rehash(hashtab)) < 0) {
++ /*
++ * Give up - this will drop whichever
++ * key/value pair we have currently displaced
++ * on the floor
++ */
++ return rc;
++ }
++ goto again;
++ }
++
++ EPRINTK("%s: failed hash add\n", __FUNCTION__);
++ /*
++ * Couldn't do it - bad as we've now removed some random thing
++ * from the table, and will just drop it on the floor. Better
++ * would be to somehow revert the table to the state it was in
++ * at the start
++ */
++ return -ENOSPC;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_add);
++
++
++int cuckoo_hash_add_check(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value value,
++ int can_rehash)
++{
++	cuckoo_hash_value stored_value;
++
++ if (cuckoo_hash_lookup(hashtab, key, &stored_value))
++ return -EBUSY;
++
++ return cuckoo_hash_add(hashtab, key, value, can_rehash);
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_add_check);
++
++
++int cuckoo_hash_remove(cuckoo_hash_table *hashtab, cuckoo_hash_key *key)
++{
++ cuckoo_hash hash;
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ hashtab->table0[hash].state = CUCKOO_HASH_STATE_VACANT;
++ hashtab->entries--;
++ return 0;
++ }
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ hashtab->table1[hash].state = CUCKOO_HASH_STATE_VACANT;
++ hashtab->entries--;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_remove);
++
++
++int cuckoo_hash_update(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value)
++{
++ cuckoo_hash hash;
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a0);
++ if ((hashtab->table0[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table0[hash].key),
++ key)) {
++ hashtab->table0[hash].value = value;
++ return 0;
++ }
++
++ hash = cuckoo_compute_hash(hashtab, key, &hashtab->a1);
++ if ((hashtab->table1[hash].state == CUCKOO_HASH_STATE_OCCUPIED) &&
++ cuckoo_hash_key_compare(hashtab, &(hashtab->table1[hash].key),
++ key)) {
++ hashtab->table1[hash].value = value;
++ return 0;
++ }
++
++ return -EINVAL;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_update);
++
++
++void cuckoo_hash_iterate_reset(cuckoo_hash_table *hashtab)
++{
++ hashtab->iterate_index = 0;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_iterate_reset);
++
++
++int cuckoo_hash_iterate(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value *value)
++{
++ unsigned index;
++
++ while (hashtab->iterate_index < hashtab->length) {
++ index = hashtab->iterate_index;
++ ++hashtab->iterate_index;
++ if (hashtab->table0[index].state == CUCKOO_HASH_STATE_OCCUPIED) {
++ *key = hashtab->table0[index].key;
++ *value = hashtab->table0[index].value;
++ return 0;
++ }
++ }
++
++ while (hashtab->iterate_index >= hashtab->length &&
++ hashtab->iterate_index < hashtab->length * 2) {
++ index = hashtab->iterate_index - hashtab->length;
++ ++hashtab->iterate_index;
++ if (hashtab->table1[index].state == CUCKOO_HASH_STATE_OCCUPIED) {
++ *key = hashtab->table1[index].key;
++ *value = hashtab->table1[index].value;
++ return 0;
++ }
++ }
++
++ return -ENOSPC;
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_iterate);
++
++
++#if 0
++void cuckoo_hash_valid(cuckoo_hash_table *hashtab)
++{
++ int i, entry_count = 0;
++
++	for (i = 0; i < hashtab->length; i++) {
++ EPRINTK_ON(hashtab->table0[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table0[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ EPRINTK_ON(hashtab->table1[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table1[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ }
++
++ if (entry_count != hashtab->entries) {
++ EPRINTK("%s: bad count\n", __FUNCTION__);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++
++	for (i = 0; i < hashtab->length; i++) {
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ if (i != cuckoo_compute_hash(hashtab,
++ &hashtab->table0[i].key,
++ &hashtab->a0)) {
++ EPRINTK("%s: Bad key table 0 index %d\n",
++ __FUNCTION__, i);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ if (i != cuckoo_compute_hash(hashtab,
++ &hashtab->table1[i].key,
++ &hashtab->a1)) {
++ EPRINTK("%s: Bad key table 1 index %d\n",
++ __FUNCTION__, i);
++ cuckoo_hash_dump(hashtab);
++ return;
++ }
++ }
++
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_valid);
++
++
++void cuckoo_hash_dump(cuckoo_hash_table *hashtab)
++{
++ int i, entry_count;
++
++ entry_count = 0;
++	for (i = 0; i < hashtab->length; i++) {
++ EPRINTK_ON(hashtab->table0[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table0[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ EPRINTK_ON(hashtab->table1[i].state != CUCKOO_HASH_STATE_VACANT &&
++ hashtab->table1[i].state != CUCKOO_HASH_STATE_OCCUPIED);
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ entry_count++;
++ }
++
++ EPRINTK("======================\n");
++ EPRINTK("Cuckoo hash table dump\n");
++ EPRINTK("======================\n");
++ EPRINTK("length: %d; length_bits: %d; key_length: %d\n", hashtab->length,
++ hashtab->length_bits, hashtab->key_length);
++ EPRINTK("Recorded entries: %d\n", hashtab->entries);
++ EPRINTK("Counted entries: %d\n", entry_count);
++ EPRINTK("a0: %llx; a1: %llx\n", hashtab->a0, hashtab->a1);
++ EPRINTK("-----------------------------------------\n");
++ EPRINTK("Index Occupied Key Value Index0 Index1\n");
++ EPRINTK("-----------------------------------------\n");
++	for (i = 0; i < hashtab->length; i++) {
++ if (hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ EPRINTK("%d %d %llx %d %d %d\n", i,
++ hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED,
++ hashtab->table0[i].key, hashtab->table0[i].value,
++ cuckoo_compute_hash(hashtab, &hashtab->table0[i].key,
++ &hashtab->a0),
++ cuckoo_compute_hash(hashtab, &hashtab->table0[i].key,
++ &hashtab->a1));
++ else
++ EPRINTK("%d %d - - - -\n", i,
++ hashtab->table0[i].state == CUCKOO_HASH_STATE_OCCUPIED);
++
++ }
++ EPRINTK("-----------------------------------------\n");
++ EPRINTK("Index Occupied Key Value Index0 Index1\n");
++ EPRINTK("-----------------------------------------\n");
++	for (i = 0; i < hashtab->length; i++) {
++ if (hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED)
++ EPRINTK("%d %d %llx %d %d %d\n", i,
++ hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED,
++ hashtab->table1[i].key, hashtab->table1[i].value,
++ cuckoo_compute_hash(hashtab, &hashtab->table1[i].key,
++ &hashtab->a0),
++ cuckoo_compute_hash(hashtab, &hashtab->table1[i].key,
++ &hashtab->a1));
++ else
++ EPRINTK("%d %d - - - -\n", i,
++ hashtab->table1[i].state == CUCKOO_HASH_STATE_OCCUPIED);
++ }
++ EPRINTK("======================\n");
++}
++EXPORT_SYMBOL_GPL(cuckoo_hash_dump);
++#endif
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_cuckoo_hash.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_cuckoo_hash.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,227 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++/*
++ * A cuckoo hash table consists of two sub tables. Each entry can
++ * hash to a position in each table. If, on entry, its position is
++ * found to be occupied, the existing element is moved to its other
++ * location. This recurses until success or a loop is found. If a
++ * loop is found the table is rehashed.
++ *
++ * See http://www.it-c.dk/people/pagh/papers/cuckoo-jour.pdf
++ */
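++
++/*
++ * Worked example (illustrative only): suppose key K hashes to slot 3
++ * of table 0 and slot 5 of table 1, and slot 3 of table 0 already
++ * holds K'. K is written into slot 3 and K' is re-inserted at its
++ * alternative slot in table 1, displacing in turn whatever lives
++ * there. This continues until a vacant slot is reached or
++ * CUCKOO_HASH_MAX_LOOP displacements have been tried, at which point
++ * the table is rehashed (and possibly resized).
++ */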
++
++#ifndef NET_ACCEL_CUCKOO_HASH_H
++#define NET_ACCEL_CUCKOO_HASH_H
++
++/*! Type used for hash table keys of ip pairs */
++typedef struct {
++ u32 local_ip;
++	/* u32 remote_ip; */
++	u16 local_port;
++	/* u16 remote_port; */
++	/* Technically only 1 bit is needed, but use 16 bits to keep the
++	   key a round size */
++ u16 proto;
++} cuckoo_hash_ip_key;
++
++/*! Type used for hash table keys of mac addresses */
++typedef u64 cuckoo_hash_mac_key;
++
++/*! This type is designed to be large enough to hold all supported key
++ * sizes to avoid having to malloc storage for them.
++ */
++typedef u64 cuckoo_hash_key;
++
++/*! Type used for the values stored in the hash table */
++typedef int cuckoo_hash_value;
++
++/*! Type used for the hash used to index the table */
++typedef u32 cuckoo_hash;
++
++/*! How long to spend displacing values when adding before giving up
++ * and rehashing. NB: the macro expects a local variable named
++ * "hashtab" to be in scope at the point of use */
++#define CUCKOO_HASH_MAX_LOOP (hashtab->length)
++
++/*! State of hash table entry */
++typedef enum {
++ CUCKOO_HASH_STATE_VACANT = 0,
++ CUCKOO_HASH_STATE_OCCUPIED
++} cuckoo_hash_state;
++
++/*! An entry in the hash table */
++typedef struct {
++ cuckoo_hash_state state;
++ cuckoo_hash_key key;
++ cuckoo_hash_value value;
++} cuckoo_hash_entry;
++
++/*! A cuckoo hash table */
++typedef struct {
++ /*! The length of each table (NB. there are two tables of this
++ * length) */
++ unsigned length;
++ /*! The length of each table in bits */
++ unsigned length_bits;
++ /*! The length of the key in bytes */
++ unsigned key_length;
++ /*! The number of entries currently stored in the table */
++ unsigned entries;
++ /*! Index into table used by cuckoo_hash_iterate */
++ unsigned iterate_index;
++
++ /* parameter of hash functions */
++ /*! The "a" parameter of the first hash function */
++ cuckoo_hash_key a0;
++ /*! The "a" parameter of the second hash function */
++ cuckoo_hash_key a1;
++
++ /*! The first table */
++ cuckoo_hash_entry *table0;
++ /*! The second table */
++ cuckoo_hash_entry *table1;
++} cuckoo_hash_table;
++
++/*! Initialise the cuckoo hash table
++ *
++ * \param hashtab A pointer to an uninitialised hash table structure
++ * \param length_bits The number of elements in each table equals
++ * 2**length_bits
++ * \param key_length The length of the key in bytes
++ *
++ * \return 0 on success, -ENOMEM if it couldn't allocate the tables
++ */
++extern
++int cuckoo_hash_init(cuckoo_hash_table *hashtab, unsigned length_bits,
++ unsigned key_length);
++
++
++/*! Destroy a hash table
++ *
++ * \param hashtab A hash table that has previously been passed to a
++ * successful call of cuckoo_hash_init()
++ */
++extern
++void cuckoo_hash_destroy(cuckoo_hash_table *hashtab);
++
++
++/*! Lookup an entry in the hash table
++ *
++ * \param hashtab The hash table in which to look.
++ * \param key Pointer to the key to look up
++ * \param value On exit set to the value stored if key was present
++ *
++ * \return 0 if not present in the table, non-zero if it is (and value
++ * is set accordingly)
++ */
++extern
++int cuckoo_hash_lookup(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value *value);
++
++/*! Add an entry to the hash table. Key must not be a duplicate of
++ * anything already in the table. If this is a risk, see
++ * cuckoo_hash_add_check
++ *
++ * \param hashtab The hash table to add the entry to
++ * \param key Pointer to the key to add
++ * \param value The value to store
++ * \param can_rehash Flag to allow the add function to rehash the
++ * table if necessary
++ *
++ * \return 0 on success, non-zero on failure. -ENOSPC means it just
++ * couldn't find anywhere to put it - this is bad and probably means
++ * an entry has been dropped on the floor (but the entry you just
++ * tried to add may now be included)
++ */
++extern
++int cuckoo_hash_add(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ int can_rehash);
++
++/*! Same as cuckoo_hash_add but first checks to ensure entry is not
++ * already there
++ * \return -EBUSY if already there
++ */
++
++extern
++int cuckoo_hash_add_check(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key,
++ cuckoo_hash_value value,
++ int can_rehash);
++/*! Remove an entry from the table
++ *
++ * \param hashtab The hash table to remove the entry from
++ * \param key The key that was used to previously add the entry
++ *
++ * \return 0 on success, -EINVAL if the entry couldn't be found
++ */
++extern
++int cuckoo_hash_remove(cuckoo_hash_table *hashtab, cuckoo_hash_key *key);
++
++
++/*! Helper for those using mac addresses to convert to a key for the
++ * hash table
++ */
++static inline cuckoo_hash_mac_key cuckoo_mac_to_key(const u8 *mac)
++{
++ return (cuckoo_hash_mac_key)(mac[0])
++ | (cuckoo_hash_mac_key)(mac[1]) << 8
++ | (cuckoo_hash_mac_key)(mac[2]) << 16
++ | (cuckoo_hash_mac_key)(mac[3]) << 24
++ | (cuckoo_hash_mac_key)(mac[4]) << 32
++ | (cuckoo_hash_mac_key)(mac[5]) << 40;
++}
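++
++/*
++ * For example (illustrative only): the address 00:01:02:03:04:05
++ * maps to the key 0x050403020100, i.e. mac[0] occupies the least
++ * significant byte of the key.
++ */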
++
++
++/*! Update an entry already in the hash table to take a new value
++ *
++ * \param hashtab The hash table to add the entry to
++ * \param key Pointer to the key of the entry to update
++ * \param value The value to store
++ *
++ * \return 0 on success, non-zero on failure.
++ */
++int cuckoo_hash_update(cuckoo_hash_table *hashtab, cuckoo_hash_key *key,
++ cuckoo_hash_value value);
++
++
++/*! Go through the hash table and return all used entries (one per call)
++ *
++ * \param hashtab The hash table to iterate over
++ * \param key Pointer to a key to take the returned key
++ * \param value Pointer to a value to take the returned value
++ *
++ * \return 0 on success (key and value set), -ENOSPC once all entries
++ * have been returned
++ */
++int cuckoo_hash_iterate(cuckoo_hash_table *hashtab,
++ cuckoo_hash_key *key, cuckoo_hash_value *value);
++void cuckoo_hash_iterate_reset(cuckoo_hash_table *hashtab);
++
++/* debug, not compiled by default */
++void cuckoo_hash_valid(cuckoo_hash_table *hashtab);
++void cuckoo_hash_dump(cuckoo_hash_table *hashtab);
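++
++/*
++ * Minimal usage sketch (illustrative only; "mac" is assumed to be a
++ * u8[ETH_ALEN] supplied by the caller):
++ *
++ *	cuckoo_hash_table tab;
++ *	cuckoo_hash_key key;
++ *	cuckoo_hash_value value;
++ *
++ *	if (cuckoo_hash_init(&tab, 8, sizeof(cuckoo_hash_mac_key)))
++ *		return -ENOMEM;
++ *	key = cuckoo_mac_to_key(mac);
++ *	if (cuckoo_hash_add_check(&tab, &key, 42, 1) == 0 &&
++ *	    cuckoo_hash_lookup(&tab, &key, &value))
++ *		BUG_ON(value != 42);
++ *	cuckoo_hash_destroy(&tab);
++ */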
++
++#endif /* NET_ACCEL_CUCKOO_HASH_H */
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_msg_iface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_msg_iface.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,301 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <xen/evtchn.h>
++
++#include "accel_util.h"
++#include "accel_msg_iface.h"
++
++#define NET_ACCEL_MSG_Q_SIZE (1024)
++#define NET_ACCEL_MSG_Q_MASK (NET_ACCEL_MSG_Q_SIZE - 1)
++
++#ifdef NDEBUG
++#define NET_ACCEL_CHECK_MAGIC(_p, _errval)
++#define NET_ACCEL_SHOW_QUEUE(_t, _q, _id)
++#else
++#define NET_ACCEL_CHECK_MAGIC(_p, _errval) \
++ if (_p->magic != NET_ACCEL_MSG_MAGIC) { \
++ printk(KERN_ERR "%s: passed invalid shared page %p!\n", \
++ __FUNCTION__, _p); \
++ return _errval; \
++ }
++#define NET_ACCEL_SHOW_QUEUE(_t, _q, _id) \
++ printk(_t ": queue %d write %x read %x base %x limit %x\n", \
++ _id, _q->write, _q->read, _q->base, _q->limit);
++#endif
++
++/*
++ * We've been passed at least 2 pages. 1 control page and 1 or more
++ * data pages.
++ */
++int net_accel_msg_init_page(void *mem, int len, int up)
++{
++ struct net_accel_shared_page *shared_page =
++ (struct net_accel_shared_page*)mem;
++
++ if ((unsigned long)shared_page & NET_ACCEL_MSG_Q_MASK)
++ return -EINVAL;
++
++ shared_page->magic = NET_ACCEL_MSG_MAGIC;
++
++ shared_page->aflags = 0;
++
++ shared_page->net_dev_up = up;
++
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_init_page);
++
++
++void net_accel_msg_init_queue(sh_msg_fifo2 *queue,
++ struct net_accel_msg_queue *indices,
++ struct net_accel_msg *base, int size)
++{
++ queue->fifo = base;
++ spin_lock_init(&queue->lock);
++ sh_fifo2_init(queue, size-1, &indices->read, &indices->write);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_init_queue);
++
++
++static inline int _net_accel_msg_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg,
++ int is_reply)
++{
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ rmb();
++ if (is_reply) {
++ EPRINTK_ON(sh_fifo2_is_full(queue));
++ sh_fifo2_put(queue, *msg);
++ } else {
++ if (sh_fifo2_not_half_full(queue)) {
++ sh_fifo2_put(queue, *msg);
++ } else {
++ rc = -ENOSPC;
++ }
++ }
++ wmb();
++ return rc;
++}
++
++/* Notify after a batch of messages have been sent */
++void net_accel_msg_notify(int irq)
++{
++ notify_remote_via_irq(irq);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_notify);
++
++/*
++ * Send a message on the specified FIFO. Returns 0 on success, -errno
++ * on failure. The message in msg is copied to the current slot of the
++ * FIFO.
++ */
++int net_accel_msg_send(struct net_accel_shared_page *sp, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 0);
++ net_accel_msg_unlock_queue(q, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_send);
++
++
++/* As net_accel_msg_send but also posts a notification to the far end. */
++int net_accel_msg_send_notify(struct net_accel_shared_page *sp, int irq,
++ sh_msg_fifo2 *q, struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 0);
++ net_accel_msg_unlock_queue(q, &flags);
++ if (rc >= 0)
++ notify_remote_via_irq(irq);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_send_notify);
++
++
++int net_accel_msg_reply(struct net_accel_shared_page *sp, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 1);
++ net_accel_msg_unlock_queue(q, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_reply);
++
++
++/* As net_accel_msg_reply but also posts a notification to the far end. */
++int net_accel_msg_reply_notify(struct net_accel_shared_page *sp, int irq,
++ sh_msg_fifo2 *q, struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc;
++ net_accel_msg_lock_queue(q, &flags);
++ rc = _net_accel_msg_send(sp, q, msg, 1);
++ net_accel_msg_unlock_queue(q, &flags);
++ if (rc >= 0)
++ notify_remote_via_irq(irq);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_reply_notify);
++
++
++/*
++ * Look at a received message, if any, so a decision can be made about
++ * whether to read it now or not. Cookie is a bit of debug which is
++ * set here and checked when passed to net_accel_msg_recv_next()
++ */
++int net_accel_msg_peek(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg, int *cookie)
++{
++ unsigned long flags;
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ if (sh_fifo2_is_empty(queue)) {
++ rc = -ENOENT;
++ } else {
++ *msg = sh_fifo2_peek(queue);
++ *cookie = *(queue->fifo_rd_i);
++ }
++ net_accel_msg_unlock_queue(queue, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_peek);
++
++
++/*
++ * Move the queue onto the next element, used after finished with a
++ * peeked msg
++ */
++int net_accel_msg_recv_next(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, int cookie)
++{
++ unsigned long flags;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ /* Mustn't be empty */
++ BUG_ON(sh_fifo2_is_empty(queue));
++ /*
++ * Check cookie matches, i.e. we're advancing over the same message
++ * as was got using peek
++ */
++ BUG_ON(cookie != *(queue->fifo_rd_i));
++ sh_fifo2_rd_next(queue);
++ wmb();
++ net_accel_msg_unlock_queue(queue, &flags);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_recv_next);
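++
++/*
++ * Typical peek-then-consume pattern (illustrative sketch only;
++ * msg_can_be_handled() and handle_msg() are hypothetical):
++ *
++ *	struct net_accel_msg msg;
++ *	int cookie;
++ *
++ *	while (net_accel_msg_peek(sp, queue, &msg, &cookie) == 0) {
++ *		if (!msg_can_be_handled(&msg))
++ *			break;
++ *		handle_msg(&msg);
++ *		net_accel_msg_recv_next(sp, queue, cookie);
++ *	}
++ */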
++
++
++/*
++ * Receive a message on the specified FIFO. Returns 0 on success,
++ * -errno on failure.
++ */
++int net_accel_msg_recv(struct net_accel_shared_page *sp, sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg)
++{
++ unsigned long flags;
++ int rc = 0;
++ NET_ACCEL_CHECK_MAGIC(sp, -EINVAL);
++ net_accel_msg_lock_queue(queue, &flags);
++ rmb();
++ if (sh_fifo2_is_empty(queue)) {
++ rc = -ENOENT;
++ } else {
++ sh_fifo2_get(queue, msg);
++ }
++ wmb();
++ net_accel_msg_unlock_queue(queue, &flags);
++ return rc;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_recv);
++
++
++/*
++ * Start sending a message without copying. Returns a pointer to a message
++ * that will be filled out in place. The queue is locked until the message
++ * is sent.
++ */
++struct net_accel_msg *net_accel_msg_start_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ struct net_accel_msg *msg;
++ NET_ACCEL_CHECK_MAGIC(sp, NULL);
++ net_accel_msg_lock_queue(queue, flags);
++ rmb();
++ if (sh_fifo2_not_half_full(queue)) {
++ msg = sh_fifo2_pokep(queue);
++ } else {
++ net_accel_msg_unlock_queue(queue, flags);
++ msg = NULL;
++ }
++ return msg;
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_start_send);
++
++
++static inline void _msg_complete(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags)
++{
++ sh_fifo2_wr_next(queue);
++ net_accel_msg_unlock_queue(queue, flags);
++}
++
++/*
++ * Complete the sending of a message started with net_accel_msg_start_send. The
++ * message is implicit since the queue was locked by _start
++ */
++void net_accel_msg_complete_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags)
++{
++ _msg_complete(sp, queue, flags);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_complete_send);
++
++/* As net_accel_msg_complete_send but does the notify. */
++void net_accel_msg_complete_send_notify(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags, int irq)
++{
++ _msg_complete(sp, queue, flags);
++ notify_remote_via_irq(irq);
++}
++EXPORT_SYMBOL_GPL(net_accel_msg_complete_send_notify);
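++
++/*
++ * Zero-copy send pattern (illustrative sketch only; sp, queue and
++ * irq are assumed to come from the surrounding driver context):
++ *
++ *	unsigned long flags;
++ *	struct net_accel_msg *msg;
++ *
++ *	msg = net_accel_msg_start_send(sp, queue, &flags);
++ *	if (msg == NULL)
++ *		return -ENOSPC;
++ *	net_accel_msg_init(msg, NET_ACCEL_MSG_HELLO);
++ *	msg->u.hello.version = NET_ACCEL_MSG_VERSION;
++ *	net_accel_msg_complete_send_notify(sp, queue, &flags, irq);
++ *
++ * To give up instead, call net_accel_msg_abort_send(sp, queue, &flags).
++ */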
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_msg_iface.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_msg_iface.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,414 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NET_ACCEL_MSG_IFACE_H
++#define NET_ACCEL_MSG_IFACE_H
++
++#include <linux/ip.h>
++#include <linux/tcp.h>
++#include <linux/udp.h>
++#include <linux/in.h>
++#include <linux/netdevice.h>
++#include <linux/etherdevice.h>
++
++#include "accel_shared_fifo.h"
++
++#define NET_ACCEL_MSG_MAGIC (0x85465479)
++
++/*! We talk version 0.010 of the interdomain protocol */
++#define NET_ACCEL_MSG_VERSION (0x00001000)
++
++/*! Shared memory portion of inter-domain FIFO */
++struct net_accel_msg_queue {
++ u32 read;
++ u32 write;
++};
++
++
++/*
++ * The aflags in the following structure is used as follows:
++ *
++ * - each bit is set when one of the corresponding variables is
++ * changed by either end.
++ *
++ * - the end that has made the change then forwards an IRQ to the
++ * other
++ *
++ * - the IRQ handler deals with these bits either on the fast path, or
++ * for less common changes, by jumping onto the slow path.
++ *
++ * - once it has seen a change, it clears the relevant bit.
++ *
++ * aflags is accessed atomically using clear_bit, test_bit,
++ * test_and_set_bit etc
++ */
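++
++/*
++ * For example (sketch only; handle_queue_full() is hypothetical, and
++ * the cast follows the usual pattern for bitops on a u32 field):
++ *
++ *	set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ *		(unsigned long *)&sp->aflags);
++ *	notify_remote_via_irq(irq);
++ *
++ * and in the receiving domain's IRQ handler:
++ *
++ *	if (test_and_clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
++ *			       (unsigned long *)&sp->aflags))
++ *		handle_queue_full();
++ */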
++
++/*
++ * The following used to signify to the other domain when the queue
++ * they want to use is full, and when it is no longer full. Could be
++ * compressed to use fewer bits but done this way for simplicity and
++ * clarity
++ */
++
++/* "dom0->domU queue" is full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0FULL 0x1
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B 0
++/* "dom0->domU queue" is not full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL 0x2
++#define NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B 1
++/* "domU->dom0 queue" is full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUFULL 0x4
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B 2
++/* "domU->dom0 queue" is not full */
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL 0x8
++#define NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B 3
++/* dom0 -> domU net_dev up/down events */
++#define NET_ACCEL_MSG_AFLAGS_NETUPDOWN 0x10
++#define NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B 4
++
++/*
++ * Masks used to test if there are any messages for domU and dom0
++ * respectively
++ */
++#define NET_ACCEL_MSG_AFLAGS_TO_DOMU_MASK \
++ (NET_ACCEL_MSG_AFLAGS_QUEUE0FULL | \
++ NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL | \
++ NET_ACCEL_MSG_AFLAGS_NETUPDOWN)
++#define NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK \
++ (NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL | \
++ NET_ACCEL_MSG_AFLAGS_QUEUEUFULL)
++
++/*! The shared data structure used for inter-VM communication. */
++struct net_accel_shared_page {
++ /*! Sanity check */
++ u32 magic;
++ /*! Used by host/Dom0 */
++ struct net_accel_msg_queue queue0;
++ /*! Used by guest/DomU */
++ struct net_accel_msg_queue queue1;
++ /*! Atomic flags, used to communicate simple state changes */
++ u32 aflags;
++ /*! State of net_dev used for acceleration */
++ u32 net_dev_up;
++};
++
++
++enum net_accel_hw_type {
++ /*! Not a virtualisable NIC: use slow path. */
++ NET_ACCEL_MSG_HWTYPE_NONE = 0,
++ /*! NIC is Falcon-based */
++ NET_ACCEL_MSG_HWTYPE_FALCON_A = 1,
++ NET_ACCEL_MSG_HWTYPE_FALCON_B = 2,
++};
++
++/*! The maximum number of pages used by an event queue. */
++#define EF_HW_FALCON_EVQ_PAGES 8
++
++struct net_accel_hw_falcon_b {
++ /* VI */
++ /*! Grant for Tx DMA Q */
++ u32 txdmaq_gnt;
++ /*! Grant for Rx DMA Q */
++ u32 rxdmaq_gnt;
++ /*! Machine frame number for Tx/Rx doorbell page */
++ u32 doorbell_mfn;
++ /*! Grant for Tx/Rx doorbell page */
++ u32 doorbell_gnt;
++
++ /* Event Q */
++ /*! Grants for the pages of the EVQ */
++ u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
++ u32 evq_offs;
++ /*! log2(pages in event Q) */
++ u32 evq_order;
++ /*! Capacity in events */
++ u32 evq_capacity;
++ /*! Eventq pointer register physical address */
++ u32 evq_rptr;
++ /*! Interface instance */
++ u32 instance;
++ /*! Capacity of RX queue */
++ u32 rx_capacity;
++ /*! Capacity of TX queue */
++ u32 tx_capacity;
++
++ /* NIC */
++ s32 nic_arch;
++ s32 nic_revision;
++ u8 nic_variant;
++};
++
++struct net_accel_hw_falcon_a {
++ struct net_accel_hw_falcon_b common;
++ u32 evq_rptr_gnt;
++};
++
++
++/*! Description of the hardware that the DomU is being given. */
++struct net_accel_msg_hw {
++ u32 type; /*!< Hardware type */
++ union {
++ struct net_accel_hw_falcon_a falcon_a;
++ struct net_accel_hw_falcon_b falcon_b;
++ } resources;
++};
++
++/*! Start-of-day handshake message. Dom0 fills in its version and
++ * sends, DomU checks, inserts its version and replies
++ */
++struct net_accel_msg_hello {
++ /*! Sender's version (set by each side in turn) */
++ u32 version;
++ /*! max pages allocated/allowed for buffers */
++ u32 max_pages;
++};
++
++/*! Maximum number of page requests that can fit in a message. */
++#define NET_ACCEL_MSG_MAX_PAGE_REQ (8)
++
++/*! Request for NIC buffers. DomU fills out pages and grants (and,
++ * optionally, reqid); dom0 fills out buf and sends the reply
++ */
++struct net_accel_msg_map_buffers {
++ u32 reqid; /*!< Optional request ID */
++ u32 pages; /*!< Number of pages to map */
++ u32 grants[NET_ACCEL_MSG_MAX_PAGE_REQ]; /*!< Grant ids to map */
++ u32 buf; /*!< NIC buffer address of pages obtained */
++};
++
++/*! Notification of a change to local mac address, used to filter
++ locally destined packets off the fast path */
++struct net_accel_msg_localmac {
++ u32 flags; /*!< Should this be added or removed? */
++ u8 mac[ETH_ALEN]; /*!< The mac address to filter onto slow path */
++};
++
++struct net_accel_msg_fastpath {
++ u32 flags; /*!< Should this be added or removed? */
++ u8 mac[ETH_ALEN];/*!< The mac address to filter onto fast path */
++ u16 port; /*!< The port of the connection */
++ u32 ip; /*!< The IP address of the connection */
++ u8 proto; /*!< The protocol of connection (TCP/UDP) */
++};
++
++/*! Values for struct net_accel_msg_localmac/fastpath.flags */
++#define NET_ACCEL_MSG_ADD 0x1
++#define NET_ACCEL_MSG_REMOVE 0x2
++
++/*! Overall message structure */
++struct net_accel_msg {
++	/*! ID specifying type of message */
++ u32 id;
++ union {
++ /*! handshake */
++ struct net_accel_msg_hello hello;
++ /*! hardware description */
++ struct net_accel_msg_hw hw;
++ /*! buffer map request */
++ struct net_accel_msg_map_buffers mapbufs;
++ /*! mac address of a local interface */
++ struct net_accel_msg_localmac localmac;
++ /*! address of a new fastpath connection */
++ struct net_accel_msg_fastpath fastpath;
++ /*! make the message a fixed size */
++ u8 pad[128 - sizeof(u32)];
++ } u;
++};
++
++
++#define NET_ACCEL_MSG_HW_TO_MSG(_u) container_of(_u, struct net_accel_msg, u.hw)
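++
++/*
++ * Example of composing a message (illustrative only; gnt_ref is
++ * assumed to be a grant reference obtained by the caller):
++ *
++ *	struct net_accel_msg msg;
++ *
++ *	net_accel_msg_init(&msg, NET_ACCEL_MSG_MAPBUF);
++ *	msg.u.mapbufs.reqid = 0;
++ *	msg.u.mapbufs.pages = 1;
++ *	msg.u.mapbufs.grants[0] = gnt_ref;
++ */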
++
++/*! Inter-domain message FIFO */
++typedef struct {
++ struct net_accel_msg *fifo;
++ u32 fifo_mask;
++ u32 *fifo_rd_i;
++ u32 *fifo_wr_i;
++ spinlock_t lock;
++ u32 is_locked; /* Debug flag */
++} sh_msg_fifo2;
++
++
++#define NET_ACCEL_MSG_OFFSET_MASK PAGE_MASK
++
++/* Modifiers */
++#define NET_ACCEL_MSG_REPLY (0x80000000)
++#define NET_ACCEL_MSG_ERROR (0x40000000)
++
++/* Dom0 -> DomU and reply. Handshake/version check. */
++#define NET_ACCEL_MSG_HELLO (0x00000001)
++/* Dom0 -> DomU : hardware setup (VI info.) */
++#define NET_ACCEL_MSG_SETHW (0x00000002)
++/*
++ * Dom0 -> DomU. Notification of a local mac to add/remove from slow
++ * path filter
++ */
++#define NET_ACCEL_MSG_LOCALMAC (0x00000003)
++/*
++ * DomU -> Dom0 and reply. Request for buffer table entries for
++ * preallocated pages.
++ */
++#define NET_ACCEL_MSG_MAPBUF (0x00000004)
++/*
++ * Dom0 -> DomU. Notification of a local mac to add/remove from fast
++ * path filter
++ */
++#define NET_ACCEL_MSG_FASTPATH (0x00000005)
++
++/*! Initialise a message and set the type
++ * \param message : the message
++ * \param code : the message type
++ */
++static inline void net_accel_msg_init(struct net_accel_msg *msg, int code)
++{
++ msg->id = (u32)code;
++}
++
++/*! initialise a shared page structure
++ * \param shared_page : mapped memory in which the structure resides
++ * \param len : size of the message FIFO area that follows
++ * \param up : initial up/down state of netdev
++ * \return 0 or an error code
++ */
++extern int net_accel_msg_init_page(void *shared_page, int len, int up);
++
++/*! initialise a message queue
++ * \param queue : the message FIFO to initialise
++ * \param indices : the read and write indices in shared memory
++ * \param base : the start of the memory area for the FIFO
++ * \param size : the size of the FIFO in bytes
++ */
++extern void net_accel_msg_init_queue(sh_msg_fifo2 *queue,
++ struct net_accel_msg_queue *indices,
++ struct net_accel_msg *base, int size);
++
++/* Notify after a batch of messages have been sent */
++extern void net_accel_msg_notify(int irq);
++
++/*! Send a message on the specified FIFO. The message is copied to the
++ * current slot of the FIFO.
++ * \param sp : pointer to shared page
++ * \param q : pointer to message FIFO to use
++ * \param msg : pointer to message
++ * \return 0 on success, -errno on failure
++ */
++extern int net_accel_msg_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++extern int net_accel_msg_reply(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! As net_accel_msg_send but also posts a notification to the far end. */
++extern int net_accel_msg_send_notify(struct net_accel_shared_page *sp,
++ int irq, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++/*! As net_accel_msg_reply but also posts a notification to the far end. */
++extern int net_accel_msg_reply_notify(struct net_accel_shared_page *sp,
++ int irq, sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! Receive a message on the specified FIFO. Returns 0 on success,
++ * -errno on failure.
++ */
++extern int net_accel_msg_recv(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *q,
++ struct net_accel_msg *msg);
++
++/*! Look at a received message, if any, so a decision can be made
++ * about whether to read it now or not. Cookie is a bit of debug
++ * which is set here and checked when passed to
++ * net_accel_msg_recv_next()
++ */
++extern int net_accel_msg_peek(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ struct net_accel_msg *msg, int *cookie);
++/*! Move the queue onto the next element, used after finished with a
++ * peeked msg
++ */
++extern int net_accel_msg_recv_next(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, int cookie);
++
++/*! Start sending a message without copying. Returns a pointer to a
++ * message that will be filled out in place. The queue is locked
++ * until the message is sent.
++ */
++extern
++struct net_accel_msg *net_accel_msg_start_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags);
++
++
++/*! Complete the sending of a message started with
++ * net_accel_msg_start_send. The message is implicit since the queue
++ * was locked by _start
++ */
++extern void net_accel_msg_complete_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags);
++
++/*! As net_accel_msg_complete_send but does the notify. */
++extern void net_accel_msg_complete_send_notify(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue,
++ unsigned long *flags, int irq);
++
++/*! Lock the queue so that multiple "_locked" functions can be called
++ * without the queue being modified by others
++ */
++static inline
++void net_accel_msg_lock_queue(sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ spin_lock_irqsave(&queue->lock, (*flags));
++ rmb();
++ BUG_ON(queue->is_locked);
++ queue->is_locked = 1;
++}
++
++/*! Unlock the queue */
++static inline
++void net_accel_msg_unlock_queue(sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ BUG_ON(!queue->is_locked);
++ queue->is_locked = 0;
++ wmb();
++ spin_unlock_irqrestore(&queue->lock, (*flags));
++}
++
++/*! Give up without sending a message that was started with
++ * net_accel_msg_start_send()
++ */
++static inline
++void net_accel_msg_abort_send(struct net_accel_shared_page *sp,
++ sh_msg_fifo2 *queue, unsigned long *flags)
++{
++ net_accel_msg_unlock_queue(queue, flags);
++}
++
++/*! Test the queue to ensure there is sufficient space */
++static inline
++int net_accel_msg_check_space(sh_msg_fifo2 *queue, unsigned space)
++{
++ return sh_fifo2_space(queue) >= space;
++}
++
++#endif /* NET_ACCEL_MSG_IFACE_H */
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_shared_fifo.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_shared_fifo.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NET_ACCEL_SHARED_FIFO_H
++#define NET_ACCEL_SHARED_FIFO_H
++
++/*
++ * This is based on fifo.h, but handles sharing between address spaces
++ * that don't trust each other, by splitting out the read and write
++ * indices. This costs at least one pointer indirection more than the
++ * vanilla version per access.
++ */
++
++typedef struct {
++	char *fifo;
++ unsigned fifo_mask;
++ unsigned *fifo_rd_i;
++ unsigned *fifo_wr_i;
++} sh_byte_fifo2;
++
++#define SH_FIFO2_M(f, x) ((x) & ((f)->fifo_mask))
++
++static inline unsigned log2_ge(unsigned long n, unsigned min_order)
++{
++	unsigned order = min_order;
++	while ((1ul << order) < n)
++		++order;
++	return order;
++}
++
++static inline unsigned long pow2(unsigned order)
++{
++	return (1ul << order);
++}
++
++#define is_pow2(x) (pow2(log2_ge((x), 0)) == (x))
++
++#define sh_fifo2_valid(f) ((f) && (f)->fifo && (f)->fifo_mask > 0 && \
++ is_pow2((f)->fifo_mask+1u))
++
++#define sh_fifo2_init(f, cap, _rptr, _wptr) \
++ do { \
++ BUG_ON(!is_pow2((cap) + 1)); \
++ (f)->fifo_rd_i = _rptr; \
++ (f)->fifo_wr_i = _wptr; \
++ *(f)->fifo_rd_i = *(f)->fifo_wr_i = 0u; \
++ (f)->fifo_mask = (cap); \
++ } while(0)
++
++#define sh_fifo2_num(f) SH_FIFO2_M((f),*(f)->fifo_wr_i - *(f)->fifo_rd_i)
++#define sh_fifo2_space(f) SH_FIFO2_M((f),*(f)->fifo_rd_i - *(f)->fifo_wr_i-1u)
++#define sh_fifo2_is_empty(f) (sh_fifo2_num(f)==0)
++#define sh_fifo2_not_empty(f) (sh_fifo2_num(f)!=0)
++#define sh_fifo2_is_full(f) (sh_fifo2_space(f)==0u)
++#define sh_fifo2_not_full(f) (sh_fifo2_space(f)!=0u)
++#define sh_fifo2_buf_size(f) ((f)->fifo_mask + 1u)
++#define sh_fifo2_capacity(f) ((f)->fifo_mask)
++#define sh_fifo2_end(f) ((f)->fifo + sh_fifo2_buf_size(f))
++#define sh_fifo2_not_half_full(f) (sh_fifo2_space(f) > (sh_fifo2_capacity(f) >> 1))
++
++#define sh_fifo2_peek(f) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_rd_i)])
++#define sh_fifo2_peekp(f) ((f)->fifo + SH_FIFO2_M((f), *(f)->fifo_rd_i))
++#define sh_fifo2_poke(f) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_wr_i)])
++#define sh_fifo2_pokep(f) ((f)->fifo + SH_FIFO2_M((f), *(f)->fifo_wr_i))
++#define sh_fifo2_peek_i(f,i) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_rd_i+(i))])
++#define sh_fifo2_poke_i(f,i) ((f)->fifo[SH_FIFO2_M((f), *(f)->fifo_wr_i+(i))])
++
++#define sh_fifo2_rd_next(f) \
++ do {*(f)->fifo_rd_i = *(f)->fifo_rd_i + 1u;} while(0)
++#define sh_fifo2_wr_next(f) \
++ do {*(f)->fifo_wr_i = *(f)->fifo_wr_i + 1u;} while(0)
++#define sh_fifo2_rd_adv(f, n) \
++ do {*(f)->fifo_rd_i = *(f)->fifo_rd_i + (n);} while(0)
++#define sh_fifo2_wr_adv(f, n) \
++ do {*(f)->fifo_wr_i = *(f)->fifo_wr_i + (n);} while(0)
++
++#define sh_fifo2_put(f, v) \
++ do {sh_fifo2_poke(f) = (v); wmb(); sh_fifo2_wr_next(f);} while(0)
++
++#define sh_fifo2_get(f, pv) \
++ do {*(pv) = sh_fifo2_peek(f); mb(); sh_fifo2_rd_next(f);} while(0)
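++
++/*
++ * Minimal usage sketch (illustrative only; note the capacity passed
++ * to sh_fifo2_init() must be one less than a power of two):
++ *
++ *	sh_byte_fifo2 f;
++ *	unsigned rd, wr;
++ *	char buf[16], c;
++ *
++ *	f.fifo = buf;
++ *	sh_fifo2_init(&f, sizeof(buf) - 1, &rd, &wr);
++ *	sh_fifo2_put(&f, 'x');
++ *	if (sh_fifo2_not_empty(&f))
++ *		sh_fifo2_get(&f, &c);
++ */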
++
++static inline unsigned sh_fifo2_contig_num(sh_byte_fifo2 *f)
++{
++ unsigned fifo_wr_i = SH_FIFO2_M(f, *f->fifo_wr_i);
++ unsigned fifo_rd_i = SH_FIFO2_M(f, *f->fifo_rd_i);
++
++ return (fifo_wr_i >= fifo_rd_i)
++ ? fifo_wr_i - fifo_rd_i
++		: f->fifo_mask + 1u - fifo_rd_i;
++}
++
++static inline unsigned sh_fifo2_contig_space(sh_byte_fifo2 *f)
++{
++ unsigned fifo_wr_i = SH_FIFO2_M(f, *f->fifo_wr_i);
++ unsigned fifo_rd_i = SH_FIFO2_M(f, *f->fifo_rd_i);
++
++ return (fifo_rd_i > fifo_wr_i)
++ ? fifo_rd_i - fifo_wr_i - 1
++ : (f->fifo_mask + 1u - fifo_wr_i
++ /*
++ * The last byte can't be used if the read pointer
++ * is at zero.
++ */
++ - (fifo_rd_i==0));
++}
++
++
++#endif /* NET_ACCEL_SHARED_FIFO_H */
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_util.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_util.c 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,333 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#include <linux/if_ether.h>
++#include <asm/io.h>
++#include <asm/pgtable.h>
++#include <asm/hypercall.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++#include <xen/gnttab.h>
++
++#include "accel_util.h"
++
++#ifdef EFX_GCOV
++#include "gcov.h"
++
++static int __init net_accel_init(void)
++{
++ gcov_provider_init(THIS_MODULE);
++ return 0;
++}
++module_init(net_accel_init);
++
++static void __exit net_accel_exit(void)
++{
++ gcov_provider_fini(THIS_MODULE);
++}
++module_exit(net_accel_exit);
++#endif
++
++/* Shutdown remote domain that is misbehaving */
++int net_accel_shutdown_remote(int domain)
++{
++ struct sched_remote_shutdown sched_shutdown = {
++ .domain_id = domain,
++ .reason = SHUTDOWN_crash
++ };
++
++ EPRINTK("Crashing domain %d\n", domain);
++
++ return HYPERVISOR_sched_op(SCHEDOP_remote_shutdown, &sched_shutdown);
++}
++EXPORT_SYMBOL(net_accel_shutdown_remote);
++
++
++/* Based on xenbus_backend_client.c:xenbus_map_ring() */
++static int net_accel_map_grant(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr,
++ u64 *dev_bus_addr, unsigned flags)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)vaddr, flags,
++ gnt_ref, dev->otherend_id);
++
++ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1));
++
++ if (op.status != GNTST_okay) {
++ xenbus_dev_error
++ (dev, op.status,
++ "failed mapping in shared page %d from domain %d\n",
++ gnt_ref, dev->otherend_id);
++ } else {
++ *handle = op.handle;
++ if (dev_bus_addr)
++ *dev_bus_addr = op.dev_bus_addr;
++ }
++
++ return op.status;
++}
++
++
++/* Based on xenbus_backend_client.c:xenbus_unmap_ring() */
++static int net_accel_unmap_grant(struct xenbus_device *dev,
++ grant_handle_t handle,
++ void *vaddr, u64 dev_bus_addr,
++ unsigned flags)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)vaddr, flags, handle);
++
++ if (dev_bus_addr)
++ op.dev_bus_addr = dev_bus_addr;
++
++ BUG_ON(HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1));
++
++ if (op.status != GNTST_okay)
++ xenbus_dev_error(dev, op.status,
++ "failed unmapping page at handle %d error %d\n",
++ handle, op.status);
++
++ return op.status;
++}
++
++
++int net_accel_map_device_page(struct xenbus_device *dev,
++ int gnt_ref, grant_handle_t *handle,
++ u64 *dev_bus_addr)
++{
++ return net_accel_map_grant(dev, gnt_ref, handle, 0, dev_bus_addr,
++ GNTMAP_device_map);
++}
++EXPORT_SYMBOL_GPL(net_accel_map_device_page);
++
++
++int net_accel_unmap_device_page(struct xenbus_device *dev,
++ grant_handle_t handle, u64 dev_bus_addr)
++{
++ return net_accel_unmap_grant(dev, handle, 0, dev_bus_addr,
++ GNTMAP_device_map);
++}
++EXPORT_SYMBOL_GPL(net_accel_unmap_device_page);
++
++
++struct net_accel_valloc_grant_mapping {
++ struct vm_struct *vm;
++ int pages;
++ grant_handle_t grant_handles[0];
++};
++
++/* Map a series of grants into a contiguous virtual area */
++static void *net_accel_map_grants_valloc(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ unsigned flags, void **priv)
++{
++ struct net_accel_valloc_grant_mapping *map;
++ struct vm_struct *vm;
++ void *addr;
++ int i, j, rc;
++
++ vm = alloc_vm_area(PAGE_SIZE * npages);
++ if (vm == NULL) {
++ EPRINTK("No memory from alloc_vm_area.\n");
++ return NULL;
++ }
++ /*
++ * Get a structure in which we will record all the info needed
++ * to undo the mapping.
++ */
++ map = kzalloc(sizeof(struct net_accel_valloc_grant_mapping) +
++ npages * sizeof(grant_handle_t), GFP_KERNEL);
++ if (map == NULL) {
++ EPRINTK("No memory for net_accel_valloc_grant_mapping\n");
++ free_vm_area(vm);
++ return NULL;
++ }
++ map->vm = vm;
++ map->pages = npages;
++
++ /* Do the actual mapping */
++ addr = vm->addr;
++ for (i = 0; i < npages; i++) {
++ rc = net_accel_map_grant(dev, grants[i], map->grant_handles + i,
++ addr, NULL, flags);
++ if (rc != 0)
++ goto undo;
++ addr = (void*)((unsigned long)addr + PAGE_SIZE);
++ }
++
++ if (priv)
++ *priv = (void *)map;
++ else
++ kfree(map);
++
++ return vm->addr;
++
++ undo:
++ EPRINTK("Aborting contig map due to single map failure %d (%d of %d)\n",
++ rc, i+1, npages);
++ for (j = 0; j < i; j++) {
++ addr = (void*)((unsigned long)vm->addr + (j * PAGE_SIZE));
++ net_accel_unmap_grant(dev, map->grant_handles[j], addr, 0,
++ flags);
++ }
++ free_vm_area(vm);
++ kfree(map);
++ return NULL;
++}
++
++/* Undo the result of the mapping */
++static void net_accel_unmap_grants_vfree(struct xenbus_device *dev,
++ unsigned flags, void *priv)
++{
++ struct net_accel_valloc_grant_mapping *map =
++ (struct net_accel_valloc_grant_mapping *)priv;
++
++ void *addr = map->vm->addr;
++ int npages = map->pages;
++ int i;
++
++ for (i = 0; i < npages; i++) {
++ net_accel_unmap_grant(dev, map->grant_handles[i], addr, 0,
++ flags);
++ addr = (void*)((unsigned long)addr + PAGE_SIZE);
++ }
++ free_vm_area(map->vm);
++ kfree(map);
++}
++
++
++void *net_accel_map_grants_contig(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ void **priv)
++{
++ return net_accel_map_grants_valloc(dev, grants, npages,
++ GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_map_grants_contig);
++
++
++void net_accel_unmap_grants_contig(struct xenbus_device *dev,
++ void *priv)
++{
++ net_accel_unmap_grants_vfree(dev, GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_unmap_grants_contig);
++
++
++void *net_accel_map_iomem_page(struct xenbus_device *dev, int gnt_ref,
++ void **priv)
++{
++ return net_accel_map_grants_valloc(dev, &gnt_ref, 1,
++ GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_map_iomem_page);
++
++
++void net_accel_unmap_iomem_page(struct xenbus_device *dev, void *priv)
++{
++ net_accel_unmap_grants_vfree(dev, GNTMAP_host_map, priv);
++}
++EXPORT_SYMBOL(net_accel_unmap_iomem_page);
++
++
++int net_accel_grant_page(struct xenbus_device *dev, unsigned long mfn,
++ int is_iomem)
++{
++ int err = gnttab_grant_foreign_access(dev->otherend_id, mfn,
++ is_iomem ? GTF_PCD : 0);
++ if (err < 0)
++ xenbus_dev_error(dev, err, "failed granting access to page\n");
++ return err;
++}
++EXPORT_SYMBOL_GPL(net_accel_grant_page);
++
++
++int net_accel_ungrant_page(grant_ref_t gntref)
++{
++ if (unlikely(gnttab_query_foreign_access(gntref) != 0)) {
++ EPRINTK("%s: remote domain still using grant %d\n", __FUNCTION__,
++ gntref);
++ return -EBUSY;
++ }
++
++ gnttab_end_foreign_access(gntref, 0);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_ungrant_page);
++
++
++int net_accel_xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
++{
++ char *s, *e, *macstr;
++ int i;
++
++ macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
++ if (IS_ERR(macstr))
++ return PTR_ERR(macstr);
++
++ for (i = 0; i < ETH_ALEN; i++) {
++ mac[i] = simple_strtoul(s, &e, 16);
++ if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
++ kfree(macstr);
++ return -ENOENT;
++ }
++ s = e+1;
++ }
++
++ kfree(macstr);
++ return 0;
++}
++EXPORT_SYMBOL_GPL(net_accel_xen_net_read_mac);
++
++
++void net_accel_update_state(struct xenbus_device *dev, int state)
++{
++ struct xenbus_transaction tr;
++ int err;
++
++ DPRINTK("%s: setting accelstate to %s\n", __FUNCTION__,
++ xenbus_strstate(state));
++
++ if (xenbus_exists(XBT_NIL, dev->nodename, "")) {
++ VPRINTK("%s: nodename %s\n", __FUNCTION__, dev->nodename);
++ again:
++ err = xenbus_transaction_start(&tr);
++ if (err == 0)
++ err = xenbus_printf(tr, dev->nodename, "accelstate",
++ "%d", state);
++ if (err != 0) {
++ xenbus_transaction_end(tr, 1);
++ } else {
++ err = xenbus_transaction_end(tr, 0);
++ if (err == -EAGAIN)
++ goto again;
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(net_accel_update_state);
++
++MODULE_LICENSE("GPL");
+Index: head-2008-11-25/drivers/xen/sfc_netutil/accel_util.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/sfc_netutil/accel_util.h 2008-02-20 09:32:49.000000000 +0100
+@@ -0,0 +1,127 @@
++/****************************************************************************
++ * Solarflare driver for Xen network acceleration
++ *
++ * Copyright 2006-2008: Solarflare Communications Inc,
++ * 9501 Jeronimo Road, Suite 250,
++ * Irvine, CA 92618, USA
++ *
++ * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
++ *
++ * This program is free software; you can redistribute it and/or modify it
++ * under the terms of the GNU General Public License version 2 as published
++ * by the Free Software Foundation, incorporated herein by reference.
++ *
++ * This program is distributed in the hope that it will be useful,
++ * but WITHOUT ANY WARRANTY; without even the implied warranty of
++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ * GNU General Public License for more details.
++ *
++ * You should have received a copy of the GNU General Public License
++ * along with this program; if not, write to the Free Software
++ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
++ ****************************************************************************
++ */
++
++#ifndef NETBACK_ACCEL_UTIL_H
++#define NETBACK_ACCEL_UTIL_H
++
++#ifdef DPRINTK
++#undef DPRINTK
++#endif
++
++#define FILE_LEAF (strrchr(__FILE__, '/') ? strrchr(__FILE__, '/') + 1 : __FILE__)
++
++#if 1
++#define VPRINTK(_f, _a...)
++#else
++#define VPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++#endif
++
++#if 1
++#define DPRINTK(_f, _a...)
++#else
++#define DPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++#endif
++
++#define EPRINTK(_f, _a...) \
++ printk("(file=%s, line=%d) " _f, \
++ FILE_LEAF , __LINE__ , ## _a )
++
++#define EPRINTK_ON(exp) \
++ do { \
++ if (exp) \
++ EPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
++ } while(0)
++
++#define DPRINTK_ON(exp) \
++ do { \
++ if (exp) \
++ DPRINTK("%s at %s:%d\n", #exp, __FILE__, __LINE__); \
++ } while(0)
++
++#define MAC_FMT "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x"
++#define MAC_ARG(_mac) (_mac)[0], (_mac)[1], (_mac)[2], (_mac)[3], (_mac)[4], (_mac)[5]
++
++#include <xen/xenbus.h>
++
++/*! Map a set of pages from another domain
++ * \param dev The xenbus device context
++ * \param grants The grant references of the pages to map
++ * \param npages The number of pages to map
++ * \param priv On return, private data to pass to the unmap function
++ */
++extern
++void *net_accel_map_grants_contig(struct xenbus_device *dev,
++ unsigned *grants, int npages,
++ void **priv);
++
++/*! Unmap a set of pages mapped using net_accel_map_grants_contig.
++ * \param dev The xenbus device context
++ * \param priv The private data returned by the mapping function
++ */
++extern
++void net_accel_unmap_grants_contig(struct xenbus_device *dev, void *priv);
++
++/*! Read the MAC address of a device from xenstore */
++extern
++int net_accel_xen_net_read_mac(struct xenbus_device *dev, u8 mac[]);
++
++/*! Update the accelstate field for a device in xenstore */
++extern
++void net_accel_update_state(struct xenbus_device *dev, int state);
++
++/* These four map/unmap functions are based on
++ * xenbus_backend_client.c:xenbus_map_ring(). However, they are not
++ * used for ring buffers, instead just to map pages between domains,
++ * or to map a page so that it is accessible by a device
++ */
++extern
++int net_accel_map_device_page(struct xenbus_device *dev,
++ int gnt_ref, grant_handle_t *handle,
++ u64 *dev_bus_addr);
++extern
++int net_accel_unmap_device_page(struct xenbus_device *dev,
++ grant_handle_t handle, u64 dev_bus_addr);
++extern
++void *net_accel_map_iomem_page(struct xenbus_device *dev, int gnt_ref,
++ void **priv);
++extern
++void net_accel_unmap_iomem_page(struct xenbus_device *dev, void *priv);
++
++/*! Grant a page to a remote domain */
++extern
++int net_accel_grant_page(struct xenbus_device *dev, unsigned long mfn,
++ int is_iomem);
++/*! Undo a net_accel_grant_page */
++extern
++int net_accel_ungrant_page(grant_ref_t gntref);
++
++
++/*! Shutdown remote domain that is misbehaving */
++extern
++int net_accel_shutdown_remote(int domain);
++
++
++#endif
+Index: head-2008-11-25/drivers/xen/tpmback/Makefile
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/tpmback/Makefile 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,4 @@
++
++obj-$(CONFIG_XEN_TPMDEV_BACKEND) += tpmbk.o
++
++tpmbk-y += tpmback.o interface.o xenbus.o
+Index: head-2008-11-25/drivers/xen/tpmback/common.h
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/tpmback/common.h 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,85 @@
++/******************************************************************************
++ * drivers/xen/tpmback/common.h
++ */
++
++#ifndef __TPM__BACKEND__COMMON_H__
++#define __TPM__BACKEND__COMMON_H__
++
++#include <linux/version.h>
++#include <linux/module.h>
++#include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <xen/evtchn.h>
++#include <xen/driver_util.h>
++#include <xen/interface/grant_table.h>
++#include <xen/interface/io/tpmif.h>
++#include <asm/io.h>
++#include <asm/pgalloc.h>
++
++#define DPRINTK(_f, _a...) \
++ pr_debug("(file=%s, line=%d) " _f, \
++ __FILE__ , __LINE__ , ## _a )
++
++struct backend_info;
++
++typedef struct tpmif_st {
++ struct list_head tpmif_list;
++ /* Unique identifier for this interface. */
++ domid_t domid;
++ unsigned int handle;
++
++ /* Physical parameters of the comms window. */
++ unsigned int irq;
++
++ /* The shared rings and indexes. */
++ tpmif_tx_interface_t *tx;
++ struct vm_struct *tx_area;
++
++ /* Miscellaneous private stuff. */
++ enum { DISCONNECTED, DISCONNECTING, CONNECTED } status;
++ int active;
++
++ struct tpmif_st *hash_next;
++ struct list_head list; /* scheduling list */
++ atomic_t refcnt;
++
++ struct backend_info *bi;
++
++ grant_handle_t shmem_handle;
++ grant_ref_t shmem_ref;
++ struct page **mmap_pages;
++
++ char devname[20];
++} tpmif_t;
++
++void tpmif_disconnect_complete(tpmif_t * tpmif);
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi);
++void tpmif_interface_init(void);
++void tpmif_interface_exit(void);
++void tpmif_schedule_work(tpmif_t * tpmif);
++void tpmif_deschedule_work(tpmif_t * tpmif);
++void tpmif_xenbus_init(void);
++void tpmif_xenbus_exit(void);
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn);
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs);
++
++long int tpmback_get_instance(struct backend_info *bi);
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs);
++
++
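++/* Reference counting: the last tpmif_put() tears the interface down. */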
++#define tpmif_get(_b) (atomic_inc(&(_b)->refcnt))
++#define tpmif_put(_b) \
++ do { \
++ if (atomic_dec_and_test(&(_b)->refcnt)) \
++ tpmif_disconnect_complete(_b); \
++ } while (0)
++
++extern int num_frontends;
++
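++/* Translate a tx ring index into the kernel virtual address of its mapped page. */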
++static inline unsigned long idx_to_kaddr(tpmif_t *t, unsigned int idx)
++{
++ return (unsigned long)pfn_to_kaddr(page_to_pfn(t->mmap_pages[idx]));
++}
++
++#endif /* __TPM__BACKEND__COMMON_H__ */
+Index: head-2008-11-25/drivers/xen/tpmback/interface.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/tpmback/interface.c 2008-01-21 11:15:26.000000000 +0100
+@@ -0,0 +1,168 @@
++/******************************************************************************
++ * drivers/xen/tpmback/interface.c
++ *
++ * Virtual TPM interface management.
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ *
++ * This code has been derived from drivers/xen/netback/interface.c
++ * Copyright (c) 2004, Keir Fraser
++ */
++
++#include "common.h"
++#include <xen/balloon.h>
++#include <xen/gnttab.h>
++
++static kmem_cache_t *tpmif_cachep;
++int num_frontends = 0;
++
++LIST_HEAD(tpmif_list);
++
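++/* Allocate a tpmif for the given frontend domain and add it to the global list. */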
++static tpmif_t *alloc_tpmif(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ tpmif = kmem_cache_alloc(tpmif_cachep, GFP_KERNEL);
++ if (tpmif == NULL)
++ goto out_of_memory;
++
++ memset(tpmif, 0, sizeof (*tpmif));
++ tpmif->domid = domid;
++ tpmif->status = DISCONNECTED;
++ tpmif->bi = bi;
++ snprintf(tpmif->devname, sizeof(tpmif->devname), "tpmif%d", domid);
++ atomic_set(&tpmif->refcnt, 1);
++
++ tpmif->mmap_pages = alloc_empty_pages_and_pagevec(TPMIF_TX_RING_SIZE);
++ if (tpmif->mmap_pages == NULL)
++ goto out_of_memory;
++
++ list_add(&tpmif->tpmif_list, &tpmif_list);
++ num_frontends++;
++
++ return tpmif;
++
++ out_of_memory:
++ if (tpmif != NULL)
++ kmem_cache_free(tpmif_cachep, tpmif);
++ printk("%s: out of memory\n", __FUNCTION__);
++ return ERR_PTR(-ENOMEM);
++}
++
++static void free_tpmif(tpmif_t * tpmif)
++{
++ num_frontends--;
++ list_del(&tpmif->tpmif_list);
++ free_empty_pages_and_pagevec(tpmif->mmap_pages, TPMIF_TX_RING_SIZE);
++ kmem_cache_free(tpmif_cachep, tpmif);
++}
++
++tpmif_t *tpmif_find(domid_t domid, struct backend_info *bi)
++{
++ tpmif_t *tpmif;
++
++ list_for_each_entry(tpmif, &tpmif_list, tpmif_list) {
++ if (tpmif->bi == bi) {
++ if (tpmif->domid == domid) {
++ tpmif_get(tpmif);
++ return tpmif;
++ } else {
++ return ERR_PTR(-EEXIST);
++ }
++ }
++ }
++
++ return alloc_tpmif(domid, bi);
++}
++
++static int map_frontend_page(tpmif_t *tpmif, unsigned long shared_page)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, shared_page, tpmif->domid);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return op.status;
++ }
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->shmem_handle = op.handle;
++
++ return 0;
++}
++
++static void unmap_frontend_page(tpmif_t *tpmif)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)tpmif->tx_area->addr,
++ GNTMAP_host_map, tpmif->shmem_handle);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++}
++
++int tpmif_map(tpmif_t *tpmif, unsigned long shared_page, unsigned int evtchn)
++{
++ int err;
++
++ if (tpmif->irq)
++ return 0;
++
++ if ((tpmif->tx_area = alloc_vm_area(PAGE_SIZE)) == NULL)
++ return -ENOMEM;
++
++ err = map_frontend_page(tpmif, shared_page);
++ if (err) {
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++
++ tpmif->tx = (tpmif_tx_interface_t *)tpmif->tx_area->addr;
++ memset(tpmif->tx, 0, PAGE_SIZE);
++
++ err = bind_interdomain_evtchn_to_irqhandler(
++ tpmif->domid, evtchn, tpmif_be_int, 0, tpmif->devname, tpmif);
++ if (err < 0) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ return err;
++ }
++ tpmif->irq = err;
++
++ tpmif->shmem_ref = shared_page;
++ tpmif->active = 1;
++
++ return 0;
++}
++
++void tpmif_disconnect_complete(tpmif_t *tpmif)
++{
++ if (tpmif->irq)
++ unbind_from_irqhandler(tpmif->irq, tpmif);
++
++ if (tpmif->tx) {
++ unmap_frontend_page(tpmif);
++ free_vm_area(tpmif->tx_area);
++ }
++
++ free_tpmif(tpmif);
++}
++
++void __init tpmif_interface_init(void)
++{
++ tpmif_cachep = kmem_cache_create("tpmif_cache", sizeof (tpmif_t),
++ 0, 0, NULL, NULL);
++}
++
++void __exit tpmif_interface_exit(void)
++{
++ kmem_cache_destroy(tpmif_cachep);
++}
+Index: head-2008-11-25/drivers/xen/tpmback/tpmback.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/tpmback/tpmback.c 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,944 @@
++/******************************************************************************
++ * drivers/xen/tpmback/tpmback.c
++ *
++ * Copyright (c) 2005, IBM Corporation
++ *
++ * Author: Stefan Berger, stefanb@us.ibm.com
++ * Grant table support: Mahadevan Gomathisankaran
++ *
++ * This code has been derived from drivers/xen/netback/netback.c
++ * Copyright (c) 2002-2004, K A Fraser
++ *
++ */
++
++#include "common.h"
++#include <xen/evtchn.h>
++
++#include <linux/types.h>
++#include <linux/list.h>
++#include <linux/miscdevice.h>
++#include <linux/poll.h>
++#include <asm/uaccess.h>
++#include <xen/xenbus.h>
++#include <xen/interface/grant_table.h>
++#include <xen/gnttab.h>
++
++/* local data structures */
++struct data_exchange {
++ struct list_head pending_pak;
++ struct list_head current_pak;
++ unsigned int copied_so_far;
++ u8 has_opener:1;
++ u8 aborted:1;
++	rwlock_t pak_lock;	/* protects all of the previous fields */
++ wait_queue_head_t wait_queue;
++};
++
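++/* Response header written by the userspace TPM emulator: the 4-byte
++ * instance number followed by the TPM tag, paramSize and ordinal fields.
++ */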
++struct vtpm_resp_hdr {
++ uint32_t instance_no;
++ uint16_t tag_no;
++ uint32_t len_no;
++ uint32_t ordinal_no;
++} __attribute__ ((packed));
++
++struct packet {
++ struct list_head next;
++ unsigned int data_len;
++ u8 *data_buffer;
++ tpmif_t *tpmif;
++ u32 tpm_instance;
++ u8 req_tag;
++ u32 last_read;
++ u8 flags;
++ struct timer_list processing_timer;
++};
++
++enum {
++ PACKET_FLAG_DISCARD_RESPONSE = 1,
++};
++
++/* local variables */
++static struct data_exchange dataex;
++
++/* local function prototypes */
++static int _packet_write(struct packet *pak,
++ const char *data, size_t size, int userbuffer);
++static void processing_timeout(unsigned long ptr);
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset,
++ char *buffer, int isuserbuffer, u32 left);
++static int vtpm_queue_packet(struct packet *pak);
++
++/***************************************************************
++ Buffer copying for user- and kernel-space buffers.
++***************************************************************/
++static inline int copy_from_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_from_user(to, (void __user *)from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++static inline int copy_to_buffer(void *to,
++ const void *from, unsigned long size,
++ int isuserbuffer)
++{
++ if (isuserbuffer) {
++ if (copy_to_user((void __user *)to, from, size))
++ return -EFAULT;
++ } else {
++ memcpy(to, from, size);
++ }
++ return 0;
++}
++
++
++static void dataex_init(struct data_exchange *dataex)
++{
++ INIT_LIST_HEAD(&dataex->pending_pak);
++ INIT_LIST_HEAD(&dataex->current_pak);
++ dataex->has_opener = 0;
++ rwlock_init(&dataex->pak_lock);
++ init_waitqueue_head(&dataex->wait_queue);
++}
++
++/***************************************************************
++ Packet-related functions
++***************************************************************/
++
++static struct packet *packet_find_instance(struct list_head *head,
++ u32 tpm_instance)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++ /*
++ * traverse the list of packets and return the first
++ * one with the given instance number
++ */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak->tpm_instance == tpm_instance) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_find_packet(struct list_head *head, void *packet)
++{
++ struct packet *pak;
++ struct list_head *p;
++
++	/*
++	 * traverse the list of packets and return the entry
++	 * matching the given packet pointer
++	 */
++ list_for_each(p, head) {
++ pak = list_entry(p, struct packet, next);
++
++ if (pak == packet) {
++ return pak;
++ }
++ }
++ return NULL;
++}
++
++static struct packet *packet_alloc(tpmif_t * tpmif,
++ u32 size, u8 req_tag, u8 flags)
++{
++ struct packet *pak = NULL;
++ pak = kzalloc(sizeof (struct packet), GFP_ATOMIC);
++ if (NULL != pak) {
++ if (tpmif) {
++ pak->tpmif = tpmif;
++ pak->tpm_instance = tpmback_get_instance(tpmif->bi);
++ tpmif_get(tpmif);
++ }
++ pak->data_len = size;
++ pak->req_tag = req_tag;
++ pak->last_read = 0;
++ pak->flags = flags;
++
++ /*
++ * cannot do tpmif_get(tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ init_timer(&pak->processing_timer);
++ pak->processing_timer.function = processing_timeout;
++ pak->processing_timer.data = (unsigned long)pak;
++ }
++ return pak;
++}
++
++static inline void packet_reset(struct packet *pak)
++{
++ pak->last_read = 0;
++}
++
++static void packet_free(struct packet *pak)
++{
++ if (timer_pending(&pak->processing_timer)) {
++ BUG();
++ }
++
++ if (pak->tpmif)
++ tpmif_put(pak->tpmif);
++ kfree(pak->data_buffer);
++ /*
++ * cannot do tpmif_put(pak->tpmif); bad things happen
++ * on the last tpmif_put()
++ */
++ kfree(pak);
++}
++
++
++/*
++ * Write data to the shared memory and send it to the FE.
++ */
++static int packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ int rc = 0;
++
++ if (0 != (pak->flags & PACKET_FLAG_DISCARD_RESPONSE)) {
++		/* Don't send a response to this packet. Just acknowledge it. */
++ rc = size;
++ } else {
++ rc = _packet_write(pak, data, size, isuserbuffer);
++ }
++
++ return rc;
++}
++
++static int _packet_write(struct packet *pak,
++ const char *data, size_t size, int isuserbuffer)
++{
++ /*
++ * Write into the shared memory pages directly
++ * and send it to the front end.
++ */
++ tpmif_t *tpmif = pak->tpmif;
++ grant_handle_t handle;
++ int rc = 0;
++ unsigned int i = 0;
++ unsigned int offset = 0;
++
++ if (tpmif == NULL) {
++ return -EFAULT;
++ }
++
++ if (tpmif->status == DISCONNECTED) {
++ return size;
++ }
++
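++	/*
++	 * Copy the response one ring slot at a time: map the frontend's
++	 * grant, copy up to PAGE_SIZE bytes into it, then unmap it again.
++	 */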
++ while (offset < size && i < TPMIF_TX_RING_SIZE) {
++ unsigned int tocopy;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ if (0 == tx->addr) {
++ DPRINTK("ERROR: Buffer for outgoing packet NULL?! i=%d\n", i);
++ return 0;
++ }
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ handle = map_op.handle;
++
++ if (map_op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return 0;
++ }
++
++ tocopy = min_t(size_t, size - offset, PAGE_SIZE);
++
++ if (copy_from_buffer((void *)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)),
++ &data[offset], tocopy, isuserbuffer)) {
++ tpmif_put(tpmif);
++ return -EFAULT;
++ }
++ tx->size = tocopy;
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += tocopy;
++ i++;
++ }
++
++ rc = offset;
++ DPRINTK("Notifying frontend via irq %d\n", tpmif->irq);
++ notify_remote_via_irq(tpmif->irq);
++
++ return rc;
++}
++
++/*
++ * Read data from the shared memory and copy it directly into the
++ * provided buffer. Advance the read_last indicator which tells
++ * how many bytes have already been read.
++ */
++static int packet_read(struct packet *pak, size_t numbytes,
++ char *buffer, size_t buffersize, int isuserbuffer)
++{
++ tpmif_t *tpmif = pak->tpmif;
++
++ /*
++ * Read 'numbytes' of data from the buffer. The first 4
++ * bytes are the instance number in network byte order,
++ * after that come the data from the shared memory buffer.
++ */
++ u32 to_copy;
++ u32 offset = 0;
++ u32 room_left = buffersize;
++
++ if (pak->last_read < 4) {
++ /*
++ * copy the instance number into the buffer
++ */
++ u32 instance_no = htonl(pak->tpm_instance);
++ u32 last_read = pak->last_read;
++
++ to_copy = min_t(size_t, 4 - last_read, numbytes);
++
++ if (copy_to_buffer(&buffer[0],
++ &(((u8 *) & instance_no)[last_read]),
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ pak->last_read += to_copy;
++ offset += to_copy;
++ room_left -= to_copy;
++ }
++
++ /*
++ * If the packet has a data buffer appended, read from it...
++ */
++
++ if (room_left > 0) {
++ if (pak->data_buffer) {
++ u32 to_copy = min_t(u32, pak->data_len - offset, room_left);
++ u32 last_read = pak->last_read - 4;
++
++ if (copy_to_buffer(&buffer[offset],
++ &pak->data_buffer[last_read],
++ to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++ pak->last_read += to_copy;
++ offset += to_copy;
++ } else {
++ offset = packet_read_shmem(pak,
++ tpmif,
++ offset,
++ buffer,
++ isuserbuffer, room_left);
++ }
++ }
++ return offset;
++}
++
++static int packet_read_shmem(struct packet *pak,
++ tpmif_t * tpmif,
++ u32 offset, char *buffer, int isuserbuffer,
++ u32 room_left)
++{
++ u32 last_read = pak->last_read - 4;
++ u32 i = (last_read / PAGE_SIZE);
++ u32 pg_offset = last_read & (PAGE_SIZE - 1);
++ u32 to_copy;
++ grant_handle_t handle;
++
++ tpmif_tx_request_t *tx;
++
++ tx = &tpmif->tx->ring[0].req;
++ /*
++ * Start copying data at the page with index 'index'
++ * and within that page at offset 'offset'.
++ * Copy a maximum of 'room_left' bytes.
++ */
++ to_copy = min_t(u32, PAGE_SIZE - pg_offset, room_left);
++ while (to_copy > 0) {
++ void *src;
++ struct gnttab_map_grant_ref map_op;
++ struct gnttab_unmap_grant_ref unmap_op;
++
++ tx = &tpmif->tx->ring[i].req;
++
++ gnttab_set_map_op(&map_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, tx->ref, tpmif->domid);
++
++ if (unlikely(HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
++ &map_op, 1))) {
++ BUG();
++ }
++
++ if (map_op.status) {
++ DPRINTK(" Grant table operation failure !\n");
++ return -EFAULT;
++ }
++
++ handle = map_op.handle;
++
++ if (to_copy > tx->size) {
++ /*
++ * User requests more than what's available
++ */
++ to_copy = min_t(u32, tx->size, to_copy);
++ }
++
++ DPRINTK("Copying from mapped memory at %08lx\n",
++ (unsigned long)(idx_to_kaddr(tpmif, i) |
++ (tx->addr & ~PAGE_MASK)));
++
++ src = (void *)(idx_to_kaddr(tpmif, i) |
++ ((tx->addr & ~PAGE_MASK) + pg_offset));
++ if (copy_to_buffer(&buffer[offset],
++ src, to_copy, isuserbuffer)) {
++ return -EFAULT;
++ }
++
++ DPRINTK("Data from TPM-FE of domain %d are %d %d %d %d\n",
++ tpmif->domid, buffer[offset], buffer[offset + 1],
++ buffer[offset + 2], buffer[offset + 3]);
++
++ gnttab_set_unmap_op(&unmap_op, idx_to_kaddr(tpmif, i),
++ GNTMAP_host_map, handle);
++
++ if (unlikely
++ (HYPERVISOR_grant_table_op
++ (GNTTABOP_unmap_grant_ref, &unmap_op, 1))) {
++ BUG();
++ }
++
++ offset += to_copy;
++ pg_offset = 0;
++ last_read += to_copy;
++ room_left -= to_copy;
++
++ to_copy = min_t(u32, PAGE_SIZE, room_left);
++ i++;
++ } /* while (to_copy > 0) */
++ /*
++ * Adjust the last_read pointer
++ */
++ pak->last_read = last_read + 4;
++ return offset;
++}
++
++/* ============================================================
++ * The file layer for reading data from this device
++ * ============================================================
++ */
++static int vtpm_op_open(struct inode *inode, struct file *f)
++{
++ int rc = 0;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.has_opener == 0) {
++ dataex.has_opener = 1;
++ } else {
++ rc = -EPERM;
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return rc;
++}
++
++static ssize_t vtpm_op_read(struct file *file,
++ char __user * data, size_t size, loff_t * offset)
++{
++ int ret_size = -ENODATA;
++ struct packet *pak = NULL;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ if (dataex.aborted) {
++ dataex.aborted = 0;
++ dataex.copied_so_far = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return -EIO;
++ }
++
++ if (list_empty(&dataex.pending_pak)) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ wait_event_interruptible(dataex.wait_queue,
++ !list_empty(&dataex.pending_pak));
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.copied_so_far = 0;
++ }
++
++ if (!list_empty(&dataex.pending_pak)) {
++ unsigned int left;
++
++ pak = list_entry(dataex.pending_pak.next, struct packet, next);
++ left = pak->data_len - dataex.copied_so_far;
++ list_del(&pak->next);
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("size given by app: %d, available: %d\n", size, left);
++
++ ret_size = min_t(size_t, size, left);
++
++ ret_size = packet_read(pak, ret_size, data, size, 1);
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ if (ret_size < 0) {
++ del_singleshot_timer_sync(&pak->processing_timer);
++ packet_free(pak);
++ dataex.copied_so_far = 0;
++ } else {
++ DPRINTK("Copied %d bytes to user buffer\n", ret_size);
++
++ dataex.copied_so_far += ret_size;
++ if (dataex.copied_so_far >= pak->data_len + 4) {
++ DPRINTK("All data from this packet given to app.\n");
++ /* All data given to app */
++
++ del_singleshot_timer_sync(&pak->
++ processing_timer);
++ list_add_tail(&pak->next, &dataex.current_pak);
++ /*
++				 * The more frontends that are handled at the same time,
++ * the more time we give the TPM to process the request.
++ */
++ mod_timer(&pak->processing_timer,
++ jiffies + (num_frontends * 60 * HZ));
++ dataex.copied_so_far = 0;
++ } else {
++ list_add(&pak->next, &dataex.pending_pak);
++ }
++ }
++ }
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ DPRINTK("Returning result from read to app: %d\n", ret_size);
++
++ return ret_size;
++}
++
++/*
++ * Write operation - only works after a previous read operation!
++ */
++static ssize_t vtpm_op_write(struct file *file,
++ const char __user * data, size_t size,
++ loff_t * offset)
++{
++ struct packet *pak;
++ int rc = 0;
++ unsigned int off = 4;
++ unsigned long flags;
++ struct vtpm_resp_hdr vrh;
++
++ /*
++ * Minimum required packet size is:
++ * 4 bytes for instance number
++ * 2 bytes for tag
++ * 4 bytes for paramSize
++ * 4 bytes for the ordinal
++ * sum: 14 bytes
++ */
++ if (size < sizeof (vrh))
++ return -EFAULT;
++
++ if (copy_from_user(&vrh, data, sizeof (vrh)))
++ return -EFAULT;
++
++ /* malformed packet? */
++ if ((off + ntohl(vrh.len_no)) != size)
++ return -EFAULT;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ pak = packet_find_instance(&dataex.current_pak,
++ ntohl(vrh.instance_no));
++
++ if (pak == NULL) {
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++		DPRINTK("No associated packet! (inst=%d)\n",
++ ntohl(vrh.instance_no));
++ return -EFAULT;
++ }
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ /*
++	 * The first 'off' bytes hold the instance number - skip them.
++ */
++ size -= off;
++
++ rc = packet_write(pak, &data[off], size, 1);
++
++ if (rc > 0) {
++		/* account for the instance-number bytes skipped above */
++ rc += off;
++ }
++ packet_free(pak);
++ return rc;
++}
++
++static int vtpm_op_release(struct inode *inode, struct file *file)
++{
++ unsigned long flags;
++
++ vtpm_release_packets(NULL, 1);
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ dataex.has_opener = 0;
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static unsigned int vtpm_op_poll(struct file *file,
++ struct poll_table_struct *pts)
++{
++ unsigned int flags = POLLOUT | POLLWRNORM;
++
++ poll_wait(file, &dataex.wait_queue, pts);
++ if (!list_empty(&dataex.pending_pak)) {
++ flags |= POLLIN | POLLRDNORM;
++ }
++ return flags;
++}
++
++static const struct file_operations vtpm_ops = {
++ .owner = THIS_MODULE,
++ .llseek = no_llseek,
++ .open = vtpm_op_open,
++ .read = vtpm_op_read,
++ .write = vtpm_op_write,
++ .release = vtpm_op_release,
++ .poll = vtpm_op_poll,
++};
++
++static struct miscdevice vtpms_miscdevice = {
++ .minor = 225,
++ .name = "vtpm",
++ .fops = &vtpm_ops,
++};
++
++/***************************************************************
++ Utility functions
++***************************************************************/
++
++static int tpm_send_fail_message(struct packet *pak, u8 req_tag)
++{
++ int rc;
++ static const unsigned char tpm_error_message_fail[] = {
++ 0x00, 0x00,
++ 0x00, 0x00, 0x00, 0x0a,
++ 0x00, 0x00, 0x00, 0x09 /* TPM_FAIL */
++ };
++ unsigned char buffer[sizeof (tpm_error_message_fail)];
++
++ memcpy(buffer, tpm_error_message_fail,
++ sizeof (tpm_error_message_fail));
++ /*
++ * Insert the right response tag depending on the given tag
++ * All response tags are '+3' to the request tag.
++ */
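++	/* e.g. TPM_TAG_RQU_COMMAND (0x00C1) becomes TPM_TAG_RSP_COMMAND (0x00C4). */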
++ buffer[1] = req_tag + 3;
++
++ /*
++ * Write the data to shared memory and notify the front-end
++ */
++ rc = packet_write(pak, buffer, sizeof (buffer), 0);
++
++ return rc;
++}
++
++static int _vtpm_release_packets(struct list_head *head,
++ tpmif_t * tpmif, int send_msgs)
++{
++ int aborted = 0;
++ int c = 0;
++ struct packet *pak;
++ struct list_head *pos, *tmp;
++
++ list_for_each_safe(pos, tmp, head) {
++ pak = list_entry(pos, struct packet, next);
++ c += 1;
++
++ if (tpmif == NULL || pak->tpmif == tpmif) {
++ int can_send = 0;
++
++ del_singleshot_timer_sync(&pak->processing_timer);
++ list_del(&pak->next);
++
++ if (pak->tpmif && pak->tpmif->status == CONNECTED) {
++ can_send = 1;
++ }
++
++ if (send_msgs && can_send) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ packet_free(pak);
++ if (c == 1)
++ aborted = 1;
++ }
++ }
++ return aborted;
++}
++
++int vtpm_release_packets(tpmif_t * tpmif, int send_msgs)
++{
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++
++ dataex.aborted = _vtpm_release_packets(&dataex.pending_pak,
++ tpmif,
++ send_msgs);
++ _vtpm_release_packets(&dataex.current_pak, tpmif, send_msgs);
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++ return 0;
++}
++
++static int vtpm_queue_packet(struct packet *pak)
++{
++ int rc = 0;
++
++ if (dataex.has_opener) {
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++ list_add_tail(&pak->next, &dataex.pending_pak);
++ /* give the TPM some time to pick up the request */
++ mod_timer(&pak->processing_timer, jiffies + (30 * HZ));
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++
++ wake_up_interruptible(&dataex.wait_queue);
++ } else {
++ rc = -EFAULT;
++ }
++ return rc;
++}
++
++static int vtpm_receive(tpmif_t * tpmif, u32 size)
++{
++ int rc = 0;
++ unsigned char buffer[10];
++ __be32 *native_size;
++ struct packet *pak = packet_alloc(tpmif, size, 0, 0);
++
++ if (!pak)
++ return -ENOMEM;
++ /*
++ * Read 10 bytes from the received buffer to test its
++ * content for validity.
++ */
++ if (sizeof (buffer) != packet_read(pak,
++ sizeof (buffer), buffer,
++ sizeof (buffer), 0)) {
++ goto failexit;
++ }
++ /*
++ * Reset the packet read pointer so we can read all its
++ * contents again.
++ */
++ packet_reset(pak);
++
++ native_size = (__force __be32 *) (&buffer[4 + 2]);
++ /*
++ * Verify that the size of the packet is correct
++ * as indicated and that there's actually someone reading packets.
++ * The minimum size of the packet is '10' for tag, size indicator
++ * and ordinal.
++ */
++ if (size < 10 ||
++ be32_to_cpu(*native_size) != size ||
++ 0 == dataex.has_opener || tpmif->status != CONNECTED) {
++ rc = -EINVAL;
++ goto failexit;
++ } else {
++ rc = vtpm_queue_packet(pak);
++ if (rc < 0)
++ goto failexit;
++ }
++ return 0;
++
++ failexit:
++ if (pak) {
++ tpm_send_fail_message(pak, buffer[4 + 1]);
++ packet_free(pak);
++ }
++ return rc;
++}
++
++/*
++ * Timeout function that gets invoked when a packet has not been processed
++ * during the timeout period.
++ * The packet must be on a list when this function is invoked. This
++ * also means that once it's taken off a list, the timer must be
++ * destroyed as well.
++ */
++static void processing_timeout(unsigned long ptr)
++{
++ struct packet *pak = (struct packet *)ptr;
++ unsigned long flags;
++
++ write_lock_irqsave(&dataex.pak_lock, flags);
++	/*
++	 * Check whether the packet is still on one of the
++	 * lists before acting on it.
++	 */
++ if (pak == packet_find_packet(&dataex.pending_pak, pak) ||
++ pak == packet_find_packet(&dataex.current_pak, pak)) {
++ if ((pak->flags & PACKET_FLAG_DISCARD_RESPONSE) == 0) {
++ tpm_send_fail_message(pak, pak->req_tag);
++ }
++ /* discard future responses */
++ pak->flags |= PACKET_FLAG_DISCARD_RESPONSE;
++ }
++
++ write_unlock_irqrestore(&dataex.pak_lock, flags);
++}
++
++static void tpm_tx_action(unsigned long unused);
++static DECLARE_TASKLET(tpm_tx_tasklet, tpm_tx_action, 0);
++
++static struct list_head tpm_schedule_list;
++static spinlock_t tpm_schedule_list_lock;
++
++static inline void maybe_schedule_tx_action(void)
++{
++ smp_mb();
++ tasklet_schedule(&tpm_tx_tasklet);
++}
++
++static inline int __on_tpm_schedule_list(tpmif_t * tpmif)
++{
++ return tpmif->list.next != NULL;
++}
++
++static void remove_from_tpm_schedule_list(tpmif_t * tpmif)
++{
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (likely(__on_tpm_schedule_list(tpmif))) {
++ list_del(&tpmif->list);
++ tpmif->list.next = NULL;
++ tpmif_put(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++static void add_to_tpm_schedule_list_tail(tpmif_t * tpmif)
++{
++ if (__on_tpm_schedule_list(tpmif))
++ return;
++
++ spin_lock_irq(&tpm_schedule_list_lock);
++ if (!__on_tpm_schedule_list(tpmif) && tpmif->active) {
++ list_add_tail(&tpmif->list, &tpm_schedule_list);
++ tpmif_get(tpmif);
++ }
++ spin_unlock_irq(&tpm_schedule_list_lock);
++}
++
++void tpmif_schedule_work(tpmif_t * tpmif)
++{
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++}
++
++void tpmif_deschedule_work(tpmif_t * tpmif)
++{
++ remove_from_tpm_schedule_list(tpmif);
++}
++
++static void tpm_tx_action(unsigned long unused)
++{
++ struct list_head *ent;
++ tpmif_t *tpmif;
++ tpmif_tx_request_t *tx;
++
++ DPRINTK("%s: Getting data from front-end(s)!\n", __FUNCTION__);
++
++ while (!list_empty(&tpm_schedule_list)) {
++ /* Get a tpmif from the list with work to do. */
++ ent = tpm_schedule_list.next;
++ tpmif = list_entry(ent, tpmif_t, list);
++ tpmif_get(tpmif);
++ remove_from_tpm_schedule_list(tpmif);
++
++ tx = &tpmif->tx->ring[0].req;
++
++ /* pass it up */
++ vtpm_receive(tpmif, tx->size);
++
++ tpmif_put(tpmif);
++ }
++}
++
++irqreturn_t tpmif_be_int(int irq, void *dev_id, struct pt_regs *regs)
++{
++ tpmif_t *tpmif = (tpmif_t *) dev_id;
++
++ add_to_tpm_schedule_list_tail(tpmif);
++ maybe_schedule_tx_action();
++ return IRQ_HANDLED;
++}
++
++static int __init tpmback_init(void)
++{
++ int rc;
++
++ if ((rc = misc_register(&vtpms_miscdevice)) != 0) {
++ printk(KERN_ALERT
++ "Could not register misc device for TPM BE.\n");
++ return rc;
++ }
++
++ dataex_init(&dataex);
++
++ spin_lock_init(&tpm_schedule_list_lock);
++ INIT_LIST_HEAD(&tpm_schedule_list);
++
++ tpmif_interface_init();
++ tpmif_xenbus_init();
++
++ printk(KERN_ALERT "Successfully initialized TPM backend driver.\n");
++
++ return 0;
++}
++
++module_init(tpmback_init);
++
++void __exit tpmback_exit(void)
++{
++ vtpm_release_packets(NULL, 0);
++ tpmif_xenbus_exit();
++ tpmif_interface_exit();
++ misc_deregister(&vtpms_miscdevice);
++}
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/tpmback/xenbus.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/tpmback/xenbus.c 2008-03-06 08:54:32.000000000 +0100
+@@ -0,0 +1,289 @@
++/* Xenbus code for tpmif backend
++ Copyright (C) 2005 IBM Corporation
++ Copyright (C) 2005 Rusty Russell <rusty@rustcorp.com.au>
++
++ This program is free software; you can redistribute it and/or modify
++ it under the terms of the GNU General Public License as published by
++ the Free Software Foundation; either version 2 of the License, or
++ (at your option) any later version.
++
++ This program is distributed in the hope that it will be useful,
++ but WITHOUT ANY WARRANTY; without even the implied warranty of
++ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
++ GNU General Public License for more details.
++
++ You should have received a copy of the GNU General Public License
++ along with this program; if not, write to the Free Software
++ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
++*/
++#include <stdarg.h>
++#include <linux/module.h>
++#include <xen/xenbus.h>
++#include "common.h"
++
++struct backend_info
++{
++ struct xenbus_device *dev;
++
++ /* our communications channel */
++ tpmif_t *tpmif;
++
++ long int frontend_id;
++	long int instance;	/* instance of the TPM */
++	u8 is_instance_set;	/* whether the instance number has been set */
++
++ /* watch front end for changes */
++ struct xenbus_watch backend_watch;
++};
++
++static void maybe_connect(struct backend_info *be);
++static void connect(struct backend_info *be);
++static int connect_ring(struct backend_info *be);
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len);
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state);
++
++long int tpmback_get_instance(struct backend_info *bi)
++{
++ long int res = -1;
++ if (bi && bi->is_instance_set)
++ res = bi->instance;
++ return res;
++}
++
++static int tpmback_remove(struct xenbus_device *dev)
++{
++ struct backend_info *be = dev->dev.driver_data;
++
++	if (!be)
++		return 0;
++
++ if (be->backend_watch.node) {
++ unregister_xenbus_watch(&be->backend_watch);
++ kfree(be->backend_watch.node);
++ be->backend_watch.node = NULL;
++ }
++ if (be->tpmif) {
++ be->tpmif->bi = NULL;
++ vtpm_release_packets(be->tpmif, 0);
++ tpmif_put(be->tpmif);
++ be->tpmif = NULL;
++ }
++ kfree(be);
++ dev->dev.driver_data = NULL;
++ return 0;
++}
++
++static int tpmback_probe(struct xenbus_device *dev,
++ const struct xenbus_device_id *id)
++{
++ int err;
++ struct backend_info *be = kzalloc(sizeof(struct backend_info),
++ GFP_KERNEL);
++
++ if (!be) {
++ xenbus_dev_fatal(dev, -ENOMEM,
++ "allocating backend structure");
++ return -ENOMEM;
++ }
++
++ be->is_instance_set = 0;
++ be->dev = dev;
++ dev->dev.driver_data = be;
++
++ err = xenbus_watch_path2(dev, dev->nodename,
++ "instance", &be->backend_watch,
++ backend_changed);
++ if (err) {
++ goto fail;
++ }
++
++ err = xenbus_switch_state(dev, XenbusStateInitWait);
++ if (err) {
++ goto fail;
++ }
++ return 0;
++fail:
++ tpmback_remove(dev);
++ return err;
++}
++
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ int err;
++ long instance;
++ struct backend_info *be
++ = container_of(watch, struct backend_info, backend_watch);
++ struct xenbus_device *dev = be->dev;
++
++	err = xenbus_scanf(XBT_NIL, dev->nodename,
++			   "instance", "%li", &instance);
++ if (XENBUS_EXIST_ERR(err)) {
++ return;
++ }
++
++ if (err != 1) {
++ xenbus_dev_fatal(dev, err, "reading instance");
++ return;
++ }
++
++ if (be->is_instance_set == 0) {
++ be->instance = instance;
++ be->is_instance_set = 1;
++ }
++}
++
++
++static void frontend_changed(struct xenbus_device *dev,
++ enum xenbus_state frontend_state)
++{
++ struct backend_info *be = dev->dev.driver_data;
++ int err;
++
++ switch (frontend_state) {
++ case XenbusStateInitialising:
++ case XenbusStateInitialised:
++ break;
++
++ case XenbusStateConnected:
++ err = connect_ring(be);
++ if (err) {
++ return;
++ }
++ maybe_connect(be);
++ break;
++
++ case XenbusStateClosing:
++ be->instance = -1;
++ xenbus_switch_state(dev, XenbusStateClosing);
++ break;
++
++	case XenbusStateUnknown: /* handled like Closed */
++ case XenbusStateClosed:
++ xenbus_switch_state(dev, XenbusStateClosed);
++ device_unregister(&be->dev->dev);
++ tpmback_remove(dev);
++ break;
++
++ default:
++ xenbus_dev_fatal(dev, -EINVAL,
++ "saw state %d at frontend",
++ frontend_state);
++ break;
++ }
++}
++
++
++
++static void maybe_connect(struct backend_info *be)
++{
++ if (be->tpmif == NULL || be->tpmif->status == CONNECTED)
++ return;
++
++ connect(be);
++}
++
++
++static void connect(struct backend_info *be)
++{
++ struct xenbus_transaction xbt;
++ int err;
++ struct xenbus_device *dev = be->dev;
++ unsigned long ready = 1;
++
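++	/* Retry the xenstore transaction until it no longer fails with -EAGAIN. */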
++again:
++ err = xenbus_transaction_start(&xbt);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "starting transaction");
++ return;
++ }
++
++ err = xenbus_printf(xbt, be->dev->nodename,
++ "ready", "%lu", ready);
++ if (err) {
++ xenbus_dev_fatal(be->dev, err, "writing 'ready'");
++ goto abort;
++ }
++
++ err = xenbus_transaction_end(xbt, 0);
++ if (err == -EAGAIN)
++ goto again;
++ if (err)
++ xenbus_dev_fatal(be->dev, err, "end of transaction");
++
++ err = xenbus_switch_state(dev, XenbusStateConnected);
++ if (!err)
++ be->tpmif->status = CONNECTED;
++ return;
++abort:
++ xenbus_transaction_end(xbt, 1);
++}
++
++
++static int connect_ring(struct backend_info *be)
++{
++ struct xenbus_device *dev = be->dev;
++ unsigned long ring_ref;
++ unsigned int evtchn;
++ int err;
++
++ err = xenbus_gather(XBT_NIL, dev->otherend,
++ "ring-ref", "%lu", &ring_ref,
++ "event-channel", "%u", &evtchn, NULL);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "reading %s/ring-ref and event-channel",
++ dev->otherend);
++ return err;
++ }
++
++ if (!be->tpmif) {
++ be->tpmif = tpmif_find(dev->otherend_id, be);
++ if (IS_ERR(be->tpmif)) {
++ err = PTR_ERR(be->tpmif);
++ be->tpmif = NULL;
++			xenbus_dev_fatal(dev, err, "creating vtpm interface");
++ return err;
++ }
++ }
++
++ if (be->tpmif != NULL) {
++ err = tpmif_map(be->tpmif, ring_ref, evtchn);
++ if (err) {
++ xenbus_dev_error(dev, err,
++ "mapping shared-frame %lu port %u",
++ ring_ref, evtchn);
++ return err;
++ }
++ }
++ return 0;
++}
++
++
++static const struct xenbus_device_id tpmback_ids[] = {
++ { "vtpm" },
++ { "" }
++};
++
++
++static struct xenbus_driver tpmback = {
++ .name = "vtpm",
++ .owner = THIS_MODULE,
++ .ids = tpmback_ids,
++ .probe = tpmback_probe,
++ .remove = tpmback_remove,
++ .otherend_changed = frontend_changed,
++};
++
++
++void tpmif_xenbus_init(void)
++{
++ xenbus_register_backend(&tpmback);
++}
++
++void tpmif_xenbus_exit(void)
++{
++ xenbus_unregister_driver(&tpmback);
++}
+Index: head-2008-11-25/drivers/xen/util.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/util.c 2007-07-10 09:42:30.000000000 +0200
+@@ -0,0 +1,65 @@
++#include <linux/mm.h>
++#include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <asm/uaccess.h>
++#include <xen/driver_util.h>
++
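++/* Return the shared "xen" sysfs class, creating it on first use. */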
++struct class *get_xen_class(void)
++{
++ static struct class *xen_class;
++
++ if (xen_class)
++ return xen_class;
++
++ xen_class = class_create(THIS_MODULE, "xen");
++ if (IS_ERR(xen_class)) {
++ printk("Failed to create xen sysfs class.\n");
++ xen_class = NULL;
++ }
++
++ return xen_class;
++}
++EXPORT_SYMBOL_GPL(get_xen_class);
++
++#ifdef CONFIG_X86
++static int f(pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
++{
++ /* apply_to_page_range() does all the hard work. */
++ return 0;
++}
++
++struct vm_struct *alloc_vm_area(unsigned long size)
++{
++ struct vm_struct *area;
++
++ area = get_vm_area(size, VM_IOREMAP);
++ if (area == NULL)
++ return NULL;
++
++ /*
++ * This ensures that page tables are constructed for this region
++ * of kernel virtual address space and mapped into init_mm.
++ */
++ if (apply_to_page_range(&init_mm, (unsigned long)area->addr,
++ area->size, f, NULL)) {
++ free_vm_area(area);
++ return NULL;
++ }
++
++ /* Map page directories into every address space. */
++ vmalloc_sync_all();
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(alloc_vm_area);
++
++void free_vm_area(struct vm_struct *area)
++{
++ struct vm_struct *ret;
++ ret = remove_vm_area(area->addr);
++ BUG_ON(ret != area);
++ kfree(area);
++}
++EXPORT_SYMBOL_GPL(free_vm_area);
++#endif /* CONFIG_X86 */
+Index: head-2008-11-25/drivers/xen/xenbus/xenbus_backend_client.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/xenbus/xenbus_backend_client.c 2007-06-12 13:13:45.000000000 +0200
+@@ -0,0 +1,147 @@
++/******************************************************************************
++ * Backend-client-facing interface for the Xenbus driver. In other words, the
++ * interface between the Xenbus and the device-specific code in the backend
++ * driver.
++ *
++ * Copyright (C) 2005-2006 XenSource Ltd
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/err.h>
++#include <xen/gnttab.h>
++#include <xen/xenbus.h>
++#include <xen/driver_util.h>
++
++/* Based on Rusty Russell's skeleton driver's map_page */
++struct vm_struct *xenbus_map_ring_valloc(struct xenbus_device *dev, int gnt_ref)
++{
++ struct gnttab_map_grant_ref op;
++ struct vm_struct *area;
++
++ area = alloc_vm_area(PAGE_SIZE);
++ if (!area)
++ return ERR_PTR(-ENOMEM);
++
++ gnttab_set_map_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ free_vm_area(area);
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ BUG_ON(!IS_ERR(ERR_PTR(op.status)));
++ return ERR_PTR(op.status);
++ }
++
++ /* Stuff the handle in an unused field */
++ area->phys_addr = (unsigned long)op.handle;
++
++ return area;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring_valloc);
++
++
++int xenbus_map_ring(struct xenbus_device *dev, int gnt_ref,
++ grant_handle_t *handle, void *vaddr)
++{
++ struct gnttab_map_grant_ref op;
++
++ gnttab_set_map_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ gnt_ref, dev->otherend_id);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay) {
++ xenbus_dev_fatal(dev, op.status,
++ "mapping in shared page %d from domain %d",
++ gnt_ref, dev->otherend_id);
++ } else
++ *handle = op.handle;
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_map_ring);
++
++
++/* Based on Rusty Russell's skeleton driver's unmap_page */
++int xenbus_unmap_ring_vfree(struct xenbus_device *dev, struct vm_struct *area)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)area->addr, GNTMAP_host_map,
++ (grant_handle_t)area->phys_addr);
++
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status == GNTST_okay)
++ free_vm_area(area);
++ else
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ (int16_t)area->phys_addr, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring_vfree);
++
++
++int xenbus_unmap_ring(struct xenbus_device *dev,
++ grant_handle_t handle, void *vaddr)
++{
++ struct gnttab_unmap_grant_ref op;
++
++ gnttab_set_unmap_op(&op, (unsigned long)vaddr, GNTMAP_host_map,
++ handle);
++ if (HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, &op, 1))
++ BUG();
++
++ if (op.status != GNTST_okay)
++ xenbus_dev_error(dev, op.status,
++ "unmapping page at handle %d error %d",
++ handle, op.status);
++
++ return op.status;
++}
++EXPORT_SYMBOL_GPL(xenbus_unmap_ring);
++
++int xenbus_dev_is_online(struct xenbus_device *dev)
++{
++ int rc, val;
++
++ rc = xenbus_scanf(XBT_NIL, dev->nodename, "online", "%d", &val);
++ if (rc != 1)
++ val = 0; /* no online node present */
++
++ return val;
++}
++EXPORT_SYMBOL_GPL(xenbus_dev_is_online);
++
++MODULE_LICENSE("Dual BSD/GPL");
+Index: head-2008-11-25/drivers/xen/xenbus/xenbus_dev.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/xenbus/xenbus_dev.c 2008-07-21 11:00:33.000000000 +0200
+@@ -0,0 +1,408 @@
++/*
++ * xenbus_dev.c
++ *
++ * Driver giving user-space access to the kernel's xenbus connection
++ * to xenstore.
++ *
++ * Copyright (c) 2005, Christian Limpach
++ * Copyright (c) 2005, Rusty Russell, IBM Corporation
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/errno.h>
++#include <linux/uio.h>
++#include <linux/notifier.h>
++#include <linux/wait.h>
++#include <linux/fs.h>
++#include <linux/poll.h>
++#include <linux/mutex.h>
++
++#include "xenbus_comms.h"
++
++#include <asm/uaccess.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++struct xenbus_dev_transaction {
++ struct list_head list;
++ struct xenbus_transaction handle;
++};
++
++struct read_buffer {
++ struct list_head list;
++ unsigned int cons;
++ unsigned int len;
++	char msg[];		/* message body, 'len' bytes */
++};
++
++struct xenbus_dev_data {
++ /* In-progress transaction. */
++ struct list_head transactions;
++
++ /* Active watches. */
++ struct list_head watches;
++
++ /* Partial request. */
++ unsigned int len;
++ union {
++ struct xsd_sockmsg msg;
++ char buffer[PAGE_SIZE];
++ } u;
++
++ /* Response queue. */
++ struct list_head read_buffers;
++ wait_queue_head_t read_waitq;
++
++ struct mutex reply_mutex;
++};
++
++static struct proc_dir_entry *xenbus_dev_intf;
++
++static ssize_t xenbus_dev_read(struct file *filp,
++ char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct read_buffer *rb;
++ int i, ret;
++
++ mutex_lock(&u->reply_mutex);
++ while (list_empty(&u->read_buffers)) {
++ mutex_unlock(&u->reply_mutex);
++ ret = wait_event_interruptible(u->read_waitq,
++ !list_empty(&u->read_buffers));
++ if (ret)
++ return ret;
++ mutex_lock(&u->reply_mutex);
++ }
++
++ rb = list_entry(u->read_buffers.next, struct read_buffer, list);
++ for (i = 0; i < len;) {
++ put_user(rb->msg[rb->cons], ubuf + i);
++ i++;
++ rb->cons++;
++ if (rb->cons == rb->len) {
++ list_del(&rb->list);
++ kfree(rb);
++ if (list_empty(&u->read_buffers))
++ break;
++ rb = list_entry(u->read_buffers.next,
++ struct read_buffer, list);
++ }
++ }
++ mutex_unlock(&u->reply_mutex);
++
++ return i;
++}
++
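++/* Append a message to the reader's queue and wake up any waiting reader. */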
++static void queue_reply(struct xenbus_dev_data *u,
++ char *data, unsigned int len)
++{
++ struct read_buffer *rb;
++
++ if (len == 0)
++ return;
++
++ rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
++ BUG_ON(rb == NULL);
++
++ rb->cons = 0;
++ rb->len = len;
++
++ memcpy(rb->msg, data, len);
++
++ list_add_tail(&rb->list, &u->read_buffers);
++
++ wake_up(&u->read_waitq);
++}
++
++struct watch_adapter
++{
++ struct list_head list;
++ struct xenbus_watch watch;
++ struct xenbus_dev_data *dev_data;
++ char *token;
++};
++
++static void free_watch_adapter (struct watch_adapter *watch)
++{
++ kfree(watch->watch.node);
++ kfree(watch->token);
++ kfree(watch);
++}
++
++static void watch_fired(struct xenbus_watch *watch,
++ const char **vec,
++ unsigned int len)
++{
++ struct watch_adapter *adap =
++ container_of(watch, struct watch_adapter, watch);
++ struct xsd_sockmsg hdr;
++ const char *path, *token;
++ int path_len, tok_len, body_len, data_len = 0;
++
++ path = vec[XS_WATCH_PATH];
++ token = adap->token;
++
++ path_len = strlen(path) + 1;
++ tok_len = strlen(token) + 1;
++ if (len > 2)
++ data_len = vec[len] - vec[2] + 1;
++ body_len = path_len + tok_len + data_len;
++
++ hdr.type = XS_WATCH_EVENT;
++ hdr.len = body_len;
++
++ mutex_lock(&adap->dev_data->reply_mutex);
++ queue_reply(adap->dev_data, (char *)&hdr, sizeof(hdr));
++ queue_reply(adap->dev_data, (char *)path, path_len);
++ queue_reply(adap->dev_data, (char *)token, tok_len);
++ if (len > 2)
++ queue_reply(adap->dev_data, (char *)vec[2], data_len);
++ mutex_unlock(&adap->dev_data->reply_mutex);
++}
++
++static LIST_HEAD(watch_list);
++
++static ssize_t xenbus_dev_write(struct file *filp,
++ const char __user *ubuf,
++ size_t len, loff_t *ppos)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans = NULL;
++ uint32_t msg_type;
++ void *reply;
++ char *path, *token;
++ struct watch_adapter *watch, *tmp_watch;
++ int err, rc = len;
++
++ if ((len + u->len) > sizeof(u->u.buffer)) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(u->u.buffer + u->len, ubuf, len) != 0) {
++ rc = -EFAULT;
++ goto out;
++ }
++
++ u->len += len;
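++	/* Keep buffering until a complete message (header plus body) has arrived. */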
++ if ((u->len < sizeof(u->u.msg)) ||
++ (u->len < (sizeof(u->u.msg) + u->u.msg.len)))
++ return rc;
++
++ msg_type = u->u.msg.type;
++
++ switch (msg_type) {
++ case XS_TRANSACTION_START:
++ case XS_TRANSACTION_END:
++ case XS_DIRECTORY:
++ case XS_READ:
++ case XS_GET_PERMS:
++ case XS_RELEASE:
++ case XS_GET_DOMAIN_PATH:
++ case XS_WRITE:
++ case XS_MKDIR:
++ case XS_RM:
++ case XS_SET_PERMS:
++ if (msg_type == XS_TRANSACTION_START) {
++ trans = kmalloc(sizeof(*trans), GFP_KERNEL);
++ if (!trans) {
++ rc = -ENOMEM;
++ goto out;
++ }
++ }
++
++ reply = xenbus_dev_request_and_reply(&u->u.msg);
++ if (IS_ERR(reply)) {
++ kfree(trans);
++ rc = PTR_ERR(reply);
++ goto out;
++ }
++
++ if (msg_type == XS_TRANSACTION_START) {
++ trans->handle.id = simple_strtoul(reply, NULL, 0);
++ list_add(&trans->list, &u->transactions);
++ } else if (msg_type == XS_TRANSACTION_END) {
++ list_for_each_entry(trans, &u->transactions, list)
++ if (trans->handle.id == u->u.msg.tx_id)
++ break;
++ BUG_ON(&trans->list == &u->transactions);
++ list_del(&trans->list);
++ kfree(trans);
++ }
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&u->u.msg, sizeof(u->u.msg));
++ queue_reply(u, (char *)reply, u->u.msg.len);
++ mutex_unlock(&u->reply_mutex);
++ kfree(reply);
++ break;
++
++ case XS_WATCH:
++ case XS_UNWATCH: {
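++		/* Watches are handled entirely in the kernel; synthesize the
++		 * "OK" reply that xenstored would otherwise have sent. */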
++ static const char *XS_RESP = "OK";
++ struct xsd_sockmsg hdr;
++
++ path = u->u.buffer + sizeof(u->u.msg);
++ token = memchr(path, 0, u->u.msg.len);
++ if (token == NULL) {
++ rc = -EILSEQ;
++ goto out;
++ }
++ token++;
++
++ if (msg_type == XS_WATCH) {
++ watch = kzalloc(sizeof(*watch), GFP_KERNEL);
++ watch->watch.node = kmalloc(strlen(path)+1,
++ GFP_KERNEL);
++ strcpy((char *)watch->watch.node, path);
++ watch->watch.callback = watch_fired;
++ watch->token = kmalloc(strlen(token)+1, GFP_KERNEL);
++ strcpy(watch->token, token);
++ watch->dev_data = u;
++
++ err = register_xenbus_watch(&watch->watch);
++ if (err) {
++ free_watch_adapter(watch);
++ rc = err;
++ goto out;
++ }
++
++ list_add(&watch->list, &u->watches);
++ } else {
++ list_for_each_entry_safe(watch, tmp_watch,
++ &u->watches, list) {
++ if (!strcmp(watch->token, token) &&
++ !strcmp(watch->watch.node, path))
++ {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ break;
++ }
++ }
++ }
++
++ hdr.type = msg_type;
++ hdr.len = strlen(XS_RESP) + 1;
++ mutex_lock(&u->reply_mutex);
++ queue_reply(u, (char *)&hdr, sizeof(hdr));
++ queue_reply(u, (char *)XS_RESP, hdr.len);
++ mutex_unlock(&u->reply_mutex);
++ break;
++ }
++
++ default:
++ rc = -EINVAL;
++ break;
++ }
++
++ out:
++ u->len = 0;
++ return rc;
++}
++
++static int xenbus_dev_open(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u;
++
++ if (xen_store_evtchn == 0)
++ return -ENOENT;
++
++ nonseekable_open(inode, filp);
++
++ u = kzalloc(sizeof(*u), GFP_KERNEL);
++ if (u == NULL)
++ return -ENOMEM;
++
++ INIT_LIST_HEAD(&u->transactions);
++ INIT_LIST_HEAD(&u->watches);
++ INIT_LIST_HEAD(&u->read_buffers);
++ init_waitqueue_head(&u->read_waitq);
++
++ mutex_init(&u->reply_mutex);
++
++ filp->private_data = u;
++
++ return 0;
++}
++
++static int xenbus_dev_release(struct inode *inode, struct file *filp)
++{
++ struct xenbus_dev_data *u = filp->private_data;
++ struct xenbus_dev_transaction *trans, *tmp;
++ struct watch_adapter *watch, *tmp_watch;
++
++ list_for_each_entry_safe(trans, tmp, &u->transactions, list) {
++ xenbus_transaction_end(trans->handle, 1);
++ list_del(&trans->list);
++ kfree(trans);
++ }
++
++ list_for_each_entry_safe(watch, tmp_watch, &u->watches, list) {
++ unregister_xenbus_watch(&watch->watch);
++ list_del(&watch->list);
++ free_watch_adapter(watch);
++ }
++
++ kfree(u);
++
++ return 0;
++}
++
++static unsigned int xenbus_dev_poll(struct file *file, poll_table *wait)
++{
++ struct xenbus_dev_data *u = file->private_data;
++
++ poll_wait(file, &u->read_waitq, wait);
++ if (!list_empty(&u->read_buffers))
++ return POLLIN | POLLRDNORM;
++ return 0;
++}
++
++static const struct file_operations xenbus_dev_file_ops = {
++ .read = xenbus_dev_read,
++ .write = xenbus_dev_write,
++ .open = xenbus_dev_open,
++ .release = xenbus_dev_release,
++ .poll = xenbus_dev_poll,
++};
++
++int xenbus_dev_init(void)
++{
++ xenbus_dev_intf = create_xen_proc_entry("xenbus", 0400);
++ if (xenbus_dev_intf)
++ xenbus_dev_intf->proc_fops = &xenbus_dev_file_ops;
++
++ return 0;
++}
+Index: head-2008-11-25/drivers/xen/xenbus/xenbus_probe_backend.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/xenbus/xenbus_probe_backend.c 2008-01-21 11:15:26.000000000 +0100
+@@ -0,0 +1,292 @@
++/******************************************************************************
++ * Talks to Xen Store to figure out what devices we have (backend half).
++ *
++ * Copyright (C) 2005 Rusty Russell, IBM Corporation
++ * Copyright (C) 2005 Mike Wray, Hewlett-Packard
++ * Copyright (C) 2005, 2006 XenSource Ltd
++ * Copyright (C) 2007 Solarflare Communications, Inc.
++ *
++ * This program is free software; you can redistribute it and/or
++ * modify it under the terms of the GNU General Public License version 2
++ * as published by the Free Software Foundation; or, when distributed
++ * separately from the Linux kernel or incorporated into other
++ * software packages, subject to the following license:
++ *
++ * Permission is hereby granted, free of charge, to any person obtaining a copy
++ * of this source file (the "Software"), to deal in the Software without
++ * restriction, including without limitation the rights to use, copy, modify,
++ * merge, publish, distribute, sublicense, and/or sell copies of the Software,
++ * and to permit persons to whom the Software is furnished to do so, subject to
++ * the following conditions:
++ *
++ * The above copyright notice and this permission notice shall be included in
++ * all copies or substantial portions of the Software.
++ *
++ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
++ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
++ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
++ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
++ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
++ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
++ * IN THE SOFTWARE.
++ */
++
++#define DPRINTK(fmt, args...) \
++ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \
++ __FUNCTION__, __LINE__, ##args)
++
++#include <linux/kernel.h>
++#include <linux/err.h>
++#include <linux/string.h>
++#include <linux/ctype.h>
++#include <linux/fcntl.h>
++#include <linux/mm.h>
++#include <linux/notifier.h>
++
++#include <asm/io.h>
++#include <asm/page.h>
++#include <asm/maddr.h>
++#include <asm/pgtable.h>
++#include <asm/hypervisor.h>
++#include <xen/xenbus.h>
++#include <xen/xen_proc.h>
++#include <xen/evtchn.h>
++#include <xen/features.h>
++
++#include "xenbus_comms.h"
++#include "xenbus_probe.h"
++
++#ifdef HAVE_XEN_PLATFORM_COMPAT_H
++#include <xen/platform-compat.h>
++#endif
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size);
++static int xenbus_probe_backend(const char *type, const char *domid);
++
++extern int read_otherend_details(struct xenbus_device *xendev,
++ char *id_node, char *path_node);
++
++static int read_frontend_details(struct xenbus_device *xendev)
++{
++ return read_otherend_details(xendev, "frontend-id", "frontend");
++}
++
++/* backend/<type>/<fe-uuid>/<id> => <type>-<fe-domid>-<id> */
++static int backend_bus_id(char bus_id[BUS_ID_SIZE], const char *nodename)
++{
++ int domid, err;
++ const char *devid, *type, *frontend;
++ unsigned int typelen;
++
++ type = strchr(nodename, '/');
++ if (!type)
++ return -EINVAL;
++ type++;
++ typelen = strcspn(type, "/");
++ if (!typelen || type[typelen] != '/')
++ return -EINVAL;
++
++ devid = strrchr(nodename, '/') + 1;
++
++ err = xenbus_gather(XBT_NIL, nodename, "frontend-id", "%i", &domid,
++ "frontend", NULL, &frontend,
++ NULL);
++ if (err)
++ return err;
++ if (strlen(frontend) == 0)
++ err = -ERANGE;
++ if (!err && !xenbus_exists(XBT_NIL, frontend, ""))
++ err = -ENOENT;
++ kfree(frontend);
++
++ if (err)
++ return err;
++
++ if (snprintf(bus_id, BUS_ID_SIZE,
++ "%.*s-%i-%s", typelen, type, domid, devid) >= BUS_ID_SIZE)
++ return -ENOSPC;
++ return 0;
++}
++
++static struct xen_bus_type xenbus_backend = {
++ .root = "backend",
++ .levels = 3, /* backend/type/<frontend>/<id> */
++ .get_bus_id = backend_bus_id,
++ .probe = xenbus_probe_backend,
++ .error = -ENODEV,
++ .bus = {
++ .name = "xen-backend",
++ .match = xenbus_match,
++ .probe = xenbus_dev_probe,
++ .remove = xenbus_dev_remove,
++// .shutdown = xenbus_dev_shutdown,
++ .uevent = xenbus_uevent_backend,
++ },
++ .dev = {
++ .bus_id = "xen-backend",
++ },
++};
++
++static int xenbus_uevent_backend(struct device *dev, char **envp,
++ int num_envp, char *buffer, int buffer_size)
++{
++ struct xenbus_device *xdev;
++ struct xenbus_driver *drv;
++ int i = 0;
++ int length = 0;
++
++ DPRINTK("");
++
++ if (dev == NULL)
++ return -ENODEV;
++
++ xdev = to_xenbus_device(dev);
++ if (xdev == NULL)
++ return -ENODEV;
++
++ /* stuff we want to pass to /sbin/hotplug */
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_TYPE=%s", xdev->devicetype);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_PATH=%s", xdev->nodename);
++
++ add_uevent_var(envp, num_envp, &i, buffer, buffer_size, &length,
++ "XENBUS_BASE_PATH=%s", xenbus_backend.root);
++
++ /* terminate, set to next free slot, shrink available space */
++ envp[i] = NULL;
++ envp = &envp[i];
++ num_envp -= i;
++ buffer = &buffer[length];
++ buffer_size -= length;
++
++ if (dev->driver) {
++ drv = to_xenbus_driver(dev->driver);
++ if (drv && drv->uevent)
++ return drv->uevent(xdev, envp, num_envp, buffer,
++ buffer_size);
++ }
++
++ return 0;
++}
++
++int xenbus_register_backend(struct xenbus_driver *drv)
++{
++ drv->read_otherend_details = read_frontend_details;
++
++ return xenbus_register_driver_common(drv, &xenbus_backend);
++}
++EXPORT_SYMBOL_GPL(xenbus_register_backend);
++
++/* backend/<typename>/<frontend-uuid>/<name> */
++static int xenbus_probe_backend_unit(const char *dir,
++ const char *type,
++ const char *name)
++{
++ char *nodename;
++ int err;
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s", dir, name);
++ if (!nodename)
++ return -ENOMEM;
++
++ DPRINTK("%s\n", nodename);
++
++ err = xenbus_probe_node(&xenbus_backend, type, nodename);
++ kfree(nodename);
++ return err;
++}
++
++/* backend/<typename>/<frontend-domid> */
++static int xenbus_probe_backend(const char *type, const char *domid)
++{
++ char *nodename;
++ int err = 0;
++ char **dir;
++ unsigned int i, dir_n = 0;
++
++ DPRINTK("");
++
++ nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", xenbus_backend.root, type, domid);
++ if (!nodename)
++ return -ENOMEM;
++
++ dir = xenbus_directory(XBT_NIL, nodename, "", &dir_n);
++ if (IS_ERR(dir)) {
++ kfree(nodename);
++ return PTR_ERR(dir);
++ }
++
++ for (i = 0; i < dir_n; i++) {
++ err = xenbus_probe_backend_unit(nodename, type, dir[i]);
++ if (err)
++ break;
++ }
++ kfree(dir);
++ kfree(nodename);
++ return err;
++}
++
++static void backend_changed(struct xenbus_watch *watch,
++ const char **vec, unsigned int len)
++{
++ DPRINTK("");
++
++ dev_changed(vec[XS_WATCH_PATH], &xenbus_backend);
++}
++
++static struct xenbus_watch be_watch = {
++ .node = "backend",
++ .callback = backend_changed,
++};
++
++void xenbus_backend_suspend(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_resume(int (*fn)(struct device *, void *))
++{
++ DPRINTK("");
++ if (!xenbus_backend.error)
++ bus_for_each_dev(&xenbus_backend.bus, NULL, NULL, fn);
++}
++
++void xenbus_backend_probe_and_watch(void)
++{
++ xenbus_probe_devices(&xenbus_backend);
++ register_xenbus_watch(&be_watch);
++}
++
++void xenbus_backend_bus_register(void)
++{
++ xenbus_backend.error = bus_register(&xenbus_backend.bus);
++ if (xenbus_backend.error)
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend bus: %i\n",
++ xenbus_backend.error);
++}
++
++void xenbus_backend_device_register(void)
++{
++ if (xenbus_backend.error)
++ return;
++
++ xenbus_backend.error = device_register(&xenbus_backend.dev);
++ if (xenbus_backend.error) {
++ bus_unregister(&xenbus_backend.bus);
++ printk(KERN_WARNING
++ "XENBUS: Error registering backend device: %i\n",
++ xenbus_backend.error);
++ }
++}
++
++int xenbus_for_each_backend(void *arg, int (*fn)(struct device *, void *))
++{
++ return bus_for_each_dev(&xenbus_backend.bus, NULL, arg, fn);
++}
++EXPORT_SYMBOL_GPL(xenbus_for_each_backend);
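+
+For reference, a hypothetical minimal user of the helper above (the
+"demoback" names are invented; callback signatures follow the
+2.6.18-xen xenbus API as used elsewhere in this tree):
+
+	static int demoback_probe(struct xenbus_device *dev,
+				  const struct xenbus_device_id *id)
+	{
+		/* Per-device setup goes here; read_otherend_details has
+		 * already been pointed at read_frontend_details() by
+		 * xenbus_register_backend(). */
+		return 0;
+	}
+
+	static struct xenbus_device_id demoback_ids[] = {
+		{ "demo" },	/* matches backend/demo/<domid>/<id> */
+		{ "" }
+	};
+
+	static struct xenbus_driver demoback = {
+		.name  = "demoback",
+		.owner = THIS_MODULE,
+		.ids   = demoback_ids,
+		.probe = demoback_probe,
+	};
+
+	static int __init demoback_init(void)
+	{
+		return xenbus_register_backend(&demoback);
+	}
+	module_init(demoback_init);
+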
+Index: head-2008-11-25/drivers/xen/xenoprof/xenoprofile.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ head-2008-11-25/drivers/xen/xenoprof/xenoprofile.c 2008-09-15 13:40:15.000000000 +0200
+@@ -0,0 +1,545 @@
++/**
++ * @file xenoprofile.c
++ *
++ * @remark Copyright 2002 OProfile authors
++ * @remark Read the file COPYING
++ *
++ * @author John Levon <levon@movementarian.org>
++ *
++ * Modified by Aravind Menon and Jose Renato Santos for Xen
++ * These modifications are:
++ * Copyright (C) 2005 Hewlett-Packard Co.
++ *
++ * Separated out arch-generic part
++ * Copyright (c) 2006 Isaku Yamahata <yamahata at valinux co jp>
++ * VA Linux Systems Japan K.K.
++ */
++
++#include <linux/init.h>
++#include <linux/notifier.h>
++#include <linux/smp.h>
++#include <linux/oprofile.h>
++#include <linux/sysdev.h>
++#include <linux/slab.h>
++#include <linux/interrupt.h>
++#include <linux/vmalloc.h>
++#include <asm/pgtable.h>
++#include <xen/evtchn.h>
++#include <xen/xenoprof.h>
++#include <xen/driver_util.h>
++#include <xen/interface/xen.h>
++#include <xen/interface/xenoprof.h>
++#include "../../../drivers/oprofile/cpu_buffer.h"
++#include "../../../drivers/oprofile/event_buffer.h"
++
++#define MAX_XENOPROF_SAMPLES 16
++
++/* sample buffers shared with Xen */
++static xenoprof_buf_t *xenoprof_buf[MAX_VIRT_CPUS];
++/* Shared buffer area */
++static struct xenoprof_shared_buffer shared_buffer;
++
++/* Passive sample buffers shared with Xen */
++static xenoprof_buf_t *p_xenoprof_buf[MAX_OPROF_DOMAINS][MAX_VIRT_CPUS];
++/* Passive shared buffer area */
++static struct xenoprof_shared_buffer p_shared_buffer[MAX_OPROF_DOMAINS];
++
++static int xenoprof_start(void);
++static void xenoprof_stop(void);
++
++static int xenoprof_enabled = 0;
++static int xenoprof_is_primary = 0;
++static int active_defined;
++
++extern unsigned long backtrace_depth;
++
++/* Number of buffers in shared area (one per VCPU) */
++static int nbuf;
++/* Mappings of VIRQ_XENOPROF to irq number (per cpu) */
++static int ovf_irq[NR_CPUS];
++/* cpu model type string - copied from Xen on XENOPROF_init command */
++static char cpu_type[XENOPROF_CPU_TYPE_SIZE];
++
++#ifdef CONFIG_PM
++
++static int xenoprof_suspend(struct sys_device * dev, pm_message_t state)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_stop();
++ return 0;
++}
++
++
++static int xenoprof_resume(struct sys_device * dev)
++{
++ if (xenoprof_enabled == 1)
++ xenoprof_start();
++ return 0;
++}
++
++
++static struct sysdev_class oprofile_sysclass = {
++ set_kset_name("oprofile"),
++ .resume = xenoprof_resume,
++ .suspend = xenoprof_suspend
++};
++
++
++static struct sys_device device_oprofile = {
++ .id = 0,
++ .cls = &oprofile_sysclass,
++};
++
++
++static int __init init_driverfs(void)
++{
++ int error;
++ if (!(error = sysdev_class_register(&oprofile_sysclass)))
++ error = sysdev_register(&device_oprofile);
++ return error;
++}
++
++
++static void exit_driverfs(void)
++{
++ sysdev_unregister(&device_oprofile);
++ sysdev_class_unregister(&oprofile_sysclass);
++}
++
++#else
++#define init_driverfs() do { } while (0)
++#define exit_driverfs() do { } while (0)
++#endif /* CONFIG_PM */
++
++static unsigned long long oprofile_samples;
++static unsigned long long p_oprofile_samples;
++
++static unsigned int pdomains;
++static struct xenoprof_passive passive_domains[MAX_OPROF_DOMAINS];
++
++/* Check whether the given entry is an escape code */
++static int xenoprof_is_escape(xenoprof_buf_t * buf, int tail)
++{
++ return (buf->event_log[tail].eip == XENOPROF_ESCAPE_CODE);
++}
++
++/* Get the event at the given entry */
++static uint8_t xenoprof_get_event(xenoprof_buf_t * buf, int tail)
++{
++ return (buf->event_log[tail].event);
++}
++
++static void xenoprof_add_pc(xenoprof_buf_t *buf, int is_passive)
++{
++ int head, tail, size;
++ int tracing = 0;
++
++ head = buf->event_head;
++ tail = buf->event_tail;
++ size = buf->event_size;
++
++ while (tail != head) {
++ if (xenoprof_is_escape(buf, tail) &&
++ xenoprof_get_event(buf, tail) == XENOPROF_TRACE_BEGIN) {
++ tracing = 1;
++ oprofile_add_pc(ESCAPE_CODE, buf->event_log[tail].mode,
++ CPU_TRACE_BEGIN);
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++
++ } else {
++ oprofile_add_pc(buf->event_log[tail].eip,
++ buf->event_log[tail].mode,
++ buf->event_log[tail].event);
++ if (!tracing) {
++ if (!is_passive)
++ oprofile_samples++;
++ else
++ p_oprofile_samples++;
++ }
++
++ }
++ tail++;
++ if (tail == size)
++ tail = 0;
++ }
++ buf->event_tail = tail;
++}
++
++static void xenoprof_handle_passive(void)
++{
++ int i, j;
++ int flag_domain, flag_switch = 0;
++
++ for (i = 0; i < pdomains; i++) {
++ flag_domain = 0;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ xenoprof_buf_t *buf = p_xenoprof_buf[i][j];
++ if (buf->event_head == buf->event_tail)
++ continue;
++ if (!flag_domain) {
++ if (!oprofile_add_domain_switch(
++ passive_domains[i].domain_id))
++ goto done;
++ flag_domain = 1;
++ }
++ xenoprof_add_pc(buf, 1);
++ flag_switch = 1;
++ }
++ }
++done:
++ if (flag_switch)
++ oprofile_add_domain_switch(COORDINATOR_DOMAIN);
++}
++
++static irqreturn_t
++xenoprof_ovf_interrupt(int irq, void * dev_id, struct pt_regs * regs)
++{
++ struct xenoprof_buf * buf;
++ static unsigned long flag; /* bit 0 serializes passive-domain processing */
++
++ buf = xenoprof_buf[smp_processor_id()];
++
++ xenoprof_add_pc(buf, 0);
++
++ if (xenoprof_is_primary && !test_and_set_bit(0, &flag)) {
++ xenoprof_handle_passive();
++ smp_mb__before_clear_bit();
++ clear_bit(0, &flag);
++ }
++
++ return IRQ_HANDLED;
++}
++
++
++static void unbind_virq(void)
++{
++ unsigned int i;
++
++ for_each_online_cpu(i) {
++ if (ovf_irq[i] >= 0) {
++ unbind_from_irqhandler(ovf_irq[i], NULL);
++ ovf_irq[i] = -1;
++ }
++ }
++}
++
++
++static int bind_virq(void)
++{
++ unsigned int i;
++ int result;
++
++ for_each_online_cpu(i) {
++ result = bind_virq_to_irqhandler(VIRQ_XENOPROF,
++ i,
++ xenoprof_ovf_interrupt,
++ SA_INTERRUPT,
++ "xenoprof",
++ NULL);
++
++ if (result < 0) {
++ unbind_virq();
++ return result;
++ }
++
++ ovf_irq[i] = result;
++ }
++
++ return 0;
++}
++
++
++static void unmap_passive_list(void)
++{
++ int i;
++ for (i = 0; i < pdomains; i++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[i]);
++ pdomains = 0;
++}
++
++
++static int map_xenoprof_buffer(int max_samples)
++{
++ struct xenoprof_get_buffer get_buffer;
++ struct xenoprof_buf *buf;
++ int ret, i;
++
++ if (shared_buffer.buffer)
++ return 0;
++
++ get_buffer.max_samples = max_samples;
++ ret = xenoprof_arch_map_shared_buffer(&get_buffer, &shared_buffer);
++ if (ret)
++ return ret;
++ nbuf = get_buffer.nbuf;
++
++ for (i = 0; i < nbuf; i++) {
++ buf = (struct xenoprof_buf*)
++ &shared_buffer.buffer[i * get_buffer.bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ xenoprof_buf[buf->vcpu_id] = buf;
++ }
++
++ return 0;
++}
++
++
++static int xenoprof_setup(void)
++{
++ int ret;
++
++ if ((ret = map_xenoprof_buffer(MAX_XENOPROF_SAMPLES)))
++ return ret;
++
++ if ((ret = bind_virq()))
++ return ret;
++
++ if (xenoprof_is_primary) {
++ /* Define dom0 as an active domain if not done yet */
++ if (!active_defined) {
++ domid_t domid;
++ ret = HYPERVISOR_xenoprof_op(
++ XENOPROF_reset_active_list, NULL);
++ if (ret)
++ goto err;
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(
++ XENOPROF_set_active, &domid);
++ if (ret)
++ goto err;
++ active_defined = 1;
++ }
++
++ if (backtrace_depth > 0) {
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_backtrace,
++ &backtrace_depth);
++ if (ret)
++ backtrace_depth = 0;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reserve_counters, NULL);
++ if (ret)
++ goto err;
++
++ xenoprof_arch_counter();
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_setup_events, NULL);
++ if (ret)
++ goto err;
++ }
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_enable_virq, NULL);
++ if (ret)
++ goto err;
++
++ xenoprof_enabled = 1;
++ return 0;
++ err:
++ unbind_virq();
++ return ret;
++}
++
++
++static void xenoprof_shutdown(void)
++{
++ xenoprof_enabled = 0;
++
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_disable_virq, NULL));
++
++ if (xenoprof_is_primary) {
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_release_counters,
++ NULL));
++ active_defined = 0;
++ }
++
++ unbind_virq();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary)
++ unmap_passive_list();
++}
++
++
++static int xenoprof_start(void)
++{
++ int ret = 0;
++
++ if (xenoprof_is_primary)
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_start, NULL);
++ if (!ret)
++ xenoprof_arch_start();
++ return ret;
++}
++
++
++static void xenoprof_stop(void)
++{
++ if (xenoprof_is_primary)
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_stop, NULL));
++ xenoprof_arch_stop();
++}
++
++
++static int xenoprof_set_active(int * active_domains,
++ unsigned int adomains)
++{
++ int ret = 0;
++ int i;
++ int set_dom0 = 0;
++ domid_t domid;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (adomains > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list, NULL);
++ if (ret)
++ return ret;
++
++ for (i = 0; i < adomains; i++) {
++ domid = active_domains[i];
++ if (domid != active_domains[i]) { /* value didn't fit in domid_t */
++ ret = -EINVAL;
++ goto out;
++ }
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ if (ret)
++ goto out;
++ if (active_domains[i] == 0)
++ set_dom0 = 1;
++ }
++ /* dom0 must always be active but may not be in the list */
++ if (!set_dom0) {
++ domid = 0;
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_set_active, &domid);
++ }
++
++out:
++ if (ret)
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_reset_active_list,
++ NULL));
++ active_defined = !ret;
++ return ret;
++}
++
++static int xenoprof_set_passive(int * p_domains,
++ unsigned int pdoms)
++{
++ int ret;
++ unsigned int i, j;
++ struct xenoprof_buf *buf;
++
++ if (!xenoprof_is_primary)
++ return 0;
++
++ if (pdoms > MAX_OPROF_DOMAINS)
++ return -E2BIG;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_reset_passive_list, NULL);
++ if (ret)
++ return ret;
++ unmap_passive_list();
++
++ for (i = 0; i < pdoms; i++) {
++ passive_domains[i].domain_id = p_domains[i];
++ passive_domains[i].max_samples = 2048;
++ ret = xenoprof_arch_set_passive(&passive_domains[i],
++ &p_shared_buffer[i]);
++ if (ret)
++ goto out;
++ for (j = 0; j < passive_domains[i].nbuf; j++) {
++ buf = (struct xenoprof_buf *)
++ &p_shared_buffer[i].buffer[
++ j * passive_domains[i].bufsize];
++ BUG_ON(buf->vcpu_id >= MAX_VIRT_CPUS);
++ p_xenoprof_buf[i][buf->vcpu_id] = buf;
++ }
++ }
++
++ pdomains = pdoms;
++ return 0;
++
++out:
++ for (j = 0; j < i; j++)
++ xenoprof_arch_unmap_shared_buffer(&p_shared_buffer[j]);
++
++ return ret;
++}
++
++
++/* The dummy backtrace function to keep oprofile happy
++ * The real backtrace is done in xen
++ */
++static void xenoprof_dummy_backtrace(struct pt_regs * const regs,
++ unsigned int depth)
++{
++ /* this should never be called */
++ BUG();
++ return;
++}
++
++
++static struct oprofile_operations xenoprof_ops = {
++#ifdef HAVE_XENOPROF_CREATE_FILES
++ .create_files = xenoprof_create_files,
++#endif
++ .set_active = xenoprof_set_active,
++ .set_passive = xenoprof_set_passive,
++ .setup = xenoprof_setup,
++ .shutdown = xenoprof_shutdown,
++ .start = xenoprof_start,
++ .stop = xenoprof_stop,
++ .backtrace = xenoprof_dummy_backtrace
++};
++
++
++/* in order to get driverfs right */
++static int using_xenoprof;
++
++int __init xenoprofile_init(struct oprofile_operations * ops)
++{
++ struct xenoprof_init init;
++ unsigned int i;
++ int ret;
++
++ ret = HYPERVISOR_xenoprof_op(XENOPROF_init, &init);
++ if (!ret) {
++ xenoprof_arch_init_counter(&init);
++ xenoprof_is_primary = init.is_primary;
++
++ /* cpu_type is detected by Xen */
++ cpu_type[XENOPROF_CPU_TYPE_SIZE-1] = 0;
++ strncpy(cpu_type, init.cpu_type, XENOPROF_CPU_TYPE_SIZE - 1);
++ xenoprof_ops.cpu_type = cpu_type;
++
++ init_driverfs();
++ using_xenoprof = 1;
++ *ops = xenoprof_ops;
++
++ for (i = 0; i < NR_CPUS; i++)
++ ovf_irq[i] = -1;
++
++ active_defined = 0;
++ }
++
++ printk(KERN_INFO "%s: ret %d, events %d, xenoprof_is_primary %d\n",
++ __func__, ret, init.num_events, xenoprof_is_primary);
++ return ret;
++}
++
++
++void xenoprofile_exit(void)
++{
++ if (using_xenoprof)
++ exit_driverfs();
++
++ xenoprof_arch_unmap_shared_buffer(&shared_buffer);
++ if (xenoprof_is_primary) {
++ unmap_passive_list();
++ WARN_ON(HYPERVISOR_xenoprof_op(XENOPROF_shutdown, NULL));
++ }
++}
--- /dev/null
+Subject: xen3 xen-kconfig
+From: http://xenbits.xensource.com/linux-2.6.18-xen.hg (tip 728:832aac894efd)
+Patch-mainline: obsolete
+Acked-by: jbeulich@novell.com
+
+--- sle11-2009-06-04.orig/arch/x86/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/arch/x86/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -44,6 +44,7 @@ config GENERIC_LOCKBREAK
+
+ config GENERIC_TIME
+ def_bool y
++ depends on !X86_XEN
+
+ config GENERIC_CMOS_UPDATE
+ def_bool y
+@@ -191,7 +192,7 @@ config X86_64_SMP
+
+ config X86_HT
+ bool
+- depends on SMP
++ depends on SMP && !XEN
+ depends on (X86_32 && !X86_VOYAGER) || X86_64
+ default y
+
+@@ -203,6 +204,17 @@ config X86_BIOS_REBOOT
+ config X86_TRAMPOLINE
+ bool
+ depends on X86_SMP || (X86_VOYAGER && SMP) || (64BIT && ACPI_SLEEP)
++ depends on !XEN
++ default y
++
++config X86_NO_TSS
++ bool
++ depends on X86_XEN || X86_64_XEN
++ default y
++
++config X86_NO_IDT
++ bool
++ depends on X86_XEN || X86_64_XEN
+ default y
+
+ config KTIME_SCALAR
+@@ -271,6 +283,17 @@ config X86_PC
+ help
+ Choose this option if your computer is a standard PC or compatible.
+
++config X86_XEN
++ bool "Xen-compatible"
++ select XEN
++ select X86_PAE
++ select X86_UP_APIC if !SMP && XEN_PRIVILEGED_GUEST
++ select X86_UP_IOAPIC if !SMP && XEN_PRIVILEGED_GUEST
++ select SWIOTLB
++ help
++ Choose this option if you plan to run this kernel on top of the
++ Xen Hypervisor.
++
+ config X86_ELAN
+ bool "AMD Elan"
+ depends on X86_32
+@@ -338,6 +361,13 @@ config X86_BIGSMP
+
+ endif
+
++config X86_64_XEN
++ bool "Enable Xen compatible kernel"
++ select XEN
++ select SWIOTLB
++ help
++ This option will compile a kernel compatible with the Xen hypervisor.
++
+ config X86_VSMP
+ bool "Support for ScaleMP vSMP"
+ select PARAVIRT
+@@ -478,6 +508,7 @@ source "arch/x86/Kconfig.cpu"
+ config HPET_TIMER
+ def_bool X86_64
+ prompt "HPET Timer Support" if X86_32
++ depends on !X86_XEN && !X86_64_XEN
+ help
+ Use the IA-PC HPET (High Precision Event Timer) to manage
+ time in preference to the PIT and RTC, if a HPET is
+@@ -514,7 +545,7 @@ config GART_IOMMU
+ default y
+ select SWIOTLB
+ select AGP
+- depends on X86_64 && PCI
++ depends on X86_64 && PCI && !X86_64_XEN
+ help
+ Support for full DMA access of devices with 32bit memory access only
+ on systems with more than 3GB. This is usually needed for USB,
+@@ -529,7 +560,7 @@ config GART_IOMMU
+ config CALGARY_IOMMU
+ bool "IBM Calgary IOMMU support"
+ select SWIOTLB
+- depends on X86_64 && PCI && EXPERIMENTAL
++ depends on X86_64 && PCI && !X86_64_XEN && EXPERIMENTAL
+ help
+ Support for hardware IOMMUs in IBM's xSeries x366 and x460
+ systems. Needed to run systems with more than 3GB of memory
+@@ -597,6 +628,7 @@ config NR_CPUS
+ depends on SMP
+ default "4096" if MAXSMP
+ default "32" if X86_NUMAQ || X86_SUMMIT || X86_BIGSMP || X86_ES7000
++ default "16" if X86_64_XEN
+ default "8"
+ help
+ This allows you to specify the maximum number of CPUs which this
+@@ -652,7 +684,7 @@ config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
+
+ config X86_UP_APIC
+ bool "Local APIC support on uniprocessors"
+- depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH)
++ depends on X86_32 && !SMP && !(X86_VOYAGER || X86_GENERICARCH || XEN_UNPRIVILEGED_GUEST)
+ help
+ A local APIC (Advanced Programmable Interrupt Controller) is an
+ integrated interrupt controller in the CPU. If you have a single-CPU
+@@ -678,18 +710,25 @@ config X86_UP_IOAPIC
+ config X86_LOCAL_APIC
+ def_bool y
+ depends on X86_64 || (X86_32 && (X86_UP_APIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
++ depends on !XEN_UNPRIVILEGED_GUEST
+
+ config X86_IO_APIC
+ def_bool y
+ depends on X86_64 || (X86_32 && (X86_UP_IOAPIC || (SMP && !X86_VOYAGER) || X86_GENERICARCH))
++ depends on !XEN_UNPRIVILEGED_GUEST
+
+ config X86_VISWS_APIC
+ def_bool y
+ depends on X86_32 && X86_VISWS
+
++config X86_XEN_GENAPIC
++ bool
++ depends on X86_64_XEN
++ default y
++
+ config X86_MCE
+ bool "Machine Check Exception"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || XEN)
+ ---help---
+ Machine Check Exception support allows the processor to notify the
+ kernel if it detects a problem (e.g. overheating, component failure).
+@@ -789,7 +828,7 @@ config I8K
+ config X86_REBOOTFIXUPS
+ def_bool n
+ prompt "Enable X86 board specific fixups for reboot"
+- depends on X86_32 && X86
++ depends on X86_32 && !X86_XEN
+ ---help---
+ This enables chipset and/or board specific fixups to be done
+ in order to get reboot to work correctly. This is only needed on
+@@ -806,6 +845,7 @@ config X86_REBOOTFIXUPS
+
+ config MICROCODE
+ tristate "/dev/cpu/microcode - Intel IA32 CPU microcode support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ select FW_LOADER
+ ---help---
+ If you say Y here, you will be able to update the microcode on
+@@ -973,7 +1013,7 @@ config DIRECT_GBPAGES
+ # Common NUMA Features
+ config NUMA
+ bool "Numa Memory Allocation and Scheduler Support (EXPERIMENTAL)"
+- depends on SMP
++ depends on SMP && !XEN
+ depends on X86_64 || (X86_32 && HIGHMEM64G && (X86_NUMAQ || X86_BIGSMP || X86_SUMMIT && ACPI) && EXPERIMENTAL)
+ default n if X86_PC
+ default y if (X86_NUMAQ || X86_SUMMIT || X86_BIGSMP)
+@@ -1075,7 +1115,7 @@ config ARCH_SPARSEMEM_DEFAULT
+
+ config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+- depends on X86_64 || NUMA || (EXPERIMENTAL && X86_PC)
++ depends on (X86_64 && !X86_64_XEN) || NUMA || (EXPERIMENTAL && X86_PC)
+ select SPARSEMEM_STATIC if X86_32
+ select SPARSEMEM_VMEMMAP_ENABLE if X86_64
+
+@@ -1121,6 +1161,7 @@ config X86_RESERVE_LOW_64K
+ config MATH_EMULATION
+ bool
+ prompt "Math emulation" if X86_32
++ depends on !X86_XEN
+ ---help---
+ Linux can emulate a math coprocessor (used for floating point
+ operations) if you don't have one. 486DX and Pentium processors have
+@@ -1146,6 +1187,7 @@ config MATH_EMULATION
+
+ config MTRR
+ bool "MTRR (Memory Type Range Register) support"
++ depends on !XEN_UNPRIVILEGED_GUEST
+ ---help---
+ On Intel P6 family processors (Pentium Pro, Pentium II and later)
+ the Memory Type Range Registers (MTRRs) may be used to control
+@@ -1227,7 +1269,7 @@ config X86_PAT
+ config EFI
+ def_bool n
+ prompt "EFI runtime service support"
+- depends on ACPI
++ depends on ACPI && !X86_XEN && !X86_64_XEN
+ ---help---
+ This enables the kernel to use EFI runtime services that are
+ available (such as the EFI variable services).
+@@ -1242,7 +1284,7 @@ config EFI
+ config IRQBALANCE
+ def_bool y
+ prompt "Enable kernel irq balancing"
+- depends on X86_32 && SMP && X86_IO_APIC
++ depends on X86_32 && SMP && X86_IO_APIC && !X86_XEN
+ help
+ The default yes will allow the kernel to do irq load balancing.
+ Saying no will keep the kernel from doing irq load balancing.
+@@ -1305,6 +1347,7 @@ source kernel/Kconfig.hz
+ config KEXEC
+ bool "kexec system call"
+ depends on X86_BIOS_REBOOT
++ depends on !XEN_UNPRIVILEGED_GUEST
+ help
+ kexec is a system call that implements the ability to shutdown your
+ current kernel, and to start another kernel. It is like a reboot
+@@ -1322,6 +1365,7 @@ config KEXEC
+ config CRASH_DUMP
+ bool "kernel crash dumps"
+ depends on X86_64 || (X86_32 && HIGHMEM)
++ depends on !XEN
+ help
+ Generate crash dump after being started by kexec.
+ This should be normally only set in special crash dump kernels
+@@ -1440,6 +1484,7 @@ config COMPAT_VDSO
+ def_bool y
+ prompt "Compat VDSO support"
+ depends on X86_32 || IA32_EMULATION
++ depends on !X86_XEN
+ help
+ Map the 32-bit VDSO to the predictable old-style address too.
+ ---help---
+@@ -1462,7 +1507,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
+ depends on NUMA
+
+ menu "Power management options"
+- depends on !X86_VOYAGER
++ depends on !(X86_VOYAGER || XEN_UNPRIVILEGED_GUEST)
+
+ config ARCH_HIBERNATION_HEADER
+ def_bool y
+@@ -1479,7 +1524,7 @@ config X86_APM_BOOT
+
+ menuconfig APM
+ tristate "APM (Advanced Power Management) BIOS support"
+- depends on X86_32 && PM_SLEEP
++ depends on X86_32 && PM_SLEEP && !XEN
+ ---help---
+ APM is a BIOS specification for saving power using several different
+ techniques. This is mostly useful for battery powered laptops with
+@@ -1645,6 +1690,7 @@ choice
+
+ config PCI_GOBIOS
+ bool "BIOS"
++ depends on !X86_XEN
+
+ config PCI_GOMMCONFIG
+ bool "MMConfig"
+@@ -1656,6 +1702,13 @@ config PCI_GOOLPC
+ bool "OLPC"
+ depends on OLPC
+
++config PCI_GOXEN_FE
++ bool "Xen PCI Frontend"
++ depends on X86_XEN
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
+ config PCI_GOANY
+ bool "Any"
+
+@@ -1663,7 +1716,7 @@ endchoice
+
+ config PCI_BIOS
+ def_bool y
+- depends on X86_32 && PCI && (PCI_GOBIOS || PCI_GOANY)
++ depends on X86_32 && PCI && !XEN && (PCI_GOBIOS || PCI_GOANY)
+
+ # x86-64 doesn't support PCI BIOS access from long mode so always go direct.
+ config PCI_DIRECT
+@@ -1686,6 +1739,22 @@ config PCI_MMCONFIG
+ bool "Support mmconfig PCI config space access"
+ depends on X86_64 && PCI && ACPI
+
++config XEN_PCIDEV_FRONTEND
++ bool "Xen PCI Frontend" if X86_64
++ depends on PCI && ((X86_XEN && (PCI_GOXEN_FE || PCI_GOANY)) || X86_64_XEN)
++ select HOTPLUG
++ default y
++ help
++ The PCI device frontend driver allows the kernel to import arbitrary
++ PCI devices from a PCI backend to support PCI driver domains.
++
++config XEN_PCIDEV_FE_DEBUG
++ bool "Xen PCI Frontend Debugging"
++ depends on XEN_PCIDEV_FRONTEND
++ default n
++ help
++ Enables some debug statements within the PCI Frontend.
++
+ config DMAR
+ bool "Support for DMA Remapping Devices (EXPERIMENTAL)"
+ depends on X86_64 && PCI_MSI && ACPI && EXPERIMENTAL
+@@ -1736,7 +1805,7 @@ if X86_32
+
+ config ISA
+ bool "ISA support"
+- depends on !X86_VOYAGER
++ depends on !X86_VOYAGER && !XEN
+ help
+ Find out whether you have ISA slots on your motherboard. ISA is the
+ name of a bus system, i.e. the way the CPU talks to the other stuff
+@@ -1763,7 +1832,7 @@ config EISA
+ source "drivers/eisa/Kconfig"
+
+ config MCA
+- bool "MCA support" if !X86_VOYAGER
++ bool "MCA support" if !X86_VOYAGER && !XEN
+ default y if X86_VOYAGER
+ help
+ MicroChannel Architecture is found in some IBM PS/2 machines and
+@@ -1875,4 +1944,6 @@ source "crypto/Kconfig"
+
+ source "arch/x86/kvm/Kconfig"
+
++source "drivers/xen/Kconfig"
++
+ source "lib/Kconfig"
+--- sle11-2009-06-04.orig/arch/x86/Kconfig.cpu 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/arch/x86/Kconfig.cpu 2009-06-04 10:18:21.000000000 +0200
+@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
+
+ config X86_F00F_BUG
+ def_bool y
+- depends on M586MMX || M586TSC || M586 || M486 || M386
++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !X86_NO_IDT
+
+ config X86_WP_WORKS_OK
+ def_bool y
+@@ -397,6 +397,7 @@ config X86_P6_NOP
+ config X86_TSC
+ def_bool y
+ depends on ((MWINCHIP3D || MWINCHIP2 || MCRUSOE || MEFFICEON || MCYRIXIII || MK7 || MK6 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || MK8 || MVIAC3_2 || MVIAC7 || MGEODEGX1 || MGEODE_LX || MCORE2) && !X86_NUMAQ) || X86_64
++ depends on !XEN
+
+ config X86_CMPXCHG64
+ def_bool y
+--- sle11-2009-06-04.orig/arch/x86/Kconfig.debug 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/arch/x86/Kconfig.debug 2009-06-04 10:18:21.000000000 +0200
+@@ -143,7 +143,7 @@ config 4KSTACKS
+ config DOUBLEFAULT
+ default y
+ bool "Enable doublefault exception handler" if EMBEDDED
+- depends on X86_32
++ depends on X86_32 && !X86_NO_TSS
+ help
+ This option allows trapping of rare doublefault exceptions that
+ would otherwise cause a system to silently reboot. Disabling this
+--- sle11-2009-06-04.orig/drivers/acpi/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/acpi/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -362,6 +362,7 @@ config ACPI_SYSTEM
+ config X86_PM_TIMER
+ bool "Power Management Timer Support" if EMBEDDED
+ depends on X86
++ depends on !XEN
+ default y
+ help
+ The Power Management Timer is available on all ACPI-capable,
+@@ -415,4 +416,13 @@ config ACPI_SBS
+ This driver adds support for the Smart Battery System, another
+ type of access to battery information, found on some laptops.
+
++config ACPI_PV_SLEEP
++ bool
++ depends on X86 && XEN && ACPI_SLEEP
++ default y
++
++config PROCESSOR_EXTERNAL_CONTROL
++ bool
++ depends on (X86 || IA64) && XEN
++ default y
+ endif # ACPI
+--- sle11-2009-06-04.orig/drivers/char/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/char/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -1037,7 +1037,7 @@ config MAX_RAW_DEVS
+ config HPET
+ bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ default n
+- depends on ACPI
++ depends on ACPI && !XEN
+ help
+ If you say Y here, you will have a miscdevice named "/dev/hpet/". Each
+ open selects one of the timers supported by the HPET. The timers are
+--- sle11-2009-06-04.orig/drivers/char/tpm/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/char/tpm/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -57,4 +57,13 @@ config TCG_INFINEON
+ Further information on this driver and the supported hardware
+ can be found at http://www.prosec.rub.de/tpm
+
++config TCG_XEN
++ tristate "XEN TPM Interface"
++ depends on XEN
++ ---help---
++ If you want to make TPM support available to a Xen user domain,
++ say Yes and it will be accessible from within Linux.
++ To compile this driver as a module, choose M here; the module
++ will be called tpm_xenu.
++
+ endif # TCG_TPM
+--- sle11-2009-06-04.orig/drivers/cpufreq/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/cpufreq/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -1,5 +1,6 @@
+ config CPU_FREQ
+ bool "CPU Frequency scaling"
++ depends on !PROCESSOR_EXTERNAL_CONTROL
+ help
+ CPU Frequency scaling allows you to change the clock speed of
+ CPUs on the fly. This is a nice method to save power, because
+--- sle11-2009-06-04.orig/drivers/serial/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/serial/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -10,6 +10,7 @@ menu "Serial drivers"
+ config SERIAL_8250
+ tristate "8250/16550 and compatible serial support"
+ depends on (BROKEN || !SPARC)
++ depends on !XEN_DISABLE_SERIAL
+ select SERIAL_CORE
+ ---help---
+ This selects whether you want to include the driver for the standard
+--- sle11-2009-06-04.orig/drivers/video/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/video/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -1583,7 +1583,7 @@ config FB_CYBLA
+ tristate "Cyberblade/i1 support"
+ depends on FB && PCI && X86_32 && !64BIT
+ select FB_CFB_IMAGEBLIT
+- select VIDEO_SELECT
++ select VIDEO_SELECT if !XEN
+ ---help---
+ This driver is supposed to support the Trident Cyberblade/i1
+ graphics core integrated in the VIA VT8601A North Bridge,
+--- sle11-2009-06-04.orig/drivers/video/console/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/video/console/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -46,6 +46,7 @@ config VGACON_SOFT_SCROLLBACK_SIZE
+ config VIDEO_SELECT
+ bool "Video mode selection support"
+ depends on X86 && VGA_CONSOLE
++ depends on !XEN
+ ---help---
+ This enables support for text mode selection on kernel startup. If
+ you want to take advantage of some high-resolution text mode your
+--- sle11-2009-06-04.orig/drivers/xen/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/drivers/xen/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -1,6 +1,307 @@
++#
++# This Kconfig describes Xen options
++#
++
++mainmenu "Xen Configuration"
++
++config XEN
++ bool
++
++if XEN
++config XEN_INTERFACE_VERSION
++ hex
++ default 0x00030207
++
++menu "XEN"
++
++config XEN_PRIVILEGED_GUEST
++ bool "Privileged Guest (domain 0)"
++ select PCI_REASSIGN if PCI
++ help
++ Support for privileged operation (domain 0)
++
++config XEN_UNPRIVILEGED_GUEST
++ def_bool !XEN_PRIVILEGED_GUEST
++
++config XEN_PRIVCMD
++ def_bool y
++ depends on PROC_FS
++
++config XEN_XENBUS_DEV
++ def_bool y
++ depends on PROC_FS
++
++config XEN_NETDEV_ACCEL_SFC_UTIL
++ depends on X86
++ tristate
++
++config XEN_BACKEND
++ tristate "Backend driver support"
++ default XEN_PRIVILEGED_GUEST
++ help
++ Support for backend device drivers that provide I/O services
++ to other virtual machines.
++
++config XEN_BLKDEV_BACKEND
++ tristate "Block-device backend driver"
++ depends on XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The block-device backend driver allows the kernel to export its
++ block devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_BLKDEV_TAP
++ tristate "Block-device tap backend driver"
++ depends on XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The block tap driver is an alternative to the block back driver
++ and allows VM block requests to be redirected to userspace through
++ a device interface. The tap allows user-space development of
++ high-performance block backends, where disk images may be implemented
++ as files, in memory, or on other hosts across the network. This
++ driver can safely coexist with the existing blockback driver.
++
++config XEN_NETDEV_BACKEND
++ tristate "Network-device backend driver"
++ depends on XEN_BACKEND && NET
++ default XEN_BACKEND
++ help
++ The network-device backend driver allows the kernel to export its
++ network devices to other guests via a high-performance shared-memory
++ interface.
++
++config XEN_NETDEV_PIPELINED_TRANSMITTER
++ bool "Pipelined transmitter (DANGEROUS)"
++ depends on XEN_NETDEV_BACKEND
++ help
++ If the net backend is a dumb domain, such as a transparent Ethernet
++ bridge with no local IP interface, it is safe to say Y here to get
++ slightly lower network overhead.
++ If the backend has a local IP interface; or may be doing smart things
++ like reassembling packets to perform firewall filtering; or if you
++ are unsure; or if you experience network hangs when this option is
++ enabled; then you must say N here.
++
++config XEN_NETDEV_ACCEL_SFC_BACKEND
++ tristate "Network-device backend driver acceleration for Solarflare NICs"
++ depends on XEN_NETDEV_BACKEND && SFC && SFC_RESOURCE && X86
++ select XEN_NETDEV_ACCEL_SFC_UTIL
++ default m
++
++config XEN_NETDEV_LOOPBACK
++ tristate "Network-device loopback driver"
++ depends on XEN_NETDEV_BACKEND
++ help
++ A two-interface loopback device to emulate a local netfront-netback
++ connection. If unsure, it is probably safe to say N here.
++
++config XEN_PCIDEV_BACKEND
++ tristate "PCI-device backend driver"
++ depends on PCI && XEN_BACKEND
++ default XEN_BACKEND
++ help
++ The PCI device backend driver allows the kernel to export arbitrary
++ PCI devices to other guests. If you select this to be a module, you
++ will need to make sure no other driver has bound to the device(s)
++ you want to make visible to other guests.
++
++choice
++ prompt "PCI Backend Mode"
++ depends on XEN_PCIDEV_BACKEND
++ default XEN_PCIDEV_BACKEND_VPCI if !IA64
++ default XEN_PCIDEV_BACKEND_CONTROLLER if IA64
++
++config XEN_PCIDEV_BACKEND_VPCI
++ bool "Virtual PCI"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ For example, a device at 03:05.0 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.1.
++
++config XEN_PCIDEV_BACKEND_PASS
++ bool "Passthrough"
++ ---help---
++ This PCI Backend provides a real view of the PCI topology to the
++ frontend (for example, a device at 06:01.b will still appear at
++ 06:01.b to the frontend). This is similar to how Xen 2.0.x exposed
++ PCI devices to its driver domains. This may be required for drivers
++ which depend on finding their hardware in certain bus/slot
++ locations.
++
++config XEN_PCIDEV_BACKEND_SLOT
++ bool "Slot"
++ ---help---
++ This PCI Backend hides the true PCI topology and makes the frontend
++ think there is a single PCI bus with only the exported devices on it.
++ Contrary to the virtual PCI backend, a function becomes a new slot.
++ For example, a device at 03:05.2 will be re-assigned to 00:00.0. A
++ second device at 02:1a.1 will be re-assigned to 00:01.0.
++
++config XEN_PCIDEV_BACKEND_CONTROLLER
++ bool "Controller"
++ depends on IA64
++ ---help---
++ This PCI backend virtualizes the PCI bus topology by providing a
++ virtual bus per PCI root device. Devices which are physically under
++ the same root bus will appear on the same virtual bus. For systems
++ with complex I/O addressing, this is the only backend which supports
++ extended I/O port spaces and MMIO translation offsets. This backend
++ also supports slot virtualization. For example, a device at
++ 0000:01:02.1 will be re-assigned to 0000:00:00.0. A second device
++ at 0000:02:05.0 (behind a P2P bridge on bus 0000:01) will be
++ re-assigned to 0000:00:01.0. A third device at 0000:16:05.0 (under
++ a different PCI root bus) will be re-assigned to 0000:01:00.0.
++
++endchoice
++
++config XEN_PCIDEV_BE_DEBUG
++ bool "PCI Backend Debugging"
++ depends on XEN_PCIDEV_BACKEND
++
++config XEN_TPMDEV_BACKEND
++ tristate "TPM-device backend driver"
++ depends on XEN_BACKEND
++ help
++ The TPM-device backend driver services TPM requests from frontends in other guests.
++
++config XEN_SCSI_BACKEND
++ tristate "SCSI backend driver"
++ depends on SCSI && XEN_BACKEND
++ default m
++ help
++ The SCSI backend driver allows the kernel to export its SCSI Devices
++ to other guests via a high-performance shared-memory interface.
++
++config XEN_BLKDEV_FRONTEND
++ tristate "Block-device frontend driver"
++ default y
++ help
++ The block-device frontend driver allows the kernel to access block
++ devices mounted within another guest OS. Unless you are building a
++ dedicated device-driver domain or the master control domain
++ (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_FRONTEND
++ tristate "Network-device frontend driver"
++ depends on NET
++ default y
++ help
++ The network-device frontend driver allows the kernel to access
++ network interfaces within another guest OS. Unless you are building a
++ dedicated device-driver domain or the master control domain
++ (domain 0), you almost certainly want to say Y here.
++
++config XEN_NETDEV_ACCEL_SFC_FRONTEND
++ tristate "Network-device frontend driver acceleration for Solarflare NICs"
++ depends on XEN_NETDEV_FRONTEND && X86
++ select XEN_NETDEV_ACCEL_SFC_UTIL
++ default m
++
++config XEN_SCSI_FRONTEND
++ tristate "SCSI frontend driver"
++ depends on SCSI
++ default m
++ help
++ The SCSI frontend driver allows the kernel to access SCSI Devices
++ within another guest OS.
++
++config XEN_GRANT_DEV
++ tristate "User-space granted page access driver"
++ default XEN_PRIVILEGED_GUEST
++ help
++ Device for accessing (in user-space) pages that have been granted
++ by other domains.
++
++config XEN_FRAMEBUFFER
++ tristate "Framebuffer-device frontend driver"
++ depends on FB
++ select FB_CFB_FILLRECT
++ select FB_CFB_COPYAREA
++ select FB_CFB_IMAGEBLIT
++ default y
++ help
++ The framebuffer-device frontend driver allows the kernel to create a
++ virtual framebuffer. This framebuffer can be viewed in another
++ domain. Unless this domain has access to a real video card, you
++ probably want to say Y here.
++
++config XEN_KEYBOARD
++ tristate "Keyboard-device frontend driver"
++ depends on XEN_FRAMEBUFFER && INPUT
++ default y
++ help
++ The keyboard-device frontend driver allows the kernel to create a
++ virtual keyboard. This keyboard can then be driven by another
++ domain. If you've said Y to CONFIG_XEN_FRAMEBUFFER, you probably
++ want to say Y here.
++
++config XEN_DISABLE_SERIAL
++ bool "Disable serial port drivers"
++ default y
++ help
++ Disable serial port drivers, allowing the Xen console driver
++ to provide a serial console at ttyS0.
++
++config XEN_SYSFS
++ tristate "Export Xen attributes in sysfs"
++ depends on SYSFS
++ select SYS_HYPERVISOR
++ default y
++ help
++ Xen hypervisor attributes will show up under /sys/hypervisor/.
++
++choice
++ prompt "Xen version compatibility"
++ default XEN_COMPAT_030002_AND_LATER
++
++ config XEN_COMPAT_030002_AND_LATER
++ bool "3.0.2 and later"
++
++ config XEN_COMPAT_030004_AND_LATER
++ bool "3.0.4 and later"
++
++ config XEN_COMPAT_030100_AND_LATER
++ bool "3.1.0 and later"
++
++ config XEN_COMPAT_LATEST_ONLY
++ bool "no compatibility code"
++
++endchoice
++
++config XEN_COMPAT
++ hex
++ default 0xffffff if XEN_COMPAT_LATEST_ONLY
++ default 0x030100 if XEN_COMPAT_030100_AND_LATER
++ default 0x030004 if XEN_COMPAT_030004_AND_LATER
++ default 0x030002 if XEN_COMPAT_030002_AND_LATER
++ default 0
++
++endmenu
++
++config HAVE_IRQ_IGNORE_UNHANDLED
++ def_bool y
++
++config NO_IDLE_HZ
++ def_bool y
++
++config XEN_SMPBOOT
++ def_bool y
++ depends on SMP && !PPC_XEN
++
++config XEN_XENCOMM
++ bool
++
++config XEN_DEVMEM
++ def_bool y
++
++endif
++
+ config XEN_BALLOON
+- bool "Xen memory balloon driver"
+- depends on XEN
++ bool "Xen memory balloon driver" if PARAVIRT_XEN
++ depends on (XEN && !PPC_XEN) || PARAVIRT_XEN
+ default y
+ help
+ The balloon driver allows the Xen domain to request more memory from
+@@ -8,12 +309,14 @@ config XEN_BALLOON
+ return unneeded memory to the system.
+
+ config XEN_SCRUB_PAGES
+- bool "Scrub pages before returning them to system"
+- depends on XEN_BALLOON
++ bool "Scrub memory before freeing it to Xen"
++ depends on XEN || XEN_BALLOON
+ default y
+ help
+- Scrub pages before returning them to the system for reuse by
+- other domains. This makes sure that any confidential data
+- is not accidentally visible to other domains. Is it more
+- secure, but slightly less efficient.
++ Erase memory contents before freeing it back to Xen's global
++ pool. This ensures that any secrets contained within that
++ memory (e.g., private keys) cannot be found by other guests that
++ may be running on the machine. Most people will want to say Y here.
++ If security is not a concern then you may increase performance by
++ saying N.
+ If in doubt, say yes.
+--- sle11-2009-06-04.orig/fs/Kconfig 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/fs/Kconfig 2009-06-04 10:18:21.000000000 +0200
+@@ -1011,6 +1011,7 @@ config HUGETLBFS
+ bool "HugeTLB file system support"
+ depends on X86 || IA64 || PPC64 || SPARC64 || (SUPERH && MMU) || \
+ (S390 && 64BIT) || BROKEN
++ depends on !XEN
+ help
+ hugetlbfs is a filesystem backing for HugeTLB pages, based on
+ ramfs. For architectures that support it, say Y here and read
+--- sle11-2009-06-04.orig/kernel/Kconfig.preempt 2009-06-04 10:17:50.000000000 +0200
++++ sle11-2009-06-04/kernel/Kconfig.preempt 2009-06-04 10:18:21.000000000 +0200
+@@ -35,6 +35,7 @@ config PREEMPT_VOLUNTARY
+
+ config PREEMPT
+ bool "Preemptible Kernel (Low-Latency Desktop)"
++ depends on !XEN
+ help
+ This option reduces the latency of the kernel by making
+ all kernel code (that is not executing in a critical section)
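+
+For orientation, one plausible .config fragment for an unprivileged
+(domU) i386 guest under the options introduced above; illustrative
+only, since the exact values depend on the rest of the configuration:
+
+	CONFIG_X86_XEN=y
+	CONFIG_XEN=y
+	# CONFIG_XEN_PRIVILEGED_GUEST is not set
+	CONFIG_XEN_UNPRIVILEGED_GUEST=y
+	CONFIG_XEN_BLKDEV_FRONTEND=y
+	CONFIG_XEN_NETDEV_FRONTEND=y
+	CONFIG_XEN_FRAMEBUFFER=y
+	CONFIG_XEN_KEYBOARD=y
+	CONFIG_XEN_DISABLE_SERIAL=y
+	CONFIG_XEN_SCRUB_PAGES=y
+	CONFIG_XEN_COMPAT_030002_AND_LATER=y
+	CONFIG_XEN_COMPAT=0x030002
+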
--- /dev/null
+From: Kurt Garloff <garloff@suse.de>
+Subject: [PATCH] X86: sysctl to allow panic on IOCK NMI error
+References: bnc427979
+
+This patch introduces a sysctl, /proc/sys/kernel/panic_on_io_nmi,
+which defaults to 0 (off). When enabled, the kernel panics upon
+receiving an NMI caused by an I/O error.
+
+An NMI triggered by an I/O error indicates a serious system condition
+that could result in I/O data corruption. Rather than continuing,
+panicking and dumping might be a better choice, so one can figure
+out what is causing the I/O error. This can be especially important
+to sites running I/O-intensive applications where corruption must be
+avoided, e.g. a bank's databases.
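+
+This xen-port version carries only the traps_32-xen.c/traps_64-xen.c
+hunks; the sysctl table entry itself lives in kernel/sysctl.c in the
+base patch and is not shown here. As a sketch, assuming the
+2.6.27-era ctl_table layout, that entry would look roughly like:
+
+	{
+		.ctl_name	= CTL_UNNUMBERED,
+		.procname	= "panic_on_io_nmi",
+		.data		= &panic_on_io_nmi,
+		.maxlen		= sizeof(int),
+		.mode		= 0644,
+		.proc_handler	= &proc_dointvec,
+	},
+
+At run time the knob is then flipped with "sysctl -w
+kernel.panic_on_io_nmi=1" (or by writing to the proc file directly).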
+
+
+Signed-off-by: Roberto Angelino <robertangelino@gmail.com>
+
+
+Automatically created from "patches.suse/panic-on-io-nmi.diff" by xen-port-patches.py
+
+--- sle11-2009-08-26.orig/arch/x86/kernel/traps_32-xen.c 2008-11-25 13:17:09.000000000 +0100
++++ sle11-2009-08-26/arch/x86/kernel/traps_32-xen.c 2008-11-25 13:13:12.000000000 +0100
+@@ -83,6 +83,7 @@ gate_desc idt_table[256]
+ #endif
+
+ int panic_on_unrecovered_nmi;
++int panic_on_io_nmi;
+ int kstack_depth_to_print = 24;
+ static unsigned int code_bytes = 64;
+ #ifdef CONFIG_STACK_UNWIND
+@@ -738,6 +739,9 @@ io_check_error(unsigned char reason, str
+ printk(KERN_EMERG "NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
++ if (panic_on_io_nmi)
++ panic("NMI IOCK error: Not continuing");
++
+ /* Re-enable the IOCK line, wait for a few seconds */
+ clear_io_check_error(reason);
+ }
+--- sle11-2009-08-26.orig/arch/x86/kernel/traps_64-xen.c 2008-11-25 13:17:09.000000000 +0100
++++ sle11-2009-08-26/arch/x86/kernel/traps_64-xen.c 2008-11-25 13:13:12.000000000 +0100
+@@ -56,6 +56,7 @@
+ #include <mach_traps.h>
+
+ int panic_on_unrecovered_nmi;
++int panic_on_io_nmi;
+ int kstack_depth_to_print = 12;
+ static unsigned int code_bytes = 64;
+ #ifdef CONFIG_STACK_UNWIND
+@@ -841,6 +842,9 @@ io_check_error(unsigned char reason, str
+ printk("NMI: IOCK error (debug interrupt?)\n");
+ show_registers(regs);
+
++ if (panic_on_io_nmi)
++ panic("NMI IOCK error: Not continuing");
++
+ /* Re-enable the IOCK line, wait for a few seconds */
+ clear_io_check_error(reason);
+ }
--- /dev/null
+From: Dean Nelson <dcn@sgi.com>
+Date: Thu, 2 Oct 2008 17:18:21 +0000 (-0500)
+Subject: x86, UV: add uv_setup_irq() and uv_teardown_irq() functions, v3
+X-Git-Tag: v2.6.28-rc1~80^2~27
+X-Git-Url: http://git.kernel.org/?p=linux%2Fkernel%2Fgit%2Ftorvalds%2Flinux-2.6.git;a=commitdiff_plain;h=4173a0e7371ece227559b44943c6fd456ee470d1
+References: bnc#442461
+
+x86, UV: add uv_setup_irq() and uv_teardown_irq() functions, v3
+
+Provide a means for UV interrupt MMRs to be set up with the message to be
+sent when an MSI is raised.
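+
+This xen-port version only masks the new uv_*.o objects out of the Xen
+build (see the Makefile hunk below); uv_setup_irq()/uv_teardown_irq()
+themselves come from the referenced upstream commit. From our reading
+of that commit, the prototypes are roughly the following; treat them as
+indicative rather than authoritative:
+
+	extern int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
+				unsigned long mmr_offset);
+	extern void uv_teardown_irq(unsigned int irq, int mmr_blade,
+				    unsigned long mmr_offset);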
+
+Signed-off-by: Dean Nelson <dcn@sgi.com>
+Signed-off-by: Ingo Molnar <mingo@elte.hu>
+Acked-by: Bernhard Walle <bwalle@suse.de>
+
+Automatically created from "patches.fixes/uv_setup_irq.diff" by xen-port-patches.py
+
+Index: head-2008-12-01/arch/x86/kernel/Makefile
+===================================================================
+--- head-2008-12-01.orig/arch/x86/kernel/Makefile 2008-12-01 11:49:07.000000000 +0100
++++ head-2008-12-01/arch/x86/kernel/Makefile 2008-12-01 12:06:39.000000000 +0100
+@@ -126,4 +126,4 @@ endif
+
+ disabled-obj-$(CONFIG_XEN) := %_uv.o crash.o early-quirks.o hpet.o i8253.o \
+ i8259.o irqinit_$(BITS).o pci-swiotlb_64.o reboot.o smpboot.o \
+- tlb_$(BITS).o tsc.o tsc_sync.o vsmp_64.o
++ tlb_$(BITS).o tsc.o tsc_sync.o uv_%.o vsmp_64.o