#define KMSG_COMPONENT "sclp_cmd"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
-#include <linux/cpufeature.h>
#include <linux/completion.h>
-#include <linux/init.h>
-#include <linux/errno.h>
+#include <linux/cpufeature.h>
#include <linux/err.h>
-#include <linux/slab.h>
-#include <linux/string.h>
-#include <linux/mm.h>
-#include <linux/mmzone.h>
+#include <linux/errno.h>
+#include <linux/init.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
-#include <linux/module.h>
-#include <asm/ctlreg.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/slab.h>
+#include <linux/string.h>
#include <asm/chpid.h>
-#include <asm/setup.h>
-#include <asm/page.h>
-#include <asm/sclp.h>
-#include <asm/numa.h>
+#include <asm/ctlreg.h>
#include <asm/facility.h>
+#include <asm/page.h>
#include <asm/page-states.h>
+#include <asm/sclp.h>
#include "sclp.h"
-#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
-#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+#define SCLP_CMDW_ASSIGN_STORAGE 0x000d0001
+#define SCLP_CMDW_UNASSIGN_STORAGE 0x000c0001
+/* CPU configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
+#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
+/* Channel path configuration related functions */
+#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
+#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
+#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
+
+/* Header-only SCCB used for the CPU configure/deconfigure commands. */
+struct cpu_configure_sccb {
+	struct sccb_header header;
+} __packed __aligned(8);
+
+/* SCCB for the channel-path configure/deconfigure commands. */
+struct chp_cfg_sccb {
+	struct sccb_header header;
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __packed;
+
+/* SCCB layout returned by the read channel-path information command. */
+struct chp_info_sccb {
+	struct sccb_header header;
+	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];	/* copied to info->recognized */
+	u8 standby[SCLP_CHP_INFO_MASK_SIZE];	/* copied to info->standby */
+	u8 configured[SCLP_CHP_INFO_MASK_SIZE];	/* copied to info->configured */
+	u8 ccm;
+	u8 reserved[6];
+	u8 cssid;
+} __packed;
static void sclp_sync_callback(struct sclp_req *req, void *data)
{
request->callback_data = &completion;
init_completion(&completion);
- /* Perform sclp request. */
rc = sclp_add_request(request);
if (rc)
goto out;
wait_for_completion(&completion);
- /* Check response. */
if (request->status != SCLP_REQ_DONE) {
pr_warn("sync request failed (cmd=0x%08x, status=0x%02x)\n",
cmd, request->status);
return rc;
}
-/*
- * CPU configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CPU 0x00110001
-#define SCLP_CMDW_DECONFIGURE_CPU 0x00100001
-
int _sclp_get_core_info(struct sclp_core_info *info)
{
- int rc;
- int length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
struct read_cpu_info_sccb *sccb;
+ int rc, length;
if (!SCLP_HAS_CPU_INFO)
return -EOPNOTSUPP;
+ length = test_facility(140) ? EXT_SCCB_READ_CPU : PAGE_SIZE;
sccb = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA | __GFP_ZERO, get_order(length));
if (!sccb)
return -ENOMEM;
}
sclp_fill_core_info(info, sccb);
out:
- free_pages((unsigned long) sccb, get_order(length));
+ free_pages((unsigned long)sccb, get_order(length));
return rc;
}
-struct cpu_configure_sccb {
- struct sccb_header header;
-} __attribute__((packed, aligned(8)));
-
static int do_core_configure(sclp_cmdw_t cmd)
{
struct cpu_configure_sccb *sccb;
if (!SCLP_HAS_CPU_RECONFIG)
return -EOPNOTSUPP;
/*
- * This is not going to cross a page boundary since we force
- * kmalloc to have a minimum alignment of 8 bytes on s390.
+ * Use kmalloc to have a minimum alignment of 8 bytes and ensure sccb
+ * is not going to cross a page boundary.
*/
sccb = kzalloc(sizeof(*sccb), GFP_KERNEL | GFP_DMA);
if (!sccb)
u16 rn;
} __packed;
+/* SCCB for the attach storage element command. */
+struct attach_storage_sccb {
+	struct sccb_header header;
+	u16 :16;
+	u16 assigned;	/* NOTE(review): presumably the valid count for entries[] — confirm against caller */
+	u32 :32;
+	u32 entries[];
+} __packed;
+
int arch_get_memory_phys_device(unsigned long start_pfn)
{
if (!sclp.rzm)
return PFN_PHYS(start_pfn) >> ilog2(sclp.rzm);
}
-static unsigned long long rn2addr(u16 rn)
+/* Map storage increment number rn (1-based) to its start address: (rn - 1) * sclp.rzm. */
+static unsigned long rn2addr(u16 rn)
{
-	return (unsigned long long) (rn - 1) * sclp.rzm;
+	return (unsigned long)(rn - 1) * sclp.rzm;
}
static int do_assign_storage(sclp_cmdw_t cmd, u16 rn)
struct assign_storage_sccb *sccb;
int rc;
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
static int sclp_assign_storage(u16 rn)
{
- unsigned long long start;
+ unsigned long start;
int rc;
rc = do_assign_storage(SCLP_CMDW_ASSIGN_STORAGE, rn);
return do_assign_storage(SCLP_CMDW_UNASSIGN_STORAGE, rn);
}
-struct attach_storage_sccb {
- struct sccb_header header;
- u16 :16;
- u16 assigned;
- u32 :32;
- u32 entries[];
-} __packed;
-
static int sclp_attach_storage(u8 id)
{
struct attach_storage_sccb *sccb;
- int rc;
- int i;
+ int rc, i;
- sccb = (void *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = PAGE_SIZE;
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
int online)
{
struct memory_increment *incr;
- unsigned long long istart;
+ unsigned long istart;
int rc = 0;
list_for_each_entry(incr, &sclp_mem_list, list) {
switch (action) {
case MEM_GOING_OFFLINE:
/*
- * We do not allow to set memory blocks offline that contain
+ * Do not allow to set memory blocks offline that contain
* standby memory. This is done to simplify the "memory online"
* case.
*/
.notifier_call = sclp_mem_notifier,
};
-static void __init align_to_block_size(unsigned long long *start,
-				       unsigned long long *size,
-				       unsigned long long alignment)
+/*
+ * Shrink the range [*start, *start + *size) inward so both boundaries
+ * are multiples of alignment, and report the usable amount.
+ */
+static void __init align_to_block_size(unsigned long *start,
+				       unsigned long *size,
+				       unsigned long alignment)
{
-	unsigned long long start_align, size_align;
+	unsigned long start_align, size_align;
	start_align = roundup(*start, alignment);
	size_align = rounddown(*start + *size, alignment) - start_align;
-	pr_info("Standby memory at 0x%llx (%lluM of %lluM usable)\n",
+	pr_info("Standby memory at 0x%lx (%luM of %luM usable)\n",
	*start, size_align >> 20, *size >> 20);
	*start = start_align;
	*size = size_align;
static void __init add_memory_merged(u16 rn)
{
- unsigned long long start, size, addr, block_size;
+ unsigned long start, size, addr, block_size;
static u16 first_rn, num;
if (rn && first_rn && (first_rn + num == rn)) {
if (!first_rn)
goto skip_add;
start = rn2addr(first_rn);
- size = (unsigned long long) num * sclp.rzm;
+ size = (unsigned long)num * sclp.rzm;
if (start >= ident_map_size)
goto skip_add;
if (start + size > ident_map_size)
align_to_block_size(&start, &size, block_size);
if (!size)
goto skip_add;
- for (addr = start; addr < start + size; addr += block_size)
+ for (addr = start; addr < start + size; addr += block_size) {
add_memory(0, addr, block_size,
cpu_has_edat1() ?
MHP_MEMMAP_ON_MEMORY | MHP_OFFLINE_INACCESSIBLE : MHP_NONE);
+ }
skip_add:
first_rn = rn;
num = 1;
{
struct memory_increment *incr;
- list_for_each_entry(incr, &sclp_mem_list, list)
+ list_for_each_entry(incr, &sclp_mem_list, list) {
if (incr->standby)
add_memory_merged(incr->rn);
+ }
add_memory_merged(0);
}
struct read_storage_sccb *sccb;
int i, id, assigned, rc;
- if (oldmem_data.start) /* No standby memory in kdump mode */
+ /* No standby memory in kdump mode */
+ if (oldmem_data.start)
return 0;
- if ((sclp.facilities & 0xe00000000000ULL) != 0xe00000000000ULL)
+ if ((sclp.facilities & 0xe00000000000UL) != 0xe00000000000UL)
return 0;
rc = -ENOMEM;
- sccb = (void *) __get_free_page(GFP_KERNEL | GFP_DMA);
+ sccb = (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
goto out;
assigned = 0;
goto out;
sclp_add_standby_memory();
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
__initcall(sclp_detect_standby_memory);
#endif /* CONFIG_MEMORY_HOTPLUG */
-/*
- * Channel path configuration related functions.
- */
-
-#define SCLP_CMDW_CONFIGURE_CHPATH 0x000f0001
-#define SCLP_CMDW_DECONFIGURE_CHPATH 0x000e0001
-#define SCLP_CMDW_READ_CHPATH_INFORMATION 0x00030001
-
-struct chp_cfg_sccb {
- struct sccb_header header;
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
static int do_chp_configure(sclp_cmdw_t cmd)
{
struct chp_cfg_sccb *sccb;
if (!SCLP_HAS_CHP_RECONFIG)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_cfg_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_cfg_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
break;
}
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}
return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
}
-struct chp_info_sccb {
- struct sccb_header header;
- u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
- u8 standby[SCLP_CHP_INFO_MASK_SIZE];
- u8 configured[SCLP_CHP_INFO_MASK_SIZE];
- u8 ccm;
- u8 reserved[6];
- u8 cssid;
-} __attribute__((packed));
-
/**
* sclp_chp_read_info - perform read channel-path information sclp command
* @info: resulting channel-path information data
if (!SCLP_HAS_CHP_INFO)
return -EOPNOTSUPP;
- /* Prepare sccb. */
- sccb = (struct chp_info_sccb *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
+ sccb = (struct chp_info_sccb *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
if (!sccb)
return -ENOMEM;
sccb->header.length = sizeof(*sccb);
memcpy(info->standby, sccb->standby, SCLP_CHP_INFO_MASK_SIZE);
memcpy(info->configured, sccb->configured, SCLP_CHP_INFO_MASK_SIZE);
out:
- free_page((unsigned long) sccb);
+ free_page((unsigned long)sccb);
return rc;
}