--- /dev/null
+From 6e7bc478c9a006c701c14476ec9d389a484b4864 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Fri, 3 Feb 2017 14:29:42 -0800
+Subject: net: skb_needs_check() accepts CHECKSUM_NONE for tx
+
+From: Eric Dumazet <edumazet@google.com>
+
+commit 6e7bc478c9a006c701c14476ec9d389a484b4864 upstream.
+
+My recent change missed the fact that UFO would perform a complete
+UDP checksum before segmenting in frags.
+
+In this case skb->ip_summed is set to CHECKSUM_NONE.
+
+We need to add this valid case to skb_needs_check()
+
+Fixes: b2504a5dbef3 ("net: reduce skb_warn_bad_offload() noise")
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Willem de Bruijn <willemb@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/core/dev.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+--- a/net/core/dev.c
++++ b/net/core/dev.c
+@@ -2550,9 +2550,10 @@ EXPORT_SYMBOL(skb_mac_gso_segment);
+ static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
+ {
+ if (tx_path)
+- return skb->ip_summed != CHECKSUM_PARTIAL;
+- else
+- return skb->ip_summed == CHECKSUM_NONE;
++ return skb->ip_summed != CHECKSUM_PARTIAL &&
++ skb->ip_summed != CHECKSUM_NONE;
++
++ return skb->ip_summed == CHECKSUM_NONE;
+ }
+
+ /**
--- /dev/null
+From 663deb47880f2283809669563c5a52ac7c6aef1a Mon Sep 17 00:00:00 2001
+From: Joel Fernandes <joelaf@google.com>
+Date: Thu, 20 Oct 2016 00:34:01 -0700
+Subject: pstore: Allow prz to control need for locking
+
+From: Joel Fernandes <joelaf@google.com>
+
+commit 663deb47880f2283809669563c5a52ac7c6aef1a upstream.
+
+In preparation of not locking at all for certain buffers depending on if
+there's contention, make locking optional depending on the initialization
+of the prz.
+
+Signed-off-by: Joel Fernandes <joelaf@google.com>
+[kees: moved locking flag into prz instead of via caller arguments]
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram.c | 5 +++--
+ fs/pstore/ram_core.c | 24 +++++++++++++++---------
+ include/linux/pstore_ram.h | 10 +++++++++-
+ 3 files changed, 27 insertions(+), 12 deletions(-)
+
+--- a/fs/pstore/ram.c
++++ b/fs/pstore/ram.c
+@@ -413,7 +413,7 @@ static int ramoops_init_przs(struct devi
+ for (i = 0; i < cxt->max_dump_cnt; i++) {
+ cxt->przs[i] = persistent_ram_new(*paddr, cxt->record_size, 0,
+ &cxt->ecc_info,
+- cxt->memtype);
++ cxt->memtype, 0);
+ if (IS_ERR(cxt->przs[i])) {
+ err = PTR_ERR(cxt->przs[i]);
+ dev_err(dev, "failed to request mem region (0x%zx@0x%llx): %d\n",
+@@ -450,7 +450,8 @@ static int ramoops_init_prz(struct devic
+ return -ENOMEM;
+ }
+
+- *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info, cxt->memtype);
++ *prz = persistent_ram_new(*paddr, sz, sig, &cxt->ecc_info,
++ cxt->memtype, 0);
+ if (IS_ERR(*prz)) {
+ int err = PTR_ERR(*prz);
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -52,9 +52,10 @@ static size_t buffer_start_add(struct pe
+ {
+ int old;
+ int new;
+- unsigned long flags;
++ unsigned long flags = 0;
+
+- raw_spin_lock_irqsave(&prz->buffer_lock, flags);
++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+
+ old = atomic_read(&prz->buffer->start);
+ new = old + a;
+@@ -62,7 +63,8 @@ static size_t buffer_start_add(struct pe
+ new -= prz->buffer_size;
+ atomic_set(&prz->buffer->start, new);
+
+- raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+
+ return old;
+ }
+@@ -72,9 +74,10 @@ static void buffer_size_add(struct persi
+ {
+ size_t old;
+ size_t new;
+- unsigned long flags;
++ unsigned long flags = 0;
+
+- raw_spin_lock_irqsave(&prz->buffer_lock, flags);
++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++ raw_spin_lock_irqsave(&prz->buffer_lock, flags);
+
+ old = atomic_read(&prz->buffer->size);
+ if (old == prz->buffer_size)
+@@ -86,7 +89,8 @@ static void buffer_size_add(struct persi
+ atomic_set(&prz->buffer->size, new);
+
+ exit:
+- raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
++ if (!(prz->flags & PRZ_FLAG_NO_LOCK))
++ raw_spin_unlock_irqrestore(&prz->buffer_lock, flags);
+ }
+
+ static void notrace persistent_ram_encode_rs8(struct persistent_ram_zone *prz,
+@@ -420,7 +424,8 @@ static int persistent_ram_buffer_map(phy
+ }
+
+ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+- struct persistent_ram_ecc_info *ecc_info)
++ struct persistent_ram_ecc_info *ecc_info,
++ unsigned long flags)
+ {
+ int ret;
+
+@@ -449,6 +454,7 @@ static int persistent_ram_post_init(stru
+ prz->buffer->sig = sig;
+ persistent_ram_zap(prz);
+ prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++ prz->flags = flags;
+
+ return 0;
+ }
+@@ -473,7 +479,7 @@ void persistent_ram_free(struct persiste
+
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+- unsigned int memtype)
++ unsigned int memtype, u32 flags)
+ {
+ struct persistent_ram_zone *prz;
+ int ret = -ENOMEM;
+@@ -488,7 +494,7 @@ struct persistent_ram_zone *persistent_r
+ if (ret)
+ goto err;
+
+- ret = persistent_ram_post_init(prz, sig, ecc_info);
++ ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
+ if (ret)
+ goto err;
+
+--- a/include/linux/pstore_ram.h
++++ b/include/linux/pstore_ram.h
+@@ -23,6 +23,13 @@
+ #include <linux/types.h>
+ #include <linux/init.h>
+
++/*
++ * Choose whether access to the RAM zone requires locking or not. If a zone
++ * can be written to from different CPUs like with ftrace for example, then
++ * PRZ_FLAG_NO_LOCK is used. For all other cases, locking is required.
++ */
++#define PRZ_FLAG_NO_LOCK BIT(0)
++
+ struct persistent_ram_buffer;
+ struct rs_control;
+
+@@ -39,6 +46,7 @@ struct persistent_ram_zone {
+ void *vaddr;
+ struct persistent_ram_buffer *buffer;
+ size_t buffer_size;
++ u32 flags;
+ raw_spinlock_t buffer_lock;
+
+ /* ECC correction */
+@@ -55,7 +63,7 @@ struct persistent_ram_zone {
+
+ struct persistent_ram_zone *persistent_ram_new(phys_addr_t start, size_t size,
+ u32 sig, struct persistent_ram_ecc_info *ecc_info,
+- unsigned int memtype);
++ unsigned int memtype, u32 flags);
+ void persistent_ram_free(struct persistent_ram_zone *prz);
+ void persistent_ram_zap(struct persistent_ram_zone *prz);
+
--- /dev/null
+From 76d5692a58031696e282384cbd893832bc92bd76 Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Thu, 9 Feb 2017 15:43:44 -0800
+Subject: pstore: Correctly initialize spinlock and flags
+
+From: Kees Cook <keescook@chromium.org>
+
+commit 76d5692a58031696e282384cbd893832bc92bd76 upstream.
+
+The ram backend wasn't always initializing its spinlock correctly. Since
+it was coming from kzalloc memory, though, it was harmless on
+architectures that initialize unlocked spinlocks to 0 (at least x86 and
+ARM). This also fixes a possibly ignored flag setting too.
+
+When running under CONFIG_DEBUG_SPINLOCK, the following Oops was visible:
+
+[ 0.760836] persistent_ram: found existing buffer, size 29988, start 29988
+[ 0.765112] persistent_ram: found existing buffer, size 30105, start 30105
+[ 0.769435] persistent_ram: found existing buffer, size 118542, start 118542
+[ 0.785960] persistent_ram: found existing buffer, size 0, start 0
+[ 0.786098] persistent_ram: found existing buffer, size 0, start 0
+[ 0.786131] pstore: using zlib compression
+[ 0.790716] BUG: spinlock bad magic on CPU#0, swapper/0/1
+[ 0.790729] lock: 0xffffffc0d1ca9bb0, .magic: 00000000, .owner: <none>/-1, .owner_cpu: 0
+[ 0.790742] CPU: 0 PID: 1 Comm: swapper/0 Not tainted 4.10.0-rc2+ #913
+[ 0.790747] Hardware name: Google Kevin (DT)
+[ 0.790750] Call trace:
+[ 0.790768] [<ffffff900808ae88>] dump_backtrace+0x0/0x2bc
+[ 0.790780] [<ffffff900808b164>] show_stack+0x20/0x28
+[ 0.790794] [<ffffff9008460ee0>] dump_stack+0xa4/0xcc
+[ 0.790809] [<ffffff9008113cfc>] spin_dump+0xe0/0xf0
+[ 0.790821] [<ffffff9008113d3c>] spin_bug+0x30/0x3c
+[ 0.790834] [<ffffff9008113e28>] do_raw_spin_lock+0x50/0x1b8
+[ 0.790846] [<ffffff9008a2d2ec>] _raw_spin_lock_irqsave+0x54/0x6c
+[ 0.790862] [<ffffff90083ac3b4>] buffer_size_add+0x48/0xcc
+[ 0.790875] [<ffffff90083acb34>] persistent_ram_write+0x60/0x11c
+[ 0.790888] [<ffffff90083aab1c>] ramoops_pstore_write_buf+0xd4/0x2a4
+[ 0.790900] [<ffffff90083a9d3c>] pstore_console_write+0xf0/0x134
+[ 0.790912] [<ffffff900811c304>] console_unlock+0x48c/0x5e8
+[ 0.790923] [<ffffff900811da18>] register_console+0x3b0/0x4d4
+[ 0.790935] [<ffffff90083aa7d0>] pstore_register+0x1a8/0x234
+[ 0.790947] [<ffffff90083ac250>] ramoops_probe+0x6b8/0x7d4
+[ 0.790961] [<ffffff90085ca548>] platform_drv_probe+0x7c/0xd0
+[ 0.790972] [<ffffff90085c76ac>] driver_probe_device+0x1b4/0x3bc
+[ 0.790982] [<ffffff90085c7ac8>] __device_attach_driver+0xc8/0xf4
+[ 0.790996] [<ffffff90085c4bfc>] bus_for_each_drv+0xb4/0xe4
+[ 0.791006] [<ffffff90085c7414>] __device_attach+0xd0/0x158
+[ 0.791016] [<ffffff90085c7b18>] device_initial_probe+0x24/0x30
+[ 0.791026] [<ffffff90085c648c>] bus_probe_device+0x50/0xe4
+[ 0.791038] [<ffffff90085c35b8>] device_add+0x3a4/0x76c
+[ 0.791051] [<ffffff90087d0e84>] of_device_add+0x74/0x84
+[ 0.791062] [<ffffff90087d19b8>] of_platform_device_create_pdata+0xc0/0x100
+[ 0.791073] [<ffffff90087d1a2c>] of_platform_device_create+0x34/0x40
+[ 0.791086] [<ffffff900903c910>] of_platform_default_populate_init+0x58/0x78
+[ 0.791097] [<ffffff90080831fc>] do_one_initcall+0x88/0x160
+[ 0.791109] [<ffffff90090010ac>] kernel_init_freeable+0x264/0x31c
+[ 0.791123] [<ffffff9008a25bd0>] kernel_init+0x18/0x11c
+[ 0.791133] [<ffffff9008082ec0>] ret_from_fork+0x10/0x50
+[ 0.793717] console [pstore-1] enabled
+[ 0.797845] pstore: Registered ramoops as persistent store backend
+[ 0.804647] ramoops: attached 0x100000@0xf7edc000, ecc: 0/0
+
+Fixes: 663deb47880f ("pstore: Allow prz to control need for locking")
+Fixes: 109704492ef6 ("pstore: Make spinlock per zone instead of global")
+Reported-by: Brian Norris <briannorris@chromium.org>
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 12 +++++++-----
+ 1 file changed, 7 insertions(+), 5 deletions(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -424,8 +424,7 @@ static int persistent_ram_buffer_map(phy
+ }
+
+ static int persistent_ram_post_init(struct persistent_ram_zone *prz, u32 sig,
+- struct persistent_ram_ecc_info *ecc_info,
+- unsigned long flags)
++ struct persistent_ram_ecc_info *ecc_info)
+ {
+ int ret;
+
+@@ -451,10 +450,9 @@ static int persistent_ram_post_init(stru
+ prz->buffer->sig);
+ }
+
++ /* Rewind missing or invalid memory area. */
+ prz->buffer->sig = sig;
+ persistent_ram_zap(prz);
+- prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
+- prz->flags = flags;
+
+ return 0;
+ }
+@@ -490,11 +488,15 @@ struct persistent_ram_zone *persistent_r
+ goto err;
+ }
+
++ /* Initialize general buffer state. */
++ prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++ prz->flags = flags;
++
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
+ if (ret)
+ goto err;
+
+- ret = persistent_ram_post_init(prz, sig, ecc_info, flags);
++ ret = persistent_ram_post_init(prz, sig, ecc_info);
+ if (ret)
+ goto err;
+
--- /dev/null
+From e9a330c4289f2ba1ca4bf98c2b430ab165a8931b Mon Sep 17 00:00:00 2001
+From: Kees Cook <keescook@chromium.org>
+Date: Sun, 5 Mar 2017 22:08:58 -0800
+Subject: pstore: Use dynamic spinlock initializer
+
+From: Kees Cook <keescook@chromium.org>
+
+commit e9a330c4289f2ba1ca4bf98c2b430ab165a8931b upstream.
+
+The per-prz spinlock should be using the dynamic initializer so that
+lockdep can correctly track it. Without this, under lockdep, we get a
+warning at boot that the lock is in non-static memory.
+
+Fixes: 109704492ef6 ("pstore: Make spinlock per zone instead of global")
+Fixes: 76d5692a5803 ("pstore: Correctly initialize spinlock and flags")
+Signed-off-by: Kees Cook <keescook@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/pstore/ram_core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/pstore/ram_core.c
++++ b/fs/pstore/ram_core.c
+@@ -489,7 +489,7 @@ struct persistent_ram_zone *persistent_r
+ }
+
+ /* Initialize general buffer state. */
+- prz->buffer_lock = __RAW_SPIN_LOCK_UNLOCKED(buffer_lock);
++ raw_spin_lock_init(&prz->buffer_lock);
+ prz->flags = flags;
+
+ ret = persistent_ram_buffer_map(start, size, prz, memtype);
--- /dev/null
+From 3d89e5478bf550a50c99e93adf659369798263b0 Mon Sep 17 00:00:00 2001
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+Date: Mon, 13 Jun 2016 18:32:45 +0800
+Subject: sched/cputime: Fix prev steal time accouting during CPU hotplug
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Wanpeng Li <wanpeng.li@hotmail.com>
+
+commit 3d89e5478bf550a50c99e93adf659369798263b0 upstream.
+
+Commit:
+
+ e9532e69b8d1 ("sched/cputime: Fix steal time accounting vs. CPU hotplug")
+
+... set rq->prev_* to 0 after a CPU hotplug comes back, in order to
+fix the case where (after CPU hotplug) steal time is smaller than
+rq->prev_steal_time.
+
+However, this should never happen. Steal time was only smaller because of the
+KVM-specific bug fixed by the previous patch. Worse, the previous patch
+triggers a bug on CPU hot-unplug/plug operation: because
+rq->prev_steal_time is cleared, all of the CPU's past steal time will be
+accounted again on hot-plug.
+
+Since the root cause has been fixed, we can just revert commit e9532e69b8d1.
+
+Signed-off-by: Wanpeng Li <wanpeng.li@hotmail.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Mike Galbraith <efault@gmx.de>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Radim Krčmář <rkrcmar@redhat.com>
+Cc: Rik van Riel <riel@redhat.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Fixes: 'commit e9532e69b8d1 ("sched/cputime: Fix steal time accounting vs. CPU hotplug")'
+Link: http://lkml.kernel.org/r/1465813966-3116-3-git-send-email-wanpeng.li@hotmail.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Andres Oportus <andresoportus@google.com>
+Acked-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/core.c | 1 -
+ kernel/sched/sched.h | 13 -------------
+ 2 files changed, 14 deletions(-)
+
+--- a/kernel/sched/core.c
++++ b/kernel/sched/core.c
+@@ -5553,7 +5553,6 @@ migration_call(struct notifier_block *nf
+
+ case CPU_UP_PREPARE:
+ rq->calc_load_update = calc_load_update;
+- account_reset_rq(rq);
+ break;
+
+ case CPU_ONLINE:
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -1770,16 +1770,3 @@ static inline u64 irq_time_read(int cpu)
+ }
+ #endif /* CONFIG_64BIT */
+ #endif /* CONFIG_IRQ_TIME_ACCOUNTING */
+-
+-static inline void account_reset_rq(struct rq *rq)
+-{
+-#ifdef CONFIG_IRQ_TIME_ACCOUNTING
+- rq->prev_irq_time = 0;
+-#endif
+-#ifdef CONFIG_PARAVIRT
+- rq->prev_steal_time = 0;
+-#endif
+-#ifdef CONFIG_PARAVIRT_TIME_ACCOUNTING
+- rq->prev_steal_time_rq = 0;
+-#endif
+-}
make-file-credentials-available-to-the-seqfile-interfaces.patch
proc-iomem-only-expose-physical-resource-addresses-to-privileged-users.patch
vlan-propagate-mac-address-to-vlans.patch
+pstore-allow-prz-to-control-need-for-locking.patch
+pstore-correctly-initialize-spinlock-and-flags.patch
+pstore-use-dynamic-spinlock-initializer.patch
+staging-sm750fb-avoid-conflicting-vesafb.patch
+net-skb_needs_check-accepts-checksum_none-for-tx.patch
+sched-cputime-fix-prev-steal-time-accouting-during-cpu-hotplug.patch
--- /dev/null
+From 740c433ec35187b45abe08bb6c45a321a791be8e Mon Sep 17 00:00:00 2001
+From: Teddy Wang <teddy.wang@siliconmotion.com>
+Date: Fri, 30 Jun 2017 21:57:43 +0100
+Subject: staging: sm750fb: avoid conflicting vesafb
+
+From: Teddy Wang <teddy.wang@siliconmotion.com>
+
+commit 740c433ec35187b45abe08bb6c45a321a791be8e upstream.
+
+If vesafb is enabled in the config then /dev/fb0 is created by vesa
+and this sm750 driver gets fb1, fb2. But we need to be fb0 and fb1 to
+effectively work with xorg.
+So if it has been allotted fb1, then try to remove the other fb0.
+
+In the previous send, why #ifdef is used was asked.
+https://lkml.org/lkml/2017/6/25/57
+
+Answered at: https://lkml.org/lkml/2017/6/25/69
+Also pasting here for reference.
+
+'Did a quick research into "why".
+The patch d8801e4df91e ("x86/PCI: Set IORESOURCE_ROM_SHADOW only for the
+default VGA device") has started setting IORESOURCE_ROM_SHADOW in flags
+for a default VGA device and that is being done only for x86.
+And so, we will need that #ifdef to check IORESOURCE_ROM_SHADOW as that
+needs to be checked only for a x86 and not for other arch.'
+
+Cc: <stable@vger.kernel.org> # v4.4+
+Signed-off-by: Teddy Wang <teddy.wang@siliconmotion.com>
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/sm750fb/sm750.c | 24 ++++++++++++++++++++++++
+ 1 file changed, 24 insertions(+)
+
+--- a/drivers/staging/sm750fb/sm750.c
++++ b/drivers/staging/sm750fb/sm750.c
+@@ -1002,6 +1002,26 @@ NO_PARAM:
+ }
+ }
+
++static int lynxfb_kick_out_firmware_fb(struct pci_dev *pdev)
++{
++ struct apertures_struct *ap;
++ bool primary = false;
++
++ ap = alloc_apertures(1);
++ if (!ap)
++ return -ENOMEM;
++
++ ap->ranges[0].base = pci_resource_start(pdev, 0);
++ ap->ranges[0].size = pci_resource_len(pdev, 0);
++#ifdef CONFIG_X86
++ primary = pdev->resource[PCI_ROM_RESOURCE].flags &
++ IORESOURCE_ROM_SHADOW;
++#endif
++ remove_conflicting_framebuffers(ap, "sm750_fb1", primary);
++ kfree(ap);
++ return 0;
++}
++
+ static int lynxfb_pci_probe(struct pci_dev *pdev,
+ const struct pci_device_id *ent)
+ {
+@@ -1009,6 +1029,10 @@ static int lynxfb_pci_probe(struct pci_d
+ struct sm750_dev *sm750_dev = NULL;
+ int fbidx;
+
++ err = lynxfb_kick_out_firmware_fb(pdev);
++ if (err)
++ return err;
++
+ /* enable device */
+ if (pci_enable_device(pdev)) {
+ pr_err("can not enable device.\n");