mirror of git://git.yoctoproject.org/linux-yocto.git
synced 2025-07-05 13:25:20 +02:00

Merge tag 'v5.2.60' into v5.2/standard/base

This is the 5.2.60 stable release

This commit is contained in commit ec6bf56429.
@@ -21,7 +21,7 @@ controller state. The mux controller state is described in
 
 Example:
 	mux: mux-controller {
-		compatible = "mux-gpio";
+		compatible = "gpio-mux";
 		#mux-control-cells = <0>;
 
 		mux-gpios = <&pioA 0 GPIO_ACTIVE_HIGH>,

Makefile | 2 +-
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 2
-SUBLEVEL = 59
+SUBLEVEL = 60
 EXTRAVERSION =
 NAME = Bobtail Squid
@@ -154,7 +154,7 @@ armv8pmu_events_sysfs_show(struct device *dev,
 
     pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
 
-    return sprintf(page, "event=0x%03llx\n", pmu_attr->id);
+    return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
 }
 
 #define ARMV8_EVENT_ATTR_RESOLVE(m) #m

@@ -303,10 +303,13 @@ armv8pmu_event_attr_is_visible(struct kobject *kobj,
         test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
         return attr->mode;
 
-    pmu_attr->id -= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
-    if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
-        test_bit(pmu_attr->id, cpu_pmu->pmceid_ext_bitmap))
-        return attr->mode;
+    if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
+        u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;
+
+        if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
+            test_bit(id, cpu_pmu->pmceid_ext_bitmap))
+            return attr->mode;
+    }
 
     return 0;
 }
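Note: the visibility fix above works because it compares pmu_attr->id against the extension base before subtracting into a local, instead of mutating the shared id and letting small values wrap around as unsigned. A minimal standalone C sketch of that check-then-subtract pattern, with illustrative constants rather than the kernel's:

#include <stdbool.h>
#include <stdint.h>

#define EXT_EVENT_BASE    0x4000u   /* illustrative, not the ARMv8 value */
#define MAX_COMMON_EVENTS 0x40u

/* Check first, then subtract into a local: the id can never wrap. */
static bool is_visible_ext_event(uint64_t id)
{
    if (id >= EXT_EVENT_BASE) {
        uint64_t ext = id - EXT_EVENT_BASE; /* safe: id >= base */

        return ext < MAX_COMMON_EVENTS;
    }
    return false;
}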
@@ -20,7 +20,7 @@ static int __init topology_init(void)
     for_each_present_cpu(i) {
         struct cpu *c = &per_cpu(cpu_devices, i);
 
-        c->hotpluggable = 1;
+        c->hotpluggable = !!i;
         ret = register_cpu(c, i);
         if (ret)
             printk(KERN_WARNING "topology_init: register_cpu %d "
@@ -13,6 +13,7 @@
 #include <linux/export.h>
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
 #include <linux/stacktrace.h>
 
 #include <asm/processor.h>

@@ -68,12 +69,25 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
 {
     unsigned long *sp = NULL;
 
+    if (!try_get_task_stack(tsk))
+        return;
+
     if (tsk == current)
         sp = (unsigned long *) &sp;
-    else
-        sp = (unsigned long *) KSTK_ESP(tsk);
+    else {
+        unsigned long ksp;
+
+        /* Locate stack from kernel context */
+        ksp = task_thread_info(tsk)->ksp;
+        ksp += STACK_FRAME_OVERHEAD;    /* redzone */
+        ksp += sizeof(struct pt_regs);
+
+        sp = (unsigned long *) ksp;
+    }
 
     unwind_stack(trace, sp, save_stack_address_nosched);
+
+    put_task_stack(tsk);
 }
 EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
@@ -262,6 +262,9 @@ static bool bad_kernel_fault(struct pt_regs *regs, unsigned long error_code,
     return false;
 }
 
+// This comes from 64-bit struct rt_sigframe + __SIGNAL_FRAMESIZE
+#define SIGFRAME_MAX_SIZE (4096 + 128)
+
 static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
                 struct vm_area_struct *vma, unsigned int flags,
                 bool *must_retry)

@@ -269,7 +272,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
     /*
      * N.B. The POWER/Open ABI allows programs to access up to
      * 288 bytes below the stack pointer.
-     * The kernel signal delivery code writes up to about 1.5kB
+     * The kernel signal delivery code writes a bit over 4KB
      * below the stack pointer (r1) before decrementing it.
      * The exec code can write slightly over 640kB to the stack
      * before setting the user r1. Thus we allow the stack to

@@ -294,7 +297,7 @@ static bool bad_stack_expansion(struct pt_regs *regs, unsigned long address,
          * between the last mapped region and the stack will
          * expand the stack rather than segfaulting.
          */
-        if (address + 2048 >= uregs->gpr[1])
+        if (address + SIGFRAME_MAX_SIZE >= uregs->gpr[1])
             return false;
 
     if ((flags & FAULT_FLAG_WRITE) && (flags & FAULT_FLAG_USER) &&
@@ -27,7 +27,7 @@ static bool rtas_hp_event;
 unsigned long pseries_memory_block_size(void)
 {
     struct device_node *np;
-    unsigned int memblock_size = MIN_MEMORY_BLOCK_SIZE;
+    u64 memblock_size = MIN_MEMORY_BLOCK_SIZE;
     struct resource r;
 
     np = of_find_node_by_path("/ibm,dynamic-reconfiguration-memory");
@@ -82,6 +82,9 @@ device_initcall(landisk_devices_setup);
 
 static void __init landisk_setup(char **cmdline_p)
 {
+    /* I/O port identity mapping */
+    __set_io_port_base(0);
+
     /* LED ON */
     __raw_writeb(__raw_readb(PA_LED) | 0x03, PA_LED);
@@ -55,6 +55,10 @@ struct thread_info {
     mm_segment_t addr_limit;    /* thread address space */
 
     unsigned long cpenable;
+#if XCHAL_HAVE_EXCLUSIVE
+    /* result of the most recent exclusive store */
+    unsigned long atomctl8;
+#endif
 
     /* Allocate storage for extra user states and coprocessor states. */
 #if XTENSA_HAVE_COPROCESSORS

@@ -93,6 +93,9 @@ int main(void)
     DEFINE(THREAD_RA, offsetof (struct task_struct, thread.ra));
     DEFINE(THREAD_SP, offsetof (struct task_struct, thread.sp));
     DEFINE(THREAD_CPENABLE, offsetof (struct thread_info, cpenable));
+#if XCHAL_HAVE_EXCLUSIVE
+    DEFINE(THREAD_ATOMCTL8, offsetof (struct thread_info, atomctl8));
+#endif
 #if XTENSA_HAVE_COPROCESSORS
     DEFINE(THREAD_XTREGS_CP0, offsetof(struct thread_info, xtregs_cp.cp0));
     DEFINE(THREAD_XTREGS_CP1, offsetof(struct thread_info, xtregs_cp.cp1));

@@ -374,6 +374,11 @@ common_exception:
     s32i    a2, a1, PT_LCOUNT
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+    /* Clear exclusive access monitor set by interrupted code */
+    clrex
+#endif
+
     /* It is now save to restore the EXC_TABLE_FIXUP variable. */
 
     rsr     a2, exccause

@@ -1989,6 +1994,12 @@ ENTRY(_switch_to)
     s32i    a3, a4, THREAD_CPENABLE
 #endif
 
+#if XCHAL_HAVE_EXCLUSIVE
+    l32i    a3, a5, THREAD_ATOMCTL8
+    getex   a3
+    s32i    a3, a4, THREAD_ATOMCTL8
+#endif
+
     /* Flush register file. */
 
     spill_registers_kernel

@@ -401,7 +401,7 @@ static struct pmu xtensa_pmu = {
     .read = xtensa_pmu_read,
 };
 
-static int xtensa_pmu_setup(int cpu)
+static int xtensa_pmu_setup(unsigned int cpu)
 {
     unsigned i;
 
@@ -835,8 +835,10 @@ bool __bio_try_merge_page(struct bio *bio, struct page *page,
         struct bio_vec *bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
 
         if (page_is_mergeable(bv, page, len, off, same_page)) {
-            if (bio->bi_iter.bi_size > UINT_MAX - len)
+            if (bio->bi_iter.bi_size > UINT_MAX - len) {
+                *same_page = false;
                 return false;
+            }
             bv->bv_len += len;
             bio->bi_iter.bi_size += len;
             return true;
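Note: the guard above uses the standard wrap-free overflow test — comparing against UINT_MAX - len before adding — and now also clears *same_page so the caller does not act on a stale hint. A small self-contained C sketch of the same test (hypothetical helper, not the block-layer code):

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

/* Returns false instead of letting the 32-bit accumulator wrap. */
static bool add_len_checked(unsigned int *size, unsigned int len)
{
    if (*size > UINT_MAX - len)
        return false;   /* *size + len would overflow */
    *size += len;
    return true;
}

int main(void)
{
    unsigned int size = UINT_MAX - 3;

    printf("%d\n", add_len_checked(&size, 2)); /* 1: still fits */
    printf("%d\n", add_len_checked(&size, 2)); /* 0: would wrap */
    return 0;
}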
@@ -16,6 +16,7 @@
 #include <linux/module.h>
 #include <linux/net.h>
 #include <linux/rwsem.h>
+#include <linux/sched.h>
 #include <linux/sched/signal.h>
 #include <linux/security.h>

@@ -635,6 +636,7 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
 
     if (!ctx->used)
         ctx->merge = 0;
+    ctx->init = ctx->more;
 }
 EXPORT_SYMBOL_GPL(af_alg_pull_tsgl);

@@ -734,9 +736,10 @@ EXPORT_SYMBOL_GPL(af_alg_wmem_wakeup);
  *
  * @sk socket of connection to user space
  * @flags If MSG_DONTWAIT is set, then only report if function would sleep
+ * @min Set to minimum request size if partial requests are allowed.
  * @return 0 when writable memory is available, < 0 upon error
  */
-int af_alg_wait_for_data(struct sock *sk, unsigned flags)
+int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min)
 {
     DEFINE_WAIT_FUNC(wait, woken_wake_function);
     struct alg_sock *ask = alg_sk(sk);

@@ -754,7 +757,9 @@ int af_alg_wait_for_data(struct sock *sk, unsigned flags)
         if (signal_pending(current))
             break;
         timeout = MAX_SCHEDULE_TIMEOUT;
-        if (sk_wait_event(sk, &timeout, (ctx->used || !ctx->more),
+        if (sk_wait_event(sk, &timeout,
+                  ctx->init && (!ctx->more ||
+                        (min && ctx->used >= min)),
                   &wait)) {
             err = 0;
             break;

@@ -843,11 +848,18 @@ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
     }
 
     lock_sock(sk);
-    if (!ctx->more && ctx->used) {
-        err = -EINVAL;
-        goto unlock;
+    if (ctx->init && !ctx->more) {
+        if (ctx->used) {
+            err = -EINVAL;
+            goto unlock;
+        }
+
+        pr_info_once(
+            "%s sent an empty control message without MSG_MORE.\n",
+            current->comm);
     }
+    ctx->init = true;
 
     if (init) {
         ctx->enc = enc;
         if (con.iv)

@@ -106,8 +106,8 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
     size_t usedpages = 0;   /* [in] RX bufs to be used from user */
     size_t processed = 0;   /* [in] TX bufs to be consumed */
 
-    if (!ctx->used) {
-        err = af_alg_wait_for_data(sk, flags);
+    if (!ctx->init || ctx->more) {
+        err = af_alg_wait_for_data(sk, flags, 0);
         if (err)
             return err;
     }

@@ -558,12 +558,6 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
 
     INIT_LIST_HEAD(&ctx->tsgl_list);
     ctx->len = len;
-    ctx->used = 0;
-    atomic_set(&ctx->rcvused, 0);
-    ctx->more = 0;
-    ctx->merge = 0;
-    ctx->enc = 0;
-    ctx->aead_assoclen = 0;
     crypto_init_wait(&ctx->wait);
 
     ask->private = ctx;

@@ -61,8 +61,8 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
     int err = 0;
     size_t len = 0;
 
-    if (!ctx->used) {
-        err = af_alg_wait_for_data(sk, flags);
+    if (!ctx->init || (ctx->more && ctx->used < bs)) {
+        err = af_alg_wait_for_data(sk, flags, bs);
         if (err)
             return err;
     }

@@ -333,6 +333,7 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
     ctx = sock_kmalloc(sk, len, GFP_KERNEL);
     if (!ctx)
         return -ENOMEM;
+    memset(ctx, 0, len);
 
     ctx->iv = sock_kmalloc(sk, crypto_skcipher_ivsize(tfm),
                    GFP_KERNEL);

@@ -340,16 +341,10 @@ static int skcipher_accept_parent_nokey(void *private, struct sock *sk)
         sock_kfree_s(sk, ctx, len);
         return -ENOMEM;
     }
-
     memset(ctx->iv, 0, crypto_skcipher_ivsize(tfm));
 
     INIT_LIST_HEAD(&ctx->tsgl_list);
     ctx->len = len;
-    ctx->used = 0;
-    atomic_set(&ctx->rcvused, 0);
-    ctx->more = 0;
-    ctx->merge = 0;
-    ctx->enc = 0;
     crypto_init_wait(&ctx->wait);
 
     ask->private = ctx;
@@ -819,7 +819,9 @@ static int __device_attach(struct device *dev, bool allow_async)
     int ret = 0;
 
     device_lock(dev);
-    if (dev->driver) {
+    if (dev->p->dead) {
+        goto out_unlock;
+    } else if (dev->driver) {
         if (device_is_bound(dev)) {
             ret = 1;
             goto out_unlock;
@@ -4305,6 +4305,9 @@ static ssize_t rbd_config_info_show(struct device *dev,
 {
     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
 
+    if (!capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
     return sprintf(buf, "%s\n", rbd_dev->config_info);
 }

@@ -4416,6 +4419,9 @@ static ssize_t rbd_image_refresh(struct device *dev,
     struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
     int ret;
 
+    if (!capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
     ret = rbd_dev_refresh(rbd_dev);
     if (ret)
         return ret;

@@ -6012,6 +6018,9 @@ static ssize_t do_rbd_add(struct bus_type *bus,
     struct rbd_client *rbdc;
     int rc;
 
+    if (!capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
     if (!try_module_get(THIS_MODULE))
         return -ENODEV;

@@ -6167,6 +6176,9 @@ static ssize_t do_rbd_remove(struct bus_type *bus,
     bool force = false;
     int ret;
 
+    if (!capable(CAP_SYS_ADMIN))
+        return -EPERM;
+
     dev_id = -1;
     opt_buf[0] = '\0';
     sscanf(buf, "%d %5s", &dev_id, opt_buf);
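Note: all four rbd entry points above gain the same pattern — check capable(CAP_SYS_ADMIN) first and return -EPERM before parsing any input. A hedged kernel-style sketch of that gate on a made-up attribute handler (the name demo_store is invented, not rbd's):

static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
    /* Reject unprivileged writers before touching the payload at all. */
    if (!capable(CAP_SYS_ADMIN))
        return -EPERM;

    /* ... parse buf and act on it ... */
    return count;
}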
@@ -183,7 +183,7 @@ static OWL_GATE(timer_clk, "timer_clk", "hosc", CMU_DEVCLKEN1, 27, 0, 0);
 static OWL_GATE(hdmi_clk, "hdmi_clk", "hosc", CMU_DEVCLKEN1, 3, 0, 0);
 
 /* divider clocks */
-static OWL_DIVIDER(h_clk, "h_clk", "ahbprevdiv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
+static OWL_DIVIDER(h_clk, "h_clk", "ahbprediv_clk", CMU_BUSCLK1, 12, 2, NULL, 0, 0);
 static OWL_DIVIDER(rmii_ref_clk, "rmii_ref_clk", "ethernet_pll_clk", CMU_ETHERNETPLL, 1, 1, rmii_ref_div_table, 0, 0);
 
 /* factor clocks */

@@ -1715,6 +1715,9 @@ static struct clk_branch gcc_mss_cfg_ahb_clk = {
 
 static struct clk_branch gcc_mss_mnoc_bimc_axi_clk = {
     .halt_reg = 0x8a004,
+    .halt_check = BRANCH_HALT,
+    .hwcg_reg = 0x8a004,
+    .hwcg_bit = 1,
     .clkr = {
         .enable_reg = 0x8a004,
         .enable_mask = BIT(0),

@@ -135,7 +135,7 @@ static void __init atlas6_clk_init(struct device_node *np)
 
     for (i = pll1; i < maxclk; i++) {
         atlas6_clks[i] = clk_register(NULL, atlas6_clk_hw_array[i]);
-        BUG_ON(!atlas6_clks[i]);
+        BUG_ON(IS_ERR(atlas6_clks[i]));
     }
     clk_register_clkdev(atlas6_clks[cpu], NULL, "cpu");
     clk_register_clkdev(atlas6_clks[io], NULL, "io");
@@ -2724,7 +2724,10 @@ static int ci_initialize_mc_reg_table(struct pp_hwmgr *hwmgr)
 
 static bool ci_is_dpm_running(struct pp_hwmgr *hwmgr)
 {
-    return ci_is_smc_ram_running(hwmgr);
+    return (1 == PHM_READ_INDIRECT_FIELD(hwmgr->device,
+                         CGS_IND_REG__SMC, FEATURE_STATUS,
+                         VOLTAGE_CONTROLLER_ON))
+            ? true : false;
 }
 
 static int ci_smu_init(struct pp_hwmgr *hwmgr)
@@ -3241,11 +3241,11 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
 {
     int ret;
 
-    port = drm_dp_mst_topology_get_port_validated(mgr, port);
-    if (!port)
+    if (slots < 0)
         return false;
 
-    if (slots < 0)
+    port = drm_dp_mst_topology_get_port_validated(mgr, port);
+    if (!port)
         return false;
 
     if (port->vcpi.vcpi > 0) {

@@ -3261,6 +3261,7 @@ bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
     if (ret) {
         DRM_DEBUG_KMS("failed to init vcpi slots=%d max=63 ret=%d\n",
                   DIV_ROUND_UP(pbn, mgr->pbn_div), ret);
+        drm_dp_mst_topology_put_port(port);
         goto out;
     }
     DRM_DEBUG_KMS("initing vcpi for pbn=%d slots=%d\n",
@@ -121,6 +121,12 @@ static const struct dmi_system_id orientation_data[] = {
           DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T101HA"),
         },
         .driver_data = (void *)&lcd800x1280_rightside_up,
+    }, {    /* Asus T103HAF */
+        .matches = {
+          DMI_EXACT_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
+          DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "T103HAF"),
+        },
+        .driver_data = (void *)&lcd800x1280_rightside_up,
     }, {    /* GPD MicroPC (generic strings, also match on bios date) */
         .matches = {
           DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Default string"),
@@ -303,18 +303,19 @@ static void imx_ldb_encoder_disable(struct drm_encoder *encoder)
 {
     struct imx_ldb_channel *imx_ldb_ch = enc_to_imx_ldb_ch(encoder);
     struct imx_ldb *ldb = imx_ldb_ch->ldb;
+    int dual = ldb->ldb_ctrl & LDB_SPLIT_MODE_EN;
     int mux, ret;
 
     drm_panel_disable(imx_ldb_ch->panel);
 
-    if (imx_ldb_ch == &ldb->channel[0])
+    if (imx_ldb_ch == &ldb->channel[0] || dual)
         ldb->ldb_ctrl &= ~LDB_CH0_MODE_EN_MASK;
-    else if (imx_ldb_ch == &ldb->channel[1])
+    if (imx_ldb_ch == &ldb->channel[1] || dual)
         ldb->ldb_ctrl &= ~LDB_CH1_MODE_EN_MASK;
 
     regmap_write(ldb->regmap, IOMUXC_GPR2, ldb->ldb_ctrl);
 
-    if (ldb->ldb_ctrl & LDB_SPLIT_MODE_EN) {
+    if (dual) {
         clk_disable_unprepare(ldb->clk[0]);
         clk_disable_unprepare(ldb->clk[1]);
     }
@@ -2657,7 +2657,7 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
         ++i;
     }
 
-    if (i != unit) {
+    if (&con->head == &dev_priv->dev->mode_config.connector_list) {
         DRM_ERROR("Could not find initial display unit.\n");
         ret = -EINVAL;
         goto out_unlock;

@@ -2681,13 +2681,13 @@ int vmw_kms_fbdev_init_data(struct vmw_private *dev_priv,
             break;
     }
 
-    if (mode->type & DRM_MODE_TYPE_PREFERRED)
-        *p_mode = mode;
-    else {
+    if (&mode->head == &con->modes) {
         WARN_ONCE(true, "Could not find initial preferred mode.\n");
         *p_mode = list_first_entry(&con->modes,
                        struct drm_display_mode,
                        head);
+    } else {
+        *p_mode = mode;
     }
 
 out_unlock:

@@ -79,7 +79,7 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
     struct vmw_legacy_display_unit *entry;
     struct drm_framebuffer *fb = NULL;
     struct drm_crtc *crtc = NULL;
-    int i = 0;
+    int i;
 
     /* If there is no display topology the host just assumes
      * that the guest will set the same layout as the host.

@@ -90,12 +90,11 @@ static int vmw_ldu_commit_list(struct vmw_private *dev_priv)
         crtc = &entry->base.crtc;
         w = max(w, crtc->x + crtc->mode.hdisplay);
         h = max(h, crtc->y + crtc->mode.vdisplay);
-        i++;
     }
 
     if (crtc == NULL)
         return 0;
-    fb = entry->base.crtc.primary->state->fb;
+    fb = crtc->primary->state->fb;
 
     return vmw_kms_write_svga(dev_priv, w, h, fb->pitches[0],
                   fb->format->cpp[0] * 8,
@@ -137,6 +137,17 @@ struct ipu_image_convert_ctx;
 struct ipu_image_convert_chan;
 struct ipu_image_convert_priv;
 
+enum eof_irq_mask {
+    EOF_IRQ_IN      = BIT(0),
+    EOF_IRQ_ROT_IN  = BIT(1),
+    EOF_IRQ_OUT     = BIT(2),
+    EOF_IRQ_ROT_OUT = BIT(3),
+};
+
+#define EOF_IRQ_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT)
+#define EOF_IRQ_ROT_COMPLETE (EOF_IRQ_IN | EOF_IRQ_OUT | \
+                  EOF_IRQ_ROT_IN | EOF_IRQ_ROT_OUT)
+
 struct ipu_image_convert_ctx {
     struct ipu_image_convert_chan *chan;

@@ -172,6 +183,9 @@ struct ipu_image_convert_ctx {
     /* where to place converted tile in dest image */
     unsigned int out_tile_map[MAX_TILES];
 
+    /* mask of completed EOF irqs at every tile conversion */
+    enum eof_irq_mask eof_mask;
+
     struct list_head list;
 };

@@ -188,6 +202,8 @@ struct ipu_image_convert_chan {
     struct ipuv3_channel *rotation_out_chan;
 
     /* the IPU end-of-frame irqs */
+    int in_eof_irq;
+    int rot_in_eof_irq;
     int out_eof_irq;
     int rot_out_eof_irq;

@@ -1361,6 +1377,9 @@ static int convert_start(struct ipu_image_convert_run *run, unsigned int tile)
     init_idmac_channel(ctx, chan->in_chan, s_image,
                IPU_ROTATE_NONE, false, tile);
 
+    /* clear EOF irq mask */
+    ctx->eof_mask = 0;
+
     if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
         /* init the IC PP-->MEM IDMAC channel */
         init_idmac_channel(ctx, chan->out_chan, d_image,

@@ -1559,7 +1578,7 @@ static bool ic_settings_changed(struct ipu_image_convert_ctx *ctx)
 }
 
 /* hold irqlock when calling */
-static irqreturn_t do_irq(struct ipu_image_convert_run *run)
+static irqreturn_t do_tile_complete(struct ipu_image_convert_run *run)
 {
     struct ipu_image_convert_ctx *ctx = run->ctx;
     struct ipu_image_convert_chan *chan = ctx->chan;

@@ -1644,6 +1663,7 @@ static irqreturn_t do_irq(struct ipu_image_convert_run *run)
         ctx->cur_buf_num ^= 1;
     }
 
+    ctx->eof_mask = 0;  /* clear EOF irq mask for next tile */
     ctx->next_tile++;
     return IRQ_HANDLED;
 done:
@@ -1653,45 +1673,15 @@ done:
     return IRQ_WAKE_THREAD;
 }
 
-static irqreturn_t norotate_irq(int irq, void *data)
-{
-    struct ipu_image_convert_chan *chan = data;
-    struct ipu_image_convert_ctx *ctx;
-    struct ipu_image_convert_run *run;
-    unsigned long flags;
-    irqreturn_t ret;
-
-    spin_lock_irqsave(&chan->irqlock, flags);
-
-    /* get current run and its context */
-    run = chan->current_run;
-    if (!run) {
-        ret = IRQ_NONE;
-        goto out;
-    }
-
-    ctx = run->ctx;
-
-    if (ipu_rot_mode_is_irt(ctx->rot_mode)) {
-        /* this is a rotation operation, just ignore */
-        spin_unlock_irqrestore(&chan->irqlock, flags);
-        return IRQ_HANDLED;
-    }
-
-    ret = do_irq(run);
-out:
-    spin_unlock_irqrestore(&chan->irqlock, flags);
-    return ret;
-}
-
-static irqreturn_t rotate_irq(int irq, void *data)
+static irqreturn_t eof_irq(int irq, void *data)
 {
     struct ipu_image_convert_chan *chan = data;
     struct ipu_image_convert_priv *priv = chan->priv;
     struct ipu_image_convert_ctx *ctx;
     struct ipu_image_convert_run *run;
+    irqreturn_t ret = IRQ_HANDLED;
+    bool tile_complete = false;
     unsigned long flags;
-    irqreturn_t ret;
 
     spin_lock_irqsave(&chan->irqlock, flags);

@@ -1704,14 +1694,33 @@ static irqreturn_t rotate_irq(int irq, void *data)
 
     ctx = run->ctx;
 
-    if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
-        /* this was NOT a rotation operation, shouldn't happen */
-        dev_err(priv->ipu->dev, "Unexpected rotation interrupt\n");
-        spin_unlock_irqrestore(&chan->irqlock, flags);
-        return IRQ_HANDLED;
+    if (irq == chan->in_eof_irq) {
+        ctx->eof_mask |= EOF_IRQ_IN;
+    } else if (irq == chan->out_eof_irq) {
+        ctx->eof_mask |= EOF_IRQ_OUT;
+    } else if (irq == chan->rot_in_eof_irq ||
+           irq == chan->rot_out_eof_irq) {
+        if (!ipu_rot_mode_is_irt(ctx->rot_mode)) {
+            /* this was NOT a rotation op, shouldn't happen */
+            dev_err(priv->ipu->dev,
+                "Unexpected rotation interrupt\n");
+            goto out;
+        }
+        ctx->eof_mask |= (irq == chan->rot_in_eof_irq) ?
+            EOF_IRQ_ROT_IN : EOF_IRQ_ROT_OUT;
+    } else {
+        dev_err(priv->ipu->dev, "Received unknown irq %d\n", irq);
+        ret = IRQ_NONE;
+        goto out;
     }
 
-    ret = do_irq(run);
+    if (ipu_rot_mode_is_irt(ctx->rot_mode))
+        tile_complete = (ctx->eof_mask == EOF_IRQ_ROT_COMPLETE);
+    else
+        tile_complete = (ctx->eof_mask == EOF_IRQ_COMPLETE);
+
+    if (tile_complete)
+        ret = do_tile_complete(run);
 out:
     spin_unlock_irqrestore(&chan->irqlock, flags);
     return ret;
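Note: the combined handler above ORs one bit per end-of-frame source into ctx->eof_mask and only finishes the tile once the expected set is complete. A standalone C sketch of that completion-mask idea (illustrative bit names, not the ipu-v3 code):

#include <stdbool.h>
#include <stdio.h>

enum { EOF_IN = 1 << 0, EOF_ROT_IN = 1 << 1,
       EOF_OUT = 1 << 2, EOF_ROT_OUT = 1 << 3 };

#define COMPLETE     (EOF_IN | EOF_OUT)
#define ROT_COMPLETE (EOF_IN | EOF_OUT | EOF_ROT_IN | EOF_ROT_OUT)

/* Accumulate per-source bits; report true once the full set is seen. */
static bool note_eof(unsigned int *mask, unsigned int bit, bool rotating)
{
    *mask |= bit;
    return *mask == (rotating ? ROT_COMPLETE : COMPLETE);
}

int main(void)
{
    unsigned int mask = 0;

    printf("%d\n", note_eof(&mask, EOF_IN, false));  /* 0: waiting */
    printf("%d\n", note_eof(&mask, EOF_OUT, false)); /* 1: tile done */
    return 0;
}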
@@ -1745,6 +1754,10 @@ static void force_abort(struct ipu_image_convert_ctx *ctx)
 
 static void release_ipu_resources(struct ipu_image_convert_chan *chan)
 {
+    if (chan->in_eof_irq >= 0)
+        free_irq(chan->in_eof_irq, chan);
+    if (chan->rot_in_eof_irq >= 0)
+        free_irq(chan->rot_in_eof_irq, chan);
     if (chan->out_eof_irq >= 0)
         free_irq(chan->out_eof_irq, chan);
     if (chan->rot_out_eof_irq >= 0)

@@ -1763,7 +1776,27 @@ static void release_ipu_resources(struct ipu_image_convert_chan *chan)
 
     chan->in_chan = chan->out_chan = chan->rotation_in_chan =
         chan->rotation_out_chan = NULL;
-    chan->out_eof_irq = chan->rot_out_eof_irq = -1;
+    chan->in_eof_irq = -1;
+    chan->rot_in_eof_irq = -1;
+    chan->out_eof_irq = -1;
+    chan->rot_out_eof_irq = -1;
+}
+
+static int get_eof_irq(struct ipu_image_convert_chan *chan,
+               struct ipuv3_channel *channel)
+{
+    struct ipu_image_convert_priv *priv = chan->priv;
+    int ret, irq;
+
+    irq = ipu_idmac_channel_irq(priv->ipu, channel, IPU_IRQ_EOF);
+
+    ret = request_threaded_irq(irq, eof_irq, do_bh, 0, "ipu-ic", chan);
+    if (ret < 0) {
+        dev_err(priv->ipu->dev, "could not acquire irq %d\n", irq);
+        return ret;
+    }
+
+    return irq;
 }
 
 static int get_ipu_resources(struct ipu_image_convert_chan *chan)

@@ -1799,31 +1832,33 @@ static int get_ipu_resources(struct ipu_image_convert_chan *chan)
     }
 
     /* acquire the EOF interrupts */
-    chan->out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
-                          chan->out_chan,
-                          IPU_IRQ_EOF);
-
-    ret = request_threaded_irq(chan->out_eof_irq, norotate_irq, do_bh,
-                   0, "ipu-ic", chan);
-    if (ret < 0) {
-        dev_err(priv->ipu->dev, "could not acquire irq %d\n",
-            chan->out_eof_irq);
-        chan->out_eof_irq = -1;
-        goto err;
-    }
-
-    chan->rot_out_eof_irq = ipu_idmac_channel_irq(priv->ipu,
-                              chan->rotation_out_chan,
-                              IPU_IRQ_EOF);
-
-    ret = request_threaded_irq(chan->rot_out_eof_irq, rotate_irq, do_bh,
-                   0, "ipu-ic", chan);
-    if (ret < 0) {
-        dev_err(priv->ipu->dev, "could not acquire irq %d\n",
-            chan->rot_out_eof_irq);
-        chan->rot_out_eof_irq = -1;
-        goto err;
-    }
+    ret = get_eof_irq(chan, chan->in_chan);
+    if (ret < 0) {
+        chan->in_eof_irq = -1;
+        goto err;
+    }
+    chan->in_eof_irq = ret;
+
+    ret = get_eof_irq(chan, chan->rotation_in_chan);
+    if (ret < 0) {
+        chan->rot_in_eof_irq = -1;
+        goto err;
+    }
+    chan->rot_in_eof_irq = ret;
+
+    ret = get_eof_irq(chan, chan->out_chan);
+    if (ret < 0) {
+        chan->out_eof_irq = -1;
+        goto err;
+    }
+    chan->out_eof_irq = ret;
+
+    ret = get_eof_irq(chan, chan->rotation_out_chan);
+    if (ret < 0) {
+        chan->rot_out_eof_irq = -1;
+        goto err;
+    }
+    chan->rot_out_eof_irq = ret;
 
     return 0;
 err:

@@ -2389,6 +2424,8 @@ int ipu_image_convert_init(struct ipu_soc *ipu, struct device *dev)
         chan->ic_task = i;
         chan->priv = priv;
         chan->dma_ch = &image_convert_dma_chan[i];
+        chan->in_eof_irq = -1;
+        chan->rot_in_eof_irq = -1;
         chan->out_eof_irq = -1;
         chan->rot_out_eof_irq = -1;
@@ -580,13 +580,14 @@ static bool rcar_i2c_slave_irq(struct rcar_i2c_priv *priv)
             rcar_i2c_write(priv, ICSIER, SDR | SSR | SAR);
         }
 
-        rcar_i2c_write(priv, ICSSR, ~SAR & 0xff);
+        /* Clear SSR, too, because of old STOPs to other clients than us */
+        rcar_i2c_write(priv, ICSSR, ~(SAR | SSR) & 0xff);
     }
 
     /* master sent stop */
     if (ssr_filtered & SSR) {
         i2c_slave_event(priv->slave, I2C_SLAVE_STOP, &value);
-        rcar_i2c_write(priv, ICSIER, SAR | SSR);
+        rcar_i2c_write(priv, ICSIER, SAR);
         rcar_i2c_write(priv, ICSSR, ~SSR & 0xff);
     }

@@ -850,7 +851,7 @@ static int rcar_reg_slave(struct i2c_client *slave)
     priv->slave = slave;
     rcar_i2c_write(priv, ICSAR, slave->addr);
     rcar_i2c_write(priv, ICSSR, 0);
-    rcar_i2c_write(priv, ICSIER, SAR | SSR);
+    rcar_i2c_write(priv, ICSIER, SAR);
     rcar_i2c_write(priv, ICSCR, SIE | SDBS);
 
     return 0;
@@ -416,7 +416,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
             s64 tmp = *val * (3767897513LL / 25LL);
             *val = div_s64_rem(tmp, 1000000000LL, val2);
 
-            ret = IIO_VAL_INT_PLUS_MICRO;
+            return IIO_VAL_INT_PLUS_MICRO;
         } else {
             int mult;
 

@@ -447,7 +447,7 @@ static int ad5592r_read_raw(struct iio_dev *iio_dev,
         ret = IIO_VAL_INT;
         break;
     default:
-        ret = -EINVAL;
+        return -EINVAL;
     }
 
 unlock:
@@ -760,6 +760,7 @@ static int ib_uverbs_reg_mr(struct uverbs_attr_bundle *attrs)
     mr->uobject = uobj;
     atomic_inc(&pd->usecnt);
     mr->res.type = RDMA_RESTRACK_MR;
+    mr->iova = cmd.hca_va;
     rdma_restrack_uadd(&mr->res);
 
     uobj->object = mr;

@@ -850,6 +851,9 @@ static int ib_uverbs_rereg_mr(struct uverbs_attr_bundle *attrs)
         atomic_dec(&old_pd->usecnt);
     }
 
+    if (cmd.flags & IB_MR_REREG_TRANS)
+        mr->iova = cmd.hca_va;
+
     memset(&resp, 0, sizeof(resp));
     resp.lkey = mr->lkey;
     resp.rkey = mr->rkey;

@@ -398,7 +398,6 @@ static int finish_mem_reg(struct c4iw_mr *mhp, u32 stag)
     mmid = stag >> 8;
     mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
     mhp->ibmr.length = mhp->attr.len;
-    mhp->ibmr.iova = mhp->attr.va_fbo;
     mhp->ibmr.page_size = 1U << (mhp->attr.page_size + 12);
     pr_debug("mmid 0x%x mhp %p\n", mmid, mhp);
     return xa_insert_irq(&mhp->rhp->mrs, mmid, mhp, GFP_KERNEL);
@@ -780,6 +780,7 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
     props->ip_gids = true;
     props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
     props->max_msg_sz = mdev->dev->caps.max_msg_sz;
-    props->pkey_tbl_len = 1;
+    if (mdev->dev->caps.pkey_table_len[port])
+        props->pkey_tbl_len = 1;
     props->max_mtu = IB_MTU_4096;
     props->max_vl_num = 2;

@@ -440,7 +440,6 @@ struct ib_mr *mlx4_ib_reg_user_mr(struct ib_pd *pd, u64 start, u64 length,
 
     mr->ibmr.rkey = mr->ibmr.lkey = mr->mmr.key;
     mr->ibmr.length = length;
-    mr->ibmr.iova = virt_addr;
     mr->ibmr.page_size = 1U << shift;
 
     return &mr->ibmr;
@@ -515,7 +515,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev);
 
 int ipoib_ib_dev_open_default(struct net_device *dev);
 int ipoib_ib_dev_open(struct net_device *dev);
-int ipoib_ib_dev_stop(struct net_device *dev);
+void ipoib_ib_dev_stop(struct net_device *dev);
 void ipoib_ib_dev_up(struct net_device *dev);
 void ipoib_ib_dev_down(struct net_device *dev);
 int ipoib_ib_dev_stop_default(struct net_device *dev);

@@ -669,13 +669,12 @@ int ipoib_send(struct net_device *dev, struct sk_buff *skb,
     return rc;
 }
 
-static void __ipoib_reap_ah(struct net_device *dev)
+static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
 {
-    struct ipoib_dev_priv *priv = ipoib_priv(dev);
     struct ipoib_ah *ah, *tah;
     unsigned long flags;
 
-    netif_tx_lock_bh(dev);
+    netif_tx_lock_bh(priv->dev);
     spin_lock_irqsave(&priv->lock, flags);
 
     list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list)

@@ -686,37 +685,37 @@ static void ipoib_reap_dead_ahs(struct ipoib_dev_priv *priv)
     }
 
     spin_unlock_irqrestore(&priv->lock, flags);
-    netif_tx_unlock_bh(dev);
+    netif_tx_unlock_bh(priv->dev);
 }
 
 void ipoib_reap_ah(struct work_struct *work)
 {
     struct ipoib_dev_priv *priv =
         container_of(work, struct ipoib_dev_priv, ah_reap_task.work);
-    struct net_device *dev = priv->dev;
 
-    __ipoib_reap_ah(dev);
+    ipoib_reap_dead_ahs(priv);
 
     if (!test_bit(IPOIB_STOP_REAPER, &priv->flags))
         queue_delayed_work(priv->wq, &priv->ah_reap_task,
                    round_jiffies_relative(HZ));
 }
 
-static void ipoib_flush_ah(struct net_device *dev)
+static void ipoib_start_ah_reaper(struct ipoib_dev_priv *priv)
 {
-    struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
-    cancel_delayed_work(&priv->ah_reap_task);
-    flush_workqueue(priv->wq);
-    ipoib_reap_ah(&priv->ah_reap_task.work);
+    clear_bit(IPOIB_STOP_REAPER, &priv->flags);
+    queue_delayed_work(priv->wq, &priv->ah_reap_task,
+               round_jiffies_relative(HZ));
 }
 
-static void ipoib_stop_ah(struct net_device *dev)
+static void ipoib_stop_ah_reaper(struct ipoib_dev_priv *priv)
 {
-    struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
     set_bit(IPOIB_STOP_REAPER, &priv->flags);
-    ipoib_flush_ah(dev);
+    cancel_delayed_work(&priv->ah_reap_task);
+    /*
+     * After ipoib_stop_ah_reaper() we always go through
+     * ipoib_reap_dead_ahs() which ensures the work is really stopped and
+     * does a final flush out of the dead_ah's list
+     */
 }
 
 static int recvs_pending(struct net_device *dev)

@@ -845,18 +844,6 @@ timeout:
     return 0;
 }
 
-int ipoib_ib_dev_stop(struct net_device *dev)
-{
-    struct ipoib_dev_priv *priv = ipoib_priv(dev);
-
-    priv->rn_ops->ndo_stop(dev);
-
-    clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-    ipoib_flush_ah(dev);
-
-    return 0;
-}
-
 int ipoib_ib_dev_open_default(struct net_device *dev)
 {
     struct ipoib_dev_priv *priv = ipoib_priv(dev);

@@ -900,10 +887,7 @@ int ipoib_ib_dev_open(struct net_device *dev)
         return -1;
     }
 
-    clear_bit(IPOIB_STOP_REAPER, &priv->flags);
-    queue_delayed_work(priv->wq, &priv->ah_reap_task,
-               round_jiffies_relative(HZ));
-
+    ipoib_start_ah_reaper(priv);
     if (priv->rn_ops->ndo_open(dev)) {
         pr_warn("%s: Failed to open dev\n", dev->name);
         goto dev_stop;

@@ -914,13 +898,20 @@ int ipoib_ib_dev_open(struct net_device *dev)
     return 0;
 
 dev_stop:
-    set_bit(IPOIB_STOP_REAPER, &priv->flags);
-    cancel_delayed_work(&priv->ah_reap_task);
-    set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
-    ipoib_ib_dev_stop(dev);
+    ipoib_stop_ah_reaper(priv);
     return -1;
 }
 
+void ipoib_ib_dev_stop(struct net_device *dev)
+{
+    struct ipoib_dev_priv *priv = ipoib_priv(dev);
+
+    priv->rn_ops->ndo_stop(dev);
+
+    clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags);
+    ipoib_stop_ah_reaper(priv);
+}
+
 void ipoib_pkey_dev_check_presence(struct net_device *dev)
 {
     struct ipoib_dev_priv *priv = ipoib_priv(dev);

@@ -1231,7 +1222,7 @@ static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv,
         ipoib_mcast_dev_flush(dev);
         if (oper_up)
             set_bit(IPOIB_FLAG_OPER_UP, &priv->flags);
-        ipoib_flush_ah(dev);
+        ipoib_reap_dead_ahs(priv);
     }
 
     if (level >= IPOIB_FLUSH_NORMAL)

@@ -1306,7 +1297,7 @@ void ipoib_ib_dev_cleanup(struct net_device *dev)
      * the neighbor garbage collection is stopped and reaped.
      * That should all be done now, so make a final ah flush.
      */
-    ipoib_stop_ah(dev);
+    ipoib_reap_dead_ahs(priv);
 
     clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

@@ -1979,6 +1979,8 @@ static void ipoib_ndo_uninit(struct net_device *dev)
 
     /* no more works over the priv->wq */
     if (priv->wq) {
+        /* See ipoib_mcast_carrier_on_task() */
+        WARN_ON(test_bit(IPOIB_FLAG_OPER_UP, &priv->flags));
         flush_workqueue(priv->wq);
         destroy_workqueue(priv->wq);
         priv->wq = NULL;
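Note: the ipoib refactor above centralizes a common kernel pattern — a delayed work item that re-queues itself each second until a stop bit is set, with dedicated start/stop helpers owning that flag. A hedged kernel-style sketch of the pattern; demo_priv, DEMO_STOP_REAPER and reap_dead_entries() are invented names, not the driver's:

static void reaper_fn(struct work_struct *work)
{
    struct demo_priv *priv =
        container_of(work, struct demo_priv, reap_task.work);

    reap_dead_entries(priv);    /* hypothetical helper */

    /* Re-arm only while the stop bit is clear. */
    if (!test_bit(DEMO_STOP_REAPER, &priv->flags))
        queue_delayed_work(priv->wq, &priv->reap_task,
                   round_jiffies_relative(HZ));
}

static void start_reaper(struct demo_priv *priv)
{
    clear_bit(DEMO_STOP_REAPER, &priv->flags);
    queue_delayed_work(priv->wq, &priv->reap_task,
               round_jiffies_relative(HZ));
}

static void stop_reaper(struct demo_priv *priv)
{
    set_bit(DEMO_STOP_REAPER, &priv->flags); /* prevents re-arm */
    cancel_delayed_work(&priv->reap_task);
    /* caller still performs one final synchronous reap afterwards */
}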
@@ -441,7 +441,7 @@ static ssize_t fsp_attr_set_setreg(struct psmouse *psmouse, void *data,
 
     fsp_reg_write_enable(psmouse, false);
 
-    return count;
+    return retval;
 }
 
 PSMOUSE_DEFINE_WO_ATTR(setreg, S_IWUSR, NULL, fsp_attr_set_setreg);
@@ -98,8 +98,11 @@ static ssize_t debug_read_regs(struct file *file, char __user *userbuf,
     mutex_lock(&iommu_debug_lock);
 
     bytes = omap_iommu_dump_ctx(obj, p, count);
+    if (bytes < 0)
+        goto err;
     bytes = simple_read_from_buffer(userbuf, count, ppos, buf, bytes);
 
+err:
     mutex_unlock(&iommu_debug_lock);
     kfree(buf);
@@ -264,7 +264,7 @@ struct bcache_device {
 #define BCACHE_DEV_UNLINK_DONE      2
 #define BCACHE_DEV_WB_RUNNING       3
 #define BCACHE_DEV_RATE_DW_RUNNING  4
-    unsigned int nr_stripes;
+    int nr_stripes;
     unsigned int stripe_size;
     atomic_t *stripe_sectors_dirty;
     unsigned long *full_dirty_stripes;

@@ -321,7 +321,7 @@ int bch_btree_keys_alloc(struct btree_keys *b,
 
     b->page_order = page_order;
 
-    t->data = (void *) __get_free_pages(gfp, b->page_order);
+    t->data = (void *) __get_free_pages(__GFP_COMP|gfp, b->page_order);
     if (!t->data)
         goto err;

@@ -836,7 +836,7 @@ int bch_btree_cache_alloc(struct cache_set *c)
     mutex_init(&c->verify_lock);
 
     c->verify_ondisk = (void *)
-        __get_free_pages(GFP_KERNEL, ilog2(bucket_pages(c)));
+        __get_free_pages(GFP_KERNEL|__GFP_COMP, ilog2(bucket_pages(c)));
 
     c->verify_data = mca_bucket_alloc(c, &ZERO_KEY, GFP_KERNEL);

@@ -865,8 +865,8 @@ int bch_journal_alloc(struct cache_set *c)
     j->w[1].c = c;
 
     if (!(init_fifo(&j->pin, JOURNAL_PIN, GFP_KERNEL)) ||
-        !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)) ||
-        !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL, JSET_BITS)))
+        !(j->w[0].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)) ||
+        !(j->w[1].data = (void *) __get_free_pages(GFP_KERNEL|__GFP_COMP, JSET_BITS)))
         return -ENOMEM;
 
     return 0;

@@ -1703,7 +1703,7 @@ void bch_cache_set_unregister(struct cache_set *c)
 }
 
 #define alloc_bucket_pages(gfp, c)          \
-    ((void *) __get_free_pages(__GFP_ZERO|gfp, ilog2(bucket_pages(c))))
+    ((void *) __get_free_pages(__GFP_ZERO|__GFP_COMP|gfp, ilog2(bucket_pages(c))))
 
 struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
 {
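Note: every bcache allocation touched above adds __GFP_COMP, so multi-order buffers come back as compound pages and code that takes references on their constituent pages treats the allocation as one unit. A hedged kernel-style fragment showing just that flag combination; the wrapper name is invented:

/* Sketch only: allocate a 2^order-page buffer as a compound page. */
static void *alloc_multipage_buf(unsigned int order)
{
    return (void *)__get_free_pages(GFP_KERNEL | __GFP_COMP, order);
}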
@@ -516,15 +516,19 @@ void bcache_dev_sectors_dirty_add(struct cache_set *c, unsigned int inode,
                   uint64_t offset, int nr_sectors)
 {
     struct bcache_device *d = c->devices[inode];
-    unsigned int stripe_offset, stripe, sectors_dirty;
+    unsigned int stripe_offset, sectors_dirty;
+    int stripe;
 
     if (!d)
         return;
 
+    stripe = offset_to_stripe(d, offset);
+    if (stripe < 0)
+        return;
+
     if (UUID_FLASH_ONLY(&c->uuids[inode]))
         atomic_long_add(nr_sectors, &c->flash_dev_dirty_sectors);
 
-    stripe = offset_to_stripe(d, offset);
     stripe_offset = offset & (d->stripe_size - 1);
 
     while (nr_sectors) {

@@ -564,12 +568,12 @@ static bool dirty_pred(struct keybuf *buf, struct bkey *k)
 static void refill_full_stripes(struct cached_dev *dc)
 {
     struct keybuf *buf = &dc->writeback_keys;
-    unsigned int start_stripe, stripe, next_stripe;
+    unsigned int start_stripe, next_stripe;
+    int stripe;
     bool wrapped = false;
 
     stripe = offset_to_stripe(&dc->disk, KEY_OFFSET(&buf->last_scanned));
 
-    if (stripe >= dc->disk.nr_stripes)
+    if (stripe < 0)
         stripe = 0;
 
     start_stripe = stripe;

@@ -33,10 +33,22 @@ static inline uint64_t bcache_dev_sectors_dirty(struct bcache_device *d)
     return ret;
 }
 
-static inline unsigned int offset_to_stripe(struct bcache_device *d,
+static inline int offset_to_stripe(struct bcache_device *d,
                     uint64_t offset)
 {
     do_div(offset, d->stripe_size);
+
+    /* d->nr_stripes is in range [1, INT_MAX] */
+    if (unlikely(offset >= d->nr_stripes)) {
+        pr_err("Invalid stripe %llu (>= nr_stripes %d).\n",
+            offset, d->nr_stripes);
+        return -EINVAL;
+    }
+
+    /*
+     * Here offset is definitly smaller than INT_MAX,
+     * return it as int will never overflow.
+     */
     return offset;
 }

@@ -44,7 +56,10 @@ static inline bool bcache_dev_stripe_dirty(struct cached_dev *dc,
                        uint64_t offset,
                        unsigned int nr_sectors)
 {
-    unsigned int stripe = offset_to_stripe(&dc->disk, offset);
+    int stripe = offset_to_stripe(&dc->disk, offset);
+
+    if (stripe < 0)
+        return false;
 
     while (1) {
         if (atomic_read(dc->disk.stripe_sectors_dirty + stripe))
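Note: offset_to_stripe() now returns a signed, range-checked index, and every caller tests for a negative result before indexing. A self-contained C sketch of the same convention (constants are illustrative, not bcache's):

#include <stdint.h>
#include <stdio.h>

#define NR_STRIPES  8
#define STRIPE_SIZE 1024u

/* Return a validated index, or -1 for out-of-range offsets. */
static int offset_to_index(uint64_t offset)
{
    offset /= STRIPE_SIZE;
    if (offset >= NR_STRIPES)
        return -1;          /* caller must check before indexing */
    return (int)offset;     /* fits: 0 <= offset < NR_STRIPES */
}

int main(void)
{
    printf("%d\n", offset_to_index(2048));    /* 2 */
    printf("%d\n", offset_to_index(9999999)); /* -1 */
    return 0;
}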
@@ -70,9 +70,6 @@ void dm_start_queue(struct request_queue *q)
 
 void dm_stop_queue(struct request_queue *q)
 {
-    if (blk_mq_queue_stopped(q))
-        return;
-
     blk_mq_quiesce_queue(q);
 }
@@ -1139,6 +1139,7 @@ static int resize_bitmaps(struct mddev *mddev, sector_t newsize, sector_t oldsiz
         bitmap = get_bitmap_from_slot(mddev, i);
         if (IS_ERR(bitmap)) {
             pr_err("can't get bitmap from slot %d\n", i);
+            bitmap = NULL;
             goto out;
         }
         counts = &bitmap->counts;
@@ -3598,6 +3598,7 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
      * is missing/faulty, then we need to read everything we can.
      */
     if (sh->raid_conf->level != 6 &&
+        sh->raid_conf->rmw_level != PARITY_DISABLE_RMW &&
         sh->sector < sh->raid_conf->mddev->recovery_cp)
         /* reconstruct-write isn't being forced */
         return 0;

@@ -4834,7 +4835,7 @@ static void handle_stripe(struct stripe_head *sh)
      * or to load a block that is being partially written.
      */
     if (s.to_read || s.non_overwrite
-        || (conf->level == 6 && s.to_write && s.failed)
+        || (s.to_write && s.failed)
         || (s.syncing && (s.uptodate + s.compute < disks))
         || s.replacing
         || s.expanding)
@@ -200,22 +200,25 @@ static void rga_cmd_set_trans_info(struct rga_ctx *ctx)
     dst_info.data.format = ctx->out.fmt->hw_format;
     dst_info.data.swap = ctx->out.fmt->color_swap;
 
-    if (ctx->in.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
-        if (ctx->out.fmt->hw_format < RGA_COLOR_FMT_YUV422SP) {
-            switch (ctx->in.colorspace) {
-            case V4L2_COLORSPACE_REC709:
-                src_info.data.csc_mode =
-                    RGA_SRC_CSC_MODE_BT709_R0;
-                break;
-            default:
-                src_info.data.csc_mode =
-                    RGA_SRC_CSC_MODE_BT601_R0;
-                break;
-            }
-        }
+    /*
+     * CSC mode must only be set when the colorspace families differ between
+     * input and output. It must remain unset (zeroed) if both are the same.
+     */
+
+    if (RGA_COLOR_FMT_IS_YUV(ctx->in.fmt->hw_format) &&
+        RGA_COLOR_FMT_IS_RGB(ctx->out.fmt->hw_format)) {
+        switch (ctx->in.colorspace) {
+        case V4L2_COLORSPACE_REC709:
+            src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;
+            break;
+        default:
+            src_info.data.csc_mode = RGA_SRC_CSC_MODE_BT601_R0;
+            break;
+        }
     }
 
-    if (ctx->out.fmt->hw_format >= RGA_COLOR_FMT_YUV422SP) {
+    if (RGA_COLOR_FMT_IS_RGB(ctx->in.fmt->hw_format) &&
+        RGA_COLOR_FMT_IS_YUV(ctx->out.fmt->hw_format)) {
         switch (ctx->out.colorspace) {
         case V4L2_COLORSPACE_REC709:
             dst_info.data.csc_mode = RGA_SRC_CSC_MODE_BT709_R0;

@@ -95,6 +95,11 @@
 #define RGA_COLOR_FMT_CP_8BPP       15
 #define RGA_COLOR_FMT_MASK          15
 
+#define RGA_COLOR_FMT_IS_YUV(fmt) \
+    (((fmt) >= RGA_COLOR_FMT_YUV422SP) && ((fmt) < RGA_COLOR_FMT_CP_1BPP))
+#define RGA_COLOR_FMT_IS_RGB(fmt) \
+    ((fmt) < RGA_COLOR_FMT_YUV422SP)
+
 #define RGA_COLOR_NONE_SWAP         0
 #define RGA_COLOR_RB_SWAP           1
 #define RGA_COLOR_ALPHA_SWAP        2
@@ -431,6 +431,8 @@ vsp1_dl_cmd_pool_create(struct vsp1_device *vsp1, enum vsp1_extcmd_type type,
     if (!pool)
         return NULL;
 
+    pool->vsp1 = vsp1;
+
     spin_lock_init(&pool->lock);
     INIT_LIST_HEAD(&pool->free);
@@ -1430,6 +1430,15 @@ err_irq:
     arizona_irq_exit(arizona);
 err_pm:
     pm_runtime_disable(arizona->dev);
+
+    switch (arizona->pdata.clk32k_src) {
+    case ARIZONA_32KZ_MCLK1:
+    case ARIZONA_32KZ_MCLK2:
+        arizona_clk32k_disable(arizona);
+        break;
+    default:
+        break;
+    }
 err_reset:
     arizona_enable_reset(arizona);
     regulator_disable(arizona->dcvdd);

@@ -1452,6 +1461,15 @@ int arizona_dev_exit(struct arizona *arizona)
     regulator_disable(arizona->dcvdd);
     regulator_put(arizona->dcvdd);
 
+    switch (arizona->pdata.clk32k_src) {
+    case ARIZONA_32KZ_MCLK1:
+    case ARIZONA_32KZ_MCLK2:
+        arizona_clk32k_disable(arizona);
+        break;
+    default:
+        break;
+    }
+
     mfd_remove_devices(arizona->dev);
     arizona_free_irq(arizona, ARIZONA_IRQ_UNDERCLOCKED, arizona);
     arizona_free_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, arizona);
@@ -287,7 +287,11 @@ static void dln2_rx(struct urb *urb)
     len = urb->actual_length - sizeof(struct dln2_header);
 
     if (handle == DLN2_HANDLE_EVENT) {
+        unsigned long flags;
+
+        spin_lock_irqsave(&dln2->event_cb_lock, flags);
         dln2_run_event_callbacks(dln2, id, echo, data, len);
+        spin_unlock_irqrestore(&dln2->event_cb_lock, flags);
     } else {
         /* URB will be re-submitted in _dln2_transfer (free_rx_slot) */
         if (dln2_transfer_complete(dln2, urb, handle, echo))
@@ -229,15 +229,12 @@ static void renesas_sdhi_internal_dmac_issue_tasklet_fn(unsigned long arg)
                 DTRAN_CTRL_DM_START);
 }
 
-static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+static bool renesas_sdhi_internal_dmac_complete(struct tmio_mmc_host *host)
 {
-    struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
     enum dma_data_direction dir;
 
-    spin_lock_irq(&host->lock);
-
     if (!host->data)
-        goto out;
+        return false;
 
     if (host->data->flags & MMC_DATA_READ)
         dir = DMA_FROM_DEVICE;

@@ -250,6 +247,17 @@ static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
     if (dir == DMA_FROM_DEVICE)
         clear_bit(SDHI_INTERNAL_DMAC_RX_IN_USE, &global_flags);
 
+    return true;
+}
+
+static void renesas_sdhi_internal_dmac_complete_tasklet_fn(unsigned long arg)
+{
+    struct tmio_mmc_host *host = (struct tmio_mmc_host *)arg;
+
+    spin_lock_irq(&host->lock);
+    if (!renesas_sdhi_internal_dmac_complete(host))
+        goto out;
+
     tmio_mmc_do_data_irq(host);
 out:
     spin_unlock_irq(&host->lock);
@@ -62,7 +62,6 @@ static int fun_chip_ready(struct nand_chip *chip)
 static void fun_wait_rnb(struct fsl_upm_nand *fun)
 {
     if (fun->rnb_gpio[fun->mchip_number] >= 0) {
-        struct mtd_info *mtd = nand_to_mtd(&fun->chip);
         int cnt = 1000000;
 
         while (--cnt && !fun_chip_ready(&fun->chip))
@@ -43,7 +43,7 @@ struct qmem {
     void        *base;
     dma_addr_t  iova;
    int          alloc_sz;
-   u8           entry_sz;
+   u16          entry_sz;
    u8           align;
    u32          qsize;
 };
@@ -485,13 +485,24 @@ static int emac_clks_phase1_init(struct platform_device *pdev,
 
     ret = clk_prepare_enable(adpt->clk[EMAC_CLK_CFG_AHB]);
     if (ret)
-        return ret;
+        goto disable_clk_axi;
 
     ret = clk_set_rate(adpt->clk[EMAC_CLK_HIGH_SPEED], 19200000);
     if (ret)
-        return ret;
+        goto disable_clk_cfg_ahb;
 
-    return clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+    ret = clk_prepare_enable(adpt->clk[EMAC_CLK_HIGH_SPEED]);
+    if (ret)
+        goto disable_clk_cfg_ahb;
+
+    return 0;
+
+disable_clk_cfg_ahb:
+    clk_disable_unprepare(adpt->clk[EMAC_CLK_CFG_AHB]);
+disable_clk_axi:
+    clk_disable_unprepare(adpt->clk[EMAC_CLK_AXI]);
+
+    return ret;
 }
 
 /* Enable clocks; needs emac_clks_phase1_init to be called before */
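Note: the error handling above is the usual goto-unwind ladder — each newly enabled clock gets a label that disables everything acquired before it, in reverse order. A kernel-style sketch with invented clock field names:

/* Sketch only; demo_adpt and its clk_* fields are hypothetical. */
static int demo_clks_init(struct demo_adpt *adpt)
{
    int ret;

    ret = clk_prepare_enable(adpt->clk_axi);
    if (ret)
        return ret;             /* nothing to undo yet */

    ret = clk_prepare_enable(adpt->clk_ahb);
    if (ret)
        goto disable_axi;

    ret = clk_prepare_enable(adpt->clk_high_speed);
    if (ret)
        goto disable_ahb;

    return 0;

disable_ahb:
    clk_disable_unprepare(adpt->clk_ahb);   /* undo in reverse order */
disable_axi:
    clk_disable_unprepare(adpt->clk_axi);
    return ret;
}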
@@ -350,6 +350,7 @@ static int ipq806x_gmac_probe(struct platform_device *pdev)
     plat_dat->has_gmac = true;
     plat_dat->bsp_priv = gmac;
     plat_dat->fix_mac_speed = ipq806x_gmac_fix_mac_speed;
+    plat_dat->multicast_filter_bins = 0;
 
     err = stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res);
     if (err)

@@ -166,6 +166,9 @@ static void dwmac1000_set_filter(struct mac_device_info *hw,
         value = GMAC_FRAME_FILTER_PR;
     } else if (dev->flags & IFF_ALLMULTI) {
         value = GMAC_FRAME_FILTER_PM;   /* pass all multi */
+    } else if (!netdev_mc_empty(dev) && (mcbitslog2 == 0)) {
+        /* Fall back to all multicast if we've no filter */
+        value = GMAC_FRAME_FILTER_PM;
     } else if (!netdev_mc_empty(dev)) {
         struct netdev_hw_addr *ha;
 
@@ -461,14 +461,19 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
     else
         dev_dbg(&nvdimm->dev, "overwrite completed\n");
 
-    if (nvdimm->sec.overwrite_state)
-        sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+    /*
+     * Mark the overwrite work done and update dimm security flags,
+     * then send a sysfs event notification to wake up userspace
+     * poll threads to picked up the changed state.
+     */
     nvdimm->sec.overwrite_tmo = 0;
     clear_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
     clear_bit(NDD_WORK_PENDING, &nvdimm->flags);
-    put_device(&nvdimm->dev);
     nvdimm->sec.state = nvdimm_security_state(nvdimm, NVDIMM_USER);
     nvdimm->sec.ext_state = nvdimm_security_state(nvdimm, NVDIMM_MASTER);
+    if (nvdimm->sec.overwrite_state)
+        sysfs_notify_dirent(nvdimm->sec.overwrite_state);
+    put_device(&nvdimm->dev);
 }
 
 void nvdimm_security_overwrite_query(struct work_struct *work)
@@ -322,12 +322,8 @@ void pci_bus_add_device(struct pci_dev *dev)
 
     dev->match_driver = true;
     retval = device_attach(&dev->dev);
-    if (retval < 0 && retval != -EPROBE_DEFER) {
+    if (retval < 0 && retval != -EPROBE_DEFER)
         pci_warn(dev, "device attach failed (%d)\n", retval);
-        pci_proc_detach_device(dev);
-        pci_remove_sysfs_dev_files(dev);
-        return;
-    }
 
     pci_dev_assign_added(dev, true);
 }
@@ -45,7 +45,13 @@
 #define PCIE_CAP_CPL_TIMEOUT_DISABLE        0x10
 
 #define PCIE20_PARF_PHY_CTRL            0x40
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK   GENMASK(20, 16)
+#define PHY_CTRL_PHY_TX0_TERM_OFFSET(x)     ((x) << 16)
+
 #define PCIE20_PARF_PHY_REFCLK          0x4C
+#define PHY_REFCLK_SSP_EN           BIT(16)
+#define PHY_REFCLK_USE_PAD          BIT(12)
+
 #define PCIE20_PARF_DBI_BASE_ADDR       0x168
 #define PCIE20_PARF_SLV_ADDR_SPACE_SIZE     0x16C
 #define PCIE20_PARF_MHI_CLOCK_RESET_CTRL    0x174

@@ -76,6 +82,18 @@
 #define DBI_RO_WR_EN                1
 
 #define PERST_DELAY_US              1000
+/* PARF registers */
+#define PCIE20_PARF_PCS_DEEMPH          0x34
+#define PCS_DEEMPH_TX_DEEMPH_GEN1(x)        ((x) << 16)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(x)  ((x) << 8)
+#define PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(x)    ((x) << 0)
+
+#define PCIE20_PARF_PCS_SWING           0x38
+#define PCS_SWING_TX_SWING_FULL(x)      ((x) << 8)
+#define PCS_SWING_TX_SWING_LOW(x)       ((x) << 0)
+
+#define PCIE20_PARF_CONFIG_BITS         0x50
+#define PHY_RX0_EQ(x)               ((x) << 24)
 
 #define PCIE20_v3_PARF_SLV_ADDR_SPACE_SIZE  0x358
 #define SLV_ADDR_SPACE_SZ           0x10000000

@@ -275,6 +293,7 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
     struct qcom_pcie_resources_2_1_0 *res = &pcie->res.v2_1_0;
     struct dw_pcie *pci = pcie->pci;
     struct device *dev = pci->dev;
+    struct device_node *node = dev->of_node;
     u32 val;
     int ret;

@@ -319,9 +338,29 @@ static int qcom_pcie_init_2_1_0(struct qcom_pcie *pcie)
     val &= ~BIT(0);
     writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
 
+    if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+        writel(PCS_DEEMPH_TX_DEEMPH_GEN1(24) |
+               PCS_DEEMPH_TX_DEEMPH_GEN2_3_5DB(24) |
+               PCS_DEEMPH_TX_DEEMPH_GEN2_6DB(34),
+               pcie->parf + PCIE20_PARF_PCS_DEEMPH);
+        writel(PCS_SWING_TX_SWING_FULL(120) |
+               PCS_SWING_TX_SWING_LOW(120),
+               pcie->parf + PCIE20_PARF_PCS_SWING);
+        writel(PHY_RX0_EQ(4), pcie->parf + PCIE20_PARF_CONFIG_BITS);
+    }
+
+    if (of_device_is_compatible(node, "qcom,pcie-ipq8064")) {
+        /* set TX termination offset */
+        val = readl(pcie->parf + PCIE20_PARF_PHY_CTRL);
+        val &= ~PHY_CTRL_PHY_TX0_TERM_OFFSET_MASK;
+        val |= PHY_CTRL_PHY_TX0_TERM_OFFSET(7);
+        writel(val, pcie->parf + PCIE20_PARF_PHY_CTRL);
+    }
+
     /* enable external reference clock */
     val = readl(pcie->parf + PCIE20_PARF_PHY_REFCLK);
-    val |= BIT(16);
+    val &= ~PHY_REFCLK_USE_PAD;
+    val |= PHY_REFCLK_SSP_EN;
     writel(val, pcie->parf + PCIE20_PARF_PHY_REFCLK);
 
     ret = reset_control_deassert(res->phy_reset);
@@ -122,13 +122,21 @@ static struct acpiphp_context *acpiphp_grab_context(struct acpi_device *adev)
     struct acpiphp_context *context;
 
     acpi_lock_hp_context();
+
     context = acpiphp_get_context(adev);
-    if (!context || context->func.parent->is_going_away) {
-        acpi_unlock_hp_context();
-        return NULL;
+    if (!context)
+        goto unlock;
+
+    if (context->func.parent->is_going_away) {
+        acpiphp_put_context(context);
+        context = NULL;
+        goto unlock;
     }
+
     get_bridge(context->func.parent);
     acpiphp_put_context(context);
+
+unlock:
     acpi_unlock_hp_context();
     return context;
 }
@@ -5118,7 +5118,8 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_SERVERWORKS, 0x0422, quirk_no_ext_tags);
  */
 static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 {
-    if (pdev->device == 0x7340 && pdev->revision != 0xc5)
+    if ((pdev->device == 0x7312 && pdev->revision != 0x00) ||
+        (pdev->device == 0x7340 && pdev->revision != 0xc5))
         return;
 
     pci_info(pdev, "disabling ATS\n");

@@ -5129,6 +5130,8 @@ static void quirk_amd_harvest_no_ats(struct pci_dev *pdev)
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x98e4, quirk_amd_harvest_no_ats);
 /* AMD Iceland dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x6900, quirk_amd_harvest_no_ats);
+/* AMD Navi10 dGPU */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7312, quirk_amd_harvest_no_ats);
 /* AMD Navi14 dGPU */
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x7340, quirk_amd_harvest_no_ats);
 #endif /* CONFIG_PCI_ATS */
@@ -873,9 +873,9 @@ static void ingenic_gpio_irq_ack(struct irq_data *irqd)
          */
         high = ingenic_gpio_get_value(jzgc, irq);
         if (high)
-            irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_FALLING);
+            irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_LOW);
         else
-            irq_set_type(jzgc, irq, IRQ_TYPE_EDGE_RISING);
+            irq_set_type(jzgc, irq, IRQ_TYPE_LEVEL_HIGH);
     }
 
     if (jzgc->jzpc->version >= ID_JZ4770)

@@ -911,7 +911,7 @@ static int ingenic_gpio_irq_set_type(struct irq_data *irqd, unsigned int type)
          */
         bool high = ingenic_gpio_get_value(jzgc, irqd->hwirq);
 
-        type = high ? IRQ_TYPE_EDGE_FALLING : IRQ_TYPE_EDGE_RISING;
+        type = high ? IRQ_TYPE_LEVEL_LOW : IRQ_TYPE_LEVEL_HIGH;
     }
 
     irq_set_type(jzgc, irqd->hwirq, type);
@@ -85,8 +85,6 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 	u64 tmp, multi, rate;
 	u32 value, prescale;
 
-	rate = clk_get_rate(ip->clk);
-
 	value = readl(ip->base + IPROC_PWM_CTRL_OFFSET);
 
 	if (value & BIT(IPROC_PWM_CTRL_EN_SHIFT(pwm->hwpwm)))
@@ -99,6 +97,13 @@ static void iproc_pwmc_get_state(struct pwm_chip *chip, struct pwm_device *pwm,
 	else
 		state->polarity = PWM_POLARITY_INVERSED;
 
+	rate = clk_get_rate(ip->clk);
+	if (rate == 0) {
+		state->period = 0;
+		state->duty_cycle = 0;
+		return;
+	}
+
 	value = readl(ip->base + IPROC_PWM_PRESCALE_OFFSET);
 	prescale = value >> IPROC_PWM_PRESCALE_SHIFT(pwm->hwpwm);
 	prescale &= IPROC_PWM_PRESCALE_MAX;

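The relocated clk_get_rate() call above feeds a division when the period is computed, so a zero rate (for example, a clock that is not available yet) must bail out first. A runnable toy version of that guard (names invented, not the driver's API):

#include <stdint.h>
#include <stdio.h>

static void get_period_ns(uint64_t rate_hz, uint32_t ticks,
			  uint64_t *period_ns)
{
	if (rate_hz == 0) {		/* avoid a divide-by-zero */
		*period_ns = 0;
		return;
	}
	*period_ns = (uint64_t)ticks * 1000000000ull / rate_hz;
}

int main(void)
{
	uint64_t p;

	get_period_ns(0, 4096, &p);	   /* clock off: reports 0 */
	printf("period=%llu\n", (unsigned long long)p);
	get_period_ns(25000000, 4096, &p); /* 25 MHz input clock */
	printf("period=%llu\n", (unsigned long long)p);
	return 0;
}
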
@@ -4880,7 +4880,10 @@ static int regulator_init_coupling(struct regulator_dev *rdev)
 	if (!of_check_coupling_data(rdev))
 		return -EPERM;
 
+	mutex_lock(&regulator_list_mutex);
 	rdev->coupling_desc.coupler = regulator_find_coupler(rdev);
+	mutex_unlock(&regulator_list_mutex);
+
 	if (IS_ERR(rdev->coupling_desc.coupler)) {
 		err = PTR_ERR(rdev->coupling_desc.coupler);
 		rdev_err(rdev, "failed to get coupler: %d\n", err);
@@ -4975,6 +4978,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
 		ret = -ENOMEM;
 		goto rinse;
 	}
+	device_initialize(&rdev->dev);
 
 	/*
 	 * Duplicate the config so the driver could override it after
@@ -4982,13 +4986,23 @@ regulator_register(const struct regulator_desc *regulator_desc,
 	 */
 	config = kmemdup(cfg, sizeof(*cfg), GFP_KERNEL);
 	if (config == NULL) {
-		kfree(rdev);
 		ret = -ENOMEM;
-		goto rinse;
+		goto clean;
 	}
 
 	init_data = regulator_of_get_init_data(dev, regulator_desc, config,
 					       &rdev->dev.of_node);
+
+	/*
+	 * Sometimes not all resources are probed already so we need to take
+	 * that into account. This happens most the time if the ena_gpiod comes
+	 * from a gpio extender or something else.
+	 */
+	if (PTR_ERR(init_data) == -EPROBE_DEFER) {
+		ret = -EPROBE_DEFER;
+		goto clean;
+	}
+
 	/*
 	 * We need to keep track of any GPIO descriptor coming from the
 	 * device tree until we have handled it over to the core. If the
@@ -5041,7 +5055,6 @@ regulator_register(const struct regulator_desc *regulator_desc,
 	}
 
 	/* register with sysfs */
-	device_initialize(&rdev->dev);
 	rdev->dev.class = &regulator_class;
 	rdev->dev.parent = dev;
 	dev_set_name(&rdev->dev, "regulator.%lu",
@@ -5069,9 +5082,7 @@ regulator_register(const struct regulator_desc *regulator_desc,
 	if (ret < 0)
 		goto wash;
 
-	mutex_lock(&regulator_list_mutex);
 	ret = regulator_init_coupling(rdev);
-	mutex_unlock(&regulator_list_mutex);
 	if (ret < 0)
 		goto wash;
 
@@ -5124,13 +5135,11 @@ wash:
 	mutex_lock(&regulator_list_mutex);
 	regulator_ena_gpio_free(rdev);
 	mutex_unlock(&regulator_list_mutex);
-	put_device(&rdev->dev);
-	rdev = NULL;
 clean:
 	if (dangling_of_gpiod)
 		gpiod_put(config->ena_gpiod);
-	kfree(rdev);
 	kfree(config);
+	put_device(&rdev->dev);
 rinse:
 	if (dangling_cfg_gpiod)
 		gpiod_put(cfg->ena_gpiod);

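The regulator hunks above move device_initialize() to just after allocation and route error paths through put_device(). A minimal sketch of why, assuming kernel context (the "foo" object and names below are hypothetical, not the regulator code): once a device is initialized, any name set with dev_set_name() belongs to the kobject, and only the release callback may free the containing structure - a bare kfree() would leak the name.

#include <linux/device.h>
#include <linux/slab.h>

struct foo {				/* hypothetical driver object */
	struct device dev;
};

static void foo_release(struct device *dev)
{
	kfree(container_of(dev, struct foo, dev));
}

static struct foo *foo_create(void)
{
	struct foo *f = kzalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return NULL;

	device_initialize(&f->dev);
	f->dev.release = foo_release;

	if (dev_set_name(&f->dev, "foo0")) {
		put_device(&f->dev);	/* frees f via foo_release() */
		return NULL;
	}
	return f;
}
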
@@ -443,12 +443,21 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
 			goto error;
 		}
 
-		if (desc->of_parse_cb && desc->of_parse_cb(child, desc, config)) {
-			dev_err(dev,
-				"driver callback failed to parse DT for regulator %pOFn\n",
-				child);
-			goto error;
+		if (desc->of_parse_cb) {
+			int ret;
+
+			ret = desc->of_parse_cb(child, desc, config);
+			if (ret) {
+				if (ret == -EPROBE_DEFER) {
+					of_node_put(child);
+					return ERR_PTR(-EPROBE_DEFER);
+				}
+				dev_err(dev,
+					"driver callback failed to parse DT for regulator %pOFn\n",
+					child);
+				goto error;
+			}
 		}
 
 		*node = child;

@@ -151,6 +151,8 @@ int qcom_q6v5_request_stop(struct qcom_q6v5 *q6v5)
 {
 	int ret;
 
+	q6v5->running = false;
+
 	qcom_smem_state_update_bits(q6v5->state,
 				    BIT(q6v5->stop_bit), BIT(q6v5->stop_bit));
 

@@ -381,6 +381,12 @@ static int q6v5_load(struct rproc *rproc, const struct firmware *fw)
 {
 	struct q6v5 *qproc = rproc->priv;
 
+	/* MBA is restricted to a maximum size of 1M */
+	if (fw->size > qproc->mba_size || fw->size > SZ_1M) {
+		dev_err(qproc->dev, "MBA firmware load failed\n");
+		return -EINVAL;
+	}
+
 	memcpy(qproc->mba_region, fw->data, fw->size);
 
 	return 0;
@@ -1028,15 +1034,14 @@ static int q6v5_mpss_load(struct q6v5 *qproc)
 		} else if (phdr->p_filesz) {
 			/* Replace "xxx.xxx" with "xxx.bxx" */
 			sprintf(fw_name + fw_name_len - 3, "b%02d", i);
-			ret = request_firmware(&seg_fw, fw_name, qproc->dev);
+			ret = request_firmware_into_buf(&seg_fw, fw_name, qproc->dev,
+							ptr, phdr->p_filesz);
 			if (ret) {
 				dev_err(qproc->dev, "failed to load %s\n", fw_name);
 				iounmap(ptr);
 				goto release_firmware;
 			}
 
-			memcpy(ptr, seg_fw->data, seg_fw->size);
-
 			release_firmware(seg_fw);
 		}
 

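Both hunks above bound the firmware blob against its destination before (or instead of) copying. A standalone sketch of the size check (illustrative; names are invented):

#include <stdint.h>
#include <string.h>

#define REGION_MAX (1024 * 1024)	/* 1M, as in the hunk above */

/* Reject a blob that exceeds either the mapped region or the protocol's
 * hard limit before the memcpy(); a corrupt or oversized firmware file
 * must never overrun the fixed buffer. */
static int load_blob(uint8_t *region, size_t region_size,
		     const uint8_t *data, size_t size)
{
	if (size > region_size || size > REGION_MAX)
		return -1;

	memcpy(region, data, size);
	return 0;
}
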
@@ -1723,7 +1723,7 @@ lpfc_nvmet_destroy_targetport(struct lpfc_hba *phba)
 		}
 		tgtp->tport_unreg_cmp = &tport_unreg_cmp;
 		nvmet_fc_unregister_targetport(phba->targetport);
-		if (!wait_for_completion_timeout(tgtp->tport_unreg_cmp,
+		if (!wait_for_completion_timeout(&tport_unreg_cmp,
 					msecs_to_jiffies(LPFC_NVMET_WAIT_TMO)))
 			lpfc_printf_log(phba, KERN_ERR, LOG_NVME,
 					"6179 Unreg targetport %p timeout "

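The change above waits on the on-stack completion itself rather than through the tgtp pointer, which another context can free or reuse before the timeout expires. A compressed kernel-style sketch of the pattern (illustrative only; the context struct and trigger helper are hypothetical, not the lpfc code):

#include <linux/completion.h>
#include <linux/jiffies.h>
#include <linux/printk.h>

struct unreg_ctx {			/* hypothetical shared state */
	struct completion *done_ptr;
};

void start_async_unregister(struct unreg_ctx *ctx);	/* hypothetical */

static void unregister_and_wait(struct unreg_ctx *ctx)
{
	DECLARE_COMPLETION_ONSTACK(done);

	ctx->done_ptr = &done;		/* publish for the completer */
	start_async_unregister(ctx);

	/* Wait on the local object, not on ctx->done_ptr, which may be
	 * cleared or freed by the time the timeout fires. */
	if (!wait_for_completion_timeout(&done, msecs_to_jiffies(5000)))
		pr_err("unregister timed out\n");
}
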
@@ -532,13 +532,8 @@ static void hfa384x_usb_defer(struct work_struct *data)
  */
 void hfa384x_create(struct hfa384x *hw, struct usb_device *usb)
 {
-	memset(hw, 0, sizeof(*hw));
 	hw->usb = usb;
 
-	/* set up the endpoints */
-	hw->endp_in = usb_rcvbulkpipe(usb, 1);
-	hw->endp_out = usb_sndbulkpipe(usb, 2);
-
 	/* Set up the waitq */
 	init_waitqueue_head(&hw->cmdq);
 

@@ -61,23 +61,14 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
 			       const struct usb_device_id *id)
 {
 	struct usb_device *dev;
-	const struct usb_endpoint_descriptor *epd;
-	const struct usb_host_interface *iface_desc = interface->cur_altsetting;
+	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
+	struct usb_host_interface *iface_desc = interface->cur_altsetting;
 	struct wlandevice *wlandev = NULL;
 	struct hfa384x *hw = NULL;
 	int result = 0;
 
-	if (iface_desc->desc.bNumEndpoints != 2) {
-		result = -ENODEV;
-		goto failed;
-	}
-
-	result = -EINVAL;
-	epd = &iface_desc->endpoint[1].desc;
-	if (!usb_endpoint_is_bulk_in(epd))
-		goto failed;
-	epd = &iface_desc->endpoint[2].desc;
-	if (!usb_endpoint_is_bulk_out(epd))
+	result = usb_find_common_endpoints(iface_desc, &bulk_in, &bulk_out, NULL, NULL);
+	if (result)
 		goto failed;
 
 	dev = interface_to_usbdev(interface);
@@ -96,6 +87,8 @@ static int prism2sta_probe_usb(struct usb_interface *interface,
 	}
 
 	/* Initialize the hw data */
+	hw->endp_in = usb_rcvbulkpipe(dev, bulk_in->bEndpointAddress);
+	hw->endp_out = usb_sndbulkpipe(dev, bulk_out->bEndpointAddress);
 	hfa384x_create(hw, dev);
 	hw->wlandev = wlandev;
 

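usb_find_common_endpoints(), used by the new probe code above, is the USB core helper (available since v4.12) that both locates and type-checks the first bulk-in/bulk-out pair, replacing hand-rolled endpoint[1]/endpoint[2] indexing that a malicious descriptor set can underprovide. A hedged probe-time sketch, assuming kernel context:

#include <linux/usb.h>

static int example_probe(struct usb_interface *intf)
{
	struct usb_host_interface *alt = intf->cur_altsetting;
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	ret = usb_find_common_endpoints(alt, &bulk_in, &bulk_out,
					NULL, NULL);
	if (ret)
		return ret;	/* -ENXIO if either endpoint is missing */

	dev_info(&intf->dev, "bulk in 0x%02x, bulk out 0x%02x\n",
		 bulk_in->bEndpointAddress, bulk_out->bEndpointAddress);
	return 0;
}
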
@@ -2437,12 +2437,11 @@ static int ftdi_prepare_write_buffer(struct usb_serial_port *port,
 #define FTDI_RS_ERR_MASK (FTDI_RS_BI | FTDI_RS_PE | FTDI_RS_FE | FTDI_RS_OE)
 
 static int ftdi_process_packet(struct usb_serial_port *port,
-		struct ftdi_private *priv, char *packet, int len)
+		struct ftdi_private *priv, unsigned char *buf, int len)
 {
+	unsigned char status;
 	int i;
-	char status;
 	char flag;
-	char *ch;
 
 	if (len < 2) {
 		dev_dbg(&port->dev, "malformed packet\n");
@@ -2452,7 +2451,7 @@ static int ftdi_process_packet(struct usb_serial_port *port,
 	/* Compare new line status to the old one, signal if different/
 	   N.B. packet may be processed more than once, but differences
 	   are only processed once.  */
-	status = packet[0] & FTDI_STATUS_B0_MASK;
+	status = buf[0] & FTDI_STATUS_B0_MASK;
 	if (status != priv->prev_status) {
 		char diff_status = status ^ priv->prev_status;
 
@@ -2478,13 +2477,12 @@ static int ftdi_process_packet(struct usb_serial_port *port,
 	}
 
 	/* save if the transmitter is empty or not */
-	if (packet[1] & FTDI_RS_TEMT)
+	if (buf[1] & FTDI_RS_TEMT)
 		priv->transmit_empty = 1;
 	else
 		priv->transmit_empty = 0;
 
-	len -= 2;
-	if (!len)
+	if (len == 2)
 		return 0;	/* status only */
 
 	/*
@@ -2492,40 +2490,41 @@ static int ftdi_process_packet(struct usb_serial_port *port,
 	 * data payload to avoid over-reporting.
 	 */
 	flag = TTY_NORMAL;
-	if (packet[1] & FTDI_RS_ERR_MASK) {
+	if (buf[1] & FTDI_RS_ERR_MASK) {
 		/* Break takes precedence over parity, which takes precedence
 		 * over framing errors */
-		if (packet[1] & FTDI_RS_BI) {
+		if (buf[1] & FTDI_RS_BI) {
 			flag = TTY_BREAK;
 			port->icount.brk++;
 			usb_serial_handle_break(port);
-		} else if (packet[1] & FTDI_RS_PE) {
+		} else if (buf[1] & FTDI_RS_PE) {
 			flag = TTY_PARITY;
 			port->icount.parity++;
-		} else if (packet[1] & FTDI_RS_FE) {
+		} else if (buf[1] & FTDI_RS_FE) {
 			flag = TTY_FRAME;
 			port->icount.frame++;
 		}
 		/* Overrun is special, not associated with a char */
-		if (packet[1] & FTDI_RS_OE) {
+		if (buf[1] & FTDI_RS_OE) {
 			port->icount.overrun++;
 			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
 		}
 	}
 
-	port->icount.rx += len;
-	ch = packet + 2;
+	port->icount.rx += len - 2;
 
 	if (port->port.console && port->sysrq) {
-		for (i = 0; i < len; i++, ch++) {
-			if (!usb_serial_handle_sysrq_char(port, *ch))
-				tty_insert_flip_char(&port->port, *ch, flag);
+		for (i = 2; i < len; i++) {
+			if (usb_serial_handle_sysrq_char(port, buf[i]))
+				continue;
+			tty_insert_flip_char(&port->port, buf[i], flag);
 		}
 	} else {
-		tty_insert_flip_string_fixed_flag(&port->port, ch, flag, len);
+		tty_insert_flip_string_fixed_flag(&port->port, buf + 2, flag,
+				len - 2);
 	}
 
-	return len;
+	return len - 2;
 }
 
 static void ftdi_process_read_urb(struct urb *urb)

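The switch from char to unsigned char in the hunks above matters because plain char may be signed on a given ABI: a status byte with bit 7 set sign-extends on integer promotion, so comparisons and mask tests against values like 0x80 can misbehave. A runnable demonstration:

#include <stdio.h>

int main(void)
{
	char s = (char)0x90;		/* status byte with bit 7 set */
	unsigned char u = 0x90;

	printf("signed:   0x%x\n", (int)s);	/* 0xffffff90 where char is signed */
	printf("unsigned: 0x%x\n", (int)u);	/* always 0x90 */
	return 0;
}
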
@@ -677,9 +677,9 @@ static int __init watchdog_init(int sioaddr)
 	 * into the module have been registered yet.
 	 */
 	watchdog.sioaddr = sioaddr;
-	watchdog.ident.options = WDIOC_SETTIMEOUT
-				| WDIOF_MAGICCLOSE
-				| WDIOF_KEEPALIVEPING;
+	watchdog.ident.options = WDIOF_MAGICCLOSE
+				| WDIOF_KEEPALIVEPING
+				| WDIOF_CARDRESET;
 
 	snprintf(watchdog.ident.identity,
 		 sizeof(watchdog.ident.identity), "%s watchdog",
@@ -693,6 +693,13 @@ static int __init watchdog_init(int sioaddr)
 	wdt_conf = superio_inb(sioaddr, F71808FG_REG_WDT_CONF);
 	watchdog.caused_reboot = wdt_conf & BIT(F71808FG_FLAG_WDTMOUT_STS);
 
+	/*
+	 * We don't want WDTMOUT_STS to stick around till regular reboot.
+	 * Write 1 to the bit to clear it to zero.
+	 */
+	superio_outb(sioaddr, F71808FG_REG_WDT_CONF,
+		     wdt_conf | BIT(F71808FG_FLAG_WDTMOUT_STS));
+
 	superio_exit(sioaddr);
 
 	err = watchdog_set_timeout(timeout);

@@ -92,8 +92,8 @@ responded:
 		}
 	}
 
-	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
-	if (rtt_us < server->probe.rtt) {
+	if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+	    rtt_us < server->probe.rtt) {
 		server->probe.rtt = rtt_us;
 		alist->preferred = index;
 		have_result = true;

@@ -92,8 +92,8 @@ responded:
 		}
 	}
 
-	rtt_us = rxrpc_kernel_get_srtt(call->net->socket, call->rxcall);
-	if (rtt_us < server->probe.rtt) {
+	if (rxrpc_kernel_get_srtt(call->net->socket, call->rxcall, &rtt_us) &&
+	    rtt_us < server->probe.rtt) {
 		server->probe.rtt = rtt_us;
 		alist->preferred = index;
 		have_result = true;

@@ -1511,9 +1511,16 @@ int btrfs_init_fs_root(struct btrfs_root *root)
 	spin_lock_init(&root->ino_cache_lock);
 	init_waitqueue_head(&root->ino_cache_wait);
 
-	ret = get_anon_bdev(&root->anon_dev);
-	if (ret)
-		goto fail;
+	/*
+	 * Don't assign anonymous block device to roots that are not exposed to
+	 * userspace, the id pool is limited to 1M
+	 */
+	if (is_fstree(root->root_key.objectid) &&
+	    btrfs_root_refs(&root->root_item) > 0) {
+		ret = get_anon_bdev(&root->anon_dev);
+		if (ret)
+			goto fail;
+	}
 
 	mutex_lock(&root->objectid_mutex);
 	ret = btrfs_find_highest_objectid(root,

@@ -2166,7 +2166,7 @@ out:
 static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
 			  struct btrfs_free_space *info, bool update_stat)
 {
-	struct btrfs_free_space *left_info;
+	struct btrfs_free_space *left_info = NULL;
 	struct btrfs_free_space *right_info;
 	bool merged = false;
 	u64 offset = info->offset;
@@ -2181,7 +2181,7 @@ static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
 	if (right_info && rb_prev(&right_info->offset_index))
 		left_info = rb_entry(rb_prev(&right_info->offset_index),
 				     struct btrfs_free_space, offset_index);
-	else
+	else if (!right_info)
 		left_info = tree_search_offset(ctl, offset - 1, 0, 0);
 
 	if (right_info && !right_info->bitmap) {

@@ -4547,6 +4547,8 @@ int btrfs_delete_subvolume(struct inode *dir, struct dentry *dentry)
 		}
 	}
 
+	free_anon_bdev(dest->anon_dev);
+	dest->anon_dev = 0;
 out_end_trans:
 	trans->block_rsv = NULL;
 	trans->bytes_reserved = 0;

@@ -286,6 +286,8 @@ static struct block_entry *add_block_entry(struct btrfs_fs_info *fs_info,
 			exist_re = insert_root_entry(&exist->roots, re);
 			if (exist_re)
 				kfree(re);
+		} else {
+			kfree(re);
 		}
 		kfree(be);
 		return exist;

@@ -432,6 +432,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 	char *compress_type;
 	bool compress_force = false;
 	enum btrfs_compression_type saved_compress_type;
+	int saved_compress_level;
 	bool saved_compress_force;
 	int no_compress = 0;
 
@@ -514,6 +515,7 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 				info->compress_type : BTRFS_COMPRESS_NONE;
 			saved_compress_force =
 				btrfs_test_opt(info, FORCE_COMPRESS);
+			saved_compress_level = info->compress_level;
 			if (token == Opt_compress ||
 			    token == Opt_compress_force ||
 			    strncmp(args[0].from, "zlib", 4) == 0) {
@@ -558,6 +560,8 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 				no_compress = 0;
 			} else if (strncmp(args[0].from, "no", 2) == 0) {
 				compress_type = "no";
+				info->compress_level = 0;
+				info->compress_type = 0;
 				btrfs_clear_opt(info->mount_opt, COMPRESS);
 				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 				compress_force = false;
@@ -578,11 +582,11 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
 				 */
 				btrfs_clear_opt(info->mount_opt, FORCE_COMPRESS);
 			}
-			if ((btrfs_test_opt(info, COMPRESS) &&
-			     (info->compress_type != saved_compress_type ||
-			      compress_force != saved_compress_force)) ||
-			    (!btrfs_test_opt(info, COMPRESS) &&
-			     no_compress == 1)) {
+			if (no_compress == 1) {
+				btrfs_info(info, "use no compression");
+			} else if ((info->compress_type != saved_compress_type) ||
+				   (compress_force != saved_compress_force) ||
+				   (info->compress_level != saved_compress_level)) {
 				btrfs_info(info, "%s %s compression, level %d",
 					   (compress_force) ? "force" : "use",
 					   compress_type, info->compress_level);
@@ -2265,9 +2269,7 @@ static int btrfs_unfreeze(struct super_block *sb)
 static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
-	struct btrfs_fs_devices *cur_devices;
 	struct btrfs_device *dev, *first_dev = NULL;
-	struct list_head *head;
 
 	/*
 	 * Lightweight locking of the devices. We should not need
@@ -2277,10 +2279,7 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 	 * least until the rcu_read_unlock.
 	 */
 	rcu_read_lock();
-	cur_devices = fs_info->fs_devices;
-	while (cur_devices) {
-		head = &cur_devices->devices;
-		list_for_each_entry_rcu(dev, head, dev_list) {
+	list_for_each_entry_rcu(dev, &fs_info->fs_devices->devices, dev_list) {
 		if (test_bit(BTRFS_DEV_STATE_MISSING, &dev->dev_state))
 			continue;
 		if (!dev->name)
@@ -2288,8 +2287,6 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 		if (!first_dev || dev->devid < first_dev->devid)
 			first_dev = dev;
 	}
-		cur_devices = cur_devices->seed;
-	}
 
 	if (first_dev)
 		seq_escape(m, rcu_str_deref(first_dev->name), " \t\n\\");

@@ -4049,11 +4049,8 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 						fs_info->csum_root,
 						ds + cs, ds + cs + cl - 1,
 						&ordered_sums, 0);
-				if (ret) {
-					btrfs_release_path(dst_path);
-					kfree(ins_data);
-					return ret;
-				}
+				if (ret)
+					break;
 			}
 		}
 	}
@@ -4066,7 +4063,6 @@ static noinline int copy_items(struct btrfs_trans_handle *trans,
 	 * we have to do this after the loop above to avoid changing the
 	 * log tree while trying to change the log tree.
 	 */
-	ret = 0;
 	while (!list_empty(&ordered_sums)) {
 		struct btrfs_ordered_sum *sums = list_entry(ordered_sums.next,
 							    struct btrfs_ordered_sum,

@@ -216,7 +216,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
  *
  * global::fs_devs - add, remove, updates to the global list
  *
- * does not protect: manipulation of the fs_devices::devices list!
+ * does not protect: manipulation of the fs_devices::devices list in general
+ * but in mount context it could be used to exclude list modifications by eg.
+ * scan ioctl
  *
  * btrfs_device::name - renames (write side), read is RCU
  *
@@ -229,6 +231,9 @@ static int __btrfs_map_block(struct btrfs_fs_info *fs_info,
 * may be used to exclude some operations from running concurrently without any
 * modifications to the list (see write_all_supers)
 *
+ * Is not required at mount and close times, because our device list is
+ * protected by the uuid_mutex at that point.
+ *
 * balance_mutex
 * -------------
 * protects balance structures (status, state) and context accessed from
@@ -786,6 +791,11 @@ static int btrfs_free_stale_devices(const char *path,
 	return ret;
 }
 
+/*
+ * This is only used on mount, and we are protected from competing things
+ * messing with our fs_devices by the uuid_mutex, thus we do not need the
+ * fs_devices->device_list_mutex here.
+ */
 static int btrfs_open_one_device(struct btrfs_fs_devices *fs_devices,
 			struct btrfs_device *device, fmode_t flags,
 			void *holder)
@@ -1422,8 +1432,14 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 	int ret;
 
 	lockdep_assert_held(&uuid_mutex);
+	/*
+	 * The device_list_mutex cannot be taken here in case opening the
+	 * underlying device takes further locks like bd_mutex.
+	 *
+	 * We also don't need the lock here as this is called during mount and
+	 * exclusion is provided by uuid_mutex
+	 */
 
-	mutex_lock(&fs_devices->device_list_mutex);
 	if (fs_devices->opened) {
 		fs_devices->opened++;
 		ret = 0;
@@ -1431,7 +1447,6 @@ int btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
 		list_sort(NULL, &fs_devices->devices, devid_cmp);
 		ret = open_fs_devices(fs_devices, flags, holder);
 	}
-	mutex_unlock(&fs_devices->device_list_mutex);
 
 	return ret;
 }
@@ -7230,7 +7245,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 	 * otherwise we don't need it.
 	 */
 	mutex_lock(&uuid_mutex);
-	mutex_lock(&fs_info->chunk_mutex);
 
 	/*
 	 * It is possible for mount and umount to race in such a way that
@@ -7275,7 +7289,9 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 		} else if (found_key.type == BTRFS_CHUNK_ITEM_KEY) {
 			struct btrfs_chunk *chunk;
 			chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
+			mutex_lock(&fs_info->chunk_mutex);
 			ret = read_one_chunk(&found_key, leaf, chunk);
+			mutex_unlock(&fs_info->chunk_mutex);
 			if (ret)
 				goto error;
 		}
@@ -7305,7 +7321,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
 	}
 	ret = 0;
 error:
-	mutex_unlock(&fs_info->chunk_mutex);
 	mutex_unlock(&uuid_mutex);
 
 	btrfs_free_path(path);

@@ -509,15 +509,31 @@ cifs_ses_oplock_break(struct work_struct *work)
 	kfree(lw);
 }
 
-static bool
-smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
-		    struct smb2_lease_break_work *lw)
+static void
+smb2_queue_pending_open_break(struct tcon_link *tlink, __u8 *lease_key,
+			      __le32 new_lease_state)
 {
-	bool found;
+	struct smb2_lease_break_work *lw;
+
+	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
+	if (!lw) {
+		cifs_put_tlink(tlink);
+		return;
+	}
+
+	INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
+	lw->tlink = tlink;
+	lw->lease_state = new_lease_state;
+	memcpy(lw->lease_key, lease_key, SMB2_LEASE_KEY_SIZE);
+	queue_work(cifsiod_wq, &lw->lease_break);
+}
+
+static bool
+smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+{
 	__u8 lease_state;
 	struct list_head *tmp;
 	struct cifsFileInfo *cfile;
-	struct cifs_pending_open *open;
 	struct cifsInodeInfo *cinode;
 	int ack_req = le32_to_cpu(rsp->Flags &
 				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
@@ -556,22 +572,29 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp,
 					  &cinode->flags);
 
 			cifs_queue_oplock_break(cfile);
-			kfree(lw);
 			return true;
 		}
 
-		found = false;
+	return false;
+}
+
+static struct cifs_pending_open *
+smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+				  struct smb2_lease_break *rsp)
+{
+	__u8 lease_state = le32_to_cpu(rsp->NewLeaseState);
+	int ack_req = le32_to_cpu(rsp->Flags &
+				  SMB2_NOTIFY_BREAK_LEASE_FLAG_ACK_REQUIRED);
+	struct cifs_pending_open *open;
+	struct cifs_pending_open *found = NULL;
+
 	list_for_each_entry(open, &tcon->pending_opens, olist) {
 		if (memcmp(open->lease_key, rsp->LeaseKey,
 			   SMB2_LEASE_KEY_SIZE))
 			continue;
 
 		if (!found && ack_req) {
-			found = true;
-			memcpy(lw->lease_key, open->lease_key,
-			       SMB2_LEASE_KEY_SIZE);
-			lw->tlink = cifs_get_tlink(open->tlink);
-			queue_work(cifsiod_wq, &lw->lease_break);
+			found = open;
 		}
 
 		cifs_dbg(FYI, "found in the pending open list\n");
@@ -592,14 +615,7 @@ smb2_is_valid_lease_break(char *buffer)
 	struct TCP_Server_Info *server;
 	struct cifs_ses *ses;
 	struct cifs_tcon *tcon;
-	struct smb2_lease_break_work *lw;
-
-	lw = kmalloc(sizeof(struct smb2_lease_break_work), GFP_KERNEL);
-	if (!lw)
-		return false;
-
-	INIT_WORK(&lw->lease_break, cifs_ses_oplock_break);
-	lw->lease_state = rsp->NewLeaseState;
+	struct cifs_pending_open *open;
 
 	cifs_dbg(FYI, "Checking for lease break\n");
 
@@ -617,11 +633,27 @@ smb2_is_valid_lease_break(char *buffer)
 				spin_lock(&tcon->open_file_lock);
 				cifs_stats_inc(
 				    &tcon->stats.cifs_stats.num_oplock_brks);
-				if (smb2_tcon_has_lease(tcon, rsp, lw)) {
+				if (smb2_tcon_has_lease(tcon, rsp)) {
 					spin_unlock(&tcon->open_file_lock);
 					spin_unlock(&cifs_tcp_ses_lock);
 					return true;
 				}
+				open = smb2_tcon_find_pending_open_lease(tcon,
+									 rsp);
+				if (open) {
+					__u8 lease_key[SMB2_LEASE_KEY_SIZE];
+					struct tcon_link *tlink;
+
+					tlink = cifs_get_tlink(open->tlink);
+					memcpy(lease_key, open->lease_key,
+					       SMB2_LEASE_KEY_SIZE);
+					spin_unlock(&tcon->open_file_lock);
+					spin_unlock(&cifs_tcp_ses_lock);
+					smb2_queue_pending_open_break(tlink,
+								      lease_key,
+								      rsp->NewLeaseState);
+					return true;
+				}
 				spin_unlock(&tcon->open_file_lock);
 
 				if (tcon->crfid.is_valid &&
@@ -639,7 +671,6 @@ smb2_is_valid_lease_break(char *buffer)
 		}
 	}
 	spin_unlock(&cifs_tcp_ses_lock);
-	kfree(lw);
 	cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
 	return false;
 }

@@ -1298,6 +1298,8 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
 	spnego_key = cifs_get_spnego_key(ses);
 	if (IS_ERR(spnego_key)) {
 		rc = PTR_ERR(spnego_key);
+		if (rc == -ENOKEY)
+			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
 		spnego_key = NULL;
 		goto out;
 	}

@@ -148,7 +148,7 @@ static int open_proxy_open(struct inode *inode, struct file *filp)
 
 	real_fops = debugfs_real_fops(filp);
 	if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
 		if (real_fops->owner &&
 		    real_fops->owner->state == MODULE_STATE_GOING)
 			goto out;
@@ -278,7 +278,7 @@ static int full_proxy_open(struct inode *inode, struct file *filp)
 
 	real_fops = debugfs_real_fops(filp);
 	if (!fops_get(real_fops)) {
-#ifdef MODULE
+#ifdef CONFIG_MODULES
 		if (real_fops->owner &&
 		    real_fops->owner->state == MODULE_STATE_GOING)
 			goto out;

@@ -80,6 +80,7 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
 	if (dir)
 		le16_add_cpu(&desc->bg_used_dirs_count, -1);
 	spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
+	percpu_counter_inc(&EXT2_SB(sb)->s_freeinodes_counter);
 	if (dir)
 		percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
 	mark_buffer_dirty(bh);
@@ -529,7 +530,7 @@ got:
 		goto fail;
 	}
 
-	percpu_counter_add(&sbi->s_freeinodes_counter, -1);
+	percpu_counter_dec(&sbi->s_freeinodes_counter);
 	if (S_ISDIR(mode))
 		percpu_counter_inc(&sbi->s_dirs_counter);
 

@@ -1853,7 +1853,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
 			blocksize, hinfo, map);
 	map -= count;
 	dx_sort_map(map, count);
-	/* Split the existing block in the middle, size-wise */
+	/* Ensure that neither split block is over half full */
 	size = 0;
 	move = 0;
 	for (i = count-1; i >= 0; i--) {
@@ -1863,8 +1863,18 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
 		size += map[i].size;
 		move++;
 	}
-	/* map index at which we will split */
-	split = count - move;
+	/*
+	 * map index at which we will split
+	 *
+	 * If the sum of active entries didn't exceed half the block size, just
+	 * split it in half by count; each resulting block will have at least
+	 * half the space free.
+	 */
+	if (i > 0)
+		split = count - move;
+	else
+		split = count/2;
+
 	hash2 = map[split].hash;
 	continued = hash2 == map[split - 1].hash;
 	dxtrace(printk(KERN_INFO "Split block %lu at %x, %i/%i\n",

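Worked example of the new split rule: if even the summed entry sizes never pass half a block, the loop index runs down to -1 and the old "split = count - move" would yield index 0, making the later map[split - 1] a negative array access; the fallback splits by count instead. A standalone arithmetic check (entry sizes invented):

#include <stdio.h>

int main(void)
{
	int sizes[] = { 40, 40, 40, 40 };	/* hypothetical entry sizes */
	int count = 4, blocksize = 1024;
	int size = 0, move = 0, split, i;

	for (i = count - 1; i >= 0; i--) {
		if (size + sizes[i] > blocksize / 2)
			break;
		size += sizes[i];
		move++;
	}
	/* i == -1 here: all entries fit in half a block */
	split = (i > 0) ? count - move : count / 2;
	printf("split at index %d\n", split);	/* 2, not the old 0 */
	return 0;
}
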
@@ -1353,8 +1353,10 @@ static int jbd2_write_superblock(journal_t *journal, int write_flags)
 	int ret;
 
 	/* Buffer got discarded which means block device got invalidated */
-	if (!buffer_mapped(bh))
+	if (!buffer_mapped(bh)) {
+		unlock_buffer(bh);
 		return -EIO;
+	}
 
 	trace_jbd2_write_superblock(journal, write_flags);
 	if (!(journal->j_flags & JBD2_BARRIER))

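The jbd2 fix adds an unlock on the early-error return because the buffer arrives locked; without it the lock leaks and a later locker hangs. The same shape in runnable form, with a pthread mutex standing in for the buffer lock (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t buf_lock = PTHREAD_MUTEX_INITIALIZER;
static int buf_mapped;	/* 0: device invalidated, buffer discarded */

/* Entered with the buffer lock held, so every return path - including
 * the early-error one - must drop it exactly once. */
static int write_super(void)
{
	if (!buf_mapped) {
		pthread_mutex_unlock(&buf_lock);	/* the added unlock */
		return -5;				/* -EIO */
	}

	/* ... submit the write; the I/O completion path unlocks ... */
	pthread_mutex_unlock(&buf_lock);
	return 0;
}

int main(void)
{
	pthread_mutex_lock(&buf_lock);	/* caller locks, as jbd2 does */
	printf("ret=%d\n", write_super());

	pthread_mutex_lock(&buf_lock);	/* would hang without the fix */
	printf("lock reacquired fine\n");
	return 0;
}
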
@ -150,8 +150,10 @@ static int minix_remount (struct super_block * sb, int * flags, char * data)
|
|||
return 0;
|
||||
}
|
||||
|
||||
static bool minix_check_superblock(struct minix_sb_info *sbi)
|
||||
static bool minix_check_superblock(struct super_block *sb)
|
||||
{
|
||||
struct minix_sb_info *sbi = minix_sb(sb);
|
||||
|
||||
if (sbi->s_imap_blocks == 0 || sbi->s_zmap_blocks == 0)
|
||||
return false;
|
||||
|
||||
|
@ -161,7 +163,7 @@ static bool minix_check_superblock(struct minix_sb_info *sbi)
|
|||
* of indirect blocks which places the limit well above U32_MAX.
|
||||
*/
|
||||
if (sbi->s_version == MINIX_V1 &&
|
||||
sbi->s_max_size > (7 + 512 + 512*512) * BLOCK_SIZE)
|
||||
sb->s_maxbytes > (7 + 512 + 512*512) * BLOCK_SIZE)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
|
@ -202,7 +204,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
|
|||
sbi->s_zmap_blocks = ms->s_zmap_blocks;
|
||||
sbi->s_firstdatazone = ms->s_firstdatazone;
|
||||
sbi->s_log_zone_size = ms->s_log_zone_size;
|
||||
sbi->s_max_size = ms->s_max_size;
|
||||
s->s_maxbytes = ms->s_max_size;
|
||||
s->s_magic = ms->s_magic;
|
||||
if (s->s_magic == MINIX_SUPER_MAGIC) {
|
||||
sbi->s_version = MINIX_V1;
|
||||
|
@ -233,7 +235,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
|
|||
sbi->s_zmap_blocks = m3s->s_zmap_blocks;
|
||||
sbi->s_firstdatazone = m3s->s_firstdatazone;
|
||||
sbi->s_log_zone_size = m3s->s_log_zone_size;
|
||||
sbi->s_max_size = m3s->s_max_size;
|
||||
s->s_maxbytes = m3s->s_max_size;
|
||||
sbi->s_ninodes = m3s->s_ninodes;
|
||||
sbi->s_nzones = m3s->s_zones;
|
||||
sbi->s_dirsize = 64;
|
||||
|
@ -245,7 +247,7 @@ static int minix_fill_super(struct super_block *s, void *data, int silent)
|
|||
} else
|
||||
goto out_no_fs;
|
||||
|
||||
if (!minix_check_superblock(sbi))
|
||||
if (!minix_check_superblock(s))
|
||||
goto out_illegal_sb;
|
||||
|
||||
/*
|
||||
|
|
|
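The minix hunks below compare (u64)block * blocksize against s_maxbytes; casting before the multiply is what keeps the product from wrapping in 32 bits and sneaking past the limit. A runnable demonstration of the difference:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t block = 0x00800000;	/* 8M blocks */
	uint32_t blocksize = 1024;
	uint64_t maxbytes = 7ull * 1024 * 1024 * 1024;	/* ~7G limit */

	uint32_t wrong = block * blocksize;		/* wraps to 0 */
	uint64_t right = (uint64_t)block * blocksize;	/* 8G, as intended */

	printf("wrong=%u  passes-limit=%d\n", wrong, (uint64_t)wrong < maxbytes);
	printf("right=%llu passes-limit=%d\n",
	       (unsigned long long)right, right < maxbytes);
	return 0;
}
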
@@ -29,12 +29,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
 	if (block < 0) {
 		printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
 			block, inode->i_sb->s_bdev);
-	} else if (block >= (minix_sb(inode->i_sb)->s_max_size/BLOCK_SIZE)) {
-		if (printk_ratelimit())
-			printk("MINIX-fs: block_to_path: "
-			       "block %ld too big on dev %pg\n",
-				block, inode->i_sb->s_bdev);
-	} else if (block < 7) {
+		return 0;
+	}
+	if ((u64)block * BLOCK_SIZE >= inode->i_sb->s_maxbytes)
+		return 0;
+
+	if (block < 7) {
 		offsets[n++] = block;
 	} else if ((block -= 7) < 512) {
 		offsets[n++] = 7;

@@ -32,13 +32,12 @@ static int block_to_path(struct inode * inode, long block, int offsets[DEPTH])
 	if (block < 0) {
 		printk("MINIX-fs: block_to_path: block %ld < 0 on dev %pg\n",
 			block, sb->s_bdev);
-	} else if ((u64)block * (u64)sb->s_blocksize >=
-			minix_sb(sb)->s_max_size) {
-		if (printk_ratelimit())
-			printk("MINIX-fs: block_to_path: "
-			       "block %ld too big on dev %pg\n",
-				block, sb->s_bdev);
-	} else if (block < DIRCOUNT) {
+		return 0;
+	}
+	if ((u64)block * (u64)sb->s_blocksize >= sb->s_maxbytes)
+		return 0;
+
+	if (block < DIRCOUNT) {
 		offsets[n++] = block;
 	} else if ((block -= DIRCOUNT) < INDIRCOUNT(sb)) {
 		offsets[n++] = DIRCOUNT;

@@ -32,7 +32,6 @@ struct minix_sb_info {
 	unsigned long s_zmap_blocks;
 	unsigned long s_firstdatazone;
 	unsigned long s_log_zone_size;
-	unsigned long s_max_size;
 	int s_dirsize;
 	int s_namelen;
 	struct buffer_head ** s_imap;

@@ -140,6 +140,7 @@ static int
 nfs_file_flush(struct file *file, fl_owner_t id)
 {
 	struct inode *inode = file_inode(file);
+	errseq_t since;
 
 	dprintk("NFS: flush(%pD2)\n", file);
 
@@ -148,7 +149,9 @@ nfs_file_flush(struct file *file, fl_owner_t id)
 		return 0;
 
 	/* Flush writes to the server and return any errors */
-	return nfs_wb_all(inode);
+	since = filemap_sample_wb_err(file->f_mapping);
+	nfs_wb_all(inode);
+	return filemap_check_wb_err(file->f_mapping, since);
 }
 
 ssize_t
@@ -580,12 +583,14 @@ static const struct vm_operations_struct nfs_file_vm_ops = {
 	.page_mkwrite = nfs_vm_page_mkwrite,
 };
 
-static int nfs_need_check_write(struct file *filp, struct inode *inode)
+static int nfs_need_check_write(struct file *filp, struct inode *inode,
+				int error)
 {
 	struct nfs_open_context *ctx;
 
 	ctx = nfs_file_open_context(filp);
-	if (nfs_ctx_key_to_expire(ctx, inode))
+	if (nfs_error_is_fatal_on_server(error) ||
+	    nfs_ctx_key_to_expire(ctx, inode))
 		return 1;
 	return 0;
 }
@@ -596,6 +601,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	struct inode *inode = file_inode(file);
 	unsigned long written = 0;
 	ssize_t result;
+	errseq_t since;
+	int error;
 
 	result = nfs_key_timeout_notify(file, inode);
 	if (result)
@@ -620,6 +627,7 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 	if (iocb->ki_pos > i_size_read(inode))
 		nfs_revalidate_mapping(inode, file->f_mapping);
 
+	since = filemap_sample_wb_err(file->f_mapping);
 	nfs_start_io_write(inode);
 	result = generic_write_checks(iocb, from);
 	if (result > 0) {
@@ -638,7 +646,8 @@ ssize_t nfs_file_write(struct kiocb *iocb, struct iov_iter *from)
 		goto out;
 
 	/* Return error values */
-	if (nfs_need_check_write(file, inode)) {
+	error = filemap_check_wb_err(file->f_mapping, since);
+	if (nfs_need_check_write(file, inode, error)) {
 		int err = nfs_wb_all(inode);
 		if (err < 0)
 			result = err;

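The NFS hunks adopt the errseq sample/check pair so a flush reports only errors raised since it started, instead of replaying an old mapping error on every later close. A minimal sketch assuming kernel context (the wrapper below is hypothetical; filemap_sample_wb_err() and filemap_check_wb_err() are the real APIs):

#include <linux/fs.h>
#include <linux/pagemap.h>

static int flush_and_report(struct file *file, struct inode *inode,
			    int (*wb_all)(struct inode *))
{
	/* sample the error cursor before starting writeback ... */
	errseq_t since = filemap_sample_wb_err(file->f_mapping);

	wb_all(inode);	/* e.g. nfs_wb_all() */

	/* ... then report only errors newer than the sample */
	return filemap_check_wb_err(file->f_mapping, since);
}
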
@@ -109,6 +109,7 @@ static int
 nfs4_file_flush(struct file *file, fl_owner_t id)
 {
 	struct inode *inode = file_inode(file);
+	errseq_t since;
 
 	dprintk("NFS: flush(%pD2)\n", file);
 
@@ -124,7 +125,9 @@ nfs4_file_flush(struct file *file, fl_owner_t id)
 		return filemap_fdatawrite(file->f_mapping);
 
 	/* Flush writes to the server and return any errors */
-	return nfs_wb_all(inode);
+	since = filemap_sample_wb_err(file->f_mapping);
+	nfs_wb_all(inode);
+	return filemap_check_wb_err(file->f_mapping, since);
 }
 
 #ifdef CONFIG_NFS_V4_2

@@ -5729,8 +5729,6 @@ static int _nfs4_get_security_label(struct inode *inode, void *buf,
 		return ret;
 	if (!(fattr.valid & NFS_ATTR_FATTR_V4_SECURITY_LABEL))
 		return -ENOENT;
-	if (buflen < label.len)
-		return -ERANGE;
 	return 0;
 }
 

|
@ -4163,7 +4163,11 @@ static int decode_attr_security_label(struct xdr_stream *xdr, uint32_t *bitmap,
|
|||
return -EIO;
|
||||
if (len < NFS4_MAXLABELLEN) {
|
||||
if (label) {
|
||||
if (label->len) {
|
||||
if (label->len < len)
|
||||
return -ERANGE;
|
||||
memcpy(label->label, p, len);
|
||||
}
|
||||
label->len = len;
|
||||
label->pi = pi;
|
||||
label->lfs = lfs;
|
||||
|
|
|
@@ -313,23 +313,8 @@ static ssize_t orangefs_file_read_iter(struct kiocb *iocb,
     struct iov_iter *iter)
 {
 	int ret;
-	struct orangefs_read_options *ro;
 
 	orangefs_stats.reads++;
 
-	/*
-	 * Remember how they set "count" in read(2) or pread(2) or whatever -
-	 * users can use count as a knob to control orangefs io size and later
-	 * we can try to help them fill as many pages as possible in readpage.
-	 */
-	if (!iocb->ki_filp->private_data) {
-		iocb->ki_filp->private_data = kmalloc(sizeof *ro, GFP_KERNEL);
-		if (!iocb->ki_filp->private_data)
-			return(ENOMEM);
-		ro = iocb->ki_filp->private_data;
-		ro->blksiz = iter->count;
-	}
-
 	down_read(&file_inode(iocb->ki_filp)->i_rwsem);
 	ret = orangefs_revalidate_mapping(file_inode(iocb->ki_filp));
 	if (ret)
@@ -598,12 +583,6 @@ static int orangefs_lock(struct file *filp, int cmd, struct file_lock *fl)
 	return rc;
 }
 
-static int orangefs_file_open(struct inode * inode, struct file *file)
-{
-	file->private_data = NULL;
-	return generic_file_open(inode, file);
-}
-
 static int orangefs_flush(struct file *file, fl_owner_t id)
 {
 	/*
@@ -617,9 +596,6 @@ static int orangefs_flush(struct file *file, fl_owner_t id)
 	struct inode *inode = file->f_mapping->host;
 	int r;
 
-	kfree(file->private_data);
-	file->private_data = NULL;
-
 	if (inode->i_state & I_DIRTY_TIME) {
 		spin_lock(&inode->i_lock);
 		inode->i_state &= ~I_DIRTY_TIME;
@@ -642,7 +618,7 @@ const struct file_operations orangefs_file_operations = {
 	.lock = orangefs_lock,
 	.unlocked_ioctl = orangefs_ioctl,
 	.mmap = orangefs_file_mmap,
-	.open = orangefs_file_open,
+	.open = generic_file_open,
 	.flush = orangefs_flush,
 	.release = orangefs_file_release,
 	.fsync = orangefs_fsync,