This is the 4.14.236 stable release

-----BEGIN PGP SIGNATURE-----
 
 iQIzBAABCAAdFiEEZH8oZUiU471FcZm+ONu9yGCSaT4FAmDB7PYACgkQONu9yGCS
 aT4sABAAo15TKI4d1BKsnGEIjv7LYtIAnkfRm4UqoJnFWe60zgdaKKPtJnEICSkF
 ez0DNkDMEx4Y9uDeKzTuIYvOV+2anmZyH8xngW1UiAsKhTkofph5RExxCeze68T5
 y84sAHtTpHSkuEN55R5kexZ8JkNohYuphe+7g//5zyqsgbIDzyYd2c7TUQJMOWdw
 wVQtuitq2vN7EuLmEeI5jTDP3qg2gjVi/DUp/OGfeYQAaoeDl0ZMaE/vGvzZngPA
 mm9EgX3eIc4k0HNAYbw693LP5FBPaAro5qiJ9yEGjbxwSFvmkLkpGFepk475c8CP
 H5GILJ8RE95VGC0baK+TbMF+CGwgJorFMMniFHC0T1GApCv3vgVtxJUXZkasmcVJ
 Mw/xhWI4x6zVvu9Ofq1G9eJ5MRpU+c6jpu4dUQpk3XJBihUHTaHZ6wGG48osB5/7
 ajwODcnKwNAQVY/bSC5IStQsx8f7lIDTA98Pg7i3POjor40MwU8UXUub2LTvlp3y
 Q4b/UP0kxC6uBtcSCyCwswBj0rLK/AS0Lesf6LKXKmtTbb3cHGP+/pbq4TqTwjSa
 tAmTVrUAnVTbmTfzMZ2hYnu+qmRflEp92AvjHw8YqFcg27Shv4XIK0vSMXJu4gtK
 r7yLMLltDcNU1jA1KYZ2IRDqNMWFsLuO01A3rZYtB1jhHaIs9dA=
 =Nrhr
 -----END PGP SIGNATURE-----

Merge tag 'v4.14.236' into v4.14/standard/base

This is the 4.14.236 stable release

# gpg: Signature made Thu 10 Jun 2021 06:44:06 AM EDT
# gpg:                using RSA key 647F28654894E3BD457199BE38DBBDC86092693E
# gpg: Can't check signature: No public key
commit 2251c17b6c
Author: Bruce Ashfield
Date:   2021-08-25 13:24:24 -04:00

39 changed files with 502 additions and 305 deletions

Makefile

@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 4
 PATCHLEVEL = 14
-SUBLEVEL = 235
+SUBLEVEL = 236
 EXTRAVERSION =
 NAME = Petit Gorille

arch/x86/kvm/svm.c

@@ -3532,7 +3532,7 @@ static int cr_interception(struct vcpu_svm *svm)
	err = 0;
	if (cr >= 16) { /* mov to cr */
		cr -= 16;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_readl(&svm->vcpu, reg);
		switch (cr) {
		case 0:
			if (!check_selective_cr0_intercepted(svm, val))
@@ -3577,7 +3577,7 @@ static int cr_interception(struct vcpu_svm *svm)
			kvm_queue_exception(&svm->vcpu, UD_VECTOR);
			return 1;
		}
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_writel(&svm->vcpu, reg, val);
	}
	return kvm_complete_insn_gp(&svm->vcpu, err);
}
@@ -3607,13 +3607,13 @@ static int dr_interception(struct vcpu_svm *svm)
	if (dr >= 16) { /* mov to DRn */
		if (!kvm_require_dr(&svm->vcpu, dr - 16))
			return 1;
-		val = kvm_register_read(&svm->vcpu, reg);
+		val = kvm_register_readl(&svm->vcpu, reg);
		kvm_set_dr(&svm->vcpu, dr - 16, val);
	} else {
		if (!kvm_require_dr(&svm->vcpu, dr))
			return 1;
		kvm_get_dr(&svm->vcpu, dr, &val);
-		kvm_register_write(&svm->vcpu, reg, val);
+		kvm_register_writel(&svm->vcpu, reg, val);
	}

	return kvm_skip_emulated_instruction(&svm->vcpu);

drivers/firmware/efi/cper.c

@@ -380,8 +380,7 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
	if (!msg || !(mem->validation_bits & CPER_MEM_VALID_MODULE_HANDLE))
		return 0;

-	n = 0;
-	len = CPER_REC_LEN - 1;
+	len = CPER_REC_LEN;
	dmi_memdev_name(mem->mem_dev_handle, &bank, &device);
	if (bank && device)
		n = snprintf(msg, len, "DIMM location: %s %s ", bank, device);
@@ -390,7 +389,6 @@ static int cper_dimm_err_location(struct cper_mem_err_compact *mem, char *msg)
			 "DIMM location: not present. DMI handle: 0x%.4x ",
			 mem->mem_dev_handle);

-	msg[n] = '\0';
	return n;
}

drivers/firmware/efi/memattr.c

@@ -69,11 +69,6 @@ static bool entry_is_valid(const efi_memory_desc_t *in, efi_memory_desc_t *out)
		return false;
	}

-	if (!(in->attribute & (EFI_MEMORY_RO | EFI_MEMORY_XP))) {
-		pr_warn("Entry attributes invalid: RO and XP bits both cleared\n");
-		return false;
-	}
-
	if (PAGE_SIZE > EFI_PAGE_SIZE &&
	    (!PAGE_ALIGNED(in->phys_addr) ||
	     !PAGE_ALIGNED(in->num_pages << EFI_PAGE_SHIFT))) {

drivers/hid/i2c-hid/i2c-hid.c

@@ -1092,8 +1092,8 @@ static int i2c_hid_probe(struct i2c_client *client,
	hid->vendor = le16_to_cpu(ihid->hdesc.wVendorID);
	hid->product = le16_to_cpu(ihid->hdesc.wProductID);

-	snprintf(hid->name, sizeof(hid->name), "%s %04hX:%04hX",
-		 client->name, hid->vendor, hid->product);
+	snprintf(hid->name, sizeof(hid->name), "%s %04X:%04X",
+		 client->name, (u16)hid->vendor, (u16)hid->product);
	strlcpy(hid->phys, dev_name(&client->dev), sizeof(hid->phys));

	ihid->quirks = i2c_hid_lookup_quirk(hid->vendor, hid->product);

drivers/hid/usbhid/hid-pidff.c

@@ -1304,6 +1304,7 @@ int hid_pidff_init(struct hid_device *hid)

	if (pidff->pool[PID_DEVICE_MANAGED_POOL].value &&
	    pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) {
+		error = -EPERM;
		hid_notice(hid,
			   "device does not support device managed pool\n");
		goto fail;

drivers/net/ethernet/broadcom/bnxt/bnxt.c

@@ -4791,7 +4791,6 @@ static int bnxt_hwrm_func_qcaps(struct bnxt *bp)

		pf->fw_fid = le16_to_cpu(resp->fid);
		pf->port_id = le16_to_cpu(resp->port_id);
-		bp->dev->dev_port = pf->port_id;
		memcpy(pf->mac_addr, resp->mac_address, ETH_ALEN);
		pf->max_rsscos_ctxs = le16_to_cpu(resp->max_rsscos_ctx);
		pf->max_cp_rings = le16_to_cpu(resp->max_cmpl_rings);

drivers/net/usb/cdc_ncm.c

@@ -1591,6 +1591,15 @@ cdc_ncm_speed_change(struct usbnet *dev,
	uint32_t rx_speed = le32_to_cpu(data->DLBitRRate);
	uint32_t tx_speed = le32_to_cpu(data->ULBitRate);

+	/* if the speed hasn't changed, don't report it.
+	 * RTL8156 shipped before 2021 sends notification about every 32ms.
+	 */
+	if (dev->rx_speed == rx_speed && dev->tx_speed == tx_speed)
+		return;
+
+	dev->rx_speed = rx_speed;
+	dev->tx_speed = tx_speed;
+
	/*
	 * Currently the USB-NET API does not support reporting the actual
	 * device speed. Do print it instead.
@@ -1634,6 +1643,7 @@ static void cdc_ncm_status(struct usbnet *dev, struct urb *urb)
		 * USB_CDC_NOTIFY_NETWORK_CONNECTION notification shall be
		 * sent by device after USB_CDC_NOTIFY_SPEED_CHANGE.
		 */
-		usbnet_link_change(dev, !!event->wValue, 0);
+		if (netif_carrier_ok(dev->net) != !!event->wValue)
+			usbnet_link_change(dev, !!event->wValue, 0);
		break;

drivers/vfio/pci/Kconfig

@@ -1,6 +1,7 @@
 config VFIO_PCI
	tristate "VFIO support for PCI devices"
	depends on VFIO && PCI && EVENTFD
+	depends on MMU
	select VFIO_VIRQFD
	select IRQ_BYPASS_MANAGER
	help

drivers/vfio/pci/vfio_pci_config.c

@@ -1579,7 +1579,7 @@ static int vfio_ecap_init(struct vfio_pci_device *vdev)
		if (len == 0xFF) {
			len = vfio_ext_cap_len(vdev, ecap, epos);
			if (len < 0)
-				return ret;
+				return len;
		}
	}

drivers/vfio/platform/vfio_platform_common.c

@@ -288,7 +288,7 @@ err_irq:
	vfio_platform_regions_cleanup(vdev);
err_reg:
	mutex_unlock(&driver_lock);
-	module_put(THIS_MODULE);
+	module_put(vdev->parent_module);
	return ret;
}

drivers/xen/xen-pciback/vpci.c

@@ -69,7 +69,7 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				   struct pci_dev *dev, int devid,
				   publish_pci_dev_cb publish_cb)
{
-	int err = 0, slot, func = -1;
+	int err = 0, slot, func = PCI_FUNC(dev->devfn);
	struct pci_dev_entry *t, *dev_entry;
	struct vpci_dev_data *vpci_dev = pdev->pci_dev_data;
@@ -94,23 +94,26 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,

	/*
	 * Keep multi-function devices together on the virtual PCI bus, except
-	 * virtual functions.
+	 * that we want to keep virtual functions at func 0 on their own. They
+	 * aren't multi-function devices and hence their presence at func 0
+	 * may cause guests to not scan the other functions.
	 */
-	if (!dev->is_virtfn) {
+	if (!dev->is_virtfn || func) {
		for (slot = 0; slot < PCI_SLOT_MAX; slot++) {
			if (list_empty(&vpci_dev->dev_list[slot]))
				continue;

			t = list_entry(list_first(&vpci_dev->dev_list[slot]),
				       struct pci_dev_entry, list);
+			if (t->dev->is_virtfn && !PCI_FUNC(t->dev->devfn))
+				continue;

			if (match_slot(dev, t->dev)) {
				pr_info("vpci: %s: assign to virtual slot %d func %d\n",
					pci_name(dev), slot,
-					PCI_FUNC(dev->devfn));
+					func);
				list_add_tail(&dev_entry->list,
					      &vpci_dev->dev_list[slot]);
-				func = PCI_FUNC(dev->devfn);
				goto unlock;
			}
		}
@@ -123,7 +126,6 @@ static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev,
				pci_name(dev), slot);
			list_add_tail(&dev_entry->list,
				      &vpci_dev->dev_list[slot]);
-			func = dev->is_virtfn ? 0 : PCI_FUNC(dev->devfn);
			goto unlock;
		}
	}

fs/btrfs/file-item.c

@@ -599,7 +599,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
-	int ret;
+	int ret = 0;
	u16 csum_size = btrfs_super_csum_size(fs_info->super_copy);
	int blocksize_bits = fs_info->sb->s_blocksize_bits;
@@ -615,6 +615,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
+			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
@@ -671,7 +672,7 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
			ret = btrfs_del_items(trans, root, path,
					      path->slots[0], del_nr);
			if (ret)
-				goto out;
+				break;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
@@ -715,8 +716,9 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, ret);
-				goto out;
+				break;
			}
+			ret = 0;

			key.offset = end_byte - 1;
		} else {
@@ -726,8 +728,6 @@ int btrfs_del_csums(struct btrfs_trans_handle *trans,
		}
		btrfs_release_path(path);
	}
-	ret = 0;
-out:
	btrfs_free_path(path);
	return ret;
}

fs/btrfs/tree-log.c

@@ -1558,6 +1558,7 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
			break;

		if (ret == 1) {
+			ret = 0;
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
@@ -1570,17 +1571,19 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,

		ret = btrfs_del_item(trans, root, path);
		if (ret)
-			goto out;
+			break;

		btrfs_release_path(path);
		inode = read_one_inode(root, key.offset);
-		if (!inode)
-			return -EIO;
+		if (!inode) {
+			ret = -EIO;
+			break;
+		}

		ret = fixup_inode_link_count(trans, root, inode);
		iput(inode);
		if (ret)
-			goto out;
+			break;

		/*
		 * fixup on a directory may create new entries,
@@ -1589,8 +1592,6 @@ static noinline int fixup_inode_link_counts(struct btrfs_trans_handle *trans,
		 */
		key.offset = (u64)-1;
	}
-	ret = 0;
-out:
	btrfs_release_path(path);
	return ret;
}

fs/ext4/extents.c

@@ -3275,7 +3275,10 @@ static int ext4_split_extent_at(handle_t *handle,
		ext4_ext_mark_unwritten(ex2);

	err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
-	if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
+	if (err != -ENOSPC && err != -EDQUOT)
+		goto out;
+
+	if (EXT4_EXT_MAY_ZEROOUT & split_flag) {
		if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
			if (split_flag & EXT4_EXT_DATA_VALID1) {
				err = ext4_ext_zeroout(inode, ex2);
@@ -3301,30 +3304,30 @@ static int ext4_split_extent_at(handle_t *handle,
				      ext4_ext_pblock(&orig_ex));
		}

-		if (err)
-			goto fix_extent_len;
-		/* update the extent length and mark as initialized */
-		ex->ee_len = cpu_to_le16(ee_len);
-		ext4_ext_try_to_merge(handle, inode, path, ex);
-		err = ext4_ext_dirty(handle, inode, path + path->p_depth);
-		if (err)
-			goto fix_extent_len;
-
-		/* update extent status tree */
-		err = ext4_zeroout_es(inode, &zero_ex);
-
-		goto out;
-	} else if (err)
-		goto fix_extent_len;
-
-out:
-	ext4_ext_show_leaf(inode, path);
-	return err;
+		if (!err) {
+			/* update the extent length and mark as initialized */
+			ex->ee_len = cpu_to_le16(ee_len);
+			ext4_ext_try_to_merge(handle, inode, path, ex);
+			err = ext4_ext_dirty(handle, inode, path + path->p_depth);
+			if (!err)
+				/* update extent status tree */
+				err = ext4_zeroout_es(inode, &zero_ex);
+			/* If we failed at this point, we don't know in which
+			 * state the extent tree exactly is so don't try to fix
+			 * length of the original extent as it may do even more
+			 * damage.
+			 */
+			goto out;
+		}
+	}

 fix_extent_len:
	ex->ee_len = orig_ex.ee_len;
	ext4_ext_dirty(handle, inode, path + path->p_depth);
	return err;
+out:
+	ext4_ext_show_leaf(inode, path);
+	return err;
}

 /*

fs/ocfs2/file.c

@@ -1861,6 +1861,45 @@ out:
	return ret;
}

+/*
+ * zero out partial blocks of one cluster.
+ *
+ * start: file offset where zero starts, will be made upper block aligned.
+ * len: it will be trimmed to the end of current cluster if "start + len"
+ *      is bigger than it.
+ */
+static int ocfs2_zeroout_partial_cluster(struct inode *inode,
+					 u64 start, u64 len)
+{
+	int ret;
+	u64 start_block, end_block, nr_blocks;
+	u64 p_block, offset;
+	u32 cluster, p_cluster, nr_clusters;
+	struct super_block *sb = inode->i_sb;
+	u64 end = ocfs2_align_bytes_to_clusters(sb, start);
+
+	if (start + len < end)
+		end = start + len;
+
+	start_block = ocfs2_blocks_for_bytes(sb, start);
+	end_block = ocfs2_blocks_for_bytes(sb, end);
+	nr_blocks = end_block - start_block;
+	if (!nr_blocks)
+		return 0;
+
+	cluster = ocfs2_bytes_to_clusters(sb, start);
+	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
+				&nr_clusters, NULL);
+	if (ret)
+		return ret;
+	if (!p_cluster)
+		return 0;
+
+	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
+	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
+	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
+}
+
 /*
  * Parts of this function taken from xfs_change_file_space()
  */
@@ -1871,7 +1910,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
{
	int ret;
	s64 llen;
-	loff_t size;
+	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
@@ -1902,6 +1941,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		goto out_inode_unlock;
	}

+	orig_isize = i_size_read(inode);
	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
@@ -1909,7 +1949,7 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
-		sr->l_start += i_size_read(inode);
+		sr->l_start += orig_isize;
		break;
	default:
		ret = -EINVAL;
@@ -1963,6 +2003,14 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
	default:
		ret = -EINVAL;
	}

+	/* zeroout eof blocks in the cluster. */
+	if (!ret && change_size && orig_isize < size) {
+		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
+					size - orig_isize);
+		if (!ret)
+			i_size_write(inode, size);
+	}
+
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
@@ -1979,9 +2027,6 @@ static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
		goto out_inode_unlock;
	}

-	if (change_size && i_size_read(inode) < size)
-		i_size_write(inode, size);
-
	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)

include/linux/bpf_verifier.h

@@ -114,10 +114,11 @@ struct bpf_verifier_state_list {
};

 /* Possible states for alu_state member. */
-#define BPF_ALU_SANITIZE_SRC	1U
-#define BPF_ALU_SANITIZE_DST	2U
+#define BPF_ALU_SANITIZE_SRC	(1U << 0)
+#define BPF_ALU_SANITIZE_DST	(1U << 1)
 #define BPF_ALU_NEG_VALUE	(1U << 2)
 #define BPF_ALU_NON_POINTER	(1U << 3)
+#define BPF_ALU_IMMEDIATE	(1U << 4)
 #define BPF_ALU_SANITIZE	(BPF_ALU_SANITIZE_SRC | \
				 BPF_ALU_SANITIZE_DST)

include/linux/usb/usbnet.h

@@ -82,6 +82,8 @@ struct usbnet {
 #		define EVENT_LINK_CHANGE	11
 #		define EVENT_SET_RX_MODE	12
 #		define EVENT_NO_IP_ALIGN	13
+	u32			rx_speed;	/* in bps - NOT Mbps */
+	u32			tx_speed;	/* in bps - NOT Mbps */
};

 static inline struct usb_driver *driver_of(struct usb_interface *intf)

include/net/caif/caif_dev.h

@@ -119,7 +119,7 @@ void caif_free_client(struct cflayer *adap_layer);
 * The link_support layer is used to add any Link Layer specific
 * framing.
 */
-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
			struct cflayer *link_support, int head_room,
			struct cflayer **layer, int (**rcv_func)(
				struct sk_buff *, struct net_device *,

include/net/caif/cfcnfg.h

@@ -62,7 +62,7 @@ void cfcnfg_remove(struct cfcnfg *cfg);
 * @fcs:	Specify if checksum is used in CAIF Framing Layer.
 * @head_room:	Head space needed by link specific protocol.
 */
-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,

include/net/caif/cfserl.h

@@ -9,4 +9,5 @@
 #include <net/caif/caif_layer.h>

 struct cflayer *cfserl_create(int instance, bool use_stx);
+void cfserl_release(struct cflayer *layer);
 #endif

init/main.c

@@ -1055,7 +1055,7 @@ static noinline void __init kernel_init_freeable(void)
	 */
	set_mems_allowed(node_states[N_MEMORY]);

-	cad_pid = task_pid(current);
+	cad_pid = get_pid(task_pid(current));

	smp_prepare_cpus(setup_max_cpus);

kernel/bpf/verifier.c

@@ -2024,37 +2024,43 @@ static struct bpf_insn_aux_data *cur_aux(struct bpf_verifier_env *env)
	return &env->insn_aux_data[env->insn_idx];
}

+enum {
+	REASON_BOUNDS	= -1,
+	REASON_TYPE	= -2,
+	REASON_PATHS	= -3,
+	REASON_LIMIT	= -4,
+	REASON_STACK	= -5,
+};
+
 static int retrieve_ptr_limit(const struct bpf_reg_state *ptr_reg,
-			      u32 *ptr_limit, u8 opcode, bool off_is_neg)
+			      u32 *alu_limit, bool mask_to_left)
{
-	bool mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
-			    (opcode == BPF_SUB && !off_is_neg);
-	u32 off, max;
+	u32 max = 0, ptr_limit = 0;

	switch (ptr_reg->type) {
	case PTR_TO_STACK:
		/* Offset 0 is out-of-bounds, but acceptable start for the
-		 * left direction, see BPF_REG_FP.
+		 * left direction, see BPF_REG_FP. Also, unknown scalar
+		 * offset where we would need to deal with min/max bounds is
+		 * currently prohibited for unprivileged.
		 */
		max = MAX_BPF_STACK + mask_to_left;
-		off = ptr_reg->off + ptr_reg->var_off.value;
-		if (mask_to_left)
-			*ptr_limit = MAX_BPF_STACK + off;
-		else
-			*ptr_limit = -off - 1;
-		return *ptr_limit >= max ? -ERANGE : 0;
+		ptr_limit = -(ptr_reg->var_off.value + ptr_reg->off);
+		break;
	case PTR_TO_MAP_VALUE:
		max = ptr_reg->map_ptr->value_size;
-		if (mask_to_left) {
-			*ptr_limit = ptr_reg->umax_value + ptr_reg->off;
-		} else {
-			off = ptr_reg->smin_value + ptr_reg->off;
-			*ptr_limit = ptr_reg->map_ptr->value_size - off - 1;
-		}
-		return *ptr_limit >= max ? -ERANGE : 0;
+		ptr_limit = (mask_to_left ?
+			     ptr_reg->smin_value :
+			     ptr_reg->umax_value) + ptr_reg->off;
+		break;
	default:
-		return -EINVAL;
+		return REASON_TYPE;
	}
+
+	if (ptr_limit >= max)
+		return REASON_LIMIT;
+	*alu_limit = ptr_limit;
+	return 0;
}

@@ -2072,7 +2078,7 @@ static int update_alu_sanitation_state(struct bpf_insn_aux_data *aux,
	if (aux->alu_state &&
	    (aux->alu_state != alu_state ||
	     aux->alu_limit != alu_limit))
-		return -EACCES;
+		return REASON_PATHS;

	/* Corresponding fixup done in fixup_bpf_calls(). */
	aux->alu_state = alu_state;
@@ -2091,14 +2097,28 @@ static int sanitize_val_alu(struct bpf_verifier_env *env,
	return update_alu_sanitation_state(aux, BPF_ALU_NON_POINTER, 0);
}

+static bool sanitize_needed(u8 opcode)
+{
+	return opcode == BPF_ADD || opcode == BPF_SUB;
+}
+
+struct bpf_sanitize_info {
+	struct bpf_insn_aux_data aux;
+	bool mask_to_left;
+};
+
 static int sanitize_ptr_alu(struct bpf_verifier_env *env,
			    struct bpf_insn *insn,
			    const struct bpf_reg_state *ptr_reg,
+			    const struct bpf_reg_state *off_reg,
			    struct bpf_reg_state *dst_reg,
-			    bool off_is_neg)
+			    struct bpf_sanitize_info *info,
+			    const bool commit_window)
{
+	struct bpf_insn_aux_data *aux = commit_window ? cur_aux(env) : &info->aux;
	struct bpf_verifier_state *vstate = env->cur_state;
-	struct bpf_insn_aux_data *aux = cur_aux(env);
+	bool off_is_imm = tnum_is_const(off_reg->var_off);
+	bool off_is_neg = off_reg->smin_value < 0;
	bool ptr_is_dst_reg = ptr_reg == dst_reg;
	u8 opcode = BPF_OP(insn->code);
	u32 alu_state, alu_limit;
@@ -2116,18 +2136,47 @@ static int sanitize_ptr_alu(struct bpf_verifier_env *env,
	if (vstate->speculative)
		goto do_sim;

-	alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
-	alu_state |= ptr_is_dst_reg ?
-		     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
-
-	err = retrieve_ptr_limit(ptr_reg, &alu_limit, opcode, off_is_neg);
+	if (!commit_window) {
+		if (!tnum_is_const(off_reg->var_off) &&
+		    (off_reg->smin_value < 0) != (off_reg->smax_value < 0))
+			return REASON_BOUNDS;
+
+		info->mask_to_left = (opcode == BPF_ADD &&  off_is_neg) ||
+				     (opcode == BPF_SUB && !off_is_neg);
+	}
+
+	err = retrieve_ptr_limit(ptr_reg, &alu_limit, info->mask_to_left);
	if (err < 0)
		return err;

+	if (commit_window) {
+		/* In commit phase we narrow the masking window based on
+		 * the observed pointer move after the simulated operation.
+		 */
+		alu_state = info->aux.alu_state;
+		alu_limit = abs(info->aux.alu_limit - alu_limit);
+	} else {
+		alu_state  = off_is_neg ? BPF_ALU_NEG_VALUE : 0;
+		alu_state |= off_is_imm ? BPF_ALU_IMMEDIATE : 0;
+		alu_state |= ptr_is_dst_reg ?
+			     BPF_ALU_SANITIZE_SRC : BPF_ALU_SANITIZE_DST;
+	}
+
	err = update_alu_sanitation_state(aux, alu_state, alu_limit);
	if (err < 0)
		return err;
 do_sim:
+	/* If we're in commit phase, we're done here given we already
+	 * pushed the truncated dst_reg into the speculative verification
+	 * stack.
+	 *
+	 * Also, when register is a known constant, we rewrite register-based
+	 * operation to immediate-based, and thus do not need masking (and as
+	 * a consequence, do not need to simulate the zero-truncation either).
+	 */
+	if (commit_window || off_is_imm)
+		return 0;
+
	/* Simulate and find potential out-of-bounds access under
	 * speculative execution from truncation as a result of
	 * masking when off was not within expected range. If off
@@ -2144,7 +2193,81 @@ do_sim:
	ret = push_stack(env, env->insn_idx + 1, env->insn_idx, true);
	if (!ptr_is_dst_reg && ret)
		*dst_reg = tmp;
-	return !ret ? -EFAULT : 0;
+	return !ret ? REASON_STACK : 0;
+}
+
+static int sanitize_err(struct bpf_verifier_env *env,
+			const struct bpf_insn *insn, int reason,
+			const struct bpf_reg_state *off_reg,
+			const struct bpf_reg_state *dst_reg)
+{
+	static const char *err = "pointer arithmetic with it prohibited for !root";
+	const char *op = BPF_OP(insn->code) == BPF_ADD ? "add" : "sub";
+	u32 dst = insn->dst_reg, src = insn->src_reg;
+
+	switch (reason) {
+	case REASON_BOUNDS:
+		verbose("R%d has unknown scalar with mixed signed bounds, %s\n",
+			off_reg == dst_reg ? dst : src, err);
+		break;
+	case REASON_TYPE:
+		verbose("R%d has pointer with unsupported alu operation, %s\n",
+			off_reg == dst_reg ? src : dst, err);
+		break;
+	case REASON_PATHS:
+		verbose("R%d tried to %s from different maps, paths or scalars, %s\n",
+			dst, op, err);
+		break;
+	case REASON_LIMIT:
+		verbose("R%d tried to %s beyond pointer bounds, %s\n",
+			dst, op, err);
+		break;
+	case REASON_STACK:
+		verbose("R%d could not be pushed for speculative verification, %s\n",
+			dst, err);
+		break;
+	default:
+		verbose("verifier internal error: unknown reason (%d)\n",
+			reason);
+		break;
+	}
+
+	return -EACCES;
+}
+
+static int sanitize_check_bounds(struct bpf_verifier_env *env,
+				 const struct bpf_insn *insn,
+				 const struct bpf_reg_state *dst_reg)
+{
+	u32 dst = insn->dst_reg;
+
+	/* For unprivileged we require that resulting offset must be in bounds
+	 * in order to be able to sanitize access later on.
+	 */
+	if (env->allow_ptr_leaks)
+		return 0;
+
+	switch (dst_reg->type) {
+	case PTR_TO_STACK:
+		if (check_stack_access(env, dst_reg, dst_reg->off +
+					dst_reg->var_off.value, 1)) {
+			verbose("R%d stack pointer arithmetic goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		}
+		break;
+	case PTR_TO_MAP_VALUE:
+		if (check_map_access(env, dst, dst_reg->off, 1)) {
+			verbose("R%d pointer arithmetic of map value goes out of range, "
+				"prohibited for !root\n", dst);
+			return -EACCES;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
}

 /* Handles arithmetic on a pointer and a scalar: computes new min/max and var_off.
@@ -2163,8 +2286,9 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
	    smin_ptr = ptr_reg->smin_value, smax_ptr = ptr_reg->smax_value;
	u64 umin_val = off_reg->umin_value, umax_val = off_reg->umax_value,
	    umin_ptr = ptr_reg->umin_value, umax_ptr = ptr_reg->umax_value;
-	u32 dst = insn->dst_reg, src = insn->src_reg;
+	struct bpf_sanitize_info info = {};
	u8 opcode = BPF_OP(insn->code);
+	u32 dst = insn->dst_reg;
	int ret;

	dst_reg = &regs[dst];
@@ -2180,37 +2304,26 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,

	if (BPF_CLASS(insn->code) != BPF_ALU64) {
		/* 32-bit ALU ops on pointers produce (meaningless) scalars */
-		if (!env->allow_ptr_leaks)
-			verbose("R%d 32-bit pointer arithmetic prohibited\n",
-				dst);
+		verbose("R%d 32-bit pointer arithmetic prohibited\n",
+			dst);
		return -EACCES;
	}

	if (ptr_reg->type == PTR_TO_MAP_VALUE_OR_NULL) {
-		if (!env->allow_ptr_leaks)
-			verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
-				dst);
+		verbose("R%d pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL prohibited, null-check it first\n",
+			dst);
		return -EACCES;
	}
	if (ptr_reg->type == CONST_PTR_TO_MAP) {
-		if (!env->allow_ptr_leaks)
-			verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
-				dst);
+		verbose("R%d pointer arithmetic on CONST_PTR_TO_MAP prohibited\n",
+			dst);
		return -EACCES;
	}
	if (ptr_reg->type == PTR_TO_PACKET_END) {
-		if (!env->allow_ptr_leaks)
-			verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
-				dst);
+		verbose("R%d pointer arithmetic on PTR_TO_PACKET_END prohibited\n",
+			dst);
		return -EACCES;
	}
-	if (ptr_reg->type == PTR_TO_MAP_VALUE) {
-		if (!env->allow_ptr_leaks && !known && (smin_val < 0) != (smax_val < 0)) {
-			verbose("R%d has unknown scalar with mixed signed bounds, pointer arithmetic with it prohibited for !root\n",
-				off_reg == dst_reg ? dst : src);
-			return -EACCES;
-		}
-	}

	/* In case of 'scalar += pointer', dst_reg inherits pointer type and id.
	 * The id may be overwritten later if we create a new variable offset.
@@ -2222,13 +2335,15 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
	    !check_reg_sane_offset(env, ptr_reg, ptr_reg->type))
		return -EINVAL;

+	if (sanitize_needed(opcode)) {
+		ret = sanitize_ptr_alu(env, insn, ptr_reg, off_reg, dst_reg,
+				       &info, false);
+		if (ret < 0)
+			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+	}
+
	switch (opcode) {
	case BPF_ADD:
-		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-		if (ret < 0) {
-			verbose("R%d tried to add from different maps, paths, or prohibited types\n", dst);
-			return ret;
-		}
		/* We can take a fixed offset as long as it doesn't overflow
		 * the s32 'off' field
		 */
@@ -2279,14 +2394,8 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
		}
		break;
	case BPF_SUB:
-		ret = sanitize_ptr_alu(env, insn, ptr_reg, dst_reg, smin_val < 0);
-		if (ret < 0) {
-			verbose("R%d tried to sub from different maps, paths, or prohibited types\n", dst);
-			return ret;
-		}
		if (dst_reg == off_reg) {
			/* scalar -= pointer.  Creates an unknown scalar */
-			if (!env->allow_ptr_leaks)
-				verbose("R%d tried to subtract pointer from scalar\n",
-					dst);
+			verbose("R%d tried to subtract pointer from scalar\n",
+				dst);
			return -EACCES;
@@ -2296,7 +2405,6 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
		 * be able to deal with it.
		 */
		if (ptr_reg->type == PTR_TO_STACK) {
-			if (!env->allow_ptr_leaks)
-				verbose("R%d subtraction from stack pointer prohibited\n",
-					dst);
+			verbose("R%d subtraction from stack pointer prohibited\n",
+				dst);
			return -EACCES;
@@ -2348,17 +2456,12 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
	case BPF_AND:
	case BPF_OR:
	case BPF_XOR:
-		/* bitwise ops on pointers are troublesome, prohibit for now.
-		 * (However, in principle we could allow some cases, e.g.
-		 * ptr &= ~3 which would reduce min_value by 3.)
-		 */
-		if (!env->allow_ptr_leaks)
-			verbose("R%d bitwise operator %s on pointer prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		/* bitwise ops on pointers are troublesome. */
+		verbose("R%d bitwise operator %s on pointer prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
	default:
		/* other operators (e.g. MUL,LSH) produce non-pointer results */
-		if (!env->allow_ptr_leaks)
-			verbose("R%d pointer arithmetic with %s operator prohibited\n",
-				dst, bpf_alu_string[opcode >> 4]);
+		verbose("R%d pointer arithmetic with %s operator prohibited\n",
+			dst, bpf_alu_string[opcode >> 4]);
		return -EACCES;
@@ -2371,22 +2474,13 @@ static int adjust_ptr_min_max_vals(struct bpf_verifier_env *env,
	__reg_deduce_bounds(dst_reg);
	__reg_bound_offset(dst_reg);

-	/* For unprivileged we require that resulting offset must be in bounds
-	 * in order to be able to sanitize access later on.
-	 */
-	if (!env->allow_ptr_leaks) {
-		if (dst_reg->type == PTR_TO_MAP_VALUE &&
-		    check_map_access(env, dst, dst_reg->off, 1)) {
-			verbose("R%d pointer arithmetic of map value goes out of range, "
-				"prohibited for !root\n", dst);
-			return -EACCES;
-		} else if (dst_reg->type == PTR_TO_STACK &&
-			   check_stack_access(env, dst_reg, dst_reg->off +
-					      dst_reg->var_off.value, 1)) {
-			verbose("R%d stack pointer arithmetic goes out of range, "
-				"prohibited for !root\n", dst);
-			return -EACCES;
-		}
-	}
+	if (sanitize_check_bounds(env, insn, dst_reg) < 0)
+		return -EACCES;
+	if (sanitize_needed(opcode)) {
+		ret = sanitize_ptr_alu(env, insn, dst_reg, off_reg, dst_reg,
+				       &info, true);
+		if (ret < 0)
+			return sanitize_err(env, insn, ret, off_reg, dst_reg);
+	}

	return 0;
@@ -2407,7 +2501,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
	s64 smin_val, smax_val;
	u64 umin_val, umax_val;
	u64 insn_bitness = (BPF_CLASS(insn->code) == BPF_ALU64) ? 64 : 32;
-	u32 dst = insn->dst_reg;
	int ret;

	if (insn_bitness == 32) {
@@ -2441,13 +2534,14 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
		return 0;
	}

+	if (sanitize_needed(opcode)) {
+		ret = sanitize_val_alu(env, insn);
+		if (ret < 0)
+			return sanitize_err(env, insn, ret, NULL, NULL);
+	}
+
	switch (opcode) {
	case BPF_ADD:
-		ret = sanitize_val_alu(env, insn);
-		if (ret < 0) {
-			verbose("R%d tried to add from different pointers or scalars\n", dst);
-			return ret;
-		}
		if (signed_add_overflows(dst_reg->smin_value, smin_val) ||
		    signed_add_overflows(dst_reg->smax_value, smax_val)) {
			dst_reg->smin_value = S64_MIN;
@@ -2467,11 +2561,6 @@ static int adjust_scalar_min_max_vals(struct bpf_verifier_env *env,
		dst_reg->var_off = tnum_add(dst_reg->var_off, src_reg.var_off);
		break;
	case BPF_SUB:
-		ret = sanitize_val_alu(env, insn);
-		if (ret < 0) {
-			verbose("R%d tried to sub from different pointers or scalars\n", dst);
-			return ret;
-		}
		if (signed_sub_overflows(dst_reg->smin_value, smax_val) ||
		    signed_sub_overflows(dst_reg->smax_value, smin_val)) {
			/* Overflow possible, we know nothing */
@@ -2664,7 +2753,6 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
	struct bpf_reg_state *regs = cur_regs(env), *dst_reg, *src_reg;
	struct bpf_reg_state *ptr_reg = NULL, off_reg = {0};
	u8 opcode = BPF_OP(insn->code);
-	int rc;

	dst_reg = &regs[insn->dst_reg];
	src_reg = NULL;
@@ -2675,43 +2763,29 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
		if (src_reg->type != SCALAR_VALUE) {
			if (dst_reg->type != SCALAR_VALUE) {
				/* Combining two pointers by any ALU op yields
-				 * an arbitrary scalar.
+				 * an arbitrary scalar. Disallow all math except
+				 * pointer subtraction
				 */
-				if (!env->allow_ptr_leaks) {
-					verbose("R%d pointer %s pointer prohibited\n",
-						insn->dst_reg,
-						bpf_alu_string[opcode >> 4]);
-					return -EACCES;
+				if (opcode == BPF_SUB && env->allow_ptr_leaks) {
+					mark_reg_unknown(regs, insn->dst_reg);
+					return 0;
				}
-				mark_reg_unknown(regs, insn->dst_reg);
-				return 0;
+				verbose("R%d pointer %s pointer prohibited\n",
+					insn->dst_reg,
+					bpf_alu_string[opcode >> 4]);
+				return -EACCES;
			} else {
				/* scalar += pointer
				 * This is legal, but we have to reverse our
				 * src/dest handling in computing the range
				 */
-				rc = adjust_ptr_min_max_vals(env, insn,
-							     src_reg, dst_reg);
-				if (rc == -EACCES && env->allow_ptr_leaks) {
-					/* scalar += unknown scalar */
-					__mark_reg_unknown(&off_reg);
-					return adjust_scalar_min_max_vals(
-							env, insn,
-							dst_reg, off_reg);
-				}
-				return rc;
+				return adjust_ptr_min_max_vals(env, insn,
+							       src_reg, dst_reg);
			}
		} else if (ptr_reg) {
			/* pointer += scalar */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     dst_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += scalar */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, *src_reg);
-			}
-			return rc;
+			return adjust_ptr_min_max_vals(env, insn,
						       dst_reg, src_reg);
		}
	} else {
		/* Pretend the src is a reg with a known value, since we only
@@ -2720,17 +2794,9 @@ static int adjust_reg_min_max_vals(struct bpf_verifier_env *env,
		off_reg.type = SCALAR_VALUE;
		__mark_reg_known(&off_reg, insn->imm);
		src_reg = &off_reg;
-		if (ptr_reg) { /* pointer += K */
-			rc = adjust_ptr_min_max_vals(env, insn,
-						     ptr_reg, src_reg);
-			if (rc == -EACCES && env->allow_ptr_leaks) {
-				/* unknown scalar += K */
-				__mark_reg_unknown(dst_reg);
-				return adjust_scalar_min_max_vals(
-						env, insn, dst_reg, off_reg);
-			}
-			return rc;
-		}
+		if (ptr_reg) /* pointer += K */
+			return adjust_ptr_min_max_vals(env, insn,
						       ptr_reg, src_reg);
	}

	/* Got here implies adding two SCALAR_VALUEs */
@@ -4796,7 +4862,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			const u8 code_sub = BPF_ALU64 | BPF_SUB | BPF_X;
			struct bpf_insn insn_buf[16];
			struct bpf_insn *patch = &insn_buf[0];
-			bool issrc, isneg;
+			bool issrc, isneg, isimm;
			u32 off_reg;

			aux = &env->insn_aux_data[i + delta];
@@ -4807,8 +4873,12 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
			issrc = (aux->alu_state & BPF_ALU_SANITIZE) ==
				BPF_ALU_SANITIZE_SRC;
+			isimm = aux->alu_state & BPF_ALU_IMMEDIATE;

			off_reg = issrc ? insn->src_reg : insn->dst_reg;
-			if (isneg)
-				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
-			*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+			if (isimm) {
+				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
+			} else {
+				if (isneg)
+					*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
+				*patch++ = BPF_MOV32_IMM(BPF_REG_AX, aux->alu_limit);
@@ -4817,6 +4887,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
-			*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
-			*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
-			*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
+				*patch++ = BPF_ALU64_IMM(BPF_NEG, BPF_REG_AX, 0);
+				*patch++ = BPF_ALU64_IMM(BPF_ARSH, BPF_REG_AX, 63);
+				*patch++ = BPF_ALU64_REG(BPF_AND, BPF_REG_AX, off_reg);
+			}
			if (!issrc)
				*patch++ = BPF_MOV64_REG(insn->dst_reg, insn->src_reg);
			insn->src_reg = BPF_REG_AX;
@@ -4824,7 +4895,7 @@ static int fixup_bpf_calls(struct bpf_verifier_env *env)
			insn->code = insn->code == code_add ?
				     code_sub : code_add;
			*patch++ = *insn;
-			if (issrc && isneg)
+			if (issrc && isneg && !isimm)
				*patch++ = BPF_ALU64_IMM(BPF_MUL, off_reg, -1);
			cnt = patch - insn_buf;

kernel/sched/fair.c

@@ -5779,6 +5779,7 @@ static inline int select_idle_smt(struct task_struct *p, struct sched_domain *sd
 */
 static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int target)
{
+	struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
	struct sched_domain *this_sd;
	u64 avg_cost, avg_idle;
	u64 time, cost;
@@ -5809,11 +5810,11 @@ static int select_idle_cpu(struct task_struct *p, struct sched_domain *sd, int t

	time = local_clock();

-	for_each_cpu_wrap(cpu, sched_domain_span(sd), target) {
+	cpumask_and(cpus, sched_domain_span(sd), &p->cpus_allowed);
+
+	for_each_cpu_wrap(cpu, cpus, target) {
		if (!--nr)
			return -1;
-		if (!cpumask_test_cpu(cpu, &p->cpus_allowed))
-			continue;
		if (idle_cpu(cpu))
			break;
	}

mm/hugetlb.c

@@ -4099,10 +4099,20 @@ int hugetlb_mcopy_atomic_pte(struct mm_struct *dst_mm,
	struct page *page;

	if (!*pagep) {
-		ret = -ENOMEM;
+		/* If a page already exists, then it's UFFDIO_COPY for
+		 * a non-missing case. Return -EEXIST.
+		 */
+		if (vm_shared &&
+		    hugetlbfs_pagecache_present(h, dst_vma, dst_addr)) {
+			ret = -EEXIST;
+			goto out;
+		}
+
		page = alloc_huge_page(dst_vma, dst_addr, 0);
-		if (IS_ERR(page))
+		if (IS_ERR(page)) {
+			ret = -ENOMEM;
			goto out;
+		}

		ret = copy_huge_page_from_user(page,
				(const void __user *) src_addr,

net/bluetooth/hci_core.c

@@ -1458,8 +1458,13 @@ static int hci_dev_do_open(struct hci_dev *hdev)
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
-		flush_work(&hdev->cmd_work);
+
+		/* Since hci_rx_work() is possible to awake new cmd_work
+		 * it should be flushed first to avoid unexpected call of
+		 * hci_cmd_work()
+		 */
		flush_work(&hdev->rx_work);
+		flush_work(&hdev->cmd_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

net/bluetooth/hci_sock.c

@@ -750,7 +750,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)
		/* Detach sockets from device */
		read_lock(&hci_sk_list.lock);
		sk_for_each(sk, &hci_sk_list.head) {
-			bh_lock_sock_nested(sk);
+			lock_sock(sk);
			if (hci_pi(sk)->hdev == hdev) {
				hci_pi(sk)->hdev = NULL;
				sk->sk_err = EPIPE;
@@ -759,7 +759,7 @@ void hci_sock_dev_event(struct hci_dev *hdev, int event)

				hci_dev_put(hdev);
			}
-			bh_unlock_sock(sk);
+			release_sock(sk);
		}
		read_unlock(&hci_sk_list.lock);
	}

net/caif/caif_dev.c

@@ -303,7 +303,7 @@ static void dev_flowctrl(struct net_device *dev, int on)
	caifd_put(caifd);
}

-void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
+int caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
		     struct cflayer *link_support, int head_room,
		     struct cflayer **layer,
		     int (**rcv_func)(struct sk_buff *, struct net_device *,
@@ -314,11 +314,12 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	enum cfcnfg_phy_preference pref;
	struct cfcnfg *cfg = get_cfcnfg(dev_net(dev));
	struct caif_device_entry_list *caifdevs;
+	int res;

	caifdevs = caif_device_list(dev_net(dev));
	caifd = caif_device_alloc(dev);
	if (!caifd)
-		return;
+		return -ENOMEM;
	*layer = &caifd->layer;
	spin_lock_init(&caifd->flow_lock);
@@ -340,7 +341,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
			sizeof(caifd->layer.name) - 1);
	caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
	caifd->layer.transmit = transmit;
-	cfcnfg_add_phy_layer(cfg,
+	res = cfcnfg_add_phy_layer(cfg,
				dev,
				&caifd->layer,
				pref,
@@ -350,6 +351,7 @@ void caif_enroll_dev(struct net_device *dev, struct caif_dev_common *caifdev,
	mutex_unlock(&caifdevs->lock);
	if (rcv_func)
		*rcv_func = receive;
+	return res;
}
 EXPORT_SYMBOL(caif_enroll_dev);
@@ -364,6 +366,7 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
	struct cflayer *layer, *link_support;
	int head_room = 0;
	struct caif_device_entry_list *caifdevs;
+	int res;

	cfg = get_cfcnfg(dev_net(dev));
	caifdevs = caif_device_list(dev_net(dev));
@@ -389,8 +392,10 @@ static int caif_device_notify(struct notifier_block *me, unsigned long what,
			break;
		}
	}
-	caif_enroll_dev(dev, caifdev, link_support, head_room,
+	res = caif_enroll_dev(dev, caifdev, link_support, head_room,
			&layer, NULL);
+	if (res)
+		cfserl_release(link_support);
	caifdev->flowctrl = dev_flowctrl;
	break;

net/caif/caif_usb.c

@@ -116,6 +116,11 @@ static struct cflayer *cfusbl_create(int phyid, u8 ethaddr[ETH_ALEN],
	return (struct cflayer *) this;
}

+static void cfusbl_release(struct cflayer *layer)
+{
+	kfree(layer);
+}
+
 static struct packet_type caif_usb_type __read_mostly = {
	.type = cpu_to_be16(ETH_P_802_EX1),
};
@@ -128,6 +133,7 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	struct cflayer *layer, *link_support;
	struct usbnet *usbnet;
	struct usb_device *usbdev;
+	int res;

	/* Check whether we have a NCM device, and find its VID/PID. */
	if (!(dev->dev.parent && dev->dev.parent->driver &&
@@ -170,8 +176,11 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	if (dev->num_tx_queues > 1)
		pr_warn("USB device uses more than one tx queue\n");

-	caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
+	res = caif_enroll_dev(dev, &common, link_support, CFUSB_MAX_HEADLEN,
			&layer, &caif_usb_type.func);
+	if (res)
+		goto err;
+
	if (!pack_added)
		dev_add_pack(&caif_usb_type);
	pack_added = true;
@@ -181,6 +190,9 @@ static int cfusbl_device_notify(struct notifier_block *me, unsigned long what,
	layer->name[sizeof(layer->name) - 1] = 0;

	return 0;
+err:
+	cfusbl_release(link_support);
+	return res;
}

 static struct notifier_block caif_device_notifier = {

net/caif/cfcnfg.c

@@ -452,7 +452,7 @@ unlock:
	rcu_read_unlock();
}

-void
+int
 cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
		     struct net_device *dev, struct cflayer *phy_layer,
		     enum cfcnfg_phy_preference pref,
@@ -461,7 +461,7 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
{
	struct cflayer *frml;
	struct cfcnfg_phyinfo *phyinfo = NULL;
-	int i;
+	int i, res = 0;
	u8 phyid;

	mutex_lock(&cnfg->lock);
@@ -475,12 +475,15 @@ cfcnfg_add_phy_layer(struct cfcnfg *cnfg,
			goto got_phyid;
	}
	pr_warn("Too many CAIF Link Layers (max 6)\n");
+	res = -EEXIST;
	goto out;

 got_phyid:
	phyinfo = kzalloc(sizeof(struct cfcnfg_phyinfo), GFP_ATOMIC);
-	if (!phyinfo)
+	if (!phyinfo) {
+		res = -ENOMEM;
		goto out_err;
+	}

	phy_layer->id = phyid;
	phyinfo->pref = pref;
@@ -494,8 +497,10 @@ got_phyid:

	frml = cffrml_create(phyid, fcs);

-	if (!frml)
+	if (!frml) {
+		res = -ENOMEM;
		goto out_err;
+	}
	phyinfo->frm_layer = frml;
	layer_set_up(frml, cnfg->mux);

@@ -513,11 +518,12 @@ got_phyid:
	list_add_rcu(&phyinfo->node, &cnfg->phys);
 out:
	mutex_unlock(&cnfg->lock);
-	return;
+	return res;

 out_err:
	kfree(phyinfo);
	mutex_unlock(&cnfg->lock);
+	return res;
}
 EXPORT_SYMBOL(cfcnfg_add_phy_layer);

net/caif/cfserl.c

@@ -31,6 +31,11 @@ static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
			   int phyid);

+void cfserl_release(struct cflayer *layer)
+{
+	kfree(layer);
+}
+
 struct cflayer *cfserl_create(int instance, bool use_stx)
{
	struct cfserl *this = kzalloc(sizeof(struct cfserl), GFP_ATOMIC);

net/ieee802154/nl-mac.c

@@ -688,8 +688,10 @@ int ieee802154_llsec_getparams(struct sk_buff *skb, struct genl_info *info)
	    nla_put_u8(msg, IEEE802154_ATTR_LLSEC_SECLEVEL, params.out_level) ||
	    nla_put_u32(msg, IEEE802154_ATTR_LLSEC_FRAME_COUNTER,
			be32_to_cpu(params.frame_counter)) ||
-	    ieee802154_llsec_fill_key_id(msg, &params.out_key))
+	    ieee802154_llsec_fill_key_id(msg, &params.out_key)) {
+		rc = -ENOBUFS;
		goto out_free;
+	}

	dev_put(dev);

net/ieee802154/nl-phy.c

@@ -249,8 +249,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
	}

	if (nla_put_string(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)) ||
-	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name))
+	    nla_put_string(msg, IEEE802154_ATTR_DEV_NAME, dev->name)) {
+		rc = -EMSGSIZE;
		goto nla_put_failure;
+	}

	dev_put(dev);
	wpan_phy_put(phy);

net/netfilter/ipvs/ip_vs_ctl.c

@@ -1262,7 +1262,7 @@ ip_vs_add_service(struct netns_ipvs *ipvs, struct ip_vs_service_user_kern *u,
	ip_vs_addr_copy(svc->af, &svc->addr, &u->addr);
	svc->port = u->port;
	svc->fwmark = u->fwmark;
-	svc->flags = u->flags;
+	svc->flags = u->flags & ~IP_VS_SVC_F_HASHED;
	svc->timeout = u->timeout * HZ;
	svc->netmask = u->netmask;
	svc->ipvs = ipvs;

net/netfilter/nfnetlink_cthelper.c

@@ -370,10 +370,14 @@ static int
 nfnl_cthelper_update(const struct nlattr * const tb[],
		     struct nf_conntrack_helper *helper)
{
+	u32 size;
	int ret;

-	if (tb[NFCTH_PRIV_DATA_LEN])
-		return -EBUSY;
+	if (tb[NFCTH_PRIV_DATA_LEN]) {
+		size = ntohl(nla_get_be32(tb[NFCTH_PRIV_DATA_LEN]));
+		if (size != helper->data_len)
+			return -EBUSY;
+	}

	if (tb[NFCTH_POLICY]) {
		ret = nfnl_cthelper_update_policy(helper, tb[NFCTH_POLICY]);

net/nfc/llcp_sock.c

@@ -122,6 +122,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
	if (!llcp_sock->service_name) {
		nfc_llcp_local_put(llcp_sock->local);
		llcp_sock->local = NULL;
+		llcp_sock->dev = NULL;
		ret = -ENOMEM;
		goto put_dev;
	}
@@ -131,6 +132,7 @@ static int llcp_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
		llcp_sock->local = NULL;
		kfree(llcp_sock->service_name);
		llcp_sock->service_name = NULL;
+		llcp_sock->dev = NULL;
		ret = -EADDRINUSE;
		goto put_dev;
	}

View File

@@ -490,9 +490,10 @@ static void snd_timer_notify1(struct snd_timer_instance *ti, int event)
 		return;
 	if (timer->hw.flags & SNDRV_TIMER_HW_SLAVE)
 		return;
+	event += 10; /* convert to SNDRV_TIMER_EVENT_MXXX */
 	list_for_each_entry(ts, &ti->slave_active_head, active_list)
 		if (ts->ccallback)
-			ts->ccallback(ts, event + 100, &tstamp, resolution);
+			ts->ccallback(ts, event, &tstamp, resolution);
 }
 
 /* start/continue a master timer */
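
In the uapi timer-event enum the master events sit exactly 10 above their plain counterparts, along the lines of (paraphrased from include/uapi/sound/asound.h):

	SNDRV_TIMER_EVENT_MSTART = SNDRV_TIMER_EVENT_START + 10,
	SNDRV_TIMER_EVENT_MSTOP = SNDRV_TIMER_EVENT_STOP + 10,

so the old event + 100 handed slave callbacks event codes no handler understands. Hoisting event += 10 before the loop converts once for every slave.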
@@ -446,11 +446,9 @@ static struct bpf_align_test tests[] = {
 		.insns = {
 			PREP_PKT_POINTERS,
 			BPF_MOV64_IMM(BPF_REG_0, 0),
-			/* ptr & const => unknown & const */
-			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
-			BPF_ALU64_IMM(BPF_AND, BPF_REG_5, 0x40),
-			/* ptr << const => unknown << const */
-			BPF_MOV64_REG(BPF_REG_5, BPF_REG_2),
+			/* (ptr - ptr) << 2 */
+			BPF_MOV64_REG(BPF_REG_5, BPF_REG_3),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_5, BPF_REG_2),
 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_5, 2),
 			/* We have a (4n) value.  Let's make a packet offset
 			 * out of it.  First add 14, to make it a (4n+2)
@@ -473,20 +471,18 @@ static struct bpf_align_test tests[] = {
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 		.result = REJECT,
 		.matches = {
-			{4, "R5=pkt(id=0,off=0,r=0,imm=0)"},
-			/* ptr & 0x40 == either 0 or 0x40 */
-			{5, "R5=inv(id=0,umax_value=64,var_off=(0x0; 0x40))"},
-			/* ptr << 2 == unknown, (4n) */
-			{7, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
+			{4, "R5=pkt_end(id=0,off=0,imm=0)"},
+			/* (ptr - ptr) << 2 == unknown, (4n) */
+			{6, "R5=inv(id=0,smax_value=9223372036854775804,umax_value=18446744073709551612,var_off=(0x0; 0xfffffffffffffffc))"},
 			/* (4n) + 14 == (4n+2).  We blow our bounds, because
 			 * the add could overflow.
 			 */
-			{8, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
+			{7, "R5=inv(id=0,var_off=(0x2; 0xfffffffffffffffc))"},
 			/* Checked s>=0 */
-			{10, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			{9, "R5=inv(id=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 			/* packet pointer + nonnegative (4n+2) */
-			{12, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
-			{14, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			{11, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			{13, "R4=pkt(id=1,off=4,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 			/* NET_IP_ALIGN + (4n+2) == (4n), alignment is fine.
 			 * We checked the bounds, but it might have been able
 			 * to overflow if the packet pointer started in the
@@ -494,7 +490,7 @@ static struct bpf_align_test tests[] = {
 			 * So we did not get a 'range' on R6, and the access
 			 * attempt will fail.
 			 */
-			{16, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
+			{15, "R6=pkt(id=1,off=0,r=0,umin_value=2,umax_value=9223372036854775806,var_off=(0x2; 0x7ffffffffffffffc))"},
 		}
 	},
 	{
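
The rewritten test tracks a verifier change: AND/shift on a packet pointer no longer yields a usable scalar, while subtracting two packet pointers does. What the eBPF program now computes, in C terms (sketch for a SCHED_CLS context):

	void *data = (void *)(long)skb->data;
	void *data_end = (void *)(long)skb->data_end;
	long off = (data_end - data) << 2;	/* unknown scalar, multiple of 4 */
	/* off + 14 is then used as a packet offset, which must still REJECT,
	 * since the add may overflow. */

The expected-state line numbers shift because the new instruction sequence is two insns shorter.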
@@ -462,9 +462,7 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr_unpriv = "R1 subtraction from stack pointer",
-		.result_unpriv = REJECT,
-		.errstr = "R1 invalid mem access",
+		.errstr = "R1 subtraction from stack pointer",
 		.result = REJECT,
 	},
 	{
@@ -1900,9 +1898,8 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.result = ACCEPT,
-		.result_unpriv = REJECT,
-		.errstr_unpriv = "R1 pointer += pointer",
+		.result = REJECT,
+		.errstr = "R1 pointer += pointer",
 	},
 	{
 		"unpriv: neg pointer",
@@ -2235,7 +2232,7 @@ static struct bpf_test tests[] = {
 		.result = ACCEPT,
 	},
 	{
-		"unpriv: adding of fp",
+		"unpriv: adding of fp, reg",
 		.insns = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_MOV64_IMM(BPF_REG_1, 0),
@@ -2243,9 +2240,22 @@ static struct bpf_test tests[] = {
 			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
 			BPF_EXIT_INSN(),
 		},
-		.result = ACCEPT,
-		.result_unpriv = REJECT,
 		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
+	},
+	{
+		"unpriv: adding of fp, imm",
+		.insns = {
+			BPF_MOV64_IMM(BPF_REG_0, 0),
+			BPF_MOV64_REG(BPF_REG_1, BPF_REG_10),
+			BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 0),
+			BPF_STX_MEM(BPF_DW, BPF_REG_1, BPF_REG_0, -8),
+			BPF_EXIT_INSN(),
+		},
+		.errstr_unpriv = "R1 stack pointer arithmetic goes out of range",
+		.result_unpriv = REJECT,
+		.result = ACCEPT,
 	},
 	{
 		"unpriv: cmp of stack pointer",
@@ -2681,7 +2691,8 @@ static struct bpf_test tests[] = {
 			BPF_LDX_MEM(BPF_W, BPF_REG_3, BPF_REG_1,
 				    offsetof(struct __sk_buff, data)),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_4),
-			BPF_MOV64_REG(BPF_REG_2, BPF_REG_1),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, len)),
 			BPF_ALU64_IMM(BPF_LSH, BPF_REG_2, 49),
 			BPF_ALU64_IMM(BPF_RSH, BPF_REG_2, 49),
 			BPF_ALU64_REG(BPF_ADD, BPF_REG_3, BPF_REG_2),
@@ -2988,7 +2999,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "invalid access to packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
@@ -3975,9 +3986,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3, 11 },
-		.errstr_unpriv = "R0 pointer += pointer",
-		.errstr = "R0 invalid mem access 'inv'",
-		.result_unpriv = REJECT,
+		.errstr = "R0 pointer += pointer",
 		.result = REJECT,
 		.flags = F_NEEDS_EFFICIENT_UNALIGNED_ACCESS,
 	},
@@ -4018,7 +4027,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -4039,7 +4048,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -4060,7 +4069,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map1 = { 4 },
-		.errstr = "R4 invalid mem access",
+		.errstr = "R4 pointer arithmetic on PTR_TO_MAP_VALUE_OR_NULL",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS
 	},
@@ -5291,10 +5300,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 bitwise operator &= on pointer",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 bitwise operator &= on pointer",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 2",
@@ -5310,10 +5317,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 32-bit pointer arithmetic prohibited",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 32-bit pointer arithmetic prohibited",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 3",
@@ -5329,10 +5334,8 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map2 = { 3 },
-		.errstr_unpriv = "R0 pointer arithmetic with /= operator",
-		.errstr = "invalid mem access 'inv'",
+		.errstr = "R0 pointer arithmetic with /= operator",
 		.result = REJECT,
-		.result_unpriv = REJECT,
 	},
 	{
 		"map element value illegal alu op, 4",
@@ -5925,8 +5928,7 @@ static struct bpf_test tests[] = {
 			BPF_EXIT_INSN(),
 		},
 		.fixup_map_in_map = { 3 },
-		.errstr = "R1 type=inv expected=map_ptr",
-		.errstr_unpriv = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
+		.errstr = "R1 pointer arithmetic on CONST_PTR_TO_MAP prohibited",
 		.result = REJECT,
 	},
 	{
@@ -6207,7 +6209,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6232,7 +6233,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6259,7 +6259,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6285,7 +6284,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R8 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6334,7 +6332,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6406,7 +6403,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6458,7 +6454,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6486,7 +6481,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6513,7 +6507,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6543,7 +6536,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R7 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 	},
 	{
@@ -6602,7 +6594,6 @@ static struct bpf_test tests[] = {
 		},
 		.fixup_map1 = { 3 },
 		.errstr = "unbounded min value",
-		.errstr_unpriv = "R1 has unknown scalar with mixed signed bounds",
 		.result = REJECT,
 		.result_unpriv = REJECT,
 	},
@@ -7297,6 +7288,19 @@ static struct bpf_test tests[] = {
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
 	},
+	{
+		"pkt_end - pkt_start is allowed",
+		.insns = {
+			BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1,
+				    offsetof(struct __sk_buff, data_end)),
+			BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_1,
+				    offsetof(struct __sk_buff, data)),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_2),
+			BPF_EXIT_INSN(),
+		},
+		.result = ACCEPT,
+		.prog_type = BPF_PROG_TYPE_SCHED_CLS,
+	},
 	{
 		"XDP pkt read, pkt_end mangling, bad access 1",
 		.insns = {
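
The new ACCEPT entry pins down the one pointer subtraction the verifier permits: pkt_end - pkt_start, which degrades to a plain scalar (the packet length). A C source that clang -target bpf would compile to roughly the insns above (sketch):

	#include <linux/bpf.h>

	int prog(struct __sk_buff *skb)
	{
		void *data_end = (void *)(long)skb->data_end;
		void *data = (void *)(long)skb->data;

		return data_end - data;	/* scalar length; fine to return */
	}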
@@ -7312,7 +7316,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "R1 offset is outside of the packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
@@ -7331,7 +7335,7 @@ static struct bpf_test tests[] = {
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_EXIT_INSN(),
 		},
-		.errstr = "R1 offset is outside of the packet",
+		.errstr = "R3 pointer arithmetic on PTR_TO_PACKET_END",
 		.result = REJECT,
 		.prog_type = BPF_PROG_TYPE_XDP,
 	},
@@ -7766,8 +7770,9 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "R0 tried to subtract pointer from scalar",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 2",
@@ -7780,6 +7785,8 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -7790,20 +7797,24 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "R0 tried to subtract pointer from scalar",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 4",
 		.insns = {
+			BPF_MOV64_REG(BPF_REG_6, BPF_REG_1),
 			BPF_MOV64_IMM(BPF_REG_0, 0),
 			BPF_JMP_IMM(BPF_JSLE, BPF_REG_0, 0, 1),
 			BPF_EXIT_INSN(),
 			BPF_JMP_IMM(BPF_JSGE, BPF_REG_0, 0, 1),
 			BPF_EXIT_INSN(),
-			BPF_ALU64_REG(BPF_SUB, BPF_REG_1, BPF_REG_0),
+			BPF_ALU64_REG(BPF_SUB, BPF_REG_6, BPF_REG_0),
 			BPF_EXIT_INSN(),
 		},
+		.errstr_unpriv = "R6 has pointer with unsupported alu operation",
+		.result_unpriv = REJECT,
 		.result = ACCEPT,
 	},
 	{
@@ -7814,8 +7825,9 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "R0 tried to subtract pointer from scalar",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 6",
@@ -7826,8 +7838,9 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "R0 tried to subtract pointer from scalar",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 7",
@@ -7839,8 +7852,9 @@ static struct bpf_test tests[] = {
 				    offsetof(struct __sk_buff, mark)),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "dereference of modified ctx ptr",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 8",
@@ -7852,8 +7866,9 @@ static struct bpf_test tests[] = {
 				    offsetof(struct __sk_buff, mark)),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "dereference of modified ctx ptr",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 9",
@@ -7863,8 +7878,9 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
+		.errstr_unpriv = "R1 has pointer with unsupported alu operation",
 		.errstr = "R0 tried to subtract pointer from scalar",
+		.result = REJECT,
 	},
 	{
 		"check deducing bounds from const, 10",
@@ -7876,8 +7892,8 @@ static struct bpf_test tests[] = {
 			BPF_ALU64_REG(BPF_SUB, BPF_REG_0, BPF_REG_1),
 			BPF_EXIT_INSN(),
 		},
-		.result = REJECT,
 		.errstr = "math between ctx pointer and register with unbounded min value is not allowed",
+		.result = REJECT,
 	},
 	{
 		"XDP pkt read, pkt_end <= pkt_data', bad access 2",