31 files changed, 2028 insertions, 129 deletions
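Before the per-file diff: the heart of this series is four new LSM hooks for InfiniBand objects plus an LSM policy-change notifier. As an orientation aid, here is a minimal, hypothetical sketch of how a security module other than SELinux could wire up those hooks. The hook names, prototypes, LSM_HOOK_INIT() and __lsm_ro_after_init all come from the patches below; every examplelsm_* identifier and the per-object blob layout are invented for illustration and are not part of the series.

/*
 * Hypothetical sketch only -- not part of this series.  It shows the
 * shape of the new CONFIG_SECURITY_INFINIBAND hooks from the LSM side.
 */
#include <linux/lsm_hooks.h>
#include <linux/slab.h>
#include <linux/errno.h>

struct examplelsm_ib_sec {
	u32 sid;	/* whatever label the module tracks per QP/MAD agent */
};

/* Called via security_ib_alloc_security() when a QP or MAD agent is created. */
static int examplelsm_ib_alloc_security(void **ib_sec)
{
	struct examplelsm_ib_sec *sec = kzalloc(sizeof(*sec), GFP_KERNEL);

	if (!sec)
		return -ENOMEM;
	*ib_sec = sec;
	return 0;
}

static void examplelsm_ib_free_security(void *ib_sec)
{
	kfree(ib_sec);
}

/* Return 0 to let the QP/MAD agent use this PKey on this subnet, or -EACCES. */
static int examplelsm_ib_pkey_access(void *ib_sec, u64 subnet_prefix, u16 pkey)
{
	return 0;
}

/* Gate SMI (QP0) traffic, i.e. subnet management, on a given end port. */
static int examplelsm_ib_endport_manage_subnet(void *ib_sec,
					       const char *dev_name, u8 port_num)
{
	return 0;
}

static struct security_hook_list examplelsm_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(ib_alloc_security, examplelsm_ib_alloc_security),
	LSM_HOOK_INIT(ib_free_security, examplelsm_ib_free_security),
	LSM_HOOK_INIT(ib_pkey_access, examplelsm_ib_pkey_access),
	LSM_HOOK_INIT(ib_endport_manage_subnet,
		      examplelsm_ib_endport_manage_subnet),
};
/* Registered with security_add_hooks() from the module's init code. */

SELinux's real implementation of these hooks appears near the end of the diff (selinux_ib_pkey_access() and friends in security/selinux/hooks.c).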
diff --git a/drivers/infiniband/core/Makefile b/drivers/infiniband/core/Makefile index 6ebd9ad95010..e3cdafff8ece 100644 --- a/drivers/infiniband/core/Makefile +++ b/drivers/infiniband/core/Makefile @@ -10,7 +10,8 @@ obj-$(CONFIG_INFINIBAND_USER_ACCESS) += ib_uverbs.o ib_ucm.o \ ib_core-y := packer.o ud_header.o verbs.o cq.o rw.o sysfs.o \ device.o fmr_pool.o cache.o netlink.o \ roce_gid_mgmt.o mr_pool.o addr.o sa_query.o \ - multicast.o mad.o smi.o agent.o mad_rmpp.o + multicast.o mad.o smi.o agent.o mad_rmpp.o \ + security.o ib_core-$(CONFIG_INFINIBAND_USER_MEM) += umem.o ib_core-$(CONFIG_INFINIBAND_ON_DEMAND_PAGING) += umem_odp.o umem_rbtree.o ib_core-$(CONFIG_CGROUP_RDMA) += cgroup.o diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c index b1371eb9f46c..efc94304dee3 100644 --- a/drivers/infiniband/core/cache.c +++ b/drivers/infiniband/core/cache.c @@ -53,6 +53,7 @@ struct ib_update_work { struct work_struct work; struct ib_device *device; u8 port_num; + bool enforce_security; }; union ib_gid zgid; @@ -911,6 +912,26 @@ int ib_get_cached_pkey(struct ib_device *device, } EXPORT_SYMBOL(ib_get_cached_pkey); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx) +{ + unsigned long flags; + int p; + + if (port_num < rdma_start_port(device) || + port_num > rdma_end_port(device)) + return -EINVAL; + + p = port_num - rdma_start_port(device); + read_lock_irqsave(&device->cache.lock, flags); + *sn_pfx = device->cache.ports[p].subnet_prefix; + read_unlock_irqrestore(&device->cache.lock, flags); + + return 0; +} +EXPORT_SYMBOL(ib_get_cached_subnet_prefix); + int ib_find_cached_pkey(struct ib_device *device, u8 port_num, u16 pkey, @@ -1022,7 +1043,8 @@ int ib_get_cached_port_state(struct ib_device *device, EXPORT_SYMBOL(ib_get_cached_port_state); static void ib_cache_update(struct ib_device *device, - u8 port) + u8 port, + bool enforce_security) { struct ib_port_attr *tprops = NULL; struct ib_pkey_cache *pkey_cache = NULL, *old_pkey_cache; @@ -1108,8 +1130,15 @@ static void ib_cache_update(struct ib_device *device, device->cache.ports[port - rdma_start_port(device)].port_state = tprops->state; + device->cache.ports[port - rdma_start_port(device)].subnet_prefix = + tprops->subnet_prefix; write_unlock_irq(&device->cache.lock); + if (enforce_security) + ib_security_cache_change(device, + port, + tprops->subnet_prefix); + kfree(gid_cache); kfree(old_pkey_cache); kfree(tprops); @@ -1126,7 +1155,9 @@ static void ib_cache_task(struct work_struct *_work) struct ib_update_work *work = container_of(_work, struct ib_update_work, work); - ib_cache_update(work->device, work->port_num); + ib_cache_update(work->device, + work->port_num, + work->enforce_security); kfree(work); } @@ -1147,6 +1178,12 @@ static void ib_cache_event(struct ib_event_handler *handler, INIT_WORK(&work->work, ib_cache_task); work->device = event->device; work->port_num = event->element.port_num; + if (event->event == IB_EVENT_PKEY_CHANGE || + event->event == IB_EVENT_GID_CHANGE) + work->enforce_security = true; + else + work->enforce_security = false; + queue_work(ib_wq, &work->work); } } @@ -1172,7 +1209,7 @@ int ib_cache_setup_one(struct ib_device *device) goto out; for (p = 0; p <= rdma_end_port(device) - rdma_start_port(device); ++p) - ib_cache_update(device, p + rdma_start_port(device)); + ib_cache_update(device, p + rdma_start_port(device), true); INIT_IB_EVENT_HANDLER(&device->cache.event_handler, device, ib_cache_event); diff --git a/drivers/infiniband/core/core_priv.h 
b/drivers/infiniband/core/core_priv.h index cb7d372e4bdf..06645272c784 100644 --- a/drivers/infiniband/core/core_priv.h +++ b/drivers/infiniband/core/core_priv.h @@ -38,6 +38,16 @@ #include <linux/cgroup_rdma.h> #include <rdma/ib_verbs.h> +#include <rdma/ib_mad.h> +#include "mad_priv.h" + +struct pkey_index_qp_list { + struct list_head pkey_index_list; + u16 pkey_index; + /* Lock to hold while iterating the qp_list. */ + spinlock_t qp_list_lock; + struct list_head qp_list; +}; #if IS_ENABLED(CONFIG_INFINIBAND_ADDR_TRANS_CONFIGFS) int cma_configfs_init(void); @@ -176,4 +186,109 @@ int ib_nl_handle_set_timeout(struct sk_buff *skb, int ib_nl_handle_ip_res_resp(struct sk_buff *skb, struct netlink_callback *cb); +int ib_get_cached_subnet_prefix(struct ib_device *device, + u8 port_num, + u64 *sn_pfx); + +#ifdef CONFIG_SECURITY_INFINIBAND +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec); + +void ib_security_destroy_port_pkey_list(struct ib_device *device); + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix); + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata); + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_destroy_qp_security_begin(struct ib_qp_security *sec); +void ib_destroy_qp_security_abort(struct ib_qp_security *sec); +void ib_destroy_qp_security_end(struct ib_qp_security *sec); +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev); +void ib_close_shared_qp_security(struct ib_qp_security *sec); +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type); +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent); +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index); +#else +static inline int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + return 0; +} + +static inline void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ +} + +static inline void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ +} + +static inline int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + return qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); +} + +static inline int ib_create_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ +} + +static inline void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ +} + +static inline int ib_open_shared_qp_security(struct ib_qp *qp, + struct ib_device *dev) +{ + return 0; +} + +static inline void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ +} + +static inline int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + return 0; +} + +static inline void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ +} + +static inline int ib_mad_enforce_security(struct ib_mad_agent_private *map, + u16 pkey_index) +{ + return 0; +} +#endif #endif /* _CORE_PRIV_H */ diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index 81d447da0048..631eaa9daf65 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -39,6 +39,8 @@ 
#include <linux/init.h> #include <linux/mutex.h> #include <linux/netdevice.h> +#include <linux/security.h> +#include <linux/notifier.h> #include <rdma/rdma_netlink.h> #include <rdma/ib_addr.h> #include <rdma/ib_cache.h> @@ -82,6 +84,14 @@ static LIST_HEAD(client_list); static DEFINE_MUTEX(device_mutex); static DECLARE_RWSEM(lists_rwsem); +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data); +static void ib_policy_change_task(struct work_struct *work); +static DECLARE_WORK(ib_policy_change_work, ib_policy_change_task); + +static struct notifier_block ibdev_lsm_nb = { + .notifier_call = ib_security_change, +}; static int ib_device_check_mandatory(struct ib_device *device) { @@ -325,6 +335,64 @@ void ib_get_device_fw_str(struct ib_device *dev, char *str, size_t str_len) } EXPORT_SYMBOL(ib_get_device_fw_str); +static int setup_port_pkey_list(struct ib_device *device) +{ + int i; + + /** + * device->port_pkey_list is indexed directly by the port number, + * Therefore it is declared as a 1 based array with potential empty + * slots at the beginning. + */ + device->port_pkey_list = kcalloc(rdma_end_port(device) + 1, + sizeof(*device->port_pkey_list), + GFP_KERNEL); + + if (!device->port_pkey_list) + return -ENOMEM; + + for (i = 0; i < (rdma_end_port(device) + 1); i++) { + spin_lock_init(&device->port_pkey_list[i].list_lock); + INIT_LIST_HEAD(&device->port_pkey_list[i].pkey_list); + } + + return 0; +} + +static void ib_policy_change_task(struct work_struct *work) +{ + struct ib_device *dev; + + down_read(&lists_rwsem); + list_for_each_entry(dev, &device_list, core_list) { + int i; + + for (i = rdma_start_port(dev); i <= rdma_end_port(dev); i++) { + u64 sp; + int ret = ib_get_cached_subnet_prefix(dev, + i, + &sp); + + WARN_ONCE(ret, + "ib_get_cached_subnet_prefix err: %d, this should never happen here\n", + ret); + ib_security_cache_change(dev, i, sp); + } + } + up_read(&lists_rwsem); +} + +static int ib_security_change(struct notifier_block *nb, unsigned long event, + void *lsm_data) +{ + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + schedule_work(&ib_policy_change_work); + + return NOTIFY_OK; +} + /** * ib_register_device - Register an IB device with IB core * @device:Device to register @@ -385,6 +453,12 @@ int ib_register_device(struct ib_device *device, goto out; } + ret = setup_port_pkey_list(device); + if (ret) { + pr_warn("Couldn't create per port_pkey_list\n"); + goto out; + } + ret = ib_cache_setup_one(device); if (ret) { pr_warn("Couldn't set up InfiniBand P_Key/GID cache\n"); @@ -468,6 +542,9 @@ void ib_unregister_device(struct ib_device *device) ib_device_unregister_sysfs(device); ib_cache_cleanup_one(device); + ib_security_destroy_port_pkey_list(device); + kfree(device->port_pkey_list); + down_write(&lists_rwsem); spin_lock_irqsave(&device->client_data_lock, flags); list_for_each_entry_safe(context, tmp, &device->client_data_list, list) @@ -1082,10 +1159,18 @@ static int __init ib_core_init(void) goto err_sa; } + ret = register_lsm_notifier(&ibdev_lsm_nb); + if (ret) { + pr_warn("Couldn't register LSM notifier. 
ret %d\n", ret); + goto err_ibnl_clients; + } + ib_cache_setup(); return 0; +err_ibnl_clients: + ib_remove_ibnl_clients(); err_sa: ib_sa_cleanup(); err_mad: @@ -1105,6 +1190,7 @@ err: static void __exit ib_core_cleanup(void) { + unregister_lsm_notifier(&ibdev_lsm_nb); ib_cache_cleanup(); ib_remove_ibnl_clients(); ib_sa_cleanup(); diff --git a/drivers/infiniband/core/mad.c b/drivers/infiniband/core/mad.c index 192ee3dafb80..f8f53bb90837 100644 --- a/drivers/infiniband/core/mad.c +++ b/drivers/infiniband/core/mad.c @@ -40,9 +40,11 @@ #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/module.h> +#include <linux/security.h> #include <rdma/ib_cache.h> #include "mad_priv.h" +#include "core_priv.h" #include "mad_rmpp.h" #include "smi.h" #include "opa_smi.h" @@ -369,6 +371,12 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, atomic_set(&mad_agent_priv->refcount, 1); init_completion(&mad_agent_priv->comp); + ret2 = ib_mad_agent_security_setup(&mad_agent_priv->agent, qp_type); + if (ret2) { + ret = ERR_PTR(ret2); + goto error4; + } + spin_lock_irqsave(&port_priv->reg_lock, flags); mad_agent_priv->agent.hi_tid = ++ib_mad_client_id; @@ -386,7 +394,7 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (method) { if (method_in_use(&method, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_nonoui_reg_req(mad_reg_req, mad_agent_priv, @@ -402,14 +410,14 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, if (is_vendor_method_in_use( vendor_class, mad_reg_req)) - goto error4; + goto error5; } } ret2 = add_oui_reg_req(mad_reg_req, mad_agent_priv); } if (ret2) { ret = ERR_PTR(ret2); - goto error4; + goto error5; } } @@ -418,9 +426,10 @@ struct ib_mad_agent *ib_register_mad_agent(struct ib_device *device, spin_unlock_irqrestore(&port_priv->reg_lock, flags); return &mad_agent_priv->agent; - -error4: +error5: spin_unlock_irqrestore(&port_priv->reg_lock, flags); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); +error4: kfree(reg_req); error3: kfree(mad_agent_priv); @@ -491,6 +500,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, struct ib_mad_agent *ret; struct ib_mad_snoop_private *mad_snoop_priv; int qpn; + int err; /* Validate parameters */ if ((is_snooping_sends(mad_snoop_flags) && !snoop_handler) || @@ -525,17 +535,25 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device, mad_snoop_priv->agent.port_num = port_num; mad_snoop_priv->mad_snoop_flags = mad_snoop_flags; init_completion(&mad_snoop_priv->comp); + + err = ib_mad_agent_security_setup(&mad_snoop_priv->agent, qp_type); + if (err) { + ret = ERR_PTR(err); + goto error2; + } + mad_snoop_priv->snoop_index = register_snoop_agent( &port_priv->qp_info[qpn], mad_snoop_priv); if (mad_snoop_priv->snoop_index < 0) { ret = ERR_PTR(mad_snoop_priv->snoop_index); - goto error2; + goto error3; } atomic_set(&mad_snoop_priv->refcount, 1); return &mad_snoop_priv->agent; - +error3: + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); error2: kfree(mad_snoop_priv); error1: @@ -581,6 +599,8 @@ static void unregister_mad_agent(struct ib_mad_agent_private *mad_agent_priv) deref_mad_agent(mad_agent_priv); wait_for_completion(&mad_agent_priv->comp); + ib_mad_agent_security_cleanup(&mad_agent_priv->agent); + kfree(mad_agent_priv->reg_req); kfree(mad_agent_priv); } @@ -599,6 +619,8 @@ static void unregister_mad_snoop(struct ib_mad_snoop_private *mad_snoop_priv) deref_snoop_agent(mad_snoop_priv); 
wait_for_completion(&mad_snoop_priv->comp); + ib_mad_agent_security_cleanup(&mad_snoop_priv->agent); + kfree(mad_snoop_priv); } @@ -1215,12 +1237,16 @@ int ib_post_send_mad(struct ib_mad_send_buf *send_buf, /* Walk list of send WRs and post each on send list */ for (; send_buf; send_buf = next_send_buf) { - mad_send_wr = container_of(send_buf, struct ib_mad_send_wr_private, send_buf); mad_agent_priv = mad_send_wr->mad_agent_priv; + ret = ib_mad_enforce_security(mad_agent_priv, + mad_send_wr->send_wr.pkey_index); + if (ret) + goto error; + if (!send_buf->mad_agent->send_handler || (send_buf->timeout_ms && !send_buf->mad_agent->recv_handler)) { @@ -1946,6 +1972,14 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, struct ib_mad_send_wr_private *mad_send_wr; struct ib_mad_send_wc mad_send_wc; unsigned long flags; + int ret; + + ret = ib_mad_enforce_security(mad_agent_priv, + mad_recv_wc->wc->pkey_index); + if (ret) { + ib_free_recv_mad(mad_recv_wc); + deref_mad_agent(mad_agent_priv); + } INIT_LIST_HEAD(&mad_recv_wc->rmpp_list); list_add(&mad_recv_wc->recv_buf.list, &mad_recv_wc->rmpp_list); @@ -2003,6 +2037,8 @@ static void ib_mad_complete_recv(struct ib_mad_agent_private *mad_agent_priv, mad_recv_wc); deref_mad_agent(mad_agent_priv); } + + return; } static enum smi_action handle_ib_smi(const struct ib_mad_port_private *port_priv, diff --git a/drivers/infiniband/core/security.c b/drivers/infiniband/core/security.c new file mode 100644 index 000000000000..3e8c38953912 --- /dev/null +++ b/drivers/infiniband/core/security.c @@ -0,0 +1,705 @@ +/* + * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved. + * + * This software is available to you under a choice of one of two + * licenses. You may choose to be licensed under the terms of the GNU + * General Public License (GPL) Version 2, available from the file + * COPYING in the main directory of this source tree, or the + * OpenIB.org BSD license below: + * + * Redistribution and use in source and binary forms, with or + * without modification, are permitted provided that the following + * conditions are met: + * + * - Redistributions of source code must retain the above + * copyright notice, this list of conditions and the following + * disclaimer. + * + * - Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials + * provided with the distribution. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, + * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND + * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS + * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN + * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN + * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE + * SOFTWARE. 
+ */ + +#ifdef CONFIG_SECURITY_INFINIBAND + +#include <linux/security.h> +#include <linux/completion.h> +#include <linux/list.h> + +#include <rdma/ib_verbs.h> +#include <rdma/ib_cache.h> +#include "core_priv.h" +#include "mad_priv.h" + +static struct pkey_index_qp_list *get_pkey_idx_qp_list(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey = NULL; + struct pkey_index_qp_list *tmp_pkey; + struct ib_device *dev = pp->sec->dev; + + spin_lock(&dev->port_pkey_list[pp->port_num].list_lock); + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[pp->port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + pkey = tmp_pkey; + break; + } + } + spin_unlock(&dev->port_pkey_list[pp->port_num].list_lock); + return pkey; +} + +static int get_pkey_and_subnet_prefix(struct ib_port_pkey *pp, + u16 *pkey, + u64 *subnet_prefix) +{ + struct ib_device *dev = pp->sec->dev; + int ret; + + ret = ib_get_cached_pkey(dev, pp->port_num, pp->pkey_index, pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, pp->port_num, subnet_prefix); + + return ret; +} + +static int enforce_qp_pkey_security(u16 pkey, + u64 subnet_prefix, + struct ib_qp_security *qp_sec) +{ + struct ib_qp_security *shared_qp_sec; + int ret; + + ret = security_ib_pkey_access(qp_sec->security, subnet_prefix, pkey); + if (ret) + return ret; + + if (qp_sec->qp == qp_sec->qp->real_qp) { + list_for_each_entry(shared_qp_sec, + &qp_sec->shared_qp_list, + shared_qp_list) { + ret = security_ib_pkey_access(shared_qp_sec->security, + subnet_prefix, + pkey); + if (ret) + return ret; + } + } + return 0; +} + +/* The caller of this function must hold the QP security + * mutex of the QP of the security structure in *pps. + * + * It takes separate ports_pkeys and security structure + * because in some cases the pps will be for a new settings + * or the pps will be for the real QP and security structure + * will be for a shared QP. + */ +static int check_qp_port_pkey_settings(struct ib_ports_pkeys *pps, + struct ib_qp_security *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret = 0; + + if (!pps) + return 0; + + if (pps->main.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->main, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + if (ret) + return ret; + + if (pps->alt.state != IB_PORT_PKEY_NOT_VALID) { + get_pkey_and_subnet_prefix(&pps->alt, + &pkey, + &subnet_prefix); + + ret = enforce_qp_pkey_security(pkey, + subnet_prefix, + sec); + } + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void qp_to_error(struct ib_qp_security *sec) +{ + struct ib_qp_security *shared_qp_sec; + struct ib_qp_attr attr = { + .qp_state = IB_QPS_ERR + }; + struct ib_event event = { + .event = IB_EVENT_QP_FATAL + }; + + /* If the QP is in the process of being destroyed + * the qp pointer in the security structure is + * undefined. It cannot be modified now. 
+ */ + if (sec->destroying) + return; + + ib_modify_qp(sec->qp, + &attr, + IB_QP_STATE); + + if (sec->qp->event_handler && sec->qp->qp_context) { + event.element.qp = sec->qp; + sec->qp->event_handler(&event, + sec->qp->qp_context); + } + + list_for_each_entry(shared_qp_sec, + &sec->shared_qp_list, + shared_qp_list) { + struct ib_qp *qp = shared_qp_sec->qp; + + if (qp->event_handler && qp->qp_context) { + event.element.qp = qp; + event.device = qp->device; + qp->event_handler(&event, + qp->qp_context); + } + } +} + +static inline void check_pkey_qps(struct pkey_index_qp_list *pkey, + struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct ib_port_pkey *pp, *tmp_pp; + bool comp; + LIST_HEAD(to_error_list); + u16 pkey_val; + + if (!ib_get_cached_pkey(device, + port_num, + pkey->pkey_index, + &pkey_val)) { + spin_lock(&pkey->qp_list_lock); + list_for_each_entry(pp, &pkey->qp_list, qp_list) { + if (atomic_read(&pp->sec->error_list_count)) + continue; + + if (enforce_qp_pkey_security(pkey_val, + subnet_prefix, + pp->sec)) { + atomic_inc(&pp->sec->error_list_count); + list_add(&pp->to_error_list, + &to_error_list); + } + } + spin_unlock(&pkey->qp_list_lock); + } + + list_for_each_entry_safe(pp, + tmp_pp, + &to_error_list, + to_error_list) { + mutex_lock(&pp->sec->mutex); + qp_to_error(pp->sec); + list_del(&pp->to_error_list); + atomic_dec(&pp->sec->error_list_count); + comp = pp->sec->destroying; + mutex_unlock(&pp->sec->mutex); + + if (comp) + complete(&pp->sec->error_complete); + } +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static int port_pkey_list_insert(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *tmp_pkey; + struct pkey_index_qp_list *pkey; + struct ib_device *dev; + u8 port_num = pp->port_num; + int ret = 0; + + if (pp->state != IB_PORT_PKEY_VALID) + return 0; + + dev = pp->sec->dev; + + pkey = get_pkey_idx_qp_list(pp); + + if (!pkey) { + bool found = false; + + pkey = kzalloc(sizeof(*pkey), GFP_KERNEL); + if (!pkey) + return -ENOMEM; + + spin_lock(&dev->port_pkey_list[port_num].list_lock); + /* Check for the PKey again. A racing process may + * have created it. + */ + list_for_each_entry(tmp_pkey, + &dev->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + if (tmp_pkey->pkey_index == pp->pkey_index) { + kfree(pkey); + pkey = tmp_pkey; + found = true; + break; + } + } + + if (!found) { + pkey->pkey_index = pp->pkey_index; + spin_lock_init(&pkey->qp_list_lock); + INIT_LIST_HEAD(&pkey->qp_list); + list_add(&pkey->pkey_index_list, + &dev->port_pkey_list[port_num].pkey_list); + } + spin_unlock(&dev->port_pkey_list[port_num].list_lock); + } + + spin_lock(&pkey->qp_list_lock); + list_add(&pp->qp_list, &pkey->qp_list); + spin_unlock(&pkey->qp_list_lock); + + pp->state = IB_PORT_PKEY_LISTED; + + return ret; +} + +/* The caller of this function must hold the QP security + * mutex. + */ +static void port_pkey_list_remove(struct ib_port_pkey *pp) +{ + struct pkey_index_qp_list *pkey; + + if (pp->state != IB_PORT_PKEY_LISTED) + return; + + pkey = get_pkey_idx_qp_list(pp); + + spin_lock(&pkey->qp_list_lock); + list_del(&pp->qp_list); + spin_unlock(&pkey->qp_list_lock); + + /* The setting may still be valid, i.e. after + * a destroy has failed for example. + */ + pp->state = IB_PORT_PKEY_VALID; +} + +static void destroy_qp_security(struct ib_qp_security *sec) +{ + security_ib_free_security(sec->security); + kfree(sec->ports_pkeys); + kfree(sec); +} + +/* The caller of this function must hold the QP security + * mutex. 
+ */ +static struct ib_ports_pkeys *get_new_pps(const struct ib_qp *qp, + const struct ib_qp_attr *qp_attr, + int qp_attr_mask) +{ + struct ib_ports_pkeys *new_pps; + struct ib_ports_pkeys *qp_pps = qp->qp_sec->ports_pkeys; + + new_pps = kzalloc(sizeof(*new_pps), GFP_KERNEL); + if (!new_pps) + return NULL; + + if (qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) { + if (!qp_pps) { + new_pps->main.port_num = qp_attr->port_num; + new_pps->main.pkey_index = qp_attr->pkey_index; + } else { + new_pps->main.port_num = (qp_attr_mask & IB_QP_PORT) ? + qp_attr->port_num : + qp_pps->main.port_num; + + new_pps->main.pkey_index = + (qp_attr_mask & IB_QP_PKEY_INDEX) ? + qp_attr->pkey_index : + qp_pps->main.pkey_index; + } + new_pps->main.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->main.port_num = qp_pps->main.port_num; + new_pps->main.pkey_index = qp_pps->main.pkey_index; + if (qp_pps->main.state != IB_PORT_PKEY_NOT_VALID) + new_pps->main.state = IB_PORT_PKEY_VALID; + } + + if (qp_attr_mask & IB_QP_ALT_PATH) { + new_pps->alt.port_num = qp_attr->alt_port_num; + new_pps->alt.pkey_index = qp_attr->alt_pkey_index; + new_pps->alt.state = IB_PORT_PKEY_VALID; + } else if (qp_pps) { + new_pps->alt.port_num = qp_pps->alt.port_num; + new_pps->alt.pkey_index = qp_pps->alt.pkey_index; + if (qp_pps->alt.state != IB_PORT_PKEY_NOT_VALID) + new_pps->alt.state = IB_PORT_PKEY_VALID; + } + + new_pps->main.sec = qp->qp_sec; + new_pps->alt.sec = qp->qp_sec; + return new_pps; +} + +int ib_open_shared_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + struct ib_qp *real_qp = qp->real_qp; + int ret; + + ret = ib_create_qp_security(qp, dev); + + if (ret) + return ret; + + mutex_lock(&real_qp->qp_sec->mutex); + ret = check_qp_port_pkey_settings(real_qp->qp_sec->ports_pkeys, + qp->qp_sec); + + if (ret) + goto ret; + + if (qp != real_qp) + list_add(&qp->qp_sec->shared_qp_list, + &real_qp->qp_sec->shared_qp_list); +ret: + mutex_unlock(&real_qp->qp_sec->mutex); + if (ret) + destroy_qp_security(qp->qp_sec); + + return ret; +} + +void ib_close_shared_qp_security(struct ib_qp_security *sec) +{ + struct ib_qp *real_qp = sec->qp->real_qp; + + mutex_lock(&real_qp->qp_sec->mutex); + list_del(&sec->shared_qp_list); + mutex_unlock(&real_qp->qp_sec->mutex); + + destroy_qp_security(sec); +} + +int ib_create_qp_security(struct ib_qp *qp, struct ib_device *dev) +{ + int ret; + + qp->qp_sec = kzalloc(sizeof(*qp->qp_sec), GFP_KERNEL); + if (!qp->qp_sec) + return -ENOMEM; + + qp->qp_sec->qp = qp; + qp->qp_sec->dev = dev; + mutex_init(&qp->qp_sec->mutex); + INIT_LIST_HEAD(&qp->qp_sec->shared_qp_list); + atomic_set(&qp->qp_sec->error_list_count, 0); + init_completion(&qp->qp_sec->error_complete); + ret = security_ib_alloc_security(&qp->qp_sec->security); + if (ret) + kfree(qp->qp_sec); + + return ret; +} +EXPORT_SYMBOL(ib_create_qp_security); + +void ib_destroy_qp_security_begin(struct ib_qp_security *sec) +{ + mutex_lock(&sec->mutex); + + /* Remove the QP from the lists so it won't get added to + * a to_error_list during the destroy process. + */ + if (sec->ports_pkeys) { + port_pkey_list_remove(&sec->ports_pkeys->main); + port_pkey_list_remove(&sec->ports_pkeys->alt); + } + + /* If the QP is already in one or more of those lists + * the destroying flag will ensure the to error flow + * doesn't operate on an undefined QP. + */ + sec->destroying = true; + + /* Record the error list count to know how many completions + * to wait for. 
+ */ + sec->error_comps_pending = atomic_read(&sec->error_list_count); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_abort(struct ib_qp_security *sec) +{ + int ret; + int i; + + /* If a concurrent cache update is in progress this + * QP security could be marked for an error state + * transition. Wait for this to complete. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + mutex_lock(&sec->mutex); + sec->destroying = false; + + /* Restore the position in the lists and verify + * access is still allowed in case a cache update + * occurred while attempting to destroy. + * + * Because these setting were listed already + * and removed during ib_destroy_qp_security_begin + * we know the pkey_index_qp_list for the PKey + * already exists so port_pkey_list_insert won't fail. + */ + if (sec->ports_pkeys) { + port_pkey_list_insert(&sec->ports_pkeys->main); + port_pkey_list_insert(&sec->ports_pkeys->alt); + } + + ret = check_qp_port_pkey_settings(sec->ports_pkeys, sec); + if (ret) + qp_to_error(sec); + + mutex_unlock(&sec->mutex); +} + +void ib_destroy_qp_security_end(struct ib_qp_security *sec) +{ + int i; + + /* If a concurrent cache update is occurring we must + * wait until this QP security structure is processed + * in the QP to error flow before destroying it because + * the to_error_list is in use. + */ + for (i = 0; i < sec->error_comps_pending; i++) + wait_for_completion(&sec->error_complete); + + destroy_qp_security(sec); +} + +void ib_security_cache_change(struct ib_device *device, + u8 port_num, + u64 subnet_prefix) +{ + struct pkey_index_qp_list *pkey; + + list_for_each_entry(pkey, + &device->port_pkey_list[port_num].pkey_list, + pkey_index_list) { + check_pkey_qps(pkey, + device, + port_num, + subnet_prefix); + } +} + +void ib_security_destroy_port_pkey_list(struct ib_device *device) +{ + struct pkey_index_qp_list *pkey, *tmp_pkey; + int i; + + for (i = rdma_start_port(device); i <= rdma_end_port(device); i++) { + spin_lock(&device->port_pkey_list[i].list_lock); + list_for_each_entry_safe(pkey, + tmp_pkey, + &device->port_pkey_list[i].pkey_list, + pkey_index_list) { + list_del(&pkey->pkey_index_list); + kfree(pkey); + } + spin_unlock(&device->port_pkey_list[i].list_lock); + } +} + +int ib_security_modify_qp(struct ib_qp *qp, + struct ib_qp_attr *qp_attr, + int qp_attr_mask, + struct ib_udata *udata) +{ + int ret = 0; + struct ib_ports_pkeys *tmp_pps; + struct ib_ports_pkeys *new_pps; + bool special_qp = (qp->qp_type == IB_QPT_SMI || + qp->qp_type == IB_QPT_GSI || + qp->qp_type >= IB_QPT_RESERVED1); + bool pps_change = ((qp_attr_mask & (IB_QP_PKEY_INDEX | IB_QP_PORT)) || + (qp_attr_mask & IB_QP_ALT_PATH)); + + if (pps_change && !special_qp) { + mutex_lock(&qp->qp_sec->mutex); + new_pps = get_new_pps(qp, + qp_attr, + qp_attr_mask); + + /* Add this QP to the lists for the new port + * and pkey settings before checking for permission + * in case there is a concurrent cache update + * occurring. Walking the list for a cache change + * doesn't acquire the security mutex unless it's + * sending the QP to error. + */ + ret = port_pkey_list_insert(&new_pps->main); + + if (!ret) + ret = port_pkey_list_insert(&new_pps->alt); + + if (!ret) + ret = check_qp_port_pkey_settings(new_pps, + qp->qp_sec); + } + + if (!ret) + ret = qp->device->modify_qp(qp->real_qp, + qp_attr, + qp_attr_mask, + udata); + + if (pps_change && !special_qp) { + /* Clean up the lists and free the appropriate + * ports_pkeys structure. 
+ */ + if (ret) { + tmp_pps = new_pps; + } else { + tmp_pps = qp->qp_sec->ports_pkeys; + qp->qp_sec->ports_pkeys = new_pps; + } + + if (tmp_pps) { + port_pkey_list_remove(&tmp_pps->main); + port_pkey_list_remove(&tmp_pps->alt); + } + kfree(tmp_pps); + mutex_unlock(&qp->qp_sec->mutex); + } + return ret; +} +EXPORT_SYMBOL(ib_security_modify_qp); + +int ib_security_pkey_access(struct ib_device *dev, + u8 port_num, + u16 pkey_index, + void *sec) +{ + u64 subnet_prefix; + u16 pkey; + int ret; + + ret = ib_get_cached_pkey(dev, port_num, pkey_index, &pkey); + if (ret) + return ret; + + ret = ib_get_cached_subnet_prefix(dev, port_num, &subnet_prefix); + + if (ret) + return ret; + + return security_ib_pkey_access(sec, subnet_prefix, pkey); +} +EXPORT_SYMBOL(ib_security_pkey_access); + +static int ib_mad_agent_security_change(struct notifier_block *nb, + unsigned long event, + void *data) +{ + struct ib_mad_agent *ag = container_of(nb, struct ib_mad_agent, lsm_nb); + + if (event != LSM_POLICY_CHANGE) + return NOTIFY_DONE; + + ag->smp_allowed = !security_ib_endport_manage_subnet(ag->security, + ag->device->name, + ag->port_num); + + return NOTIFY_OK; +} + +int ib_mad_agent_security_setup(struct ib_mad_agent *agent, + enum ib_qp_type qp_type) +{ + int ret; + + ret = security_ib_alloc_security(&agent->security); + if (ret) + return ret; + + if (qp_type != IB_QPT_SMI) + return 0; + + ret = security_ib_endport_manage_subnet(agent->security, + agent->device->name, + agent->port_num); + if (ret) + return ret; + + agent->lsm_nb.notifier_call = ib_mad_agent_security_change; + ret = register_lsm_notifier(&agent->lsm_nb); + if (ret) + return ret; + + agent->smp_allowed = true; + agent->lsm_nb_reg = true; + return 0; +} + +void ib_mad_agent_security_cleanup(struct ib_mad_agent *agent) +{ + security_ib_free_security(agent->security); + if (agent->lsm_nb_reg) + unregister_lsm_notifier(&agent->lsm_nb); +} + +int ib_mad_enforce_security(struct ib_mad_agent_private *map, u16 pkey_index) +{ + int ret; + + if (map->agent.qp->qp_type == IB_QPT_SMI && !map->agent.smp_allowed) + return -EACCES; + + ret = ib_security_pkey_access(map->agent.device, + map->agent.port_num, + pkey_index, + map->agent.security); + + if (ret) + return ret; + + return 0; +} + +#endif /* CONFIG_SECURITY_INFINIBAND */ diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c index 70b7fb156414..0ad3b05405d8 100644 --- a/drivers/infiniband/core/uverbs_cmd.c +++ b/drivers/infiniband/core/uverbs_cmd.c @@ -1508,6 +1508,10 @@ static int create_qp(struct ib_uverbs_file *file, } if (cmd->qp_type != IB_QPT_XRC_TGT) { + ret = ib_create_qp_security(qp, device); + if (ret) + goto err_cb; + qp->real_qp = qp; qp->device = device; qp->pd = pd; @@ -2002,14 +2006,17 @@ static int modify_qp(struct ib_uverbs_file *file, if (ret) goto release_qp; } - ret = qp->device->modify_qp(qp, attr, + ret = ib_security_modify_qp(qp, + attr, modify_qp_mask(qp->qp_type, cmd->base.attr_mask), udata); } else { - ret = ib_modify_qp(qp, attr, - modify_qp_mask(qp->qp_type, - cmd->base.attr_mask)); + ret = ib_security_modify_qp(qp, + attr, + modify_qp_mask(qp->qp_type, + cmd->base.attr_mask), + NULL); } release_qp: diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c index 4792f5209ac2..c973a83c898b 100644 --- a/drivers/infiniband/core/verbs.c +++ b/drivers/infiniband/core/verbs.c @@ -44,6 +44,7 @@ #include <linux/in.h> #include <linux/in6.h> #include <net/addrconf.h> +#include <linux/security.h> #include <rdma/ib_verbs.h> 
#include <rdma/ib_cache.h> @@ -713,12 +714,20 @@ static struct ib_qp *__ib_open_qp(struct ib_qp *real_qp, { struct ib_qp *qp; unsigned long flags; + int err; qp = kzalloc(sizeof *qp, GFP_KERNEL); if (!qp) return ERR_PTR(-ENOMEM); qp->real_qp = real_qp; + err = ib_open_shared_qp_security(qp, real_qp->device); + if (err) { + kfree(qp); + return ERR_PTR(err); + } + + qp->real_qp = real_qp; atomic_inc(&real_qp->usecnt); qp->device = real_qp->device; qp->event_handler = event_handler; @@ -804,6 +813,12 @@ struct ib_qp *ib_create_qp(struct ib_pd *pd, if (IS_ERR(qp)) return qp; + ret = ib_create_qp_security(qp, device); + if (ret) { + ib_destroy_qp(qp); + return ERR_PTR(ret); + } + qp->device = device; qp->real_qp = qp; qp->uobject = NULL; @@ -1266,7 +1281,7 @@ int ib_modify_qp(struct ib_qp *qp, return ret; } - return qp->device->modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); + return ib_security_modify_qp(qp->real_qp, qp_attr, qp_attr_mask, NULL); } EXPORT_SYMBOL(ib_modify_qp); @@ -1295,6 +1310,7 @@ int ib_close_qp(struct ib_qp *qp) spin_unlock_irqrestore(&real_qp->device->event_handler_lock, flags); atomic_dec(&real_qp->usecnt); + ib_close_shared_qp_security(qp->qp_sec); kfree(qp); return 0; @@ -1335,6 +1351,7 @@ int ib_destroy_qp(struct ib_qp *qp) struct ib_cq *scq, *rcq; struct ib_srq *srq; struct ib_rwq_ind_table *ind_tbl; + struct ib_qp_security *sec; int ret; WARN_ON_ONCE(qp->mrs_used > 0); @@ -1350,6 +1367,9 @@ int ib_destroy_qp(struct ib_qp *qp) rcq = qp->recv_cq; srq = qp->srq; ind_tbl = qp->rwq_ind_tbl; + sec = qp->qp_sec; + if (sec) + ib_destroy_qp_security_begin(sec); if (!qp->uobject) rdma_rw_cleanup_mrs(qp); @@ -1366,6 +1386,11 @@ int ib_destroy_qp(struct ib_qp *qp) atomic_dec(&srq->usecnt); if (ind_tbl) atomic_dec(&ind_tbl->usecnt); + if (sec) + ib_destroy_qp_security_end(sec); + } else { + if (sec) + ib_destroy_qp_security_abort(sec); } return ret; diff --git a/fs/nfs/super.c b/fs/nfs/super.c index 2f3822a4a7d5..b8e073546507 100644 --- a/fs/nfs/super.c +++ b/fs/nfs/super.c @@ -2544,10 +2544,25 @@ EXPORT_SYMBOL_GPL(nfs_set_sb_security); int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot, struct nfs_mount_info *mount_info) { + int error; + unsigned long kflags = 0, kflags_out = 0; + /* clone any lsm security options from the parent to the new sb */ if (d_inode(mntroot)->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) return -ESTALE; - return security_sb_clone_mnt_opts(mount_info->cloned->sb, s); + + if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL) + kflags |= SECURITY_LSM_NATIVE_LABELS; + + error = security_sb_clone_mnt_opts(mount_info->cloned->sb, s, kflags, + &kflags_out); + if (error) + return error; + + if (NFS_SB(s)->caps & NFS_CAP_SECURITY_LABEL && + !(kflags_out & SECURITY_LSM_NATIVE_LABELS)) + NFS_SB(s)->caps &= ~NFS_CAP_SECURITY_LABEL; + return 0; } EXPORT_SYMBOL_GPL(nfs_clone_sb_security); diff --git a/include/linux/lsm_audit.h b/include/linux/lsm_audit.h index e58e577117b6..22b5d4e687ce 100644 --- a/include/linux/lsm_audit.h +++ b/include/linux/lsm_audit.h @@ -21,6 +21,7 @@ #include <linux/path.h> #include <linux/key.h> #include <linux/skbuff.h> +#include <rdma/ib_verbs.h> struct lsm_network_audit { int netif; @@ -45,6 +46,16 @@ struct lsm_ioctlop_audit { u16 cmd; }; +struct lsm_ibpkey_audit { + u64 subnet_prefix; + u16 pkey; +}; + +struct lsm_ibendport_audit { + char dev_name[IB_DEVICE_NAME_MAX]; + u8 port; +}; + /* Auxiliary data to use in generating the audit record. 
*/ struct common_audit_data { char type; @@ -60,6 +71,8 @@ struct common_audit_data { #define LSM_AUDIT_DATA_DENTRY 10 #define LSM_AUDIT_DATA_IOCTL_OP 11 #define LSM_AUDIT_DATA_FILE 12 +#define LSM_AUDIT_DATA_IBPKEY 13 +#define LSM_AUDIT_DATA_IBENDPORT 14 union { struct path path; struct dentry *dentry; @@ -77,6 +90,8 @@ struct common_audit_data { char *kmod_name; struct lsm_ioctlop_audit *op; struct file *file; + struct lsm_ibpkey_audit *ibpkey; + struct lsm_ibendport_audit *ibendport; } u; /* this union contains LSM specific data */ union { diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h index 080f34e66017..3cc9d77c7527 100644 --- a/include/linux/lsm_hooks.h +++ b/include/linux/lsm_hooks.h @@ -8,6 +8,7 @@ * Copyright (C) 2001 Silicon Graphics, Inc. (Trust Technology Group) * Copyright (C) 2015 Intel Corporation. * Copyright (C) 2015 Casey Schaufler <casey@schaufler-ca.com> + * Copyright (C) 2016 Mellanox Techonologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -911,6 +912,26 @@ * associated with the TUN device's security structure. * @security pointer to the TUN devices's security structure. * + * Security hooks for Infiniband + * + * @ib_pkey_access: + * Check permission to access a pkey when modifing a QP. + * @subnet_prefix the subnet prefix of the port being used. + * @pkey the pkey to be accessed. + * @sec pointer to a security structure. + * @ib_endport_manage_subnet: + * Check permissions to send and receive SMPs on a end port. + * @dev_name the IB device name (i.e. mlx4_0). + * @port_num the port number. + * @sec pointer to a security structure. + * @ib_alloc_security: + * Allocate a security structure for Infiniband objects. + * @sec pointer to a security structure pointer. + * Returns 0 on success, non-zero on failure + * @ib_free_security: + * Deallocate an Infiniband security structure. + * @sec contains the security structure to be freed. + * * Security hooks for XFRM operations. 
* * @xfrm_policy_alloc_security: @@ -1388,7 +1409,9 @@ union security_list_options { unsigned long kern_flags, unsigned long *set_kern_flags); int (*sb_clone_mnt_opts)(const struct super_block *oldsb, - struct super_block *newsb); + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags); int (*sb_parse_opts_str)(char *options, struct security_mnt_opts *opts); int (*dentry_init_security)(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, @@ -1620,6 +1643,14 @@ union security_list_options { int (*tun_dev_open)(void *security); #endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_INFINIBAND + int (*ib_pkey_access)(void *sec, u64 subnet_prefix, u16 pkey); + int (*ib_endport_manage_subnet)(void *sec, const char *dev_name, + u8 port_num); + int (*ib_alloc_security)(void **sec); + void (*ib_free_security)(void *sec); +#endif /* CONFIG_SECURITY_INFINIBAND */ + #ifdef CONFIG_SECURITY_NETWORK_XFRM int (*xfrm_policy_alloc_security)(struct xfrm_sec_ctx **ctxp, struct xfrm_user_sec_ctx *sec_ctx, @@ -1851,6 +1882,12 @@ struct security_hook_heads { struct list_head tun_dev_attach; struct list_head tun_dev_open; #endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_INFINIBAND + struct list_head ib_pkey_access; + struct list_head ib_endport_manage_subnet; + struct list_head ib_alloc_security; + struct list_head ib_free_security; +#endif /* CONFIG_SECURITY_INFINIBAND */ #ifdef CONFIG_SECURITY_NETWORK_XFRM struct list_head xfrm_policy_alloc_security; struct list_head xfrm_policy_clone_security; diff --git a/include/linux/security.h b/include/linux/security.h index caf8b64d8b5c..b6ea1dc9cc9d 100644 --- a/include/linux/security.h +++ b/include/linux/security.h @@ -6,6 +6,7 @@ * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> * Copyright (C) 2001 James Morris <jmorris@intercode.com.au> * Copyright (C) 2001 Silicon Graphics, Inc. 
(Trust Technology Group) + * Copyright (C) 2016 Mellanox Techonologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -68,6 +69,10 @@ struct audit_krule; struct user_namespace; struct timezone; +enum lsm_event { + LSM_POLICY_CHANGE, +}; + /* These functions are in security/commoncap.c */ extern int cap_capable(const struct cred *cred, struct user_namespace *ns, int cap, int audit); @@ -163,6 +168,10 @@ struct security_mnt_opts { int num_mnt_opts; }; +int call_lsm_notifier(enum lsm_event event, void *data); +int register_lsm_notifier(struct notifier_block *nb); +int unregister_lsm_notifier(struct notifier_block *nb); + static inline void security_init_mnt_opts(struct security_mnt_opts *opts) { opts->mnt_opts = NULL; @@ -240,7 +249,9 @@ int security_sb_set_mnt_opts(struct super_block *sb, unsigned long kern_flags, unsigned long *set_kern_flags); int security_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb); + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags); int security_sb_parse_opts_str(char *options, struct security_mnt_opts *opts); int security_dentry_init_security(struct dentry *dentry, int mode, const struct qstr *name, void **ctx, @@ -381,6 +392,21 @@ int security_inode_getsecctx(struct inode *inode, void **ctx, u32 *ctxlen); struct security_mnt_opts { }; +static inline int call_lsm_notifier(enum lsm_event event, void *data) +{ + return 0; +} + +static inline int register_lsm_notifier(struct notifier_block *nb) +{ + return 0; +} + +static inline int unregister_lsm_notifier(struct notifier_block *nb) +{ + return 0; +} + static inline void security_init_mnt_opts(struct security_mnt_opts *opts) { } @@ -581,7 +607,9 @@ static inline int security_sb_set_mnt_opts(struct super_block *sb, } static inline int security_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb) + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags) { return 0; } @@ -1406,6 +1434,32 @@ static inline int security_tun_dev_open(void *security) } #endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_INFINIBAND +int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey); +int security_ib_endport_manage_subnet(void *sec, const char *name, u8 port_num); +int security_ib_alloc_security(void **sec); +void security_ib_free_security(void *sec); +#else /* CONFIG_SECURITY_INFINIBAND */ +static inline int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) +{ + return 0; +} + +static inline int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) +{ + return 0; +} + +static inline int security_ib_alloc_security(void **sec) +{ + return 0; +} + +static inline void security_ib_free_security(void *sec) +{ +} +#endif /* CONFIG_SECURITY_INFINIBAND */ + #ifdef CONFIG_SECURITY_NETWORK_XFRM int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h index d67b11b72029..2f4f1768ded4 100644 --- a/include/rdma/ib_mad.h +++ b/include/rdma/ib_mad.h @@ -575,6 +575,10 @@ struct ib_mad_agent { u32 flags; u8 port_num; u8 rmpp_version; + void *security; + bool smp_allowed; + bool lsm_nb_reg; + struct notifier_block lsm_nb; }; /** diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h index ba8314ec5768..0e480a5630d4 100644 --- a/include/rdma/ib_verbs.h +++ b/include/rdma/ib_verbs.h @@ -1614,6 +1614,45 @@ 
struct ib_rwq_ind_table_init_attr { struct ib_wq **ind_tbl; }; +enum port_pkey_state { + IB_PORT_PKEY_NOT_VALID = 0, + IB_PORT_PKEY_VALID = 1, + IB_PORT_PKEY_LISTED = 2, +}; + +struct ib_qp_security; + +struct ib_port_pkey { + enum port_pkey_state state; + u16 pkey_index; + u8 port_num; + struct list_head qp_list; + struct list_head to_error_list; + struct ib_qp_security *sec; +}; + +struct ib_ports_pkeys { + struct ib_port_pkey main; + struct ib_port_pkey alt; +}; + +struct ib_qp_security { + struct ib_qp *qp; + struct ib_device *dev; + /* Hold this mutex when changing port and pkey settings. */ + struct mutex mutex; + struct ib_ports_pkeys *ports_pkeys; + /* A list of all open shared QP handles. Required to enforce security + * properly for all users of a shared QP. + */ + struct list_head shared_qp_list; + void *security; + bool destroying; + atomic_t error_list_count; + struct completion error_complete; + int error_comps_pending; +}; + /* * @max_write_sge: Maximum SGE elements per RDMA WRITE request. * @max_read_sge: Maximum SGE elements per RDMA READ request. @@ -1643,6 +1682,7 @@ struct ib_qp { u32 max_read_sge; enum ib_qp_type qp_type; struct ib_rwq_ind_table *rwq_ind_tbl; + struct ib_qp_security *qp_sec; }; struct ib_mr { @@ -1891,6 +1931,7 @@ enum ib_mad_result { }; struct ib_port_cache { + u64 subnet_prefix; struct ib_pkey_cache *pkey; struct ib_gid_table *gid; u8 lmc; @@ -1940,6 +1981,12 @@ struct rdma_netdev { union ib_gid *gid, u16 mlid); }; +struct ib_port_pkey_list { + /* Lock to hold while modifying the list. */ + spinlock_t list_lock; + struct list_head pkey_list; +}; + struct ib_device { /* Do not access @dma_device directly from ULP nor from HW drivers. */ struct device *dma_device; @@ -1963,6 +2010,8 @@ struct ib_device { int num_comp_vectors; + struct ib_port_pkey_list *port_pkey_list; + struct iw_cm_verbs *iwcm; /** diff --git a/security/Kconfig b/security/Kconfig index bdcbb92927ab..d540bfe73190 100644 --- a/security/Kconfig +++ b/security/Kconfig @@ -54,6 +54,15 @@ config SECURITY_NETWORK implement socket and networking access controls. If you are unsure how to answer this question, answer N. +config SECURITY_INFINIBAND + bool "Infiniband Security Hooks" + depends on SECURITY && INFINIBAND + help + This enables the Infiniband security hooks. + If enabled, a security module can use these hooks to + implement Infiniband access controls. + If you are unsure how to answer this question, answer N. 
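The security/security.c changes a little further down implement the notifier chain behind the register_lsm_notifier()/call_lsm_notifier() API declared in linux/security.h above. From the consumer side it is used the way the InfiniBand core uses it in device.c earlier in this diff; the condensed sketch below shows that pattern under the assumption that the example_* names are invented, while the API calls themselves come from the patch.

#include <linux/security.h>
#include <linux/notifier.h>
#include <linux/workqueue.h>

static void example_policy_change_task(struct work_struct *work)
{
	/* Re-validate cached access decisions now that LSM policy changed. */
}
static DECLARE_WORK(example_policy_change_work, example_policy_change_task);

static int example_lsm_event(struct notifier_block *nb, unsigned long event,
			     void *lsm_data)
{
	if (event != LSM_POLICY_CHANGE)
		return NOTIFY_DONE;

	/* The chain is atomic, so defer the real work to process context. */
	schedule_work(&example_policy_change_work);
	return NOTIFY_OK;
}

static struct notifier_block example_lsm_nb = {
	.notifier_call = example_lsm_event,
};

/*
 * init:  register_lsm_notifier(&example_lsm_nb);
 * exit:  unregister_lsm_notifier(&example_lsm_nb);
 * An LSM raises the event with call_lsm_notifier(LSM_POLICY_CHANGE, NULL),
 * as SELinux does from its AVC reset callback later in this diff.
 */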
+ config SECURITY_NETWORK_XFRM bool "XFRM (IPSec) Networking Security Hooks" depends on XFRM && SECURITY_NETWORK diff --git a/security/lsm_audit.c b/security/lsm_audit.c index 37f04dadc8d6..28d4c3a528ab 100644 --- a/security/lsm_audit.c +++ b/security/lsm_audit.c @@ -410,6 +410,22 @@ static void dump_common_audit_data(struct audit_buffer *ab, audit_log_format(ab, " kmod="); audit_log_untrustedstring(ab, a->u.kmod_name); break; + case LSM_AUDIT_DATA_IBPKEY: { + struct in6_addr sbn_pfx; + + memset(&sbn_pfx.s6_addr, 0, + sizeof(sbn_pfx.s6_addr)); + memcpy(&sbn_pfx.s6_addr, &a->u.ibpkey->subnet_prefix, + sizeof(a->u.ibpkey->subnet_prefix)); + audit_log_format(ab, " pkey=0x%x subnet_prefix=%pI6c", + a->u.ibpkey->pkey, &sbn_pfx); + break; + } + case LSM_AUDIT_DATA_IBENDPORT: + audit_log_format(ab, " device=%s port_num=%u", + a->u.ibendport->dev_name, + a->u.ibendport->port); + break; } /* switch (a->type) */ } diff --git a/security/security.c b/security/security.c index 38316bb28b16..30132378d103 100644 --- a/security/security.c +++ b/security/security.c @@ -4,6 +4,7 @@ * Copyright (C) 2001 WireX Communications, Inc <chris@wirex.com> * Copyright (C) 2001-2002 Greg Kroah-Hartman <greg@kroah.com> * Copyright (C) 2001 Networks Associates Technology, Inc <ssmalley@nai.com> + * Copyright (C) 2016 Mellanox Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by @@ -34,6 +35,8 @@ #define SECURITY_NAME_MAX 10 struct security_hook_heads security_hook_heads __lsm_ro_after_init; +static ATOMIC_NOTIFIER_HEAD(lsm_notifier_chain); + char *lsm_names; /* Boot-time LSM user choice */ static __initdata char chosen_lsm[SECURITY_NAME_MAX + 1] = @@ -165,6 +168,24 @@ void __init security_add_hooks(struct security_hook_list *hooks, int count, panic("%s - Cannot get early memory.\n", __func__); } +int call_lsm_notifier(enum lsm_event event, void *data) +{ + return atomic_notifier_call_chain(&lsm_notifier_chain, event, data); +} +EXPORT_SYMBOL(call_lsm_notifier); + +int register_lsm_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_register(&lsm_notifier_chain, nb); +} +EXPORT_SYMBOL(register_lsm_notifier); + +int unregister_lsm_notifier(struct notifier_block *nb) +{ + return atomic_notifier_chain_unregister(&lsm_notifier_chain, nb); +} +EXPORT_SYMBOL(unregister_lsm_notifier); + /* * Hook list operation macros. 
* @@ -399,9 +420,12 @@ int security_sb_set_mnt_opts(struct super_block *sb, EXPORT_SYMBOL(security_sb_set_mnt_opts); int security_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb) + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags) { - return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb); + return call_int_hook(sb_clone_mnt_opts, 0, oldsb, newsb, + kern_flags, set_kern_flags); } EXPORT_SYMBOL(security_sb_clone_mnt_opts); @@ -1515,6 +1539,33 @@ EXPORT_SYMBOL(security_tun_dev_open); #endif /* CONFIG_SECURITY_NETWORK */ +#ifdef CONFIG_SECURITY_INFINIBAND + +int security_ib_pkey_access(void *sec, u64 subnet_prefix, u16 pkey) +{ + return call_int_hook(ib_pkey_access, 0, sec, subnet_prefix, pkey); +} +EXPORT_SYMBOL(security_ib_pkey_access); + +int security_ib_endport_manage_subnet(void *sec, const char *dev_name, u8 port_num) +{ + return call_int_hook(ib_endport_manage_subnet, 0, sec, dev_name, port_num); +} +EXPORT_SYMBOL(security_ib_endport_manage_subnet); + +int security_ib_alloc_security(void **sec) +{ + return call_int_hook(ib_alloc_security, 0, sec); +} +EXPORT_SYMBOL(security_ib_alloc_security); + +void security_ib_free_security(void *sec) +{ + call_void_hook(ib_free_security, sec); +} +EXPORT_SYMBOL(security_ib_free_security); +#endif /* CONFIG_SECURITY_INFINIBAND */ + #ifdef CONFIG_SECURITY_NETWORK_XFRM int security_xfrm_policy_alloc(struct xfrm_sec_ctx **ctxp, diff --git a/security/selinux/Makefile b/security/selinux/Makefile index 3411c33e2a44..ff5895ede96f 100644 --- a/security/selinux/Makefile +++ b/security/selinux/Makefile @@ -5,7 +5,7 @@ obj-$(CONFIG_SECURITY_SELINUX) := selinux.o selinux-y := avc.o hooks.o selinuxfs.o netlink.o nlmsgtab.o netif.o \ - netnode.o netport.o exports.o \ + netnode.o netport.o ibpkey.o exports.o \ ss/ebitmap.o ss/hashtab.o ss/symtab.o ss/sidtab.o ss/avtab.o \ ss/policydb.o ss/services.o ss/conditional.o ss/mls.o ss/status.o diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c index e67a526d1f30..3a06afbd2f6f 100644 --- a/security/selinux/hooks.c +++ b/security/selinux/hooks.c @@ -17,6 +17,7 @@ * Paul Moore <paul@paul-moore.com> * Copyright (C) 2007 Hitachi Software Engineering Co., Ltd. * Yuichi Nakamura <ynakam@hitachisoft.jp> + * Copyright (C) 2016 Mellanox Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, @@ -90,6 +91,7 @@ #include "netif.h" #include "netnode.h" #include "netport.h" +#include "ibpkey.h" #include "xfrm.h" #include "netlabel.h" #include "audit.h" @@ -171,6 +173,16 @@ static int selinux_netcache_avc_callback(u32 event) return 0; } +static int selinux_lsm_notifier_avc_callback(u32 event) +{ + if (event == AVC_CALLBACK_RESET) { + sel_ib_pkey_flush(); + call_lsm_notifier(LSM_POLICY_CHANGE, NULL); + } + + return 0; +} + /* * initialise the security for the init task */ @@ -398,18 +410,6 @@ static void superblock_free_security(struct super_block *sb) kfree(sbsec); } -/* The file system's label must be initialized prior to use. 
*/ - -static const char *labeling_behaviors[7] = { - "uses xattr", - "uses transition SIDs", - "uses task SIDs", - "uses genfs_contexts", - "not configured for labeling", - "uses mountpoint labeling", - "uses native labeling", -}; - static inline int inode_doinit(struct inode *inode) { return inode_doinit_with_dentry(inode, NULL); @@ -524,13 +524,17 @@ static int sb_finish_set_opts(struct super_block *sb) } } - if (sbsec->behavior > ARRAY_SIZE(labeling_behaviors)) - printk(KERN_ERR "SELinux: initialized (dev %s, type %s), unknown behavior\n", - sb->s_id, sb->s_type->name); - sbsec->flags |= SE_SBINITIALIZED; + + /* + * Explicitly set or clear SBLABEL_MNT. It's not sufficient to simply + * leave the flag untouched because sb_clone_mnt_opts might be handing + * us a superblock that needs the flag to be cleared. + */ if (selinux_is_sblabel_mnt(sb)) sbsec->flags |= SBLABEL_MNT; + else + sbsec->flags &= ~SBLABEL_MNT; /* Initialize the root inode. */ rc = inode_doinit_with_dentry(root_inode, root); @@ -809,6 +813,7 @@ static int selinux_set_mnt_opts(struct super_block *sb, sbsec->flags |= SE_SBPROC | SE_SBGENFS; if (!strcmp(sb->s_type->name, "debugfs") || + !strcmp(sb->s_type->name, "tracefs") || !strcmp(sb->s_type->name, "sysfs") || !strcmp(sb->s_type->name, "pstore")) sbsec->flags |= SE_SBGENFS; @@ -963,8 +968,11 @@ mismatch: } static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, - struct super_block *newsb) + struct super_block *newsb, + unsigned long kern_flags, + unsigned long *set_kern_flags) { + int rc = 0; const struct superblock_security_struct *oldsbsec = oldsb->s_security; struct superblock_security_struct *newsbsec = newsb->s_security; @@ -979,6 +987,13 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, if (!ss_initialized) return 0; + /* + * Specifying internal flags without providing a place to + * place the results is not allowed. + */ + if (kern_flags && !set_kern_flags) + return -EINVAL; + /* how can we clone if the old one wasn't set up?? 
*/ BUG_ON(!(oldsbsec->flags & SE_SBINITIALIZED)); @@ -994,6 +1009,18 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, newsbsec->def_sid = oldsbsec->def_sid; newsbsec->behavior = oldsbsec->behavior; + if (newsbsec->behavior == SECURITY_FS_USE_NATIVE && + !(kern_flags & SECURITY_LSM_NATIVE_LABELS) && !set_context) { + rc = security_fs_use(newsb); + if (rc) + goto out; + } + + if (kern_flags & SECURITY_LSM_NATIVE_LABELS && !set_context) { + newsbsec->behavior = SECURITY_FS_USE_NATIVE; + *set_kern_flags |= SECURITY_LSM_NATIVE_LABELS; + } + if (set_context) { u32 sid = oldsbsec->mntpoint_sid; @@ -1013,8 +1040,9 @@ static int selinux_sb_clone_mnt_opts(const struct super_block *oldsb, } sb_finish_set_opts(newsb); +out: mutex_unlock(&newsbsec->lock); - return 0; + return rc; } static int selinux_parse_opts_str(char *options, @@ -2063,8 +2091,9 @@ static inline u32 file_to_av(struct file *file) static inline u32 open_file_to_av(struct file *file) { u32 av = file_to_av(file); + struct inode *inode = file_inode(file); - if (selinux_policycap_openperm) + if (selinux_policycap_openperm && inode->i_sb->s_magic != SOCKFS_MAGIC) av |= FILE__OPEN; return av; @@ -3059,6 +3088,7 @@ static int selinux_inode_permission(struct inode *inode, int mask) static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) { const struct cred *cred = current_cred(); + struct inode *inode = d_backing_inode(dentry); unsigned int ia_valid = iattr->ia_valid; __u32 av = FILE__WRITE; @@ -3074,8 +3104,10 @@ static int selinux_inode_setattr(struct dentry *dentry, struct iattr *iattr) ATTR_ATIME_SET | ATTR_MTIME_SET | ATTR_TIMES_SET)) return dentry_has_perm(cred, dentry, FILE__SETATTR); - if (selinux_policycap_openperm && (ia_valid & ATTR_SIZE) - && !(ia_valid & ATTR_FILE)) + if (selinux_policycap_openperm && + inode->i_sb->s_magic != SOCKFS_MAGIC && + (ia_valid & ATTR_SIZE) && + !(ia_valid & ATTR_FILE)) av |= FILE__OPEN; return dentry_has_perm(cred, dentry, av); @@ -3107,6 +3139,18 @@ static int selinux_inode_setotherxattr(struct dentry *dentry, const char *name) return dentry_has_perm(cred, dentry, FILE__SETATTR); } +static bool has_cap_mac_admin(bool audit) +{ + const struct cred *cred = current_cred(); + int cap_audit = audit ? SECURITY_CAP_AUDIT : SECURITY_CAP_NOAUDIT; + + if (cap_capable(cred, &init_user_ns, CAP_MAC_ADMIN, cap_audit)) + return false; + if (cred_has_capability(cred, CAP_MAC_ADMIN, cap_audit, true)) + return false; + return true; +} + static int selinux_inode_setxattr(struct dentry *dentry, const char *name, const void *value, size_t size, int flags) { @@ -3138,7 +3182,7 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name, rc = security_context_to_sid(value, size, &newsid, GFP_KERNEL); if (rc == -EINVAL) { - if (!capable(CAP_MAC_ADMIN)) { + if (!has_cap_mac_admin(true)) { struct audit_buffer *ab; size_t audit_size; const char *str; @@ -3264,13 +3308,8 @@ static int selinux_inode_getsecurity(struct inode *inode, const char *name, void * and lack of permission just means that we fall back to the * in-core context value, not a denial. 
*/ - error = cap_capable(current_cred(), &init_user_ns, CAP_MAC_ADMIN, - SECURITY_CAP_NOAUDIT); - if (!error) - error = cred_has_capability(current_cred(), CAP_MAC_ADMIN, - SECURITY_CAP_NOAUDIT, true); isec = inode_security(inode); - if (!error) + if (has_cap_mac_admin(false)) error = security_sid_to_context_force(isec->sid, &context, &size); else @@ -3550,6 +3589,18 @@ static int selinux_mmap_addr(unsigned long addr) static int selinux_mmap_file(struct file *file, unsigned long reqprot, unsigned long prot, unsigned long flags) { + struct common_audit_data ad; + int rc; + + if (file) { + ad.type = LSM_AUDIT_DATA_FILE; + ad.u.file = file; + rc = inode_has_perm(current_cred(), file_inode(file), + FILE__MAP, &ad); + if (rc) + return rc; + } + if (selinux_checkreqprot) prot = reqprot; @@ -3710,7 +3761,8 @@ static int selinux_file_open(struct file *file, const struct cred *cred) /* task security operations */ -static int selinux_task_create(unsigned long clone_flags) +static int selinux_task_alloc(struct task_struct *task, + unsigned long clone_flags) { u32 sid = current_sid(); @@ -5918,7 +5970,7 @@ static int selinux_setprocattr(const char *name, void *value, size_t size) } error = security_context_to_sid(value, size, &sid, GFP_KERNEL); if (error == -EINVAL && !strcmp(name, "fscreate")) { - if (!capable(CAP_MAC_ADMIN)) { + if (!has_cap_mac_admin(true)) { struct audit_buffer *ab; size_t audit_size; @@ -6128,7 +6180,70 @@ static int selinux_key_getsecurity(struct key *key, char **_buffer) *_buffer = context; return rc; } +#endif + +#ifdef CONFIG_SECURITY_INFINIBAND +static int selinux_ib_pkey_access(void *ib_sec, u64 subnet_prefix, u16 pkey_val) +{ + struct common_audit_data ad; + int err; + u32 sid = 0; + struct ib_security_struct *sec = ib_sec; + struct lsm_ibpkey_audit ibpkey; + + err = sel_ib_pkey_sid(subnet_prefix, pkey_val, &sid); + if (err) + return err; + + ad.type = LSM_AUDIT_DATA_IBPKEY; + ibpkey.subnet_prefix = subnet_prefix; + ibpkey.pkey = pkey_val; + ad.u.ibpkey = &ibpkey; + return avc_has_perm(sec->sid, sid, + SECCLASS_INFINIBAND_PKEY, + INFINIBAND_PKEY__ACCESS, &ad); +} + +static int selinux_ib_endport_manage_subnet(void *ib_sec, const char *dev_name, + u8 port_num) +{ + struct common_audit_data ad; + int err; + u32 sid = 0; + struct ib_security_struct *sec = ib_sec; + struct lsm_ibendport_audit ibendport; + + err = security_ib_endport_sid(dev_name, port_num, &sid); + + if (err) + return err; + + ad.type = LSM_AUDIT_DATA_IBENDPORT; + strncpy(ibendport.dev_name, dev_name, sizeof(ibendport.dev_name)); + ibendport.port = port_num; + ad.u.ibendport = &ibendport; + return avc_has_perm(sec->sid, sid, + SECCLASS_INFINIBAND_ENDPORT, + INFINIBAND_ENDPORT__MANAGE_SUBNET, &ad); +} + +static int selinux_ib_alloc_security(void **ib_sec) +{ + struct ib_security_struct *sec; + sec = kzalloc(sizeof(*sec), GFP_KERNEL); + if (!sec) + return -ENOMEM; + sec->sid = current_sid(); + + *ib_sec = sec; + return 0; +} + +static void selinux_ib_free_security(void *ib_sec) +{ + kfree(ib_sec); +} #endif static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { @@ -6213,7 +6328,7 @@ static struct security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(file_open, selinux_file_open), - LSM_HOOK_INIT(task_create, selinux_task_create), + LSM_HOOK_INIT(task_alloc, selinux_task_alloc), LSM_HOOK_INIT(cred_alloc_blank, selinux_cred_alloc_blank), LSM_HOOK_INIT(cred_free, selinux_cred_free), LSM_HOOK_INIT(cred_prepare, selinux_cred_prepare), @@ -6315,7 +6430,13 @@ static struct 
security_hook_list selinux_hooks[] __lsm_ro_after_init = { LSM_HOOK_INIT(tun_dev_attach_queue, selinux_tun_dev_attach_queue), LSM_HOOK_INIT(tun_dev_attach, selinux_tun_dev_attach), LSM_HOOK_INIT(tun_dev_open, selinux_tun_dev_open), - +#ifdef CONFIG_SECURITY_INFINIBAND + LSM_HOOK_INIT(ib_pkey_access, selinux_ib_pkey_access), + LSM_HOOK_INIT(ib_endport_manage_subnet, + selinux_ib_endport_manage_subnet), + LSM_HOOK_INIT(ib_alloc_security, selinux_ib_alloc_security), + LSM_HOOK_INIT(ib_free_security, selinux_ib_free_security), +#endif #ifdef CONFIG_SECURITY_NETWORK_XFRM LSM_HOOK_INIT(xfrm_policy_alloc_security, selinux_xfrm_policy_alloc), LSM_HOOK_INIT(xfrm_policy_clone_security, selinux_xfrm_policy_clone), @@ -6379,6 +6500,9 @@ static __init int selinux_init(void) if (avc_add_callback(selinux_netcache_avc_callback, AVC_CALLBACK_RESET)) panic("SELinux: Unable to register AVC netcache callback\n"); + if (avc_add_callback(selinux_lsm_notifier_avc_callback, AVC_CALLBACK_RESET)) + panic("SELinux: Unable to register AVC LSM notifier callback\n"); + if (selinux_enforcing) printk(KERN_DEBUG "SELinux: Starting in enforcing mode\n"); else @@ -6448,6 +6572,23 @@ static struct nf_hook_ops selinux_nf_ops[] = { #endif /* IPV6 */ }; +static int __net_init selinux_nf_register(struct net *net) +{ + return nf_register_net_hooks(net, selinux_nf_ops, + ARRAY_SIZE(selinux_nf_ops)); +} + +static void __net_exit selinux_nf_unregister(struct net *net) +{ + nf_unregister_net_hooks(net, selinux_nf_ops, + ARRAY_SIZE(selinux_nf_ops)); +} + +static struct pernet_operations selinux_net_ops = { + .init = selinux_nf_register, + .exit = selinux_nf_unregister, +}; + static int __init selinux_nf_ip_init(void) { int err; @@ -6457,13 +6598,12 @@ static int __init selinux_nf_ip_init(void) printk(KERN_DEBUG "SELinux: Registering netfilter hooks\n"); - err = nf_register_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops)); + err = register_pernet_subsys(&selinux_net_ops); if (err) - panic("SELinux: nf_register_hooks: error %d\n", err); + panic("SELinux: register_pernet_subsys: error %d\n", err); return 0; } - __initcall(selinux_nf_ip_init); #ifdef CONFIG_SECURITY_SELINUX_DISABLE @@ -6471,7 +6611,7 @@ static void selinux_nf_ip_exit(void) { printk(KERN_DEBUG "SELinux: Unregistering netfilter hooks\n"); - nf_unregister_hooks(selinux_nf_ops, ARRAY_SIZE(selinux_nf_ops)); + unregister_pernet_subsys(&selinux_net_ops); } #endif diff --git a/security/selinux/ibpkey.c b/security/selinux/ibpkey.c new file mode 100644 index 000000000000..e3614ee5f1c0 --- /dev/null +++ b/security/selinux/ibpkey.c @@ -0,0 +1,245 @@ +/* + * Pkey table + * + * SELinux must keep a mapping of Infinband PKEYs to labels/SIDs. This + * mapping is maintained as part of the normal policy but a fast cache is + * needed to reduce the lookup overhead. + * + * This code is heavily based on the "netif" and "netport" concept originally + * developed by + * James Morris <jmorris@redhat.com> and + * Paul Moore <paul@paul-moore.com> + * (see security/selinux/netif.c and security/selinux/netport.c for more + * information) + * + */ + +/* + * (c) Mellanox Technologies, 2016 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + */ + +#include <linux/types.h> +#include <linux/rcupdate.h> +#include <linux/list.h> +#include <linux/spinlock.h> + +#include "ibpkey.h" +#include "objsec.h" + +#define SEL_PKEY_HASH_SIZE 256 +#define SEL_PKEY_HASH_BKT_LIMIT 16 + +struct sel_ib_pkey_bkt { + int size; + struct list_head list; +}; + +struct sel_ib_pkey { + struct pkey_security_struct psec; + struct list_head list; + struct rcu_head rcu; +}; + +static LIST_HEAD(sel_ib_pkey_list); +static DEFINE_SPINLOCK(sel_ib_pkey_lock); +static struct sel_ib_pkey_bkt sel_ib_pkey_hash[SEL_PKEY_HASH_SIZE]; + +/** + * sel_ib_pkey_hashfn - Hashing function for the pkey table + * @pkey: pkey number + * + * Description: + * This is the hashing function for the pkey table, it returns the bucket + * number for the given pkey. + * + */ +static unsigned int sel_ib_pkey_hashfn(u16 pkey) +{ + return (pkey & (SEL_PKEY_HASH_SIZE - 1)); +} + +/** + * sel_ib_pkey_find - Search for a pkey record + * @subnet_prefix: subnet_prefix + * @pkey_num: pkey_num + * + * Description: + * Search the pkey table and return the matching record. If an entry + * can not be found in the table return NULL. + * + */ +static struct sel_ib_pkey *sel_ib_pkey_find(u64 subnet_prefix, u16 pkey_num) +{ + unsigned int idx; + struct sel_ib_pkey *pkey; + + idx = sel_ib_pkey_hashfn(pkey_num); + list_for_each_entry_rcu(pkey, &sel_ib_pkey_hash[idx].list, list) { + if (pkey->psec.pkey == pkey_num && + pkey->psec.subnet_prefix == subnet_prefix) + return pkey; + } + + return NULL; +} + +/** + * sel_ib_pkey_insert - Insert a new pkey into the table + * @pkey: the new pkey record + * + * Description: + * Add a new pkey record to the hash table. + * + */ +static void sel_ib_pkey_insert(struct sel_ib_pkey *pkey) +{ + unsigned int idx; + + /* we need to impose a limit on the growth of the hash table so check + * this bucket to make sure it is within the specified bounds + */ + idx = sel_ib_pkey_hashfn(pkey->psec.pkey); + list_add_rcu(&pkey->list, &sel_ib_pkey_hash[idx].list); + if (sel_ib_pkey_hash[idx].size == SEL_PKEY_HASH_BKT_LIMIT) { + struct sel_ib_pkey *tail; + + tail = list_entry( + rcu_dereference_protected( + sel_ib_pkey_hash[idx].list.prev, + lockdep_is_held(&sel_ib_pkey_lock)), + struct sel_ib_pkey, list); + list_del_rcu(&tail->list); + kfree_rcu(tail, rcu); + } else { + sel_ib_pkey_hash[idx].size++; + } +} + +/** + * sel_ib_pkey_sid_slow - Lookup the SID of a pkey using the policy + * @subnet_prefix: subnet prefix + * @pkey_num: pkey number + * @sid: pkey SID + * + * Description: + * This function determines the SID of a pkey by querying the security + * policy. The result is added to the pkey table to speedup future + * queries. Returns zero on success, negative values on failure. + * + */ +static int sel_ib_pkey_sid_slow(u64 subnet_prefix, u16 pkey_num, u32 *sid) +{ + int ret; + struct sel_ib_pkey *pkey; + struct sel_ib_pkey *new = NULL; + unsigned long flags; + + spin_lock_irqsave(&sel_ib_pkey_lock, flags); + pkey = sel_ib_pkey_find(subnet_prefix, pkey_num); + if (pkey) { + *sid = pkey->psec.sid; + spin_unlock_irqrestore(&sel_ib_pkey_lock, flags); + return 0; + } + + ret = security_ib_pkey_sid(subnet_prefix, pkey_num, sid); + if (ret) + goto out; + + /* If this memory allocation fails still return 0. The SID + * is valid, it just won't be added to the cache. 
+ */ + new = kzalloc(sizeof(*new), GFP_ATOMIC); + if (!new) + goto out; + + new->psec.subnet_prefix = subnet_prefix; + new->psec.pkey = pkey_num; + new->psec.sid = *sid; + sel_ib_pkey_insert(new); + +out: + spin_unlock_irqrestore(&sel_ib_pkey_lock, flags); + return ret; +} + +/** + * sel_ib_pkey_sid - Lookup the SID of a PKEY + * @subnet_prefix: subnet_prefix + * @pkey_num: pkey number + * @sid: pkey SID + * + * Description: + * This function determines the SID of a PKEY using the fastest method + * possible. First the pkey table is queried, but if an entry can't be found + * then the policy is queried and the result is added to the table to speedup + * future queries. Returns zero on success, negative values on failure. + * + */ +int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *sid) +{ + struct sel_ib_pkey *pkey; + + rcu_read_lock(); + pkey = sel_ib_pkey_find(subnet_prefix, pkey_num); + if (pkey) { + *sid = pkey->psec.sid; + rcu_read_unlock(); + return 0; + } + rcu_read_unlock(); + + return sel_ib_pkey_sid_slow(subnet_prefix, pkey_num, sid); +} + +/** + * sel_ib_pkey_flush - Flush the entire pkey table + * + * Description: + * Remove all entries from the pkey table + * + */ +void sel_ib_pkey_flush(void) +{ + unsigned int idx; + struct sel_ib_pkey *pkey, *pkey_tmp; + unsigned long flags; + + spin_lock_irqsave(&sel_ib_pkey_lock, flags); + for (idx = 0; idx < SEL_PKEY_HASH_SIZE; idx++) { + list_for_each_entry_safe(pkey, pkey_tmp, + &sel_ib_pkey_hash[idx].list, list) { + list_del_rcu(&pkey->list); + kfree_rcu(pkey, rcu); + } + sel_ib_pkey_hash[idx].size = 0; + } + spin_unlock_irqrestore(&sel_ib_pkey_lock, flags); +} + +static __init int sel_ib_pkey_init(void) +{ + int iter; + + if (!selinux_enabled) + return 0; + + for (iter = 0; iter < SEL_PKEY_HASH_SIZE; iter++) { + INIT_LIST_HEAD(&sel_ib_pkey_hash[iter].list); + sel_ib_pkey_hash[iter].size = 0; + } + + return 0; +} + +subsys_initcall(sel_ib_pkey_init); diff --git a/security/selinux/include/classmap.h b/security/selinux/include/classmap.h index 1e0cc9b5de20..b9fe3434b036 100644 --- a/security/selinux/include/classmap.h +++ b/security/selinux/include/classmap.h @@ -1,7 +1,7 @@ #include <linux/capability.h> #define COMMON_FILE_SOCK_PERMS "ioctl", "read", "write", "create", \ - "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append" + "getattr", "setattr", "lock", "relabelfrom", "relabelto", "append", "map" #define COMMON_FILE_PERMS COMMON_FILE_SOCK_PERMS, "unlink", "link", \ "rename", "execute", "quotaon", "mounton", "audit_access", \ @@ -231,6 +231,10 @@ struct security_class_mapping secclass_map[] = { { COMMON_SOCK_PERMS, NULL } }, { "smc_socket", { COMMON_SOCK_PERMS, NULL } }, + { "infiniband_pkey", + { "access", NULL } }, + { "infiniband_endport", + { "manage_subnet", NULL } }, { NULL } }; diff --git a/security/selinux/include/ibpkey.h b/security/selinux/include/ibpkey.h new file mode 100644 index 000000000000..b17a19e348e6 --- /dev/null +++ b/security/selinux/include/ibpkey.h @@ -0,0 +1,31 @@ +/* + * pkey table + * + * SELinux must keep a mapping of pkeys to labels/SIDs. This + * mapping is maintained as part of the normal policy but a fast cache is + * needed to reduce the lookup overhead. + * + */ + +/* + * (c) Mellanox Technologies, 2016 + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + */ + +#ifndef _SELINUX_IB_PKEY_H +#define _SELINUX_IB_PKEY_H + +void sel_ib_pkey_flush(void); + +int sel_ib_pkey_sid(u64 subnet_prefix, u16 pkey, u32 *sid); + +#endif diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h index c03cdcd12a3b..6ebc61e370ff 100644 --- a/security/selinux/include/objsec.h +++ b/security/selinux/include/objsec.h @@ -10,6 +10,7 @@ * * Copyright (C) 2001,2002 Networks Associates Technology, Inc. * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com> + * Copyright (C) 2016 Mellanox Technologies * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2, @@ -139,6 +140,16 @@ struct key_security_struct { u32 sid; /* SID of key */ }; +struct ib_security_struct { + u32 sid; /* SID of the queue pair or MAD agent */ +}; + +struct pkey_security_struct { + u64 subnet_prefix; /* Port subnet prefix */ + u16 pkey; /* PKey number */ + u32 sid; /* SID of pkey */ +}; + extern unsigned int selinux_checkreqprot; #endif /* _SELINUX_OBJSEC_H_ */ diff --git a/security/selinux/include/security.h b/security/selinux/include/security.h index f979c35e037e..e91f08c16c0b 100644 --- a/security/selinux/include/security.h +++ b/security/selinux/include/security.h @@ -36,10 +36,11 @@ #define POLICYDB_VERSION_DEFAULT_TYPE 28 #define POLICYDB_VERSION_CONSTRAINT_NAMES 29 #define POLICYDB_VERSION_XPERMS_IOCTL 30 +#define POLICYDB_VERSION_INFINIBAND 31 /* Range of policy versions we understand*/ #define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE -#define POLICYDB_VERSION_MAX POLICYDB_VERSION_XPERMS_IOCTL +#define POLICYDB_VERSION_MAX POLICYDB_VERSION_INFINIBAND /* Mask for just the mount related flags */ #define SE_MNTMASK 0x0f @@ -76,6 +77,8 @@ enum { }; #define POLICYDB_CAPABILITY_MAX (__POLICYDB_CAPABILITY_MAX - 1) +extern char *selinux_policycap_names[__POLICYDB_CAPABILITY_MAX]; + extern int selinux_policycap_netpeer; extern int selinux_policycap_openperm; extern int selinux_policycap_extsockclass; @@ -178,6 +181,10 @@ int security_get_user_sids(u32 callsid, char *username, int security_port_sid(u8 protocol, u16 port, u32 *out_sid); +int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid); + +int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid); + int security_netif_sid(char *name, u32 *if_sid); int security_node_sid(u16 domain, void *addr, u32 addrlen, diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c index 50062e70140d..9010a3632d6f 100644 --- a/security/selinux/selinuxfs.c +++ b/security/selinux/selinuxfs.c @@ -41,15 +41,6 @@ #include "objsec.h" #include "conditional.h" -/* Policy capability filenames */ -static char *policycap_names[] = { - "network_peer_controls", - "open_perms", - "extended_socket_class", - "always_check_network", - "cgroup_seclabel" -}; - unsigned int selinux_checkreqprot = CONFIG_SECURITY_SELINUX_CHECKREQPROT_VALUE; static int __init checkreqprot_setup(char *str) @@ -163,6 +154,8 @@ static ssize_t sel_write_enforce(struct file *file, const char __user *buf, avc_ss_reset(0); selnl_notify_setenforce(selinux_enforcing); selinux_status_update_setenforce(selinux_enforcing); + if (!selinux_enforcing) + call_lsm_notifier(LSM_POLICY_CHANGE, 
NULL); } length = count; out: @@ -1750,9 +1743,9 @@ static int sel_make_policycap(void) sel_remove_entries(policycap_dir); for (iter = 0; iter <= POLICYDB_CAPABILITY_MAX; iter++) { - if (iter < ARRAY_SIZE(policycap_names)) + if (iter < ARRAY_SIZE(selinux_policycap_names)) dentry = d_alloc_name(policycap_dir, - policycap_names[iter]); + selinux_policycap_names[iter]); else dentry = d_alloc_name(policycap_dir, "unknown"); diff --git a/security/selinux/ss/ebitmap.c b/security/selinux/ss/ebitmap.c index 9db4709a6877..ad38299164c3 100644 --- a/security/selinux/ss/ebitmap.c +++ b/security/selinux/ss/ebitmap.c @@ -24,6 +24,8 @@ #define BITS_PER_U64 (sizeof(u64) * 8) +static struct kmem_cache *ebitmap_node_cachep; + int ebitmap_cmp(struct ebitmap *e1, struct ebitmap *e2) { struct ebitmap_node *n1, *n2; @@ -54,7 +56,7 @@ int ebitmap_cpy(struct ebitmap *dst, struct ebitmap *src) n = src->node; prev = NULL; while (n) { - new = kzalloc(sizeof(*new), GFP_ATOMIC); + new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC); if (!new) { ebitmap_destroy(dst); return -ENOMEM; @@ -162,7 +164,7 @@ int ebitmap_netlbl_import(struct ebitmap *ebmap, if (e_iter == NULL || offset >= e_iter->startbit + EBITMAP_SIZE) { e_prev = e_iter; - e_iter = kzalloc(sizeof(*e_iter), GFP_ATOMIC); + e_iter = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC); if (e_iter == NULL) goto netlbl_import_failure; e_iter->startbit = offset - (offset % EBITMAP_SIZE); @@ -288,7 +290,7 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value) prev->next = n->next; else e->node = n->next; - kfree(n); + kmem_cache_free(ebitmap_node_cachep, n); } return 0; } @@ -299,7 +301,7 @@ int ebitmap_set_bit(struct ebitmap *e, unsigned long bit, int value) if (!value) return 0; - new = kzalloc(sizeof(*new), GFP_ATOMIC); + new = kmem_cache_zalloc(ebitmap_node_cachep, GFP_ATOMIC); if (!new) return -ENOMEM; @@ -332,7 +334,7 @@ void ebitmap_destroy(struct ebitmap *e) while (n) { temp = n; n = n->next; - kfree(temp); + kmem_cache_free(ebitmap_node_cachep, temp); } e->highbit = 0; @@ -400,7 +402,7 @@ int ebitmap_read(struct ebitmap *e, void *fp) if (!n || startbit >= n->startbit + EBITMAP_SIZE) { struct ebitmap_node *tmp; - tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); + tmp = kmem_cache_zalloc(ebitmap_node_cachep, GFP_KERNEL); if (!tmp) { printk(KERN_ERR "SELinux: ebitmap: out of memory\n"); @@ -519,3 +521,15 @@ int ebitmap_write(struct ebitmap *e, void *fp) } return 0; } + +void ebitmap_cache_init(void) +{ + ebitmap_node_cachep = kmem_cache_create("ebitmap_node", + sizeof(struct ebitmap_node), + 0, SLAB_PANIC, NULL); +} + +void ebitmap_cache_destroy(void) +{ + kmem_cache_destroy(ebitmap_node_cachep); +} diff --git a/security/selinux/ss/ebitmap.h b/security/selinux/ss/ebitmap.h index 9637b8c71085..6d5a9ac4251f 100644 --- a/security/selinux/ss/ebitmap.h +++ b/security/selinux/ss/ebitmap.h @@ -130,6 +130,9 @@ void ebitmap_destroy(struct ebitmap *e); int ebitmap_read(struct ebitmap *e, void *fp); int ebitmap_write(struct ebitmap *e, void *fp); +void ebitmap_cache_init(void); +void ebitmap_cache_destroy(void); + #ifdef CONFIG_NETLABEL int ebitmap_netlbl_export(struct ebitmap *ebmap, struct netlbl_lsm_catmap **catmap); diff --git a/security/selinux/ss/policydb.c b/security/selinux/ss/policydb.c index 0080122760ad..aa6500abb178 100644 --- a/security/selinux/ss/policydb.c +++ b/security/selinux/ss/policydb.c @@ -17,6 +17,11 @@ * * Added support for the policy capability bitmap * + * Update: Mellanox Techonologies + * + * Added Infiniband support + * + * 
Copyright (C) 2016 Mellanox Techonologies * Copyright (C) 2007 Hewlett-Packard Development Company, L.P. * Copyright (C) 2004-2005 Trusted Computer Solutions, Inc. * Copyright (C) 2003 - 2004 Tresys Technology, LLC @@ -76,81 +81,86 @@ static struct policydb_compat_info policydb_compat[] = { { .version = POLICYDB_VERSION_BASE, .sym_num = SYM_NUM - 3, - .ocon_num = OCON_NUM - 1, + .ocon_num = OCON_NUM - 3, }, { .version = POLICYDB_VERSION_BOOL, .sym_num = SYM_NUM - 2, - .ocon_num = OCON_NUM - 1, + .ocon_num = OCON_NUM - 3, }, { .version = POLICYDB_VERSION_IPV6, .sym_num = SYM_NUM - 2, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_NLCLASS, .sym_num = SYM_NUM - 2, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_MLS, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_AVTAB, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_RANGETRANS, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_POLCAP, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_PERMISSIVE, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_BOUNDARY, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_FILENAME_TRANS, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_ROLETRANS, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_NEW_OBJECT_DEFAULTS, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_DEFAULT_TYPE, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_CONSTRAINT_NAMES, .sym_num = SYM_NUM, - .ocon_num = OCON_NUM, + .ocon_num = OCON_NUM - 2, }, { .version = POLICYDB_VERSION_XPERMS_IOCTL, .sym_num = SYM_NUM, + .ocon_num = OCON_NUM - 2, + }, + { + .version = POLICYDB_VERSION_INFINIBAND, + .sym_num = SYM_NUM, .ocon_num = OCON_NUM, }, }; @@ -538,34 +548,30 @@ static int policydb_index(struct policydb *p) symtab_hash_eval(p->symtab); #endif - rc = -ENOMEM; p->class_val_to_struct = kcalloc(p->p_classes.nprim, sizeof(*p->class_val_to_struct), GFP_KERNEL); if (!p->class_val_to_struct) - goto out; + return -ENOMEM; - rc = -ENOMEM; p->role_val_to_struct = kcalloc(p->p_roles.nprim, sizeof(*p->role_val_to_struct), GFP_KERNEL); if (!p->role_val_to_struct) - goto out; + return -ENOMEM; - rc = -ENOMEM; p->user_val_to_struct = kcalloc(p->p_users.nprim, sizeof(*p->user_val_to_struct), GFP_KERNEL); if (!p->user_val_to_struct) - goto out; + return -ENOMEM; /* Yes, I want the sizeof the pointer, not the structure */ - rc = -ENOMEM; p->type_val_to_struct_array = flex_array_alloc(sizeof(struct type_datum *), p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); if (!p->type_val_to_struct_array) - goto out; + return -ENOMEM; rc = flex_array_prealloc(p->type_val_to_struct_array, 0, p->p_types.nprim, GFP_KERNEL | __GFP_ZERO); @@ -577,12 +583,11 @@ static int policydb_index(struct policydb *p) goto out; for (i = 0; i < SYM_NUM; i++) { - rc = -ENOMEM; p->sym_val_to_name[i] = flex_array_alloc(sizeof(char *), p->symtab[i].nprim, GFP_KERNEL | __GFP_ZERO); if (!p->sym_val_to_name[i]) - goto out; + return -ENOMEM; rc = 
flex_array_prealloc(p->sym_val_to_name[i], 0, p->symtab[i].nprim, @@ -2211,6 +2216,51 @@ static int ocontext_read(struct policydb *p, struct policydb_compat_info *info, goto out; break; } + case OCON_IBPKEY: + rc = next_entry(nodebuf, fp, sizeof(u32) * 4); + if (rc) + goto out; + + c->u.ibpkey.subnet_prefix = be64_to_cpu(*((__be64 *)nodebuf)); + + if (nodebuf[2] > 0xffff || + nodebuf[3] > 0xffff) { + rc = -EINVAL; + goto out; + } + + c->u.ibpkey.low_pkey = le32_to_cpu(nodebuf[2]); + c->u.ibpkey.high_pkey = le32_to_cpu(nodebuf[3]); + + rc = context_read_and_validate(&c->context[0], + p, + fp); + if (rc) + goto out; + break; + case OCON_IBENDPORT: + rc = next_entry(buf, fp, sizeof(u32) * 2); + if (rc) + goto out; + len = le32_to_cpu(buf[0]); + + rc = str_read(&c->u.ibendport.dev_name, GFP_KERNEL, fp, len); + if (rc) + goto out; + + if (buf[1] > 0xff || buf[1] == 0) { + rc = -EINVAL; + goto out; + } + + c->u.ibendport.port = le32_to_cpu(buf[1]); + + rc = context_read_and_validate(&c->context[0], + p, + fp); + if (rc) + goto out; + break; } } } @@ -3140,6 +3190,33 @@ static int ocontext_write(struct policydb *p, struct policydb_compat_info *info, if (rc) return rc; break; + case OCON_IBPKEY: + *((__be64 *)nodebuf) = cpu_to_be64(c->u.ibpkey.subnet_prefix); + + nodebuf[2] = cpu_to_le32(c->u.ibpkey.low_pkey); + nodebuf[3] = cpu_to_le32(c->u.ibpkey.high_pkey); + + rc = put_entry(nodebuf, sizeof(u32), 4, fp); + if (rc) + return rc; + rc = context_write(p, &c->context[0], fp); + if (rc) + return rc; + break; + case OCON_IBENDPORT: + len = strlen(c->u.ibendport.dev_name); + buf[0] = cpu_to_le32(len); + buf[1] = cpu_to_le32(c->u.ibendport.port); + rc = put_entry(buf, sizeof(u32), 2, fp); + if (rc) + return rc; + rc = put_entry(c->u.ibendport.dev_name, 1, len, fp); + if (rc) + return rc; + rc = context_write(p, &c->context[0], fp); + if (rc) + return rc; + break; } } } diff --git a/security/selinux/ss/policydb.h b/security/selinux/ss/policydb.h index 725d5945a97e..5d23eed35fa7 100644 --- a/security/selinux/ss/policydb.h +++ b/security/selinux/ss/policydb.h @@ -187,6 +187,15 @@ struct ocontext { u32 addr[4]; u32 mask[4]; } node6; /* IPv6 node information */ + struct { + u64 subnet_prefix; + u16 low_pkey; + u16 high_pkey; + } ibpkey; + struct { + char *dev_name; + u8 port; + } ibendport; } u; union { u32 sclass; /* security class for genfs */ @@ -215,14 +224,16 @@ struct genfs { #define SYM_NUM 8 /* object context array indices */ -#define OCON_ISID 0 /* initial SIDs */ -#define OCON_FS 1 /* unlabeled file systems */ -#define OCON_PORT 2 /* TCP and UDP port numbers */ -#define OCON_NETIF 3 /* network interfaces */ -#define OCON_NODE 4 /* nodes */ -#define OCON_FSUSE 5 /* fs_use */ -#define OCON_NODE6 6 /* IPv6 nodes */ -#define OCON_NUM 7 +#define OCON_ISID 0 /* initial SIDs */ +#define OCON_FS 1 /* unlabeled file systems */ +#define OCON_PORT 2 /* TCP and UDP port numbers */ +#define OCON_NETIF 3 /* network interfaces */ +#define OCON_NODE 4 /* nodes */ +#define OCON_FSUSE 5 /* fs_use */ +#define OCON_NODE6 6 /* IPv6 nodes */ +#define OCON_IBPKEY 7 /* Infiniband PKeys */ +#define OCON_IBENDPORT 8 /* Infiniband end ports */ +#define OCON_NUM 9 /* The policy database */ struct policydb { diff --git a/security/selinux/ss/services.c b/security/selinux/ss/services.c index 60d9b0252321..2f02fa67ec2e 100644 --- a/security/selinux/ss/services.c +++ b/security/selinux/ss/services.c @@ -70,6 +70,15 @@ #include "ebitmap.h" #include "audit.h" +/* Policy capability names */ +char 
*selinux_policycap_names[__POLICYDB_CAPABILITY_MAX] = { + "network_peer_controls", + "open_perms", + "extended_socket_class", + "always_check_network", + "cgroup_seclabel" +}; + int selinux_policycap_netpeer; int selinux_policycap_openperm; int selinux_policycap_extsockclass; @@ -1986,6 +1995,9 @@ bad: static void security_load_policycaps(void) { + unsigned int i; + struct ebitmap_node *node; + selinux_policycap_netpeer = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_NETPEER); selinux_policycap_openperm = ebitmap_get_bit(&policydb.policycaps, @@ -1997,6 +2009,17 @@ static void security_load_policycaps(void) selinux_policycap_cgroupseclabel = ebitmap_get_bit(&policydb.policycaps, POLICYDB_CAPABILITY_CGROUPSECLABEL); + + for (i = 0; i < ARRAY_SIZE(selinux_policycap_names); i++) + pr_info("SELinux: policy capability %s=%d\n", + selinux_policycap_names[i], + ebitmap_get_bit(&policydb.policycaps, i)); + + ebitmap_for_each_positive_bit(&policydb.policycaps, node, i) { + if (i >= ARRAY_SIZE(selinux_policycap_names)) + pr_info("SELinux: unknown policy capability %u\n", + i); + } } static int security_preserve_bools(struct policydb *p); @@ -2031,9 +2054,11 @@ int security_load_policy(void *data, size_t len) if (!ss_initialized) { avtab_cache_init(); + ebitmap_cache_init(); rc = policydb_read(&policydb, fp); if (rc) { avtab_cache_destroy(); + ebitmap_cache_destroy(); goto out; } @@ -2044,6 +2069,7 @@ int security_load_policy(void *data, size_t len) if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); + ebitmap_cache_destroy(); goto out; } @@ -2051,6 +2077,7 @@ int security_load_policy(void *data, size_t len) if (rc) { policydb_destroy(&policydb); avtab_cache_destroy(); + ebitmap_cache_destroy(); goto out; } @@ -2210,6 +2237,87 @@ out: } /** + * security_pkey_sid - Obtain the SID for a pkey. + * @subnet_prefix: Subnet Prefix + * @pkey_num: pkey number + * @out_sid: security identifier + */ +int security_ib_pkey_sid(u64 subnet_prefix, u16 pkey_num, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + read_lock(&policy_rwlock); + + c = policydb.ocontexts[OCON_IBPKEY]; + while (c) { + if (c->u.ibpkey.low_pkey <= pkey_num && + c->u.ibpkey.high_pkey >= pkey_num && + c->u.ibpkey.subnet_prefix == subnet_prefix) + break; + + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, + &c->context[0], + &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else + *out_sid = SECINITSID_UNLABELED; + +out: + read_unlock(&policy_rwlock); + return rc; +} + +/** + * security_ib_endport_sid - Obtain the SID for a subnet management interface. + * @dev_name: device name + * @port: port number + * @out_sid: security identifier + */ +int security_ib_endport_sid(const char *dev_name, u8 port_num, u32 *out_sid) +{ + struct ocontext *c; + int rc = 0; + + read_lock(&policy_rwlock); + + c = policydb.ocontexts[OCON_IBENDPORT]; + while (c) { + if (c->u.ibendport.port == port_num && + !strncmp(c->u.ibendport.dev_name, + dev_name, + IB_DEVICE_NAME_MAX)) + break; + + c = c->next; + } + + if (c) { + if (!c->sid[0]) { + rc = sidtab_context_to_sid(&sidtab, + &c->context[0], + &c->sid[0]); + if (rc) + goto out; + } + *out_sid = c->sid[0]; + } else + *out_sid = SECINITSID_UNLABELED; + +out: + read_unlock(&policy_rwlock); + return rc; +} + +/** * security_netif_sid - Obtain the SID for a network interface. 
* @name: interface name * @if_sid: interface SID diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c index f6915f257486..c5f436b15d19 100644 --- a/security/selinux/ss/sidtab.c +++ b/security/selinux/ss/sidtab.c @@ -32,13 +32,11 @@ int sidtab_init(struct sidtab *s) int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) { - int hvalue, rc = 0; + int hvalue; struct sidtab_node *prev, *cur, *newnode; - if (!s) { - rc = -ENOMEM; - goto out; - } + if (!s) + return -ENOMEM; hvalue = SIDTAB_HASH(sid); prev = NULL; @@ -48,21 +46,17 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) cur = cur->next; } - if (cur && sid == cur->sid) { - rc = -EEXIST; - goto out; - } + if (cur && sid == cur->sid) + return -EEXIST; newnode = kmalloc(sizeof(*newnode), GFP_ATOMIC); - if (!newnode) { - rc = -ENOMEM; - goto out; - } + if (!newnode) + return -ENOMEM; + newnode->sid = sid; if (context_cpy(&newnode->context, context)) { kfree(newnode); - rc = -ENOMEM; - goto out; + return -ENOMEM; } if (prev) { @@ -78,8 +72,7 @@ int sidtab_insert(struct sidtab *s, u32 sid, struct context *context) s->nel++; if (sid >= s->next_sid) s->next_sid = sid + 1; -out: - return rc; + return 0; } static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force) |
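
The new security/selinux/ibpkey.c cache above keys records on the low bits of the pkey and caps each hash bucket at SEL_PKEY_HASH_BKT_LIMIT entries, evicting the oldest record once a bucket is full. A minimal user-space sketch of that bounded-bucket pattern follows; it is illustrative only (a plain singly-linked list stands in for the kernel's RCU-protected list, and the cache_find()/cache_insert() names are invented for the example):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define HASH_SIZE 256   /* number of buckets, a power of two */
#define BKT_LIMIT  16   /* per-bucket cap, as in SEL_PKEY_HASH_BKT_LIMIT */

struct pkey_entry {
        uint64_t subnet_prefix;
        uint16_t pkey;
        uint32_t sid;
        struct pkey_entry *next;
};

struct pkey_bucket {
        int size;
        struct pkey_entry *head;
};

static struct pkey_bucket cache[HASH_SIZE];

/* Same hashing idea as sel_ib_pkey_hashfn(): low bits of the pkey. */
static unsigned int hashfn(uint16_t pkey)
{
        return pkey & (HASH_SIZE - 1);
}

static struct pkey_entry *cache_find(uint64_t prefix, uint16_t pkey)
{
        struct pkey_entry *e;

        for (e = cache[hashfn(pkey)].head; e; e = e->next)
                if (e->pkey == pkey && e->subnet_prefix == prefix)
                        return e;
        return NULL;
}

/* Insert at the head; drop the oldest (tail) entry once the bucket is full. */
static void cache_insert(uint64_t prefix, uint16_t pkey, uint32_t sid)
{
        struct pkey_bucket *b = &cache[hashfn(pkey)];
        struct pkey_entry *e = calloc(1, sizeof(*e));

        if (!e)
                return;         /* a cache miss stays a miss, lookups still work */
        e->subnet_prefix = prefix;
        e->pkey = pkey;
        e->sid = sid;
        e->next = b->head;
        b->head = e;

        if (b->size == BKT_LIMIT) {
                struct pkey_entry **pp = &b->head;

                while ((*pp)->next)     /* walk to the tail, the oldest entry */
                        pp = &(*pp)->next;
                free(*pp);
                *pp = NULL;
        } else {
                b->size++;
        }
}

int main(void)
{
        cache_insert(0xfe80000000000000ULL, 0x8001, 42);
        struct pkey_entry *e = cache_find(0xfe80000000000000ULL, 0x8001);

        printf("sid=%u\n", e ? e->sid : 0);
        return 0;
}

Because insertion happens at the head and eviction at the tail, recently used pkeys stay cached while memory use stays bounded even if a peer cycles through many distinct pkey values.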
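
sel_write_enforce() above raises LSM_POLICY_CHANGE through call_lsm_notifier() when enforcement is switched off, and selinux_init() registers selinux_lsm_notifier_avc_callback as an AVC reset callback, so subsystems holding cached security state (the IB core in this series) can revalidate it. A rough module-style sketch of such a listener, assuming the register_lsm_notifier()/unregister_lsm_notifier() interface from the companion LSM framework change in this series (the callback body is invented):

#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/security.h>

/* Re-validate any cached security state when the LSM policy changes. */
static int example_lsm_event(struct notifier_block *nb,
                             unsigned long event, void *data)
{
        if (event == LSM_POLICY_CHANGE)
                pr_info("example: LSM policy changed, flushing cached SIDs\n");
        return NOTIFY_OK;
}

static struct notifier_block example_lsm_nb = {
        .notifier_call = example_lsm_event,
};

static int __init example_init(void)
{
        return register_lsm_notifier(&example_lsm_nb);
}

static void __exit example_exit(void)
{
        unregister_lsm_notifier(&example_lsm_nb);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");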
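
The OCON_IBPKEY reader and writer in policydb.c above exchange each record as four 32-bit words: the first two carry the 64-bit subnet prefix in network byte order, the last two carry the low/high pkey as little-endian values that must not exceed 0xffff. A host-side sketch of parsing that 16-byte layout (standalone code, not the kernel reader; the struct and function names are invented for illustration):

#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ibpkey_ocon {
        uint64_t subnet_prefix; /* host order after parsing */
        uint16_t low_pkey;
        uint16_t high_pkey;
};

/*
 * Parse the 16-byte on-disk record: a big-endian 64-bit subnet prefix
 * followed by two little-endian 32-bit words that must each fit in a
 * 16-bit pkey (mirroring the nodebuf[2]/nodebuf[3] > 0xffff check).
 */
static int parse_ibpkey(const unsigned char buf[16], struct ibpkey_ocon *out)
{
        uint64_t prefix_be;
        uint32_t lo, hi;

        memcpy(&prefix_be, buf, sizeof(prefix_be));
        memcpy(&lo, buf + 8, sizeof(lo));
        memcpy(&hi, buf + 12, sizeof(hi));

        lo = le32toh(lo);
        hi = le32toh(hi);
        if (lo > 0xffff || hi > 0xffff)
                return -1;      /* corrupt policy: pkeys are only 16 bits */

        out->subnet_prefix = be64toh(prefix_be);
        out->low_pkey = (uint16_t)lo;
        out->high_pkey = (uint16_t)hi;
        return 0;
}

int main(void)
{
        /* fe80:: default subnet prefix, pkey range 0x0001-0x7fff */
        unsigned char rec[16] = {
                0xfe, 0x80, 0, 0, 0, 0, 0, 0,   /* big-endian prefix */
                0x01, 0, 0, 0,                  /* le32 low pkey  */
                0xff, 0x7f, 0, 0,               /* le32 high pkey */
        };
        struct ibpkey_ocon o;

        if (!parse_ibpkey(rec, &o))
                printf("prefix=%llx pkeys=%#x-%#x\n",
                       (unsigned long long)o.subnet_prefix,
                       o.low_pkey, o.high_pkey);
        return 0;
}

The subnet prefix stays in network byte order, as it appears inside a GID, while the pkey bounds are ordinary little-endian policy integers, hence the mixed byte orders in one record.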
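
security_ib_pkey_sid() and security_ib_endport_sid() above both do a first-match walk over the relevant ocontext list and fall back to the unlabeled SID when nothing matches, so fabrics with no ibpkey/ibendport contexts keep working under whatever rules govern unlabeled objects. A simplified sketch of the pkey variant (the table contents, SID values, and the SECINITSID_UNLABELED placeholder are made up for the example):

#include <stdint.h>
#include <stdio.h>

#define SECINITSID_UNLABELED 3  /* placeholder value for the sketch */

struct pkey_range {
        uint64_t subnet_prefix;
        uint16_t low_pkey, high_pkey;
        uint32_t sid;
};

/*
 * First-match walk over the policy's ibpkey ocontexts; anything not
 * covered by a range falls back to the unlabeled SID, as in
 * security_ib_pkey_sid().
 */
static uint32_t pkey_sid(const struct pkey_range *tbl, int n,
                         uint64_t prefix, uint16_t pkey)
{
        for (int i = 0; i < n; i++)
                if (tbl[i].subnet_prefix == prefix &&
                    tbl[i].low_pkey <= pkey && pkey <= tbl[i].high_pkey)
                        return tbl[i].sid;
        return SECINITSID_UNLABELED;
}

int main(void)
{
        const struct pkey_range tbl[] = {
                { 0xfe80000000000000ULL, 0x0000, 0x7fff, 100 },
                { 0xfe80000000000000ULL, 0x8000, 0xffff, 101 },
        };

        printf("%u\n", pkey_sid(tbl, 2, 0xfe80000000000000ULL, 0x8001)); /* 101 */
        printf("%u\n", pkey_sid(tbl, 2, 0x1234000000000000ULL, 0x0001)); /* unlabeled */
        return 0;
}

In the kernel the resulting SID is then cached by sel_ib_pkey_sid() and used as the target of an infiniband_pkey:access check against the QP or MAD agent SID recorded by selinux_ib_alloc_security() at allocation time.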