// SPDX-License-Identifier: GPL-2.0
/*
 * Network interface table.
 *
 * Network interfaces (devices) do not have a security field, so we
 * maintain a table mapping network interfaces to SELinux security
 * identifiers (SIDs).
 *
 * Author: James Morris <jmorris@redhat.com>
 *
 * Copyright (C) 2003 Red Hat, Inc., James Morris <jmorris@redhat.com>
 * Copyright (C) 2007 Hewlett-Packard Development Company, L.P.
 *                    Paul Moore <paul@paul-moore.com>
 */
#include <linux/init.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/netdevice.h>
#include <linux/rcupdate.h>
#include <net/net_namespace.h>

#include "security.h"
#include "objsec.h"
#include "netif.h"

#define SEL_NETIF_HASH_SIZE	64
#define SEL_NETIF_HASH_MAX	1024

struct sel_netif {
	struct list_head list;
	struct netif_security_struct nsec;
	struct rcu_head rcu_head;
};

static u32 sel_netif_total;
static DEFINE_SPINLOCK(sel_netif_lock);
static struct list_head sel_netif_hash[SEL_NETIF_HASH_SIZE];
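
/*
 * Concurrency note (a summary of the code below, not upstream text):
 * lookups walk the per-bucket lists under rcu_read_lock() alone, while
 * all table updates are serialized by sel_netif_lock.  Entries are
 * freed with kfree_rcu(), so a concurrent reader never touches freed
 * memory, and the table is capped at SEL_NETIF_HASH_MAX entries to
 * bound its memory use.
 */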

/**
 * sel_netif_hashfn - Hashing function for the interface table
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * This is the hashing function for the network interface table, it returns
 * the bucket number for the given interface.  The mask works because
 * SEL_NETIF_HASH_SIZE is a power of two.
 *
 */
static inline u32 sel_netif_hashfn(const struct net *ns, int ifindex)
{
	return (((uintptr_t)ns + ifindex) & (SEL_NETIF_HASH_SIZE - 1));
}

/**
 * sel_netif_find - Search for an interface record
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * Search the network interface table and return the record matching both
 * @ns and @ifindex.  If an entry can not be found in the table return NULL.
 *
 */
static inline struct sel_netif *sel_netif_find(const struct net *ns,
					       int ifindex)
{
	int idx = sel_netif_hashfn(ns, ifindex);
	struct sel_netif *netif;

	list_for_each_entry_rcu(netif, &sel_netif_hash[idx], list)
		if (net_eq(netif->nsec.ns, ns) &&
		    netif->nsec.ifindex == ifindex)
			return netif;

	return NULL;
}

/**
 * sel_netif_insert - Insert a new interface into the table
 * @netif: the new interface record
 *
 * Description:
 * Add a new interface record to the network interface hash table; the
 * caller must hold sel_netif_lock.  Returns zero on success, negative
 * values on failure.
 *
 */
static int sel_netif_insert(struct sel_netif *netif)
{
	int idx;

	if (sel_netif_total >= SEL_NETIF_HASH_MAX)
		return -ENOSPC;

	idx = sel_netif_hashfn(netif->nsec.ns, netif->nsec.ifindex);
	list_add_rcu(&netif->list, &sel_netif_hash[idx]);
	sel_netif_total++;

	return 0;
}

/**
 * sel_netif_destroy - Remove an interface record from the table
 * @netif: the existing interface record
 *
 * Description:
 * Remove an existing interface record from the network interface table;
 * the caller must hold sel_netif_lock.
 *
 */
static void sel_netif_destroy(struct sel_netif *netif)
{
	list_del_rcu(&netif->list);
	sel_netif_total--;
	kfree_rcu(netif, rcu_head);
}

/**
 * sel_netif_sid_slow - Lookup the SID of a network interface using the policy
 * @ns: the network namespace
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface by querying the
 * security policy.  The result is added to the network interface table to
 * speedup future queries.  Returns zero on success, negative values on
 * failure.
 *
 */
static int sel_netif_sid_slow(struct net *ns, int ifindex, u32 *sid)
{
	int ret = 0;
	struct sel_netif *netif;
	struct sel_netif *new;
	struct net_device *dev;

	/* check for the existence of the device; this also takes a
	 * reference which is dropped via dev_put() before returning */
	dev = dev_get_by_index(ns, ifindex);
	if (unlikely(dev == NULL)) {
		pr_warn("SELinux: failure in %s(), invalid network interface (%d)\n",
			__func__, ifindex);
		return -ENOENT;
	}

	spin_lock_bh(&sel_netif_lock);
	/* recheck under the lock in case another CPU added the entry
	 * between our failed RCU lookup and taking sel_netif_lock */
	netif = sel_netif_find(ns, ifindex);
	if (netif != NULL) {
		*sid = netif->nsec.sid;
		goto out;
	}

	ret = security_netif_sid(&selinux_state, dev->name, sid);
	if (ret != 0)
		goto out;
	/* cache the result; a failed allocation or insertion is not fatal
	 * since we already have a valid SID for the caller */
	new = kzalloc(sizeof(*new), GFP_ATOMIC);
	if (new) {
		new->nsec.ns = ns;
		new->nsec.ifindex = ifindex;
		new->nsec.sid = *sid;
		if (sel_netif_insert(new))
			kfree(new);
	}

out:
	spin_unlock_bh(&sel_netif_lock);
	dev_put(dev);
	if (unlikely(ret))
		pr_warn("SELinux: failure in %s(), unable to determine network interface label (%d)\n",
			__func__, ifindex);
	return ret;
}

/**
 * sel_netif_sid - Lookup the SID of a network interface
 * @ns: the network namespace
 * @ifindex: the network interface
 * @sid: interface SID
 *
 * Description:
 * This function determines the SID of a network interface using the fastest
 * method possible.  First the interface table is queried, but if an entry
 * can't be found then the policy is queried and the result is added to the
 * table to speedup future queries.  Returns zero on success, negative
 * values on failure.
 *
 */
int sel_netif_sid(struct net *ns, int ifindex, u32 *sid)
{
	struct sel_netif *netif;

	rcu_read_lock();
	netif = sel_netif_find(ns, ifindex);
	if (likely(netif != NULL)) {
		*sid = netif->nsec.sid;
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	return sel_netif_sid_slow(ns, ifindex, sid);
}
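
/*
 * Example (hypothetical caller, for illustration only): resolving the
 * SID of an skb's inbound interface before an access check.
 *
 *	u32 if_sid;
 *	int err;
 *
 *	err = sel_netif_sid(dev_net(skb->dev), skb->skb_iif, &if_sid);
 *	if (err)
 *		return err;
 *
 * The SELinux network hooks perform lookups of this shape when they
 * need an interface label for an access control decision.
 */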

/**
 * sel_netif_kill - Remove an entry from the network interface table
 * @ns: the network namespace
 * @ifindex: the network interface
 *
 * Description:
 * This function removes the entry matching @ns and @ifindex from the
 * network interface table if it exists.
 *
 */
static void sel_netif_kill(const struct net *ns, int ifindex)
{
	struct sel_netif *netif;

	rcu_read_lock();
	spin_lock_bh(&sel_netif_lock);
	netif = sel_netif_find(ns, ifindex);
	if (netif)
		sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
	rcu_read_unlock();
}

/**
 * sel_netif_flush - Flush the entire network interface table
 *
 * Description:
 * Remove all entries from the network interface table.
 *
 */
void sel_netif_flush(void)
{
	int idx;
	struct sel_netif *netif;

	spin_lock_bh(&sel_netif_lock);
	/* list_del_rcu() leaves the removed entry's forward link intact
	 * and kfree_rcu() defers the free, so iterating with
	 * list_for_each_entry() while destroying entries is safe */
	for (idx = 0; idx < SEL_NETIF_HASH_SIZE; idx++)
		list_for_each_entry(netif, &sel_netif_hash[idx], list)
			sel_netif_destroy(netif);
	spin_unlock_bh(&sel_netif_lock);
}
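
/*
 * Netdevice notifier (note added for clarity): when an interface goes
 * down, any cached entry for it is removed; a fresh entry is created
 * on demand the next time the interface is looked up, so the cache
 * does not serve a stale SID for an ifindex that has been reused.
 */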

static int sel_netif_netdev_notifier_handler(struct notifier_block *this,
					     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_DOWN)
		sel_netif_kill(dev_net(dev), dev->ifindex);

	return NOTIFY_DONE;
}

static struct notifier_block sel_netif_netdev_notifier = {
	.notifier_call = sel_netif_netdev_notifier_handler,
};

static __init int sel_netif_init(void)
{
	int i;

	if (!selinux_enabled_boot)
		return 0;

	for (i = 0; i < SEL_NETIF_HASH_SIZE; i++)
		INIT_LIST_HEAD(&sel_netif_hash[i]);

	register_netdevice_notifier(&sel_netif_netdev_notifier);

	return 0;
}

__initcall(sel_netif_init);