Diffstat (limited to 'net/caif')
-rw-r--r--  net/caif/Kconfig             |   48
-rw-r--r--  net/caif/Makefile            |   26
-rw-r--r--  net/caif/caif_config_util.c  |   87
-rw-r--r--  net/caif/caif_dev.c          |  418
-rw-r--r--  net/caif/caif_socket.c       | 1252
-rw-r--r--  net/caif/cfcnfg.c            |  471
-rw-r--r--  net/caif/cfctrl.c            |  693
-rw-r--r--  net/caif/cfdbgl.c            |   40
-rw-r--r--  net/caif/cfdgml.c            |  108
-rw-r--r--  net/caif/cffrml.c            |  151
-rw-r--r--  net/caif/cfmuxl.c            |  251
-rw-r--r--  net/caif/cfpkt_skbuff.c      |  571
-rw-r--r--  net/caif/cfrfml.c            |  108
-rw-r--r--  net/caif/cfserl.c            |  192
-rw-r--r--  net/caif/cfsrvl.c            |  192
-rw-r--r--  net/caif/cfutill.c           |  115
-rw-r--r--  net/caif/cfveil.c            |  107
-rw-r--r--  net/caif/cfvidl.c            |   65
-rw-r--r--  net/caif/chnl_net.c          |  467
19 files changed, 5362 insertions, 0 deletions
diff --git a/net/caif/Kconfig b/net/caif/Kconfig
new file mode 100644
index 000000000000..cd1daf6008bd
--- /dev/null
+++ b/net/caif/Kconfig
@@ -0,0 +1,48 @@
+#
+# CAIF net configurations
+#
+
+#menu "CAIF Support"
+comment "CAIF Support"
+menuconfig CAIF
+ tristate "Enable CAIF support"
+ select CRC_CCITT
+ default n
+ ---help---
+ The "Communication CPU to Application CPU Interface" (CAIF) is a packet
+ based connection-oriented MUX protocol developed by ST-Ericsson for use
+ with its modems. It is accessed from user space as sockets (PF_CAIF).
+
+ Say Y (or M) here if you build for a phone product (e.g. Android or
+ MeeGo ) that uses CAIF as transport, if unsure say N.
+
+ If you select to build it as a module, then CAIF_NETDEV also needs to
+ be built as a module. You will also need to say yes to any CAIF
+ physical devices that your platform requires.
+
+ See Documentation/networking/caif for further explanation of how to
+ use and configure CAIF.
+
+if CAIF
+
+config CAIF_DEBUG
+ bool "Enable Debug"
+ default n
+ ---help---
+ Enable the inclusion of debug code in the CAIF stack.
+ Be aware that doing this will impact performance.
+ If unsure say N.
+
+
+config CAIF_NETDEV
+ tristate "CAIF GPRS Network device"
+ default CAIF
+ ---help---
+ Say Y if you will be using a CAIF based GPRS network device.
+ This can be either built-in or a loadable module.
+ If you select to build it as a built-in, then the main CAIF device must
+ also be built-in.
+ If unsure say Y.
+
+endif
+#endmenu
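
The help text above notes that CAIF is accessed from user space as PF_CAIF
sockets. A minimal user-space sketch of that usage follows (not part of this
patch; it assumes the exported linux/caif/caif_socket.h header is installed,
and the connection id is an arbitrary example value):

    #include <stdio.h>
    #include <string.h>
    #include <sys/socket.h>
    #include <linux/caif/caif_socket.h>

    int main(void)
    {
            struct sockaddr_caif addr;
            /* SOCK_SEQPACKET preserves CAIF packet boundaries */
            int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_DATAGRAM);

            if (fd < 0) {
                    perror("socket");
                    return 1;
            }
            memset(&addr, 0, sizeof(addr));
            addr.family = AF_CAIF;
            addr.u.dgm.connection_id = 42;  /* arbitrary example channel */
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
                    perror("connect");
                    return 1;
            }
            /* send()/recv() now carry datagrams on this CAIF channel */
            return 0;
    }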
diff --git a/net/caif/Makefile b/net/caif/Makefile
new file mode 100644
index 000000000000..34852af2595e
--- /dev/null
+++ b/net/caif/Makefile
@@ -0,0 +1,26 @@
+ifeq ($(CONFIG_CAIF_DEBUG),y)
+CAIF_DBG_FLAGS := -DDEBUG
+endif
+
+ccflags-y := $(CAIF_FLAGS) $(CAIF_DBG_FLAGS)
+
+caif-objs := caif_dev.o \
+ cfcnfg.o cfmuxl.o cfctrl.o \
+ cffrml.o cfveil.o cfdbgl.o\
+ cfserl.o cfdgml.o \
+ cfrfml.o cfvidl.o cfutill.o \
+ cfsrvl.o cfpkt_skbuff.o caif_config_util.o
+clean-dirs:= .tmp_versions
+
+clean-files:= \
+ Module.symvers \
+ modules.order \
+ *.cmd \
+ *.o \
+ *~
+
+obj-$(CONFIG_CAIF) += caif.o
+obj-$(CONFIG_CAIF_NETDEV) += chnl_net.o
+obj-$(CONFIG_CAIF) += caif_socket.o
+
+export-objs := caif.o
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
new file mode 100644
index 000000000000..6f36580366f0
--- /dev/null
+++ b/net/caif/caif_config_util.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/caif_dev.h>
+
+int connect_req_to_link_param(struct cfcnfg *cnfg,
+ struct caif_connect_request *s,
+ struct cfctrl_link_param *l)
+{
+ struct dev_info *dev_info;
+ enum cfcnfg_phy_preference pref;
+ memset(l, 0, sizeof(*l));
+ l->priority = s->priority;
+
+ if (s->link_name[0] != '\0')
+ l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+ else {
+ switch (s->link_selector) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ return -EINVAL;
+ }
+ dev_info = cfcnfg_get_phyid(cnfg, pref);
+ if (dev_info == NULL)
+ return -ENODEV;
+ l->phyid = dev_info->id;
+ }
+ switch (s->protocol) {
+ case CAIFPROTO_AT:
+ l->linktype = CFCTRL_SRV_VEI;
+ if (s->sockaddr.u.at.type == CAIF_ATTYPE_PLAIN)
+ l->chtype = 0x02;
+ else
+ l->chtype = s->sockaddr.u.at.type;
+ l->endpoint = 0x00;
+ break;
+ case CAIFPROTO_DATAGRAM:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_DATAGRAM_LOOP:
+ l->linktype = CFCTRL_SRV_DATAGRAM;
+ l->chtype = 0x03;
+ l->endpoint = 0x00;
+ l->u.datagram.connid = s->sockaddr.u.dgm.connection_id;
+ break;
+ case CAIFPROTO_RFM:
+ l->linktype = CFCTRL_SRV_RFM;
+ l->u.datagram.connid = s->sockaddr.u.rfm.connection_id;
+ strncpy(l->u.rfm.volume, s->sockaddr.u.rfm.volume,
+ sizeof(l->u.rfm.volume)-1);
+ l->u.rfm.volume[sizeof(l->u.rfm.volume)-1] = 0;
+ break;
+ case CAIFPROTO_UTIL:
+ l->linktype = CFCTRL_SRV_UTIL;
+ l->endpoint = 0x00;
+ l->chtype = 0x00;
+ strncpy(l->u.utility.name, s->sockaddr.u.util.service,
+ sizeof(l->u.utility.name)-1);
+ l->u.utility.name[sizeof(l->u.utility.name)-1] = 0;
+ caif_assert(sizeof(l->u.utility.name) > 10);
+ l->u.utility.paramlen = s->param.size;
+ if (l->u.utility.paramlen > sizeof(l->u.utility.params))
+ l->u.utility.paramlen = sizeof(l->u.utility.params);
+
+ memcpy(l->u.utility.params, s->param.data,
+ l->u.utility.paramlen);
+
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
new file mode 100644
index 000000000000..024fd5bb2d39
--- /dev/null
+++ b/net/caif/caif_dev.c
@@ -0,0 +1,418 @@
+/*
+ * CAIF Interface registration.
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ *
+ * Borrowed heavily from file: pn_dev.c. Thanks to
+ * Remi Denis-Courmont <remi.denis-courmont@nokia.com>
+ * and Sakari Ailus <sakari.ailus@nokia.com>
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/if_arp.h>
+#include <linux/net.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <net/netns/generic.h>
+#include <net/net_namespace.h>
+#include <net/pkt_sched.h>
+#include <net/caif/caif_device.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+
+MODULE_LICENSE("GPL");
+#define TIMEOUT (HZ*5)
+
+/* Used for local tracking of the CAIF net devices */
+struct caif_device_entry {
+ struct cflayer layer;
+ struct list_head list;
+ atomic_t in_use;
+ atomic_t state;
+ u16 phyid;
+ struct net_device *netdev;
+ wait_queue_head_t event;
+};
+
+struct caif_device_entry_list {
+ struct list_head list;
+ /* Protects simultaneous deletes in list */
+ spinlock_t lock;
+};
+
+struct caif_net {
+ struct caif_device_entry_list caifdevs;
+};
+
+static int caif_net_id;
+static struct cfcnfg *cfg;
+
+static struct caif_device_entry_list *caif_device_list(struct net *net)
+{
+ struct caif_net *caifn;
+ BUG_ON(!net);
+ caifn = net_generic(net, caif_net_id);
+ BUG_ON(!caifn);
+ return &caifn->caifdevs;
+}
+
+/* Allocate new CAIF device. */
+static struct caif_device_entry *caif_device_alloc(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs;
+ struct caif_device_entry *caifd;
+ caifdevs = caif_device_list(dev_net(dev));
+ BUG_ON(!caifdevs);
+ caifd = kzalloc(sizeof(*caifd), GFP_ATOMIC);
+ if (!caifd)
+ return NULL;
+ caifd->netdev = dev;
+ list_add(&caifd->list, &caifdevs->list);
+ init_waitqueue_head(&caifd->event);
+ return caifd;
+}
+
+static struct caif_device_entry *caif_get(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ BUG_ON(!caifdevs);
+ list_for_each_entry(caifd, &caifdevs->list, list) {
+ if (caifd->netdev == dev)
+ return caifd;
+ }
+ return NULL;
+}
+
+static void caif_device_destroy(struct net_device *dev)
+{
+ struct caif_device_entry_list *caifdevs =
+ caif_device_list(dev_net(dev));
+ struct caif_device_entry *caifd;
+ ASSERT_RTNL();
+ if (dev->type != ARPHRD_CAIF)
+ return;
+
+ spin_lock_bh(&caifdevs->lock);
+ caifd = caif_get(dev);
+ if (caifd == NULL) {
+ spin_unlock_bh(&caifdevs->lock);
+ return;
+ }
+
+ list_del(&caifd->list);
+ spin_unlock_bh(&caifdevs->lock);
+
+ kfree(caifd);
+ return;
+}
+
+static int transmit(struct cflayer *layer, struct cfpkt *pkt)
+{
+ struct caif_device_entry *caifd =
+ container_of(layer, struct caif_device_entry, layer);
+ struct sk_buff *skb, *skb2;
+ int ret = -EINVAL;
+ skb = cfpkt_tonative(pkt);
+ skb->dev = caifd->netdev;
+ /*
+ * Don't allow SKB to be destroyed upon error, but signal resend
+ * notification to clients. We can't rely on the return value as
+ * congestion (NET_XMIT_CN) sometimes drops the packet, sometimes doesn't.
+ */
+ if (netif_queue_stopped(caifd->netdev))
+ return -EAGAIN;
+ skb2 = skb_get(skb);
+
+ ret = dev_queue_xmit(skb2);
+
+ if (!ret)
+ kfree_skb(skb);
+ else
+ return -EAGAIN;
+
+ return 0;
+}
+
+static int modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct caif_device_entry *caifd;
+ struct caif_dev_common *caifdev;
+ caifd = container_of(layr, struct caif_device_entry, layer);
+ caifdev = netdev_priv(caifd->netdev);
+ if (ctrl == _CAIF_MODEMCMD_PHYIF_USEFULL) {
+ atomic_set(&caifd->in_use, 1);
+ wake_up_interruptible(&caifd->event);
+
+ } else if (ctrl == _CAIF_MODEMCMD_PHYIF_USELESS) {
+ atomic_set(&caifd->in_use, 0);
+ wake_up_interruptible(&caifd->event);
+ }
+ return 0;
+}
+
+/*
+ * Push received packets up to the associated socket.
+ * On error, returns non-zero and releases the skb.
+ */
+static int receive(struct sk_buff *skb, struct net_device *dev,
+ struct packet_type *pkttype, struct net_device *orig_dev)
+{
+ struct net *net;
+ struct cfpkt *pkt;
+ struct caif_device_entry *caifd;
+ net = dev_net(dev);
+ pkt = cfpkt_fromnative(CAIF_DIR_IN, skb);
+ caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return NET_RX_DROP;
+
+ if (caifd->layer.up->receive(caifd->layer.up, pkt))
+ return NET_RX_DROP;
+
+ return 0;
+}
+
+static struct packet_type caif_packet_type __read_mostly = {
+ .type = cpu_to_be16(ETH_P_CAIF),
+ .func = receive,
+};
+
+static void dev_flowctrl(struct net_device *dev, int on)
+{
+ struct caif_device_entry *caifd = caif_get(dev);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return;
+
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ on ?
+ _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND :
+ _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND,
+ caifd->layer.id);
+}
+
+/* notify Caif of device events */
+static int caif_device_notify(struct notifier_block *me, unsigned long what,
+ void *arg)
+{
+ struct net_device *dev = arg;
+ struct caif_device_entry *caifd = NULL;
+ struct caif_dev_common *caifdev;
+ enum cfcnfg_phy_preference pref;
+ int res = -EINVAL;
+ enum cfcnfg_phy_type phy_type;
+
+ if (dev->type != ARPHRD_CAIF)
+ return 0;
+
+ switch (what) {
+ case NETDEV_REGISTER:
+ pr_info("CAIF: %s():register %s\n", __func__, dev->name);
+ caifd = caif_device_alloc(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ caifdev->flowctrl = dev_flowctrl;
+ atomic_set(&caifd->state, what);
+ res = 0;
+ break;
+
+ case NETDEV_UP:
+ pr_info("CAIF: %s(): up %s\n", __func__, dev->name);
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ caifdev = netdev_priv(dev);
+ if (atomic_read(&caifd->state) == NETDEV_UP) {
+ pr_info("CAIF: %s():%s already up\n",
+ __func__, dev->name);
+ break;
+ }
+ atomic_set(&caifd->state, what);
+ caifd->layer.transmit = transmit;
+ caifd->layer.modemcmd = modemcmd;
+
+ if (caifdev->use_frag)
+ phy_type = CFPHYTYPE_FRAG;
+ else
+ phy_type = CFPHYTYPE_CAIF;
+
+ switch (caifdev->link_select) {
+ case CAIF_LINK_HIGH_BANDW:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ case CAIF_LINK_LOW_LATENCY:
+ pref = CFPHYPREF_LOW_LAT;
+ break;
+ default:
+ pref = CFPHYPREF_HIGH_BW;
+ break;
+ }
+
+ cfcnfg_add_phy_layer(get_caif_conf(),
+ phy_type,
+ dev,
+ &caifd->layer,
+ &caifd->phyid,
+ pref,
+ caifdev->use_fcs,
+ caifdev->use_stx);
+ strncpy(caifd->layer.name, dev->name,
+ sizeof(caifd->layer.name) - 1);
+ caifd->layer.name[sizeof(caifd->layer.name) - 1] = 0;
+ break;
+
+ case NETDEV_GOING_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s():going down %s\n", __func__, dev->name);
+
+ if (atomic_read(&caifd->state) == NETDEV_GOING_DOWN ||
+ atomic_read(&caifd->state) == NETDEV_DOWN)
+ break;
+
+ atomic_set(&caifd->state, what);
+ if (!caifd || !caifd->layer.up || !caifd->layer.up->ctrlcmd)
+ return -EINVAL;
+ caifd->layer.up->ctrlcmd(caifd->layer.up,
+ _CAIF_CTRLCMD_PHYIF_DOWN_IND,
+ caifd->layer.id);
+ res = wait_event_interruptible_timeout(caifd->event,
+ atomic_read(&caifd->in_use) == 0,
+ TIMEOUT);
+ break;
+
+ case NETDEV_DOWN:
+ caifd = caif_get(dev);
+ if (caifd == NULL)
+ break;
+ pr_info("CAIF: %s(): down %s\n", __func__, dev->name);
+ if (atomic_read(&caifd->in_use))
+ pr_warning("CAIF: %s(): "
+ "Unregistering an active CAIF device: %s\n",
+ __func__, dev->name);
+ cfcnfg_del_phy_layer(get_caif_conf(), &caifd->layer);
+ atomic_set(&caifd->state, what);
+ break;
+
+ case NETDEV_UNREGISTER:
+ caifd = caif_get(dev);
+ pr_info("CAIF: %s(): unregister %s\n", __func__, dev->name);
+ atomic_set(&caifd->state, what);
+ caif_device_destroy(dev);
+ break;
+ }
+ return 0;
+}
+
+static struct notifier_block caif_device_notifier = {
+ .notifier_call = caif_device_notify,
+ .priority = 0,
+};
+
+
+struct cfcnfg *get_caif_conf(void)
+{
+ return cfg;
+}
+EXPORT_SYMBOL(get_caif_conf);
+
+int caif_connect_client(struct caif_connect_request *conn_req,
+ struct cflayer *client_layer)
+{
+ struct cfctrl_link_param param;
+ int ret;
+ ret = connect_req_to_link_param(get_caif_conf(), conn_req, &param);
+ if (ret)
+ return ret;
+ /* Hook up the adaptation layer. */
+ return cfcnfg_add_adaptation_layer(get_caif_conf(),
+ &param, client_layer);
+}
+EXPORT_SYMBOL(caif_connect_client);
+
+int caif_disconnect_client(struct cflayer *adap_layer)
+{
+ return cfcnfg_disconn_adapt_layer(get_caif_conf(), adap_layer);
+}
+EXPORT_SYMBOL(caif_disconnect_client);
+
+void caif_release_client(struct cflayer *adap_layer)
+{
+ cfcnfg_release_adap_layer(adap_layer);
+}
+EXPORT_SYMBOL(caif_release_client);
+
+/* Per-namespace handling of CAIF devices */
+static int caif_init_net(struct net *net)
+{
+ struct caif_net *caifn = net_generic(net, caif_net_id);
+ INIT_LIST_HEAD(&caifn->caifdevs.list);
+ spin_lock_init(&caifn->caifdevs.lock);
+ return 0;
+}
+
+static void caif_exit_net(struct net *net)
+{
+ struct net_device *dev;
+ int res;
+ rtnl_lock();
+ for_each_netdev(net, dev) {
+ if (dev->type != ARPHRD_CAIF)
+ continue;
+ res = dev_close(dev);
+ caif_device_destroy(dev);
+ }
+ rtnl_unlock();
+}
+
+static struct pernet_operations caif_net_ops = {
+ .init = caif_init_net,
+ .exit = caif_exit_net,
+ .id = &caif_net_id,
+ .size = sizeof(struct caif_net),
+};
+
+/* Initialize Caif devices list */
+static int __init caif_device_init(void)
+{
+ int result;
+ cfg = cfcnfg_create();
+ if (!cfg) {
+ pr_warning("CAIF: %s(): can't create cfcnfg.\n", __func__);
+ goto err_cfcnfg_create_failed;
+ }
+ result = register_pernet_device(&caif_net_ops);
+
+ if (result) {
+ kfree(cfg);
+ cfg = NULL;
+ return result;
+ }
+ dev_add_pack(&caif_packet_type);
+ register_netdevice_notifier(&caif_device_notifier);
+
+ return result;
+err_cfcnfg_create_failed:
+ return -ENODEV;
+}
+
+static void __exit caif_device_exit(void)
+{
+ dev_remove_pack(&caif_packet_type);
+ unregister_pernet_device(&caif_net_ops);
+ unregister_netdevice_notifier(&caif_device_notifier);
+ cfcnfg_remove(cfg);
+}
+
+module_init(caif_device_init);
+module_exit(caif_device_exit);
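
caif_device_notify() above expects link-layer drivers to register a
net_device of type ARPHRD_CAIF whose private area starts with a struct
caif_dev_common. A hypothetical minimal registration sketch (not part of this
patch; the my_* names are invented, teardown is omitted, and a real driver
must also set dev->netdev_ops and a transmit path):

    #include <linux/module.h>
    #include <linux/netdevice.h>
    #include <linux/if_arp.h>
    #include <net/caif/caif_device.h>

    struct my_caif_priv {
            struct caif_dev_common common; /* must be first: notifier casts netdev_priv() */
            /* driver-private state would follow */
    };

    static void my_caif_setup(struct net_device *dev)
    {
            struct my_caif_priv *priv = netdev_priv(dev);

            dev->type = ARPHRD_CAIF;        /* caif_device_notify() keys on this */
            priv->common.link_select = CAIF_LINK_LOW_LATENCY;
            priv->common.use_frag = true;   /* serial-style framing */
            priv->common.use_stx = true;
            priv->common.use_fcs = true;
    }

    static int __init my_caif_init(void)
    {
            struct net_device *dev =
                    alloc_netdev(sizeof(struct my_caif_priv), "mycaif%d",
                                 my_caif_setup);

            if (!dev)
                    return -ENOMEM;
            return register_netdev(dev);    /* triggers NETDEV_REGISTER above */
    }
    module_init(my_caif_init);
    MODULE_LICENSE("GPL");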
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
new file mode 100644
index 000000000000..c3a70c5c893a
--- /dev/null
+++ b/net/caif/caif_socket.c
@@ -0,0 +1,1252 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/spinlock.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/wait.h>
+#include <linux/poll.h>
+#include <linux/tcp.h>
+#include <linux/uaccess.h>
+#include <linux/mutex.h>
+#include <linux/debugfs.h>
+#include <linux/caif/caif_socket.h>
+#include <asm/atomic.h>
+#include <net/sock.h>
+#include <net/tcp_states.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/caif_dev.h>
+#include <net/caif/cfpkt.h>
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_NETPROTO(AF_CAIF);
+
+#define CAIF_DEF_SNDBUF (CAIF_MAX_PAYLOAD_SIZE*10)
+#define CAIF_DEF_RCVBUF (CAIF_MAX_PAYLOAD_SIZE*100)
+
+/*
+ * CAIF state is re-using the TCP socket states.
+ * caif_states stored in sk_state reflect the state as reported by
+ * the CAIF stack, while sk_socket->state is the state of the socket.
+ */
+enum caif_states {
+ CAIF_CONNECTED = TCP_ESTABLISHED,
+ CAIF_CONNECTING = TCP_SYN_SENT,
+ CAIF_DISCONNECTED = TCP_CLOSE
+};
+
+#define TX_FLOW_ON_BIT 1
+#define RX_FLOW_ON_BIT 2
+
+static struct dentry *debugfsdir;
+
+#ifdef CONFIG_DEBUG_FS
+struct debug_fs_counter {
+ atomic_t caif_nr_socks;
+ atomic_t num_connect_req;
+ atomic_t num_connect_resp;
+ atomic_t num_connect_fail_resp;
+ atomic_t num_disconnect;
+ atomic_t num_remote_shutdown_ind;
+ atomic_t num_tx_flow_off_ind;
+ atomic_t num_tx_flow_on_ind;
+ atomic_t num_rx_flow_off;
+ atomic_t num_rx_flow_on;
+};
+static struct debug_fs_counter cnt;
+#define dbfs_atomic_inc(v) atomic_inc(v)
+#define dbfs_atomic_dec(v) atomic_dec(v)
+#else
+#define dbfs_atomic_inc(v)
+#define dbfs_atomic_dec(v)
+#endif
+
+struct caifsock {
+ struct sock sk; /* must be first member */
+ struct cflayer layer;
+ char name[CAIF_LAYER_NAME_SZ]; /* Used for debugging */
+ u32 flow_state;
+ struct caif_connect_request conn_req;
+ struct mutex readlock;
+ struct dentry *debugfs_socket_dir;
+};
+
+static int rx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static int tx_flow_is_on(struct caifsock *cf_sk)
+{
+ return test_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_rx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(RX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_off(struct caifsock *cf_sk)
+{
+ clear_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void set_tx_flow_on(struct caifsock *cf_sk)
+{
+ set_bit(TX_FLOW_ON_BIT,
+ (void *) &cf_sk->flow_state);
+}
+
+static void caif_read_lock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_lock(&cf_sk->readlock);
+}
+
+static void caif_read_unlock(struct sock *sk)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ mutex_unlock(&cf_sk->readlock);
+}
+
+int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
+{
+ /* A quarter of the full buffer is used as a low water mark */
+ return cf_sk->sk.sk_rcvbuf / 4;
+}
+
+void caif_flow_ctrl(struct sock *sk, int mode)
+{
+ struct caifsock *cf_sk;
+ cf_sk = container_of(sk, struct caifsock, sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
+}
+
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), but changed so that packets are
+ * not dropped; instead, CAIF sends flow-off.
+ */
+int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
+{
+ int err;
+ int skb_len;
+ unsigned long flags;
+ struct sk_buff_head *list = &sk->sk_receive_queue;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
+ (unsigned)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
+ trace_printk("CAIF: %s():"
+ " sending flow OFF (queue len = %d %d)\n",
+ __func__,
+ atomic_read(&cf_sk->sk.sk_rmem_alloc),
+ sk_rcvbuf_lowwater(cf_sk));
+ set_rx_flow_off(cf_sk);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+
+ err = sk_filter(sk, skb);
+ if (err)
+ return err;
+ if (!sk_rmem_schedule(sk, skb->truesize) && rx_flow_is_on(cf_sk)) {
+ set_rx_flow_off(cf_sk);
+ trace_printk("CAIF: %s():"
+ " sending flow OFF due to rmem_schedule\n",
+ __func__);
+ if (cf_sk->layer.dn)
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_OFF_REQ);
+ }
+ skb->dev = NULL;
+ skb_set_owner_r(skb, sk);
+ /* Cache the SKB length before we tack it onto the receive
+ * queue. Once it is added it no longer belongs to us and
+ * may be freed by other threads of control pulling packets
+ * from the queue.
+ */
+ skb_len = skb->len;
+ spin_lock_irqsave(&list->lock, flags);
+ if (!sock_flag(sk, SOCK_DEAD))
+ __skb_queue_tail(list, skb);
+ spin_unlock_irqrestore(&list->lock, flags);
+
+ if (!sock_flag(sk, SOCK_DEAD))
+ sk->sk_data_ready(sk, skb_len);
+ else
+ kfree_skb(skb);
+ return 0;
+}
+
+/* Packet Receive Callback function called from CAIF Stack */
+static int caif_sktrecv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct caifsock *cf_sk;
+ struct sk_buff *skb;
+
+ cf_sk = container_of(layr, struct caifsock, layer);
+ skb = cfpkt_tonative(pkt);
+
+ if (unlikely(cf_sk->sk.sk_state != CAIF_CONNECTED)) {
+ cfpkt_destroy(pkt);
+ return 0;
+ }
+ caif_queue_rcv_skb(&cf_sk->sk, skb);
+ return 0;
+}
+
+/* Packet Control Callback function called from CAIF */
+static void caif_ctrl_cb(struct cflayer *layr,
+ enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ /* OK from modem to start sending again */
+ dbfs_atomic_inc(&cnt.num_tx_flow_on_ind);
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ /* Modem asks us to shut up */
+ dbfs_atomic_inc(&cnt.num_tx_flow_off_ind);
+ set_tx_flow_off(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_INIT_RSP:
+ /* We're now connected */
+ dbfs_atomic_inc(&cnt.num_connect_resp);
+ cf_sk->sk.sk_state = CAIF_CONNECTED;
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ /* We're now disconnected */
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ cfcnfg_release_adap_layer(&cf_sk->layer);
+ break;
+
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ /* Connect request failed */
+ dbfs_atomic_inc(&cnt.num_connect_fail_resp);
+ cf_sk->sk.sk_err = ECONNREFUSED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ /*
+ * Socket "standards" seems to require POLLOUT to
+ * be set at connect failure.
+ */
+ set_tx_flow_on(cf_sk);
+ cf_sk->sk.sk_state_change(&cf_sk->sk);
+ break;
+
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ /* Modem has closed this connection, or device is down. */
+ dbfs_atomic_inc(&cnt.num_remote_shutdown_ind);
+ cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
+ cf_sk->sk.sk_err = ECONNRESET;
+ set_rx_flow_on(cf_sk);
+ cf_sk->sk.sk_error_report(&cf_sk->sk);
+ break;
+
+ default:
+ pr_debug("CAIF: %s(): Unexpected flow command %d\n",
+ __func__, flow);
+ }
+}
+
+static void caif_check_flow_release(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ if (cf_sk->layer.dn == NULL || cf_sk->layer.dn->modemcmd == NULL)
+ return;
+ if (rx_flow_is_on(cf_sk))
+ return;
+
+ if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
+ dbfs_atomic_inc(&cnt.num_rx_flow_on);
+ set_rx_flow_on(cf_sk);
+ cf_sk->layer.dn->modemcmd(cf_sk->layer.dn,
+ CAIF_MODEMCMD_FLOW_ON_REQ);
+ }
+}
+/*
+ * Copied from sock.c:sock_queue_rcv_skb(), with an added check that the
+ * user buffer has sufficient size.
+ */
+
+static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *m, size_t buf_len, int flags)
+
+{
+ struct sock *sk = sock->sk;
+ struct sk_buff *skb;
+ int ret = 0;
+ int len;
+
+ if (unlikely(!buf_len))
+ return -EINVAL;
+
+ skb = skb_recv_datagram(sk, flags, 0, &ret);
+ if (!skb)
+ goto read_error;
+
+ len = skb->len;
+
+ if (skb && skb->len > buf_len && !(flags & MSG_PEEK)) {
+ len = buf_len;
+ /*
+ * Push skb back on receive queue if buffer too small.
+ * This has a built-in race where multi-threaded receive
+ * may get packets in the wrong order, but multiple reads do
+ * not really guarantee ordered delivery anyway.
+ * Let's optimize for speed without taking locks.
+ */
+
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ ret = -EMSGSIZE;
+ goto read_error;
+ }
+
+ ret = skb_copy_datagram_iovec(skb, 0, m->msg_iov, len);
+ if (ret)
+ goto read_error;
+
+ skb_free_datagram(sk, skb);
+
+ caif_check_flow_release(sk);
+
+ return len;
+
+read_error:
+ return ret;
+}
+
+
+/* Copied from unix_stream_wait_data, identical except for lock call. */
+static long caif_stream_data_wait(struct sock *sk, long timeo)
+{
+ DEFINE_WAIT(wait);
+ lock_sock(sk);
+
+ for (;;) {
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ sk->sk_err ||
+ sk->sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN) ||
+ signal_pending(current) ||
+ !timeo)
+ break;
+
+ set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ release_sock(sk);
+ timeo = schedule_timeout(timeo);
+ lock_sock(sk);
+ clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
+ }
+
+ finish_wait(sk_sleep(sk), &wait);
+ release_sock(sk);
+ return timeo;
+}
+
+
+/*
+ * Copied from unix_stream_recvmsg, but removed credit checks,
+ * changed locking calls, changed address handling.
+ */
+static int caif_stream_recvmsg(struct kiocb *iocb, struct socket *sock,
+ struct msghdr *msg, size_t size,
+ int flags)
+{
+ struct sock *sk = sock->sk;
+ int copied = 0;
+ int target;
+ int err = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+ if (flags&MSG_OOB)
+ goto out;
+
+ msg->msg_namelen = 0;
+
+ /*
+ * Lock the socket to prevent queue disordering
+ * while sleeping in memcpy_tomsg
+ */
+ err = -EAGAIN;
+ if (sk->sk_state == CAIF_CONNECTING)
+ goto out;
+
+ caif_read_lock(sk);
+ target = sock_rcvlowat(sk, flags&MSG_WAITALL, size);
+ timeo = sock_rcvtimeo(sk, flags&MSG_DONTWAIT);
+
+ do {
+ int chunk;
+ struct sk_buff *skb;
+
+ lock_sock(sk);
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ caif_check_flow_release(sk);
+
+ if (skb == NULL) {
+ if (copied >= target)
+ goto unlock;
+ /*
+ * POSIX 1003.1g mandates this order.
+ */
+ err = sock_error(sk);
+ if (err)
+ goto unlock;
+ err = -ECONNRESET;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ goto unlock;
+
+ err = -EPIPE;
+ if (sk->sk_state != CAIF_CONNECTED)
+ goto unlock;
+ if (sock_flag(sk, SOCK_DEAD))
+ goto unlock;
+
+ release_sock(sk);
+
+ err = -EAGAIN;
+ if (!timeo)
+ break;
+
+ caif_read_unlock(sk);
+
+ timeo = caif_stream_data_wait(sk, timeo);
+
+ if (signal_pending(current)) {
+ err = sock_intr_errno(timeo);
+ goto out;
+ }
+ caif_read_lock(sk);
+ continue;
+unlock:
+ release_sock(sk);
+ break;
+ }
+ release_sock(sk);
+ chunk = min_t(unsigned int, skb->len, size);
+ if (memcpy_toiovec(msg->msg_iov, skb->data, chunk)) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ if (copied == 0)
+ copied = -EFAULT;
+ break;
+ }
+ copied += chunk;
+ size -= chunk;
+
+ /* Mark read part of skb as used */
+ if (!(flags & MSG_PEEK)) {
+ skb_pull(skb, chunk);
+
+ /* put the skb back if we didn't use it up. */
+ if (skb->len) {
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ kfree_skb(skb);
+
+ } else {
+ /*
+ * It is questionable, see note in unix_dgram_recvmsg.
+ */
+ /* put message back and return */
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ break;
+ }
+ } while (size);
+ caif_read_unlock(sk);
+
+out:
+ return copied ? : err;
+}
+
+/*
+ * Copied from sock.c:sock_wait_for_wmem, but change to wait for
+ * CAIF flow-on and sock_writable.
+ */
+static long caif_wait_for_flow_on(struct caifsock *cf_sk,
+ int wait_writeable, long timeo, int *err)
+{
+ struct sock *sk = &cf_sk->sk;
+ DEFINE_WAIT(wait);
+ for (;;) {
+ *err = 0;
+ if (tx_flow_is_on(cf_sk) &&
+ (!wait_writeable || sock_writeable(&cf_sk->sk)))
+ break;
+ *err = -ETIMEDOUT;
+ if (!timeo)
+ break;
+ *err = -ERESTARTSYS;
+ if (signal_pending(current))
+ break;
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
+ *err = -ECONNRESET;
+ if (sk->sk_shutdown & SHUTDOWN_MASK)
+ break;
+ *err = -sk->sk_err;
+ if (sk->sk_err)
+ break;
+ *err = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED)
+ break;
+ timeo = schedule_timeout(timeo);
+ }
+ finish_wait(sk_sleep(sk), &wait);
+ return timeo;
+}
+
+/*
+ * Transmit a SKB. The device may temporarily request re-transmission
+ * by returning EAGAIN.
+ */
+static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
+ int noblock, long timeo)
+{
+ struct cfpkt *pkt;
+ int ret, loopcnt = 0;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
+ memset(cfpkt_info(pkt), 0, sizeof(struct caif_payload_info));
+ do {
+
+ ret = -ETIMEDOUT;
+
+ /* Slight paranoia, probably not needed. */
+ if (unlikely(loopcnt++ > 1000)) {
+ pr_warning("CAIF: %s(): transmit retries failed,"
+ " error = %d\n", __func__, ret);
+ break;
+ }
+
+ if (cf_sk->layer.dn != NULL)
+ ret = cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
+ if (likely(ret >= 0))
+ break;
+ /* if transmit return -EAGAIN, then retry */
+ if (noblock && ret == -EAGAIN)
+ break;
+ timeo = caif_wait_for_flow_on(cf_sk, 0, timeo, &ret);
+ if (signal_pending(current)) {
+ ret = sock_intr_errno(timeo);
+ break;
+ }
+ if (ret)
+ break;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(&cf_sk->sk, SOCK_DEAD) ||
+ (cf_sk->sk.sk_shutdown & RCV_SHUTDOWN)) {
+ ret = -EPIPE;
+ cf_sk->sk.sk_err = EPIPE;
+ break;
+ }
+ } while (ret == -EAGAIN);
+ return ret;
+}
+
+/* Copied from af_unix:unix_dgram_sendmsg, and adapted to CAIF */
+static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int buffer_size;
+ int ret = 0;
+ struct sk_buff *skb = NULL;
+ int noblock;
+ long timeo;
+ caif_assert(cf_sk);
+ ret = sock_error(sk);
+ if (ret)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_flags&MSG_OOB)
+ goto err;
+
+ ret = -EOPNOTSUPP;
+ if (msg->msg_namelen)
+ goto err;
+
+ ret = -EINVAL;
+ if (unlikely(msg->msg_iov->iov_base == NULL))
+ goto err;
+ noblock = msg->msg_flags & MSG_DONTWAIT;
+
+ buffer_size = len + CAIF_NEEDED_HEADROOM + CAIF_NEEDED_TAILROOM;
+
+ ret = -EMSGSIZE;
+ if (buffer_size > CAIF_MAX_PAYLOAD_SIZE)
+ goto err;
+
+ timeo = sock_sndtimeo(sk, noblock);
+ timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
+ 1, timeo, &ret);
+
+ ret = -EPIPE;
+ if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
+ sock_flag(sk, SOCK_DEAD) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ goto err;
+
+ ret = -ENOMEM;
+ skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
+ if (!skb)
+ goto err;
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+
+ ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
+
+ if (ret)
+ goto err;
+ ret = transmit_skb(skb, cf_sk, noblock, timeo);
+ if (ret < 0)
+ goto err;
+ return len;
+err:
+ kfree_skb(skb);
+ return ret;
+}
+
+/*
+ * Copied from unix_stream_sendmsg and adapted to CAIF:
+ * Removed permission handling, added waiting for flow-on,
+ * and other minor adaptations.
+ */
+static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
+ struct msghdr *msg, size_t len)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int err, size;
+ struct sk_buff *skb;
+ int sent = 0;
+ long timeo;
+
+ err = -EOPNOTSUPP;
+
+ if (unlikely(msg->msg_flags&MSG_OOB))
+ goto out_err;
+
+ if (unlikely(msg->msg_namelen))
+ goto out_err;
+
+ timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
+ timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
+
+ if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
+ goto pipe_err;
+
+ while (sent < len) {
+
+ size = len-sent;
+
+ if (size > CAIF_MAX_PAYLOAD_SIZE)
+ size = CAIF_MAX_PAYLOAD_SIZE;
+
+ /* If size is more than half of sndbuf, chop up message */
+ if (size > ((sk->sk_sndbuf >> 1) - 64))
+ size = (sk->sk_sndbuf >> 1) - 64;
+
+ if (size > SKB_MAX_ALLOC)
+ size = SKB_MAX_ALLOC;
+
+ skb = sock_alloc_send_skb(sk,
+ size + CAIF_NEEDED_HEADROOM
+ + CAIF_NEEDED_TAILROOM,
+ msg->msg_flags&MSG_DONTWAIT,
+ &err);
+ if (skb == NULL)
+ goto out_err;
+
+ skb_reserve(skb, CAIF_NEEDED_HEADROOM);
+ /*
+ * If you pass two values to the sock_alloc_send_skb
+ * it tries to grab the large buffer with GFP_NOFS
+ * (which can fail easily), and if it fails grab the
+ * fallback size buffer which is under a page and will
+ * succeed. [Alan]
+ */
+ size = min_t(int, size, skb_tailroom(skb));
+
+ err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
+ if (err) {
+ kfree_skb(skb);
+ goto out_err;
+ }
+ err = transmit_skb(skb, cf_sk,
+ msg->msg_flags&MSG_DONTWAIT, timeo);
+ if (err < 0) {
+ kfree_skb(skb);
+ goto pipe_err;
+ }
+ sent += size;
+ }
+
+ return sent;
+
+pipe_err:
+ if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
+ send_sig(SIGPIPE, current, 0);
+ err = -EPIPE;
+out_err:
+ return sent ? : err;
+}
+
+static int setsockopt(struct socket *sock,
+ int lvl, int opt, char __user *ov, unsigned int ol)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int prio, linksel;
+ struct ifreq ifreq;
+
+ if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
+ return -ENOPROTOOPT;
+
+ switch (opt) {
+ case CAIFSO_LINK_SELECT:
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (copy_from_user(&linksel, ov, sizeof(int)))
+ return -EINVAL;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.link_selector = linksel;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_PRIORITY:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(int))
+ return -EINVAL;
+ if (copy_from_user(&prio, ov, sizeof(int)))
+ return -EINVAL;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.priority = prio;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case SO_BINDTODEVICE:
+ if (lvl != SOL_SOCKET)
+ goto bad_sol;
+ if (ol < sizeof(struct ifreq))
+ return -EINVAL;
+ if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
+ return -EFAULT;
+ lock_sock(&(cf_sk->sk));
+ strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
+ sizeof(cf_sk->conn_req.link_name));
+ cf_sk->conn_req.link_name
+ [sizeof(cf_sk->conn_req.link_name)-1] = 0;
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ case CAIFSO_REQ_PARAM:
+ if (lvl != SOL_CAIF)
+ goto bad_sol;
+ if (cf_sk->sk.sk_protocol != CAIFPROTO_UTIL)
+ return -ENOPROTOOPT;
+ lock_sock(&(cf_sk->sk));
+ cf_sk->conn_req.param.size = ol;
+ if (ol > sizeof(cf_sk->conn_req.param.data) ||
+ copy_from_user(&cf_sk->conn_req.param.data, ov, ol)) {
+ release_sock(&cf_sk->sk);
+ return -EINVAL;
+ }
+ release_sock(&cf_sk->sk);
+ return 0;
+
+ default:
+ return -ENOPROTOOPT;
+ }
+
+ return 0;
+bad_sol:
+ return -ENOPROTOOPT;
+
+}
+
+/*
+ * caif_connect() - Connect a CAIF Socket
+ * Copied and modified af_irda.c:irda_connect().
+ *
+ * Note: by consulting "errno", the user space caller may learn the cause
+ * of the failure. Most of them are visible in the function; others may come
+ * from called subroutines and are listed here:
+ * o -EAFNOSUPPORT: bad socket family or type.
+ * o -ESOCKTNOSUPPORT: bad socket type or protocol
+ * o -EINVAL: bad socket address, or CAIF link type
+ * o -ECONNREFUSED: remote end refused the connection.
+ * o -EINPROGRESS: connect request sent but timed out (or non-blocking)
+ * o -EISCONN: already connected.
+ * o -ETIMEDOUT: Connection timed out (send timeout)
+ * o -ENODEV: No link layer to send request
+ * o -ECONNRESET: Received Shutdown indication or lost link layer
+ * o -ENOMEM: Out of memory
+ *
+ * State Strategy:
+ * o sk_state: holds the CAIF_* protocol state, it's updated by
+ * caif_ctrl_cb.
+ * o sock->state: holds the SS_* socket state and is updated by connect and
+ * disconnect.
+ */
+static int caif_connect(struct socket *sock, struct sockaddr *uaddr,
+ int addr_len, int flags)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ long timeo;
+ int err;
+ lock_sock(sk);
+
+ err = -EAFNOSUPPORT;
+ if (uaddr->sa_family != AF_CAIF)
+ goto out;
+
+ err = -ESOCKTNOSUPPORT;
+ if (unlikely(!(sk->sk_type == SOCK_STREAM &&
+ cf_sk->sk.sk_protocol == CAIFPROTO_AT) &&
+ sk->sk_type != SOCK_SEQPACKET))
+ goto out;
+ switch (sock->state) {
+ case SS_UNCONNECTED:
+ /* Normal case, a fresh connect */
+ caif_assert(sk->sk_state == CAIF_DISCONNECTED);
+ break;
+ case SS_CONNECTING:
+ switch (sk->sk_state) {
+ case CAIF_CONNECTED:
+ sock->state = SS_CONNECTED;
+ err = -EISCONN;
+ goto out;
+ case CAIF_DISCONNECTED:
+ /* Reconnect allowed */
+ break;
+ case CAIF_CONNECTING:
+ err = -EALREADY;
+ if (flags & O_NONBLOCK)
+ goto out;
+ goto wait_connect;
+ }
+ break;
+ case SS_CONNECTED:
+ caif_assert(sk->sk_state == CAIF_CONNECTED ||
+ sk->sk_state == CAIF_DISCONNECTED);
+ if (sk->sk_shutdown & SHUTDOWN_MASK) {
+ /* Allow re-connect after SHUTDOWN_IND */
+ caif_disconnect_client(&cf_sk->layer);
+ break;
+ }
+ /* No reconnect on a seqpacket socket */
+ err = -EISCONN;
+ goto out;
+ case SS_DISCONNECTING:
+ case SS_FREE:
+ caif_assert(0); /* Should never happen */
+ break;
+ }
+ sk->sk_state = CAIF_DISCONNECTED;
+ sock->state = SS_UNCONNECTED;
+ sk_stream_kill_queues(&cf_sk->sk);
+
+ err = -EINVAL;
+ if (addr_len != sizeof(struct sockaddr_caif) ||
+ !uaddr)
+ goto out;
+
+ memcpy(&cf_sk->conn_req.sockaddr, uaddr,
+ sizeof(struct sockaddr_caif));
+
+ /* Move to connecting socket, start sending Connect Requests */
+ sock->state = SS_CONNECTING;
+ sk->sk_state = CAIF_CONNECTING;
+
+ dbfs_atomic_inc(&cnt.num_connect_req);
+ cf_sk->layer.receive = caif_sktrecv_cb;
+ err = caif_connect_client(&cf_sk->conn_req,
+ &cf_sk->layer);
+ if (err < 0) {
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+ goto out;
+ }
+
+ err = -EINPROGRESS;
+wait_connect:
+
+ if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK))
+ goto out;
+
+ timeo = sock_sndtimeo(sk, flags & O_NONBLOCK);
+
+ release_sock(sk);
+ err = wait_event_interruptible_timeout(*sk_sleep(sk),
+ sk->sk_state != CAIF_CONNECTING,
+ timeo);
+ lock_sock(sk);
+ if (err < 0)
+ goto out; /* -ERESTARTSYS */
+ if (err == 0 && sk->sk_state != CAIF_CONNECTED) {
+ err = -ETIMEDOUT;
+ goto out;
+ }
+
+ if (sk->sk_state != CAIF_CONNECTED) {
+ sock->state = SS_UNCONNECTED;
+ err = sock_error(sk);
+ if (!err)
+ err = -ECONNREFUSED;
+ goto out;
+ }
+ sock->state = SS_CONNECTED;
+ err = 0;
+out:
+ release_sock(sk);
+ return err;
+}
+
+
+/*
+ * caif_release() - Disconnect a CAIF Socket
+ * Copied and modified af_irda.c:irda_release().
+ */
+static int caif_release(struct socket *sock)
+{
+ struct sock *sk = sock->sk;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ int res = 0;
+
+ if (!sk)
+ return 0;
+
+ set_tx_flow_off(cf_sk);
+
+ /*
+ * Ensure that packets are not queued after this point in time.
+ * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
+ * this ensures no packets when sock is dead.
+ */
+ spin_lock(&sk->sk_receive_queue.lock);
+ sock_set_flag(sk, SOCK_DEAD);
+ spin_unlock(&sk->sk_receive_queue.lock);
+ sock->sk = NULL;
+
+ dbfs_atomic_inc(&cnt.num_disconnect);
+
+ if (cf_sk->debugfs_socket_dir != NULL)
+ debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
+
+ lock_sock(&(cf_sk->sk));
+ sk->sk_state = CAIF_DISCONNECTED;
+ sk->sk_shutdown = SHUTDOWN_MASK;
+
+ if (cf_sk->sk.sk_socket->state == SS_CONNECTED ||
+ cf_sk->sk.sk_socket->state == SS_CONNECTING)
+ res = caif_disconnect_client(&cf_sk->layer);
+
+ cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
+ wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
+
+ sock_orphan(sk);
+ cf_sk->layer.dn = NULL;
+ sk_stream_kill_queues(&cf_sk->sk);
+ release_sock(sk);
+ sock_put(sk);
+ return res;
+}
+
+/* Copied from af_unix.c:unix_poll(), added CAIF tx_flow handling */
+static unsigned int caif_poll(struct file *file,
+ struct socket *sock, poll_table *wait)
+{
+ struct sock *sk = sock->sk;
+ unsigned int mask;
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+
+ sock_poll_wait(file, sk_sleep(sk), wait);
+ mask = 0;
+
+ /* exceptional events? */
+ if (sk->sk_err)
+ mask |= POLLERR;
+ if (sk->sk_shutdown == SHUTDOWN_MASK)
+ mask |= POLLHUP;
+ if (sk->sk_shutdown & RCV_SHUTDOWN)
+ mask |= POLLRDHUP;
+
+ /* readable? */
+ if (!skb_queue_empty(&sk->sk_receive_queue) ||
+ (sk->sk_shutdown & RCV_SHUTDOWN))
+ mask |= POLLIN | POLLRDNORM;
+
+ /* Connection-based need to check for termination and startup */
+ if (sk->sk_state == CAIF_DISCONNECTED)
+ mask |= POLLHUP;
+
+ /*
+ * we set writable also when the other side has shut down the
+ * connection. This prevents stuck sockets.
+ */
+ if (sock_writeable(sk) && tx_flow_is_on(cf_sk))
+ mask |= POLLOUT | POLLWRNORM | POLLWRBAND;
+
+ return mask;
+}
+
+static const struct proto_ops caif_seqpacket_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_seqpkt_sendmsg,
+ .recvmsg = caif_seqpkt_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+static const struct proto_ops caif_stream_ops = {
+ .family = PF_CAIF,
+ .owner = THIS_MODULE,
+ .release = caif_release,
+ .bind = sock_no_bind,
+ .connect = caif_connect,
+ .socketpair = sock_no_socketpair,
+ .accept = sock_no_accept,
+ .getname = sock_no_getname,
+ .poll = caif_poll,
+ .ioctl = sock_no_ioctl,
+ .listen = sock_no_listen,
+ .shutdown = sock_no_shutdown,
+ .setsockopt = setsockopt,
+ .getsockopt = sock_no_getsockopt,
+ .sendmsg = caif_stream_sendmsg,
+ .recvmsg = caif_stream_recvmsg,
+ .mmap = sock_no_mmap,
+ .sendpage = sock_no_sendpage,
+};
+
+/* This function is called when a socket is finally destroyed. */
+static void caif_sock_destructor(struct sock *sk)
+{
+ struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
+ caif_assert(!atomic_read(&sk->sk_wmem_alloc));
+ caif_assert(sk_unhashed(sk));
+ caif_assert(!sk->sk_socket);
+ if (!sock_flag(sk, SOCK_DEAD)) {
+ pr_info("Attempt to release alive CAIF socket: %p\n", sk);
+ return;
+ }
+ sk_stream_kill_queues(&cf_sk->sk);
+ dbfs_atomic_dec(&cnt.caif_nr_socks);
+}
+
+static int caif_create(struct net *net, struct socket *sock, int protocol,
+ int kern)
+{
+ struct sock *sk = NULL;
+ struct caifsock *cf_sk = NULL;
+ static struct proto prot = {.name = "PF_CAIF",
+ .owner = THIS_MODULE,
+ .obj_size = sizeof(struct caifsock),
+ };
+
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN))
+ return -EPERM;
+ /*
+ * The sock->type specifies the socket type to use.
+ * The CAIF socket is a packet stream in the sense
+ * that it is packet-based. CAIF trusts the reliability
+ * of the link; no resending is implemented.
+ */
+ if (sock->type == SOCK_SEQPACKET)
+ sock->ops = &caif_seqpacket_ops;
+ else if (sock->type == SOCK_STREAM)
+ sock->ops = &caif_stream_ops;
+ else
+ return -ESOCKTNOSUPPORT;
+
+ if (protocol < 0 || protocol >= CAIFPROTO_MAX)
+ return -EPROTONOSUPPORT;
+ /*
+ * Set the socket state to unconnected. The socket state
+ * is really not used at all in net/core or socket.c, but the
+ * initialization makes sure that sock->state is not uninitialized.
+ */
+ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot);
+ if (!sk)
+ return -ENOMEM;
+
+ cf_sk = container_of(sk, struct caifsock, sk);
+
+ /* Store the protocol */
+ sk->sk_protocol = (unsigned char) protocol;
+
+ /* Sendbuf dictates the number of outbound packets not yet sent */
+ sk->sk_sndbuf = CAIF_DEF_SNDBUF;
+ sk->sk_rcvbuf = CAIF_DEF_RCVBUF;
+
+ /*
+ * Lock in order to try to stop someone from opening the socket
+ * too early.
+ */
+ lock_sock(&(cf_sk->sk));
+
+ /* Initialize the nonzero default sock structure data. */
+ sock_init_data(sock, sk);
+ sk->sk_destruct = caif_sock_destructor;
+
+ mutex_init(&cf_sk->readlock); /* single task reading lock */
+ cf_sk->layer.ctrlcmd = caif_ctrl_cb;
+ cf_sk->sk.sk_socket->state = SS_UNCONNECTED;
+ cf_sk->sk.sk_state = CAIF_DISCONNECTED;
+
+ set_tx_flow_off(cf_sk);
+ set_rx_flow_on(cf_sk);
+
+ /* Set default options on configuration */
+ cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
+ cf_sk->conn_req.protocol = protocol;
+ /* Increase the number of sockets created. */
+ dbfs_atomic_inc(&cnt.caif_nr_socks);
+#ifdef CONFIG_DEBUG_FS
+ if (!IS_ERR(debugfsdir)) {
+ /* Fill in some information concerning the misc socket. */
+ snprintf(cf_sk->name, sizeof(cf_sk->name), "cfsk%d",
+ atomic_read(&cnt.caif_nr_socks));
+
+ cf_sk->debugfs_socket_dir =
+ debugfs_create_dir(cf_sk->name, debugfsdir);
+ debugfs_create_u32("sk_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_state);
+ debugfs_create_u32("flow_state", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir, &cf_sk->flow_state);
+ debugfs_create_u32("sk_rmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_rmem_alloc);
+ debugfs_create_u32("sk_wmem_alloc", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->sk.sk_wmem_alloc);
+ debugfs_create_u32("identity", S_IRUSR | S_IWUSR,
+ cf_sk->debugfs_socket_dir,
+ (u32 *) &cf_sk->layer.id);
+ }
+#endif
+ release_sock(&cf_sk->sk);
+ return 0;
+}
+
+
+static struct net_proto_family caif_family_ops = {
+ .family = PF_CAIF,
+ .create = caif_create,
+ .owner = THIS_MODULE,
+};
+
+int af_caif_init(void)
+{
+ int err = sock_register(&caif_family_ops);
+ if (err)
+ return err;
+ return 0;
+}
+
+static int __init caif_sktinit_module(void)
+{
+#ifdef CONFIG_DEBUG_FS
+ debugfsdir = debugfs_create_dir("caif_sk", NULL);
+ if (!IS_ERR(debugfsdir)) {
+ debugfs_create_u32("num_sockets", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.caif_nr_socks);
+ debugfs_create_u32("num_connect_req", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_req);
+ debugfs_create_u32("num_connect_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_resp);
+ debugfs_create_u32("num_connect_fail_resp", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_connect_fail_resp);
+ debugfs_create_u32("num_disconnect", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_disconnect);
+ debugfs_create_u32("num_remote_shutdown_ind",
+ S_IRUSR | S_IWUSR, debugfsdir,
+ (u32 *) &cnt.num_remote_shutdown_ind);
+ debugfs_create_u32("num_tx_flow_off_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_off_ind);
+ debugfs_create_u32("num_tx_flow_on_ind", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_tx_flow_on_ind);
+ debugfs_create_u32("num_rx_flow_off", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_off);
+ debugfs_create_u32("num_rx_flow_on", S_IRUSR | S_IWUSR,
+ debugfsdir,
+ (u32 *) &cnt.num_rx_flow_on);
+ }
+#endif
+ return af_caif_init();
+}
+
+static void __exit caif_sktexit_module(void)
+{
+ sock_unregister(PF_CAIF);
+ if (debugfsdir != NULL)
+ debugfs_remove_recursive(debugfsdir);
+}
+module_init(caif_sktinit_module);
+module_exit(caif_sktexit_module);
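
The caif_connect() documentation above lists -EINPROGRESS for a non-blocking
connect. A user-space sketch of that flow follows (not part of this patch; it
assumes the standard poll()/SO_ERROR completion pattern, and the AT channel
type is just an example):

    #include <errno.h>
    #include <fcntl.h>
    #include <poll.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/socket.h>
    #include <linux/caif/caif_socket.h>

    static int caif_connect_nonblock(void)
    {
            struct sockaddr_caif addr;
            struct pollfd pfd;
            int err = 0;
            socklen_t len = sizeof(err);
            int fd = socket(AF_CAIF, SOCK_SEQPACKET, CAIFPROTO_AT);

            if (fd < 0)
                    return -1;
            fcntl(fd, F_SETFL, O_NONBLOCK);
            memset(&addr, 0, sizeof(addr));
            addr.family = AF_CAIF;
            addr.u.at.type = CAIF_ATTYPE_PLAIN;
            if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
                errno != EINPROGRESS) {
                    close(fd);
                    return -1;
            }
            pfd.fd = fd;
            pfd.events = POLLOUT;   /* caif_poll() reports POLLOUT once connected */
            if (poll(&pfd, 1, 5000) <= 0) {
                    close(fd);      /* timeout or error */
                    return -1;
            }
            getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &len);
            if (err) {
                    close(fd);
                    return -1;
            }
            return fd;
    }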
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
new file mode 100644
index 000000000000..471c62939fad
--- /dev/null
+++ b/net/caif/cfcnfg.c
@@ -0,0 +1,471 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/kernel.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfctrl.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cffrml.h>
+#include <net/caif/cfserl.h>
+#include <net/caif/cfsrvl.h>
+
+#include <linux/module.h>
+#include <asm/atomic.h>
+
+#define MAX_PHY_LAYERS 7
+#define PHY_NAME_LEN 20
+
+#define container_obj(layr) container_of(layr, struct cfcnfg, layer)
+
+/* Information about CAIF physical interfaces held by the config module
+ * in order to manage them
+ */
+struct cfcnfg_phyinfo {
+ /* Pointer to the layer below the MUX (framing layer) */
+ struct cflayer *frm_layer;
+ /* Pointer to the lowest actual physical layer */
+ struct cflayer *phy_layer;
+ /* Unique identifier of the physical interface */
+ unsigned int id;
+ /* Preference of the physical interface */
+ enum cfcnfg_phy_preference pref;
+
+ /* Reference count, number of channels using the device */
+ int phy_ref_count;
+
+ /* Information about the physical device */
+ struct dev_info dev_info;
+};
+
+struct cfcnfg {
+ struct cflayer layer;
+ struct cflayer *ctrl;
+ struct cflayer *mux;
+ u8 last_phyid;
+ struct cfcnfg_phyinfo phy_layers[MAX_PHY_LAYERS];
+};
+
+static void cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id,
+ enum cfctrl_srv serv, u8 phyid,
+ struct cflayer *adapt_layer);
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id);
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer);
+static void cfctrl_resp_func(void);
+static void cfctrl_enum_resp(void);
+
+struct cfcnfg *cfcnfg_create(void)
+{
+ struct cfcnfg *this;
+ struct cfctrl_rsp *resp;
+ /* Initiate this layer */
+ this = kmalloc(sizeof(struct cfcnfg), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ memset(this, 0, sizeof(struct cfcnfg));
+ this->mux = cfmuxl_create();
+ if (!this->mux)
+ goto out_of_mem;
+ this->ctrl = cfctrl_create();
+ if (!this->ctrl)
+ goto out_of_mem;
+ /* Initiate response functions */
+ resp = cfctrl_get_respfuncs(this->ctrl);
+ resp->enum_rsp = cfctrl_enum_resp;
+ resp->linkerror_ind = cfctrl_resp_func;
+ resp->linkdestroy_rsp = cfcnfg_linkdestroy_rsp;
+ resp->sleep_rsp = cfctrl_resp_func;
+ resp->wake_rsp = cfctrl_resp_func;
+ resp->restart_rsp = cfctrl_resp_func;
+ resp->radioset_rsp = cfctrl_resp_func;
+ resp->linksetup_rsp = cfcnfg_linkup_rsp;
+ resp->reject_rsp = cfcnfg_reject_rsp;
+
+ this->last_phyid = 1;
+
+ cfmuxl_set_uplayer(this->mux, this->ctrl, 0);
+ layer_set_dn(this->ctrl, this->mux);
+ layer_set_up(this->ctrl, this);
+ return this;
+out_of_mem:
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ kfree(this->mux);
+ kfree(this->ctrl);
+ kfree(this);
+ return NULL;
+}
+EXPORT_SYMBOL(cfcnfg_create);
+
+void cfcnfg_remove(struct cfcnfg *cfg)
+{
+ if (cfg) {
+ kfree(cfg->mux);
+ kfree(cfg->ctrl);
+ kfree(cfg);
+ }
+}
+
+static void cfctrl_resp_func(void)
+{
+}
+
+static void cfctrl_enum_resp(void)
+{
+}
+
+struct dev_info *cfcnfg_get_phyid(struct cfcnfg *cnfg,
+ enum cfcnfg_phy_preference phy_pref)
+{
+ u16 i;
+
+ /* Try to match with specified preference */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i &&
+ cnfg->phy_layers[i].pref == phy_pref &&
+ cnfg->phy_layers[i].frm_layer != NULL) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+ /* Otherwise fall back to any registered interface */
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].id == i) {
+ caif_assert(cnfg->phy_layers != NULL);
+ caif_assert(cnfg->phy_layers[i].id == i);
+ return &cnfg->phy_layers[i].dev_info;
+ }
+ }
+
+ return NULL;
+}
+
+static struct cfcnfg_phyinfo *cfcnfg_get_phyinfo(struct cfcnfg *cnfg,
+ u8 phyid)
+{
+ int i;
+ /* Try to match with the specified phy id */
+ for (i = 0; i < MAX_PHY_LAYERS; i++)
+ if (cnfg->phy_layers[i].frm_layer != NULL &&
+ cnfg->phy_layers[i].id == phyid)
+ return &cnfg->phy_layers[i];
+ return NULL;
+}
+
+int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+{
+ int i;
+
+ /* Try to match with specified name */
+ for (i = 0; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer != NULL
+ && strcmp(cnfg->phy_layers[i].phy_layer->name,
+ name) == 0)
+ return cnfg->phy_layers[i].frm_layer->id;
+ }
+ return 0;
+}
+
+int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
+{
+ u8 channel_id = 0;
+ int ret = 0;
+ struct cflayer *servl = NULL;
+ struct cfcnfg_phyinfo *phyinfo = NULL;
+ u8 phyid = 0;
+ caif_assert(adap_layer != NULL);
+ channel_id = adap_layer->id;
+ if (adap_layer->dn == NULL || channel_id == 0) {
+ pr_err("CAIF: %s():adap_layer->id is 0\n", __func__);
+ ret = -ENOTCONN;
+ goto end;
+ }
+ servl = cfmuxl_remove_uplayer(cnfg->mux, channel_id);
+ if (servl == NULL) {
+ pr_err("CAIF: %s(): PROTOCOL ERROR "
+ "- Error removing service_layer Channel_Id(%d)",
+ __func__, channel_id);
+ ret = -EINVAL;
+ goto end;
+ }
+ layer_set_up(servl, NULL);
+ ret = cfctrl_linkdown_req(cnfg->ctrl, channel_id, adap_layer);
+ caif_assert(channel_id == servl->id);
+ if (adap_layer->dn != NULL) {
+ phyid = cfsrvl_getphyid(adap_layer->dn);
+
+ phyinfo = cfcnfg_get_phyinfo(cnfg, phyid);
+ if (phyinfo == NULL) {
+ pr_warning("CAIF: %s(): "
+ "No interface to send disconnect to\n",
+ __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+ if (phyinfo->id != phyid ||
+ phyinfo->phy_layer->id != phyid ||
+ phyinfo->frm_layer->id != phyid) {
+ pr_err("CAIF: %s(): "
+ "Inconsistency in phy registration\n",
+ __func__);
+ ret = -EINVAL;
+ goto end;
+ }
+ }
+ if (phyinfo != NULL && --phyinfo->phy_ref_count == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USELESS);
+ }
+end:
+ if (servl != NULL)
+ cfsrvl_put(servl);
+ cfctrl_cancel_req(cnfg->ctrl, adap_layer);
+ if (adap_layer->ctrlcmd != NULL)
+ adap_layer->ctrlcmd(adap_layer, CAIF_CTRLCMD_DEINIT_RSP, 0);
+ return ret;
+
+}
+EXPORT_SYMBOL(cfcnfg_disconn_adapt_layer);
+
+void cfcnfg_release_adap_layer(struct cflayer *adap_layer)
+{
+ if (adap_layer->dn)
+ cfsrvl_put(adap_layer->dn);
+}
+EXPORT_SYMBOL(cfcnfg_release_adap_layer);
+
+static void cfcnfg_linkdestroy_rsp(struct cflayer *layer, u8 channel_id)
+{
+}
+
+int cfcnfg_add_adaptation_layer(struct cfcnfg *cnfg,
+ struct cfctrl_link_param *param,
+ struct cflayer *adap_layer)
+{
+ struct cflayer *frml;
+ if (adap_layer == NULL) {
+ pr_err("CAIF: %s(): adap_layer is zero", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->receive == NULL) {
+ pr_err("CAIF: %s(): adap_layer->receive is NULL", __func__);
+ return -EINVAL;
+ }
+ if (adap_layer->ctrlcmd == NULL) {
+ pr_err("CAIF: %s(): adap_layer->ctrlcmd == NULL", __func__);
+ return -EINVAL;
+ }
+ frml = cnfg->phy_layers[param->phyid].frm_layer;
+ if (frml == NULL) {
+ pr_err("CAIF: %s(): Specified PHY type does not exist!",
+ __func__);
+ return -ENODEV;
+ }
+ caif_assert(param->phyid == cnfg->phy_layers[param->phyid].id);
+ caif_assert(cnfg->phy_layers[param->phyid].frm_layer->id ==
+ param->phyid);
+ caif_assert(cnfg->phy_layers[param->phyid].phy_layer->id ==
+ param->phyid);
+ /* FIXME: ENUMERATE INITIALLY WHEN ACTIVATING PHYSICAL INTERFACE */
+ cfctrl_enum_req(cnfg->ctrl, param->phyid);
+ return cfctrl_linkup_request(cnfg->ctrl, param, adap_layer);
+}
+EXPORT_SYMBOL(cfcnfg_add_adaptation_layer);
+
+static void cfcnfg_reject_rsp(struct cflayer *layer, u8 channel_id,
+ struct cflayer *adapt_layer)
+{
+ if (adapt_layer != NULL && adapt_layer->ctrlcmd != NULL)
+ adapt_layer->ctrlcmd(adapt_layer,
+ CAIF_CTRLCMD_INIT_FAIL_RSP, 0);
+}
+
+static void
+cfcnfg_linkup_rsp(struct cflayer *layer, u8 channel_id, enum cfctrl_srv serv,
+ u8 phyid, struct cflayer *adapt_layer)
+{
+ struct cfcnfg *cnfg = container_obj(layer);
+ struct cflayer *servicel = NULL;
+ struct cfcnfg_phyinfo *phyinfo;
+ if (adapt_layer == NULL) {
+ pr_debug("CAIF: %s(): link setup response "
+ "but no client exist, send linkdown back\n",
+ __func__);
+ cfctrl_linkdown_req(cnfg->ctrl, channel_id, NULL);
+ return;
+ }
+
+ caif_assert(cnfg != NULL);
+ caif_assert(phyid != 0);
+ phyinfo = &cnfg->phy_layers[phyid];
+ caif_assert(phyinfo != NULL);
+ caif_assert(phyinfo->id == phyid);
+ caif_assert(phyinfo->phy_layer != NULL);
+ caif_assert(phyinfo->phy_layer->id == phyid);
+
+ if (phyinfo != NULL &&
+ phyinfo->phy_ref_count++ == 0 &&
+ phyinfo->phy_layer != NULL &&
+ phyinfo->phy_layer->modemcmd != NULL) {
+ caif_assert(phyinfo->phy_layer->id == phyid);
+ phyinfo->phy_layer->modemcmd(phyinfo->phy_layer,
+ _CAIF_MODEMCMD_PHYIF_USEFULL);
+ }
+ adapt_layer->id = channel_id;
+
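+	/* Instantiate the service layer matching the negotiated
+	 * link type.
+	 */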
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ servicel = cfvei_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ servicel = cfdgml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_RFM:
+ servicel = cfrfml_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_UTIL:
+ servicel = cfutill_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ servicel = cfvidl_create(channel_id, &phyinfo->dev_info);
+ break;
+ case CFCTRL_SRV_DBG:
+ servicel = cfdbgl_create(channel_id, &phyinfo->dev_info);
+ break;
+ default:
+ pr_err("CAIF: %s(): Protocol error. "
+ "Link setup response - unknown channel type\n",
+ __func__);
+ return;
+ }
+ if (!servicel) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ layer_set_dn(servicel, cnfg->mux);
+ cfmuxl_set_uplayer(cnfg->mux, servicel, channel_id);
+ layer_set_up(servicel, adapt_layer);
+ layer_set_dn(adapt_layer, servicel);
+ cfsrvl_get(servicel);
+ servicel->ctrlcmd(servicel, CAIF_CTRLCMD_INIT_RSP, 0);
+}
+
+void
+cfcnfg_add_phy_layer(struct cfcnfg *cnfg, enum cfcnfg_phy_type phy_type,
+ void *dev, struct cflayer *phy_layer, u16 *phyid,
+ enum cfcnfg_phy_preference pref,
+ bool fcs, bool stx)
+{
+ struct cflayer *frml;
+ struct cflayer *phy_driver = NULL;
+ int i;
+
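+	/*
+	 * PHY id allocation: first try the id after the last one handed
+	 * out (round-robin over 1..MAX_PHY_LAYERS-1), then fall back to
+	 * a linear scan. Id 0 is reserved, so *phyid == 0 below means
+	 * no free id was found.
+	 */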
+ if (cnfg->phy_layers[cnfg->last_phyid].frm_layer == NULL) {
+ *phyid = cnfg->last_phyid;
+
+		/* range: 1..(MAX_PHY_LAYERS-1) */
+ cnfg->last_phyid =
+ (cnfg->last_phyid % (MAX_PHY_LAYERS - 1)) + 1;
+ } else {
+ *phyid = 0;
+ for (i = 1; i < MAX_PHY_LAYERS; i++) {
+ if (cnfg->phy_layers[i].frm_layer == NULL) {
+ *phyid = i;
+ break;
+ }
+ }
+ }
+ if (*phyid == 0) {
+ pr_err("CAIF: %s(): No Available PHY ID\n", __func__);
+ return;
+ }
+
+ switch (phy_type) {
+ case CFPHYTYPE_FRAG:
+ phy_driver =
+ cfserl_create(CFPHYTYPE_FRAG, *phyid, stx);
+ if (!phy_driver) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+
+ break;
+ case CFPHYTYPE_CAIF:
+ phy_driver = NULL;
+ break;
+ default:
+		pr_err("CAIF: %s(): unknown PHY type (%d)\n",
+		       __func__, phy_type);
+		return;
+ }
+
+ phy_layer->id = *phyid;
+ cnfg->phy_layers[*phyid].pref = pref;
+ cnfg->phy_layers[*phyid].id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.id = *phyid;
+ cnfg->phy_layers[*phyid].dev_info.dev = dev;
+ cnfg->phy_layers[*phyid].phy_layer = phy_layer;
+ cnfg->phy_layers[*phyid].phy_ref_count = 0;
+ phy_layer->type = phy_type;
+ frml = cffrml_create(*phyid, fcs);
+ if (!frml) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cnfg->phy_layers[*phyid].frm_layer = frml;
+ cfmuxl_set_dnlayer(cnfg->mux, frml, *phyid);
+ layer_set_up(frml, cnfg->mux);
+
+ if (phy_driver != NULL) {
+ phy_driver->id = *phyid;
+ layer_set_dn(frml, phy_driver);
+ layer_set_up(phy_driver, frml);
+ layer_set_dn(phy_driver, phy_layer);
+ layer_set_up(phy_layer, phy_driver);
+ } else {
+ layer_set_dn(frml, phy_layer);
+ layer_set_up(phy_layer, frml);
+ }
+}
+EXPORT_SYMBOL(cfcnfg_add_phy_layer);
+
+int cfcnfg_del_phy_layer(struct cfcnfg *cnfg, struct cflayer *phy_layer)
+{
+ struct cflayer *frml, *frml_dn;
+ u16 phyid;
+ phyid = phy_layer->id;
+ caif_assert(phyid == cnfg->phy_layers[phyid].id);
+ caif_assert(phy_layer == cnfg->phy_layers[phyid].phy_layer);
+ caif_assert(phy_layer->id == phyid);
+ caif_assert(cnfg->phy_layers[phyid].frm_layer->id == phyid);
+
+ memset(&cnfg->phy_layers[phy_layer->id], 0,
+ sizeof(struct cfcnfg_phyinfo));
+ frml = cfmuxl_remove_dnlayer(cnfg->mux, phy_layer->id);
+ frml_dn = frml->dn;
+ cffrml_set_uplayer(frml, NULL);
+ cffrml_set_dnlayer(frml, NULL);
+ kfree(frml);
+
+ if (phy_layer != frml_dn) {
+ layer_set_up(frml_dn, NULL);
+ layer_set_dn(frml_dn, NULL);
+ kfree(frml_dn);
+ }
+ layer_set_up(phy_layer, NULL);
+ return 0;
+}
+EXPORT_SYMBOL(cfcnfg_del_phy_layer);
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
new file mode 100644
index 000000000000..a521d32cfe56
--- /dev/null
+++ b/net/caif/cfctrl.c
@@ -0,0 +1,693 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfctrl.h>
+
+#define container_obj(layr) container_of(layr, struct cfctrl, serv.layer)
+#define UTILITY_NAME_LENGTH 16
+#define CFPKT_CTRL_PKT_LEN 20
+
+
+#ifdef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt){
+ return CAIF_FAILURE;
+}
+#else
+static int handle_loop(struct cfctrl *ctrl,
+ int cmd, struct cfpkt *pkt);
+#endif
+static int cfctrl_recv(struct cflayer *layr, struct cfpkt *pkt);
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+
+struct cflayer *cfctrl_create(void)
+{
+ struct dev_info dev_info;
+ struct cfctrl *this =
+ kmalloc(sizeof(struct cfctrl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ memset(&dev_info, 0, sizeof(dev_info));
+ dev_info.id = 0xff;
+ memset(this, 0, sizeof(*this));
+ cfsrvl_init(&this->serv, 0, &dev_info);
+ spin_lock_init(&this->info_list_lock);
+ atomic_set(&this->req_seq_no, 1);
+ atomic_set(&this->rsp_seq_no, 1);
+ this->serv.layer.receive = cfctrl_recv;
+ sprintf(this->serv.layer.name, "ctrl");
+ this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
+ spin_lock_init(&this->loop_linkid_lock);
+ this->loop_linkid = 1;
+ return &this->serv.layer;
+}
+
+static bool param_eq(struct cfctrl_link_param *p1, struct cfctrl_link_param *p2)
+{
+ bool eq =
+ p1->linktype == p2->linktype &&
+ p1->priority == p2->priority &&
+ p1->phyid == p2->phyid &&
+ p1->endpoint == p2->endpoint && p1->chtype == p2->chtype;
+
+ if (!eq)
+ return false;
+
+ switch (p1->linktype) {
+ case CFCTRL_SRV_VEI:
+ return true;
+ case CFCTRL_SRV_DATAGRAM:
+ return p1->u.datagram.connid == p2->u.datagram.connid;
+ case CFCTRL_SRV_RFM:
+		return p1->u.rfm.connid == p2->u.rfm.connid &&
+		       strcmp(p1->u.rfm.volume, p2->u.rfm.volume) == 0;
+ case CFCTRL_SRV_UTIL:
+		return p1->u.utility.fifosize_kb == p2->u.utility.fifosize_kb &&
+		       p1->u.utility.fifosize_bufs ==
+		       p2->u.utility.fifosize_bufs &&
+		       strcmp(p1->u.utility.name, p2->u.utility.name) == 0 &&
+		       p1->u.utility.paramlen == p2->u.utility.paramlen &&
+		       memcmp(p1->u.utility.params, p2->u.utility.params,
+			      p1->u.utility.paramlen) == 0;
+
+ case CFCTRL_SRV_VIDEO:
+ return p1->u.video.connid == p2->u.video.connid;
+ case CFCTRL_SRV_DBG:
+ return true;
+ case CFCTRL_SRV_DECM:
+ return false;
+ default:
+ return false;
+ }
+}
+
+bool cfctrl_req_eq(struct cfctrl_request_info *r1,
+ struct cfctrl_request_info *r2)
+{
+ if (r1->cmd != r2->cmd)
+ return false;
+ if (r1->cmd == CFCTRL_CMD_LINK_SETUP)
+ return param_eq(&r1->param, &r2->param);
+ else
+ return r1->channel_id == r2->channel_id;
+}
+
+/* Insert request at the end */
+void cfctrl_insert_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ spin_lock(&ctrl->info_list_lock);
+ req->next = NULL;
+ atomic_inc(&ctrl->req_seq_no);
+ req->sequence_no = atomic_read(&ctrl->req_seq_no);
+ if (ctrl->first_req == NULL) {
+ ctrl->first_req = req;
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+ p = ctrl->first_req;
+ while (p->next != NULL)
+ p = p->next;
+ p->next = req;
+ spin_unlock(&ctrl->info_list_lock);
+}
+
+/* Compare and remove request */
+struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
+ struct cfctrl_request_info *req)
+{
+ struct cfctrl_request_info *p;
+ struct cfctrl_request_info *ret;
+
+ spin_lock(&ctrl->info_list_lock);
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return NULL;
+ }
+
+ if (cfctrl_req_eq(req, ctrl->first_req)) {
+ ret = ctrl->first_req;
+ caif_assert(ctrl->first_req);
+ atomic_set(&ctrl->rsp_seq_no,
+ ctrl->first_req->sequence_no);
+ ctrl->first_req = ctrl->first_req->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+
+ p = ctrl->first_req;
+
+ while (p->next != NULL) {
+ if (cfctrl_req_eq(req, p->next)) {
+ pr_warning("CAIF: %s(): Requests are not "
+ "received in order\n",
+ __func__);
+ ret = p->next;
+ atomic_set(&ctrl->rsp_seq_no,
+ p->next->sequence_no);
+ p->next = p->next->next;
+ spin_unlock(&ctrl->info_list_lock);
+ return ret;
+ }
+ p = p->next;
+ }
+ spin_unlock(&ctrl->info_list_lock);
+
+ pr_warning("CAIF: %s(): Request does not match\n",
+ __func__);
+ return NULL;
+}
+
+struct cfctrl_rsp *cfctrl_get_respfuncs(struct cflayer *layer)
+{
+ struct cfctrl *this = container_obj(layer);
+ return &this->res;
+}
+
+void cfctrl_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+void cfctrl_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+static void init_info(struct caif_payload_info *info, struct cfctrl *cfctrl)
+{
+ info->hdr_len = 0;
+ info->channel_id = cfctrl->serv.layer.id;
+ info->dev_info = &cfctrl->serv.dev_info;
+}
+
+void cfctrl_enum_req(struct cflayer *layer, u8 physlinkid)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ int ret;
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ caif_assert(offsetof(struct cfctrl, serv.layer) == 0);
+ init_info(cfpkt_info(pkt), cfctrl);
+ cfpkt_info(pkt)->dev_info->id = physlinkid;
+ cfctrl->serv.dev_info.id = physlinkid;
+ cfpkt_addbdy(pkt, CFCTRL_CMD_ENUM);
+ cfpkt_addbdy(pkt, physlinkid);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit enum message\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+}
+
+int cfctrl_linkup_request(struct cflayer *layer,
+ struct cfctrl_link_param *param,
+ struct cflayer *user_layer)
+{
+ struct cfctrl *cfctrl = container_obj(layer);
+ u32 tmp32;
+ u16 tmp16;
+ u8 tmp8;
+ struct cfctrl_request_info *req;
+ int ret;
+ char utility_name[16];
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
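+	/*
+	 * Link-setup request layout, one byte per field:
+	 * [cmd] [chtype:4|linktype:4] [priority:5|phyid:3] [endpoint:2],
+	 * followed by the service-specific parameters added below.
+	 */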
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_SETUP);
+ cfpkt_addbdy(pkt, (param->chtype << 4) + param->linktype);
+ cfpkt_addbdy(pkt, (param->priority << 3) + param->phyid);
+ cfpkt_addbdy(pkt, param->endpoint & 0x03);
+
+ switch (param->linktype) {
+ case CFCTRL_SRV_VEI:
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_addbdy(pkt, (u8) param->u.video.connid);
+ break;
+ case CFCTRL_SRV_DBG:
+ break;
+ case CFCTRL_SRV_DATAGRAM:
+ tmp32 = cpu_to_le32(param->u.datagram.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ break;
+ case CFCTRL_SRV_RFM:
+		/* Convert the RFM connection ID to little-endian
+		 * format and copy it out...
+		 */
+ tmp32 = cpu_to_le32(param->u.rfm.connid);
+ cfpkt_add_body(pkt, &tmp32, 4);
+ /* Add volume name, including zero termination... */
+ cfpkt_add_body(pkt, param->u.rfm.volume,
+ strlen(param->u.rfm.volume) + 1);
+ break;
+ case CFCTRL_SRV_UTIL:
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_kb);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ tmp16 = cpu_to_le16(param->u.utility.fifosize_bufs);
+ cfpkt_add_body(pkt, &tmp16, 2);
+ memset(utility_name, 0, sizeof(utility_name));
+ strncpy(utility_name, param->u.utility.name,
+ UTILITY_NAME_LENGTH - 1);
+ cfpkt_add_body(pkt, utility_name, UTILITY_NAME_LENGTH);
+ tmp8 = param->u.utility.paramlen;
+ cfpkt_add_body(pkt, &tmp8, 1);
+ cfpkt_add_body(pkt, param->u.utility.params,
+ param->u.utility.paramlen);
+ break;
+ default:
+		pr_warning("CAIF: %s(): Request setup of bad link type = %d\n",
+			   __func__, param->linktype);
+		cfpkt_destroy(pkt);
+		return -EINVAL;
+ }
+ req = kmalloc(sizeof(*req), GFP_KERNEL);
+ if (!req) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+		cfpkt_destroy(pkt);
+		return -ENOMEM;
+ }
+ memset(req, 0, sizeof(*req));
+ req->client_layer = user_layer;
+ req->cmd = CFCTRL_CMD_LINK_SETUP;
+ req->param = *param;
+ cfctrl_insert_req(cfctrl, req);
+ init_info(cfpkt_info(pkt), cfctrl);
+ /*
+ * NOTE:Always send linkup and linkdown request on the same
+ * device as the payload. Otherwise old queued up payload
+ * might arrive with the newly allocated channel ID.
+ */
+ cfpkt_info(pkt)->dev_info->id = param->phyid;
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit linksetup request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+int cfctrl_linkdown_req(struct cflayer *layer, u8 channelid,
+ struct cflayer *client)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return -ENOMEM;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_LINK_DESTROY);
+ cfpkt_addbdy(pkt, channelid);
+ init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0) {
+ pr_err("CAIF: %s(): Could not transmit link-down request\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ }
+ return ret;
+}
+
+void cfctrl_sleep_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_SLEEP);
+ init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_wake_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_WAKE);
+ init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+void cfctrl_getstartreason_req(struct cflayer *layer)
+{
+ int ret;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfpkt *pkt = cfpkt_create(CFPKT_CTRL_PKT_LEN);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return;
+ }
+ cfpkt_addbdy(pkt, CFCTRL_CMD_START_REASON);
+ init_info(cfpkt_info(pkt), cfctrl);
+	ret = cfctrl->serv.layer.dn->transmit(cfctrl->serv.layer.dn, pkt);
+ if (ret < 0)
+ cfpkt_destroy(pkt);
+}
+
+
+void cfctrl_cancel_req(struct cflayer *layr, struct cflayer *adap_layer)
+{
+ struct cfctrl_request_info *p, *req;
+ struct cfctrl *ctrl = container_obj(layr);
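+	/* Drop every pending request owned by this adaptation layer. */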
+ spin_lock(&ctrl->info_list_lock);
+
+ if (ctrl->first_req == NULL) {
+ spin_unlock(&ctrl->info_list_lock);
+ return;
+ }
+
+ if (ctrl->first_req->client_layer == adap_layer) {
+
+ req = ctrl->first_req;
+ ctrl->first_req = ctrl->first_req->next;
+ kfree(req);
+ }
+
+ p = ctrl->first_req;
+ while (p != NULL && p->next != NULL) {
+ if (p->next->client_layer == adap_layer) {
+
+ req = p->next;
+ p->next = p->next->next;
+			kfree(req);
+ }
+ p = p->next;
+ }
+
+ spin_unlock(&ctrl->info_list_lock);
+}
+
+static int cfctrl_recv(struct cflayer *layer, struct cfpkt *pkt)
+{
+ u8 cmdrsp;
+ u8 cmd;
+ int ret = -1;
+ u16 tmp16;
+ u8 len;
+ u8 param[255];
+ u8 linkid;
+ struct cfctrl *cfctrl = container_obj(layer);
+ struct cfctrl_request_info rsp, *req;
+
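+	/*
+	 * The first byte carries the command in its low bits plus the
+	 * response (CFCTRL_RSP_BIT) and error (CFCTRL_ERR_BIT) flags.
+	 * Anything but a response (except link-error indications) is
+	 * fed to the loopback handler first.
+	 */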
+ cfpkt_extr_head(pkt, &cmdrsp, 1);
+ cmd = cmdrsp & CFCTRL_CMD_MASK;
+ if (cmd != CFCTRL_CMD_LINK_ERR
+ && CFCTRL_RSP_BIT != (CFCTRL_RSP_BIT & cmdrsp)) {
+ if (handle_loop(cfctrl, cmd, pkt) == CAIF_FAILURE)
+ cmdrsp |= CFCTRL_ERR_BIT;
+ }
+
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
+ {
+ enum cfctrl_srv serv;
+ enum cfctrl_srv servtype;
+ u8 endpoint;
+ u8 physlinkid;
+ u8 prio;
+ u8 tmp;
+ u32 tmp32;
+ u8 *cp;
+ int i;
+ struct cfctrl_link_param linkparam;
+ memset(&linkparam, 0, sizeof(linkparam));
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+
+ serv = tmp & CFCTRL_SRV_MASK;
+ linkparam.linktype = serv;
+
+ servtype = tmp >> 4;
+ linkparam.chtype = servtype;
+
+ cfpkt_extr_head(pkt, &tmp, 1);
+ physlinkid = tmp & 0x07;
+ prio = tmp >> 3;
+
+ linkparam.priority = prio;
+ linkparam.phyid = physlinkid;
+ cfpkt_extr_head(pkt, &endpoint, 1);
+ linkparam.endpoint = endpoint & 0x03;
+
+ switch (serv) {
+ case CFCTRL_SRV_VEI:
+ case CFCTRL_SRV_DBG:
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_VIDEO:
+ cfpkt_extr_head(pkt, &tmp, 1);
+ linkparam.u.video.connid = tmp;
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+
+ case CFCTRL_SRV_DATAGRAM:
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.datagram.connid =
+ le32_to_cpu(tmp32);
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ break;
+ case CFCTRL_SRV_RFM:
+				/* Extract the connection ID (little-endian)
+				 * followed by the zero-terminated volume
+				 * name.
+				 */
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ linkparam.u.rfm.connid =
+ le32_to_cpu(tmp32);
+ cp = (u8 *) linkparam.u.rfm.volume;
+ for (cfpkt_extr_head(pkt, &tmp, 1);
+ cfpkt_more(pkt) && tmp != '\0';
+ cfpkt_extr_head(pkt, &tmp, 1))
+ *cp++ = tmp;
+ *cp = '\0';
+
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+
+ break;
+ case CFCTRL_SRV_UTIL:
+				/* Extract the utility-link parameters:
+				 * fifo sizes, service name and the
+				 * parameter blob.
+				 */
+ /* Fifosize KB */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_kb =
+ le16_to_cpu(tmp16);
+ /* Fifosize bufs */
+ cfpkt_extr_head(pkt, &tmp16, 2);
+ linkparam.u.utility.fifosize_bufs =
+ le16_to_cpu(tmp16);
+ /* name */
+ cp = (u8 *) linkparam.u.utility.name;
+ caif_assert(sizeof(linkparam.u.utility.name)
+ >= UTILITY_NAME_LENGTH);
+ for (i = 0;
+ i < UTILITY_NAME_LENGTH
+ && cfpkt_more(pkt); i++) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ linkparam.u.utility.paramlen = len;
+ /* Param Data */
+ cp = linkparam.u.utility.params;
+ while (cfpkt_more(pkt) && len--) {
+ cfpkt_extr_head(pkt, &tmp, 1);
+ *cp++ = tmp;
+ }
+ if (CFCTRL_ERR_BIT & cmdrsp)
+ break;
+ /* Link ID */
+ cfpkt_extr_head(pkt, &linkid, 1);
+ /* Length */
+ cfpkt_extr_head(pkt, &len, 1);
+ /* Param Data */
+ cfpkt_extr_head(pkt, &param, len);
+ break;
+ default:
+ pr_warning("CAIF: %s(): Request setup "
+					   "- invalid link type (%d)\n",
+ __func__, serv);
+ goto error;
+ }
+
+ rsp.cmd = cmd;
+ rsp.param = linkparam;
+ req = cfctrl_remove_req(cfctrl, &rsp);
+
+ if (CFCTRL_ERR_BIT == (CFCTRL_ERR_BIT & cmdrsp) ||
+ cfpkt_erroneous(pkt)) {
+			pr_err("CAIF: %s(): Invalid O/E bit or parse "
+			       "error on CAIF control channel\n",
+			       __func__);
+			cfctrl->res.reject_rsp(cfctrl->serv.layer.up, 0,
+					       req ? req->client_layer :
+					       NULL);
+ } else {
+			cfctrl->res.linksetup_rsp(cfctrl->serv.layer.up,
+						  linkid, serv, physlinkid,
+						  req ? req->client_layer :
+						  NULL);
+ }
+
+			kfree(req);
+ }
+ break;
+ case CFCTRL_CMD_LINK_DESTROY:
+ cfpkt_extr_head(pkt, &linkid, 1);
+ cfctrl->res.linkdestroy_rsp(cfctrl->serv.layer.up, linkid);
+ break;
+ case CFCTRL_CMD_LINK_ERR:
+ pr_err("CAIF: %s(): Frame Error Indication received\n",
+ __func__);
+ cfctrl->res.linkerror_ind();
+ break;
+ case CFCTRL_CMD_ENUM:
+ cfctrl->res.enum_rsp();
+ break;
+ case CFCTRL_CMD_SLEEP:
+ cfctrl->res.sleep_rsp();
+ break;
+ case CFCTRL_CMD_WAKE:
+ cfctrl->res.wake_rsp();
+ break;
+ case CFCTRL_CMD_LINK_RECONF:
+ cfctrl->res.restart_rsp();
+ break;
+ case CFCTRL_CMD_RADIO_SET:
+ cfctrl->res.radioset_rsp();
+ break;
+ default:
+ pr_err("CAIF: %s(): Unrecognized Control Frame\n", __func__);
+ goto error;
+ }
+ ret = 0;
+error:
+ cfpkt_destroy(pkt);
+ return ret;
+}
+
+static void cfctrl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfctrl *this = container_obj(layr);
+ switch (ctrl) {
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ spin_lock(&this->info_list_lock);
+		if (this->first_req != NULL)
+			pr_debug("CAIF: %s(): Received flow off in "
+				 "control layer\n", __func__);
+ spin_unlock(&this->info_list_lock);
+ break;
+ default:
+ break;
+ }
+}
+
+#ifndef CAIF_NO_LOOP
+static int handle_loop(struct cfctrl *ctrl, int cmd, struct cfpkt *pkt)
+{
+ static int last_linkid;
+ u8 linkid, linktype, tmp;
+ switch (cmd) {
+ case CFCTRL_CMD_LINK_SETUP:
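+		/*
+		 * Loopback mode: allocate a free link-id locally instead
+		 * of asking the modem, scanning upwards from the last id
+		 * used and then downwards before giving up.
+		 */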
+ spin_lock(&ctrl->loop_linkid_lock);
+ for (linkid = last_linkid + 1; linkid < 255; linkid++)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ for (linkid = last_linkid - 1; linkid > 0; linkid--)
+ if (!ctrl->loop_linkused[linkid])
+ goto found;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ pr_err("CAIF: %s(): Out of link-ids\n", __func__);
+		return CAIF_FAILURE;
+found:
+	ctrl->loop_linkused[linkid] = 1;
+
+ last_linkid = linkid;
+
+ cfpkt_add_trail(pkt, &linkid, 1);
+ spin_unlock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linktype, 1);
+ if (linktype == CFCTRL_SRV_UTIL) {
+ tmp = 0x01;
+ cfpkt_add_trail(pkt, &tmp, 1);
+ cfpkt_add_trail(pkt, &tmp, 1);
+ }
+ break;
+
+ case CFCTRL_CMD_LINK_DESTROY:
+ spin_lock(&ctrl->loop_linkid_lock);
+ cfpkt_peek_head(pkt, &linkid, 1);
+ ctrl->loop_linkused[linkid] = 0;
+ spin_unlock(&ctrl->loop_linkid_lock);
+ break;
+ default:
+ break;
+ }
+ return CAIF_SUCCESS;
+}
+#endif
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
new file mode 100644
index 000000000000..ab6b6dc34cf8
--- /dev/null
+++ b/net/caif/cfdbgl.c
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdbgl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dbg = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dbg) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(dbg, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(dbg, channel_id, dev_info);
+ dbg->layer.receive = cfdbgl_receive;
+ dbg->layer.transmit = cfdbgl_transmit;
+ snprintf(dbg->layer.name, CAIF_LAYER_NAME_SZ - 1, "dbg%d", channel_id);
+ return &dbg->layer;
+}
+
+static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ return layr->dn->transmit(layr->dn, pkt);
+}
diff --git a/net/caif/cfdgml.c b/net/caif/cfdgml.c
new file mode 100644
index 000000000000..53194840ecb6
--- /dev/null
+++ b/net/caif/cfdgml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
+#define DGM_CMD_BIT 0x80
+#define DGM_FLOW_OFF 0x81
+#define DGM_FLOW_ON 0x80
+#define DGM_CTRL_PKT_SIZE 1
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfdgml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *dgm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!dgm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(dgm, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(dgm, channel_id, dev_info);
+ dgm->layer.receive = cfdgml_receive;
+ dgm->layer.transmit = cfdgml_transmit;
+ snprintf(dgm->layer.name, CAIF_LAYER_NAME_SZ - 1, "dgm%d", channel_id);
+ dgm->layer.name[CAIF_LAYER_NAME_SZ - 1] = '\0';
+ return &dgm->layer;
+}
+
+static int cfdgml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ u8 dgmhdr[3];
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
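+	/* Command bit cleared means payload: strip the remaining three
+	 * header bytes and deliver. Command bit set means flow control.
+	 */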
+ if ((cmd & DGM_CMD_BIT) == 0) {
+ if (cfpkt_extr_head(pkt, &dgmhdr, 3) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ }
+
+ switch (cmd) {
+ case DGM_FLOW_OFF: /* FLOW OFF */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case DGM_FLOW_ON: /* FLOW ON */
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_info("CAIF: %s(): Unknown datagram control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfdgml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 zero = 0;
+ struct caif_payload_info *info;
+ struct cfsrvl *service = container_obj(layr);
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ cfpkt_add_head(pkt, &zero, 4);
+
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /* To optimize alignment, we add up the size of CAIF header
+ * before payload.
+ */
+ info->hdr_len = 4;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u32 tmp32;
+ cfpkt_extr_head(pkt, &tmp32, 4);
+ }
+ return ret;
+}
diff --git a/net/caif/cffrml.c b/net/caif/cffrml.c
new file mode 100644
index 000000000000..e86a4ca3b217
--- /dev/null
+++ b/net/caif/cffrml.c
@@ -0,0 +1,151 @@
+/*
+ * CAIF Framing Layer.
+ *
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <linux/crc-ccitt.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cffrml, layer)
+
+struct cffrml {
+ struct cflayer layer;
+	bool dofcs;		/* FCS active */
+};
+
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+static u32 cffrml_rcv_error;
+static u32 cffrml_rcv_checksum_error;
+
+struct cflayer *cffrml_create(u16 phyid, bool use_fcs)
+{
+ struct cffrml *this = kmalloc(sizeof(struct cffrml), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cffrml, layer) == 0);
+
+	memset(this, 0, sizeof(struct cffrml));
+ this->layer.receive = cffrml_receive;
+ this->layer.transmit = cffrml_transmit;
+ this->layer.ctrlcmd = cffrml_ctrlcmd;
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "frm%d", phyid);
+ this->dofcs = use_fcs;
+ this->layer.id = phyid;
+ return (struct cflayer *) this;
+}
+
+void cffrml_set_uplayer(struct cflayer *this, struct cflayer *up)
+{
+ this->up = up;
+}
+
+void cffrml_set_dnlayer(struct cflayer *this, struct cflayer *dn)
+{
+ this->dn = dn;
+}
+
+static u16 cffrml_checksum(u16 chks, void *buf, u16 len)
+{
+ /* FIXME: FCS should be moved to glue in order to use OS-Specific
+ * solutions
+ */
+ return crc_ccitt(chks, buf, len);
+}
+
+static int cffrml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u16 tmp;
+ u16 len;
+ u16 hdrchks;
+ u16 pktchks;
+ struct cffrml *this;
+ this = container_obj(layr);
+
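+	/*
+	 * A frame starts with a 2-byte little-endian length field and,
+	 * when FCS is enabled, ends with a 2-byte checksum trailer.
+	 */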
+ cfpkt_extr_head(pkt, &tmp, 2);
+ len = le16_to_cpu(tmp);
+
+ /* Subtract for FCS on length if FCS is not used. */
+ if (!this->dofcs)
+ len -= 2;
+
+ if (cfpkt_setlen(pkt, len) < 0) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s():Framing length error (%d)\n", __func__, len);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ /*
+ * Don't do extract if FCS is false, rather do setlen - then we don't
+ * get a cache-miss.
+ */
+ if (this->dofcs) {
+ cfpkt_extr_trail(pkt, &tmp, 2);
+ hdrchks = le16_to_cpu(tmp);
+ pktchks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ if (pktchks != hdrchks) {
+ cfpkt_add_trail(pkt, &tmp, 2);
+ ++cffrml_rcv_error;
+			++cffrml_rcv_checksum_error;
+ pr_info("CAIF: %s(): Frame checksum error "
+ "(0x%x != 0x%x)\n", __func__, hdrchks, pktchks);
+ return -EILSEQ;
+ }
+ }
+ if (cfpkt_erroneous(pkt)) {
+ ++cffrml_rcv_error;
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cffrml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+	u16 tmp;
+ u16 chks;
+ u16 len;
+ int ret;
+ struct cffrml *this = container_obj(layr);
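+	/* Append the FCS (or two pad bytes when FCS is off), then
+	 * prepend the length field.
+	 */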
+ if (this->dofcs) {
+ chks = cfpkt_iterate(pkt, cffrml_checksum, 0xffff);
+ tmp = cpu_to_le16(chks);
+ cfpkt_add_trail(pkt, &tmp, 2);
+ } else {
+ cfpkt_pad_trail(pkt, 2);
+ }
+ len = cfpkt_getlen(pkt);
+ tmp = cpu_to_le16(len);
+ cfpkt_add_head(pkt, &tmp, 2);
+ cfpkt_info(pkt)->hdr_len += 2;
+ if (cfpkt_erroneous(pkt)) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ /* Remove header on faulty packet. */
+ cfpkt_extr_head(pkt, &tmp, 2);
+ }
+ return ret;
+}
+
+static void cffrml_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ if (layr->up->ctrlcmd)
+ layr->up->ctrlcmd(layr->up, ctrl, layr->id);
+}
diff --git a/net/caif/cfmuxl.c b/net/caif/cfmuxl.c
new file mode 100644
index 000000000000..7372f27f1d32
--- /dev/null
+++ b/net/caif/cfmuxl.c
@@ -0,0 +1,251 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfmuxl.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cffrml.h>
+
+#define container_obj(layr) container_of(layr, struct cfmuxl, layer)
+
+#define CAIF_CTRL_CHANNEL 0
+#define UP_CACHE_SIZE 8
+#define DN_CACHE_SIZE 8
+
+struct cfmuxl {
+ struct cflayer layer;
+ struct list_head srvl_list;
+ struct list_head frml_list;
+ struct cflayer *up_cache[UP_CACHE_SIZE];
+ struct cflayer *dn_cache[DN_CACHE_SIZE];
+ /*
+	 * Taken when inserting or removing downwards layers.
+ */
+ spinlock_t transmit_lock;
+
+ /*
+	 * Taken when inserting or removing upwards layers.
+ */
+ spinlock_t receive_lock;
+
+};
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id);
+
+struct cflayer *cfmuxl_create(void)
+{
+ struct cfmuxl *this = kmalloc(sizeof(struct cfmuxl), GFP_ATOMIC);
+ if (!this)
+ return NULL;
+ memset(this, 0, sizeof(*this));
+ this->layer.receive = cfmuxl_receive;
+ this->layer.transmit = cfmuxl_transmit;
+ this->layer.ctrlcmd = cfmuxl_ctrlcmd;
+ INIT_LIST_HEAD(&this->srvl_list);
+ INIT_LIST_HEAD(&this->frml_list);
+ spin_lock_init(&this->transmit_lock);
+ spin_lock_init(&this->receive_lock);
+ snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "mux");
+ return &this->layer;
+}
+
+int cfmuxl_set_uplayer(struct cflayer *layr, struct cflayer *up, u8 linkid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ cfsrvl_get(up);
+ list_add(&up->node, &muxl->srvl_list);
+ spin_unlock(&muxl->receive_lock);
+ return 0;
+}
+
+bool cfmuxl_is_phy_inuse(struct cflayer *layr, u8 phyid)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ struct cfmuxl *muxl = container_obj(layr);
+ bool match = false;
+ spin_lock(&muxl->receive_lock);
+
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid)) {
+ match = true;
+ break;
+ }
+
+ }
+ spin_unlock(&muxl->receive_lock);
+ return match;
+}
+
+u8 cfmuxl_get_phyid(struct cflayer *layr, u8 channel_id)
+{
+ struct cflayer *up;
+ int phyid;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, channel_id);
+ if (up != NULL)
+ phyid = cfsrvl_getphyid(up);
+ else
+ phyid = 0;
+ spin_unlock(&muxl->receive_lock);
+ return phyid;
+}
+
+int cfmuxl_set_dnlayer(struct cflayer *layr, struct cflayer *dn, u8 phyid)
+{
+ struct cfmuxl *muxl = (struct cfmuxl *) layr;
+ spin_lock(&muxl->transmit_lock);
+ list_add(&dn->node, &muxl->frml_list);
+ spin_unlock(&muxl->transmit_lock);
+ return 0;
+}
+
+static struct cflayer *get_from_id(struct list_head *list, u16 id)
+{
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (layer->id == id)
+ return layer;
+ }
+ return NULL;
+}
+
+struct cflayer *cfmuxl_remove_dnlayer(struct cflayer *layr, u8 phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct cflayer *dn;
+ spin_lock(&muxl->transmit_lock);
+ memset(muxl->dn_cache, 0, sizeof(muxl->dn_cache));
+ dn = get_from_id(&muxl->frml_list, phyid);
+ if (dn == NULL) {
+ spin_unlock(&muxl->transmit_lock);
+ return NULL;
+ }
+ list_del(&dn->node);
+ spin_unlock(&muxl->transmit_lock);
+ return dn;
+}
+
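+/*
+ * Channel lookups go through a small direct-mapped cache indexed by
+ * id modulo the cache size; on a miss the full list is searched and
+ * the cache slot is refreshed.
+ */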
+/* Invariant: lock is taken */
+static struct cflayer *get_up(struct cfmuxl *muxl, u16 id)
+{
+ struct cflayer *up;
+ int idx = id % UP_CACHE_SIZE;
+ up = muxl->up_cache[idx];
+ if (up == NULL || up->id != id) {
+ up = get_from_id(&muxl->srvl_list, id);
+ muxl->up_cache[idx] = up;
+ }
+ return up;
+}
+
+/* Invariant: lock is taken */
+static struct cflayer *get_dn(struct cfmuxl *muxl, struct dev_info *dev_info)
+{
+ struct cflayer *dn;
+ int idx = dev_info->id % DN_CACHE_SIZE;
+ dn = muxl->dn_cache[idx];
+ if (dn == NULL || dn->id != dev_info->id) {
+ dn = get_from_id(&muxl->frml_list, dev_info->id);
+ muxl->dn_cache[idx] = dn;
+ }
+ return dn;
+}
+
+struct cflayer *cfmuxl_remove_uplayer(struct cflayer *layr, u8 id)
+{
+ struct cflayer *up;
+ struct cfmuxl *muxl = container_obj(layr);
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+	if (up == NULL) {
+		spin_unlock(&muxl->receive_lock);
+		return NULL;
+	}
+ memset(muxl->up_cache, 0, sizeof(muxl->up_cache));
+ list_del(&up->node);
+ cfsrvl_put(up);
+ spin_unlock(&muxl->receive_lock);
+ return up;
+}
+
+static int cfmuxl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 id;
+ struct cflayer *up;
+ if (cfpkt_extr_head(pkt, &id, 1) < 0) {
+ pr_err("CAIF: %s(): erroneous Caif Packet\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ spin_lock(&muxl->receive_lock);
+ up = get_up(muxl, id);
+ spin_unlock(&muxl->receive_lock);
+ if (up == NULL) {
+		pr_info("CAIF: %s(): Received data on unknown link ID = %d "
+			"(0x%x), up == NULL\n", __func__, id, id);
+ cfpkt_destroy(pkt);
+ /*
+ * Don't return ERROR, since modem misbehaves and sends out
+ * flow on before linksetup response.
+ */
+		return 0;
+ }
+ cfsrvl_get(up);
+ ret = up->receive(up, pkt);
+ cfsrvl_put(up);
+ return ret;
+}
+
+static int cfmuxl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ int ret;
+ struct cfmuxl *muxl = container_obj(layr);
+ u8 linkid;
+ struct cflayer *dn;
+ struct caif_payload_info *info = cfpkt_info(pkt);
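+	/* Route on the physical device id and prepend the channel id
+	 * as the one-byte MUX header.
+	 */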
+ dn = get_dn(muxl, cfpkt_info(pkt)->dev_info);
+ if (dn == NULL) {
+ pr_warning("CAIF: %s(): Send data on unknown phy "
+ "ID = %d (0x%x)\n",
+ __func__, info->dev_info->id, info->dev_info->id);
+ return -ENOTCONN;
+ }
+ info->hdr_len += 1;
+ linkid = info->channel_id;
+ cfpkt_add_head(pkt, &linkid, 1);
+ ret = dn->transmit(dn, pkt);
+ /* Remove MUX protocol header upon error. */
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &linkid, 1);
+ return ret;
+}
+
+static void cfmuxl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfmuxl *muxl = container_obj(layr);
+ struct list_head *node;
+ struct cflayer *layer;
+ list_for_each(node, &muxl->srvl_list) {
+ layer = list_entry(node, struct cflayer, node);
+ if (cfsrvl_phyid_match(layer, phyid))
+ layer->ctrlcmd(layer, ctrl, phyid);
+ }
+}
diff --git a/net/caif/cfpkt_skbuff.c b/net/caif/cfpkt_skbuff.c
new file mode 100644
index 000000000000..83fff2ff6658
--- /dev/null
+++ b/net/caif/cfpkt_skbuff.c
@@ -0,0 +1,571 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <linux/hardirq.h>
+#include <net/caif/cfpkt.h>
+
+#define PKT_PREFIX CAIF_NEEDED_HEADROOM
+#define PKT_POSTFIX CAIF_NEEDED_TAILROOM
+#define PKT_LEN_WHEN_EXTENDING 128
+#define PKT_ERROR(pkt, errmsg) do { \
+ cfpkt_priv(pkt)->erronous = true; \
+ skb_reset_tail_pointer(&pkt->skb); \
+ pr_warning("CAIF: " errmsg);\
+ } while (0)
+
+struct cfpktq {
+ struct sk_buff_head head;
+ atomic_t count;
+ /* Lock protects count updates */
+ spinlock_t lock;
+};
+
+/*
+ * net/caif/ is generic and does not
+ * understand SKB, so we do this typecast
+ */
+struct cfpkt {
+ struct sk_buff skb;
+};
+
+/* Private data inside SKB */
+struct cfpkt_priv_data {
+ struct dev_info dev_info;
+ bool erronous;
+};
+
+inline struct cfpkt_priv_data *cfpkt_priv(struct cfpkt *pkt)
+{
+ return (struct cfpkt_priv_data *) pkt->skb.cb;
+}
+
+inline bool is_erronous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+
+inline struct sk_buff *pkt_to_skb(struct cfpkt *pkt)
+{
+ return &pkt->skb;
+}
+
+inline struct cfpkt *skb_to_pkt(struct sk_buff *skb)
+{
+ return (struct cfpkt *) skb;
+}
+
+
+struct cfpkt *cfpkt_fromnative(enum caif_direction dir, void *nativepkt)
+{
+ struct cfpkt *pkt = skb_to_pkt(nativepkt);
+ cfpkt_priv(pkt)->erronous = false;
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_fromnative);
+
+void *cfpkt_tonative(struct cfpkt *pkt)
+{
+ return (void *) pkt;
+}
+EXPORT_SYMBOL(cfpkt_tonative);
+
+static struct cfpkt *cfpkt_create_pfx(u16 len, u16 pfx)
+{
+ struct sk_buff *skb;
+
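+	/* Atomic allocation in (soft)irq context, sleeping allocation
+	 * otherwise.
+	 */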
+ if (likely(in_interrupt()))
+ skb = alloc_skb(len + pfx, GFP_ATOMIC);
+ else
+ skb = alloc_skb(len + pfx, GFP_KERNEL);
+
+ if (unlikely(skb == NULL))
+ return NULL;
+
+ skb_reserve(skb, pfx);
+ return skb_to_pkt(skb);
+}
+
+inline struct cfpkt *cfpkt_create(u16 len)
+{
+ return cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+}
+EXPORT_SYMBOL(cfpkt_create);
+
+void cfpkt_destroy(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ kfree_skb(skb);
+}
+EXPORT_SYMBOL(cfpkt_destroy);
+
+inline bool cfpkt_more(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len > 0;
+}
+EXPORT_SYMBOL(cfpkt_more);
+
+int cfpkt_peek_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ if (skb_headlen(skb) >= len) {
+ memcpy(data, skb->data, len);
+ return 0;
+ }
+ return !cfpkt_extr_head(pkt, data, len) &&
+ !cfpkt_add_head(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_peek_head);
+
+int cfpkt_extr_head(struct cfpkt *pkt, void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(len > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head read beyond end of packet\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(len > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_head linearize failed\n");
+ return -EPROTO;
+ }
+ }
+ from = skb_pull(skb, len);
+ from -= len;
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_head);
+
+int cfpkt_extr_trail(struct cfpkt *pkt, void *dta, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *data = dta;
+ u8 *from;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail linearize failed\n");
+ return -EPROTO;
+ }
+ if (unlikely(skb->data + len > skb_tail_pointer(skb))) {
+ PKT_ERROR(pkt, "cfpkt_extr_trail read beyond end of packet\n");
+ return -EPROTO;
+ }
+ from = skb_tail_pointer(skb) - len;
+ skb_trim(skb, skb->len - len);
+ memcpy(data, from, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_extr_trail);
+
+int cfpkt_pad_trail(struct cfpkt *pkt, u16 len)
+{
+ return cfpkt_add_body(pkt, NULL, len);
+}
+EXPORT_SYMBOL(cfpkt_pad_trail);
+
+int cfpkt_add_body(struct cfpkt *pkt, const void *data, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ u16 addlen = 0;
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ lastskb = skb;
+
+ /* Check whether we need to add space at the tail */
+ if (unlikely(skb_tailroom(skb) < len)) {
+ if (likely(len < PKT_LEN_WHEN_EXTENDING))
+ addlen = PKT_LEN_WHEN_EXTENDING;
+ else
+ addlen = len;
+ }
+
+ /* Check whether we need to change the SKB before writing to the tail */
+ if (unlikely((addlen > 0) || skb_cloned(skb) || skb_shared(skb))) {
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, addlen, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_body: cow failed\n");
+ return -EPROTO;
+ }
+ /*
+ * Is the SKB non-linear after skb_cow_data()? If so, we are
+ * going to add data to the last SKB, so we need to adjust
+ * lengths of the top SKB.
+ */
+ if (lastskb != skb) {
+ pr_warning("CAIF: %s(): Packet is non-linear\n",
+ __func__);
+ skb->len += len;
+ skb->data_len += len;
+ }
+ }
+
+ /* All set to put the last SKB and optionally write data there. */
+ to = skb_put(lastskb, len);
+ if (likely(data))
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_body);
+
+inline int cfpkt_addbdy(struct cfpkt *pkt, u8 data)
+{
+ return cfpkt_add_body(pkt, &data, 1);
+}
+EXPORT_SYMBOL(cfpkt_addbdy);
+
+int cfpkt_add_head(struct cfpkt *pkt, const void *data2, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+ u8 *to;
+ const u8 *data = data2;
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_headroom(skb) < len)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: no headroom\n");
+ return -EPROTO;
+ }
+
+ /* Make sure data is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_add_head: cow failed\n");
+ return -EPROTO;
+ }
+
+ to = skb_push(skb, len);
+ memcpy(to, data, len);
+ return 0;
+}
+EXPORT_SYMBOL(cfpkt_add_head);
+
+inline int cfpkt_add_trail(struct cfpkt *pkt, const void *data, u16 len)
+{
+ return cfpkt_add_body(pkt, data, len);
+}
+EXPORT_SYMBOL(cfpkt_add_trail);
+
+inline u16 cfpkt_getlen(struct cfpkt *pkt)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ return skb->len;
+}
+EXPORT_SYMBOL(cfpkt_getlen);
+
+inline u16 cfpkt_iterate(struct cfpkt *pkt,
+ u16 (*iter_func)(u16, void *, u16),
+ u16 data)
+{
+ /*
+ * Don't care about the performance hit of linearizing,
+ * Checksum should not be used on high-speed interfaces anyway.
+ */
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ if (unlikely(skb_linearize(&pkt->skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_iterate: linearize failed\n");
+ return -EPROTO;
+ }
+ return iter_func(data, pkt->skb.data, cfpkt_getlen(pkt));
+}
+EXPORT_SYMBOL(cfpkt_iterate);
+
+int cfpkt_setlen(struct cfpkt *pkt, u16 len)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (likely(len <= skb->len)) {
+ if (unlikely(skb->data_len))
+ ___pskb_trim(skb, len);
+ else
+ skb_trim(skb, len);
+
+ return cfpkt_getlen(pkt);
+ }
+
+ /* Need to expand SKB */
+	if (unlikely(cfpkt_pad_trail(pkt, len - skb->len) != 0))
+ PKT_ERROR(pkt, "cfpkt_setlen: skb_pad_trail failed\n");
+
+ return cfpkt_getlen(pkt);
+}
+EXPORT_SYMBOL(cfpkt_setlen);
+
+struct cfpkt *cfpkt_create_uplink(const unsigned char *data, unsigned int len)
+{
+	struct cfpkt *pkt = cfpkt_create_pfx(len + PKT_POSTFIX, PKT_PREFIX);
+	if (unlikely(pkt == NULL))
+		return NULL;
+	if (data != NULL)
+		cfpkt_add_body(pkt, data, len);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_create_uplink);
+
+struct cfpkt *cfpkt_append(struct cfpkt *dstpkt,
+ struct cfpkt *addpkt,
+ u16 expectlen)
+{
+ struct sk_buff *dst = pkt_to_skb(dstpkt);
+ struct sk_buff *add = pkt_to_skb(addpkt);
+ u16 addlen = skb_headlen(add);
+ u16 neededtailspace;
+ struct sk_buff *tmp;
+ u16 dstlen;
+ u16 createlen;
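+	/*
+	 * Append the head data of 'addpkt' to 'dstpkt', re-allocating
+	 * 'dstpkt' with at least 'expectlen' bytes of tailroom when it
+	 * does not fit; 'addpkt' is consumed except when the
+	 * re-allocation fails.
+	 */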
+ if (unlikely(is_erronous(dstpkt) || is_erronous(addpkt))) {
+ cfpkt_destroy(addpkt);
+ return dstpkt;
+ }
+ if (expectlen > addlen)
+ neededtailspace = expectlen;
+ else
+ neededtailspace = addlen;
+
+ if (dst->tail + neededtailspace > dst->end) {
+		/* Create a duplicate of 'dst' with more tail space */
+ dstlen = skb_headlen(dst);
+ createlen = dstlen + neededtailspace;
+ tmp = pkt_to_skb(
+ cfpkt_create(createlen + PKT_PREFIX + PKT_POSTFIX));
+ if (!tmp)
+ return NULL;
+ skb_set_tail_pointer(tmp, dstlen);
+ tmp->len = dstlen;
+ memcpy(tmp->data, dst->data, dstlen);
+ cfpkt_destroy(dstpkt);
+ dst = tmp;
+ }
+ memcpy(skb_tail_pointer(dst), add->data, skb_headlen(add));
+ cfpkt_destroy(addpkt);
+ dst->tail += addlen;
+ dst->len += addlen;
+ return skb_to_pkt(dst);
+}
+EXPORT_SYMBOL(cfpkt_append);
+
+struct cfpkt *cfpkt_split(struct cfpkt *pkt, u16 pos)
+{
+ struct sk_buff *skb2;
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ u8 *split = skb->data + pos;
+ u16 len2nd = skb_tail_pointer(skb) - split;
+
+ if (unlikely(is_erronous(pkt)))
+ return NULL;
+
+ if (skb->data + pos > skb_tail_pointer(skb)) {
+ PKT_ERROR(pkt,
+			"cfpkt_split: trying to split beyond end of packet\n");
+ return NULL;
+ }
+
+ /* Create a new packet for the second part of the data */
+ skb2 = pkt_to_skb(
+ cfpkt_create_pfx(len2nd + PKT_PREFIX + PKT_POSTFIX,
+ PKT_PREFIX));
+
+ if (skb2 == NULL)
+ return NULL;
+
+ /* Reduce the length of the original packet */
+ skb_set_tail_pointer(skb, pos);
+ skb->len = pos;
+
+ memcpy(skb2->data, split, len2nd);
+ skb2->tail += len2nd;
+ skb2->len += len2nd;
+ return skb_to_pkt(skb2);
+}
+EXPORT_SYMBOL(cfpkt_split);
+
+char *cfpkt_log_pkt(struct cfpkt *pkt, char *buf, int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ char *p = buf;
+ int i;
+
+ /*
+ * Sanity check buffer length, it needs to be at least as large as
+ * the header info: ~=50+ bytes
+ */
+ if (buflen < 50)
+ return NULL;
+
+ snprintf(buf, buflen, "%s: pkt:%p len:%ld(%ld+%ld) {%ld,%ld} data: [",
+ is_erronous(pkt) ? "ERRONOUS-SKB" :
+ (skb->data_len != 0 ? "COMPLEX-SKB" : "SKB"),
+ skb,
+ (long) skb->len,
+ (long) (skb_tail_pointer(skb) - skb->data),
+ (long) skb->data_len,
+ (long) (skb->data - skb->head),
+ (long) (skb_tail_pointer(skb) - skb->head));
+ p = buf + strlen(buf);
+
+ for (i = 0; i < skb_tail_pointer(skb) - skb->data && i < 300; i++) {
+ if (p > buf + buflen - 10) {
+ sprintf(p, "...");
+ p = buf + strlen(buf);
+ break;
+ }
+ sprintf(p, "%02x,", skb->data[i]);
+ p = buf + strlen(buf);
+ }
+ sprintf(p, "]\n");
+ return buf;
+}
+EXPORT_SYMBOL(cfpkt_log_pkt);
+
+int cfpkt_raw_append(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+ struct sk_buff *lastskb;
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+ /* Make sure SKB is writable */
+ if (unlikely(skb_cow_data(skb, 0, &lastskb) < 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: skb_cow_data failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: linearize failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(skb_tailroom(skb) < buflen)) {
+ PKT_ERROR(pkt, "cfpkt_raw_append: buffer too short - failed\n");
+ return -EPROTO;
+ }
+
+ *buf = skb_put(skb, buflen);
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_append);
+
+int cfpkt_raw_extract(struct cfpkt *pkt, void **buf, unsigned int buflen)
+{
+ struct sk_buff *skb = pkt_to_skb(pkt);
+
+ caif_assert(buf != NULL);
+ if (unlikely(is_erronous(pkt)))
+ return -EPROTO;
+
+ if (unlikely(buflen > skb->len)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: buflen too large "
+ "- failed\n");
+ return -EPROTO;
+ }
+
+ if (unlikely(buflen > skb_headlen(skb))) {
+ if (unlikely(skb_linearize(skb) != 0)) {
+ PKT_ERROR(pkt, "cfpkt_raw_extract: linearize failed\n");
+ return -EPROTO;
+ }
+ }
+
+ *buf = skb->data;
+ skb_pull(skb, buflen);
+
+ return 1;
+}
+EXPORT_SYMBOL(cfpkt_raw_extract);
+
+inline bool cfpkt_erroneous(struct cfpkt *pkt)
+{
+ return cfpkt_priv(pkt)->erronous;
+}
+EXPORT_SYMBOL(cfpkt_erroneous);
+
+struct cfpktq *cfpktq_create(void)
+{
+ struct cfpktq *q = kmalloc(sizeof(struct cfpktq), GFP_ATOMIC);
+ if (!q)
+ return NULL;
+ skb_queue_head_init(&q->head);
+ atomic_set(&q->count, 0);
+ spin_lock_init(&q->lock);
+ return q;
+}
+EXPORT_SYMBOL(cfpktq_create);
+
+void cfpkt_queue(struct cfpktq *pktq, struct cfpkt *pkt, unsigned short prio)
+{
+ atomic_inc(&pktq->count);
+ spin_lock(&pktq->lock);
+ skb_queue_tail(&pktq->head, pkt_to_skb(pkt));
+ spin_unlock(&pktq->lock);
+}
+EXPORT_SYMBOL(cfpkt_queue);
+
+struct cfpkt *cfpkt_qpeek(struct cfpktq *pktq)
+{
+ struct cfpkt *tmp;
+ spin_lock(&pktq->lock);
+ tmp = skb_to_pkt(skb_peek(&pktq->head));
+ spin_unlock(&pktq->lock);
+ return tmp;
+}
+EXPORT_SYMBOL(cfpkt_qpeek);
+
+struct cfpkt *cfpkt_dequeue(struct cfpktq *pktq)
+{
+ struct cfpkt *pkt;
+ spin_lock(&pktq->lock);
+ pkt = skb_to_pkt(skb_dequeue(&pktq->head));
+ if (pkt) {
+ atomic_dec(&pktq->count);
+ caif_assert(atomic_read(&pktq->count) >= 0);
+ }
+ spin_unlock(&pktq->lock);
+ return pkt;
+}
+EXPORT_SYMBOL(cfpkt_dequeue);
+
+int cfpkt_qcount(struct cfpktq *pktq)
+{
+ return atomic_read(&pktq->count);
+}
+EXPORT_SYMBOL(cfpkt_qcount);
+
+struct cfpkt *cfpkt_clone_release(struct cfpkt *pkt)
+{
+ struct cfpkt *clone;
+ clone = skb_to_pkt(skb_clone(pkt_to_skb(pkt), GFP_ATOMIC));
+ /* Free original packet. */
+ cfpkt_destroy(pkt);
+	return clone;
+}
+EXPORT_SYMBOL(cfpkt_clone_release);
+
+struct caif_payload_info *cfpkt_info(struct cfpkt *pkt)
+{
+ return (struct caif_payload_info *)&pkt_to_skb(pkt)->cb;
+}
+EXPORT_SYMBOL(cfpkt_info);
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
new file mode 100644
index 000000000000..cd2830fec935
--- /dev/null
+++ b/net/caif/cfrfml.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
+
+#define RFM_SEGMENTATION_BIT 0x01
+#define RFM_PAYLOAD 0x00
+#define RFM_CMD_BIT 0x80
+#define RFM_FLOW_OFF 0x81
+#define RFM_FLOW_ON 0x80
+#define RFM_SET_PIN 0x82
+#define RFM_CTRL_PKT_SIZE 1
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl);
+
+struct cflayer *cfrfml_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *rfm = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!rfm) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(rfm, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(rfm, channel_id, dev_info);
+ rfm->layer.modemcmd = cfservl_modemcmd;
+ rfm->layer.receive = cfrfml_receive;
+ rfm->layer.transmit = cfrfml_transmit;
+ snprintf(rfm->layer.name, CAIF_LAYER_NAME_SZ, "rfm%d", channel_id);
+ return &rfm->layer;
+}
+
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ return -EPROTO;
+}
+
+static int cfrfml_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp;
+ bool segmented;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+
+ /*
+ * RFM is taking care of segmentation and stripping of
+ * segmentation bit.
+ */
+ if (cfpkt_extr_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ segmented = tmp & RFM_SEGMENTATION_BIT;
+ caif_assert(!segmented);
+
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+}
+
+static int cfrfml_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+	if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+		pr_err("CAIF: %s(): Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info for MUX-layer to route the packet out. */
+ cfpkt_info(pkt)->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, we add up the size of CAIF header before
+ * payload.
+ */
+ cfpkt_info(pkt)->hdr_len = 1;
+ cfpkt_info(pkt)->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfserl.c b/net/caif/cfserl.c
new file mode 100644
index 000000000000..06029ea2da2f
--- /dev/null
+++ b/net/caif/cfserl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/spinlock.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/cfserl.h>
+
+#define container_obj(layr) ((struct cfserl *) layr)
+
+#define CFSERL_STX 0x02
+#define CAIF_MINIMUM_PACKET_SIZE 4
+
+struct cfserl {
+ struct cflayer layer;
+ struct cfpkt *incomplete_frm;
+ /* Protects parallel processing of incoming packets */
+ spinlock_t sync;
+ bool usestx;
+};
+#define STXLEN(layr) (layr->usestx ? 1 : 0)
+
+static int cfserl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfserl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid);
+
+struct cflayer *cfserl_create(int type, int instance, bool use_stx)
+{
+ struct cfserl *this = kmalloc(sizeof(struct cfserl), GFP_ATOMIC);
+ if (!this) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfserl, layer) == 0);
+ memset(this, 0, sizeof(struct cfserl));
+ this->layer.receive = cfserl_receive;
+ this->layer.transmit = cfserl_transmit;
+ this->layer.ctrlcmd = cfserl_ctrlcmd;
+ this->layer.type = type;
+ this->usestx = use_stx;
+ spin_lock_init(&this->sync);
+	snprintf(this->layer.name, CAIF_LAYER_NAME_SZ, "ser%d", instance);
+ return &this->layer;
+}
+
+static int cfserl_receive(struct cflayer *l, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(l);
+ u16 pkt_len;
+ struct cfpkt *pkt = NULL;
+ struct cfpkt *tail_pkt = NULL;
+ u8 tmp8;
+ u16 tmp;
+ u8 stx = CFSERL_STX;
+ int ret;
+ u16 expectlen = 0;
+ caif_assert(newpkt != NULL);
+ spin_lock(&layr->sync);
+
+ if (layr->incomplete_frm != NULL) {
+
+ layr->incomplete_frm =
+ cfpkt_append(layr->incomplete_frm, newpkt, expectlen);
+ pkt = layr->incomplete_frm;
+ } else {
+ pkt = newpkt;
+ }
+ layr->incomplete_frm = NULL;
+
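+	/*
+	 * Wire format: an optional STX (0x02) delimiter followed by a
+	 * 2-byte little-endian length field. Loop for as long as the
+	 * accumulated data holds complete frames.
+	 */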
+ do {
+ /* Search for STX at start of pkt if STX is used */
+ if (layr->usestx) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ if (tmp8 != CFSERL_STX) {
+ while (cfpkt_more(pkt)
+ && tmp8 != CFSERL_STX) {
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ }
+ if (!cfpkt_more(pkt)) {
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ }
+ }
+
+ pkt_len = cfpkt_getlen(pkt);
+
+ /*
+ * pkt_len is the accumulated length of the packet data
+ * we have received so far.
+ * Exit if frame doesn't hold length.
+ */
+
+ if (pkt_len < 2) {
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Find length of frame.
+ * expectlen is the length we need for a full frame.
+ */
+ cfpkt_peek_head(pkt, &tmp, 2);
+ expectlen = le16_to_cpu(tmp) + 2;
+ /*
+ * Frame error handling
+ */
+		if (expectlen < CAIF_MINIMUM_PACKET_SIZE
+ || expectlen > CAIF_MAX_FRAMESIZE) {
+ if (!layr->usestx) {
+ if (pkt != NULL)
+ cfpkt_destroy(pkt);
+ layr->incomplete_frm = NULL;
+ expectlen = 0;
+ spin_unlock(&layr->sync);
+ return -EPROTO;
+ }
+ continue;
+ }
+
+ if (pkt_len < expectlen) {
+ /* Too little received data */
+ if (layr->usestx)
+ cfpkt_add_head(pkt, &stx, 1);
+ layr->incomplete_frm = pkt;
+ spin_unlock(&layr->sync);
+ return 0;
+ }
+
+ /*
+ * Enough data for at least one frame.
+ * Split the frame, if too long
+ */
+ if (pkt_len > expectlen)
+ tail_pkt = cfpkt_split(pkt, expectlen);
+ else
+ tail_pkt = NULL;
+
+ /* Send the first part of packet upwards.*/
+ spin_unlock(&layr->sync);
+ ret = layr->layer.up->receive(layr->layer.up, pkt);
+ spin_lock(&layr->sync);
+ if (ret == -EILSEQ) {
+ if (layr->usestx) {
+ if (tail_pkt != NULL)
+ pkt = cfpkt_append(pkt, tail_pkt, 0);
+
+ /* Start search for next STX if frame failed */
+ continue;
+ } else {
+ cfpkt_destroy(pkt);
+ pkt = NULL;
+ }
+ }
+
+ pkt = tail_pkt;
+
+ } while (pkt != NULL);
+
+ spin_unlock(&layr->sync);
+ return 0;
+}
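+
+/*
+ * Note that cfserl_receive() releases layr->sync around the call to
+ * layr->layer.up->receive(), so the upper layer runs without the
+ * serial lock held and only the framing state is protected by it.
+ */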
+
+static int cfserl_transmit(struct cflayer *layer, struct cfpkt *newpkt)
+{
+ struct cfserl *layr = container_obj(layer);
+ int ret;
+ u8 tmp8 = CFSERL_STX;
+ if (layr->usestx)
+ cfpkt_add_head(newpkt, &tmp8, 1);
+ ret = layer->dn->transmit(layer->dn, newpkt);
+ if (ret < 0)
+ cfpkt_extr_head(newpkt, &tmp8, 1);
+
+ return ret;
+}
+
+static void cfserl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+}
diff --git a/net/caif/cfsrvl.c b/net/caif/cfsrvl.c
new file mode 100644
index 000000000000..aff31f34528f
--- /dev/null
+++ b/net/caif/cfsrvl.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define SRVL_CTRL_PKT_SIZE 1
+#define SRVL_FLOW_OFF 0x81
+#define SRVL_FLOW_ON 0x80
+#define SRVL_SET_PIN 0x82
+
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
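+
+/*
+ * A service layer carries traffic only when all three gates checked by
+ * cfsrvl_ready() are open: the channel is connected (open), the modem
+ * signals flow on (modem_flow_on) and the physical interface signals
+ * flow on (phy_flow_on). cfservl_ctrlcmd() below toggles these gates
+ * and only forwards a flow indication upwards when the other gate is
+ * already open, so the layer above sees one combined flow state.
+ */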
+
+static void cfservl_ctrlcmd(struct cflayer *layr, enum caif_ctrlcmd ctrl,
+ int phyid)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ switch (ctrl) {
+ case CAIF_CTRLCMD_INIT_RSP:
+ service->open = true;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ service->open = false;
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_OFF_IND:
+ if (phyid != service->dev_info.id)
+ break;
+ if (service->modem_flow_on)
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ service->phy_flow_on = false;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_FLOW_ON_IND:
+ if (phyid != service->dev_info.id)
+ return;
+ if (service->modem_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND,
+ phyid);
+ }
+ service->phy_flow_on = true;
+ break;
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_OFF_IND, phyid);
+ }
+ service->modem_flow_on = false;
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ if (service->phy_flow_on) {
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_FLOW_ON_IND, phyid);
+ }
+ service->modem_flow_on = true;
+ break;
+ case _CAIF_CTRLCMD_PHYIF_DOWN_IND:
+ /* In case the interface is down, fake a remote shutdown */
+ layr->up->ctrlcmd(layr->up,
+ CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, phyid);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ break;
+ default:
+ pr_warning("CAIF: %s(): "
+ "Unexpected ctrl in cfsrvl (%d)\n", __func__, ctrl);
+ /* Pass the command upwards anyway and assume phy flow is on */
+ layr->up->ctrlcmd(layr->up, ctrl, phyid);
+ service->phy_flow_on = true;
+ break;
+ }
+}
+
+static int cfservl_modemcmd(struct cflayer *layr, enum caif_modemcmd ctrl)
+{
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ switch (ctrl) {
+ case CAIF_MODEMCMD_FLOW_ON_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_on = SRVL_FLOW_ON;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+
+ if (cfpkt_add_head(pkt, &flow_on, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ case CAIF_MODEMCMD_FLOW_OFF_REQ:
+ {
+ struct cfpkt *pkt;
+ struct caif_payload_info *info;
+ u8 flow_off = SRVL_FLOW_OFF;
+ pkt = cfpkt_create(SRVL_CTRL_PKT_SIZE);
+ if (!pkt) {
+ pr_warning("CAIF: %s(): Out of memory\n",
+ __func__);
+ return -ENOMEM;
+ }
+ if (cfpkt_add_head(pkt, &flow_off, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n",
+ __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ return layr->dn->transmit(layr->dn, pkt);
+ }
+ default:
+ break;
+ }
+ return -EINVAL;
+}
+
+void cfservl_destroy(struct cflayer *layer)
+{
+ kfree(layer);
+}
+
+void cfsrvl_init(struct cfsrvl *service,
+ u8 channel_id,
+ struct dev_info *dev_info)
+{
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ service->open = false;
+ service->modem_flow_on = true;
+ service->phy_flow_on = true;
+ service->layer.id = channel_id;
+ service->layer.ctrlcmd = cfservl_ctrlcmd;
+ service->layer.modemcmd = cfservl_modemcmd;
+ service->dev_info = *dev_info;
+ kref_init(&service->ref);
+}
+
+void cfsrvl_release(struct kref *kref)
+{
+ struct cfsrvl *service = container_of(kref, struct cfsrvl, ref);
+ kfree(service);
+}
+
+bool cfsrvl_ready(struct cfsrvl *service, int *err)
+{
+ if (service->open && service->modem_flow_on && service->phy_flow_on)
+ return true;
+ if (!service->open) {
+ *err = -ENOTCONN;
+ return false;
+ }
+ caif_assert(!(service->modem_flow_on && service->phy_flow_on));
+ *err = -EAGAIN;
+ return false;
+}
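+
+/*
+ * Service transmit functions gate their output on cfsrvl_ready(), e.g.:
+ *
+ *	if (!cfsrvl_ready(service, &ret))
+ *		return ret;
+ *
+ * giving callers -ENOTCONN while the channel is down and -EAGAIN while
+ * flow is off.
+ */
+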
+u8 cfsrvl_getphyid(struct cflayer *layer)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id;
+}
+
+bool cfsrvl_phyid_match(struct cflayer *layer, int phyid)
+{
+ struct cfsrvl *servl = container_obj(layer);
+ return servl->dev_info.id == phyid;
+}
diff --git a/net/caif/cfutill.c b/net/caif/cfutill.c
new file mode 100644
index 000000000000..5fd2c9ea8b42
--- /dev/null
+++ b/net/caif/cfutill.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
+#define UTIL_PAYLOAD 0x00
+#define UTIL_CMD_BIT 0x80
+#define UTIL_REMOTE_SHUTDOWN 0x82
+#define UTIL_FLOW_OFF 0x81
+#define UTIL_FLOW_ON 0x80
+#define UTIL_CTRL_PKT_SIZE 1
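+
+/*
+ * Each utility-channel frame starts with a one-byte header, handled in
+ * cfutill_receive() below: 0x00 prefixes payload, while commands have
+ * the command bit (0x80) set: 0x80 flow-on, 0x81 flow-off and 0x82
+ * remote shutdown.
+ */
+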
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfutill_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *util = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!util) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(util, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(util, channel_id, dev_info);
+ util->layer.receive = cfutill_receive;
+ util->layer.transmit = cfutill_transmit;
+ snprintf(util->layer.name, CAIF_LAYER_NAME_SZ - 1, "util1");
+ return &util->layer;
+}
+
+static int cfutill_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd = -1;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->up->receive != NULL);
+ caif_assert(layr->up->ctrlcmd != NULL);
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+
+ switch (cmd) {
+ case UTIL_PAYLOAD:
+ return layr->up->receive(layr->up, pkt);
+ case UTIL_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case UTIL_REMOTE_SHUTDOWN: /* Remote Shutdown Request */
+ pr_err("CAIF: %s(): REMOTE SHUTDOWN REQUEST RECEIVED\n",
+ __func__);
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND, 0);
+ service->open = false;
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ cfpkt_destroy(pkt);
+ pr_warning("CAIF: %s(): Unknown service control %d (0x%x)\n",
+ __func__, cmd, cmd);
+ return -EPROTO;
+ }
+}
+
+static int cfutill_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 zero = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ caif_assert(layr != NULL);
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_err("CAIF: %s(): packet too large size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ cfpkt_add_head(pkt, &zero, 1);
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ /*
+ * To optimize alignment, record the size of the CAIF header that
+ * precedes the payload.
+ */
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0) {
+ u8 tmp8;
+ /* Undo the single header byte added above */
+ cfpkt_extr_head(pkt, &tmp8, 1);
+ }
+ return ret;
+}
diff --git a/net/caif/cfveil.c b/net/caif/cfveil.c
new file mode 100644
index 000000000000..0fd827f49491
--- /dev/null
+++ b/net/caif/cfveil.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define VEI_PAYLOAD 0x00
+#define VEI_CMD_BIT 0x80
+#define VEI_FLOW_OFF 0x81
+#define VEI_FLOW_ON 0x80
+#define VEI_SET_PIN 0x82
+#define VEI_CTRL_PKT_SIZE 1
+#define container_obj(layr) container_of(layr, struct cfsrvl, layer)
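+
+/*
+ * The VEI service uses the same one-byte header scheme as the utility
+ * service: 0x00 prefixes payload, 0x80 is flow-on and 0x81 is flow-off;
+ * here 0x82 is a set-RS232-PIN command rather than remote shutdown.
+ */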
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvei_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vei = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vei) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+ memset(vei, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(vei, channel_id, dev_info);
+ vei->layer.receive = cfvei_receive;
+ vei->layer.transmit = cfvei_transmit;
+ snprintf(vei->layer.name, CAIF_LAYER_NAME_SZ - 1, "vei%d", channel_id);
+ return &vei->layer;
+}
+
+static int cfvei_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 cmd;
+ int ret;
+ caif_assert(layr->up != NULL);
+ caif_assert(layr->receive != NULL);
+ caif_assert(layr->ctrlcmd != NULL);
+
+
+ if (cfpkt_extr_head(pkt, &cmd, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ switch (cmd) {
+ case VEI_PAYLOAD:
+ ret = layr->up->receive(layr->up, pkt);
+ return ret;
+ case VEI_FLOW_OFF:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_OFF_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_FLOW_ON:
+ layr->ctrlcmd(layr, CAIF_CTRLCMD_FLOW_ON_IND, 0);
+ cfpkt_destroy(pkt);
+ return 0;
+ case VEI_SET_PIN: /* SET RS232 PIN */
+ cfpkt_destroy(pkt);
+ return 0;
+ default:
+ pr_warning("CAIF: %s():Unknown VEI control packet %d (0x%x)!\n",
+ __func__, cmd, cmd);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+}
+
+static int cfvei_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u8 tmp = 0;
+ struct caif_payload_info *info;
+ int ret;
+ struct cfsrvl *service = container_obj(layr);
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+ caif_assert(layr->dn != NULL);
+ caif_assert(layr->dn->transmit != NULL);
+ if (cfpkt_getlen(pkt) > CAIF_MAX_PAYLOAD_SIZE) {
+ pr_warning("CAIF: %s(): Packet too large - size=%d\n",
+ __func__, cfpkt_getlen(pkt));
+ return -EOVERFLOW;
+ }
+
+ if (cfpkt_add_head(pkt, &tmp, 1) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ return -EPROTO;
+ }
+
+ /* Add info for MUX-layer to route the packet out. */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->hdr_len = 1;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &tmp, 1);
+ return ret;
+}
diff --git a/net/caif/cfvidl.c b/net/caif/cfvidl.c
new file mode 100644
index 000000000000..89ad4ea239f1
--- /dev/null
+++ b/net/caif/cfvidl.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Author: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/errno.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfsrvl.h>
+#include <net/caif/cfpkt.h>
+
+#define container_obj(layr) ((struct cfsrvl *) layr)
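+
+/*
+ * Video service frames carry a 32-bit header ahead of the payload;
+ * cfvidl_receive() strips it and cfvidl_transmit() prepends a zeroed
+ * one. The header content is not interpreted by this layer.
+ */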
+
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt);
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt);
+
+struct cflayer *cfvidl_create(u8 channel_id, struct dev_info *dev_info)
+{
+ struct cfsrvl *vid = kmalloc(sizeof(struct cfsrvl), GFP_ATOMIC);
+ if (!vid) {
+ pr_warning("CAIF: %s(): Out of memory\n", __func__);
+ return NULL;
+ }
+ caif_assert(offsetof(struct cfsrvl, layer) == 0);
+
+ memset(vid, 0, sizeof(struct cfsrvl));
+ cfsrvl_init(vid, channel_id, dev_info);
+ vid->layer.receive = cfvidl_receive;
+ vid->layer.transmit = cfvidl_transmit;
+ snprintf(vid->layer.name, CAIF_LAYER_NAME_SZ - 1, "vid1");
+ return &vid->layer;
+}
+
+static int cfvidl_receive(struct cflayer *layr, struct cfpkt *pkt)
+{
+ u32 videoheader;
+ if (cfpkt_extr_head(pkt, &videoheader, 4) < 0) {
+ pr_err("CAIF: %s(): Packet is erroneous!\n", __func__);
+ cfpkt_destroy(pkt);
+ return -EPROTO;
+ }
+ return layr->up->receive(layr->up, pkt);
+}
+
+static int cfvidl_transmit(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct cfsrvl *service = container_obj(layr);
+ struct caif_payload_info *info;
+ u32 videoheader = 0;
+ int ret;
+ if (!cfsrvl_ready(service, &ret))
+ return ret;
+ cfpkt_add_head(pkt, &videoheader, 4);
+ /* Add info for MUX-layer to route the packet out */
+ info = cfpkt_info(pkt);
+ info->channel_id = service->layer.id;
+ info->dev_info = &service->dev_info;
+ ret = layr->dn->transmit(layr->dn, pkt);
+ if (ret < 0)
+ cfpkt_extr_head(pkt, &videoheader, 4);
+ return ret;
+}
diff --git a/net/caif/chnl_net.c b/net/caif/chnl_net.c
new file mode 100644
index 000000000000..610966abe2dc
--- /dev/null
+++ b/net/caif/chnl_net.c
@@ -0,0 +1,467 @@
+/*
+ * Copyright (C) ST-Ericsson AB 2010
+ * Authors: Sjur Brendeland/sjur.brandeland@stericsson.com
+ * Daniel Martensson / Daniel.Martensson@stericsson.com
+ * License terms: GNU General Public License (GPL) version 2
+ */
+
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/if_ether.h>
+#include <linux/moduleparam.h>
+#include <linux/ip.h>
+#include <linux/sched.h>
+#include <linux/sockios.h>
+#include <linux/caif/if_caif.h>
+#include <net/rtnetlink.h>
+#include <net/caif/caif_layer.h>
+#include <net/caif/cfcnfg.h>
+#include <net/caif/cfpkt.h>
+#include <net/caif/caif_dev.h>
+
+/* A GPRS PDP connection has an MTU of 1500 bytes */
+#define SIZE_MTU 1500
+/* 5 sec. connect timeout */
+#define CONNECT_TIMEOUT (5 * HZ)
+#define CAIF_NET_DEFAULT_QUEUE_LEN 500
+
+#undef pr_debug
+#define pr_debug pr_warning
+
+/* This list is protected by the RTNL lock. */
+static LIST_HEAD(chnl_net_list);
+
+MODULE_LICENSE("GPL");
+MODULE_ALIAS_RTNL_LINK("caif");
+
+enum caif_states {
+ CAIF_CONNECTED = 1,
+ CAIF_CONNECTING,
+ CAIF_DISCONNECTED,
+ CAIF_SHUTDOWN
+};
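+
+/*
+ * Connection state machine: chnl_net_open() moves the device to
+ * CAIF_CONNECTING and waits for chnl_flowctrl_cb() to advance it to
+ * CAIF_CONNECTED (INIT_RSP) or back to CAIF_DISCONNECTED
+ * (INIT_FAIL_RSP/DEINIT_RSP). A remote shutdown indication sets
+ * CAIF_SHUTDOWN and defers dev_close() to close_worker, which takes
+ * the RTNL lock.
+ */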
+
+struct chnl_net {
+ struct cflayer chnl;
+ struct net_device_stats stats;
+ struct caif_connect_request conn_req;
+ struct list_head list_field;
+ struct net_device *netdev;
+ char name[256];
+ wait_queue_head_t netmgmt_wq;
+ /* Flow status to remember and control the transmission. */
+ bool flowenabled;
+ enum caif_states state;
+};
+
+static void robust_list_del(struct list_head *delete_node)
+{
+ struct list_head *list_node;
+ struct list_head *n;
+ ASSERT_RTNL();
+ list_for_each_safe(list_node, n, &chnl_net_list) {
+ if (list_node == delete_node) {
+ list_del(list_node);
+ return;
+ }
+ }
+ WARN_ON(1);
+}
+
+static int chnl_recv_cb(struct cflayer *layr, struct cfpkt *pkt)
+{
+ struct sk_buff *skb;
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ int pktlen;
+ int err = 0;
+
+ if (!priv)
+ return -EINVAL;
+
+ /* Get length of CAIF packet. */
+ pktlen = cfpkt_getlen(pkt);
+
+ skb = (struct sk_buff *) cfpkt_tonative(pkt);
+ /* Pass some minimum information and
+ * send the packet to the net stack.
+ */
+ skb->dev = priv->netdev;
+ skb->protocol = htons(ETH_P_IP);
+
+ /*
+ * Loop mode swaps the IP addresses on transmit, which invalidates
+ * the checksum, so tell the stack not to verify it.
+ */
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ else
+ skb->ip_summed = CHECKSUM_NONE;
+
+ if (in_interrupt())
+ netif_rx(skb);
+ else
+ netif_rx_ni(skb);
+
+ /* Update statistics. */
+ priv->netdev->stats.rx_packets++;
+ priv->netdev->stats.rx_bytes += pktlen;
+
+ return err;
+}
+
+static int delete_device(struct chnl_net *dev)
+{
+ ASSERT_RTNL();
+ if (dev->netdev)
+ unregister_netdevice(dev->netdev);
+ return 0;
+}
+
+static void close_work(struct work_struct *work)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ /* May be called with or without RTNL lock held */
+ int islocked = rtnl_is_locked();
+ if (!islocked)
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ if (dev->state == CAIF_SHUTDOWN)
+ dev_close(dev->netdev);
+ }
+ if (!islocked)
+ rtnl_unlock();
+}
+static DECLARE_WORK(close_worker, close_work);
+
+static void chnl_flowctrl_cb(struct cflayer *layr, enum caif_ctrlcmd flow,
+ int phyid)
+{
+ struct chnl_net *priv = container_of(layr, struct chnl_net, chnl);
+ pr_debug("CAIF: %s(): NET flowctrl func called flow: %s\n",
+ __func__,
+ flow == CAIF_CTRLCMD_FLOW_ON_IND ? "ON" :
+ flow == CAIF_CTRLCMD_INIT_RSP ? "INIT" :
+ flow == CAIF_CTRLCMD_FLOW_OFF_IND ? "OFF" :
+ flow == CAIF_CTRLCMD_DEINIT_RSP ? "CLOSE/DEINIT" :
+ flow == CAIF_CTRLCMD_INIT_FAIL_RSP ? "OPEN_FAIL" :
+ flow == CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND ?
+ "REMOTE_SHUTDOWN" : "UKNOWN CTRL COMMAND");
+
+
+
+ switch (flow) {
+ case CAIF_CTRLCMD_FLOW_OFF_IND:
+ priv->flowenabled = false;
+ netif_stop_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_DEINIT_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ break;
+ case CAIF_CTRLCMD_INIT_FAIL_RSP:
+ priv->state = CAIF_DISCONNECTED;
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
+ priv->state = CAIF_SHUTDOWN;
+ netif_tx_disable(priv->netdev);
+ schedule_work(&close_worker);
+ break;
+ case CAIF_CTRLCMD_FLOW_ON_IND:
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ break;
+ case CAIF_CTRLCMD_INIT_RSP:
+ priv->state = CAIF_CONNECTED;
+ priv->flowenabled = true;
+ netif_wake_queue(priv->netdev);
+ wake_up_interruptible(&priv->netmgmt_wq);
+ break;
+ default:
+ break;
+ }
+}
+
+static int chnl_net_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+ struct chnl_net *priv;
+ struct cfpkt *pkt = NULL;
+ int len;
+ int result = -1;
+ /* Get our private data. */
+ priv = netdev_priv(dev);
+
+ if (skb->len > priv->netdev->mtu) {
+ pr_warning("CAIF: %s(): Size of skb exceeded MTU\n", __func__);
+ kfree_skb(skb);
+ dev->stats.tx_errors++;
+ return NETDEV_TX_OK;
+ }
+
+ if (!priv->flowenabled) {
+ pr_debug("CAIF: %s(): dropping packets flow off\n", __func__);
+ return NETDEV_TX_BUSY;
+ }
+
+ if (priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP)
+ swap(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);
+
+ /* Store original SKB length. */
+ len = skb->len;
+
+ pkt = cfpkt_fromnative(CAIF_DIR_OUT, (void *) skb);
+
+ /* Send the packet down the stack. */
+ result = priv->chnl.dn->transmit(priv->chnl.dn, pkt);
+ if (result) {
+ if (result == -EAGAIN)
+ result = NETDEV_TX_BUSY;
+ return result;
+ }
+
+ /* Update statistics. */
+ dev->stats.tx_packets++;
+ dev->stats.tx_bytes += len;
+
+ return NETDEV_TX_OK;
+}
+
+static int chnl_net_open(struct net_device *dev)
+{
+ struct chnl_net *priv = NULL;
+ int result = -1;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ if (!priv) {
+ pr_debug("CAIF: %s(): chnl_net_open: no priv\n", __func__);
+ return -ENODEV;
+ }
+
+ if (priv->state != CAIF_CONNECTING) {
+ priv->state = CAIF_CONNECTING;
+ result = caif_connect_client(&priv->conn_req, &priv->chnl);
+ if (result != 0) {
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): err: "
+ "Unable to register and open device,"
+ " Err:%d\n",
+ __func__,
+ result);
+ return result;
+ }
+ }
+
+ result = wait_event_interruptible_timeout(priv->netmgmt_wq,
+ priv->state != CAIF_CONNECTING,
+ CONNECT_TIMEOUT);
+
+ if (result == -ERESTARTSYS) {
+ pr_debug("CAIF: %s(): wait_event_interruptible"
+ " woken by a signal\n", __func__);
+ return -ERESTARTSYS;
+ }
+ if (result == 0) {
+ pr_debug("CAIF: %s(): connect timeout\n", __func__);
+ caif_disconnect_client(&priv->chnl);
+ priv->state = CAIF_DISCONNECTED;
+ pr_debug("CAIF: %s(): state disconnected\n", __func__);
+ return -ETIMEDOUT;
+ }
+
+ if (priv->state != CAIF_CONNECTED) {
+ pr_debug("CAIF: %s(): connect failed\n", __func__);
+ return -ECONNREFUSED;
+ }
+ pr_debug("CAIF: %s(): CAIF Netdevice connected\n", __func__);
+ return 0;
+}
+
+static int chnl_net_stop(struct net_device *dev)
+{
+ struct chnl_net *priv;
+
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ priv->state = CAIF_DISCONNECTED;
+ caif_disconnect_client(&priv->chnl);
+ return 0;
+}
+
+static int chnl_net_init(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ strncpy(priv->name, dev->name, sizeof(priv->name));
+ return 0;
+}
+
+static void chnl_net_uninit(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ ASSERT_RTNL();
+ priv = netdev_priv(dev);
+ robust_list_del(&priv->list_field);
+}
+
+static const struct net_device_ops netdev_ops = {
+ .ndo_open = chnl_net_open,
+ .ndo_stop = chnl_net_stop,
+ .ndo_init = chnl_net_init,
+ .ndo_uninit = chnl_net_uninit,
+ .ndo_start_xmit = chnl_net_start_xmit,
+};
+
+static void ipcaif_net_setup(struct net_device *dev)
+{
+ struct chnl_net *priv;
+ dev->netdev_ops = &netdev_ops;
+ dev->destructor = free_netdev;
+ dev->flags |= IFF_NOARP;
+ dev->flags |= IFF_POINTOPOINT;
+ dev->needed_headroom = CAIF_NEEDED_HEADROOM;
+ dev->needed_tailroom = CAIF_NEEDED_TAILROOM;
+ dev->mtu = SIZE_MTU;
+ dev->tx_queue_len = CAIF_NET_DEFAULT_QUEUE_LEN;
+
+ priv = netdev_priv(dev);
+ priv->chnl.receive = chnl_recv_cb;
+ priv->chnl.ctrlcmd = chnl_flowctrl_cb;
+ priv->netdev = dev;
+ priv->conn_req.protocol = CAIFPROTO_DATAGRAM;
+ priv->conn_req.link_selector = CAIF_LINK_HIGH_BANDW;
+ priv->conn_req.priority = CAIF_PRIO_LOW;
+ /* Insert an illegal connection ID; a valid one must be set via netlink */
+ priv->conn_req.sockaddr.u.dgm.connection_id = -1;
+ priv->flowenabled = false;
+
+ ASSERT_RTNL();
+ init_waitqueue_head(&priv->netmgmt_wq);
+ list_add(&priv->list_field, &chnl_net_list);
+}
+
+
+static int ipcaif_fill_info(struct sk_buff *skb, const struct net_device *dev)
+{
+ struct chnl_net *priv;
+ u8 loop;
+ priv = netdev_priv(dev);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV4_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ NLA_PUT_U32(skb, IFLA_CAIF_IPV6_CONNID,
+ priv->conn_req.sockaddr.u.dgm.connection_id);
+ loop = priv->conn_req.protocol == CAIFPROTO_DATAGRAM_LOOP;
+ NLA_PUT_U8(skb, IFLA_CAIF_LOOPBACK, loop);
+
+
+ return 0;
+nla_put_failure:
+ return -EMSGSIZE;
+
+}
+
+static void caif_netlink_parms(struct nlattr *data[],
+ struct caif_connect_request *conn_req)
+{
+ if (!data) {
+ pr_warning("CAIF: %s: no params data found\n", __func__);
+ return;
+ }
+ if (data[IFLA_CAIF_IPV4_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV4_CONNID]);
+ if (data[IFLA_CAIF_IPV6_CONNID])
+ conn_req->sockaddr.u.dgm.connection_id =
+ nla_get_u32(data[IFLA_CAIF_IPV6_CONNID]);
+ if (data[IFLA_CAIF_LOOPBACK]) {
+ if (nla_get_u8(data[IFLA_CAIF_LOOPBACK]))
+ conn_req->protocol = CAIFPROTO_DATAGRAM_LOOP;
+ else
+ conn_req->protocol = CAIFPROTO_DATAGRAM;
+ }
+}
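+
+/*
+ * Note that IFLA_CAIF_IPV4_CONNID and IFLA_CAIF_IPV6_CONNID both write
+ * the same connection_id field, so if both attributes are supplied the
+ * IPv6 value wins.
+ */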
+
+static int ipcaif_newlink(struct net *src_net, struct net_device *dev,
+ struct nlattr *tb[], struct nlattr *data[])
+{
+ int ret;
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ dev_net_set(caifdev->netdev, src_net);
+
+ ret = register_netdevice(dev);
+ if (ret)
+ pr_warning("CAIF: %s(): device rtml registration failed\n",
+ __func__);
+ return ret;
+}
+
+static int ipcaif_changelink(struct net_device *dev, struct nlattr *tb[],
+ struct nlattr *data[])
+{
+ struct chnl_net *caifdev;
+ ASSERT_RTNL();
+ caifdev = netdev_priv(dev);
+ caif_netlink_parms(data, &caifdev->conn_req);
+ netdev_state_change(dev);
+ return 0;
+}
+
+static size_t ipcaif_get_size(const struct net_device *dev)
+{
+ return
+ /* IFLA_CAIF_IPV4_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_IPV6_CONNID */
+ nla_total_size(4) +
+ /* IFLA_CAIF_LOOPBACK */
+ nla_total_size(2) +
+ 0;
+}
+
+static const struct nla_policy ipcaif_policy[IFLA_CAIF_MAX + 1] = {
+ [IFLA_CAIF_IPV4_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_IPV6_CONNID] = { .type = NLA_U32 },
+ [IFLA_CAIF_LOOPBACK] = { .type = NLA_U8 }
+};
+
+
+static struct rtnl_link_ops ipcaif_link_ops __read_mostly = {
+ .kind = "caif",
+ .priv_size = sizeof(struct chnl_net),
+ .setup = ipcaif_net_setup,
+ .maxtype = IFLA_CAIF_MAX,
+ .policy = ipcaif_policy,
+ .newlink = ipcaif_newlink,
+ .changelink = ipcaif_changelink,
+ .get_size = ipcaif_get_size,
+ .fill_info = ipcaif_fill_info,
+
+};
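+
+/*
+ * With the "caif" link kind registered (and the MODULE_ALIAS_RTNL_LINK
+ * alias above for module autoloading), interfaces are created from user
+ * space through rtnetlink. Assuming an iproute2 build that knows the
+ * caif link type, a rough sketch would be:
+ *
+ *	ip link add type caif
+ *	ip link set caif0 up
+ *
+ * The exact iproute2 syntax and the default interface name are
+ * assumptions here, not guaranteed by this code.
+ */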
+
+static int __init chnl_init_module(void)
+{
+ return rtnl_link_register(&ipcaif_link_ops);
+}
+
+static void __exit chnl_exit_module(void)
+{
+ struct chnl_net *dev = NULL;
+ struct list_head *list_node;
+ struct list_head *_tmp;
+ rtnl_link_unregister(&ipcaif_link_ops);
+ rtnl_lock();
+ list_for_each_safe(list_node, _tmp, &chnl_net_list) {
+ dev = list_entry(list_node, struct chnl_net, list_field);
+ list_del(list_node);
+ delete_device(dev);
+ }
+ rtnl_unlock();
+}
+
+module_init(chnl_init_module);
+module_exit(chnl_exit_module);