New #216 » 4g-ec20-wwan.patch

余 顺, 09/27/2023 07:17 AM


device/rockchip/common/sepolicy/vendor/file_contexts
/dev/ttyFIQ[0-9]* u:object_r:serial_device:s0
/dev/ttyUSB[0-4]* u:object_r:radio_device:s0
/dev/cdc-wdm0 u:object_r:radio_device:s0
/vendor/bin/hw/rild u:object_r:rild_exec:s0
/dev/mhi_DUN u:object_r:radio_device:s0
/dev/mhi_DIAG u:object_r:radio_device:s0
device/rockchip/common/ueventd.rockchip.rc
/dev/ttyUSB8 0660 radio radio
/dev/ttyUSB9 0660 radio radio
/dev/cdc-wdm0 0660 radio radio
# for mali-t764
/dev/mali0 0666 system system
kernel/.config
CONFIG_USB_NET_ZAURUS=y
CONFIG_USB_NET_CX82310_ETH=y
CONFIG_USB_NET_KALMIA=y
# CONFIG_USB_NET_QMI_WWAN is not set
CONFIG_USB_NET_QMI_WWAN=y
CONFIG_USB_HSO=y
CONFIG_USB_NET_INT51X1=y
CONFIG_USB_IPHETH=y
kernel/drivers/net/usb/Makefile
obj-$(CONFIG_USB_NET_CDC_NCM) += cdc_ncm.o
obj-$(CONFIG_USB_NET_HUAWEI_CDC_NCM) += huawei_cdc_ncm.o
obj-$(CONFIG_USB_VL600) += lg-vl600.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
#obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan.o
obj-$(CONFIG_USB_NET_QMI_WWAN) += qmi_wwan_q.o
obj-$(CONFIG_USB_NET_CDC_MBIM) += cdc_mbim.o
obj-$(CONFIG_USB_NET_CH9200) += ch9200.o
kernel/drivers/net/usb/qmi_wwan_q.c
/*
* Copyright (c) 2012 Bjørn Mork <bjorn@mork.no>
*
* The probing code is heavily inspired by cdc_ether, which is:
* Copyright (C) 2003-2005 by David Brownell
* Copyright (C) 2006 by Ole Andre Vadla Ravnas (ActiveSync)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/version.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0) //8b094cd03b4a3793220d8d8d86a173bfea8c285b
#include <linux/timekeeping.h>
#else
#define timespec64 timespec
#define ktime_get_ts64 ktime_get_ts
#define timespec64_sub timespec_sub
#endif
#include <net/arp.h>
#include <net/ip.h>
#include <net/ipv6.h>
#include <linux/mii.h>
#include <linux/usb.h>
#include <linux/usb/cdc.h>
#include <linux/usb/usbnet.h>
#include <linux/usb/cdc-wdm.h>
#ifndef ETH_P_MAP
#define ETH_P_MAP 0xDA1A
#endif
#if (ETH_P_MAP == 0x00F9)
#undef ETH_P_MAP
#define ETH_P_MAP 0xDA1A
#endif
#ifndef ARPHRD_RAWIP
#define ARPHRD_RAWIP ARPHRD_NONE
#endif
#ifdef CONFIG_PINCTRL_IPQ807x
#define CONFIG_QCA_NSS_DRV
//#define CONFIG_QCA_NSS_PACKET_FILTER
#endif
#define _RMNET_NSS_H_
#define _RMENT_NSS_H_
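/* Both guard spellings are presumably deliberate: pre-defining the misspelled
 * _RMENT_NSS_H_ as well keeps either variant of a QSDK rmnet_nss.h from being
 * pulled in and redefining struct rmnet_nss_cb below. */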
struct rmnet_nss_cb {
int (*nss_create)(struct net_device *dev);
int (*nss_free)(struct net_device *dev);
int (*nss_tx)(struct sk_buff *skb);
};
static struct rmnet_nss_cb __read_mostly *nss_cb = NULL;
#if defined(CONFIG_PINCTRL_IPQ807x) || defined(CONFIG_PINCTRL_IPQ5018)
#ifdef CONFIG_RMNET_DATA
#define CONFIG_QCA_NSS_DRV
/* define at qsdk/qca/src/linux-4.4/net/rmnet_data/rmnet_data_main.c */
/* set at qsdk/qca/src/data-kernel/drivers/rmnet-nss/rmnet_nss.c */
extern struct rmnet_nss_cb *rmnet_nss_callbacks __rcu __read_mostly;
#endif
#endif
/* This driver supports wwan (3G/LTE/?) devices using a vendor
* specific management protocol called Qualcomm MSM Interface (QMI) -
* in addition to the more common AT commands over serial interface
* management
*
* QMI is wrapped in CDC, using CDC encapsulated commands on the
* control ("master") interface of a two-interface CDC Union
* resembling standard CDC ECM. The devices do not use the control
* interface for any other CDC messages. Most likely because the
* management protocol is used in place of the standard CDC
* notifications NOTIFY_NETWORK_CONNECTION and NOTIFY_SPEED_CHANGE
*
* Alternatively, control and data functions can be combined in a
* single USB interface.
*
* Handling a protocol like QMI is out of the scope for any driver.
* It is exported as a character device using the cdc-wdm driver as
* a subdriver, enabling userspace applications ("modem managers") to
* handle it.
*
* These devices may alternatively/additionally be configured using AT
* commands on a serial interface
*/
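/* Illustrative only, not part of the driver: a userspace "modem manager" can
 * drive the QMI endpoint through the character device that cdc-wdm registers.
 * The device node name below is an assumption (it depends on probe order);
 * the 12 bytes are the standard QMI_CTL GET_VERSION_INFO request.
 *
 *   int fd = open("/dev/cdc-wdm0", O_RDWR);
 *   static const unsigned char ver_req[] = {
 *       0x01, 0x0b, 0x00, 0x00, 0x00, 0x00,   // QMUX: I/F type, len=11, flags, svc=CTL, cid=0
 *       0x00, 0x01, 0x21, 0x00, 0x00, 0x00 }; // CTL: flags, txn=1, msg 0x0021, TLV len=0
 *   write(fd, ver_req, sizeof(ver_req));
 *   // a subsequent read(fd, ...) returns the version-info response
 */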
#define VERSION_NUMBER "V1.2.1"
#define QUECTEL_WWAN_VERSION "Quectel_Linux&Android_QMI_WWAN_Driver_"VERSION_NUMBER
static const char driver_name[] = "qmi_wwan_q";
/* driver specific data */
struct qmi_wwan_state {
struct usb_driver *subdriver;
atomic_t pmcount;
unsigned long unused;
struct usb_interface *control;
struct usb_interface *data;
};
/* default ethernet address used by the modem */
static const u8 default_modem_addr[ETH_ALEN] = {0x02, 0x50, 0xf3};
#if 1 //Added by Quectel
/*
Quectel_WCDMA&LTE_Linux_USB_Driver_User_Guide_V1.9.pdf
5.6. Test QMAP on GobiNet or QMI WWAN
0 - no QMAP
1 - QMAP (Aggregation protocol)
X - QMAP (Multiplexing and Aggregation protocol)
*/
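/* qmap_mode is a module parameter. Built in (as the .config change above
 * makes it) it would be set on the kernel command line, e.g.
 * qmi_wwan_q.qmap_mode=4; built as a module, e.g.
 * "insmod qmi_wwan_q.ko qmap_mode=4". Being S_IRUGO it can be read back from
 * /sys/module/qmi_wwan_q/parameters/qmap_mode. */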
#define QUECTEL_WWAN_QMAP 4 //MAX is 7
#if defined(QUECTEL_WWAN_QMAP)
#define QUECTEL_QMAP_MUX_ID 0x81
static uint __read_mostly qmap_mode = 0;
module_param( qmap_mode, uint, S_IRUGO);
module_param_named( rx_qmap, qmap_mode, uint, S_IRUGO );
#endif
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
#define QUECTEL_BRIDGE_MODE
#endif
#ifdef QUECTEL_BRIDGE_MODE
static uint __read_mostly bridge_mode = 0/*|BIT(1)*/;
module_param( bridge_mode, uint, S_IRUGO );
#endif
#if defined(QUECTEL_WWAN_QMAP)
#define QUECTEL_UL_DATA_AGG 1
#if defined(QUECTEL_UL_DATA_AGG)
struct tx_agg_ctx {
/* QMIWDS_ADMIN_SET_DATA_FORMAT_RESP TLV_0x17 and TLV_0x18 */
uint ul_data_aggregation_max_datagrams; //UplinkDataAggregationMaxDatagramsTlv
uint ul_data_aggregation_max_size; //UplinkDataAggregationMaxSizeTlv
uint dl_minimum_padding; //0x1A
};
#endif
typedef struct {
unsigned int size;
unsigned int rx_urb_size;
unsigned int ep_type;
unsigned int iface_id;
unsigned int qmap_mode;
unsigned int qmap_version;
unsigned int dl_minimum_padding;
char ifname[8][16];
unsigned char mux_id[8];
} RMNET_INFO;
typedef struct sQmiWwanQmap
{
struct usbnet *mpNetDev;
struct driver_info driver_info;
atomic_t refcount;
struct net_device *mpQmapNetDev[QUECTEL_WWAN_QMAP];
uint link_state;
uint qmap_mode;
uint qmap_size;
uint qmap_version;
#if defined(QUECTEL_UL_DATA_AGG)
struct tx_agg_ctx tx_ctx;
struct tasklet_struct txq;
#endif
#ifdef QUECTEL_BRIDGE_MODE
uint bridge_mode;
uint bridge_ipv4;
unsigned char bridge_mac[ETH_ALEN];
#endif
uint use_rmnet_usb;
RMNET_INFO rmnet_info;
} sQmiWwanQmap;
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,13,0) //8f84985fec10de64a6b4cdfea45f2b0ab8f07c78
#define MHI_NETDEV_STATUS64
#endif
struct qmap_priv {
struct usbnet *dev;
struct net_device *real_dev;
struct net_device *self_dev;
u8 offset_id;
u8 mux_id;
u8 qmap_version; // 5~v1, 9~v5
u8 link_state;
#if defined(MHI_NETDEV_STATUS64)
struct pcpu_sw_netstats __percpu *stats64;
#endif
spinlock_t agg_lock;
struct sk_buff *agg_skb;
unsigned agg_count;
struct timespec64 agg_time;
struct hrtimer agg_hrtimer;
struct work_struct agg_wq;
#ifdef QUECTEL_BRIDGE_MODE
uint bridge_mode;
uint bridge_ipv4;
unsigned char bridge_mac[ETH_ALEN];
#endif
uint use_qca_nss;
};
struct qmap_hdr {
u8 cd_rsvd_pad;
u8 mux_id;
u16 pkt_len;
} __packed;
enum rmnet_map_v5_header_type {
RMNET_MAP_HEADER_TYPE_UNKNOWN,
RMNET_MAP_HEADER_TYPE_COALESCING = 0x1,
RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD = 0x2,
RMNET_MAP_HEADER_TYPE_ENUM_LENGTH
};
/* Main QMAP header */
struct rmnet_map_header {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 pad_len:6;
u8 next_hdr:1;
u8 cd_bit:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
u8 cd_bit:1;
u8 next_hdr:1;
u8 pad_len:6;
#else
#error "Please fix <asm/byteorder.h>"
#endif
u8 mux_id;
__be16 pkt_len;
} __aligned(1);
/* QMAP v5 headers */
struct rmnet_map_v5_csum_header {
#if defined(__LITTLE_ENDIAN_BITFIELD)
u8 next_hdr:1;
u8 header_type:7;
u8 hw_reserved:7;
u8 csum_valid_required:1;
#elif defined (__BIG_ENDIAN_BITFIELD)
u8 header_type:7;
u8 next_hdr:1;
u8 csum_valid_required:1;
u8 hw_reserved:7;
#else
#error "Please fix <asm/byteorder.h>"
#endif
__be16 reserved;
} __aligned(1);
#ifdef QUECTEL_BRIDGE_MODE
static int is_qmap_netdev(const struct net_device *netdev);
#endif
#endif
static const struct driver_info rmnet_usb_info;
#ifdef QUECTEL_BRIDGE_MODE
static int bridge_arp_reply(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4) {
struct arphdr *parp;
u8 *arpptr, *sha;
u8 sip[4], tip[4], ipv4[4];
struct sk_buff *reply = NULL;
ipv4[0] = (bridge_ipv4 >> 24) & 0xFF;
ipv4[1] = (bridge_ipv4 >> 16) & 0xFF;
ipv4[2] = (bridge_ipv4 >> 8) & 0xFF;
ipv4[3] = (bridge_ipv4 >> 0) & 0xFF;
parp = arp_hdr(skb);
if (parp->ar_hrd == htons(ARPHRD_ETHER) && parp->ar_pro == htons(ETH_P_IP)
&& parp->ar_op == htons(ARPOP_REQUEST) && parp->ar_hln == 6 && parp->ar_pln == 4) {
arpptr = (u8 *)parp + sizeof(struct arphdr);
sha = arpptr;
arpptr += net->addr_len; /* sha */
memcpy(sip, arpptr, sizeof(sip));
arpptr += sizeof(sip);
arpptr += net->addr_len; /* tha */
memcpy(tip, arpptr, sizeof(tip));
pr_info("%s sip = %d.%d.%d.%d, tip=%d.%d.%d.%d, ipv4=%d.%d.%d.%d\n", netdev_name(net),
sip[0], sip[1], sip[2], sip[3], tip[0], tip[1], tip[2], tip[3], ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
//wwan0 sip = 10.151.137.255, tip=10.151.138.0, ipv4=10.151.137.255
if (tip[0] == ipv4[0] && tip[1] == ipv4[1] && (tip[2]&0xFC) == (ipv4[2]&0xFC) && tip[3] != ipv4[3])
reply = arp_create(ARPOP_REPLY, ETH_P_ARP, *((__be32 *)sip), net, *((__be32 *)tip), sha, default_modem_addr, sha);
if (reply) {
skb_reset_mac_header(reply);
__skb_pull(reply, skb_network_offset(reply));
reply->ip_summed = CHECKSUM_UNNECESSARY;
reply->pkt_type = PACKET_HOST;
netif_rx_ni(reply);
}
return 1;
}
return 0;
}
static struct sk_buff *bridge_mode_tx_fixup(struct net_device *net, struct sk_buff *skb, uint bridge_ipv4, unsigned char *bridge_mac) {
struct ethhdr *ehdr;
const struct iphdr *iph;
skb_reset_mac_header(skb);
ehdr = eth_hdr(skb);
if (ehdr->h_proto == htons(ETH_P_ARP)) {
if (bridge_ipv4)
bridge_arp_reply(net, skb, bridge_ipv4);
return NULL;
}
iph = ip_hdr(skb);
//DBG("iphdr: ");
//PrintHex((void *)iph, sizeof(struct iphdr));
// 1 0.000000000 0.0.0.0 255.255.255.255 DHCP 362 DHCP Request - Transaction ID 0xe7643ad7
if (ehdr->h_proto == htons(ETH_P_IP) && iph->protocol == IPPROTO_UDP && iph->saddr == 0x00000000 && iph->daddr == 0xFFFFFFFF) {
//if (udp_hdr(skb)->dest == htons(67)) //DHCP Request
{
memcpy(bridge_mac, ehdr->h_source, ETH_ALEN);
pr_info("%s PC Mac Address: %02x:%02x:%02x:%02x:%02x:%02x\n", netdev_name(net),
bridge_mac[0], bridge_mac[1], bridge_mac[2], bridge_mac[3], bridge_mac[4], bridge_mac[5]);
}
}
if (memcmp(ehdr->h_source, bridge_mac, ETH_ALEN)) {
return NULL;
}
return skb;
}
static void bridge_mode_rx_fixup(sQmiWwanQmap *pQmapDev, struct net_device *net, struct sk_buff *skb) {
uint bridge_mode = 0;
unsigned char *bridge_mac;
if (pQmapDev->qmap_mode > 1 || pQmapDev->use_rmnet_usb == 1) {
struct qmap_priv *priv = netdev_priv(net);
bridge_mode = priv->bridge_mode;
bridge_mac = priv->bridge_mac;
}
else {
bridge_mode = pQmapDev->bridge_mode;
bridge_mac = pQmapDev->bridge_mac;
}
if (bridge_mode)
memcpy(eth_hdr(skb)->h_dest, bridge_mac, ETH_ALEN);
else
memcpy(eth_hdr(skb)->h_dest, net->dev_addr, ETH_ALEN);
}
#endif
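/* Bridge-mode summary (descriptive note, not in the original source): when
 * enabled via the bridge_mode sysfs attribute defined below, the interface
 * can sit in a LAN bridge so one host behind it uses the WAN IP directly.
 * bridge_mode_tx_fixup() snoops the host's DHCP broadcast to learn its MAC,
 * bridge_arp_reply() answers ARP requests locally on behalf of the modem
 * (default_modem_addr), and bridge_mode_rx_fixup() rewrites the destination
 * MAC of downlink frames to the learned host MAC. */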
#if defined(QUECTEL_WWAN_QMAP)
static ssize_t qmap_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
return snprintf(buf, PAGE_SIZE, "%d\n", pQmapDev->qmap_mode);
}
static DEVICE_ATTR(qmap_mode, S_IRUGO, qmap_mode_show, NULL);
static ssize_t qmap_size_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
return snprintf(buf, PAGE_SIZE, "%u\n", pQmapDev->qmap_size);
}
static DEVICE_ATTR(qmap_size, S_IRUGO, qmap_size_show, NULL);
static ssize_t link_state_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
return snprintf(buf, PAGE_SIZE, "0x%x\n", pQmapDev->link_state);
}
static ssize_t link_state_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
unsigned link_state = 0;
unsigned old_link = pQmapDev->link_state;
uint offset_id = 0;
link_state = simple_strtoul(buf, NULL, 0);
if (pQmapDev->qmap_mode == 1) {
pQmapDev->link_state = !!link_state;
}
else if (pQmapDev->qmap_mode > 1) {
offset_id = ((link_state&0x7F) - 1);
if (offset_id >= pQmapDev->qmap_mode) {
dev_info(dev, "%s offset_id is %d. but qmap_mode is %d\n", __func__, offset_id, pQmapDev->qmap_mode);
return count;
}
if (link_state&0x80)
pQmapDev->link_state &= ~(1 << offset_id);
else
pQmapDev->link_state |= (1 << offset_id);
}
if (old_link != pQmapDev->link_state) {
struct net_device *qmap_net = pQmapDev->mpQmapNetDev[offset_id];
if (usbnetdev->net->flags & IFF_UP) {
if (pQmapDev->link_state) {
netif_carrier_on(usbnetdev->net);
}
}
if (qmap_net && qmap_net != netdev) {
struct qmap_priv *priv = netdev_priv(qmap_net);
priv->link_state = !!(pQmapDev->link_state & (1 << offset_id));
if (qmap_net->flags & IFF_UP) {
if (priv->link_state) {
netif_carrier_on(qmap_net);
if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(usbnetdev->net))
netif_wake_queue(qmap_net);
}
else {
netif_carrier_off(qmap_net);
}
}
}
if (usbnetdev->net->flags & IFF_UP) {
if (!pQmapDev->link_state) {
netif_carrier_off(usbnetdev->net);
}
}
dev_info(dev, "link_state 0x%x -> 0x%x\n", old_link, pQmapDev->link_state);
}
return count;
}
#ifdef QUECTEL_BRIDGE_MODE
static ssize_t bridge_mode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
struct net_device *netdev = to_net_dev(dev);
uint old_mode = 0;
uint bridge_mode = simple_strtoul(buf, NULL, 0);
if (netdev->type != ARPHRD_ETHER) {
return count;
}
if (is_qmap_netdev(netdev)) {
struct qmap_priv *priv = netdev_priv(netdev);
old_mode = priv->bridge_mode;
priv->bridge_mode = bridge_mode;
}
else {
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
old_mode = pQmapDev->bridge_mode;
pQmapDev->bridge_mode = bridge_mode;
}
if (old_mode != bridge_mode) {
dev_info(dev, "bridge_mode change to 0x%x\n", bridge_mode);
}
return count;
}
static ssize_t bridge_mode_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct net_device *netdev = to_net_dev(dev);
uint bridge_mode = 0;
if (is_qmap_netdev(netdev)) {
struct qmap_priv *priv = netdev_priv(netdev);
bridge_mode = priv->bridge_mode;
}
else {
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
bridge_mode = pQmapDev->bridge_mode;
}
return snprintf(buf, PAGE_SIZE, "%u\n", bridge_mode);
}
static ssize_t bridge_ipv4_show(struct device *dev, struct device_attribute *attr, char *buf) {
struct net_device *netdev = to_net_dev(dev);
unsigned int bridge_ipv4 = 0;
unsigned char ipv4[4];
if (is_qmap_netdev(netdev)) {
struct qmap_priv *priv = netdev_priv(netdev);
bridge_ipv4 = priv->bridge_ipv4;
}
else {
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
bridge_ipv4 = pQmapDev->bridge_ipv4;
}
ipv4[0] = (bridge_ipv4 >> 24) & 0xFF;
ipv4[1] = (bridge_ipv4 >> 16) & 0xFF;
ipv4[2] = (bridge_ipv4 >> 8) & 0xFF;
ipv4[3] = (bridge_ipv4 >> 0) & 0xFF;
return snprintf(buf, PAGE_SIZE, "%d.%d.%d.%d\n", ipv4[0], ipv4[1], ipv4[2], ipv4[3]);
}
static ssize_t bridge_ipv4_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) {
struct net_device *netdev = to_net_dev(dev);
if (is_qmap_netdev(netdev)) {
struct qmap_priv *priv = netdev_priv(netdev);
priv->bridge_ipv4 = simple_strtoul(buf, NULL, 16);
}
else {
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
pQmapDev->bridge_ipv4 = simple_strtoul(buf, NULL, 16);
}
return count;
}
#endif
static DEVICE_ATTR(link_state, S_IWUSR | S_IRUGO, link_state_show, link_state_store);
#ifdef QUECTEL_BRIDGE_MODE
static DEVICE_ATTR(bridge_mode, S_IWUSR | S_IRUGO, bridge_mode_show, bridge_mode_store);
static DEVICE_ATTR(bridge_ipv4, S_IWUSR | S_IRUGO, bridge_ipv4_show, bridge_ipv4_store);
#endif
static struct attribute *qmi_wwan_sysfs_attrs[] = {
&dev_attr_link_state.attr,
&dev_attr_qmap_mode.attr,
&dev_attr_qmap_size.attr,
#ifdef QUECTEL_BRIDGE_MODE
&dev_attr_bridge_mode.attr,
&dev_attr_bridge_ipv4.attr,
#endif
NULL,
};
static struct attribute_group qmi_wwan_sysfs_attr_group = {
.attrs = qmi_wwan_sysfs_attrs,
};
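/* Once this group is attached to the net device (presumably in the truncated
 * bind path, mirroring how qmap_register_device() below attaches
 * qmi_qmap_sysfs_attr_group), the controls appear under
 * /sys/class/net/<ifname>/, e.g. with an assumed ifname of wwan0:
 *   cat /sys/class/net/wwan0/qmap_mode
 *   echo 1 > /sys/class/net/wwan0/link_state
 */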
#ifdef QUECTEL_BRIDGE_MODE
static struct attribute *qmi_qmap_sysfs_attrs[] = {
&dev_attr_bridge_mode.attr,
&dev_attr_bridge_ipv4.attr,
NULL,
};
static struct attribute_group qmi_qmap_sysfs_attr_group = {
.attrs = qmi_qmap_sysfs_attrs,
};
#endif
static int qmap_open(struct net_device *qmap_net)
{
struct qmap_priv *priv = netdev_priv(qmap_net);
struct net_device *real_dev = priv->real_dev;
//printk("%s %s real_dev %d %d %d %d+++\n", __func__, dev->name,
// netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev));
if (!(priv->real_dev->flags & IFF_UP))
return -ENETDOWN;
if (priv->link_state) {
netif_carrier_on(real_dev);
netif_carrier_on(qmap_net);
if (netif_queue_stopped(qmap_net) && !netif_queue_stopped(real_dev))
netif_wake_queue(qmap_net);
}
//printk("%s %s real_dev %d %d %d %d---\n", __func__, dev->name,
// netif_carrier_ok(real_dev), netif_queue_stopped(real_dev), netif_carrier_ok(dev), netif_queue_stopped(dev));
return 0;
}
static int qmap_stop(struct net_device *qmap_net)
{
//printk("%s %s %d %d+++\n", __func__, dev->name,
// netif_carrier_ok(dev), netif_queue_stopped(dev));
netif_carrier_off(qmap_net);
return 0;
}
static void qmap_wake_queue(sQmiWwanQmap *pQmapDev)
{
uint i = 0;
if (!pQmapDev || !pQmapDev->use_rmnet_usb)
return;
for (i = 0; i < pQmapDev->qmap_mode; i++) {
struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) {
netif_wake_queue(qmap_net);
}
}
}
static struct sk_buff * add_qhdr(struct sk_buff *skb, u8 mux_id) {
struct qmap_hdr *qhdr;
int pad = 0;
pad = skb->len%4;
if (pad) {
pad = 4 - pad;
if (skb_tailroom(skb) < pad) {
printk("skb_tailroom small!\n");
pad = 0;
}
if (pad)
__skb_put(skb, pad);
}
qhdr = (struct qmap_hdr *)skb_push(skb, sizeof(struct qmap_hdr));
qhdr->cd_rsvd_pad = pad;
qhdr->mux_id = mux_id;
qhdr->pkt_len = cpu_to_be16(skb->len - sizeof(struct qmap_hdr));
return skb;
}
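/* Worked example for add_qhdr(): a 1399-byte datagram gives pad = 4 - (1399 % 4)
 * = 1, so one trailing pad byte is appended and the 4-byte header reads
 * cd_rsvd_pad = 0x01, mux_id = 0x81 (first QMAP instance), pkt_len = htons(1400).
 * pkt_len covers payload plus padding but not the header itself. */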
static struct sk_buff * add_qhdr_v5(struct sk_buff *skb, u8 mux_id) {
struct rmnet_map_header *map_header;
struct rmnet_map_v5_csum_header *ul_header;
u32 padding, map_datalen;
map_datalen = skb->len;
padding = map_datalen%4;
if (padding) {
padding = 4 - padding;
if (skb_tailroom(skb) < padding) {
printk("skb_tailroom small!\n");
padding = 0;
}
if (padding)
__skb_put(skb, padding);
}
map_header = (struct rmnet_map_header *)skb_push(skb, (sizeof(struct rmnet_map_header) + sizeof(struct rmnet_map_v5_csum_header)));
map_header->cd_bit = 0;
map_header->next_hdr = 1;
map_header->pad_len = padding;
map_header->mux_id = mux_id;
map_header->pkt_len = htons(map_datalen + padding);
ul_header = (struct rmnet_map_v5_csum_header *)(map_header + 1);
memset(ul_header, 0, sizeof(*ul_header));
ul_header->header_type = RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD;
if (skb->ip_summed == CHECKSUM_PARTIAL) {
#if 0 //TODO
skb->ip_summed = CHECKSUM_NONE;
/* Ask for checksum offloading */
ul_header->csum_valid_required = 1;
#endif
}
return skb;
}
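/* add_qhdr_v5() prepends 8 bytes instead of 4: the same rmnet_map_header but
 * with next_hdr = 1, followed by a rmnet_map_v5_csum_header whose header_type
 * is RMNET_MAP_HEADER_TYPE_CSUM_OFFLOAD. With the checksum-offload request
 * stubbed out (#if 0 above), that second header stays all-zero apart from
 * header_type; pkt_len again counts payload plus padding only. */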
static void rmnet_vnd_update_rx_stats(struct net_device *net,
unsigned rx_packets, unsigned rx_bytes) {
#if defined(MHI_NETDEV_STATUS64)
struct qmap_priv *dev = netdev_priv(net);
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
stats64->rx_packets += rx_packets;
stats64->rx_bytes += rx_bytes;
u64_stats_update_end(&stats64->syncp);
#else
net->stats.rx_packets += rx_packets;
net->stats.rx_bytes += rx_bytes;
#endif
}
static void rmnet_vnd_update_tx_stats(struct net_device *net,
unsigned tx_packets, unsigned tx_bytes) {
#if defined(MHI_NETDEV_STATUS64)
struct qmap_priv *dev = netdev_priv(net);
struct pcpu_sw_netstats *stats64 = this_cpu_ptr(dev->stats64);
u64_stats_update_begin(&stats64->syncp);
stats64->tx_packets += tx_packets;
stats64->tx_bytes += tx_bytes;
u64_stats_update_end(&stats64->syncp);
#else
net->stats.tx_packets += tx_packets;
net->stats.tx_bytes += tx_bytes;
#endif
}
#if defined(MHI_NETDEV_STATUS64)
static struct rtnl_link_stats64 *_rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats)
{
struct qmap_priv *dev = netdev_priv(net);
unsigned int start;
int cpu;
netdev_stats_to_stats64(stats, &net->stats);
if (nss_cb && dev->use_qca_nss) { // rmnet_nss.c:rmnet_nss_tx() will update rx stats
stats->rx_packets = 0;
stats->rx_bytes = 0;
}
for_each_possible_cpu(cpu) {
struct pcpu_sw_netstats *stats64;
u64 rx_packets, rx_bytes;
u64 tx_packets, tx_bytes;
stats64 = per_cpu_ptr(dev->stats64, cpu);
do {
start = u64_stats_fetch_begin_irq(&stats64->syncp);
rx_packets = stats64->rx_packets;
rx_bytes = stats64->rx_bytes;
tx_packets = stats64->tx_packets;
tx_bytes = stats64->tx_bytes;
} while (u64_stats_fetch_retry_irq(&stats64->syncp, start));
stats->rx_packets += rx_packets;
stats->rx_bytes += rx_bytes;
stats->tx_packets += tx_packets;
stats->tx_bytes += tx_bytes;
}
return stats;
}
#if (LINUX_VERSION_CODE > KERNEL_VERSION( 4,10,0 )) //bc1f44709cf27fb2a5766cadafe7e2ad5e9cb221
static void rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) {
_rmnet_vnd_get_stats64(net, stats);
}
#else
static struct rtnl_link_stats64 *rmnet_vnd_get_stats64(struct net_device *net, struct rtnl_link_stats64 *stats) {
return _rmnet_vnd_get_stats64(net, stats);
}
#endif
#endif
#if defined(QUECTEL_UL_DATA_AGG)
static void rmnet_usb_tx_wake_queue(unsigned long data) {
qmap_wake_queue((sQmiWwanQmap *)data);
}
static void rmnet_usb_tx_skb_destructor(struct sk_buff *skb) {
struct net_device *net = skb->dev;
struct usbnet * dev = netdev_priv( net );
struct qmi_wwan_state *info = (void *)&dev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
if (pQmapDev && pQmapDev->use_rmnet_usb) {
int i;
for (i = 0; i < pQmapDev->qmap_mode; i++) {
struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
if (qmap_net && netif_carrier_ok(qmap_net) && netif_queue_stopped(qmap_net)) {
tasklet_schedule(&pQmapDev->txq);
break;
}
}
}
}
static int rmnet_usb_tx_agg_skip(struct sk_buff *skb, int offset)
{
u8 *packet_start = skb->data + offset;
int ready2send = 0;
if (skb->protocol == htons(ETH_P_IP)) {
struct iphdr *ip4h = (struct iphdr *)(packet_start);
if (ip4h->protocol == IPPROTO_TCP) {
const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct iphdr));
if (th->psh) {
ready2send = 1;
}
}
else if (ip4h->protocol == IPPROTO_ICMP)
ready2send = 1;
} else if (skb->protocol == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = (struct ipv6hdr *)(packet_start);
if (ip6h->nexthdr == NEXTHDR_TCP) {
const struct tcphdr *th = (const struct tcphdr *)(packet_start + sizeof(struct ipv6hdr));
if (th->psh) {
ready2send = 1;
}
} else if (ip6h->nexthdr == NEXTHDR_ICMP) {
ready2send = 1;
} else if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
struct frag_hdr *frag;
frag = (struct frag_hdr *)(packet_start
+ sizeof(struct ipv6hdr));
if (frag->nexthdr == IPPROTO_ICMPV6)
ready2send = 1;
}
}
return ready2send;
}
static void rmnet_usb_tx_agg_work(struct work_struct *work)
{
struct qmap_priv *priv =
container_of(work, struct qmap_priv, agg_wq);
struct sk_buff *skb = NULL;
unsigned long flags;
spin_lock_irqsave(&priv->agg_lock, flags);
if (likely(priv->agg_skb)) {
skb = priv->agg_skb;
priv->agg_skb = NULL;
priv->agg_count = 0;
skb->protocol = htons(ETH_P_MAP);
skb->dev = priv->real_dev;
ktime_get_ts64(&priv->agg_time);
}
spin_unlock_irqrestore(&priv->agg_lock, flags);
if (skb) {
int err = dev_queue_xmit(skb);
if (err != NET_XMIT_SUCCESS) {
priv->self_dev->stats.tx_errors++;
}
}
}
static enum hrtimer_restart rmnet_usb_tx_agg_timer_cb(struct hrtimer *timer)
{
struct qmap_priv *priv =
container_of(timer, struct qmap_priv, agg_hrtimer);
schedule_work(&priv->agg_wq);
return HRTIMER_NORESTART;
}
static long agg_time_limit __read_mostly = 1000000L; //reducing this time can improve throughput, but will increase USB interrupts
module_param(agg_time_limit, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_time_limit, "Maximum time packets sit in the agg buf");
static long agg_bypass_time __read_mostly = 10000000L;
module_param(agg_bypass_time, long, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(agg_bypass_time, "Skip aggregation when packets arrive further apart than this");
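/* Uplink aggregation in brief (descriptive summary of rmnet_usb_tx_agg()
 * below): QMAP frames are copied into a single large agg_skb and flushed to
 * the real device when (a) the next frame would exceed
 * ul_data_aggregation_max_size, (b) ul_data_aggregation_max_datagrams frames
 * have been queued, (c) the buffer has been held longer than agg_time_limit,
 * or (d) the stack reports no further packets pending (xmit_more == 0) and
 * the frame looks latency-sensitive (TCP PSH, ICMP; see
 * rmnet_usb_tx_agg_skip()). A 2 ms hrtimer flushes anything left over. */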
static int rmnet_usb_tx_agg(struct sk_buff *skb, struct qmap_priv *priv) {
struct qmi_wwan_state *info = (void *)&priv->dev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
struct tx_agg_ctx *ctx = &pQmapDev->tx_ctx;
int ready2send = 0;
int xmit_more = 0;
struct timespec64 diff, now;
struct sk_buff *agg_skb = NULL;
unsigned long flags;
int err;
struct net_device *pNet = priv->self_dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0) //6b16f9ee89b8d5709f24bc3ac89ae8b5452c0d7c
#if LINUX_VERSION_CODE > KERNEL_VERSION(3,16,0)
xmit_more = skb->xmit_more;
#endif
#else
xmit_more = netdev_xmit_more();
#endif
rmnet_vnd_update_tx_stats(pNet, 1, skb->len);
if (ctx->ul_data_aggregation_max_datagrams == 1) {
skb->protocol = htons(ETH_P_MAP);
skb->dev = priv->real_dev;
if (!skb->destructor)
skb->destructor = rmnet_usb_tx_skb_destructor;
err = dev_queue_xmit(skb);
if (err != NET_XMIT_SUCCESS)
pNet->stats.tx_errors++;
return NET_XMIT_SUCCESS;
}
new_packet:
spin_lock_irqsave(&priv->agg_lock, flags);
agg_skb = NULL;
ready2send = 0;
ktime_get_ts64(&now);
diff = timespec64_sub(now, priv->agg_time);
if (priv->agg_skb) {
if ((priv->agg_skb->len + skb->len) < ctx->ul_data_aggregation_max_size) {
memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
priv->agg_count++;
if (diff.tv_sec > 0 || diff.tv_nsec > agg_time_limit) {
ready2send = 1;
}
else if (priv->agg_count == ctx->ul_data_aggregation_max_datagrams) {
ready2send = 1;
}
else if (xmit_more == 0) {
struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
size_t offset = sizeof(struct rmnet_map_header);
if (map_header->next_hdr)
offset += sizeof(struct rmnet_map_v5_csum_header);
ready2send = rmnet_usb_tx_agg_skip(skb, offset);
}
dev_kfree_skb_any(skb);
skb = NULL;
}
else {
ready2send = 1;
}
if (ready2send) {
agg_skb = priv->agg_skb;
priv->agg_skb = NULL;
priv->agg_count = 0;
}
}
else if (skb) {
if (diff.tv_sec > 0 || diff.tv_nsec > agg_bypass_time) {
ready2send = 1;
}
else if (xmit_more == 0) {
struct rmnet_map_header *map_header = (struct rmnet_map_header *)skb->data;
size_t offset = sizeof(struct rmnet_map_header);
if (map_header->next_hdr)
offset += sizeof(struct rmnet_map_v5_csum_header);
ready2send = rmnet_usb_tx_agg_skip(skb, offset);
}
if (ready2send == 0) {
priv->agg_skb = alloc_skb(ctx->ul_data_aggregation_max_size, GFP_ATOMIC);
if (priv->agg_skb) {
skb_reset_network_header(priv->agg_skb); //avoids "protocol da1a is buggy, dev wwan0" warnings (0xda1a == ETH_P_MAP)
memcpy(skb_put(priv->agg_skb, skb->len), skb->data, skb->len);
priv->agg_count++;
dev_kfree_skb_any(skb);
skb = NULL;
}
else {
ready2send = 1;
}
}
if (ready2send) {
agg_skb = skb;
skb = NULL;
}
}
if (ready2send) {
priv->agg_time = now;
}
spin_unlock_irqrestore(&priv->agg_lock, flags);
if (agg_skb) {
agg_skb->protocol = htons(ETH_P_MAP);
agg_skb->dev = priv->real_dev;
if (!agg_skb->destructor)
agg_skb->destructor = rmnet_usb_tx_skb_destructor;
err = dev_queue_xmit(agg_skb);
if (err != NET_XMIT_SUCCESS) {
pNet->stats.tx_errors++;
}
}
if (skb) {
goto new_packet;
}
if (priv->agg_skb) {
if (!hrtimer_is_queued(&priv->agg_hrtimer))
hrtimer_start(&priv->agg_hrtimer, ns_to_ktime(NSEC_PER_MSEC * 2), HRTIMER_MODE_REL);
}
return NET_XMIT_SUCCESS;
}
#endif
static netdev_tx_t rmnet_vnd_start_xmit(struct sk_buff *skb,
struct net_device *pNet)
{
int err;
struct qmap_priv *priv = netdev_priv(pNet);
if (netif_queue_stopped(priv->real_dev)) {
netif_stop_queue(pNet);
return NETDEV_TX_BUSY;
}
//printk("%s 1 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (pNet->type == ARPHRD_ETHER) {
skb_reset_mac_header(skb);
#ifdef QUECTEL_BRIDGE_MODE
if (priv->bridge_mode && bridge_mode_tx_fixup(pNet, skb, priv->bridge_ipv4, priv->bridge_mac) == NULL) {
dev_kfree_skb_any (skb);
return NETDEV_TX_OK;
}
#endif
if (skb_pull(skb, ETH_HLEN) == NULL) {
dev_kfree_skb_any (skb);
return NETDEV_TX_OK;
}
}
//printk("%s 2 skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (priv->qmap_version == 5) {
add_qhdr(skb, priv->mux_id);
}
else if (priv->qmap_version == 9) {
add_qhdr_v5(skb, priv->mux_id);
}
else {
dev_kfree_skb_any (skb);
return NETDEV_TX_OK;
}
//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
err = rmnet_usb_tx_agg(skb, priv);
return err;
}
static int rmnet_vnd_change_mtu(struct net_device *rmnet_dev, int new_mtu)
{
if (new_mtu < 0 || new_mtu > 1500)
return -EINVAL;
rmnet_dev->mtu = new_mtu;
return 0;
}
/* drivers may override default ethtool_ops in their bind() routine */
static const struct ethtool_ops rmnet_vnd_ethtool_ops = {
.get_link = ethtool_op_get_link,
};
static const struct net_device_ops rmnet_vnd_ops = {
.ndo_open = qmap_open,
.ndo_stop = qmap_stop,
.ndo_start_xmit = rmnet_vnd_start_xmit,
.ndo_change_mtu = rmnet_vnd_change_mtu,
#if defined(MHI_NETDEV_STATUS64)
.ndo_get_stats64 = rmnet_vnd_get_stats64,
#endif
};
static void rmnet_usb_ether_setup(struct net_device *rmnet_dev)
{
ether_setup(rmnet_dev);
rmnet_dev->flags |= IFF_NOARP;
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
rmnet_dev->ethtool_ops = &rmnet_vnd_ethtool_ops;
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
}
static void rmnet_usb_rawip_setup(struct net_device *rmnet_dev)
{
rmnet_dev->needed_headroom = 16;
/* Raw IP mode */
rmnet_dev->header_ops = NULL; /* No header */
rmnet_dev->type = ARPHRD_RAWIP;
rmnet_dev->hard_header_len = 0;
rmnet_dev->flags |= IFF_NOARP;
rmnet_dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
rmnet_dev->ethtool_ops = &rmnet_vnd_ethtool_ops;
rmnet_dev->netdev_ops = &rmnet_vnd_ops;
}
static rx_handler_result_t qca_nss_rx_handler(struct sk_buff **pskb)
{
struct sk_buff *skb = *pskb;
if (!skb)
return RX_HANDLER_CONSUMED;
//printk("%s skb=%p, len=%d, protocol=%x, hdr_len=%d\n", __func__, skb, skb->len, skb->protocol, skb->hdr_len);
if (skb->pkt_type == PACKET_LOOPBACK)
return RX_HANDLER_PASS;
/* Check this so that we don't loop around netif_receive_skb */
if (skb->cb[0] == 1) {
skb->cb[0] = 0;
return RX_HANDLER_PASS;
}
if (nss_cb) {
nss_cb->nss_tx(skb);
return RX_HANDLER_CONSUMED;
}
return RX_HANDLER_PASS;
}
static int qmap_register_device(sQmiWwanQmap * pDev, u8 offset_id)
{
struct net_device *real_dev = pDev->mpNetDev->net;
struct net_device *qmap_net;
struct qmap_priv *priv;
int err;
char name[IFNAMSIZ];
int use_qca_nss = !!nss_cb;
sprintf(name, "%s_%d", real_dev->name, offset_id + 1);
#ifdef NET_NAME_UNKNOWN
qmap_net = alloc_netdev(sizeof(struct qmap_priv), name,
NET_NAME_UNKNOWN, rmnet_usb_ether_setup);
#else
qmap_net = alloc_netdev(sizeof(struct qmap_priv), name,
rmnet_usb_ether_setup);
#endif
if (!qmap_net)
return -ENOBUFS;
SET_NETDEV_DEV(qmap_net, &real_dev->dev);
priv = netdev_priv(qmap_net);
priv->offset_id = offset_id;
priv->real_dev = real_dev;
priv->self_dev = qmap_net;
priv->dev = pDev->mpNetDev;
priv->qmap_version = pDev->qmap_version;
priv->mux_id = QUECTEL_QMAP_MUX_ID + offset_id;
memcpy (qmap_net->dev_addr, real_dev->dev_addr, ETH_ALEN);
#ifdef QUECTEL_BRIDGE_MODE
priv->bridge_mode = !!(pDev->bridge_mode & BIT(offset_id));
qmap_net->sysfs_groups[0] = &qmi_qmap_sysfs_attr_group;
if (priv->bridge_mode)
use_qca_nss = 0;
#endif
if (nss_cb && use_qca_nss) {
rmnet_usb_rawip_setup(qmap_net);
}
priv->agg_skb = NULL;
priv->agg_count = 0;
hrtimer_init(&priv->agg_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
priv->agg_hrtimer.function = rmnet_usb_tx_agg_timer_cb;
INIT_WORK(&priv->agg_wq, rmnet_usb_tx_agg_work);
ktime_get_ts64(&priv->agg_time);
spin_lock_init(&priv->agg_lock);
priv->use_qca_nss = 0;
#if defined(MHI_NETDEV_STATUS64)
priv->stats64 = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
if (!priv->stats64) {
err = -ENOBUFS;
goto out_free_newdev;
}
#endif
err = register_netdev(qmap_net);
if (err)
dev_info(&real_dev->dev, "%s(%s)=%d\n", __func__, qmap_net->name, err);
if (err < 0)
goto out_free_newdev;
netif_device_attach (qmap_net);
netif_carrier_off(qmap_net);
if (nss_cb && use_qca_nss) {
int rc = nss_cb->nss_create(qmap_net);
if (rc) {
/* Log, but don't fail the device creation */
netdev_err(qmap_net, "Device will not use NSS path: %d\n", rc);
} else {
priv->use_qca_nss = 1;
netdev_info(qmap_net, "NSS context created\n");
rtnl_lock();
netdev_rx_handler_register(qmap_net, qca_nss_rx_handler, NULL);
rtnl_unlock();
}
}
strcpy(pDev->rmnet_info.ifname[offset_id], qmap_net->name);
pDev->rmnet_info.mux_id[offset_id] = priv->mux_id;
pDev->mpQmapNetDev[offset_id] = qmap_net;
dev_info(&real_dev->dev, "%s %s\n", __func__, qmap_net->name);
return 0;
out_free_newdev:
free_netdev(qmap_net);
return err;
}
static void qmap_unregister_device(sQmiWwanQmap * pDev, u8 offset_id) {
struct net_device *qmap_net = pDev->mpQmapNetDev[offset_id];
if (qmap_net != NULL && qmap_net != pDev->mpNetDev->net) {
struct qmap_priv *priv = netdev_priv(qmap_net);
unsigned long flags;
pr_info("qmap_unregister_device(%s)\n", qmap_net->name);
pDev->mpQmapNetDev[offset_id] = NULL;
netif_carrier_off( qmap_net );
netif_stop_queue( qmap_net );
hrtimer_cancel(&priv->agg_hrtimer);
cancel_work_sync(&priv->agg_wq);
spin_lock_irqsave(&priv->agg_lock, flags);
if (priv->agg_skb) {
kfree_skb(priv->agg_skb);
}
spin_unlock_irqrestore(&priv->agg_lock, flags);
if (nss_cb && priv->use_qca_nss) {
rtnl_lock();
netdev_rx_handler_unregister(qmap_net);
rtnl_unlock();
nss_cb->nss_free(qmap_net);
}
#if defined(MHI_NETDEV_STATUS64)
free_percpu(priv->stats64);
#endif
unregister_netdev (qmap_net);
free_netdev(qmap_net);
}
}
typedef struct {
unsigned int size;
unsigned int rx_urb_size;
unsigned int ep_type;
unsigned int iface_id;
unsigned int MuxId;
unsigned int ul_data_aggregation_max_datagrams; //0x17
unsigned int ul_data_aggregation_max_size; //0x18
unsigned int dl_minimum_padding; //0x1A
} QMAP_SETTING;
int qma_setting_store(struct device *dev, QMAP_SETTING *qmap_settings, size_t size) {
struct net_device *netdev = to_net_dev(dev);
struct usbnet * usbnetdev = netdev_priv( netdev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
if (qmap_settings->size != size) {
dev_err(dev, "ERROR: qmap_settings.size donot match!\n");
return -EOPNOTSUPP;
}
#ifdef QUECTEL_UL_DATA_AGG
netif_tx_lock_bh(netdev);
if (pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams == 1 && qmap_settings->ul_data_aggregation_max_datagrams > 1) {
pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams = qmap_settings->ul_data_aggregation_max_datagrams;
pQmapDev->tx_ctx.ul_data_aggregation_max_size = qmap_settings->ul_data_aggregation_max_size;
pQmapDev->tx_ctx.dl_minimum_padding = qmap_settings->dl_minimum_padding;
dev_info(dev, "ul_data_aggregation_max_datagrams=%d, ul_data_aggregation_max_size=%d, dl_minimum_padding=%d\n",
pQmapDev->tx_ctx.ul_data_aggregation_max_datagrams,
pQmapDev->tx_ctx.ul_data_aggregation_max_size,
pQmapDev->tx_ctx.dl_minimum_padding);
}
netif_tx_unlock_bh(netdev);
return 0;
#endif
return -EOPNOTSUPP;
}
static int qmap_ndo_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) {
struct usbnet * usbnetdev = netdev_priv( dev );
struct qmi_wwan_state *info = (void *)&usbnetdev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
int rc = -EOPNOTSUPP;
uint link_state = 0;
QMAP_SETTING qmap_settings = {0};
switch (cmd) {
case 0x89F1: //SIOCDEVPRIVATE
rc = copy_from_user(&link_state, ifr->ifr_ifru.ifru_data, sizeof(link_state));
if (!rc) {
char buf[32];
snprintf(buf, sizeof(buf), "%u", link_state);
link_state_store(&dev->dev, NULL, buf, strlen(buf));
}
break;
case 0x89F2: //SIOCDEVPRIVATE
rc = copy_from_user(&qmap_settings, ifr->ifr_ifru.ifru_data, sizeof(qmap_settings));
if (!rc) {
rc = qma_setting_store(&dev->dev, &qmap_settings, sizeof(qmap_settings));
}
break;
case 0x89F3: //SIOCDEVPRIVATE
if (pQmapDev->use_rmnet_usb) {
uint i;
for (i = 0; i < pQmapDev->qmap_mode; i++) {
struct net_device *qmap_net = pQmapDev->mpQmapNetDev[i];
if (!qmap_net)
break;
strcpy(pQmapDev->rmnet_info.ifname[i], qmap_net->name);
}
rc = copy_to_user(ifr->ifr_ifru.ifru_data, &pQmapDev->rmnet_info, sizeof(pQmapDev->rmnet_info));
}
break;
default:
break;
}
return rc;
}
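/* Userspace sketch (hypothetical, modeled on what a connection manager such
 * as quectel-CM does; the interface name is an assumption): the data call is
 * toggled through the first private ioctl above.
 *
 *   int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *   struct ifreq ifr = {0};
 *   unsigned link_state = 1;                    // nonzero: up (qmap_mode == 1)
 *   strncpy(ifr.ifr_name, "wwan0", IFNAMSIZ - 1);
 *   ifr.ifr_ifru.ifru_data = (void *)&link_state;
 *   ioctl(fd, 0x89F1, &ifr);                    // SIOCDEVPRIVATE + 1
 *   close(fd);
 */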
#ifdef QUECTEL_BRIDGE_MODE
static int is_qmap_netdev(const struct net_device *netdev) {
return netdev->netdev_ops == &rmnet_vnd_ops;
}
#endif
#endif
static struct sk_buff *qmi_wwan_tx_fixup(struct usbnet *dev, struct sk_buff *skb, gfp_t flags) {
//MDM9x07, MDM9628, MDM9x40, SDX20, SDX24 only work in RAW IP mode
if ((dev->driver_info->flags & FLAG_NOARP) == 0)
return skb;
// Skip Ethernet header from message
if (dev->net->hard_header_len == 0)
return skb;
else
skb_reset_mac_header(skb);
#ifdef QUECTEL_BRIDGE_MODE
{
struct qmi_wwan_state *info = (void *)&dev->data;
sQmiWwanQmap *pQmapDev = (sQmiWwanQmap *)info->unused;
if (pQmapDev->bridge_mode && bridge_mode_tx_fixup(dev->net, skb, pQmapDev->bridge_ipv4, pQmapDev->bridge_mac) == NULL) {
dev_kfree_skb_any (skb);
return NULL;
}
}
#endif
if (skb_pull(skb, ETH_HLEN)) {
return skb;
} else {
dev_err(&dev->intf->dev, "packet dropped\n");
}
// Filter the packet out, release it
dev_kfree_skb_any(skb);
return NULL;
}
#endif
/* Make up an ethernet header if the packet doesn't have one.
*
* A firmware bug common among several devices causes them to send raw
* IP packets under some circumstances. There is no way for the
* driver/host to know when this will happen. And even when the bug
* hits, some packets will still arrive with an intact header.
*
* The supported devices are only capable of sending IPv4, IPv6 and
* ARP packets on a point-to-point link. Any packet with an ethernet
* header will have either our address or a broadcast/multicast
* address as destination. ARP packets will always have a header.
*
* This means that this function will reliably add the appropriate
* header iff necessary, provided our hardware address does not start
* with 4 or 6.
*
* Another common firmware bug results in all packets being addressed
* to 00:a0:c6:00:00:00 despite the host address being different.
* This function will also fixup such packets.
*/
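/* Example: a raw IPv4 packet begins with the version/IHL byte, e.g. 0x45, so
 * (skb->data[0] & 0xf0) == 0x40 selects ETH_P_IP; 0x60 likewise selects
 * ETH_P_IPV6. */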
static int qmi_wwan_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
{
__be16 proto;
/* This check is no longer done by usbnet */
if (skb->len < dev->net->hard_header_len)
return 0;
switch (skb->data[0] & 0xf0) {
case 0x40:
... This diff was truncated because it exceeds the maximum size that can be displayed.