系列文章:
Linux spi驱动框架分析(一)
Linux spi驱动框架分析(二)
Linux spi驱动框架分析(三)
Linux spi驱动框架分析(四)
spi_master的消息队列机制
SPI数据传输可以有两种方式:同步方式和异步方式。所谓同步方式是指数据传输的发起者必须等待本次传输的结束,期间不能做其它事情,用代码来解释就是,调用传输的函数后,直到数据传输完成,函数才会返回。而异步方式则正好相反,数据传输的发起者无需等待传输的结束,数据传输期间还可以做其它事情,用代码来解释就是,调用传输的函数后,函数会立刻返回而不用等待数据传输完成,我们只需设置一个回调函数,传输完成后,该回调函数会被调用以通知发起者数据传送已经完成。同步方式简单易用,很适合处理那些少量数据的单次传输。但是对于数据量大、次数多的传输来说,异步方式就显得更加合适。
对于SPI控制器来说,要支持异步方式必须要考虑以下两种状况:
-
对于同一个数据传输的发起者,既然异步方式无需等待数据传输完成即可返回,返回后,该发起者可以立刻又发起一个message,而这时上一个message还没有处理完。
-
对于另外一个不同的发起者来说,也有可能同时发起一次message传输请求。
队列化正是为了解决以上的问题,所谓队列化,是指把等待传输的message放入一个队列中,发起一个传输操作,其实就是把对应的message按先后顺序放入一个队列中。内核会创建一个内核工作线程,通过线程来处理队列上的message。
一个或者多个设备驱动程序可以同时向控制器驱动发起多个spi_message请求,这些spi_message也是以链表的形式被链接在spi_master结构体的queue成员里。
spi_master,spi_message,spi_transfer这几个数据结构的关系可以用下图来描述:
如果spi控制器驱动要想支持消息队列机制的话,注册spi_master时,其transfer成员不能设置,具体细节如下代码所示:
/*
 * Register an SPI controller with the core. If the driver left
 * master->transfer unset, the core installs the queued-message
 * infrastructure for it.
 */
int spi_register_master(struct spi_master *master)
{
static atomic_t dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
struct device *dev = master->dev.parent;
struct boardinfo *bi;
int status = -ENODEV;
int dynamic = 0;
......
if (master->transfer)
dev_info(dev, "master is unqueued, this is deprecated\n");
else {
//Initialize the message-queue mechanism: create the kernel worker thread, etc.
status = spi_master_initialize_queue(master);
if (status) {
device_del(&master->dev);
goto done;
}
}
......
}
spi_master_initialize_queue函数执行流程图如下所示:
进入spi_master_initialize_queue函数:
/*
 * Hook the queued-message machinery into this controller:
 * install the default callbacks, create the worker thread,
 * and start the queue.
 */
static int spi_master_initialize_queue(struct spi_master *master)
{
int ret;
//Route all transfer requests through the queued path
master->transfer = spi_queued_transfer;
//Fall back to the generic implementation if the driver supplied none
if (!master->transfer_one_message)
master->transfer_one_message = spi_transfer_one_message;
/* Create the worker thread and the pump_messages work item */
ret = spi_init_queue(master);
if (ret) {
dev_err(&master->dev, "problem initializing queue\n");
goto err_init_queue;
}
master->queued = true;
//Kick the queue so pending messages get pumped
ret = spi_start_queue(master);
if (ret) {
dev_err(&master->dev, "problem starting queue\n");
goto err_start_queue;
}
return 0;
err_start_queue:
spi_destroy_queue(master);
err_init_queue:
return ret;
}
在分析spi_init_queue函数之前,先介绍下什么是工作线程。要完成工作的话,得有工人和工作。kthread_worker代表工人,而kthread_work代表工作,定义于include/linux/kthread.h。
/* The "worker": a kernel thread that services a list of work items. */
struct kthread_worker {
unsigned int flags;
spinlock_t lock;
//Pending kthread_work items are linked here (the "assembly line")
struct list_head work_list;
struct list_head delayed_work_list;
//task_struct of the kernel thread that services this worker
struct task_struct *task;
//The kthread_work currently being processed
struct kthread_work *current_work;
};
/* One unit of work to be executed by a kthread_worker. */
struct kthread_work {
struct list_head node;
//Handler function: the actual job this work item performs
kthread_work_func_t func;
//The kthread_worker that will process this work item
struct kthread_worker *worker;
/* Number of canceling calls that are running at the moment. */
int canceling;
};
工作线程即是创建一个内核线程,该内核线程执行kthread_worker_fn函数,在该函数中会从kthread_worker的work_list链表里,取出每个kthread_work然后执行kthread_work里的func函数。
spi_init_queue函数主要做创建线程,初始化kthread_worker等工作。
/*
 * Create the controller's worker thread and initialize its
 * kthread_worker / kthread_work pair.
 */
static int spi_init_queue(struct spi_master *master)
{
struct sched_param param = { .sched_priority = MAX_RT_PRIO - 1 };
master->running = false;
master->busy = false;
//Initialize the kthread_worker embedded in spi_master
kthread_init_worker(&master->kworker);
//Spawn the kernel thread; its main loop is kthread_worker_fn
master->kworker_task = kthread_run(kthread_worker_fn,
&master->kworker, "%s",
dev_name(&master->dev));
......
//Initialize the kthread_work; its handler is spi_pump_messages
kthread_init_work(&master->pump_messages, spi_pump_messages);
......
}
初始化完后,调用spi_start_queue函数开始工作:
/* Mark the queue running and schedule the first pump. */
static int spi_start_queue(struct spi_master *master)
{
unsigned long flags;
......
//Put master->pump_messages on master->kworker's work_list and wake the worker thread
kthread_queue_work(&master->kworker, &master->pump_messages);
return 0;
}
工人和工作都有了,准备就绪,看看kthread_worker_fn函数:
/*
 * Main loop of the worker's kernel thread: repeatedly dequeue a
 * kthread_work and run its handler, sleeping when the list is empty.
 */
int kthread_worker_fn(void *worker_ptr)
{
struct kthread_worker *worker = worker_ptr;
struct kthread_work *work;
......
repeat:
......
work = NULL;
spin_lock_irq(&worker->lock);
//Check whether the work_list is empty
if (!list_empty(&worker->work_list)) {
//Not empty: take the first kthread_work off the list
work = list_first_entry(&worker->work_list,
struct kthread_work, node);
list_del_init(&work->node);
}
worker->current_work = work;
spin_unlock_irq(&worker->lock);
//If we got a kthread_work, run it
if (work) {
__set_current_state(TASK_RUNNING);
work->func(work); //execute the work; here this calls spi_pump_messages
} else if (!freezing(current))
schedule(); //no work: sleep until woken
try_to_freeze();
goto repeat;
}
存在kthread_work,调用里面的func函数,即spi_pump_messages:
/*
 * kthread_work handler: recover the owning spi_master from the
 * embedded work item and pump its message queue.
 */
static void spi_pump_messages(struct kthread_work *work)
{
struct spi_master *master =
container_of(work, struct spi_master, pump_messages);
__spi_pump_messages(master, true);
}
/*
 * Take the next spi_message off master->queue, prepare the hardware
 * and the message, then hand it to transfer_one_message.
 */
static void __spi_pump_messages(struct spi_master *master, bool in_kthread)
{
unsigned long flags;
bool was_busy = false;
int ret;
......
/* Nothing queued (or queue stopped): tear down and bail out */
if (list_empty(&master->queue) || !master->running) {
......
//Let the driver release hardware resources via unprepare_transfer_hardware
if (master->unprepare_transfer_hardware &&
master->unprepare_transfer_hardware(master))
dev_err(&master->dev,
"failed to unprepare transfer hardware\n");
......
return;
}
//Queue is non-empty: take the first message as the current one
master->cur_msg =
list_first_entry(&master->queue, struct spi_message, queue);
......
//If the controller was idle, let the driver claim hardware resources
if (!was_busy && master->prepare_transfer_hardware) {
ret = master->prepare_transfer_hardware(master);
......
}
trace_spi_message_start(master->cur_msg);
//Optional per-message preprocessing hook
if (master->prepare_message) {
ret = master->prepare_message(master, master->cur_msg);
......
master->cur_msg_prepared = true;
}
......
//Finally transfer the message (spi_transfer_one_message by default)
ret = master->transfer_one_message(master, master->cur_msg);
......
}
前面分析spi_master_initialize_queue函数时,如果驱动未提供transfer_one_message则设置为spi_transfer_one_message,那么就来分析这个函数:
/*
 * Generic transfer_one_message implementation: assert chip select,
 * push each spi_transfer through the driver's transfer_one callback,
 * then finalize the message.
 */
static int spi_transfer_one_message(struct spi_master *master,
struct spi_message *msg)
{
struct spi_transfer *xfer;
bool keep_cs = false;
int ret = 0;
unsigned long long ms = 1;
struct spi_statistics *statm = &master->statistics;
struct spi_statistics *stats = &msg->spi->statistics;
//Assert chip select for this device
spi_set_cs(msg->spi, true);
......
//Walk every spi_transfer in the message and send it via master->transfer_one
list_for_each_entry(xfer, &msg->transfers, transfer_list) {
trace_spi_transfer_start(msg, xfer);
......
if (xfer->tx_buf || xfer->rx_buf) {
reinit_completion(&master->xfer_completion);
ret = master->transfer_one(master, msg->spi, xfer);
......
}
......
out:
//Deassert chip select on error, or when the message doesn't ask to keep it
if (ret != 0 || !keep_cs)
spi_set_cs(msg->spi, false);
......
//Message done: run completion handling
spi_finalize_current_message(master);
return ret;
}
spi_finalize_current_message:
/*
 * Called when the current message has been transferred: undo message
 * preparation, requeue the pump work, and invoke the message's
 * completion callback.
 */
void spi_finalize_current_message(struct spi_master *master)
{
struct spi_message *mesg;
unsigned long flags;
int ret;
//NOTE(review): the elided code above assigns mesg = master->cur_msg
//in the full kernel source — mesg is not used uninitialized there.
......
//Let the driver release per-message resources via unprepare_message
if (master->cur_msg_prepared && master->unprepare_message) {
ret = master->unprepare_message(master, mesg);
if (ret) {
dev_err(&master->dev,
"failed to unprepare message: %d\n", ret);
}
}
......
//Requeue the pump work so the next queued message gets processed
kthread_queue_work(&master->kworker, &master->pump_messages);
......
//Message fully processed: call the spi_message's completion callback
if (mesg->complete)
mesg->complete(mesg->context);
}
对于设备驱动程序来讲,之后调用spi_sync或spi_async函数即可发起一个message请求,队列化和工作线程被激活,触发一系列的操作,最终完成message的传输操作。
/*
 * Asynchronous message submission entry point: validate the message
 * and queue it without waiting for completion.
 */
int spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
int ret;
unsigned long flags;
//Sanity-check the spi_message and its transfers
ret = __spi_validate(spi, message);
if (ret != 0)
return ret;
spin_lock_irqsave(&master->bus_lock_spinlock, flags);
if (master->bus_lock_flag)
ret = -EBUSY;
else
ret = __spi_async(spi, message); //start the asynchronous transfer
spin_unlock_irqrestore(&master->bus_lock_spinlock, flags);
return ret;
}
/* Bind the message to the device and hand it to the controller. */
static int __spi_async(struct spi_device *spi, struct spi_message *message)
{
struct spi_master *master = spi->master;
message->spi = spi;
......
//Call master->transfer; with the queued mechanism (analyzed above)
//this was set to spi_queued_transfer
return master->transfer(spi, message);
}
spi_queued_transfer:
/* Queued-path transfer hook: enqueue the message and request a pump. */
static int spi_queued_transfer(struct spi_device *spi, struct spi_message *msg)
{
return __spi_queued_transfer(spi, msg, true);
}
/*
 * Append a message to the controller's queue and, unless the
 * controller is already busy, schedule the pump work.
 */
static int __spi_queued_transfer(struct spi_device *spi,
struct spi_message *msg,
bool need_pump)
{
struct spi_master *master = spi->master;
unsigned long flags;
spin_lock_irqsave(&master->queue_lock, flags);
......
//Link the spi_message onto the spi_master->queue list
list_add_tail(&msg->queue, &master->queue);
if (!master->busy && need_pump)
/* Requeue the master->pump_messages kthread_work on master->kworker
 * and wake the worker thread
 */
kthread_queue_work(&master->kworker, &master->pump_messages);
spin_unlock_irqrestore(&master->queue_lock, flags);
return 0;
}
最后总结一下spi_async函数执行流程图,如下:
通用spi设备驱动
内核提供了一个通用的SPI外设驱动,驱动文件为driver/spi/spidev.c。
入口函数:
/*
 * Module entry point: register the spidev character device
 * (major 153), create its device class, and register the SPI driver.
 */
static int __init spidev_init(void)
{
int status;
......
//Register the character device: major SPIDEV_MAJOR (153), fops = spidev_fops
status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
if (status < 0)
return status;
//Create the "spidev" device class for udev device-node creation
spidev_class = class_create(THIS_MODULE, "spidev");
if (IS_ERR(spidev_class)) {
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
return PTR_ERR(spidev_class);
}
//Register the SPI protocol driver
status = spi_register_driver(&spidev_spi_driver);
if (status < 0) {
class_destroy(spidev_class);
unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
return status;
}
/* SPI protocol driver; matched by name, device tree, or ACPI table. */
static struct spi_driver spidev_spi_driver = {
.driver = {
.name = "spidev",
.of_match_table = of_match_ptr(spidev_dt_ids),
.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
},
.probe = spidev_probe,
.remove = spidev_remove,
};
驱动与设备匹配,调用spidev_probe:
/* Per-device state for one spidev character device. */
struct spidev_data {
dev_t devt;
spinlock_t spi_lock;
struct spi_device *spi;
//Links this device into the global device_list
struct list_head device_entry;
struct mutex buf_lock;
unsigned users;
//Holds data copied in from user space for transmission
u8 *tx_buffer;
//Receives data read back from the device
u8 *rx_buffer;
u32 speed_hz;
};
/*
 * Probe: allocate per-device state, pick a free minor number,
 * create the /dev/spidevB.C node, and register the device on
 * the global device_list.
 */
static int spidev_probe(struct spi_device *spi)
{
struct spidev_data *spidev;
int status;
unsigned long minor;
......
/* Allocate a struct spidev_data to hold this device's state */
spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
if (!spidev)
return -ENOMEM;
/* Initialize the spidev_data */
spidev->spi = spi;
spin_lock_init(&spidev->spi_lock);
mutex_init(&spidev->buf_lock);
INIT_LIST_HEAD(&spidev->device_entry);
mutex_lock(&device_list_lock);
minor = find_first_zero_bit(minors, N_SPI_MINORS);
if (minor < N_SPI_MINORS) {
struct device *dev;
spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
//Create the device node, named spidev<bus>.<chip-select>
dev = device_create(spidev_class, &spi->dev, spidev->devt,
spidev, "spidev%d.%d",
spi->master->bus_num, spi->chip_select);
status = PTR_ERR_OR_ZERO(dev);
} else {
dev_dbg(&spi->dev, "no minor number available!\n");
status = -ENODEV;
}
if (status == 0) {
set_bit(minor, minors);
//Insert the spidev_data into the global device_list
list_add(&spidev->device_entry, &device_list);
}
mutex_unlock(&device_list_lock);
spidev->speed_hz = spi->max_speed_hz;
if (status == 0)
spi_set_drvdata(spi, spidev);
else
kfree(spidev);
return status;
}
这个驱动程序为用户空间提供了统一的接口:
/* File operations exposed to user space via /dev/spidevB.C. */
static const struct file_operations spidev_fops = {
.owner = THIS_MODULE,
.write = spidev_write,
.read = spidev_read,
.unlocked_ioctl = spidev_ioctl,
.compat_ioctl = spidev_compat_ioctl,
.open = spidev_open,
.release = spidev_release,
.llseek = no_llseek,
};
当应用层open时,会调用到spidev_fops的open函数:
/*
 * open(): locate the spidev_data matching the inode's device number
 * on the global list, then lazily allocate the TX and RX buffers.
 */
static int spidev_open(struct inode *inode, struct file *filp)
{
struct spidev_data *spidev;
int status = -ENXIO;
mutex_lock(&device_list_lock);
//Find the device whose devt matches this inode
list_for_each_entry(spidev, &device_list, device_entry) {
if (spidev->devt == inode->i_rdev) {
status = 0;
break;
}
}
......
//Allocate the transmit buffer (bufsiz bytes, 4096 by default)
if (!spidev->tx_buffer) {
spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->tx_buffer) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_find_dev;
}
}
//Allocate the receive buffer
if (!spidev->rx_buffer) {
spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
if (!spidev->rx_buffer) {
dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
status = -ENOMEM;
goto err_alloc_rx_buf;
}
}
......
}
当应用层write时,会调用到spidev_fops的write函数进行发送数据:
/*
 * write(): copy user data into the TX buffer and send it with a
 * synchronous (blocking) SPI transfer.
 */
static ssize_t
spidev_write(struct file *filp, const char __user *buf,
size_t count, loff_t *f_pos)
{
struct spidev_data *spidev;
ssize_t status = 0;
unsigned long missing;
/* chipselect only toggles at start or end of operation */
if (count > bufsiz)
return -EMSGSIZE;
spidev = filp->private_data;
mutex_lock(&spidev->buf_lock);
//Copy user-space data into the transmit buffer
missing = copy_from_user(spidev->tx_buffer, buf, count);
if (missing == 0)
//Send synchronously; blocks until the transfer completes
status = spidev_sync_write(spidev, count);
else
status = -EFAULT;
mutex_unlock(&spidev->buf_lock);
return status;
}
/*
 * Build a single-transfer spi_message around the TX buffer and
 * submit it synchronously.
 */
static inline ssize_t
spidev_sync_write(struct spidev_data *spidev, size_t len)
{
//Build one spi_transfer describing the TX buffer
struct spi_transfer t = {
.tx_buf = spidev->tx_buffer,
.len = len,
.speed_hz = spidev->speed_hz,
};
struct spi_message m;
spi_message_init(&m);
spi_message_add_tail(&t, &m);
//Submit the spi_message and wait for completion
return spidev_sync(spidev, &m);
}
进行复杂的数据传输时,通过ioctl命令,如下使用例子:
struct spi_ioc_transfer xfer[2];
......
//初始化xfer(SPI_IOC_MESSAGE(2)表示一次提交2个spi_ioc_transfer,故声明为数组)
//传输数据
ioctl(fd, SPI_IOC_MESSAGE(2), xfer);
......
struct spi_ioc_transfer结构体跟struct spi_transfer结构体类似,定义如下:
/* User-space counterpart of struct spi_transfer, passed via ioctl. */
struct spi_ioc_transfer {
__u64 tx_buf;
__u64 rx_buf;
__u32 len;
__u32 speed_hz;
__u16 delay_usecs;
__u8 bits_per_word;
__u8 cs_change;
__u8 tx_nbits;
__u8 rx_nbits;
__u16 pad;
};
当应用层ioctl时,会调用到spidev_fops的spidev_ioctl函数,对于通过ioctl命令进行数据传输:
/*
 * ioctl(): for SPI_IOC_MESSAGE(n) requests, copy the array of
 * spi_ioc_transfer from user space, build an spi_message from it,
 * and run the transfer synchronously.
 */
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
int err = 0;
int retval = 0;
struct spidev_data *spidev;
struct spi_device *spi;
u32 tmp;
unsigned n_ioc;
struct spi_ioc_transfer *ioc;
......
switch (cmd) {
......
default:
//Copy the user-supplied spi_ioc_transfer array into kernel space
ioc = spidev_get_ioc_message(cmd,
(struct spi_ioc_transfer __user *)arg, &n_ioc);
......
//Build an spi_message from the spi_ioc_transfer array; ends in spi_sync
retval = spidev_message(spidev, ioc, n_ioc);
kfree(ioc);
break;
}
......
}