Linux 内核在哪里锁定和解锁信号量?
Linux Kernel Where to lock and unlock semaphores?
在 Linux 内核(专门针对设备驱动程序)中,我如何知道要锁定哪些变量以及何时需要锁定?特别是,为什么以下代码中的锁定只发生在设置了 dev 之后,即使 dev 指向全局变量 scull_devices?
/*
 * One node in a scull device's linked list of quantum sets.
 * Each node owns an array of pointers, each of which points to one
 * "quantum" buffer of actual data.
 */
struct scull_qset {
void **data; /* pointer to an array of pointers which each point to a quantum buffer */
struct scull_qset *next; /* next quantum set in the list; NULL at the tail */
};
/*
 * Per-device state: one instance per scull minor number,
 * allocated dynamically in scull_init_module().
 */
struct scull_dev {
struct scull_qset *data; /* Pointer to first quantum set */
int quantum; /* the current quantum size */
int qset; /* the current array size */
unsigned long size; /* amount of data stored here */
unsigned int access_key; /* used by sculluid and scullpriv */
struct semaphore sem; /* mutual exclusion semaphore protecting the fields of THIS device only */
struct cdev cdev; /* Char device structure initialized in scull_init_module */
};
struct scull_dev *scull_devices; /* array of scull_nr_devs devices, allocated dynamically in scull_init_module */
/*
 * open() method: locate the scull_dev behind this inode and stash it in
 * filp->private_data.  A write-only open additionally truncates the
 * device to length zero, which must be done under the device semaphore.
 */
int scull_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *sdev;

	/* The cdev embedded in scull_dev gives us back the whole structure. */
	sdev = container_of(inode->i_cdev, struct scull_dev, cdev);
	filp->private_data = sdev; /* for other methods */

	/* Only a write-only open needs any further work. */
	if ((filp->f_flags & O_ACCMODE) != O_WRONLY)
		return 0; /* success */

	/* Trim the device to zero length, serialized against other users. */
	if (down_interruptible(&sdev->sem))
		return -ERESTARTSYS;
	scull_trim(sdev); /* empty out the scull device */
	up(&sdev->sem);

	return 0; /* success */
}
如果需要 scull_init_module 的代码以获得更完整的图片,这里是:
/*
 * Module-wide defaults, taken from the SCULL_* macros.
 * NOTE(review): presumably overridable at load time via module_param()
 * in the elided part of the module — confirm against the full source.
 */
int scull_major = SCULL_MAJOR;
int scull_minor = 0;
int scull_quantum = SCULL_QUANTUM; /* default size of one quantum buffer */
int scull_qset = SCULL_QSET; /* default number of quantum pointers per qset */
int scull_nr_devs = SCULL_NR_DEVS; /* how many scull devices to create */
/*
 * Module initialization: allocate and initialize scull_nr_devs devices.
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is undone via scull_cleanup_module().
 */
int scull_init_module(void)
{
	int result, i;
	dev_t dev = 0;

	/* assigns major and minor numbers (left out for brevity sake) */

	/*
	 * Allocate the devices -- we can't have them static, as the number
	 * can be specified at load time.  kcalloc() zero-fills and checks
	 * the n*size multiplication for overflow, replacing the original
	 * unchecked kmalloc() + memset() pair.
	 */
	scull_devices = kcalloc(scull_nr_devs, sizeof(struct scull_dev), GFP_KERNEL);
	if (!scull_devices) {
		result = -ENOMEM;
		goto fail;
	}

	/* Initialize each device. */
	for (i = 0; i < scull_nr_devs; i++) {
		scull_devices[i].quantum = scull_quantum;
		scull_devices[i].qset = scull_qset;
		/*
		 * sema_init(sem, 1) replaces init_MUTEX(), which was
		 * deprecated and removed in kernel 2.6.37.
		 */
		sema_init(&scull_devices[i].sem, 1);
		scull_setup_cdev(&scull_devices[i], i);
	}

	/* some other stuff left out for brevity sake */
	return 0; /* succeed */

fail:
	/*
	 * Not redundant: once the (elided) major-number allocation has
	 * succeeded, any later failure must release it.  cleanup is
	 * expected to tolerate partially-initialized state.
	 */
	scull_cleanup_module();
	return result;
}
/*
* Set up the char_dev structure for this device.
*/
static void scull_setup_cdev(struct scull_dev *dev, int index)
{
int err, devno = MKDEV(scull_major, scull_minor + index);
cdev_init(&dev->cdev, &scull_fops);
dev->cdev.owner = THIS_MODULE;
dev->cdev.ops = &scull_fops;
err = cdev_add (&dev->cdev, devno, 1);
/* Fail gracefully if need be */
if (err)
printk(KERN_NOTICE "Error %d adding scull%d", err, index);
}
示例中的锁定与全局scull_devices
变量无关,但锁定用于保护一个scull_dev
.
的属性
例如假设存在一个 read()
操作,它从 data
复制 size
字节,而提到的 scull_trim()
操作释放 data
.
因此,当进程 #1 调用 open()
并且进程 #2 尝试同时从已打开的设备 read()
时,read()
操作可以访问已释放的 data
哎呀。
这就是您需要保护数据免受竞争的原因。信号量是一种方式;互斥锁(mutex)是另一种通常更合适的方式。自旋锁和原子变量在某些场合也可以使用。
lock - 这是保护临界区的方法
临界区 - 在您的驱动程序代码中,如果多个实例正在访问同一区域,那就是临界区。
多个实例 - 它可以是线程、常规 ioctl cmd(来自用户空间)以及 softirq 和 irq。这取决于您的驱动程序实现。
基于"context",你也应该使用不同的锁。
thread context which can sleep -> semaphore/mutex
non-sleeping context -> spinlock
softirq, tasklet -> spin_lock_bh
irq -> spin_lock_irq, spin_lock_irqsave
完全根据您的要求。
举个例子。如果您正在处理网络驱动程序,您的 netdev 具有统计信息和数据包缓冲区,并且需要通过锁定来保护它们,因为它可以由多个实例更新,例如 net_rx_softirq、net_tx_softirq、来自用户空间的 ioctl/netlink 请求等等。
在这种情况下,根据您的资源上下文,您需要使用不同的 lock/mutex,有时您需要超过 1 个锁。
在 Linux 内核(专门针对设备驱动程序)中,我如何知道要锁定哪些变量以及何时需要锁定?特别是,为什么以下代码中的锁定只发生在设置了 dev 之后,即使 dev 指向全局变量 scull_devices?
/*
 * One node in a scull device's linked list of quantum sets.
 * Each node owns an array of pointers, each of which points to one
 * "quantum" buffer of actual data.
 */
struct scull_qset {
void **data; /* pointer to an array of pointers which each point to a quantum buffer */
struct scull_qset *next; /* next quantum set in the list; NULL at the tail */
};
/*
 * Per-device state: one instance per scull minor number,
 * allocated dynamically in scull_init_module().
 */
struct scull_dev {
struct scull_qset *data; /* Pointer to first quantum set */
int quantum; /* the current quantum size */
int qset; /* the current array size */
unsigned long size; /* amount of data stored here */
unsigned int access_key; /* used by sculluid and scullpriv */
struct semaphore sem; /* mutual exclusion semaphore protecting the fields of THIS device only */
struct cdev cdev; /* Char device structure initialized in scull_init_module */
};
struct scull_dev *scull_devices; /* array of scull_nr_devs devices, allocated dynamically in scull_init_module */
/*
 * open() method: locate the scull_dev behind this inode and stash it in
 * filp->private_data.  A write-only open additionally truncates the
 * device to length zero, which must be done under the device semaphore.
 */
int scull_open(struct inode *inode, struct file *filp)
{
	struct scull_dev *sdev;

	/* The cdev embedded in scull_dev gives us back the whole structure. */
	sdev = container_of(inode->i_cdev, struct scull_dev, cdev);
	filp->private_data = sdev; /* for other methods */

	/* Only a write-only open needs any further work. */
	if ((filp->f_flags & O_ACCMODE) != O_WRONLY)
		return 0; /* success */

	/* Trim the device to zero length, serialized against other users. */
	if (down_interruptible(&sdev->sem))
		return -ERESTARTSYS;
	scull_trim(sdev); /* empty out the scull device */
	up(&sdev->sem);

	return 0; /* success */
}
如果需要 scull_init_module 的代码以获得更完整的图片,这里是:
/*
 * Module-wide defaults, taken from the SCULL_* macros.
 * NOTE(review): presumably overridable at load time via module_param()
 * in the elided part of the module — confirm against the full source.
 */
int scull_major = SCULL_MAJOR;
int scull_minor = 0;
int scull_quantum = SCULL_QUANTUM; /* default size of one quantum buffer */
int scull_qset = SCULL_QSET; /* default number of quantum pointers per qset */
int scull_nr_devs = SCULL_NR_DEVS; /* how many scull devices to create */
/*
 * Module initialization: allocate and initialize scull_nr_devs devices.
 * Returns 0 on success or a negative errno; on failure everything done
 * so far is undone via scull_cleanup_module().
 */
int scull_init_module(void)
{
	int result, i;
	dev_t dev = 0;

	/* assigns major and minor numbers (left out for brevity sake) */

	/*
	 * Allocate the devices -- we can't have them static, as the number
	 * can be specified at load time.  kcalloc() zero-fills and checks
	 * the n*size multiplication for overflow, replacing the original
	 * unchecked kmalloc() + memset() pair.
	 */
	scull_devices = kcalloc(scull_nr_devs, sizeof(struct scull_dev), GFP_KERNEL);
	if (!scull_devices) {
		result = -ENOMEM;
		goto fail;
	}

	/* Initialize each device. */
	for (i = 0; i < scull_nr_devs; i++) {
		scull_devices[i].quantum = scull_quantum;
		scull_devices[i].qset = scull_qset;
		/*
		 * sema_init(sem, 1) replaces init_MUTEX(), which was
		 * deprecated and removed in kernel 2.6.37.
		 */
		sema_init(&scull_devices[i].sem, 1);
		scull_setup_cdev(&scull_devices[i], i);
	}

	/* some other stuff left out for brevity sake */
	return 0; /* succeed */

fail:
	/*
	 * Not redundant: once the (elided) major-number allocation has
	 * succeeded, any later failure must release it.  cleanup is
	 * expected to tolerate partially-initialized state.
	 */
	scull_cleanup_module();
	return result;
}
/*
* Set up the char_dev structure for this device.
*/
static void scull_setup_cdev(struct scull_dev *dev, int index)
{
int err, devno = MKDEV(scull_major, scull_minor + index);
cdev_init(&dev->cdev, &scull_fops);
dev->cdev.owner = THIS_MODULE;
dev->cdev.ops = &scull_fops;
err = cdev_add (&dev->cdev, devno, 1);
/* Fail gracefully if need be */
if (err)
printk(KERN_NOTICE "Error %d adding scull%d", err, index);
}
示例中的锁定与全局scull_devices
变量无关,但锁定用于保护一个scull_dev
.
例如假设存在一个 read()
操作,它从 data
复制 size
字节,而提到的 scroll_trim()
操作释放 data
.
因此,当进程 #1 调用 open()
并且进程 #2 尝试同时从已打开的设备 read()
时,read()
操作可以访问已释放的 data
哎呀。
这就是您需要保护数据免受竞争的原因。信号量是一种方式;互斥锁(mutex)是另一种通常更合适的方式。自旋锁和原子变量在某些场合也可以使用。
lock - 这是保护临界区的方法
临界区 - 在您的驱动程序代码中,如果多个实例正在访问同一区域,那就是临界区。
多个实例 - 它可以是线程、常规 ioctl cmd(来自用户空间)以及 softirq 和 irq。这取决于您的驱动程序实现。
基于"context",你也应该使用不同的锁。
thread context which can sleep -> semaphore/mutex non-sleeping context -> spinlock softirq, tasklet -> spin_lock_bh irq -> spin_lock_irq, spin_lock_irqsave
完全根据您的要求。
举个例子。如果您正在处理网络驱动程序,您的 netdev 具有统计信息和数据包缓冲区,并且需要通过锁定来保护它们,因为它可以由多个实例更新,例如 net_rx_softirq、net_tx_softirq、来自用户空间的 ioctl/netlink 请求等等。
在这种情况下,根据您的资源上下文,您需要使用不同的 lock/mutex,有时您需要超过 1 个锁。