Lines matching cmb_area (definition and uses)
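These matches appear to come from the Linux s390 channel-measurement facility code (drivers/s390/cio/cmf.c): cmb_area is the single global container that tracks the shared measurement-block memory and the list of devices currently using it, serialized by one spinlock.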
370 * struct cmb_area - container for global cmb data
377 struct cmb_area {
384 static struct cmb_area cmb_area = {
385 .lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
386 .list = LIST_HEAD_INIT(cmb_area.list),
402 module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);
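Taken together, the matches at 370-402 suggest the following shape for the container. This is a hedged reconstruction from the matched lines only; field types and the default channel count are assumptions:

#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/moduleparam.h>

struct cmb;				/* hardware channel-measurement block, defined elsewhere */

/* struct cmb_area - container for global cmb data (sketch) */
struct cmb_area {
	struct cmb *mem;		/* one contiguous array of blocks (lines 455, 518) */
	struct list_head list;		/* devices owning a slot in mem (line 386) */
	unsigned int num_channels;	/* number of slots, exported as "maxchannels" (line 402) */
	spinlock_t lock;		/* serializes access to mem and list */
};

static struct cmb_area cmb_area = {
	.lock = __SPIN_LOCK_UNLOCKED(cmb_area.lock),
	.list = LIST_HEAD_INIT(cmb_area.list),
	.num_channels = 1024,		/* assumed default; not visible in the matches */
};

module_param_named(maxchannels, cmb_area.num_channels, uint, 0444);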
434 * Insert a single device into the cmb_area list.
435 * Called with cmb_area.lock held from alloc_cmb.
451 * Find first unused cmb in cmb_area.mem.
452 * This is a little tricky: cmb_area.list
455 cmb = cmb_area.mem;
456 list_for_each_entry(node, &cmb_area.list, cmb_list) {
463 if (cmb - cmb_area.mem >= cmb_area.num_channels) {
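The comments at 434-452 and the loop at 455-463 describe how a free slot is found: cmb_area.list is kept sorted by where each device's block sits inside cmb_area.mem, so a single linear walk finds the first index that no entry occupies, and the search fails once the index reaches num_channels. A sketch of that scan, assuming the node type (ccw_device_private) and its ->cmb back-pointer; only ->cmb_list and ->hw_block appear in the matches:

/*
 * Sketch of the slot search; like the original, it relies on
 * cmb_area.lock being held and on cmb_area.list staying sorted by the
 * position of each entry's block inside cmb_area.mem.
 */
static struct cmb *find_unused_cmb(void)	/* hypothetical helper name */
{
	struct cmb *cmb = cmb_area.mem;
	struct ccw_device_private *node;	/* list node type assumed */

	list_for_each_entry(node, &cmb_area.list, cmb_list) {
		struct cmb_data *data = node->cmb;	/* ->cmb back-pointer assumed */

		if ((struct cmb *)data->hw_block > cmb)
			break;		/* gap before this entry: cmb is free */
		cmb++;			/* slot owned by this entry, try the next */
	}
	if (cmb - cmb_area.mem >= cmb_area.num_channels)
		return NULL;		/* every slot up to maxchannels is taken */
	return cmb;
}

The insertion side (lines 434-435) then presumably links the new device just before the node the walk stopped at, which is what keeps the list sorted for the next search.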
496 spin_lock(&cmb_area.lock);
498 if (!cmb_area.mem) {
500 size = sizeof(struct cmb) * cmb_area.num_channels;
501 WARN_ON(!list_empty(&cmb_area.list));
503 spin_unlock(&cmb_area.lock);
506 spin_lock(&cmb_area.lock);
508 if (cmb_area.mem) {
518 cmb_area.mem = mem;
519 cmf_activate(cmb_area.mem, CMF_ON);
526 spin_unlock(&cmb_area.lock);
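Lines 496-526 show the allocation of the shared area itself, and the interesting part is the locking: the page allocation may sleep, so cmb_area.lock is dropped around it and the code re-checks afterwards whether a concurrent caller already installed an area. A sketch of that pattern, with the helper name and the GFP flags assumed (cmf_activate() and CMF_ON are taken from line 519):

static int alloc_shared_cmb_area(void)	/* hypothetical helper name */
{
	void *mem;
	ssize_t size;
	int ret = 0;

	spin_lock(&cmb_area.lock);
	if (!cmb_area.mem) {
		/* no user yet, a new area is needed */
		size = sizeof(struct cmb) * cmb_area.num_channels;
		WARN_ON(!list_empty(&cmb_area.list));

		/* drop the lock: the page allocation may sleep */
		spin_unlock(&cmb_area.lock);
		mem = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA,	/* gfp flags assumed */
					       get_order(size));
		spin_lock(&cmb_area.lock);

		if (cmb_area.mem) {
			/* a concurrent caller was faster; drop our pages */
			if (mem)
				free_pages((unsigned long)mem, get_order(size));
		} else if (!mem) {
			ret = -ENOMEM;
		} else {
			/* install the area and switch measurement on */
			memset(mem, 0, size);
			cmb_area.mem = mem;
			cmf_activate(cmb_area.mem, CMF_ON);
		}
	}
	spin_unlock(&cmb_area.lock);
	return ret;
}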
539 spin_lock(&cmb_area.lock);
550 if (list_empty(&cmb_area.list)) {
552 size = sizeof(struct cmb) * cmb_area.num_channels;
554 free_pages((unsigned long)cmb_area.mem, get_order(size));
555 cmb_area.mem = NULL;
558 spin_unlock(&cmb_area.lock);
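Lines 539-558 are the matching teardown: once the last device has left cmb_area.list, the whole area is released and the pointer reset. Measurement presumably has to be switched off before the pages go away; the cmf_activate(NULL, CMF_OFF) call below is assumed from context, only the free_pages() and the reset are in the matches. A sketch, assuming it runs with cmb_area.lock held after the device was unlinked:

static void free_shared_cmb_area(void)	/* hypothetical helper name */
{
	ssize_t size;

	if (!list_empty(&cmb_area.list))
		return;			/* still at least one user */

	size = sizeof(struct cmb) * cmb_area.num_channels;
	cmf_activate(NULL, CMF_OFF);	/* assumed: stop measurement before freeing */
	free_pages((unsigned long)cmb_area.mem, get_order(size));
	cmb_area.mem = NULL;
}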
573 offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;
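The single match at 573 converts a device's block address back into an index: subtracting two struct cmb pointers yields the element offset of cmb_data->hw_block within the shared array, which is apparently what gets handed on as the device's measurement-block offset, and a cleared mme (measurement-mode enable) forces it to 0. In other words:

/*
 * Pointer subtraction on struct cmb * counts elements, not bytes: if
 * hw_block is the N-th block inside cmb_area.mem, offset comes out as
 * N, while the byte distance would be N * sizeof(struct cmb).
 */
offset = mme ? (struct cmb *)cmb_data->hw_block - cmb_area.mem : 0;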
781 spin_lock(&cmb_area.lock);
789 if (list_empty(&cmb_area.list))
791 list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
794 spin_unlock(&cmb_area.lock);
799 spin_unlock(&cmb_area.lock);
814 spin_lock(&cmb_area.lock);
826 if (list_empty(&cmb_area.list))
829 spin_unlock(&cmb_area.lock);
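Lines 781-829 show a second registration path where list membership also drives the global on/off switch: the first device to be added turns measurement on, and the last one to be removed turns it off again, all under cmb_area.lock. A sketch of that pairing; the helper names are hypothetical and the cmf_activate(NULL, ...) calls are assumed, since only the locking and list handling appear in the matches:

static void cmb_area_add(struct ccw_device *cdev)	/* hypothetical */
{
	spin_lock(&cmb_area.lock);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_ON);	/* assumed: first user switches measurement on */
	list_add_tail(&cdev->private->cmb_list, &cmb_area.list);
	spin_unlock(&cmb_area.lock);
}

static void cmb_area_remove(struct ccw_device *cdev)	/* hypothetical */
{
	spin_lock(&cmb_area.lock);
	list_del_init(&cdev->private->cmb_list);
	if (list_empty(&cmb_area.list))
		cmf_activate(NULL, CMF_OFF);	/* assumed: last user switches it off */
	spin_unlock(&cmb_area.lock);
}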
1244 spin_lock(&cmb_area.lock);
1245 if (!list_empty(&cmb_area.list))
1246 cmf_activate(cmb_area.mem, CMF_ON);
1247 spin_unlock(&cmb_area.lock);
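Finally, lines 1244-1247 re-activate measurement with the already allocated area whenever at least one device is still registered; this looks like a resume or re-enable hook. The enclosing function is not visible in the matches, so the name and signature below are assumed:

void cmf_reactivate(void)	/* name and signature assumed */
{
	spin_lock(&cmb_area.lock);
	if (!list_empty(&cmb_area.list))
		cmf_activate(cmb_area.mem, CMF_ON);
	spin_unlock(&cmb_area.lock);
}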