Lines Matching defs:lcu

26  * - A device is connected to an lcu as long as the device exists.
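
Before the individual functions, it helps to see the object these lines keep dereferencing. Below is a minimal sketch of struct alias_lcu reconstructed purely from the members referenced in this listing (the list heads, uid, pav, flags, lock, lcu_setup, uac, rsu_cqr, suc_data, ruac_data). It is not the driver's authoritative definition; the DASD-specific member types come from the driver's own headers and are assumptions here.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/workqueue.h>

/*
 * Sketch of struct alias_lcu as implied by the references in this listing.
 * Member types for the DASD-specific objects (dasd_uid, read_uac_data,
 * dasd_ccw_req, dasd_device) are assumptions; the real layout is defined
 * by the driver itself.
 */
struct alias_lcu {
	struct list_head lcu;			/* entry in server->lculist */
	struct list_head inactive_devices;	/* known, not yet grouped */
	struct list_head active_devices;	/* waiting for uac data */
	struct list_head grouplist;		/* struct alias_pav_group entries */
	struct dasd_uid uid;			/* vendor, serial, ssid */
	int pav;				/* NO_PAV, BASE_PAV or HYPER_PAV (type assumed) */
	unsigned long flags;			/* NEED_UAC_UPDATE, UPDATE_PENDING (type assumed) */
	spinlock_t lock;			/* protects the lists and flags */
	struct completion lcu_setup;
	struct read_uac_data *uac;		/* unit address configuration, GFP_DMA */
	struct dasd_ccw_req *rsu_cqr;		/* reset-summary-unit-check request */
	struct {				/* summary unit check worker */
		struct work_struct worker;
		struct dasd_device *device;
		char reason;			/* type assumed */
	} suc_data;
	struct {				/* delayed uac re-read worker */
		struct delayed_work dwork;
		struct dasd_device *device;
	} ruac_data;
};
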
70 list_for_each_entry(pos, &server->lculist, lcu) {
77 static struct alias_pav_group *_find_group(struct alias_lcu *lcu,
84 if (lcu->pav == HYPER_PAV) {
85 if (list_empty(&lcu->grouplist))
88 return list_first_entry(&lcu->grouplist,
97 list_for_each_entry(pos, &lcu->grouplist, group) {
126 struct alias_lcu *lcu;
128 lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
129 if (!lcu)
131 lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
132 if (!lcu->uac)
134 lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
135 if (!lcu->rsu_cqr)
137 lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
139 if (!lcu->rsu_cqr->cpaddr)
141 lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
142 if (!lcu->rsu_cqr->data)
145 memcpy(lcu->uid.vendor, uid->vendor, sizeof(uid->vendor));
146 memcpy(lcu->uid.serial, uid->serial, sizeof(uid->serial));
147 lcu->uid.ssid = uid->ssid;
148 lcu->pav = NO_PAV;
149 lcu->flags = NEED_UAC_UPDATE | UPDATE_PENDING;
150 INIT_LIST_HEAD(&lcu->lcu);
151 INIT_LIST_HEAD(&lcu->inactive_devices);
152 INIT_LIST_HEAD(&lcu->active_devices);
153 INIT_LIST_HEAD(&lcu->grouplist);
154 INIT_WORK(&lcu->suc_data.worker, summary_unit_check_handling_work);
155 INIT_DELAYED_WORK(&lcu->ruac_data.dwork, lcu_update_work);
156 spin_lock_init(&lcu->lock);
157 init_completion(&lcu->lcu_setup);
158 return lcu;
161 kfree(lcu->rsu_cqr->cpaddr);
163 kfree(lcu->rsu_cqr);
165 kfree(lcu->uac);
167 kfree(lcu);
171 static void _free_lcu(struct alias_lcu *lcu)
173 kfree(lcu->rsu_cqr->data);
174 kfree(lcu->rsu_cqr->cpaddr);
175 kfree(lcu->rsu_cqr);
176 kfree(lcu->uac);
177 kfree(lcu);
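
Lines 126-167 and 171-177 above show the allocate/free pairing for the lcu object: a chain of kzalloc() calls (GFP_DMA for the buffers the channel subsystem accesses) that is unwound in reverse order on failure and mirrored by _free_lcu(). A hedged sketch of that shape, using the same member names as the listing; the error label names and the ERR_PTR(-ENOMEM) return convention are assumptions.

#include <linux/slab.h>
#include <linux/err.h>

/*
 * Sketch of the allocation chain from the listing: every kzalloc() has a
 * matching kfree() both on the goto-based error path and in _free_lcu().
 */
static struct alias_lcu *_allocate_lcu_sketch(struct dasd_uid *uid)
{
	struct alias_lcu *lcu;

	lcu = kzalloc(sizeof(*lcu), GFP_KERNEL);
	if (!lcu)
		goto out_err1;
	lcu->uac = kzalloc(sizeof(*(lcu->uac)), GFP_KERNEL | GFP_DMA);
	if (!lcu->uac)
		goto out_err2;
	lcu->rsu_cqr = kzalloc(sizeof(*lcu->rsu_cqr), GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr)
		goto out_err3;
	lcu->rsu_cqr->cpaddr = kzalloc(sizeof(struct ccw1),
				       GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->cpaddr)
		goto out_err4;
	lcu->rsu_cqr->data = kzalloc(16, GFP_KERNEL | GFP_DMA);
	if (!lcu->rsu_cqr->data)
		goto out_err5;

	/* ... copy the uid, set NO_PAV and the initial flags, init the
	 * lists, workers, lock and completion as in the listing ... */
	return lcu;

out_err5:
	kfree(lcu->rsu_cqr->cpaddr);
out_err4:
	kfree(lcu->rsu_cqr);
out_err3:
	kfree(lcu->uac);
out_err2:
	kfree(lcu);
out_err1:
	return ERR_PTR(-ENOMEM);
}
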
181 * This is the function that will allocate all the server and lcu data,
183 * If the return value is 1, the lcu was already known before; if it
184 * is 0, this is a new lcu.
192 struct alias_lcu *lcu, *newlcu;
214 lcu = _find_lcu(server, &uid);
215 if (!lcu) {
221 lcu = _find_lcu(server, &uid);
222 if (!lcu) {
223 list_add(&newlcu->lcu, &server->lculist);
224 lcu = newlcu;
230 spin_lock(&lcu->lock);
231 list_add(&device->alias_list, &lcu->inactive_devices);
232 private->lcu = lcu;
233 spin_unlock(&lcu->lock);
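
Lines 214-233 above show the find-or-create dance for the lcu: look it up, and only if it is missing drop the list lock for the allocation (which can sleep), then re-take the lock and look again before inserting, discarding the freshly allocated object if another caller won the race. A sketch of that pattern follows; the list lock name (aliastree_lock), the struct alias_server type name and the 0/1 return convention (per the comment at lines 183-184) are assumptions.

/*
 * Sketch of the lookup / allocate / re-check pattern from the listing.
 * _allocate_lcu() may sleep, so it runs outside the list lock, and the
 * lookup is repeated after re-taking the lock.
 */
static int _find_or_create_lcu_sketch(struct alias_server *server,
				      struct dasd_device *device,
				      struct dasd_uid *uid,
				      struct alias_lcu **result)
{
	struct alias_lcu *lcu, *newlcu;
	unsigned long flags;
	int is_lcu_known = 1;

	spin_lock_irqsave(&aliastree_lock, flags);
	lcu = _find_lcu(server, uid);
	if (!lcu) {
		spin_unlock_irqrestore(&aliastree_lock, flags);
		newlcu = _allocate_lcu(uid);		/* may sleep */
		if (IS_ERR(newlcu))
			return PTR_ERR(newlcu);
		spin_lock_irqsave(&aliastree_lock, flags);
		lcu = _find_lcu(server, uid);		/* re-check under the lock */
		if (!lcu) {
			list_add(&newlcu->lcu, &server->lculist);
			lcu = newlcu;
			is_lcu_known = 0;		/* brand new lcu */
		} else {
			_free_lcu(newlcu);		/* lost the race */
		}
	}
	spin_lock(&lcu->lock);
	list_add(&device->alias_list, &lcu->inactive_devices);
	spin_unlock(&lcu->lock);
	spin_unlock_irqrestore(&aliastree_lock, flags);
	*result = lcu;
	return is_lcu_known;
}
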
248 struct alias_lcu *lcu;
253 lcu = private->lcu;
255 if (!lcu)
258 spin_lock_irqsave(&lcu->lock, flags);
260 if (device == lcu->suc_data.device) {
261 spin_unlock_irqrestore(&lcu->lock, flags);
262 cancel_work_sync(&lcu->suc_data.worker);
263 spin_lock_irqsave(&lcu->lock, flags);
264 if (device == lcu->suc_data.device) {
266 lcu->suc_data.device = NULL;
270 if (device == lcu->ruac_data.device) {
271 spin_unlock_irqrestore(&lcu->lock, flags);
273 cancel_delayed_work_sync(&lcu->ruac_data.dwork);
274 spin_lock_irqsave(&lcu->lock, flags);
275 if (device == lcu->ruac_data.device) {
277 lcu->ruac_data.device = NULL;
280 private->lcu = NULL;
281 spin_unlock_irqrestore(&lcu->lock, flags);
284 spin_lock(&lcu->lock);
286 if (list_empty(&lcu->grouplist) &&
287 list_empty(&lcu->active_devices) &&
288 list_empty(&lcu->inactive_devices)) {
289 list_del(&lcu->lcu);
290 spin_unlock(&lcu->lock);
291 _free_lcu(lcu);
292 lcu = NULL;
295 _schedule_lcu_update(lcu, NULL);
296 spin_unlock(&lcu->lock);
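
Lines 258-281 above deal with a device that may currently be referenced by one of the lcu's workers, and lines 284-292 free the lcu once it has become completely empty. Because cancel_work_sync() and cancel_delayed_work_sync() can sleep, and the workers themselves take lcu->lock, the lock is dropped around the cancel and the condition re-checked afterwards before the field is cleared. A sketch of that pattern; the dasd_put_device() reference drop is an assumption about how the device is kept alive while the work is queued.

/*
 * Sketch of the drop-lock / cancel / re-check pattern from lines 258-281:
 * suc_data.device and ruac_data.device are checked again after the cancel
 * because the worker may have finished and cleared them in the meantime.
 */
static void _cancel_lcu_workers_sketch(struct alias_lcu *lcu,
				       struct dasd_device *device)
{
	unsigned long flags;

	spin_lock_irqsave(&lcu->lock, flags);
	if (device == lcu->suc_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_work_sync(&lcu->suc_data.worker);	/* may sleep */
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->suc_data.device) {
			dasd_put_device(device);	/* assumption */
			lcu->suc_data.device = NULL;
		}
	}
	if (device == lcu->ruac_data.device) {
		spin_unlock_irqrestore(&lcu->lock, flags);
		cancel_delayed_work_sync(&lcu->ruac_data.dwork);
		spin_lock_irqsave(&lcu->lock, flags);
		if (device == lcu->ruac_data.device) {
			dasd_put_device(device);	/* assumption */
			lcu->ruac_data.device = NULL;
		}
	}
	spin_unlock_irqrestore(&lcu->lock, flags);
}
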
308 * in the lcu is up to date and will update the device uid before
312 static int _add_device_to_lcu(struct alias_lcu *lcu,
322 private->uid.type = lcu->uac->unit[private->uid.real_unit_addr].ua_type;
324 lcu->uac->unit[private->uid.real_unit_addr].base_ua;
328 if (lcu->pav == NO_PAV) {
329 list_move(&device->alias_list, &lcu->active_devices);
332 group = _find_group(lcu, &uid);
348 list_add(&group->group, &lcu->grouplist);
358 static void _remove_device_from_lcu(struct alias_lcu *lcu,
364 list_move(&device->alias_list, &lcu->inactive_devices);
416 struct alias_lcu *lcu)
449 memset(lcu->uac, 0, sizeof(*(lcu->uac)));
453 ccw->count = sizeof(*(lcu->uac));
454 ccw->cda = (__u32)(addr_t) lcu->uac;
460 spin_lock_irqsave(&lcu->lock, flags);
461 lcu->flags &= ~NEED_UAC_UPDATE;
462 spin_unlock_irqrestore(&lcu->lock, flags);
473 spin_lock_irqsave(&lcu->lock, flags);
474 lcu->flags |= NEED_UAC_UPDATE;
475 spin_unlock_irqrestore(&lcu->lock, flags);
482 static int _lcu_update(struct dasd_device *refdev, struct alias_lcu *lcu)
490 spin_lock_irqsave(&lcu->lock, flags);
491 list_for_each_entry_safe(pavgroup, tempgroup, &lcu->grouplist, group) {
494 list_move(&device->alias_list, &lcu->active_devices);
500 list_move(&device->alias_list, &lcu->active_devices);
507 spin_unlock_irqrestore(&lcu->lock, flags);
509 rc = read_unit_address_configuration(refdev, lcu);
513 spin_lock_irqsave(&lcu->lock, flags);
520 if (lcu->flags & NEED_UAC_UPDATE)
522 lcu->pav = NO_PAV;
524 switch (lcu->uac->unit[i].ua_type) {
526 lcu->pav = BASE_PAV;
529 lcu->pav = HYPER_PAV;
532 if (lcu->pav != NO_PAV)
536 list_for_each_entry_safe(device, tempdev, &lcu->active_devices,
538 _add_device_to_lcu(lcu, device, refdev);
541 spin_unlock_irqrestore(&lcu->lock, flags);
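
Lines 520-532 above derive the lcu-wide PAV mode from the freshly read unit address configuration: the scan stops at the first unit whose type identifies a base-PAV or hyper-PAV alias, and lines 536-538 then re-add the devices parked on active_devices. A sketch of that scan; the unit-address type constants and the array bound (MAX_DEVICES_PER_LCU) are assumed names, since the listing only shows that one matching unit is enough to pick BASE_PAV or HYPER_PAV.

/*
 * Sketch of the PAV-mode scan from lines 520-532.  UA_BASE_PAV_ALIAS,
 * UA_HYPER_PAV_ALIAS and MAX_DEVICES_PER_LCU are assumptions.
 */
static void _set_pav_mode_sketch(struct alias_lcu *lcu)
{
	int i;

	lcu->pav = NO_PAV;
	for (i = 0; i < MAX_DEVICES_PER_LCU; i++) {
		switch (lcu->uac->unit[i].ua_type) {
		case UA_BASE_PAV_ALIAS:
			lcu->pav = BASE_PAV;
			break;
		case UA_HYPER_PAV_ALIAS:
			lcu->pav = HYPER_PAV;
			break;
		}
		if (lcu->pav != NO_PAV)
			break;
	}
}
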
547 struct alias_lcu *lcu;
554 lcu = container_of(ruac_data, struct alias_lcu, ruac_data);
556 rc = _lcu_update(device, lcu);
562 spin_lock_irqsave(&lcu->lock, flags);
563 if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
565 " alias data in lcu (rc = %d), retry later", rc);
566 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
570 lcu->ruac_data.device = NULL;
571 lcu->flags &= ~UPDATE_PENDING;
573 spin_unlock_irqrestore(&lcu->lock, flags);
576 static int _schedule_lcu_update(struct alias_lcu *lcu,
582 lcu->flags |= NEED_UAC_UPDATE;
583 if (lcu->ruac_data.device) {
590 if (!usedev && !list_empty(&lcu->grouplist)) {
591 group = list_first_entry(&lcu->grouplist,
602 if (!usedev && !list_empty(&lcu->active_devices)) {
603 usedev = list_first_entry(&lcu->active_devices,
608 * device that will be set active will trigger an lcu update
613 lcu->ruac_data.device = usedev;
614 if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
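
Lines 576-614 above pick the device that will carry the delayed uac re-read: the caller-supplied device if there is one, otherwise the first base device of the first PAV group, otherwise the first entry on active_devices; if none exists, the next device that is set active triggers the update instead (line 608). lcu_update_work (lines 547-573) re-arms the same delayed work with a 30*HZ delay when the read failed or NEED_UAC_UPDATE is still set. A sketch of the scheduling tail; the dasd_get_device()/dasd_put_device() reference handling and the -EINVAL return are assumptions.

/*
 * Sketch of the tail of _schedule_lcu_update() as the listing suggests:
 * record the chosen device in ruac_data and kick the delayed work with no
 * delay; retries are rescheduled from lcu_update_work() with 30*HZ.
 */
static int _kick_lcu_update_sketch(struct alias_lcu *lcu,
				   struct dasd_device *usedev)
{
	if (!usedev)
		return -EINVAL;	/* next device set active triggers the update */
	lcu->ruac_data.device = usedev;
	dasd_get_device(usedev);		/* assumption: ref held while the work runs */
	if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
		dasd_put_device(usedev);	/* work was already pending */
	return 0;
}
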
623 struct alias_lcu *lcu = private->lcu;
628 spin_lock_irqsave(&lcu->lock, flags);
630 * Check if device and lcu type differ. If so, the uac data may be
633 if (private->uid.type != lcu->uac->unit[uaddr].ua_type) {
634 lcu->flags |= UPDATE_PENDING;
638 if (!(lcu->flags & UPDATE_PENDING)) {
639 rc = _add_device_to_lcu(lcu, device, device);
641 lcu->flags |= UPDATE_PENDING;
643 if (lcu->flags & UPDATE_PENDING) {
644 list_move(&device->alias_list, &lcu->active_devices);
646 _schedule_lcu_update(lcu, device);
648 spin_unlock_irqrestore(&lcu->lock, flags);
656 private->lcu->flags |= UPDATE_PENDING;
663 struct alias_lcu *lcu = private->lcu;
667 if (!lcu)
669 spin_lock_irqsave(&lcu->lock, flags);
670 _remove_device_from_lcu(lcu, device);
671 spin_unlock_irqrestore(&lcu->lock, flags);
678 struct alias_lcu *lcu = private->lcu;
683 if (!lcu)
685 if (lcu->pav == NO_PAV ||
686 lcu->flags & (NEED_UAC_UPDATE | UPDATE_PENDING))
699 spin_lock_irqsave(&lcu->lock, flags);
702 spin_unlock_irqrestore(&lcu->lock, flags);
708 spin_unlock_irqrestore(&lcu->lock, flags);
722 spin_unlock_irqrestore(&lcu->lock, flags);
735 static int reset_summary_unit_check(struct alias_lcu *lcu,
743 cqr = lcu->rsu_cqr;
766 static void _restart_all_base_devices_on_lcu(struct alias_lcu *lcu)
773 list_for_each_entry(device, &lcu->active_devices, alias_list) {
780 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
787 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
795 static void flush_all_alias_devices_on_lcu(struct alias_lcu *lcu)
806 * the lcu lock during that time, so we must assume that
814 spin_lock_irqsave(&lcu->lock, flags);
815 list_for_each_entry_safe(device, temp, &lcu->active_devices,
823 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
829 spin_unlock_irqrestore(&lcu->lock, flags);
831 spin_lock_irqsave(&lcu->lock, flags);
838 list_move(&device->alias_list, &lcu->active_devices);
843 spin_unlock_irqrestore(&lcu->lock, flags);
846 static void _stop_all_devices_on_lcu(struct alias_lcu *lcu)
851 list_for_each_entry(device, &lcu->active_devices, alias_list) {
856 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
861 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
875 static void _unstop_all_devices_on_lcu(struct alias_lcu *lcu)
880 list_for_each_entry(device, &lcu->active_devices, alias_list) {
885 list_for_each_entry(device, &lcu->inactive_devices, alias_list) {
890 list_for_each_entry(pavgroup, &lcu->grouplist, group) {
906 struct alias_lcu *lcu;
913 lcu = container_of(suc_data, struct alias_lcu, suc_data);
917 flush_all_alias_devices_on_lcu(lcu);
924 reset_summary_unit_check(lcu, device, suc_data->reason);
926 spin_lock_irqsave(&lcu->lock, flags);
927 _unstop_all_devices_on_lcu(lcu);
928 _restart_all_base_devices_on_lcu(lcu);
930 _schedule_lcu_update(lcu, device);
931 lcu->suc_data.device = NULL;
933 spin_unlock_irqrestore(&lcu->lock, flags);
941 struct alias_lcu *lcu;
944 lcu = private->lcu;
945 if (!lcu) {
948 " unit check (no lcu structure)");
951 spin_lock_irqsave(&lcu->lock, flags);
961 if (lcu->suc_data.device) {
968 _stop_all_devices_on_lcu(lcu);
970 lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
971 lcu->suc_data.reason = private->suc_reason;
972 lcu->suc_data.device = device;
974 if (!schedule_work(&lcu->suc_data.worker))
977 spin_unlock_irqrestore(&lcu->lock, flags);
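
Lines 941-977 above run from the summary unit check path, so the handler only records what happened and hands the real work off: summary_unit_check_handling_work (lines 906-933) then flushes the alias devices, issues the reset-summary-unit-check request from lcu->rsu_cqr, unstops and restarts the base devices and schedules an lcu update. A sketch of that hand-off; the dasd_get_device()/dasd_put_device() reference counting is an assumption about how the device is kept alive until the worker runs.

/*
 * Sketch of the hand-off to lcu->suc_data.worker as the listing shows:
 * stop all devices, mark the uac data stale, remember the reason and the
 * device for the worker, and schedule it.
 */
static void _handle_suc_sketch(struct alias_lcu *lcu,
			       struct dasd_device *device, char reason)
{
	unsigned long flags;

	spin_lock_irqsave(&lcu->lock, flags);
	if (lcu->suc_data.device) {
		/* a worker is already queued or running for this lcu */
		spin_unlock_irqrestore(&lcu->lock, flags);
		return;
	}
	_stop_all_devices_on_lcu(lcu);
	/* prepare for lcu_update: the uac data is no longer trustworthy */
	lcu->flags |= NEED_UAC_UPDATE | UPDATE_PENDING;
	lcu->suc_data.reason = reason;
	lcu->suc_data.device = device;
	dasd_get_device(device);		/* assumption */
	if (!schedule_work(&lcu->suc_data.worker))
		dasd_put_device(device);	/* work was already queued */
	spin_unlock_irqrestore(&lcu->lock, flags);
}
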