Lines Matching refs:sch

152 static int io_subchannel_prepare(struct subchannel *sch)
159 cdev = sch_get_cdev(sch);
287 struct subchannel *sch;
301 sch = to_subchannel(cdev->dev.parent);
322 io_subchannel_quiesce(sch);
554 struct subchannel *sch;
564 sch = to_subchannel(dev->parent);
565 if (!sch->lpm)
579 struct subchannel *sch = to_subchannel(dev);
582 rc = chsc_siosl(sch->schid);
585 sch->schid.ssid, sch->schid.sch_no, rc);
589 sch->schid.ssid, sch->schid.sch_no);
596 struct subchannel *sch = to_subchannel(dev);
598 return sprintf(buf, "%02x\n", sch->vpm);
700 static struct ccw_device * io_subchannel_allocate_dev(struct subchannel *sch)
712 cdev->dev.coherent_dma_mask = sch->dev.coherent_dma_mask;
713 cdev->dev.dma_mask = sch->dev.dma_mask;
735 static int io_subchannel_initialize_dev(struct subchannel *sch,
744 priv->dev_id.devno = sch->schib.pmcw.dev;
745 priv->dev_id.ssid = sch->schid.ssid;
753 cdev->ccwlock = sch->lock;
754 cdev->dev.parent = &sch->dev;
763 if (!get_device(&sch->dev)) {
768 spin_lock_irq(sch->lock);
769 sch_set_cdev(sch, cdev);
770 spin_unlock_irq(sch->lock);
779 static struct ccw_device * io_subchannel_create_ccwdev(struct subchannel *sch)
784 cdev = io_subchannel_allocate_dev(sch);
786 ret = io_subchannel_initialize_dev(sch, cdev);
795 static void sch_create_and_recog_new_device(struct subchannel *sch)
800 cdev = io_subchannel_create_ccwdev(sch);
803 css_sch_device_unregister(sch);
807 io_subchannel_recog(cdev, sch);
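
Lines 795-807 sketch the create-and-recognize path for a newly found subchannel. Read together, the flow is roughly as follows; the early return on allocation failure is inferred from the unregister call at 803 and is not shown verbatim in the listing:

	/* Sketch: allocate and initialize a ccw_device for the subchannel and
	 * start device recognition; if that fails, unregister the subchannel
	 * again, since there is nothing left to drive it. */
	static void sch_create_and_recog_new_device(struct subchannel *sch)
	{
		struct ccw_device *cdev;

		cdev = io_subchannel_create_ccwdev(sch);
		if (IS_ERR(cdev)) {
			css_sch_device_unregister(sch);
			return;
		}
		io_subchannel_recog(cdev, sch);
	}
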
815 struct subchannel *sch;
819 sch = to_subchannel(cdev->dev.parent);
826 if (!device_is_registered(&sch->dev))
828 css_update_ssd_info(sch);
852 if (dev_get_uevent_suppress(&sch->dev)) {
853 dev_set_uevent_suppress(&sch->dev, 0);
854 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
862 spin_lock_irqsave(sch->lock, flags);
863 sch_set_cdev(sch, NULL);
864 spin_unlock_irqrestore(sch->lock, flags);
879 struct subchannel *sch;
884 sch = to_subchannel(cdev->dev.parent);
885 css_sch_device_unregister(sch);
887 put_device(&sch->dev);
920 static void io_subchannel_recog(struct ccw_device *cdev, struct subchannel *sch)
926 spin_lock_irq(sch->lock);
928 spin_unlock_irq(sch->lock);
932 struct subchannel *sch)
939 if (!get_device(&sch->dev))
951 put_device(&sch->dev);
956 mutex_lock(&sch->reg_mutex);
957 rc = device_move(&cdev->dev, &sch->dev, DPM_ORDER_PARENT_BEFORE_DEV);
958 mutex_unlock(&sch->reg_mutex);
962 cdev->private->dev_id.devno, sch->schid.ssid,
963 sch->schib.pmcw.dev, rc);
971 put_device(&sch->dev);
984 spin_lock_irq(sch->lock);
985 cdev->ccwlock = sch->lock;
986 if (!sch_is_pseudo_sch(sch))
987 sch_set_cdev(sch, cdev);
988 spin_unlock_irq(sch->lock);
989 if (!sch_is_pseudo_sch(sch))
990 css_update_ssd_info(sch);
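
The matches at 984-990 are the tail of the path that moves a ccw_device onto a subchannel: the device adopts the subchannel's lock and is published via sch_set_cdev() while that lock is held, skipping pseudo subchannels. Annotated excerpt, with the surrounding function context assumed:

	/* Attach the ccw_device to its target subchannel: reuse the
	 * subchannel lock as the device lock and publish the cdev pointer,
	 * but only for real (non-pseudo) subchannels. */
	spin_lock_irq(sch->lock);
	cdev->ccwlock = sch->lock;
	if (!sch_is_pseudo_sch(sch))
		sch_set_cdev(sch, cdev);
	spin_unlock_irq(sch->lock);
	if (!sch_is_pseudo_sch(sch))
		css_update_ssd_info(sch);
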
996 struct subchannel *sch = to_subchannel(cdev->dev.parent);
997 struct channel_subsystem *css = to_css(sch->dev.parent);
1002 static void io_subchannel_irq(struct subchannel *sch)
1006 cdev = sch_get_cdev(sch);
1009 CIO_TRACE_EVENT(6, dev_name(&sch->dev));
1016 void io_subchannel_init_config(struct subchannel *sch)
1018 memset(&sch->config, 0, sizeof(sch->config));
1019 sch->config.csense = 1;
1022 static void io_subchannel_init_fields(struct subchannel *sch)
1024 if (cio_is_console(sch->schid))
1025 sch->opm = 0xff;
1027 sch->opm = chp_get_sch_opm(sch);
1028 sch->lpm = sch->schib.pmcw.pam & sch->opm;
1029 sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;
1033 sch->schib.pmcw.dev, sch->schid.ssid,
1034 sch->schid.sch_no, sch->schib.pmcw.pim,
1035 sch->schib.pmcw.pam, sch->schib.pmcw.pom);
1037 io_subchannel_init_config(sch);
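
Lines 1022-1037 cover the field initialization done before an I/O subchannel is probed. A sketch of the whole helper, assuming the CIO_MSG_EVENT message text (only its arguments appear in the listing):

	/* Sketch: derive the operational (opm) and logical (lpm) path masks
	 * and the interruption subclass for a freshly detected I/O
	 * subchannel.  The console subchannel is assumed to have all paths
	 * available. */
	static void io_subchannel_init_fields(struct subchannel *sch)
	{
		if (cio_is_console(sch->schid))
			sch->opm = 0xff;
		else
			sch->opm = chp_get_sch_opm(sch);
		sch->lpm = sch->schib.pmcw.pam & sch->opm;
		sch->isc = cio_is_console(sch->schid) ? CONSOLE_ISC : IO_SCH_ISC;

		CIO_MSG_EVENT(6, "Detected device %04x on subchannel 0.%x.%04X -"
			      " PIM = %02X, PAM = %02X, POM = %02X\n",
			      sch->schib.pmcw.dev, sch->schid.ssid,
			      sch->schid.sch_no, sch->schib.pmcw.pim,
			      sch->schib.pmcw.pam, sch->schib.pmcw.pom);

		io_subchannel_init_config(sch);
	}
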
1044 static int io_subchannel_probe(struct subchannel *sch)
1050 if (cio_is_console(sch->schid)) {
1051 rc = sysfs_create_group(&sch->dev.kobj,
1057 sch->schid.ssid, sch->schid.sch_no, rc);
1063 if (dev_get_uevent_suppress(&sch->dev)) {
1065 dev_set_uevent_suppress(&sch->dev, 0);
1066 kobject_uevent(&sch->dev.kobj, KOBJ_ADD);
1068 cdev = sch_get_cdev(sch);
1079 io_subchannel_init_fields(sch);
1080 rc = cio_commit_config(sch);
1083 rc = sysfs_create_group(&sch->dev.kobj,
1092 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1100 set_io_private(sch, io_priv);
1101 css_schedule_eval(sch->schid);
1105 spin_lock_irq(sch->lock);
1106 css_sched_sch_todo(sch, SCH_TODO_UNREG);
1107 spin_unlock_irq(sch->lock);
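
The probe matches at 1083-1107 show a sysfs group being created, a per-subchannel private structure with a DMA-coherent area being set up, and the css layer being asked to evaluate the subchannel; the error path schedules unregistration instead. A sketch of that tail, with the io_priv allocation, the dma_area_dma field name, and the label name assumed:

	/* Sketch of the probe tail: allocate per-subchannel private data plus
	 * a DMA-coherent area for channel-program use, attach it, and let
	 * the css layer evaluate the subchannel.  On any failure, schedule
	 * the subchannel for unregistration. */
	io_priv = kzalloc(sizeof(*io_priv), GFP_KERNEL | GFP_DMA);
	if (!io_priv)
		goto out_schedule;

	io_priv->dma_area = dma_alloc_coherent(&sch->dev,
				sizeof(*io_priv->dma_area),
				&io_priv->dma_area_dma, GFP_KERNEL);
	if (!io_priv->dma_area) {
		kfree(io_priv);
		goto out_schedule;
	}

	set_io_private(sch, io_priv);
	css_schedule_eval(sch->schid);
	return 0;

out_schedule:
	spin_lock_irq(sch->lock);
	css_sched_sch_todo(sch, SCH_TODO_UNREG);
	spin_unlock_irq(sch->lock);
	return 0;
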
1111 static int io_subchannel_remove(struct subchannel *sch)
1113 struct io_subchannel_private *io_priv = to_io_private(sch);
1116 cdev = sch_get_cdev(sch);
1121 spin_lock_irq(sch->lock);
1122 sch_set_cdev(sch, NULL);
1123 set_io_private(sch, NULL);
1124 spin_unlock_irq(sch->lock);
1126 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1129 sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
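
io_subchannel_remove() (1111-1129) mirrors that setup: the child ccw_device is unregistered, the private pointer is detached under the subchannel lock, and the DMA area and sysfs group are released. A sketch, assuming ccw_device_unregister(), the dma_area_dma field name, and the kfree of the private structure; any quiescing of in-flight I/O is omitted here:

	/* Sketch: tear down an I/O subchannel and free its private data. */
	static int io_subchannel_remove(struct subchannel *sch)
	{
		struct io_subchannel_private *io_priv = to_io_private(sch);
		struct ccw_device *cdev;

		cdev = sch_get_cdev(sch);
		if (!cdev)
			goto out_free;

		ccw_device_unregister(cdev);
		spin_lock_irq(sch->lock);
		sch_set_cdev(sch, NULL);
		set_io_private(sch, NULL);
		spin_unlock_irq(sch->lock);
	out_free:
		dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
				  io_priv->dma_area, io_priv->dma_area_dma);
		kfree(io_priv);
		sysfs_remove_group(&sch->dev.kobj, &io_subchannel_attr_group);
		return 0;
	}
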
1133 static void io_subchannel_verify(struct subchannel *sch)
1137 cdev = sch_get_cdev(sch);
1142 static void io_subchannel_terminate_path(struct subchannel *sch, u8 mask)
1146 cdev = sch_get_cdev(sch);
1149 if (cio_update_schib(sch))
1152 if (scsw_actl(&sch->schib.scsw) == 0 || sch->schib.pmcw.lpum != mask)
1158 if (cio_clear(sch))
1169 static int io_subchannel_chp_event(struct subchannel *sch,
1172 struct ccw_device *cdev = sch_get_cdev(sch);
1175 mask = chp_ssd_get_mask(&sch->ssd_info, link);
1180 sch->opm &= ~mask;
1181 sch->lpm &= ~mask;
1184 io_subchannel_terminate_path(sch, mask);
1187 sch->opm |= mask;
1188 sch->lpm |= mask;
1191 io_subchannel_verify(sch);
1194 if (cio_update_schib(sch))
1198 io_subchannel_terminate_path(sch, mask);
1201 if (cio_update_schib(sch))
1203 sch->lpm |= mask & sch->opm;
1206 io_subchannel_verify(sch);
1212 static void io_subchannel_quiesce(struct subchannel *sch)
1217 spin_lock_irq(sch->lock);
1218 cdev = sch_get_cdev(sch);
1219 if (cio_is_console(sch->schid))
1221 if (!sch->schib.pmcw.ena)
1223 ret = cio_disable_subchannel(sch);
1234 spin_unlock_irq(sch->lock);
1237 spin_lock_irq(sch->lock);
1239 ret = cio_disable_subchannel(sch);
1242 spin_unlock_irq(sch->lock);
1245 static void io_subchannel_shutdown(struct subchannel *sch)
1247 io_subchannel_quiesce(sch);
1261 struct subchannel *sch;
1267 sch = to_subchannel(cdev->dev.parent);
1268 if ((sch->schib.pmcw.pam & sch->opm) == sch->vpm)
1376 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1379 CIO_TRACE_EVENT(2, dev_name(&sch->dev));
1381 cio_disable_subchannel(sch);
1398 static enum io_sch_action sch_get_action(struct subchannel *sch)
1402 cdev = sch_get_cdev(sch);
1403 if (cio_update_schib(sch)) {
1414 if (sch->schib.pmcw.dev != cdev->private->dev_id.devno) {
1419 if ((sch->schib.pmcw.pam & sch->opm) == 0) {
1435 * @sch: subchannel
1443 static int io_subchannel_sch_event(struct subchannel *sch, int process)
1451 spin_lock_irqsave(sch->lock, flags);
1452 if (!device_is_registered(&sch->dev))
1454 if (work_pending(&sch->todo_work))
1456 cdev = sch_get_cdev(sch);
1459 action = sch_get_action(sch);
1460 CIO_MSG_EVENT(2, "event: sch 0.%x.%04x, process=%d, action=%d\n",
1461 sch->schid.ssid, sch->schid.sch_no, process,
1472 io_subchannel_verify(sch);
1504 spin_unlock_irqrestore(sch->lock, flags);
1519 spin_lock_irqsave(sch->lock, flags);
1525 sch_set_cdev(sch, NULL);
1526 spin_unlock_irqrestore(sch->lock, flags);
1538 css_sch_device_unregister(sch);
1543 dev_id.ssid = sch->schid.ssid;
1544 dev_id.devno = sch->schib.pmcw.dev;
1547 sch_create_and_recog_new_device(sch);
1550 rc = ccw_device_move_to_sch(cdev, sch);
1556 spin_lock_irqsave(sch->lock, flags);
1558 spin_unlock_irqrestore(sch->lock, flags);
1568 spin_unlock_irqrestore(sch->lock, flags);
1588 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1594 io_subchannel_init_fields(sch);
1595 rc = cio_commit_config(sch);
1598 sch->driver = &io_subchannel_driver;
1599 io_subchannel_recog(cdev, sch);
1629 struct subchannel *sch;
1631 sch = cio_probe_console();
1632 if (IS_ERR(sch))
1633 return ERR_CAST(sch);
1638 io_priv->dma_area = dma_alloc_coherent(&sch->dev,
1643 set_io_private(sch, io_priv);
1644 cdev = io_subchannel_create_ccwdev(sch);
1646 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1648 set_io_private(sch, NULL);
1649 put_device(&sch->dev);
1660 put_device(&sch->dev);
1666 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1667 struct io_subchannel_private *io_priv = to_io_private(sch);
1669 set_io_private(sch, NULL);
1670 dma_free_coherent(&sch->dev, sizeof(*io_priv->dma_area),
1672 put_device(&sch->dev);
1687 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1690 cio_tsch(sch);
1691 if (sch->schib.scsw.cmd.actl == 0)
1760 struct subchannel *sch;
1786 sch = to_subchannel(cdev->dev.parent);
1788 io_subchannel_quiesce(sch);
1831 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1845 spin_lock_irq(sch->lock);
1847 spin_unlock_irq(sch->lock);
1855 spin_lock_irq(sch->lock);
1856 ret = cio_disable_subchannel(sch);
1857 spin_unlock_irq(sch->lock);
1865 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1871 spin_lock_irq(sch->lock);
1873 ret = cio_enable_subchannel(sch, (u32)(addr_t)sch);
1875 spin_unlock_irq(sch->lock);
1893 struct subchannel *sch = to_subchannel(cdev->dev.parent);
1895 spin_lock_irq(sch->lock);
1896 if (cio_is_console(sch->schid)) {
1897 cio_enable_subchannel(sch, (u32)(addr_t)sch);
1906 css_sched_sch_todo(sch, SCH_TODO_EVAL);
1907 spin_unlock_irq(sch->lock);
1911 sch = to_subchannel(cdev->dev.parent);
1912 spin_lock_irq(sch->lock);
1918 spin_unlock_irq(sch->lock);
1921 spin_lock_irq(sch->lock);
1925 spin_unlock_irq(sch->lock);
1949 struct subchannel *sch;
1953 sch = to_subchannel(cdev->dev.parent);
1954 spin_lock_irq(sch->lock);
1955 if (cio_is_console(sch->schid))
1991 spin_unlock_irq(sch->lock);
1993 spin_lock_irq(sch->lock);
2003 spin_unlock_irq(sch->lock);
2005 spin_lock_irq(sch->lock);
2015 spin_unlock_irq(sch->lock);
2021 spin_unlock_irq(sch->lock);
2075 struct subchannel *sch;
2080 sch = to_subchannel(cdev->dev.parent);
2100 if (!sch_is_pseudo_sch(sch))
2101 css_schedule_eval(sch->schid);
2104 if (sch_is_pseudo_sch(sch))
2151 struct subchannel *sch = to_subchannel(cdev->dev.parent);
2153 return chsc_siosl(sch->schid);
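
The final matches (2151-2153) appear to be a small wrapper that lets a ccw device driver trigger model-dependent logging for its subchannel via CHSC. As a sketch, assuming the wrapper is the cio layer's exported ccw_device_siosl() helper:

	/* Sketch: request a subchannel-identification log (SIOSL) for the
	 * subchannel a ccw_device sits on. */
	int ccw_device_siosl(struct ccw_device *cdev)
	{
		struct subchannel *sch = to_subchannel(cdev->dev.parent);

		return chsc_siosl(sch->schid);
	}
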