Lines matching references to "cpts" in am65-cpts.c. Each line below is prefixed with its line number in the source file; only lines that contain the string "cpts" appear, so multi-line statements and comments (e.g. the comment starting at line 351) show up truncated.
23 #include "am65-cpts.h"
189 static void am65_cpts_settime(struct am65_cpts *cpts, u64 start_tstamp)
194 am65_cpts_write32(cpts, val, ts_load_val_hi);
196 am65_cpts_write32(cpts, val, ts_load_val_lo);
198 am65_cpts_write32(cpts, AM65_CPTS_TS_LOAD_EN, ts_load_en);
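The three writes above stage a 64-bit start timestamp across two 32-bit registers before TS_LOAD_EN latches it. A minimal userspace sketch of the split, mirroring the kernel's upper_32_bits()/lower_32_bits() helpers (the timestamp value is an arbitrary example):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t start_tstamp = 1651234567123456789ULL; /* example ns value */
	uint32_t hi = (uint32_t)(start_tstamp >> 32);   /* -> ts_load_val_hi */
	uint32_t lo = (uint32_t)start_tstamp;           /* -> ts_load_val_lo */

	printf("hi=0x%08x lo=0x%08x\n", hi, lo);
	return 0;
}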
201 static void am65_cpts_set_add_val(struct am65_cpts *cpts)
204 cpts->ts_add_val = (NSEC_PER_SEC / cpts->refclk_freq - 1) & 0x7;
206 am65_cpts_write32(cpts, cpts->ts_add_val, ts_add_val);
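ts_add_val at line 204 is the reference-clock period in nanoseconds, minus one; the 3-bit mask implies the refclk must run at 125 MHz or faster for the value to fit. A worked example, assuming a hypothetical 500 MHz reference:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t refclk_freq = 500000000ULL; /* assumed 500 MHz reference */
	uint32_t ts_add_val = (uint32_t)((NSEC_PER_SEC / refclk_freq - 1) & 0x7);

	/* the counter appears to advance ts_add_val + 1 ns per refclk cycle */
	printf("ts_add_val=%u -> +%u ns per cycle\n", ts_add_val, ts_add_val + 1);
	return 0;
}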
209 static void am65_cpts_disable(struct am65_cpts *cpts)
211 am65_cpts_write32(cpts, 0, control);
212 am65_cpts_write32(cpts, 0, int_enable);
227 static int am65_cpts_cpts_purge_events(struct am65_cpts *cpts)
233 list_for_each_safe(this, next, &cpts->events) {
237 list_add(&event->list, &cpts->pool);
243 dev_dbg(cpts->dev, "event pool cleaned up %d\n", removed);
247 static bool am65_cpts_fifo_pop_event(struct am65_cpts *cpts,
250 u32 r = am65_cpts_read32(cpts, intstat_raw);
253 event->timestamp = am65_cpts_read32(cpts, event_0);
254 event->event1 = am65_cpts_read32(cpts, event_1);
255 event->event2 = am65_cpts_read32(cpts, event_2);
256 event->timestamp |= (u64)am65_cpts_read32(cpts, event_3) << 32;
257 am65_cpts_write32(cpts, AM65_CPTS_EVENT_POP, event_pop);
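Lines 253-256 assemble the 64-bit event timestamp from two FIFO words: event_0 carries the low 32 bits and event_3 the high 32 bits (event_1/event_2 hold the remaining event fields), after which EVENT_POP advances the FIFO. The assembly in isolation, with hypothetical register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t event_0 = 0x9abcdef0; /* hypothetical low word from the FIFO */
	uint32_t event_3 = 0x00000001; /* hypothetical high word */
	uint64_t timestamp = (uint64_t)event_0 | ((uint64_t)event_3 << 32);

	printf("timestamp=%llu ns\n", (unsigned long long)timestamp);
	return 0;
}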
263 static int am65_cpts_fifo_read(struct am65_cpts *cpts)
271 spin_lock_irqsave(&cpts->lock, flags);
273 event = list_first_entry_or_null(&cpts->pool,
277 if (am65_cpts_cpts_purge_events(cpts)) {
278 dev_err(cpts->dev, "cpts: event pool empty\n");
285 if (am65_cpts_fifo_pop_event(cpts, event))
291 cpts->timestamp = event->timestamp;
292 dev_dbg(cpts->dev, "AM65_CPTS_EV_PUSH t:%llu\n",
293 cpts->timestamp);
301 list_add_tail(&event->list, &cpts->events);
303 dev_dbg(cpts->dev,
313 dev_dbg(cpts->dev, "AM65_CPTS_EV_HW p:%d t:%llu\n",
316 ptp_clock_event(cpts->ptp_clock, &pevent);
323 dev_dbg(cpts->dev,
330 dev_err(cpts->dev, "cpts: unknown event type\n");
337 spin_unlock_irqrestore(&cpts->lock, flags);
340 ptp_schedule_worker(cpts->ptp_clock, 0);
345 static u64 am65_cpts_gettime(struct am65_cpts *cpts,
351 /* temporarily disable cpts interrupt to avoid intentional
354 am65_cpts_write32(cpts, 0, int_enable);
357 spin_lock_irqsave(&cpts->lock, flags);
359 am65_cpts_write32(cpts, AM65_CPTS_TS_PUSH, ts_push);
360 am65_cpts_read32(cpts, ts_push);
362 spin_unlock_irqrestore(&cpts->lock, flags);
364 am65_cpts_fifo_read(cpts);
366 am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
368 val = cpts->timestamp;
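am65_cpts_gettime() triggers a TS_PUSH, immediately reads the same register back so the posted MMIO write actually reaches the device before the FIFO is drained, then returns the value that the drain stored into cpts->timestamp. A userspace stand-in for the write-then-read-back idiom (the register here is a plain variable, not real MMIO):

#include <stdint.h>
#include <stdio.h>

static void trigger_ts_push(volatile uint32_t *reg)
{
	*reg = 1;      /* write TS_PUSH */
	(void)*reg;    /* read back: don't leave the write posted */
}

int main(void)
{
	uint32_t fake_ts_push = 0; /* stands in for the ioremapped register */

	trigger_ts_push(&fake_ts_push);
	printf("ts_push=%u\n", fake_ts_push);
	return 0;
}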
375 struct am65_cpts *cpts = dev_id;
377 if (am65_cpts_fifo_read(cpts))
378 dev_dbg(cpts->dev, "cpts: unable to obtain a time stamp\n");
386 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
405 adj_period = div_u64(cpts->refclk_freq, ppb);
407 mutex_lock(&cpts->ptp_clk_lock);
409 val = am65_cpts_read32(cpts, control);
414 am65_cpts_write32(cpts, val, control);
417 am65_cpts_write32(cpts, val, ts_ppm_hi);
419 am65_cpts_write32(cpts, val, ts_ppm_low);
421 mutex_unlock(&cpts->ptp_clk_lock);
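adj_period at line 405 is the number of refclk cycles between successive frequency corrections, with TS_PPM_DIR (written via control at line 414) picking the sign. With assumed numbers, a 500 MHz refclk and a +100 ppb request, corrections fire exactly ppb times per second:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t refclk_freq = 500000000ULL; /* assumed 500 MHz reference */
	uint32_t ppb = 100;                  /* requested slew, parts/billion */
	uint64_t adj_period = refclk_freq / ppb;

	printf("one correction every %llu cycles (%llu corrections/s)\n",
	       (unsigned long long)adj_period,
	       (unsigned long long)(refclk_freq / adj_period));
	return 0;
}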
428 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
431 mutex_lock(&cpts->ptp_clk_lock);
432 ns = am65_cpts_gettime(cpts, NULL);
434 am65_cpts_settime(cpts, ns);
435 mutex_unlock(&cpts->ptp_clk_lock);
444 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
447 mutex_lock(&cpts->ptp_clk_lock);
448 ns = am65_cpts_gettime(cpts, sts);
449 mutex_unlock(&cpts->ptp_clk_lock);
455 u64 am65_cpts_ns_gettime(struct am65_cpts *cpts)
460 mutex_lock(&cpts->ptp_clk_lock);
461 ns = am65_cpts_gettime(cpts, NULL);
462 mutex_unlock(&cpts->ptp_clk_lock);
471 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
475 mutex_lock(&cpts->ptp_clk_lock);
476 am65_cpts_settime(cpts, ns);
477 mutex_unlock(&cpts->ptp_clk_lock);
482 static void am65_cpts_extts_enable_hw(struct am65_cpts *cpts, u32 index, int on)
486 v = am65_cpts_read32(cpts, control);
489 cpts->hw_ts_enable |= BIT(index);
492 cpts->hw_ts_enable &= ~BIT(index);
494 am65_cpts_write32(cpts, v, control);
497 static int am65_cpts_extts_enable(struct am65_cpts *cpts, u32 index, int on)
499 if (!!(cpts->hw_ts_enable & BIT(index)) == !!on)
502 mutex_lock(&cpts->ptp_clk_lock);
503 am65_cpts_extts_enable_hw(cpts, index, on);
504 mutex_unlock(&cpts->ptp_clk_lock);
506 dev_dbg(cpts->dev, "%s: ExtTS:%u %s\n",
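The guard at line 499, like its twin at line 583 in am65_cpts_perout_enable(), uses the double-negation idiom so "already in the requested state" compares correctly even when on is any non-zero value. In isolation:

#include <stdio.h>

#define BIT(n) (1U << (n))

int main(void)
{
	unsigned int hw_ts_enable = BIT(2); /* ExtTS input 2 already on */
	int on = 4;                         /* any non-zero request */

	/* !! collapses both sides to 0 or 1 before comparing */
	if (!!(hw_ts_enable & BIT(2)) == !!on)
		printf("no state change needed\n");
	return 0;
}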
512 int am65_cpts_estf_enable(struct am65_cpts *cpts, int idx,
518 cycles = cfg->ns_period * cpts->refclk_freq;
524 am65_cpts_write32(cpts, 0, estf[idx].length);
527 am65_cpts_write32(cpts, val, estf[idx].comp_hi);
529 am65_cpts_write32(cpts, val, estf[idx].comp_lo);
531 am65_cpts_write32(cpts, val, estf[idx].length);
533 dev_dbg(cpts->dev, "%s: ESTF:%u enabled\n", __func__, idx);
539 void am65_cpts_estf_disable(struct am65_cpts *cpts, int idx)
541 am65_cpts_write32(cpts, 0, estf[idx].length);
543 dev_dbg(cpts->dev, "%s: ESTF:%u disabled\n", __func__, idx);
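The ESTF length register is programmed in refclk cycles. Line 518 shows only the ns * Hz product; the scaling down by NSEC_PER_SEC contains no "cpts" and is therefore absent from this listing, so the round-up division below is an assumption. The GenF/perout path at line 559 shows the equivalent conversion inline. With hypothetical numbers:

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL

int main(void)
{
	uint64_t ns_period = 1000;         /* hypothetical 1 us ESTF period */
	uint64_t refclk_freq = 200000000;  /* hypothetical 200 MHz refclk */
	uint64_t cycles = ns_period * refclk_freq;

	cycles = (cycles + NSEC_PER_SEC - 1) / NSEC_PER_SEC; /* assumed round-up */
	printf("ESTF length = %llu cycles\n", (unsigned long long)cycles);
	return 0;
}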
547 static void am65_cpts_perout_enable_hw(struct am65_cpts *cpts,
559 cycles = (ns_period * cpts->refclk_freq) / NSEC_PER_SEC;
566 am65_cpts_write32(cpts, val, genf[req->index].comp_hi);
568 am65_cpts_write32(cpts, val, genf[req->index].comp_lo);
570 am65_cpts_write32(cpts, val, genf[req->index].length);
572 cpts->genf_enable |= BIT(req->index);
574 am65_cpts_write32(cpts, 0, genf[req->index].length);
576 cpts->genf_enable &= ~BIT(req->index);
580 static int am65_cpts_perout_enable(struct am65_cpts *cpts,
583 if (!!(cpts->genf_enable & BIT(req->index)) == !!on)
586 mutex_lock(&cpts->ptp_clk_lock);
587 am65_cpts_perout_enable_hw(cpts, req, on);
588 mutex_unlock(&cpts->ptp_clk_lock);
590 dev_dbg(cpts->dev, "%s: GenF:%u %s\n",
599 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
603 return am65_cpts_extts_enable(cpts, rq->extts.index, on);
605 return am65_cpts_perout_enable(cpts, &rq->perout, on);
626 static bool am65_cpts_match_tx_ts(struct am65_cpts *cpts,
642 spin_lock_irqsave(&cpts->txq.lock, flags);
643 skb_queue_splice_init(&cpts->txq, &txq_list);
644 spin_unlock_irqrestore(&cpts->txq.lock, flags);
646 /* no need to grab txq.lock as access is always done under cpts->lock */
661 dev_dbg(cpts->dev,
669 dev_dbg(cpts->dev,
677 spin_lock_irqsave(&cpts->txq.lock, flags);
678 skb_queue_splice(&txq_list, &cpts->txq);
679 spin_unlock_irqrestore(&cpts->txq.lock, flags);
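Lines 642-679 bracket a classic drain pattern: the queue lock is held only long enough to splice the whole txq onto a private list, timestamp matching then runs lock-free, and unmatched skbs are spliced back for the next pass. A userspace analogy with a toy singly linked list (all names here are illustrative, not from the driver):

#include <pthread.h>
#include <stdio.h>

struct node { struct node *next; int id; };

static struct node *txq;
static pthread_mutex_t txq_lock = PTHREAD_MUTEX_INITIALIZER;

static int match(struct node *n) { return n->id % 2 == 0; }

static void drain_and_match(void)
{
	struct node *list, *keep = NULL, *n;

	pthread_mutex_lock(&txq_lock);
	list = txq;                 /* splice the queue out ... */
	txq = NULL;
	pthread_mutex_unlock(&txq_lock);

	while ((n = list)) {        /* ... process without the lock held */
		list = n->next;
		if (match(n)) {
			printf("timestamp matched for %d\n", n->id);
		} else {
			n->next = keep;   /* collect unmatched entries */
			keep = n;
		}
	}

	pthread_mutex_lock(&txq_lock);
	while ((n = keep)) {        /* splice leftovers back */
		keep = n->next;
		n->next = txq;
		txq = n;
	}
	pthread_mutex_unlock(&txq_lock);
}

int main(void)
{
	struct node a = { NULL, 1 }, b = { &a, 2 };

	txq = &b;
	drain_and_match();
	return 0;
}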
684 static void am65_cpts_find_ts(struct am65_cpts *cpts)
692 spin_lock_irqsave(&cpts->lock, flags);
693 list_splice_init(&cpts->events, &events);
694 spin_unlock_irqrestore(&cpts->lock, flags);
698 if (am65_cpts_match_tx_ts(cpts, event) ||
705 spin_lock_irqsave(&cpts->lock, flags);
706 list_splice_tail(&events, &cpts->events);
707 list_splice_tail(&events_free, &cpts->pool);
708 spin_unlock_irqrestore(&cpts->lock, flags);
713 struct am65_cpts *cpts = container_of(ptp, struct am65_cpts, ptp_info);
717 am65_cpts_find_ts(cpts);
719 spin_lock_irqsave(&cpts->txq.lock, flags);
720 if (!skb_queue_empty(&cpts->txq))
722 spin_unlock_irqrestore(&cpts->txq.lock, flags);
729 * @cpts: cpts handle
735 void am65_cpts_rx_enable(struct am65_cpts *cpts, bool en)
739 mutex_lock(&cpts->ptp_clk_lock);
740 val = am65_cpts_read32(cpts, control);
745 am65_cpts_write32(cpts, val, control);
746 mutex_unlock(&cpts->ptp_clk_lock);
776 * @cpts: cpts handle
782 void am65_cpts_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
795 skb_queue_tail(&cpts->txq, skb);
796 ptp_schedule_worker(cpts->ptp_clock, 0);
802 * @cpts: cpts handle
806 It checks whether the packet can be timestamped and fills internal cpts data
809 void am65_cpts_prep_tx_timestamp(struct am65_cpts *cpts, struct sk_buff *skb)
827 int am65_cpts_phc_index(struct am65_cpts *cpts)
829 return cpts->phc_index;
835 struct am65_cpts *cpts = data;
837 of_clk_del_provider(cpts->clk_mux_np);
838 clk_hw_unregister_mux(cpts->clk_mux_hw);
839 of_node_put(cpts->clk_mux_np);
842 static int cpts_of_mux_clk_setup(struct am65_cpts *cpts,
851 cpts->clk_mux_np = of_get_child_by_name(node, "refclk-mux");
852 if (!cpts->clk_mux_np)
855 num_parents = of_clk_get_parent_count(cpts->clk_mux_np);
857 dev_err(cpts->dev, "mux-clock %pOF must have parents\n",
858 cpts->clk_mux_np);
862 parent_names = devm_kcalloc(cpts->dev, sizeof(char *), num_parents,
869 of_clk_parent_fill(cpts->clk_mux_np, parent_names, num_parents);
871 clk_mux_name = devm_kasprintf(cpts->dev, GFP_KERNEL, "%s.%pOFn",
872 dev_name(cpts->dev), cpts->clk_mux_np);
878 reg = &cpts->reg->rftclk_sel;
882 cpts->clk_mux_hw = clk_hw_register_mux(NULL, clk_mux_name,
885 if (IS_ERR(cpts->clk_mux_hw)) {
886 ret = PTR_ERR(cpts->clk_mux_hw);
890 ret = of_clk_add_hw_provider(cpts->clk_mux_np, of_clk_hw_simple_get,
891 cpts->clk_mux_hw);
895 ret = devm_add_action_or_reset(cpts->dev, cpts_free_clk_mux, cpts);
897 dev_err(cpts->dev, "failed to add clkmux reset action %d", ret);
902 clk_hw_unregister_mux(cpts->clk_mux_hw);
904 of_node_put(cpts->clk_mux_np);
908 static int am65_cpts_of_parse(struct am65_cpts *cpts, struct device_node *node)
912 if (!of_property_read_u32(node, "ti,cpts-ext-ts-inputs", &prop[0]))
913 cpts->ext_ts_inputs = prop[0];
915 if (!of_property_read_u32(node, "ti,cpts-periodic-outputs", &prop[0]))
916 cpts->genf_num = prop[0];
918 return cpts_of_mux_clk_setup(cpts, node);
923 struct am65_cpts *cpts = data;
925 ptp_clock_unregister(cpts->ptp_clock);
926 am65_cpts_disable(cpts);
927 clk_disable_unprepare(cpts->refclk);
933 struct am65_cpts *cpts;
936 cpts = devm_kzalloc(dev, sizeof(*cpts), GFP_KERNEL);
937 if (!cpts)
940 cpts->dev = dev;
941 cpts->reg = (struct am65_cpts_regs __iomem *)regs;
943 cpts->irq = of_irq_get_byname(node, "cpts");
944 if (cpts->irq <= 0) {
945 ret = cpts->irq ?: -ENXIO;
952 ret = am65_cpts_of_parse(cpts, node);
956 mutex_init(&cpts->ptp_clk_lock);
957 INIT_LIST_HEAD(&cpts->events);
958 INIT_LIST_HEAD(&cpts->pool);
959 spin_lock_init(&cpts->lock);
960 skb_queue_head_init(&cpts->txq);
963 list_add(&cpts->pool_data[i].list, &cpts->pool);
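The loop at line 963 threads every pre-allocated event slot onto the free list up front, so the interrupt-driven FIFO drain never allocates memory. A userspace sketch of the same free-list pool (MAX_EVENTS and struct event are illustrative stand-ins):

#include <stdio.h>
#include <stddef.h>

#define MAX_EVENTS 32

struct event { struct event *next; unsigned long long timestamp; };

static struct event pool_data[MAX_EVENTS];
static struct event *pool;

static void pool_init(void)
{
	for (int i = 0; i < MAX_EVENTS; i++) {
		pool_data[i].next = pool;
		pool = &pool_data[i];
	}
}

static struct event *pool_get(void)
{
	struct event *e = pool;

	if (e)
		pool = e->next;
	return e; /* NULL == exhausted, the driver's "event pool empty" case */
}

int main(void)
{
	pool_init();
	printf("first slot: %p\n", (void *)pool_get());
	return 0;
}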
965 cpts->refclk = devm_get_clk_from_child(dev, node, "cpts");
966 if (IS_ERR(cpts->refclk)) {
967 ret = PTR_ERR(cpts->refclk);
973 ret = clk_prepare_enable(cpts->refclk);
979 cpts->refclk_freq = clk_get_rate(cpts->refclk);
981 am65_ptp_info.max_adj = cpts->refclk_freq / AM65_CPTS_MIN_PPM;
982 cpts->ptp_info = am65_ptp_info;
984 if (cpts->ext_ts_inputs)
985 cpts->ptp_info.n_ext_ts = cpts->ext_ts_inputs;
986 if (cpts->genf_num)
987 cpts->ptp_info.n_per_out = cpts->genf_num;
989 am65_cpts_set_add_val(cpts);
991 am65_cpts_write32(cpts, AM65_CPTS_CONTROL_EN |
995 am65_cpts_write32(cpts, AM65_CPTS_INT_ENABLE_TS_PEND_EN, int_enable);
998 am65_cpts_settime(cpts, ktime_to_ns(ktime_get_real()));
1000 cpts->ptp_clock = ptp_clock_register(&cpts->ptp_info, cpts->dev);
1001 if (IS_ERR_OR_NULL(cpts->ptp_clock)) {
1003 PTR_ERR(cpts->ptp_clock));
1004 ret = cpts->ptp_clock ? PTR_ERR(cpts->ptp_clock) : -ENODEV;
1007 cpts->phc_index = ptp_clock_index(cpts->ptp_clock);
1009 ret = devm_add_action_or_reset(dev, am65_cpts_release, cpts);
1015 ret = devm_request_threaded_irq(dev, cpts->irq, NULL,
1017 IRQF_ONESHOT, dev_name(dev), cpts);
1019 dev_err(cpts->dev, "error attaching irq %d\n", ret);
1024 am65_cpts_read32(cpts, idver),
1025 cpts->refclk_freq, cpts->ts_add_val);
1027 return cpts;
1030 clk_disable_unprepare(cpts->refclk);
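am65_cpts_create() returns either a valid handle or an error encoded in the pointer itself, which is why probe can finish with PTR_ERR_OR_ZERO() at line 1049. The convention, reimplemented in userspace for illustration:

#include <stdio.h>

#define MAX_ERRNO 4095

static void *ERR_PTR(long error) { return (void *)error; }
static long PTR_ERR(const void *ptr) { return (long)ptr; }
static int IS_ERR(const void *ptr)
{
	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	void *cpts = ERR_PTR(-19); /* -ENODEV, as the create path can return */

	if (IS_ERR(cpts))
		printf("create failed: %ld\n", PTR_ERR(cpts));
	return 0;
}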
1039 struct am65_cpts *cpts;
1043 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "cpts");
1048 cpts = am65_cpts_create(dev, base, node);
1049 return PTR_ERR_OR_ZERO(cpts);
1053 { .compatible = "ti,am65-cpts", },
1054 { .compatible = "ti,j721e-cpts", },
1062 .name = "am65-cpts",